path | concatenated_notebook
---|---
pymaceuticals_HW.ipynb | ###Markdown
Observations and Insights
###Code
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as st
import numpy as np
from scipy.stats import linregress
# Study data files
mouse_metadata_path = "data/Mouse_metadata.csv"
study_results_path = "data/Study_results.csv"
# Read the mouse data and the study results
mouse_metadata = pd.read_csv(mouse_metadata_path)
study_results = pd.read_csv(study_results_path)
# Combine the data into a single dataset
merged_df = pd.merge(mouse_metadata, study_results, on = "Mouse ID")
# Display the data table for preview
merged_df.head(30)
# Checking the number of mice.
total_mice = merged_df["Mouse ID"].nunique()
total_mice
# Getting the duplicate mice by ID number that shows up for Mouse ID and Timepoint
duplicate_id = merged_df.loc[merged_df.duplicated(subset = ["Mouse ID", "Timepoint"]), "Mouse ID"].unique()
duplicate_id
# Optional: Get all the data for the duplicate mouse ID.
optional_df = merged_df.loc[merged_df["Mouse ID"]=="g989"]
optional_df
# Create a clean DataFrame by dropping the duplicate mouse by its ID.
clean_df = merged_df.loc[merged_df["Mouse ID"]!="g989"]
clean_df
# Checking the number of mice in the clean DataFrame.
total_mice = clean_df["Mouse ID"].nunique()
total_mice
###Output
_____no_output_____
###Markdown
Summary Statistics
###Code
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
# Use groupby and summary statistical methods to calculate the following properties of each drug regimen:
# mean, median, variance, standard deviation, and SEM of the tumor volume.
# Assemble the resulting series into a single summary dataframe.
mean_data = clean_df.groupby("Drug Regimen").mean()["Tumor Volume (mm3)"]
median_data = clean_df.groupby("Drug Regimen").median()["Tumor Volume (mm3)"]
variance_data = clean_df.groupby("Drug Regimen").var()["Tumor Volume (mm3)"]
std_data = clean_df.groupby("Drug Regimen").std()["Tumor Volume (mm3)"]
sem_data = clean_df.groupby("Drug Regimen").sem()["Tumor Volume (mm3)"]
stats_df = pd.DataFrame({"Mean":mean_data,
"Median":median_data,
"Variance":variance_data,
"STD":std_data,
"SEM":sem_data})
stats_df
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
summary_df2 = clean_df.groupby("Drug Regimen").agg({"Tumor Volume (mm3)":["mean","median","var","std","sem"]})
# Using the aggregation method, produce the same summary statistics in a single line
summary_df2
###Output
_____no_output_____
###Markdown
Bar and Pie Charts
###Code
# Generate a bar plot showing the total number of unique mice tested on each drug regimen using pandas.
bar_plot = clean_df.groupby(["Drug Regimen"]).count()["Mouse ID"]
bar_plot.plot(kind="bar", figsize=(10,5))
plt.title("Drug Distribution")
plt.xlabel("Drug Regimen")
plt.ylabel("Number of Mice")
plt.tight_layout()
plt.show()
# Generate a bar plot showing the total number of unique mice tested on each drug regimen using pyplot.
bar_plot
x_axis= np.arange(0, len(bar_plot))
tick_locations = []
for x in x_axis:
tick_locations.append(x)
plt.title("Drug Distribution")
plt.xlabel("Drug Regimen")
plt.ylabel("# of Mice")
plt.xlim(0, len(bar_plot)-0.25)
plt.ylim(0, max(bar_plot)+20)
plt.bar(x_axis, bar_plot, facecolor="g", alpha=0.5, align="center")
plt.xticks(tick_locations, ["Capomulin", "Ceftamin", "Infubinol", "Ketapril", "Naftisol", "Placebo", "Propriva", "Ramicane", "Stelasyn", "Zoniferol"], rotation = "vertical")
plt.show()
# Generate a pie plot showing the distribution of female versus male mice using pandas
males = clean_df[clean_df["Sex"]== "Male"]["Mouse ID"].nunique()
females = clean_df[clean_df["Sex"]== "Female"]["Mouse ID"].nunique()
gender_df = pd.DataFrame({"Sex": ["Male", "Female"], "Count": [males, females]})
gender_df_index = gender_df.set_index("Sex")
plot = gender_df_index.plot(kind="pie", y="Count", autopct="%1.1f%%", startangle=120)
plot
# Generate a pie plot showing the distribution of female versus male mice using pyplot
labels = ["Male", "Female"]
sizes = [males, females]
colors = ["Green", "Yellow"]
plt.pie(sizes, labels=labels, colors=colors,
autopct="%1.1f%%", shadow=True, startangle=140)
plt.axis("equal")
###Output
_____no_output_____
###Markdown
Quartiles, Outliers and Boxplots
###Code
# Calculate the final tumor volume of each mouse across four of the treatment regimens:
# Capomulin, Ramicane, Infubinol, and Ceftamin
filt_cap = clean_df.loc[clean_df["Drug Regimen"] == "Capomulin"]
filt_ram = clean_df.loc[clean_df["Drug Regimen"] == "Ramicane"]
filt_infu = clean_df.loc[clean_df["Drug Regimen"] == "Infubinol"]
filt_ceft = clean_df.loc[clean_df["Drug Regimen"] == "Ceftamin"]
# Start by getting the last (greatest) timepoint for each mouse
last_timepoint_cap = filt_cap.groupby("Mouse ID")["Timepoint"].max()
last_timepoint_ram = filt_ram.groupby("Mouse ID")["Timepoint"].max()
last_timepoint_infu = filt_infu.groupby("Mouse ID")["Timepoint"].max()
last_timepoint_ceft = filt_ceft.groupby("Mouse ID")["Timepoint"].max()
# Merge this group df with the original dataframe to get the tumor volume at the last timepoint
fin_vol_cap = pd.DataFrame(last_timepoint_cap)
cap_merge = pd.merge(fin_vol_cap, clean_df, on = ("Mouse ID", "Timepoint"), how = "left")
fin_vol_ram = pd.DataFrame(last_timepoint_ram)
ram_merge = pd.merge(fin_vol_ram, clean_df, on = ("Mouse ID", "Timepoint"), how = "left")
fin_vol_infu = pd.DataFrame(last_timepoint_infu)
infu_merge = pd.merge(fin_vol_infu, clean_df, on = ("Mouse ID", "Timepoint"), how = "left")
fin_vol_ceft = pd.DataFrame(last_timepoint_ceft)
ceft_merge = pd.merge(fin_vol_ceft, clean_df, on = ("Mouse ID", "Timepoint"), how = "left")
# Put treatments into a list for the for loop (and later for plot labels)
treatments = [cap_merge, ram_merge, infu_merge, ceft_merge]
# Create empty list to fill with tumor vol data (for plotting)
tumor_volume_data_plot = []
for treatment in treatments:
print(treatment)
# Calculate the IQR and quantitatively determine if there are any potential outliers.
# Determine outliers using upper and lower bounds
#Capomulin
cap_list = cap_merge["Tumor Volume (mm3)"]
quartiles = cap_list.quantile([.25,.5,.75])
lowerq = quartiles[0.25]
upperq = quartiles[0.75]
iqr = upperq-lowerq
lower_bound = lowerq - (1.5*iqr)
upper_bound = upperq + (1.5*iqr)
tumor_volume_data_plot.append(cap_list)
print(f"Capomulin values below {lower_bound} or above {upper_bound} could be outliers.")
print(f"Capomulin IQR is {iqr}.")
#Ramicane
ram_list = ram_merge["Tumor Volume (mm3)"]
quartiles = ram_list.quantile([.25,.5,.75])
lowerq = quartiles[0.25]
upperq = quartiles[0.75]
iqr = upperq-lowerq
lower_bound = lowerq - (1.5*iqr)
upper_bound = upperq + (1.5*iqr)
tumor_volume_data_plot.append(ram_list)
print(f"Ramicane values below {lower_bound} or above {upper_bound} could be outliers.")
print(f"Ramicane IQR is {iqr}.")
#Infubinol
infu_list = infu_merge["Tumor Volume (mm3)"]
quartiles = infu_list.quantile([.25,.5,.75])
lowerq = quartiles[0.25]
upperq = quartiles[0.75]
iqr = upperq-lowerq
lower_bound = lowerq - (1.5*iqr)
upper_bound = upperq + (1.5*iqr)
tumor_volume_data_plot.append(infu_list)
print(f"Infubinol values below {lower_bound} or above {upper_bound} could be outliers.")
print(f"Infubinol IQR is {iqr}.")
#Ceftamin
ceft_list = ceft_merge["Tumor Volume (mm3)"]
quartiles = ceft_list.quantile([.25,.5,.75])
lowerq = quartiles[0.25]
upperq = quartiles[0.75]
iqr = upperq-lowerq
lower_bound = lowerq - (1.5*iqr)
upper_bound = upperq + (1.5*iqr)
tumor_volume_data_plot.append(ceft_list)
print(f"Ceftamin values below {lower_bound} or above {upper_bound} could be outliers.")
print(f"Ceftamin IQR is {iqr}.")
# Generate a box plot of the final tumor volume of each mouse across four regimens of interest
tumor_volume_data_plot
fig1, ax1 = plt.subplots()
ax1.set_title('Final Tumor Volume of Each Mouse')
ax1.set_ylabel('Final Tumor Volume (mm3)')
ax1.set_xlabel('Drug Regimen')
ax1.boxplot(tumor_volume_data_plot, labels = ["Capomulin", "Ramicane", "Infubinol", "Ceftamin"])
plt.show()
###Output
_____no_output_____
###Markdown
Line and Scatter Plots
###Code
# Generate a line plot of tumor volume vs. time point for a mouse treated with Capomulin
x_axis = np.arange(0,46,5)
tumor_vol = [45, 45.41, 39.11, 39.77, 36.06, 36.61, 32.91, 30.20, 28.16, 28.48]
plt.xlabel("Time Point")
plt.ylabel("Tumor Volume")
plt.title("Capomulin (x401)")
plt.ylim(25, 50)
plt.xlim(0, 45)
tumor_line, = plt.plot(x_axis, tumor_vol, marker="*", color="blue", linewidth=1, label="Capomulin")
plt.show()
# Generate a scatter plot of average tumor volume vs. mouse weight for the Capomulin regimen
drug_df = clean_df.loc[clean_df["Drug Regimen"] == "Capomulin"]
weight_tumor = drug_df.loc[:, ["Mouse ID", "Weight (g)", "Tumor Volume (mm3)"]]
avg_tumor_volume = pd.DataFrame(weight_tumor.groupby(["Mouse ID", "Weight (g)"])["Tumor Volume (mm3)"].mean()).reset_index()
avg_tumor_volume = avg_tumor_volume.set_index("Mouse ID")
avg_tumor_volume.plot(kind="scatter", x="Weight (g)", y="Tumor Volume (mm3)", grid=True, figsize=(8,8), title="Weight vs. Average Tumor Volume for Capomulin")
plt.show()
###Output
_____no_output_____
###Markdown
Correlation and Regression
###Code
# Calculate the correlation coefficient and linear regression model
# for mouse weight and average tumor volume for the Capomulin regimen
mouse_weight = avg_tumor_volume.iloc[:,0]
tumor_volume = avg_tumor_volume.iloc[:,1]
correlation = st.pearsonr(mouse_weight,tumor_volume)
print(f"The correlation between both factors is {round(correlation[0],2)}")
x_values = avg_tumor_volume['Weight (g)']
y_values = avg_tumor_volume['Tumor Volume (mm3)']
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values,y_values)
plt.plot(x_values,regress_values,"r-")
plt.annotate(line_eq,(6,10),fontsize=15,color="red")
plt.xlabel('Mouse Weight (g)')
plt.ylabel('Average Tumor Volume (mm3)')
plt.title('Linear Regression')
plt.show()
###Output
_____no_output_____ |
vimms/notebook/MakeSpectralLibraries.ipynb | ###Markdown
Make spectral libraries
###Code
import sys, os
sys.path.append('/Users/simon/git/vimms')
sys.path.insert(0,'/Users/simon/git/mass-spec-utils/')
from vimms.Common import save_obj
from tqdm import tqdm
%load_ext autoreload
%autoreload 2
library_cache = '/Users/simon/clms_er/library_cache'
###Output
_____no_output_____
###Markdown
Massbank
###Code
from mass_spec_utils.library_matching.spec_libraries import MassBankLibrary
###Output
_____no_output_____
###Markdown
Path to the local version of the massbank repo
###Code
massbank_data_path = '/Users/simon/git/MassBank-Data/' # final slash is important!
mb = MassBankLibrary(mb_dir=massbank_data_path, polarity='POSITIVE')
save_obj(mb, os.path.join(library_cache, 'massbank_pos.p'))
mb = MassBankLibrary(mb_dir=massbank_data_path, polarity='NEGATIVE')
save_obj(mb, os.path.join(library_cache, 'massbank_neg.p'))
mb = MassBankLibrary(mb_dir=massbank_data_path, polarity='all')
save_obj(mb, os.path.join(library_cache, 'massbank_all.p'))
###Output
Loading records from /Users/simon/git/MassBank-Data/Athens_Univ/
Loaded 5252 new records
Loading records from /Users/simon/git/MassBank-Data/MetaboLights/
Loaded 58 new records
Loading records from /Users/simon/git/MassBank-Data/MPI_for_Chemical_Ecology/
Loaded 691 new records
Loading records from /Users/simon/git/MassBank-Data/JEOL_Ltd/
Loaded 45 new records
Loading records from /Users/simon/git/MassBank-Data/GL_Sciences_Inc/
Loaded 174 new records
Loading records from /Users/simon/git/MassBank-Data/Env_Anal_Chem_U_Tuebingen/
Loaded 128 new records
Loading records from /Users/simon/git/MassBank-Data/RIKEN_ReSpect/
Loaded 4642 new records
Loading records from /Users/simon/git/MassBank-Data/Boise_State_Univ/
Loaded 4 new records
Loading records from /Users/simon/git/MassBank-Data/LCSB/
Loaded 7299 new records
Loading records from /Users/simon/git/MassBank-Data/PFOS_research_group/
Loaded 413 new records
Loading records from /Users/simon/git/MassBank-Data/Eawag/
Loaded 11191 new records
Loading records from /Users/simon/git/MassBank-Data/IPB_Halle/
Loaded 677 new records
Loading records from /Users/simon/git/MassBank-Data/Washington_State_Univ/
Loaded 2626 new records
Loading records from /Users/simon/git/MassBank-Data/Univ_Toyama/
Loaded 253 new records
Loading records from /Users/simon/git/MassBank-Data/UOEH/
Loaded 35 new records
Loading records from /Users/simon/git/MassBank-Data/Fukuyama_Univ/
Loaded 340 new records
Loading records from /Users/simon/git/MassBank-Data/Waters/
Loaded 2992 new records
Loading records from /Users/simon/git/MassBank-Data/UPAO/
Loaded 12 new records
Loading records from /Users/simon/git/MassBank-Data/UFZ/
Loaded 1261 new records
Loading records from /Users/simon/git/MassBank-Data/AAFC/
Loaded 950 new records
Loading records from /Users/simon/git/MassBank-Data/Metabolon/
Loaded 149 new records
Loading records from /Users/simon/git/MassBank-Data/RIKEN_NPDepo/
Loaded 1956 new records
Loading records from /Users/simon/git/MassBank-Data/Eawag_Additional_Specs/
Loaded 895 new records
Loading records from /Users/simon/git/MassBank-Data/Nihon_Univ/
Loaded 706 new records
Loading records from /Users/simon/git/MassBank-Data/NAIST/
Loaded 621 new records
Loading records from /Users/simon/git/MassBank-Data/CASMI_2012/
Loaded 26 new records
Loading records from /Users/simon/git/MassBank-Data/HBM4EU/
Loaded 1925 new records
Loading records from /Users/simon/git/MassBank-Data/BGC_Munich/
Loaded 903 new records
Loading records from /Users/simon/git/MassBank-Data/Tottori_Univ/
Loaded 16 new records
Loading records from /Users/simon/git/MassBank-Data/BS/
Loaded 1318 new records
Loading records from /Users/simon/git/MassBank-Data/Chubu_Univ/
Loaded 2563 new records
Loading records from /Users/simon/git/MassBank-Data/MSSJ/
Loaded 328 new records
Loading records from /Users/simon/git/MassBank-Data/ISAS_Dortmund/
Loaded 513 new records
Loading records from /Users/simon/git/MassBank-Data/Kyoto_Univ/
Loaded 184 new records
Loading records from /Users/simon/git/MassBank-Data/Keio_Univ/
Loaded 4780 new records
Loading records from /Users/simon/git/MassBank-Data/RIKEN_IMS/
Loaded 1140 new records
Loading records from /Users/simon/git/MassBank-Data/Literature_Specs/
Loaded 39 new records
Loading records from /Users/simon/git/MassBank-Data/Osaka_MCHRI/
Loaded 20 new records
Loading records from /Users/simon/git/MassBank-Data/KWR/
Loaded 207 new records
Loading records from /Users/simon/git/MassBank-Data/RIKEN/
Loaded 11935 new records
Loading records from /Users/simon/git/MassBank-Data/Fiocruz/
Loaded 1107 new records
Loading records from /Users/simon/git/MassBank-Data/Fac_Eng_Univ_Tokyo/
Loaded 12379 new records
Loading records from /Users/simon/git/MassBank-Data/Univ_Connecticut/
Loaded 510 new records
Loading records from /Users/simon/git/MassBank-Data/NaToxAq/
Loaded 3756 new records
Loading records from /Users/simon/git/MassBank-Data/CASMI_2016/
Loaded 622 new records
Loading records from /Users/simon/git/MassBank-Data/Kazusa/
Loaded 273 new records
Loading records from /Users/simon/git/MassBank-Data/Osaka_Univ/
Loaded 449 new records
###Markdown
GNPS

Using Florian's file, because it has inchikeys
###Code
json_file = '/Users/simon/Downloads/gnps_positive_ionmode_cleaned_by_matchms_and_lookups.json'
import json
with open(json_file,'r') as f:
payload = json.loads(f.read())
from mass_spec_utils.library_matching.spectrum import SpectralRecord
neg_intensities = []
def json_to_spectrum(json_dat):
precursor_mz = json_dat['precursor_mz']
original_file = json_file
spectrum_id = json_dat['spectrum_id']
inchikey = json_dat['inchikey_smiles']
peaks = json_dat['peaks_json']
metadata = {}
for k,v in json_dat.items():
if not k == 'peaks':
metadata[k] = v
mz,i = zip(*peaks)
if min(i) < 0:
neg_intensities.append(spectrum_id)
return None
else:
new_spectrum = SpectralRecord(precursor_mz, peaks, metadata, original_file, spectrum_id)
return new_spectrum
records = {}
for jd in tqdm(payload):
new_spec = json_to_spectrum(jd)
if new_spec is not None:
records[new_spec.spectrum_id] = new_spec
def filter_min_peaks(spectrum, min_n_peaks=10):
n_peaks = len(spectrum.peaks)
if n_peaks < min_n_peaks:
return None
else:
return spectrum
def filter_rel_intensity(spectrum, min_rel=0.01, max_rel=1.):
pp = spectrum.peaks
mz,i = zip(*pp)
max_i = max(i)
new_pp = []
for p in pp:
ri = p[1]/max_i
if ri <= max_rel and ri >= min_rel:
new_pp.append(p)
spectrum.peaks = new_pp
return spectrum
new_records = {}
for sid in tqdm(records.keys()):
spec = records[sid]
ss = filter_min_peaks(spec)
if ss is not None:
new_records[sid] = ss
else:
continue
ss = filter_rel_intensity(ss)
new_records[sid] = ss
for sid, ss in new_records.items():
ss.metadata['inchikey'] = ss.metadata['inchikey_smiles']
from mass_spec_utils.library_matching.spec_libraries import SpectralLibrary
sl = SpectralLibrary()
sl.records = new_records
sl.sorted_record_list = sl._dic2list()
save_obj(sl, os.path.join(library_cache,'gnps.p'))
###Output
_____no_output_____ |
modulo - 1 Fundamentos/desafio_final1.ipynb | ###Markdown
Final Challenge 1
Machine Learning Analyst Bootcamp @ IGTI

**Objectives**:
* Data pre-processing.
* Anomaly detection.
* Data processing.
* Correlations.
* Dimensionality reduction.
* Supervised and unsupervised algorithms.

**Analysis with:**
* Dimensionality reduction
* Clustering with K-means
* Supervised classification
###Code
import pandas as pd
import numpy as np
import seaborn as sns
from google.colab import drive
drive.mount('/content/drive')
cars = pd.read_csv('/content/drive/My Drive/Data Science/Bootcamp Analista de ML/Desafio Final/cars.csv')
###Output
_____no_output_____
###Markdown
Getting to know the dataset

**Meaning of the columns:**
* mpg = miles per gallon
* cylinders = number of cylinders, the source of the mechanical force that moves the vehicle
* cubicinches = total volume of air and fuel burned by the cylinders through the engine
* hp = horsepower
* weightlbs = weight of the car in pounds
* time-to-60 = time in seconds for the car to go from 0 to 60 miles per hour
* year = year of manufacture
* brand = make, origin, etc.

1 kg = 2.20462 lbs
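As a quick illustration of the unit note above, a small hedged sketch (the `weight_kg` column is only for illustration and assumes `weightlbs` has already been converted to a numeric type, as done in Question 2 below):

```python
# Illustrative only: convert the weight column from pounds to kilograms.
cars["weight_kg"] = cars["weightlbs"] / 2.20462
cars[["weightlbs", "weight_kg"]].head()
```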
###Code
cars.head()
cars.describe()
# rows x columns
cars.shape
# Are there any missing values?
cars.isnull().sum()
cars.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 261 entries, 0 to 260
Data columns (total 8 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 mpg 261 non-null float64
1 cylinders 261 non-null int64
2 cubicinches 261 non-null object
3 hp 261 non-null int64
4 weightlbs 261 non-null object
5 time-to-60 261 non-null int64
6 year 261 non-null int64
7 brand 261 non-null object
dtypes: float64(1), int64(4), object(3)
memory usage: 16.4+ KB
###Markdown
Quiz: Final Challenge

Question 1 - After using the pandas library to read the data, regarding the loaded values it is CORRECT to state that:
###Code
cars.isnull().sum()
###Output
_____no_output_____
###Markdown
**No null values were found after reading the data.** Question 2 - Convert the "cubicinches" and "weightlbs" columns from "string" to numeric type using pd.to_numeric() with the parameter errors='coerce'. After this transformation, it is CORRECT to state:
###Code
# Converting object columns to numeric
cars['cubicinches'] = pd.to_numeric(cars['cubicinches'], errors='coerce')
cars['weightlbs'] = pd.to_numeric(cars['weightlbs'], errors='coerce')
# Checking the result
cars.info()
cars.isnull().sum()
###Output
_____no_output_____
###Markdown
**This transformation adds null values to our dataset.** Question 3 - Indicate the indices of the values in the dataset that "forced" pandas to interpret the "cubicinches" variable as a string.
###Code
indices_cub = [cars[cars['cubicinches'].isnull()]]
indices_cub
###Output
_____no_output_____
###Markdown
Question 4 - After converting the "string" variables to numeric values, how many null values (cells in the dataframe) are now present in the dataset?
###Code
cars.isnull().sum()
###Output
_____no_output_____
###Markdown
Question 5 - Replace the null values introduced into the dataset after the transformation with the mean value of each column. What is the new mean value of the "weightlbs" column?
###Code
cars['cubicinches'] = cars['cubicinches'].fillna(cars['cubicinches'].mean())
cars['weightlbs'] = cars['weightlbs'].fillna(cars['weightlbs'].mean())
cars.isnull().sum()
cars['weightlbs'].mean()
###Output
_____no_output_____
###Markdown
Question 6 - After replacing the null values with the column means, select the columns ['mpg', 'cylinders', 'cubicinches', 'hp', 'weightlbs', 'time-to-60', 'year']. What is the median value of the 'mpg' feature?
###Code
cars['mpg'].median()
###Output
_____no_output_____
###Markdown
Question 7 - Which statement about the value 14.00 for the "time-to-60" variable is CORRECT?
###Code
cars.describe()
###Output
_____no_output_____
###Markdown
75% of the data is greater than the value of 14.00. Question 8 - Regarding the Pearson correlation coefficient between the "cylinders" and "mpg" variables, it is correct to state:
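For reference, `st.pearsonr` used below computes the standard Pearson coefficient

$$ r = \frac{\sum_i (x_i - \bar{x})(y_i - \bar{y})}{\sqrt{\sum_i (x_i - \bar{x})^2}\,\sqrt{\sum_i (y_i - \bar{y})^2}}, $$

which ranges from -1 (perfect negative linear relationship) to +1 (perfect positive linear relationship).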
###Code
from scipy import stats
stats.pearsonr(cars['cylinders'], cars['mpg'])
from sklearn.metrics import r2_score
r2_score(cars['cylinders'], cars['mpg'])
###Output
_____no_output_____
###Markdown
Even though it is not equal to 1, it is possible to say that as the "cylinders" variable increases, the "mpg" variable also increases in the same direction. Question 9 - About the boxplot of the "hp" variable, all of the following are correct, EXCEPT:
###Code
sns.boxplot(cars['hp'])
###Output
/usr/local/lib/python3.6/dist-packages/seaborn/_decorators.py:43: FutureWarning: Pass the following variable as a keyword arg: x. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.
FutureWarning
###Markdown
Each of the quartiles contains the same number of values for the "hp" variable. Question 10 - After normalizing with the StandardScaler() function, what is the largest value for the "hp" variable?
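As a reminder, StandardScaler standardizes each column with the z-score transformation

$$ z = \frac{x - \mu}{\sigma}, $$

so the largest value asked for here is the largest standardized "hp" value, not the largest raw horsepower.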
###Code
cars.head()
cars_normalizar = cars.drop('brand', axis=1)
cars_normalizar.head()
from sklearn.preprocessing import StandardScaler
normalizar = StandardScaler() # instantiating the StandardScaler
scaler = normalizar.fit(cars_normalizar.values) # fitting the scaler to the dataset
cars_normalizado = scaler.transform(cars_normalizar.values) # normalizing
cars_normalizado = pd.DataFrame(cars_normalizado, columns=cars_normalizar.columns) # converting the numpy array into a pandas DataFrame
cars_normalizado['hp'].max()
###Output
_____no_output_____
###Markdown
Question 11 - Applying PCA as defined above, what is the value of the variance explained by the first principal component?
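For reference, `explained_variance_ratio_` used below reports, for each principal component, the fraction of the total variance it captures,

$$ \frac{\lambda_i}{\sum_j \lambda_j}, $$

where $\lambda_i$ is the variance along the $i$-th component; the ratios sum to 1 when all components are kept.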
###Code
from sklearn.decomposition import PCA
pca = PCA(n_components=7)
principais = pca.fit_transform(cars_normalizado)
pca.explained_variance_ratio_
###Output
_____no_output_____
###Markdown
Question 12 - Use the first three principal components to build the K-means model with 3 clusters. About the clusters, it is INCORRECT to state that:
###Code
pca.explained_variance_ratio_
principais_componentes = pd.DataFrame(principais)
principais_componentes.head()
principais_componentes_k = principais_componentes.iloc[:, :3] # selecting all rows and the first 3 columns
principais_componentes_k.columns = ['componente 1', 'componente 2', 'componente 3']
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters=3, random_state=42).fit(principais_componentes_k) # parameters given in the challenge
principais_componentes_k['cluster'] = kmeans.labels_ # adding a column with the cluster each car belongs to
principais_componentes_k
principais_componentes_k['cluster'].value_counts() # counting the number of elements in each generated cluster
###Output
_____no_output_____
###Markdown
Question 13 - After all the processing performed in the previous items, create a column containing the vehicle-efficiency variable. Vehicles that travel more than 25 miles on one gallon ("mpg" > 25) should be considered efficient. Use the columns ['cylinders', 'cubicinches', 'hp', 'weightlbs', 'time-to-60'] as inputs and the efficiency column you created as the output. Using the decision tree as shown, what is the accuracy of the model?
###Code
cars.head()
entradas = np.array(cars[['cylinders' ,'cubicinches' ,'hp' ,'weightlbs' ,'time-to-60']])
saidas = np.array(cars['mpg'] > 25).astype(int) # 1 = efficient (mpg > 25), 0 = not efficient
entradas
saidas
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(entradas, saidas, test_size=0.30, random_state=42)
from sklearn.tree import DecisionTreeClassifier
classificador = DecisionTreeClassifier(random_state=42)
classificador.fit(x_train, y_train)
y_pred = classificador.predict(x_test)
from sklearn.metrics import accuracy_score
acuracia = accuracy_score(y_test, y_pred)
acuracia
###Output
_____no_output_____
###Markdown
Question 14 - About the confusion matrix obtained after applying the decision tree, as shown earlier, it is INCORRECT to state:
###Code
from sklearn.metrics import confusion_matrix
confusion_matrix(y_test, y_pred)
###Output
_____no_output_____
###Markdown
There are twice as many vehicles considered not efficient as there are instances of efficient vehicles. Question 15 - Using the same train/test split of the data employed in the previous analysis, apply the logistic regression model as shown in the assignment description. Comparing the results obtained with the decision tree model, it is INCORRECT to state that:
###Code
from sklearn.linear_model import LogisticRegression
logreg = LogisticRegression(random_state=42).fit(x_train, y_train)
logreg_y_pred = logreg.predict(x_test)
accuracy_score(y_test, logreg_y_pred)
###Output
_____no_output_____ |
ces/carbon.ipynb | ###Markdown
If not explicitly mentioned otherwise we assume:
- RCP2.6 scenario or the lowest ppm concentration reported (stabilized around 400-420)
- Linear phase-out of fossil fuels from model start time (2000-2015) by 2100
- BAU scenario would lead to RCP6 or higher
- As it is widely accepted that in order to obtain RCP2.6, emissions must at least cease or turn into removals in the geological near-term (throughout this century), whenever the carbon price is given in terms of percentage reduction from current levels, a linear 100% reduction is assumed from model start time (2000-2015) by 2100
- If ranges are reported, the mean is taken
- If the model reports the price in dollars per ton of carbon, it is converted to dollars per ton of carbon dioxide
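To make the last two assumptions concrete, here is a small illustrative sketch (the start year and values are placeholders, not taken from any of the models below) of the carbon-to-CO2 mass relation behind the unit conversion and of a linear phase-out path:

```python
# Illustrative only: mass relation used for the $/tC vs $/tCO2 unit conversion,
# and a linear 100% phase-out between a placeholder start year and 2100.
import numpy as np

tons_co2_per_ton_carbon = 44.0 / 12.0   # molar masses: CO2 = 44 g/mol, C = 12 g/mol

start_year, end_year = 2010, 2100       # placeholder model start time
years = np.arange(start_year, end_year + 1)
emissions_fraction = np.linspace(1.0, 0.0, len(years))  # share of current emissions remaining

print(emissions_fraction[:3])
```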
###Code
import pandas as pd, numpy as np, matplotlib.pyplot as plt, matplotlib as mpl
%matplotlib inline
mpl.style.use('classic')
d=[]
#d.append(pd.read_csv('carbon/alberth_hope2006.csv',header=None))
#d.append(pd.read_csv('carbon/alberth_hope2006_2.csv',header=None))
d.append(pd.read_csv('carbon/bauer2012.csv',header=None))
d.append(pd.read_csv('carbon/bauer2012_2a.csv',header=None))
d.append(pd.read_csv('carbon/bauer2012_2b.csv',header=None))
d.append(pd.read_csv('carbon/bauer2012_2c.csv',header=None))
d.append(pd.read_csv('carbon/bosetti2014a.csv',header=None))
d.append(pd.read_csv('carbon/bosetti2014b.csv',header=None))
d.append(pd.read_csv('carbon/bosetti2014c.csv',header=None))
d.append(pd.read_csv('carbon/cai2015.csv',header=None))
d.append(pd.read_csv('carbon/chen2005.csv',header=None))
d.append(pd.read_csv('carbon/edmonds_GCAM1994.csv',header=None))
d.append(pd.read_csv('carbon/kriegler2015_2.csv',header=None))
#d.append(pd.read_csv('carbon/luderer_REMIND2015.csv',header=None))
d.append(pd.read_csv('carbon/manne_richels_MERGE2005.csv',header=None))
d.append(pd.read_csv('carbon/paltsev2005.csv',header=None))
d.append(pd.read_csv('carbon/russ_POLES2012.csv',header=None))
d.append(pd.read_csv('carbon/wilkerson2015.csv',header=None))
from scipy.interpolate import interp1d
kd=[]
fd=[]
for z in range(len(d)):
kd.append({})
for i in range(len(d[z][0])):
if ~np.isnan(d[z][0][i]):
kd[z][np.round(d[z][0][i],0)]=d[z][1][i]
fd.append(interp1d(sorted(kd[z].keys()),[kd[z][j] for j in sorted(kd[z].keys())]))
for z in range(len(d)):
#plt.scatter(d[z][0],d[z][1])
years=range(int(min(d[z][0]))+1,int(max(d[z][0]))+1)
plt.plot(years,fd[z](years))
labels=['Bauer, Hilaire et al.\n2012 | REMIND-R',\
'Luderer, Bosetti et al.\n2011 | IMACLIM-R',\
'Luderer, Bosetti et al.\n2011 | REMIND-R',\
'Luderer, Bosetti et al.\n2011 | WITCH',\
'Bosetti, Marangoni et al.\n2015 | GCAM',\
'Bosetti, Marangoni et al.\n2015 | MARKAL US',\
'Bosetti, Marangoni et al.\n2015 | WITCH',\
'Cai, Newth et al.\n2015 | GTEM-C',\
'Chen, 2005\nMARKAL-MACRO',\
'Edmonds, Wise, MacCracken\n1994 | GCAM',\
'Kriegler, Petermann, et al.\n2015 | multiple',\
'Manne, Richels\n2005 | MERGE',\
'Paltsev, Reilly et al.\n2005 | MIT EPPA',\
'Russ, Ciscar et al.\n2009 | POLES',\
'Wilkerson, Leibowicz et al.\n2015 | multiple'\
]
co2=[1,1,1,1,0,0,0,1,0,0,1,0,0,0,1]
z=14
plt.scatter(d[z][0],d[z][1])
years=range(int(min(d[z][0]))+1,int(max(d[z][0]))+1)
plt.plot(years,fd[z](years))
def plotter(ax,x,y,c,l,z=2,zz=2,step=2,w=-50,w2=30):
yrs=range(x[0]-40,x[len(x)-1]+10)
maxi=[0,0]
maxv=-100
#try a few initial values for maximum rsquared
i=0
for k in range(1,5):
p0 = [1., 1., x[len(x)*k/5]]
fit2 = optimize.leastsq(errfunc,p0,args=(x,y),full_output=True)
ss_err=(fit2[2]['fvec']**2).sum()
ss_tot=((y-y.mean())**2).sum()
rsquared=1-(ss_err/ss_tot)
if rsquared>maxv:
maxi=[i,k]
maxv=rsquared
i=maxi[0]
k=maxi[1]
p0 = [1., 1., x[len(x)*k/5], -1+i*0.5]
fit2 = optimize.leastsq(errfunc,p0,args=(x,y),full_output=True)
ss_err=(fit2[2]['fvec']**2).sum()
ss_tot=((y-y.mean())**2).sum()
rsquared=1-(ss_err/ss_tot)
ax.scatter(x[::step],y[::step],lw*3,color=c)
#ax.plot(yrs,logist(fit2[0],yrs),color="#006d2c",lw=lw)
ax.plot(yrs,logist(fit2[0],yrs),color="#444444",lw=lw)
#ax.plot(yrs,logist(fit2[0],yrs),color=c,lw=1)
yk=logist([fit2[0][0],fit2[0][1],fit2[0][2],fit2[0][3]],range(3000))
mint=0
maxt=3000
perc=0.1
for i in range(3000):
if yk[i]<perc: mint=i
if yk[i]<1-perc: maxt=i
if z>-1:
coord=len(x)*z/5
ax.annotate('$R^2 = '+str(np.round(rsquared,2))+'$\n'+\
'$\\alpha = '+str(np.round(fit2[0][0],2))+'$\n'+\
'$\\beta = '+str(np.round(fit2[0][1],2))+'$\n'+\
'$\\Delta t = '+str(int(maxt-mint))+'$', xy=(yrs[coord], logist(fit2[0],yrs)[coord]),\
xycoords='data',
xytext=(w, w2), textcoords='offset points', color="#444444",
arrowprops=dict(arrowstyle="->",color='#444444'))
coord=len(x)*zz/5
ax.annotate(l, xy=(yrs[coord], logist(fit2[0],yrs)[coord]),\
xycoords='data',
xytext=(w, w2), textcoords='offset points',
arrowprops=dict(arrowstyle="->"))
fig, ax = plt.subplots(1,1,subplot_kw=dict(axisbg='#EEEEEE',axisbelow=True),figsize=(10,5))
lw=2
colors=["#756bb1","#d95f0e","#444444"]
ax.grid(color='white', linestyle='solid')
ax.set_xlabel('Years')
ax.set_ylabel('Carbon tax $[\$/tonCO_2]$')
ax.set_xlim([2000,2100])
ax.set_ylim([0,5000])
#ax.set_yscale('log')
ax.set_title('Carbon price estimations from various IAM models',size=13,y=1.04)
loc=[2088,2083,2084,2080,2031,2047,2043,2088,2015,2072,2050,2075,2095,2020,2062]
lz=[(-70, 20),(-70, 20),(-20, 10),(-40, 20),(-100, 40),(-110, 20),(-130, 20),(-15, 15),\
(-70, 20),(-105, 20),(-80, 20),(-60, 12),(-120, -5),(-70, 50),(-30, 7)]
for z in range(len(d))[:15]:
#ax.scatter(d[z][0],d[z][1])
years=range(int(min(d[z][0]))+1,int(max(d[z][0]))+1)
if (co2[z]==1):k=1
else: k=44.0/12.0
ax.plot(years,fd[z](years)*k,lw=lw,color=colors[z%3])
ax.annotate(labels[z]+str(z), xy=(loc[z],fd[z]([loc[z]])*k),\
xycoords='data',
xytext=lz[z], textcoords='offset points',fontsize=9, color=colors[z%3],
arrowprops=dict(arrowstyle="->",color=colors[z%3]))
#plt.savefig('ces9.png',bbox_inches = 'tight', pad_inches = 0.1, dpi=150)
plt.show()
fig, ax = plt.subplots(1,1,subplot_kw=dict(axisbg='#EEEEEE',axisbelow=True),figsize=(10,5))
lw=2
colors=["#756bb1","#d95f0e","#444444"]
ax.grid(color='white', linestyle='solid')
ax.set_xlabel('Years')
ax.set_ylabel('$MAC$ $[\$/tonCO_2]$')
ax.set_xlim([2000,2100])
ax.set_ylim([0,5000])
#ax.set_yscale('log')
ax.set_title(u'Marginal abatement cost $(MAC)$ estimations from various IAM models',size=13,y=1.04)
loc=[2088,2070,2084,2070,2031,2047,2043,2088,2015,2072,2065,2075,2095,2019,2062]
lz=[(-60, 20),(-75, 20),(-20, 10),(-70, 20),(-100, 40),(-110, 20),(-130, 20),(-15, 15),\
(-70, 20),(-90, 20),(-70, 20),(-70, 12),(-120, -5),(-60, 50),(-30, 7)]
for z in range(len(d))[:15]:
#ax.scatter(d[z][0],d[z][1])
if z not in {0,9,14}:
years=range(int(min(d[z][0]))+1,int(max(d[z][0]))+1)
if (co2[z]==1):k=1
else: k=44.0/12.0
if z in {3,6,7,12}:
lw=3
c=colors[2]
elif z in {0,1,2,5}:
lw=1
c=colors[1]
else:
lw=1
c=colors[0]
ax.plot(years,fd[z](years)*k,lw=lw,color=c)
ax.annotate(labels[z], xy=(loc[z],fd[z]([loc[z]])*k),\
xycoords='data',
xytext=lz[z], textcoords='offset points',fontsize=9, color=c,
arrowprops=dict(arrowstyle="->",color=c))
plt.savefig('ces9b.png',bbox_inches = 'tight', pad_inches = 0.1, dpi=150)
plt.show()
for z in range(len(d))[:15]:
print labels[z]
###Output
Bauer, Hilaire et al.
2012 | REMIND-R
Luderer, Bosetti et al.
2011 | IMACLIM-R
Luderer, Bosetti et al.
2011 | REMIND-R
Luderer, Bosetti et al.
2011 | WITCH
Bosetti, Marangoni et al.
2015 | GCAM
Bosetti, Marangoni et al.
2015 | MARKAL US
Bosetti, Marangoni et al.
2015 | WITCH
Cai, Newth et al.
2015 | GTEM-C
Chen, 2005
MARKAL-MACRO
Edmonds, Wise, MacCracken
1994 | GCAM
Kriegler, Petermann, et al.
2015 | multiple
Manne, Richels
2005 | MERGE
Paltsev, Reilly et al.
2005 | MIT EPPA
Russ, Ciscar et al.
2009 | POLES
Wilkerson, Leibowicz et al.
2015 | multiple
|
3_ml_start_knn_examples/approximate_nearest_neighbors.ipynb | ###Markdown
Approximate nearest neighbors in TSNE

This example presents how to chain KNeighborsTransformer and TSNE in a pipeline. It also shows how to wrap the packages `annoy` and `nmslib` to replace KNeighborsTransformer and perform approximate nearest neighbors. These packages can be installed with `pip install annoy nmslib`.

Note: Currently `TSNE(metric='precomputed')` does not modify the precomputed distances, and thus assumes that precomputed euclidean distances are squared. In future versions, a parameter in TSNE will control the optional squaring of precomputed distances (see #12401).

Note: In KNeighborsTransformer we use the definition which includes each training point as its own neighbor in the count of `n_neighbors`, and for compatibility reasons, one extra neighbor is computed when `mode == 'distance'`. Please note that we do the same in the proposed wrappers.

Sample output::

    Benchmarking on MNIST_2000:
    ---------------------------
    AnnoyTransformer:                    0.583 sec
    NMSlibTransformer:                   0.321 sec
    KNeighborsTransformer:               1.225 sec
    TSNE with AnnoyTransformer:          4.903 sec
    TSNE with NMSlibTransformer:         5.009 sec
    TSNE with KNeighborsTransformer:     6.210 sec
    TSNE with internal NearestNeighbors: 6.365 sec

    Benchmarking on MNIST_10000:
    ----------------------------
    AnnoyTransformer:                    4.457 sec
    NMSlibTransformer:                   2.080 sec
    KNeighborsTransformer:               30.680 sec
    TSNE with AnnoyTransformer:          30.225 sec
    TSNE with NMSlibTransformer:         43.295 sec
    TSNE with KNeighborsTransformer:     64.845 sec
    TSNE with internal NearestNeighbors: 64.984 sec
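As a minimal sketch of the self-neighbor convention described above (toy one-dimensional data, not part of the benchmark):

```python
# With mode='distance', each training sample is counted as its own neighbor,
# so the transformer effectively stores n_neighbors + 1 entries per row.
import numpy as np
from sklearn.neighbors import KNeighborsTransformer

X = np.array([[0.0], [1.0], [3.0]])
graph = KNeighborsTransformer(n_neighbors=1, mode='distance').fit(X).transform(X)
print(graph.shape)  # (3, 3) sparse matrix of distances to stored neighbors
print(graph.nnz)    # expected: 6 stored entries, i.e. (1 + 1) per sample
```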
###Code
# Author: Tom Dupre la Tour
#
# License: BSD 3 clause
import time
import sys
try:
import annoy
except ImportError:
print("The package 'annoy' is required to run this example.")
sys.exit()
try:
import nmslib
except ImportError:
print("The package 'nmslib' is required to run this example.")
sys.exit()
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
from scipy.sparse import csr_matrix
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.neighbors import KNeighborsTransformer
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.datasets import fetch_openml
from sklearn.pipeline import make_pipeline
from sklearn.manifold import TSNE
from sklearn.utils import shuffle
print(__doc__)
class NMSlibTransformer(TransformerMixin, BaseEstimator):
"""Wrapper for using nmslib as sklearn's KNeighborsTransformer"""
def __init__(self, n_neighbors=5, metric='euclidean', method='sw-graph',
n_jobs=1):
self.n_neighbors = n_neighbors
self.method = method
self.metric = metric
self.n_jobs = n_jobs
def fit(self, X):
self.n_samples_fit_ = X.shape[0]
# see more metric in the manual
# https://github.com/nmslib/nmslib/tree/master/manual
space = {
'sqeuclidean': 'l2',
'euclidean': 'l2',
'cosine': 'cosinesimil',
'l1': 'l1',
'l2': 'l2',
}[self.metric]
self.nmslib_ = nmslib.init(method=self.method, space=space)
self.nmslib_.addDataPointBatch(X)
self.nmslib_.createIndex()
return self
def transform(self, X):
n_samples_transform = X.shape[0]
# For compatibility reasons, as each sample is considered as its own
# neighbor, one extra neighbor will be computed.
n_neighbors = self.n_neighbors + 1
results = self.nmslib_.knnQueryBatch(X, k=n_neighbors,
num_threads=self.n_jobs)
indices, distances = zip(*results)
indices, distances = np.vstack(indices), np.vstack(distances)
if self.metric == 'sqeuclidean':
distances **= 2
indptr = np.arange(0, n_samples_transform * n_neighbors + 1,
n_neighbors)
kneighbors_graph = csr_matrix((distances.ravel(), indices.ravel(),
indptr), shape=(n_samples_transform,
self.n_samples_fit_))
return kneighbors_graph
class AnnoyTransformer(TransformerMixin, BaseEstimator):
"""Wrapper for using annoy.AnnoyIndex as sklearn's KNeighborsTransformer"""
def __init__(self, n_neighbors=5, metric='euclidean', n_trees=10,
search_k=-1):
self.n_neighbors = n_neighbors
self.n_trees = n_trees
self.search_k = search_k
self.metric = metric
def fit(self, X):
self.n_samples_fit_ = X.shape[0]
metric = self.metric if self.metric != 'sqeuclidean' else 'euclidean'
self.annoy_ = annoy.AnnoyIndex(X.shape[1], metric=metric)
for i, x in enumerate(X):
self.annoy_.add_item(i, x.tolist())
self.annoy_.build(self.n_trees)
return self
def transform(self, X):
return self._transform(X)
def fit_transform(self, X, y=None):
return self.fit(X)._transform(X=None)
def _transform(self, X):
"""As `transform`, but handles X is None for faster `fit_transform`."""
n_samples_transform = self.n_samples_fit_ if X is None else X.shape[0]
# For compatibility reasons, as each sample is considered as its own
# neighbor, one extra neighbor will be computed.
n_neighbors = self.n_neighbors + 1
indices = np.empty((n_samples_transform, n_neighbors),
dtype=int)
distances = np.empty((n_samples_transform, n_neighbors))
if X is None:
for i in range(self.annoy_.get_n_items()):
ind, dist = self.annoy_.get_nns_by_item(
i, n_neighbors, self.search_k, include_distances=True)
indices[i], distances[i] = ind, dist
else:
for i, x in enumerate(X):
indices[i], distances[i] = self.annoy_.get_nns_by_vector(
x.tolist(), n_neighbors, self.search_k,
include_distances=True)
if self.metric == 'sqeuclidean':
distances **= 2
indptr = np.arange(0, n_samples_transform * n_neighbors + 1,
n_neighbors)
kneighbors_graph = csr_matrix((distances.ravel(), indices.ravel(),
indptr), shape=(n_samples_transform,
self.n_samples_fit_))
return kneighbors_graph
def test_transformers():
"""Test that AnnoyTransformer and KNeighborsTransformer give same results
"""
X = np.random.RandomState(42).randn(10, 2)
knn = KNeighborsTransformer()
Xt0 = knn.fit_transform(X)
ann = AnnoyTransformer()
Xt1 = ann.fit_transform(X)
nms = NMSlibTransformer()
Xt2 = nms.fit_transform(X)
assert_array_almost_equal(Xt0.toarray(), Xt1.toarray(), decimal=5)
assert_array_almost_equal(Xt0.toarray(), Xt2.toarray(), decimal=5)
def load_mnist(n_samples):
"""Load MNIST, shuffle the data, and return only n_samples."""
mnist = fetch_openml("mnist_784")
X, y = shuffle(mnist.data, mnist.target, random_state=2)
return X[:n_samples] / 255, y[:n_samples]
def run_benchmark():
datasets = [
('MNIST_2000', load_mnist(n_samples=2000)),
('MNIST_10000', load_mnist(n_samples=10000)),
]
n_iter = 500
perplexity = 30
# TSNE requires a certain number of neighbors which depends on the
# perplexity parameter.
# Add one since we include each sample as its own neighbor.
n_neighbors = int(3. * perplexity + 1) + 1
transformers = [
('AnnoyTransformer', AnnoyTransformer(n_neighbors=n_neighbors,
metric='sqeuclidean')),
('NMSlibTransformer', NMSlibTransformer(n_neighbors=n_neighbors,
metric='sqeuclidean')),
('KNeighborsTransformer', KNeighborsTransformer(
n_neighbors=n_neighbors, mode='distance', metric='sqeuclidean')),
('TSNE with AnnoyTransformer', make_pipeline(
AnnoyTransformer(n_neighbors=n_neighbors, metric='sqeuclidean'),
TSNE(metric='precomputed', perplexity=perplexity,
method="barnes_hut", random_state=42, n_iter=n_iter), )),
('TSNE with NMSlibTransformer', make_pipeline(
NMSlibTransformer(n_neighbors=n_neighbors, metric='sqeuclidean'),
TSNE(metric='precomputed', perplexity=perplexity,
method="barnes_hut", random_state=42, n_iter=n_iter), )),
('TSNE with KNeighborsTransformer', make_pipeline(
KNeighborsTransformer(n_neighbors=n_neighbors, mode='distance',
metric='sqeuclidean'),
TSNE(metric='precomputed', perplexity=perplexity,
method="barnes_hut", random_state=42, n_iter=n_iter), )),
('TSNE with internal NearestNeighbors',
TSNE(metric='sqeuclidean', perplexity=perplexity, method="barnes_hut",
random_state=42, n_iter=n_iter)),
]
# init the plot
nrows = len(datasets)
ncols = np.sum([1 for name, model in transformers if 'TSNE' in name])
fig, axes = plt.subplots(nrows=nrows, ncols=ncols, squeeze=False,
figsize=(5 * ncols, 4 * nrows))
axes = axes.ravel()
i_ax = 0
for dataset_name, (X, y) in datasets:
msg = 'Benchmarking on %s:' % dataset_name
print('\n%s\n%s' % (msg, '-' * len(msg)))
for transformer_name, transformer in transformers:
start = time.time()
Xt = transformer.fit_transform(X)
duration = time.time() - start
# print the duration report
longest = np.max([len(name) for name, model in transformers])
whitespaces = ' ' * (longest - len(transformer_name))
print('%s: %s%.3f sec' % (transformer_name, whitespaces, duration))
# plot TSNE embedding which should be very similar across methods
if 'TSNE' in transformer_name:
axes[i_ax].set_title(transformer_name + '\non ' + dataset_name)
axes[i_ax].scatter(Xt[:, 0], Xt[:, 1], c=y.astype(np.int32),
alpha=0.2, cmap=plt.cm.viridis)
axes[i_ax].xaxis.set_major_formatter(NullFormatter())
axes[i_ax].yaxis.set_major_formatter(NullFormatter())
axes[i_ax].axis('tight')
i_ax += 1
fig.tight_layout()
plt.show()
if __name__ == '__main__':
test_transformers()
run_benchmark()
###Output
Automatically created module for IPython interactive environment
Benchmarking on MNIST_2000:
---------------------------
AnnoyTransformer: 2.460 sec
NMSlibTransformer: 0.173 sec
KNeighborsTransformer: 2.206 sec
TSNE with AnnoyTransformer: 7.510 sec
TSNE with NMSlibTransformer: 5.929 sec
TSNE with KNeighborsTransformer: 8.114 sec
TSNE with internal NearestNeighbors: 9.213 sec
Benchmarking on MNIST_10000:
----------------------------
AnnoyTransformer: 12.359 sec
NMSlibTransformer: 1.494 sec
KNeighborsTransformer: 52.123 sec
TSNE with AnnoyTransformer: 51.923 sec
TSNE with NMSlibTransformer: 49.194 sec
|
Drug_Data_NLP_notebook.ipynb | ###Markdown
Concrete solutions to real problems

An NLP workshop by Emmanuel Ameisen [(@EmmanuelAmeisen)](https://twitter.com/EmmanuelAmeisen), from Insight AI

While there exist a wealth of elaborate and abstract NLP techniques, clustering and classification should always be in our toolkit as the first techniques to use when dealing with this kind of data. In addition to being amongst some of the easiest to scale in production, their ease of use can quickly help businesses address a set of applied problems:
- How do you automatically make the distinction between different categories of sentences?
- How can you find sentences in a dataset that are most similar to a given one?
- How can you extract a rich and concise representation that can then be used for a range of other tasks?
- Most importantly, how do you quickly find out whether these tasks are possible on your dataset at all?

While there is a vast amount of resources on classical Machine Learning, or Deep Learning applied to images, I've found that there is a lack of clear, simple guides as to what to do when one wants to find a meaningful representation for sentences (in order to classify them or group them together, for example). Here is my attempt below. It starts with data.

Our Dataset: Disasters on social media

Contributors looked at over 10,000 tweets retrieved with a variety of searches like “ablaze”, “quarantine”, and “pandemonium”, then noted whether the tweet referred to a disaster event (as opposed to a joke with the word, a movie review, or something non-disastrous). Thank you [Crowdflower](https://www.crowdflower.com/data-for-everyone/).

Why it matters

We will try to correctly predict tweets that are about disasters. This is a very relevant problem, because:
- It is actionable to anybody trying to get signal from noise (such as police departments in this case)
- It is tricky because relying on keywords is harder than in most cases like spam
###Code
!pip install gensim
!pip install -U -q PyDrive
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
# 1. Authenticate and create the PyDrive client.
#auth.authenticate_user()
#gauth = GoogleAuth()
#gauth.credentials = GoogleCredentials.get_application_default()
#drive = GoogleDrive(gauth)
import keras
import nltk
import pandas as pd
import numpy as np
import re
import codecs
###Output
Using TensorFlow backend.
###Markdown
Sanitizing input

Let's make sure our tweets only have characters we want. We remove '#' characters but keep the words after the '#' sign because they might be relevant (eg: #disaster).
###Code
#!wget http://archive.ics.uci.edu/ml/machine-learning-databases/00462/drugsCom_raw.zip
#!unzip drugsCom_raw.zip
df_train = pd.read_table('drugsComTrain_raw.tsv')
df_test = pd.read_table('drugsComTest_raw.tsv')
df_main = pd.concat([df_train, df_test], axis=0)
df_main.head()
# Turn rating into new "binned" column
def rank_bin(array):
y_rank = []
for i in array:
if i <= 4: # Negative Rating Cut Off (Inclusive)
y_rank.append(-1)
elif i >= 10: # Positive Rating Cut Off (Inclusive)
y_rank.append(1)
else:
y_rank.append(0)
return y_rank
df_main["rank_bin"] = rank_bin(df_main["rating"])
df_main.rank_bin.value_counts() # Check to see the bin sizes.
# Upload File Manually
#from google.colab import files
#uploaded = files.upload()
#for fn in uploaded.keys():
# print('User uploaded file "{name}" with length {length} bytes'.format(
# name=fn, length=len(uploaded[fn])))
#downloaded = drive.CreateFile({'id': "1m74XhpHHZXfS3mAM8cbBYl-FHlpjZnEi"})
#downloaded.GetContentFile("socialmedia_relevant_cols.csv")
#input_file = codecs.open("socialmedia_relevant_cols.csv", "r",encoding='utf-8', errors='replace')
#output_file = open("socialmedia_relevant_cols_clean.csv", "w")
#def sanitize_characters(raw, clean):
# for line in input_file:
# out = line
# output_file.write(line)
#sanitize_characters(input_file, output_file)
###Output
_____no_output_____
###Markdown
Let's inspect the data

It looks solid, but we don't really need URLs, and we would like to have our words all lowercase (Hello and HELLO are pretty similar for our task).
###Code
questions = df_main[['review','rating','rank_bin']] #pd.read_csv("socialmedia_relevant_cols_clean.csv")
questions.columns=['text', 'rating', 'class_label']
questions.head()
questions.tail()
questions.describe()
###Output
_____no_output_____
###Markdown
Let's use a few regular expressions to clean up pour data, and save it back to disk for future use
###Code
def standardize_text(df, text_field):
df[text_field] = df[text_field].str.replace(r"http\S+", "")
df[text_field] = df[text_field].str.replace(r"http", "")
df[text_field] = df[text_field].str.replace(r"@\S+", "")
df[text_field] = df[text_field].str.replace(r"[^A-Za-z0-9(),!?@\'\`\"\_\n]", " ")
df[text_field] = df[text_field].str.replace(r"@", "at")
df[text_field] = df[text_field].str.lower()
return df
questions = standardize_text(questions, "text")
questions.to_csv("clean_data.csv")
questions.head()
clean_questions = pd.read_csv("clean_data.csv")
clean_questions.tail()
###Output
_____no_output_____
###Markdown
Data Overview Let's look at our class balance.
###Code
clean_questions.groupby("class_label").count()
###Output
_____no_output_____
###Markdown
We can see our classes are pretty balanced, with a slight oversampling of the "Irrelevant" class.

Our data is clean, now it needs to be prepared. Now that our inputs are more reasonable, let's transform them in a way our model can understand. This implies:
- Tokenizing sentences to a list of separate words
- Creating a train/test split
- Inspecting our data a little more to validate results
###Code
from nltk.tokenize import RegexpTokenizer
tokenizer = RegexpTokenizer(r'\w+')
clean_questions["tokens"] = clean_questions["text"].apply(tokenizer.tokenize)
df_main['review_clean']=clean_questions.text
df_main['tokens']=clean_questions.tokens
clean_questions.head()
###Output
_____no_output_____
###Markdown
Inspecting our dataset a little more
###Code
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
all_words = [word for tokens in clean_questions["tokens"] for word in tokens]
df_main["sentence_length"] = [len(tokens) for tokens in clean_questions["tokens"]]
VOCAB = sorted(list(set(all_words)))
print("%s words total, with a vocabulary size of %s" % (len(all_words), len(VOCAB)))
print("Max sentence length is %s" % max(df_main["sentence_length"]))
print(df_main.loc[df_main['sentence_length'] == 1992].review.values)
_a = df_main.loc[df_main['sentence_length'] >= 1000].review.count()
_b = df_main.loc[df_main['sentence_length'] >= 750].review.count()
_c = df_main.loc[df_main['sentence_length'] >= 250].review.count()
_d = df_main.loc[df_main['sentence_length'] >= 175].review.count()
_e = df_main.loc[df_main['sentence_length'] >= 100].review.count()
_f = df_main.loc[df_main['sentence_length'] < 100].review.count()
print(" # of Reviews by Length \n %s >=1000 words \n %s >=750 words \n %s >=250 words \n %s >=175 words \n %s >=100 words \n %s <100 words\n" % (_a,_b,_c,_d,_e,_f))
df_short = df_main.loc[df_main['sentence_length'] <= 250]
df_short = df_short.sort_values(by='sentence_length', ascending=False)
print("Max sentence length is %s" % max(df_short["sentence_length"]))
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(10, 10))
plt.xlabel('Sentence length')
plt.ylabel('Number of sentences')
plt.title('Length of Tokenized Sentences')
plt.hist(df_short["sentence_length"], bins=250)
plt.show()
a_ = 180
b_ = 175
c_ = 170
d_ = 168
e_ = 167
f_ = 166
g_ = 165
h_ = 165
_a = df_main.loc[df_main['sentence_length'] > a_].review.count()
_b = df_main.loc[df_main['sentence_length'] > b_].review.count()
_c = df_main.loc[df_main['sentence_length'] > c_].review.count()
_d = df_main.loc[df_main['sentence_length'] > d_].review.count()
_e = df_main.loc[df_main['sentence_length'] > e_].review.count()
_f = df_main.loc[df_main['sentence_length'] > f_].review.count()
_g = df_main.loc[df_main['sentence_length'] > g_].review.count()
_h = df_main.loc[df_main['sentence_length'] < h_].review.count()
print (" Cumulative # of Reviews by Length\n %s >%s words \n %s >%s words \n %s >%s words \n %s >%s words \n %s >%s words \n %s >%s words\n %s >%s words\n %s <%s words\n" % (_a,a_,_b,b_,_c,c_,_d,d_,_e,e_,_f,f_,_g,g_,_h,h_))
df_shorter = df_main.loc[df_main['sentence_length'] <= 180]
df_shorter = df_shorter.sort_values(by='sentence_length', ascending=False)
print("Max sentence length is %s" % max(df_shorter["sentence_length"]))
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(10, 10))
plt.xlabel('sentences length')
plt.ylabel('number of sentences')
plt.title('tokenized sentences')
plt.hist(df_shorter["sentence_length"], bins=181)
plt.show()
###Output
_____no_output_____
###Markdown
On to the Machine Learning

Now that our data is clean and prepared, let's dive into the machine learning part.

Enter embeddings

Machine Learning on images can use raw pixels as inputs. Fraud detection algorithms can use customer features. What can NLP use? A natural way to represent text for computers is to encode each character individually, but this seems quite inadequate to represent and understand language. Our goal is to first create a useful embedding for each sentence (or tweet) in our dataset, and then use these embeddings to accurately predict the relevant category.

The simplest approach we can start with is to use a bag of words model, and apply a logistic regression on top. A bag of words just associates an index to each word in our vocabulary, and embeds each sentence as a list of 0s, with a 1 at each index corresponding to a word present in the sentence.

Bag of Words Counts
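To make this concrete, here is a tiny illustrative sketch (toy sentences, not our dataset) of the kind of representation CountVectorizer builds in the next cell:

```python
# Toy example only: each column corresponds to a vocabulary word, each row to a
# sentence, and each cell counts how many times that word occurs in the sentence.
from sklearn.feature_extraction.text import CountVectorizer

toy_sentences = ["the fire is spreading fast", "the movie was fire"]
vectorizer = CountVectorizer()
bow = vectorizer.fit_transform(toy_sentences)

vocab = sorted(vectorizer.vocabulary_, key=vectorizer.vocabulary_.get)
print(vocab)           # learned vocabulary, in column order
print(bow.toarray())   # one count vector per sentence
```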
###Code
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
def cv(data):
count_vectorizer = CountVectorizer()
emb = count_vectorizer.fit_transform(data)
return emb, count_vectorizer
list_corpus = clean_questions["text"].tolist()
list_labels = clean_questions["class_label"].tolist()
X_train, X_test, y_train, y_test = train_test_split(list_corpus, list_labels, test_size=0.2,
random_state=40)
X_train_counts, count_vectorizer = cv(X_train)
X_test_counts = count_vectorizer.transform(X_test)
###Output
_____no_output_____
###Markdown
Visualizing the embeddings

Now that we've created embeddings, let's visualize them and see if we can identify some structure. In a perfect world, our embeddings would be so distinct that our two classes would be perfectly separated. Since visualizing data in 20k dimensions is hard, let's project it down to 2.
###Code
from sklearn.decomposition import PCA, TruncatedSVD
import matplotlib
import matplotlib.patches as mpatches
def plot_LSA(test_data, test_labels, savepath="PCA_demo.csv", plot=True):
lsa = TruncatedSVD(n_components=2)
lsa.fit(test_data)
lsa_scores = lsa.transform(test_data)
color_mapper = {label:idx for idx,label in enumerate(set(test_labels))}
color_column = [color_mapper[label] for label in test_labels]
colors = ['orange','blue','blue']
if plot:
plt.scatter(lsa_scores[:,0], lsa_scores[:,1], s=8, alpha=.8, c=test_labels, cmap=matplotlib.colors.ListedColormap(colors))
red_patch = mpatches.Patch(color='orange', label='Irrelevant')
green_patch = mpatches.Patch(color='blue', label='Disaster')
plt.legend(handles=[red_patch, green_patch], prop={'size': 30})
fig = plt.figure(figsize=(16, 16))
plot_LSA(X_train_counts, y_train)
plt.show()
###Output
_____no_output_____
###Markdown
These embeddings don't look very cleanly separated. Let's see if we can still fit a useful model on them. Fitting a classifierStarting with a logistic regression is a good idea. It is simple, often gets the job done, and is easy to interpret.
###Code
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression(C=30.0, class_weight='balanced', solver='newton-cg',
multi_class='multinomial', n_jobs=-1, random_state=40)
clf.fit(X_train_counts, y_train)
y_predicted_counts = clf.predict(X_test_counts)
###Output
/usr/local/lib/python3.6/dist-packages/sklearn/utils/optimize.py:203: ConvergenceWarning: newton-cg failed to converge. Increase the number of iterations.
"number of iterations.", ConvergenceWarning)
###Markdown
Evaluation

Let's start by looking at some metrics to see if our classifier performed well at all.
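For reference, the helper below computes the usual

$$ \text{precision} = \frac{TP}{TP + FP}, \qquad \text{recall} = \frac{TP}{TP + FN}, \qquad F_1 = 2 \cdot \frac{\text{precision} \cdot \text{recall}}{\text{precision} + \text{recall}}, $$

with per-class scores combined using `average='weighted'`, i.e. weighted by class support.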
###Code
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, classification_report
def get_metrics(y_test, y_predicted):
# true positives / (true positives+false positives)
precision = precision_score(y_test, y_predicted, pos_label=None,
average='weighted')
# true positives / (true positives + false negatives)
recall = recall_score(y_test, y_predicted, pos_label=None,
average='weighted')
# harmonic mean of precision and recall
f1 = f1_score(y_test, y_predicted, pos_label=None, average='weighted')
# true positives + true negatives/ total
accuracy = accuracy_score(y_test, y_predicted)
return accuracy, precision, recall, f1
accuracy, precision, recall, f1 = get_metrics(y_test, y_predicted_counts)
print("accuracy = %.3f, precision = %.3f, recall = %.3f, f1 = %.3f" % (accuracy, precision, recall, f1))
###Output
_____no_output_____
###Markdown
InspectionA metric is one thing, but in order to make an actionable decision, we need to actually inspect the kind of mistakes our classifier is making. Let's start by looking at the confusion matrix.
###Code
import numpy as np
import itertools
from sklearn.metrics import confusion_matrix
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.winter):
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title, fontsize=30)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, fontsize=20)
plt.yticks(tick_marks, classes, fontsize=20)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt), horizontalalignment="center",
color="white" if cm[i, j] < thresh else "black", fontsize=40)
plt.tight_layout()
plt.ylabel('True label', fontsize=30)
plt.xlabel('Predicted label', fontsize=30)
return plt
cm = confusion_matrix(y_test, y_predicted_counts)
fig = plt.figure(figsize=(10, 10))
plot = plot_confusion_matrix(cm, classes=['Irrelevant','Disaster','Unsure'], normalize=False, title='Confusion matrix')
plt.show()
print(cm)
###Output
_____no_output_____
###Markdown
Our classifier never predicts class 3, which is not surprising, seeing as it is critically undersampled. This is not very important here, as the label is not very meaningful. Our classifier creates more false negatives than false positives (proportionally). Depending on the use case, this seems desirable (a false positive is quite a high cost for law enforcement for example). Further inspectionLet's look at the features our classifier is using to make decisions.
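To make the per-class behaviour explicit (especially for the undersampled third class), we can also print a per-class report with the `classification_report` function that was imported earlier but not yet used; a short sketch:

```python
# Per-class precision, recall and F1 for the bag-of-words model.
print(classification_report(y_test, y_predicted_counts))
```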
###Code
def get_most_important_features(vectorizer, model, n=5):
index_to_word = {v:k for k,v in vectorizer.vocabulary_.items()}
# loop for each class
classes ={}
for class_index in range(model.coef_.shape[0]):
word_importances = [(el, index_to_word[i]) for i,el in enumerate(model.coef_[class_index])]
sorted_coeff = sorted(word_importances, key = lambda x : x[0], reverse=True)
tops = sorted(sorted_coeff[:n], key = lambda x : x[0])
bottom = sorted_coeff[-n:]
classes[class_index] = {
'tops':tops,
'bottom':bottom
}
return classes
importance = get_most_important_features(count_vectorizer, clf, 10)
def plot_important_words(top_scores, top_words, bottom_scores, bottom_words, name):
y_pos = np.arange(len(top_words))
top_pairs = [(a,b) for a,b in zip(top_words, top_scores)]
top_pairs = sorted(top_pairs, key=lambda x: x[1])
bottom_pairs = [(a,b) for a,b in zip(bottom_words, bottom_scores)]
bottom_pairs = sorted(bottom_pairs, key=lambda x: x[1], reverse=True)
top_words = [a[0] for a in top_pairs]
top_scores = [a[1] for a in top_pairs]
bottom_words = [a[0] for a in bottom_pairs]
bottom_scores = [a[1] for a in bottom_pairs]
fig = plt.figure(figsize=(10, 10))
plt.subplot(121)
plt.barh(y_pos,bottom_scores, align='center', alpha=0.5)
plt.title('Irrelevant', fontsize=20)
plt.yticks(y_pos, bottom_words, fontsize=14)
plt.suptitle('Key words', fontsize=16)
plt.xlabel('Importance', fontsize=20)
plt.subplot(122)
plt.barh(y_pos,top_scores, align='center', alpha=0.5)
plt.title('Disaster', fontsize=20)
plt.yticks(y_pos, top_words, fontsize=14)
plt.suptitle(name, fontsize=16)
plt.xlabel('Importance', fontsize=20)
plt.subplots_adjust(wspace=0.8)
plt.show()
top_scores = [a[0] for a in importance[1]['tops']]
top_words = [a[1] for a in importance[1]['tops']]
bottom_scores = [a[0] for a in importance[1]['bottom']]
bottom_words = [a[1] for a in importance[1]['bottom']]
plot_important_words(top_scores, top_words, bottom_scores, bottom_words, "Most important words for relevance")
###Output
_____no_output_____
###Markdown
Our classifier correctly picks up on some patterns (hiroshima, massacre), but clearly seems to be overfitting on some irrelevant terms (heyoo, x1392) TFIDF Bag of WordsLet's try a slightly more subtle approach. On top of our bag of words model, we use a TF-IDF (Term Frequency, Inverse Document Frequency) weighting, which weights words by how frequently they appear in our dataset and discounts words that occur in too many documents, as they mostly add noise.
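A small toy illustration of the idf part of the weighting (assuming scikit-learn's default smoothed idf): words that appear in every document get the lowest weight.

```python
# Toy example only: inspect the learned idf weights on a made-up corpus.
from sklearn.feature_extraction.text import TfidfVectorizer

toy_docs = ["the fire is spreading", "the concert was loud", "the fire alarm went off"]
toy_tfidf = TfidfVectorizer().fit(toy_docs)
for word, idx in sorted(toy_tfidf.vocabulary_.items()):
    print("%-9s idf=%.3f" % (word, toy_tfidf.idf_[idx]))
```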
###Code
def tfidf(data):
tfidf_vectorizer = TfidfVectorizer()
train = tfidf_vectorizer.fit_transform(data)
return train, tfidf_vectorizer
X_train_tfidf, tfidf_vectorizer = tfidf(X_train)
X_test_tfidf = tfidf_vectorizer.transform(X_test)
fig = plt.figure(figsize=(16, 16))
plot_LSA(X_train_tfidf, y_train)
plt.show()
###Output
_____no_output_____
###Markdown
These embeddings look much more separated; let's see if that leads to better performance.
###Code
clf_tfidf = LogisticRegression(C=30.0, class_weight='balanced', solver='newton-cg',
multi_class='multinomial', n_jobs=-1, random_state=40)
clf_tfidf.fit(X_train_tfidf, y_train)
y_predicted_tfidf = clf_tfidf.predict(X_test_tfidf)
accuracy_tfidf, precision_tfidf, recall_tfidf, f1_tfidf = get_metrics(y_test, y_predicted_tfidf)
print("accuracy = %.3f, precision = %.3f, recall = %.3f, f1 = %.3f" % (accuracy_tfidf, precision_tfidf,
recall_tfidf, f1_tfidf))
###Output
_____no_output_____
###Markdown
The results are a little better; let's see if they translate to an actual difference in our use case.
###Code
cm2 = confusion_matrix(y_test, y_predicted_tfidf)
fig = plt.figure(figsize=(10, 10))
plot = plot_confusion_matrix(cm2, classes=['Irrelevant','Disaster','Unsure'], normalize=False, title='Confusion matrix')
plt.show()
print("TFIDF confusion matrix")
print(cm2)
print("BoW confusion matrix")
print(cm)
###Output
_____no_output_____
###Markdown
Our false positives have decreased, as this model is more conservative about choosing the positive class. Looking at important coefficients for logistic regressionJust as we did for the bag of words model, let's inspect the words with the largest coefficients in the TF-IDF model.
###Code
importance_tfidf = get_most_important_features(tfidf_vectorizer, clf_tfidf, 10)
top_scores = [a[0] for a in importance_tfidf[1]['tops']]
top_words = [a[1] for a in importance_tfidf[1]['tops']]
bottom_scores = [a[0] for a in importance_tfidf[1]['bottom']]
bottom_words = [a[1] for a in importance_tfidf[1]['bottom']]
plot_important_words(top_scores, top_words, bottom_scores, bottom_words, "Most important words for relevance")
###Output
_____no_output_____
###Markdown
The words it picked up look much more relevant! Although our metrics on our held out validation set haven't increased much, we have much more confidence in the terms our model is using, and thus would feel more comfortable deploying it in a system that would interact with customers. Capturing semantic meaningOur first models have managed to pick up on high signal words. However, it is unlikely that we will have a training set containing all relevant words. To solve this problem, we need to capture the semantic meaning of words: we need to understand that words like 'good' and 'positive' are closer to each other than 'apricot' and 'continent' are. Enter word2vecWord2vec is a model that was pre-trained on a very large corpus and provides embeddings that map similar words close to each other. A quick way to get a sentence embedding for our classifier is to average the word2vec vectors of all words in our sentence.
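Once the pretrained vectors are loaded in the next cell, a quick sanity check of that closeness claim could look like this (illustrative only; assumes the `word2vec` KeyedVectors object defined below):

```python
# Related word pairs should score noticeably higher than unrelated ones.
print(word2vec.similarity("good", "positive"))      # expected: relatively high
print(word2vec.similarity("apricot", "continent"))  # expected: relatively low
```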
###Code
downloaded = drive.CreateFile({'id': "0B7XkCwpI5KDYNlNUTTlSS21pQmM"})
downloaded.GetContentFile("GoogleNews-vectors-negative300.bin.gz")
import gensim
word2vec_path = "GoogleNews-vectors-negative300.bin.gz"
word2vec = gensim.models.KeyedVectors.load_word2vec_format(word2vec_path, binary=True)
def get_average_word2vec(tokens_list, vector, generate_missing=False, k=300):
if len(tokens_list)<1:
return np.zeros(k)
if generate_missing:
vectorized = [vector[word] if word in vector else np.random.rand(k) for word in tokens_list]
else:
vectorized = [vector[word] if word in vector else np.zeros(k) for word in tokens_list]
length = len(vectorized)
summed = np.sum(vectorized, axis=0)
averaged = np.divide(summed, length)
return averaged
def get_word2vec_embeddings(vectors, clean_questions, generate_missing=False):
embeddings = clean_questions['tokens'].apply(lambda x: get_average_word2vec(x, vectors,
generate_missing=generate_missing))
return list(embeddings)
embeddings = get_word2vec_embeddings(word2vec, clean_questions)
X_train_word2vec, X_test_word2vec, y_train_word2vec, y_test_word2vec = train_test_split(embeddings, list_labels,
test_size=0.2, random_state=40)
fig = plt.figure(figsize=(16, 16))
plot_LSA(embeddings, list_labels)
plt.show()
###Output
_____no_output_____
###Markdown
These look much more separated; let's see how our logistic regression does on them!
###Code
clf_w2v = LogisticRegression(C=30.0, class_weight='balanced', solver='newton-cg',
multi_class='multinomial', random_state=40)
clf_w2v.fit(X_train_word2vec, y_train_word2vec)
y_predicted_word2vec = clf_w2v.predict(X_test_word2vec)
accuracy_word2vec, precision_word2vec, recall_word2vec, f1_word2vec = get_metrics(y_test_word2vec, y_predicted_word2vec)
print("accuracy = %.3f, precision = %.3f, recall = %.3f, f1 = %.3f" % (accuracy_word2vec, precision_word2vec,
recall_word2vec, f1_word2vec))
###Output
_____no_output_____
###Markdown
Still getting better; let's plot the confusion matrix.
###Code
cm_w2v = confusion_matrix(y_test_word2vec, y_predicted_word2vec)
fig = plt.figure(figsize=(10, 10))
plot = plot_confusion_matrix(cm_w2v, classes=['Irrelevant','Disaster','Unsure'], normalize=False, title='Confusion matrix')
plt.show()
print("Word2Vec confusion matrix")
print(cm_w2v)
print("TFIDF confusion matrix")
print(cm2)
print("BoW confusion matrix")
print(cm)
###Output
_____no_output_____
###Markdown
Our model is strictly better in all regards than the first two models; this is promising! Further inspectionSince our model does not use a vector with one dimension per word, it gets much harder to directly see which words are most relevant to our classification. In order to provide some explainability, we can leverage a black-box explainer such as LIME.
###Code
!pip install lime
from lime import lime_text
from sklearn.pipeline import make_pipeline
from lime.lime_text import LimeTextExplainer
X_train_data, X_test_data, y_train_data, y_test_data = train_test_split(list_corpus, list_labels, test_size=0.2,
random_state=40)
vector_store = word2vec
def word2vec_pipeline(examples):
global vector_store
tokenizer = RegexpTokenizer(r'\w+')
tokenized_list = []
for example in examples:
example_tokens = tokenizer.tokenize(example)
vectorized_example = get_average_word2vec(example_tokens, vector_store, generate_missing=False, k=300)
tokenized_list.append(vectorized_example)
return clf_w2v.predict_proba(tokenized_list)
c = make_pipeline(count_vectorizer, clf)
def explain_one_instance(instance, class_names):
explainer = LimeTextExplainer(class_names=class_names)
exp = explainer.explain_instance(instance, word2vec_pipeline, num_features=6)
return exp
def visualize_one_exp(features, labels, index, class_names = ["irrelevant","relevant", "unknown"]):
exp = explain_one_instance(features[index], class_names = class_names)
print('Index: %d' % index)
print('True class: %s' % class_names[labels[index]])
exp.show_in_notebook(text=True)
visualize_one_exp(X_test_data, y_test_data, 65)
visualize_one_exp(X_test_data, y_test_data, 60)
import random
from collections import defaultdict
random.seed(40)
def get_statistical_explanation(test_set, sample_size, word2vec_pipeline, label_dict):
sample_sentences = random.sample(test_set, sample_size)
explainer = LimeTextExplainer()
labels_to_sentences = defaultdict(list)
contributors = defaultdict(dict)
# First, find contributing words to each class
for sentence in sample_sentences:
probabilities = word2vec_pipeline([sentence])
curr_label = probabilities[0].argmax()
labels_to_sentences[curr_label].append(sentence)
exp = explainer.explain_instance(sentence, word2vec_pipeline, num_features=6, labels=[curr_label])
listed_explanation = exp.as_list(label=curr_label)
for word,contributing_weight in listed_explanation:
if word in contributors[curr_label]:
contributors[curr_label][word].append(contributing_weight)
else:
contributors[curr_label][word] = [contributing_weight]
# average each word's contribution to a class, and sort them by impact
average_contributions = {}
sorted_contributions = {}
for label,lexica in contributors.items():
curr_label = label
curr_lexica = lexica
average_contributions[curr_label] = pd.Series(index=curr_lexica.keys())
for word,scores in curr_lexica.items():
average_contributions[curr_label].loc[word] = np.sum(np.array(scores))/sample_size
detractors = average_contributions[curr_label].sort_values()
supporters = average_contributions[curr_label].sort_values(ascending=False)
sorted_contributions[label_dict[curr_label]] = {
'detractors':detractors,
'supporters': supporters
}
return sorted_contributions
label_to_text = {
0: 'Irrelevant',
1: 'Relevant',
2: 'Unsure'
}
sorted_contributions = get_statistical_explanation(X_test_data, 100, word2vec_pipeline, label_to_text)
# First index is the class (Disaster)
# Second index is 0 for detractors, 1 for supporters
# Third is how many words we sample
top_words = sorted_contributions['Relevant']['supporters'][:10].index.tolist()
top_scores = sorted_contributions['Relevant']['supporters'][:10].tolist()
bottom_words = sorted_contributions['Relevant']['detractors'][:10].index.tolist()
bottom_scores = sorted_contributions['Relevant']['detractors'][:10].tolist()
plot_important_words(top_scores, top_words, bottom_scores, bottom_words, "Most important words for relevance")
###Output
_____no_output_____
###Markdown
Looks like very relevant words are picked up! This model definitely seems to make decisions in a very understandable way. Leveraging text structureOur models have been performing better, but they completely ignore sentence structure such as word order. To see whether capturing some more sense of structure would help, we will try a final, more complex model. CNNs for text classificationHere, we will be using a Convolutional Neural Network for sentence classification. While not as popular as RNNs, CNNs have been shown to achieve competitive results (sometimes beating the best models) and are very fast to train, making them a perfect choice for this tutorial. First, let's embed our text!
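As a toy illustration of the tokenization and padding used below (made-up sentences, illustrative only):

```python
# Toy example only: integer-encode two sentences and pad them to a fixed length.
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences

toy_tok = Tokenizer(num_words=50)
toy_tok.fit_on_texts(["fire in the building", "the show was great"])
toy_seqs = toy_tok.texts_to_sequences(["fire in the building", "the show was great"])
print(pad_sequences(toy_seqs, maxlen=6))  # zero-padded on the left to length 6
```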
###Code
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
EMBEDDING_DIM = 300
MAX_SEQUENCE_LENGTH = 35
VOCAB_SIZE = len(VOCAB)
VALIDATION_SPLIT=.2
tokenizer = Tokenizer(num_words=VOCAB_SIZE)
tokenizer.fit_on_texts(clean_questions["text"].tolist())
sequences = tokenizer.texts_to_sequences(clean_questions["text"].tolist())
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
cnn_data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
labels = to_categorical(np.asarray(clean_questions["class_label"]))
indices = np.arange(cnn_data.shape[0])
np.random.shuffle(indices)
cnn_data = cnn_data[indices]
labels = labels[indices]
num_validation_samples = int(VALIDATION_SPLIT * cnn_data.shape[0])
embedding_weights = np.zeros((len(word_index)+1, EMBEDDING_DIM))
for word,index in word_index.items():
embedding_weights[index,:] = word2vec[word] if word in word2vec else np.random.rand(EMBEDDING_DIM)
print(embedding_weights.shape)
###Output
_____no_output_____
###Markdown
Now, we will define a simple Convolutional Neural Network
###Code
from keras.layers import Dense, Input, Flatten, Dropout, concatenate
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.layers import LSTM, Bidirectional
from keras.models import Model
def ConvNet(embeddings, max_sequence_length, num_words, embedding_dim, labels_index, trainable=False, extra_conv=True):
embedding_layer = Embedding(num_words,
embedding_dim,
weights=[embeddings],
input_length=max_sequence_length,
trainable=trainable)
sequence_input = Input(shape=(max_sequence_length,), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)
# Yoon Kim model (https://arxiv.org/abs/1408.5882)
convs = []
filter_sizes = [3,4,5]
for filter_size in filter_sizes:
l_conv = Conv1D(filters=128, kernel_size=filter_size, activation='relu')(embedded_sequences)
l_pool = MaxPooling1D(pool_size=3)(l_conv)
convs.append(l_pool)
l_merge = concatenate(convs, axis=1)  # Merge(mode='concat', concat_axis=1) in older Keras versions
# add a 1D convnet with global maxpooling, instead of Yoon Kim model
conv = Conv1D(filters=128, kernel_size=3, activation='relu')(embedded_sequences)
pool = MaxPooling1D(pool_size=3)(conv)
if extra_conv==True:
x = Dropout(0.5)(l_merge)
else:
# Original Yoon Kim model
x = Dropout(0.5)(pool)
x = Flatten()(x)
x = Dense(128, activation='relu')(x)
#x = Dropout(0.5)(x)
preds = Dense(labels_index, activation='softmax')(x)
model = Model(sequence_input, preds)
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['acc'])
return model
###Output
_____no_output_____
###Markdown
Now let's train our Neural Network
###Code
x_train = cnn_data[:-num_validation_samples]
y_train = labels[:-num_validation_samples]
x_val = cnn_data[-num_validation_samples:]
y_val = labels[-num_validation_samples:]
model = ConvNet(embedding_weights, MAX_SEQUENCE_LENGTH, len(word_index)+1, EMBEDDING_DIM,
len(list(clean_questions["class_label"].unique())), False)
model.fit(x_train, y_train, validation_data=(x_val, y_val), epochs=3, batch_size=128)
###Output
_____no_output_____ |
String/1011/1556. Thousand Separator.ipynb | ###Markdown
Description: Given an integer n, add dots (".") as thousands separators and return the result as a string. Example 1: Input: n = 987 Output: "987" Example 2: Input: n = 1234 Output: "1.234" Example 3: Input: n = 123456789 Output: "123.456.789" Example 4: Input: n = 0 Output: "0" Constraints: 1. 0 <= n < 2^31
###Code
class Solution:
def thousandSeparator(self, n: int) -> str:
if n < 1000: return str(n)
s_n = str(n)[::-1]
res = ''
for i, s in enumerate(s_n):
res += s
if (i + 1) % 3 == 0 and i + 1 != len(s_n):
res += '.'
return res[::-1]
solution = Solution()
solution.thousandSeparator(1234)
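# A compact alternative worth noting (not the submitted solution):
# "{:,}".format(1234).replace(",", ".") also yields "1.234".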
###Output
_____no_output_____ |
Data input and visulization.ipynb | ###Markdown
Web homework 2019 Data Science Cohort: Ming Gao 1. Data reading and formatting 2. Plotting data for visualization 3. Converting the csv file into html format with pandas 4. Exporting as an html file
###Code
# Dependiences setup
import matplotlib.pyplot as plt
%matplotlib inline
import pandas as pd
import numpy as np
from scipy.stats import linregress
import seaborn as sns
# Read data from cities csv file
weather_df=pd.read_csv("./Resources/cities.csv",index_col=0)
weather_df.head()
# plot (latitude vs. MaxTemp)
sns.set(rc={'axes.facecolor':'lightgray'})
sns.scatterplot(x=weather_df['Lat'],y=weather_df['Max Temp'],alpha=0.8, edgecolor='k',facecolor="royalblue")
plt.title("City Latitude vs. Max Temperature (02/27/2020)")
plt.ylabel("Max Temperature(F)")
plt.xlabel("Latitude")
plt.grid(True)
plt.savefig("City Latitude vs. Max Temperature.png")
plt.show()
# plot (latitude vs. Humidity)
sns.set(rc={'axes.facecolor':'lightgray'})
sns.scatterplot(x=weather_df['Lat'],y=weather_df['Humidity'],alpha=0.8, edgecolor='k',facecolor="royalblue")
plt.title("City Latitude vs. Humidity (02/27/2020)")
plt.ylabel("Humidity(%)")
plt.xlabel("Latitude")
plt.grid(True)
plt.savefig("City Latitude vs. Humidity.png")
plt.show()
# plot (latitude vs. cloudiness)
sns.set(rc={'axes.facecolor':'lightgray'})
sns.scatterplot(x=weather_df['Lat'],y=weather_df['Cloudiness'],alpha=0.8, edgecolor='k',facecolor="royalblue")
plt.title("City Latitude vs. Cloudiness (02/27/2020)")
plt.ylabel("Cloudiness(%)")
plt.xlabel("Latitude")
plt.grid(True)
plt.savefig("City Latitude vs. Cloudiness.png")
plt.show()
# plot (latitude vs. Wind Speed)
sns.set(rc={'axes.facecolor':'lightgray'})
sns.scatterplot(x=weather_df['Lat'],y=weather_df['Wind Speed'],alpha=0.8, edgecolor='k',facecolor="royalblue")
plt.title("City Latitude vs. Wind Speed (02/27/2020)")
plt.ylabel("Wind Speed(mph)")
plt.xlabel("Latitude")
plt.grid(True)
plt.savefig("City Latitude vs. Wind Speed.png")
plt.show()
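# Optional sketch (uses the linregress import above; left commented out so the
# original outputs are unchanged): fit and draw a trend line on latitude vs. max temp.
# slope, intercept, r, p, stderr = linregress(weather_df['Lat'], weather_df['Max Temp'])
# sns.scatterplot(x=weather_df['Lat'], y=weather_df['Max Temp'])
# plt.plot(weather_df['Lat'], slope * weather_df['Lat'] + intercept, color='red')
# plt.show()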
# Convert csv file into .html file, then write it out
weather_df.to_html("Date_table.html")
data_html_file = weather_df.to_html()
###Output
_____no_output_____ |
glm_walkthrough.ipynb | ###Markdown
Example fMRI subject-level GLM model fitting================================================Full step-by-step example of fitting a subject-level GLM to experimental data and visualizing the results. We first do this on one run of one subject of an fMRI dataset for motor functions. These data were downloaded in BIDS format from https://openneuro.org/datasets/ds000114/versions/1.0.1 and were preprocessed using fmriprep.For details on the data, please see:Gorgolewski K J, Storkey A, Bastin M, Whittle I R, Wardlaw J M, Pernet C R. A test-retest fMRI dataset for motor, language and spatial attention functions. Gigascience. 2013: 2:6.https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3641991/More specifically:1. A sequence of preprocessed fMRI volumes is loaded2. A design matrix describing all the effects related to the data is computed3. A GLM is applied to the run of the dataset, contrasts are computed, and results are visualized4. An example for loop is shown to loop this analysis across runs and across subjectsTechnically, this example shows how to handle two sessions that contain the same experimental conditions. Task Description This dataset was acquired to validate an fMRI task used in pre-surgical planning. Different motor tasks activate different areas of the brain and surgeons need to have reliable information about where these areas are located before conducting neurosurgical procedures. The task consisted of finger tapping, foot twitching and lip pursing blocks that were interleaved with fixation. Single Subject GLM First, we use PyBIDS to import the preprocessed BIDS data
###Code
# Import preprocessed BIDS dataset
from bids.layout import BIDSLayout
from os import path
data_dir = '/scratch/cis-training/'
layout = BIDSLayout(path.join(data_dir, 'ds000114/'),
derivatives='/scratch/cis-training/ds000114/derivatives/')
events_file = layout.get(task='fingerfootlips', suffix='events')[0]
events_file = events_file.path
###Output
_____no_output_____
###Markdown
We query the preprocessed BIDS dataset and grab the nifti files for each run from one subject:
###Code
func_files = layout.get(
sub='01', datatype='func', task='fingerfootlips',
space='MNI152NLin2009cAsym', desc='preproc',
extension='nii.gz')
print(func_files)
###Output
[<BIDSImageFile filename='/scratch/cis-training/ds000114/derivatives/fmriprep/sub-01/ses-retest/func/sub-01_ses-retest_task-fingerfootlips_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz'>, <BIDSImageFile filename='/scratch/cis-training/ds000114/derivatives/fmriprep/sub-01/ses-test/func/sub-01_ses-test_task-fingerfootlips_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz'>]
###Markdown
Create a mean image of the run for plotting purposes
###Code
%matplotlib inline
from nilearn.image import mean_img
mean_img_ = mean_img(func_files[0].path)
from nilearn import plotting
plotting.plot_img(mean_img_)
###Output
_____no_output_____
###Markdown
The timing file describing each condition is:
###Code
import pandas as pd
events_df = pd.read_csv(events_file, sep='\t')
events_df.head()
###Output
_____no_output_____
###Markdown
We want to include nuisance regressors in our design matrix, so we search for potential confound files that were created as part of fmriprep's preprocessing.
###Code
# Find the confounds file
temp_entities = func_files[0].get_entities()
temp_entities['suffix'] = 'regressors'
temp_entities['extension'] = 'tsv'
temp_entities['desc'] = 'confounds'
temp_entities.pop('space')
confounds_file = layout.get(**temp_entities)[0].path
confounds = pd.read_csv(confounds_file, sep='\t')
###Output
_____no_output_____
###Markdown
The possible confounds we have to choose from are:
###Code
for c in confounds.columns:
print(c)
###Output
csf
csf_derivative1
csf_power2
csf_derivative1_power2
white_matter
white_matter_derivative1
white_matter_power2
white_matter_derivative1_power2
global_signal
global_signal_derivative1
global_signal_derivative1_power2
global_signal_power2
std_dvars
dvars
framewise_displacement
t_comp_cor_00
t_comp_cor_01
t_comp_cor_02
t_comp_cor_03
t_comp_cor_04
t_comp_cor_05
a_comp_cor_00
a_comp_cor_01
a_comp_cor_02
a_comp_cor_03
a_comp_cor_04
a_comp_cor_05
a_comp_cor_06
a_comp_cor_07
a_comp_cor_08
a_comp_cor_09
a_comp_cor_10
a_comp_cor_11
a_comp_cor_12
a_comp_cor_13
a_comp_cor_14
a_comp_cor_15
a_comp_cor_16
a_comp_cor_17
a_comp_cor_18
a_comp_cor_19
a_comp_cor_20
a_comp_cor_21
a_comp_cor_22
a_comp_cor_23
a_comp_cor_24
a_comp_cor_25
a_comp_cor_26
a_comp_cor_27
a_comp_cor_28
a_comp_cor_29
a_comp_cor_30
a_comp_cor_31
a_comp_cor_32
a_comp_cor_33
a_comp_cor_34
a_comp_cor_35
a_comp_cor_36
a_comp_cor_37
a_comp_cor_38
a_comp_cor_39
a_comp_cor_40
a_comp_cor_41
a_comp_cor_42
a_comp_cor_43
a_comp_cor_44
a_comp_cor_45
a_comp_cor_46
a_comp_cor_47
a_comp_cor_48
a_comp_cor_49
a_comp_cor_50
a_comp_cor_51
a_comp_cor_52
a_comp_cor_53
a_comp_cor_54
a_comp_cor_55
a_comp_cor_56
a_comp_cor_57
a_comp_cor_58
a_comp_cor_59
a_comp_cor_60
cosine00
cosine01
cosine02
cosine03
cosine04
cosine05
non_steady_state_outlier00
trans_x
trans_x_derivative1
trans_x_derivative1_power2
trans_x_power2
trans_y
trans_y_derivative1
trans_y_power2
trans_y_derivative1_power2
trans_z
trans_z_derivative1
trans_z_power2
trans_z_derivative1_power2
rot_x
rot_x_derivative1
rot_x_derivative1_power2
rot_x_power2
rot_y
rot_y_derivative1
rot_y_power2
rot_y_derivative1_power2
rot_z
rot_z_derivative1
rot_z_derivative1_power2
rot_z_power2
motion_outlier00
motion_outlier01
motion_outlier02
motion_outlier03
motion_outlier04
motion_outlier05
motion_outlier06
motion_outlier07
motion_outlier08
motion_outlier09
###Markdown
We're going to regress out six motion parameters (3 translation and 3 rotation) as well as any volumes that were flagged by DVARS. Here we create these confounds for the model.
###Code
cols = ['trans_x', 'trans_y', 'trans_z',
'rot_x', 'rot_y', 'rot_z', 'dvars']
func_file = func_files[0]
temp_entities = func_file.get_entities()
temp_entities['suffix'] = 'regressors'
temp_entities['extension'] = 'tsv'
temp_entities['desc'] = 'confounds'
temp_entities.pop('space')
confounds_file = layout.get(**temp_entities)[0].path
confounds = pd.read_csv(confounds_file, sep='\t')
confounds_for_model = confounds[cols]
confounds_for_model = confounds_for_model.fillna(0)
###Output
_____no_output_____
###Markdown
Define the subject-level model.
###Code
from nistats.first_level_model import FirstLevelModel
t_r = layout.get_metadata(func_files[0].path)['RepetitionTime']
model = FirstLevelModel(t_r=t_r, period_cut=128,
subject_label=func_files[0].entities['subject'],
smoothing_fwhm=5., drift_model = 'polynomial',
noise_model='ar1', minimize_memory=True)
###Output
_____no_output_____
###Markdown
Run the model
###Code
model.fit(func_file.path, events_df, confounds_for_model)
###Output
/home/data/cis/training-week-2019/env/lib/python3.6/site-packages/nilearn/_utils/cache_mixin.py:232: DeprecationWarning: The 'cachedir' attribute has been deprecated in version 0.12 and will be removed in version 0.14.
Use os.path.join(memory.location, 'joblib') attribute instead.
if (memory.cachedir is None and memory_level is not None
/home/data/cis/training-week-2019/env/lib/python3.6/site-packages/nilearn/_utils/cache_mixin.py:232: DeprecationWarning: The 'cachedir' attribute has been deprecated in version 0.12 and will be removed in version 0.14.
Use os.path.join(memory.location, 'joblib') attribute instead.
if (memory.cachedir is None and memory_level is not None
<string>:6: DeprecationWarning: object of type <class 'numpy.float64'> cannot be safely interpreted as an integer.
<string>:6: DeprecationWarning: object of type <class 'float'> cannot be safely interpreted as an integer.
/home/data/cis/training-week-2019/env/lib/python3.6/site-packages/nilearn/_utils/cache_mixin.py:232: DeprecationWarning: The 'cachedir' attribute has been deprecated in version 0.12 and will be removed in version 0.14.
Use os.path.join(memory.location, 'joblib') attribute instead.
if (memory.cachedir is None and memory_level is not None
###Markdown
Visualize the design matrix
###Code
%matplotlib inline
from nistats.reporting import plot_design_matrix
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(16, 10))
plot_design_matrix(model.design_matrices_[0], ax=ax)
ax.set_title('Design Matrix')
fig.show()
###Output
_____no_output_____
###Markdown
Build and plot the contrasts
###Code
import numpy as np
def pad_vector(contrast_, n_columns):
return np.hstack((contrast_, np.zeros(n_columns - len(contrast_))))
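# In this design matrix the task regressors (Finger, Foot, Lips) come first, so e.g.
# [1, -0.5, -0.5] contrasts Finger against the mean of the other two conditions;
# pad_vector fills the remaining (confound/drift) columns with zeros.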
n_columns = model.design_matrices_[0].shape[1]
contrasts = {
'Finger_minus_All': pad_vector([1, -0.5, -0.5], n_columns),
'Foot_minus_All': pad_vector([-0.5, 1, -0.5], n_columns)
}
from nistats.reporting import plot_contrast_matrix
plt.figure(figsize=(7, 7))
for i, (key, values) in enumerate(contrasts.items()):
ax = plt.subplot(5, 1, i + 1)
plot_contrast_matrix(values, design_matrix=model.design_matrices_[0], ax=ax)
plt.show()
###Output
/home/data/cis/training-week-2019/env/lib/python3.6/site-packages/numpy/matrixlib/defmatrix.py:71: PendingDeprecationWarning: the matrix subclass is not the recommended way to represent matrices or deal with linear algebra (see https://docs.scipy.org/doc/numpy/user/numpy-for-matlab-users.html). Please adjust your code to use regular ndarray.
return matrix(data, dtype=dtype, copy=False)
###Markdown
Compute the contrasts and plot up the within-run, subject-level results
###Code
sub = '01'
ses = layout.get_sessions(subjects=sub)[0]
print(sub)
for index, (contrast_id, contrast_val) in enumerate(contrasts.items()):
print('\t\tContrast {} out of {}: {}'.format(index+1,
len(contrasts),
contrast_id))
pe_map = model.compute_contrast(contrast_val,
output_type='effect_size')
pe_image_file = 'sub-{}_ses-{}_{}_pe_map.nii.gz'.format(sub,
ses,
contrast_id)
pe_map.to_filename(pe_image_file)
z_map = model.compute_contrast(contrast_val, output_type='z_score')
z_image_file = 'sub-{}_ses-{}_{}_z_map.nii.gz'.format(sub,
ses,
contrast_id)
z_map.to_filename(z_image_file)
plotting.plot_stat_map(
z_map, bg_img=mean_img_, threshold=3.0,
title='%s, first session' % contrast_id)
###Output
01
Contrast 1 out of 2: Finger_minus_All
###Markdown
The above cells ran the analysis for a single run of a single subject. The following code accomplishes what the above cells did, but also loops through multiple runs and multiple participants so that first-level analyses can be performed for all subjects in the study.
###Code
# Loop through participants
cols = ['trans_x', 'trans_y', 'trans_z', 'rot_x', 'rot_y', 'rot_z', 'dvars']
events_file = layout.get(task='fingerfootlips', suffix='events')[0]
events_df = pd.read_csv(events_file, sep='\t')
for sub in layout.get_subjects()[:1]:
print('Subject: {}'.format(sub))
func_files = []
confounds = []
for ses in layout.get_sessions(subjects=sub):
print('\tSession {}'.format(ses))
func_file = layout.get(
sub=sub, ses=ses, datatype='func', task='fingerfootlips',
space='MNI152NLin2009cAsym', desc='preproc',
extension='nii.gz')[0]
func_files.append(func_file.path)
# Search for confounds file
temp_entities = func_file.get_entities()
temp_entities['suffix'] = 'regressors'
temp_entities['extension'] = 'tsv'
temp_entities['desc'] = 'confounds'
temp_entities.pop('space')
confounds_file = layout.get(**temp_entities)[0].path
confounds_df = pd.read_csv(confounds_file, sep='\t')
confounds_for_model = confounds_df[cols]
confounds_for_model = confounds_for_model.fillna(0)
confounds.append(confounds_for_model)
# Build model that runs across sessions
t_r = layout.get_metadata(func_file.path)['RepetitionTime']
model = FirstLevelModel(t_r=t_r, subject_label=temp_entities['subject'], smoothing_fwhm=5.)
model.fit(func_file.path, events_df, confounds_for_model)
# Build contrasts
n_columns = model.design_matrices_[0].shape[1]
contrasts = {
'Finger_minus_All': pad_vector([1, -0.5, -0.5], n_columns),
'Foot_minus_All': pad_vector([-0.5, 1, -0.5], n_columns)
}
# Compute contrasts and plot results
for index, (contrast_id, contrast_val) in enumerate(contrasts.items()):
print('\t\tContrast {} out of {}: {}'.format(index+1, len(contrasts), contrast_id))
pe_map = model.compute_contrast(contrast_val, output_type='effect_size')
pe_image_file = 'sub-{}_ses-{}_{}_pe_map.nii.gz'.format(sub, ses, contrast_id)
pe_map.to_filename(pe_image_file)
z_map = model.compute_contrast(contrast_val, output_type='z_score')
z_image_file = 'sub-{}_ses-{}_{}_z_map.nii.gz'.format(sub, ses, contrast_id)
z_map.to_filename(z_image_file)
plotting.plot_stat_map(z_image_file, bg_img=mean_img_, threshold=3.0,
title='sub {0}, {1}, {2}'.format(sub,ses,contrast_id))
plt.show()
###Output
Subject: 01
Session retest
###Markdown
We can also average across both runs:
###Code
from nilearn import image
files = ['sub-01_ses-test_Finger_minus_All_pe_map.nii.gz',
'sub-01_ses-retest_Finger_minus_All_pe_map.nii.gz']
level2_pe_map = image.mean_img(files)
level2_pe_map.to_filename('sub-{}_Finger_minus_All_pe_map.nii.gz'.format('01'))
plotting.plot_stat_map(level2_pe_map, bg_img=mean_img_,
threshold=3.0, title='sub {0}, {1}, {2}'.format(sub,ses,contrast_id))
###Output
_____no_output_____ |
Day 3/DCGAN.ipynb | ###Markdown
###Code
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
import glob
import imageio
import os
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras import layers
import time
from IPython import display
import PIL
from tensorflow.keras.datasets import mnist
(train_images, train_labels), (_,_) = mnist.load_data()
train_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype('float32')
train_images = (train_images - 127.5) / 127.5
batch_size = 256
buffer_size = 60000
train_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(buffer_size).batch(batch_size)
def generator_model():
model = tf.keras.Sequential()
model.add(layers.Dense(7*7*256, use_bias=False, input_shape=(100,)))
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU())
model.add(layers.Reshape((7, 7, 256)))
model.add(layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False))
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU())
model.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False))
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU())
model.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh'))
return model
generator = generator_model()
noise = tf.random.normal([1, 100])
generated_image = generator(noise, training=False)
plt.imshow(generated_image[0, :, :, 0], cmap='gray')
def discriminator_model():
model = tf.keras.Sequential()
model.add(layers.Conv2D(64, (5,5), strides=(2,2), padding='same', input_shape=[28,28,1]))
model.add(layers.LeakyReLU())
model.add(layers.Dropout(0.1))
model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))
model.add(layers.LeakyReLU())
model.add(layers.Dropout(0.1))
model.add(layers.Flatten())
model.add(layers.Dense(1))
return model
discriminator = discriminator_model()
decision = discriminator(generated_image)
print (decision)
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
def discriminator_loss(real_output, fake_output):
real_loss = cross_entropy(tf.ones_like(real_output), real_output)
fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
total_loss = real_loss + fake_loss
return total_loss
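# The generator loss below is the flip side: the generator's fakes are scored against
# a target of ones ("real"), so the generator is rewarded for fooling the discriminator.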
def generator_loss(fake_output):
return cross_entropy(tf.ones_like(fake_output), fake_output)
generator_optimizer = tf.keras.optimizers.Adam(1e-4)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)
checkpoint_dir = './training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,
discriminator_optimizer=discriminator_optimizer,
generator=generator,
discriminator=discriminator)
EPOCHS = 50
noise_dim = 100
num_examples_to_generate = 16
seed = tf.random.normal([num_examples_to_generate, noise_dim])
@tf.function
def train_step(images):
noise = tf.random.normal([batch_size, noise_dim])
with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
generated_images = generator(noise, training=True)
real_output = discriminator(images, training=True)
fake_output = discriminator(generated_images, training=True)
gen_loss = generator_loss(fake_output)
disc_loss = discriminator_loss(real_output, fake_output)
gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)
generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))
def train(dataset, epochs):
for epoch in range(epochs):
start = time.time()
for image_batch in dataset:
train_step(image_batch)
display.clear_output(wait=True)
generate_and_save_images(generator,
epoch + 1,
seed)
if (epoch + 1) % 15 == 0:
checkpoint.save(file_prefix = checkpoint_prefix)
print ('Time for epoch {} is {} sec'.format(epoch + 1, time.time()-start))
display.clear_output(wait=True)
generate_and_save_images(generator,
epochs,
seed)
def generate_and_save_images(model, epoch, test_input):
predictions = model(test_input, training=False)
fig = plt.figure(figsize=(4,4))
for i in range(predictions.shape[0]):
plt.subplot(4, 4, i+1)
plt.imshow(predictions[i, :, :, 0] * 127.5 + 127.5, cmap='gray')
plt.axis('off')
plt.savefig('image_at_epoch_{:04d}.png'.format(epoch))
plt.show()
train(train_dataset, EPOCHS)
PIL.Image.open('image_at_epoch_{:04d}.png'.format(EPOCHS))
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
anim_file = 'output.gif'
with imageio.get_writer(anim_file, mode='I') as writer:
filenames = glob.glob('image*.png')
filenames = sorted(filenames)
last = -1
for i,filename in enumerate(filenames):
frame = 2*(i**0.5)
if round(frame) > round(last):
last = frame
else:
continue
image = imageio.imread(filename)
writer.append_data(image)
image = imageio.imread(filename)
writer.append_data(image)
import IPython
if IPython.version_info > (6,2,0,''):
display.Image(filename=anim_file)
###Output
_____no_output_____ |
4_Matplotlib/03_ContourPlot.ipynb | ###Markdown
Contour Plots
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
np.random.seed(0)
def f(x, y):
return x**2 + y**2
x = np.arange(-5, 5.0, 0.25)
y = np.arange(-5, 5.0, 0.25)
print(x[:10])
print(y[:10])
###Output
[-5. -4.75 -4.5 -4.25 -4. -3.75 -3.5 -3.25 -3. -2.75]
[-5. -4.75 -4.5 -4.25 -4. -3.75 -3.5 -3.25 -3. -2.75]
###Markdown
Meshgrid```pythonnp.meshgrid( *xi, copy=True, sparse=False, indexing='xy')```Return coordinate matrices from coordinate vectors.Make N-D coordinate arrays for vectorized evaluations of N-D scalar/vector fields over N-D grids, given one-dimensional coordinate arrays x1, x2,…, xn.
###Code
X, Y = np.meshgrid(x, y)
print(X)
print(Y)
plt.scatter(X, Y, s=10);
Z = f(X, Y)
print(Z)
plt.contour(X, Y, Z, colors='black');
###Output
_____no_output_____
###Markdown
Colorbars'BuGn_r', 'BuPu', 'BuPu_r', 'CMRmap', 'CMRmap_r', 'Dark2', 'Dark2_r', 'GnBu', 'GnBu_r', 'Greens', 'Greens_r', 'Greys', 'Greys_r', 'OrRd', 'OrRd_r', 'Oranges', 'Oranges_r', 'PRGn', 'PRGn_r', 'Paired', 'Paired_r', 'Pastel1', 'Pastel1_r', 'Pastel2', 'Pastel2_r', 'PiYG', 'PiYG_r', 'PuBu', 'PuBuGn', 'PuBuGn_r', 'PuBu_r', 'PuOr', 'PuOr_r', 'PuRd', 'PuRd_r', 'Purples', 'Purples_r', 'RdBu', 'RdBu_r', 'RdGy', 'RdGy_r', 'RdPu', 'RdPu_r', 'RdYlBu', 'RdYlBu_r', 'RdYlGn', 'RdYlGn_r', 'Reds', 'Reds_r', 'Set1', 'Set1_r', 'Set2', 'Set2_r', 'Set3', 'Set3_r', 'Spectral', 'Spectral_r', 'Wistia', 'Wistia_r', 'YlGn', 'YlGnBu', 'YlGnBu_r', 'YlGn_r', 'YlOrBr', 'YlOrBr_r', 'YlOrRd', 'YlOrRd_r', 'afmhot', 'afmhot_r', 'autumn', 'autumn_r', 'binary', 'binary_r', 'bone', 'bone_r', 'brg', 'brg_r', 'bwr', 'bwr_r', 'cividis', 'cividis_r', 'cool', 'cool_r', 'coolwarm', 'coolwarm_r', 'copper', 'copper_r', 'cubehelix', 'cubehelix_r', 'flag', 'flag_r', 'gist_earth', 'gist_earth_r', 'gist_gray', 'gist_gray_r', 'gist_heat', 'gist_heat_r', 'gist_ncar', 'gist_ncar_r', 'gist_rainbow', 'gist_rainbow_r', 'gist_stern', 'gist_stern_r', 'gist_yarg', 'gist_yarg_r', 'gnuplot', 'gnuplot2', 'gnuplot2_r', 'gnuplot_r', 'gray', 'gray_r', 'hot', 'hot_r', 'hsv', 'hsv_r', 'inferno', 'inferno_r', 'jet', 'jet_r', 'magma', 'magma_r', 'nipy_spectral', 'nipy_spectral_r', 'ocean', 'ocean_r', 'pink', 'pink_r', 'plasma', 'plasma_r', 'prism', 'prism_r', 'rainbow', 'rainbow_r', 'seismic', 'seismic_r', 'spring', 'spring_r', 'summer', 'summer_r', 'tab10', 'tab10_r', 'tab20', 'tab20_r', 'tab20b', 'tab20b_r', 'tab20c', 'tab20c_r', 'terrain', 'terrain_r', 'turbo', 'turbo_r', 'twilight', 'twilight_r', 'twilight_shifted', 'twilight_shifted_r', 'viridis', 'viridis_r', 'winter', 'winter_r'
###Code
plt.contourf(X, Y, Z, 20, cmap='RdGy')
plt.colorbar();
plt.contourf(X, Y, Z, 20, cmap='cool')
plt.colorbar();
delta = 0.025
x = np.arange(-3.0, 3.0, delta)
y = np.arange(-2.0, 2.0, delta)
X, Y = np.meshgrid(x, y)
Z1 = np.exp(-X**2 - Y**2)
Z2 = np.exp(-(X - 1)**2 - (Y - 1)**2)
Z = (Z1 - Z2) * 2
fig, ax = plt.subplots()
CS = ax.contour(X, Y, Z)
###Output
_____no_output_____ |
300_cheat_sheet/Git_bucket.ipynb | ###Markdown
When git push stops working: https://cpoint-lab.co.jp/article/201804/windows%E7%89%88git%E3%81%A7%E8%AA%8D%E8%A8%BC%E6%83%85%E5%A0%B1%E3%82%92%E6%B6%88%E3%81%99%E6%96%B9%E6%B3%95/ - Type "credentials" into Cortana (Windows search) - Click "Windows Credentials" - Correct the stored credential information Running on Synology: download: wget https://github.com/gitbucket/gitbucket/releases/download/4.29.0/gitbucket.war launch: java -jar gitbucket.war then access: http://192.168.0.112:8080/
###Code
nohup java -jar gitbucket.war &
###Output
_____no_output_____ |
notebooks/04_resource_phase_eda.ipynb | ###Markdown
Let's get into resource phases! First, restrict to closed items
###Code
isclosed = data[data["to_phase"]=="End"]["work_item"].unique()
closed = data[data["work_item"].isin(isclosed)]
closed[closed["from_resource"]=="0"]["from_phase"].value_counts()
closed[closed["from_phase"]=="Start"]["from_resource"].value_counts(dropna=False)
###Output
_____no_output_____
###Markdown
Every resource starts from NaN.
###Code
print("There are %i different resource states and %i different resource phases" %(closed["to_resource"].nunique(), closed["resource_phase"].nunique()))
###Output
There are 270 different resource states and 2371 different resource phases
###Markdown
Count the different paths a resource can take
###Code
procfreq = count_processes(closed, "resource_phase")
processes = dict_to_df(procfreq).T
processes.sort_values(by="freq", ascending=False, inplace=True)
processes.reset_index(drop=True, inplace=True)
save_path = Path("../plots/")
g1 = plot_process(process=processes["process"][0].values(), name="Resource_top_Frequency", path=save_path)
g2 = plot_process(process=processes["process"][1].values(), name="Resource_second_Frequency", path=save_path)
g3 = plot_process(process=processes["process"][2].values(), name="Resource_third_Frequency", path=save_path)
g1
g2
g3
closed["to_resource"].value_counts().head(8)
###Output
_____no_output_____
###Markdown
We can see that resources move forward and back or stay in the same state. Let's see what happens to the resource phase when the process phase stays at the same stage
###Code
process_hold = closed[closed["from_phase"]==closed["to_phase"]]
resource_change = process_hold[process_hold["resource_phase"].apply(lambda x: x[0] != x[1])]
resource_hold = process_hold[process_hold["resource_phase"].apply(lambda x: x[0] == x[1])]
print("In %i cases the resource changes, if the process holds." %len(resource_change))
print("In %i cases the resource holds, if the process holds." %len(resource_hold))
###Output
In 171 cases the resource changes, if the process holds.
In 0 cases the resource holds, if the process holds.
###Markdown
This is useful information. If the process flow stops, the resource is changed. If the resource is a person, this could have multiple reasons. The things we should get to know more about are:1. What is the correlation between the process flow and the resource flow, i.e. which resource corresponds to which process phase2. Which resource needs the most time3. Why is a resource changing First we calculate times and wrangle the data
###Code
relevant_columns = ["work_item", "process_phase", "resource_phase", "timestamp"]
times = time_for_phase(data, relevant_columns=[*relevant_columns, "to_resource"], process=True, end_date=None)
duration_type = ["duration_in_days", "duration_in_hours", "duration_in_minutes"]
new_names = {
"to_phase": "current_phase",
"process_phase_x": "from_process",
"process_phase_y": "to_process",
"to_resource_x": "current_resource",
"resource_phase_x": "from_resource",
"resource_phase_y": "to_resource"
}
times.rename(columns=new_names, inplace=True)
relevant_cols = ["work_item", "process_index", *list(new_names.values()), "duration_in_days"]
times= times[relevant_cols]
times.head()
is_open = pd.isnull(times["duration_in_days"])
times_open = times[is_open]
times_closed = times[~is_open]
categories = ["Analyze", "Design", "Build", "Test", "Package", "Accept", "Deploy", "Clarify"]
times_closed.loc[:, "current_phase"] = pd.Categorical(times_closed["current_phase"], categories=categories)
###Output
_____no_output_____
###Markdown
Now we take a look at the correlation between the current phase and the current resource, based on the 20 most frequent resources with respect to the most influential process phases (Analyze, Design), and address the first question:1. What is the correlation between the process flow and the resource flow, i.e. which resource corresponds to which process phase
###Code
cross = pd.crosstab(times_closed["current_phase"], times_closed["current_resource"])
cross = cross.reindex(categories).T
top20 = cross.nlargest(20, columns=["Analyze", "Design"])
top20.T.plot(kind='bar', figsize=(15,8))
plt.title("Most frequent resource phases regarding to the process phases")
###Output
_____no_output_____
###Markdown
We can see that resources ER_00061 and ER_00206 go along with the phases Analyze and Deploy, while ER_00239 and ER_00225 go along with Design, Build, Test, Package and Accept. Let's look at the time2. Which resource needs the most time
###Code
resource_freq = times_closed.groupby("current_resource").size()
resource_sum = pd.DataFrame(times_closed.groupby("current_resource")[duration_type[0]].sum() / resource_freq)
resource_sum.rename(columns={0: "duration_in_days"}, inplace=True)
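# Note: sum / group size is the per-resource mean duration, i.e. equivalent here
# (no missing durations in times_closed) to
# times_closed.groupby("current_resource")["duration_in_days"].mean()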
toptime = resource_sum.nlargest(20, columns="duration_in_days")
toptime.plot(kind='barh',figsize=(15,8), title="Current resource regarding to normalized duration")
plt.xlabel("duration_in_days / frequency")
###Output
_____no_output_____
###Markdown
Store into json for usage in models
###Code
# filepath = Path("../data/top20_time_resources.json")
# with open(filepath, 'w') as f:
# json.dump(list(toptime.index), f)
###Output
_____no_output_____
###Markdown
We can see that the resources ER_00169, ER_00097 and ER_00002 need the most time to get things done. We should compare them with the resources from question 1.
###Code
print("The following resources have a high impact based on the process phases and the time: \n{}".format(set(toptime.index.values) & set(top20.index.values)))
###Output
The following resources have a high impact based on the process phases and the time:
set()
###Markdown
Sadly, the intersection is empty. But we should check the ranks of the resources that are related to the process phases
###Code
ranks = resource_sum.reset_index().sort_values(by="duration_in_days", ascending=False).reset_index(drop=True)
print("The both resources that have highest relation to the phases Analyze and Deploy:")
print("The rank of resource of ER_00061 is %i"%ranks[ranks["current_resource"]=="ER_00061"].index[0])
print("The rank of resource of ER_00206 is %i"%ranks[ranks["current_resource"]=="ER_00206"].index[0])
print("The both resources that have highest relation to the phases Design, Build, Test, Package and Accept:")
print("The rank of resource of ER_00239 is %i"%ranks[ranks["current_resource"]=="ER_00239"].index[0])
print("The rank of resource of ER_00225 is %i"%ranks[ranks["current_resource"]=="ER_00225"].index[0])
print("\nWhile the highest rank is %i"%len(ranks))
###Output
The both resources that have highest relation to the phases Analyze and Deploy:
The rank of resource of ER_00061 is 73
The rank of resource of ER_00206 is 172
The both resources that have highest relation to the phases Design, Build, Test, Package and Accept:
The rank of resource of ER_00239 is 144
The rank of resource of ER_00225 is 123
While the highest rank is 273
|
2018-day08.ipynb | ###Markdown
Advent of Code 2018 Day 8: Memory Maneuverhttps://adventofcode.com/2018/day/8The tree is made up of nodes; a single, outermost node forms the tree's root, and it contains all other nodes in the tree (or contains nodes that contain nodes, and so on).Specifically, a node consists of:* A header, which is always exactly two numbers: - The quantity of child nodes. - The quantity of metadata entries.* Zero or more child nodes (as specified in the header).* One or more metadata entries (as specified in the header).Each child node is itself a node that has its own header, child nodes, and metadata. For example:```2 3 0 3 10 11 12 1 1 0 1 99 2 1 1 2A---------------------------------- B----------- C----------- D-----```
###Code
(require racket)
###Output
_____no_output_____
###Markdown
We're faced with a tree made up of numbers, expressed as a depth-first traversal. Seems like good Racket terrain. First step is to load up the data.
###Code
(define data (map string->number (string-split (first (file->lines "data/input2018-08.data")))))
(take data 10)
###Output
_____no_output_____
###Markdown
Looks like we get what we wanted -- a list of numbers. The tree is a node, which has a (potentially empty) set of child-nodes, and a "metadata" weight. We can represent this as a Racket `struct`
###Code
(struct node (children meta) #:transparent)
###Output
_____no_output_____
###Markdown
We need to consume the input list of numbers one at a time -- let's use a generator for this
###Code
(require racket/generator)
(define next-item
(generator () (for ([x (in-list data)]) (yield x))))
###Output
_____no_output_____
###Markdown
Now we can drill down the data given, recursively constructing nodes.
###Code
(define (read-node)
(cond [(equal? (generator-state next-item) 'done) '()]
[else
(define-values (child-count meta-count) (values (next-item) (next-item)))
(node
(for/list ([n (in-range child-count)]) (read-node))
(for/list ([n (in-range meta-count)]) (next-item)))]))
(define tree (read-node))
###Output
_____no_output_____
###Markdown
Part1: What is the sum of all metadata entries?For this we traverse the tree and sum up the meta part of the nodes. We could have done that directly in the `read-node` function if we wanted, but let's keep it separate for clarity.
###Code
(require math) ; sum
(define (metadata-sum tree)
;; The meta data sum is the sum of the current node's meta data list
;; plus the sum of its childrens' meta data sums.
(cond [(null? tree) 0]
[else
(+ (sum (node-meta tree))
(for/sum ([ch (in-list (node-children tree))])
(metadata-sum ch)))]))
(require rackunit)
(check-eq? (metadata-sum tree) 44838)
###Output
_____no_output_____
###Markdown
Bingo. Part2: What's the root node value?The value of a node depends on whether it has child nodes.If a node has no child nodes, its value is the sum of its metadata entries.However, if a node does have child nodes, the metadata entries become indexes which refer to those child nodes. A metadata entry of 1 refers to the first child node, 2 to the second, 3 to the third, and so on. The value of this node is the sum of the values of the child nodes referenced by the metadata entries. If a referenced child node does not exist, that reference is skipped. A child node can be referenced multiple time and counts each time it is referenced. A metadata entry of 0 does not refer to any child node.We'll need two functions. Firstly, we need a way to get the node value, and then we need a function to pick out a child node's value by its index.
###Code
(define (node-value tree)
(cond [(null? tree) 0]
[(null? (node-children tree)) (sum (node-meta tree))] ; node has no children: sum-of-meta
[else
(for/sum ([i (in-list (node-meta tree))]) ; sum of indexed child nodes' values
(value@index tree i))]))
(define (value@index cur idx)
(cond [(zero? idx) 0]
[(>= (sub1 idx) (length (node-children cur))) 0]
[else
(node-value (list-ref (node-children cur) (sub1 idx)))]))
(check-eq? (node-value tree) 22198)
###Output
_____no_output_____ |
ML/DAT8-master/notebooks/05_pandas_visualization.ipynb | ###Markdown
Visualization with Pandas (and Matplotlib)
###Code
import pandas as pd
import matplotlib.pyplot as plt
# display plots in the notebook
%matplotlib inline
# increase default figure and font sizes for easier viewing
plt.rcParams['figure.figsize'] = (8, 6)
plt.rcParams['font.size'] = 14
# read in the drinks data
drink_cols = ['country', 'beer', 'spirit', 'wine', 'liters', 'continent']
url = 'https://raw.githubusercontent.com/justmarkham/DAT8/master/data/drinks.csv'
drinks = pd.read_csv(url, header=0, names=drink_cols, na_filter=False)
###Output
_____no_output_____
###Markdown
Histogram: show the distribution of a numerical variable
###Code
# sort the beer column and mentally split it into 3 groups
drinks.beer.sort_values().values
# compare with histogram
drinks.beer.plot(kind='hist', bins=3)
# try more bins
drinks.beer.plot(kind='hist', bins=20)
# add title and labels
drinks.beer.plot(kind='hist', bins=20, title='Histogram of Beer Servings')
plt.xlabel('Beer Servings')
plt.ylabel('Frequency')
# compare with density plot (smooth version of a histogram)
drinks.beer.plot(kind='density', xlim=(0, 500))
###Output
_____no_output_____
###Markdown
Scatter Plot: show the relationship between two numerical variables
###Code
# select the beer and wine columns and sort by beer
drinks[['beer', 'wine']].sort_values('beer').values
# compare with scatter plot
drinks.plot(kind='scatter', x='beer', y='wine')
# add transparency
drinks.plot(kind='scatter', x='beer', y='wine', alpha=0.3)
# vary point color by spirit servings
drinks.plot(kind='scatter', x='beer', y='wine', c='spirit', colormap='Blues')
# scatter matrix of three numerical columns
pd.plotting.scatter_matrix(drinks[['beer', 'spirit', 'wine']])
# increase figure size
pd.plotting.scatter_matrix(drinks[['beer', 'spirit', 'wine']], figsize=(10, 8))
###Output
_____no_output_____
###Markdown
Bar Plot: show a numerical comparison across different categories
###Code
# count the number of countries in each continent
drinks.continent.value_counts()
# compare with bar plot
drinks.continent.value_counts().plot(kind='bar')
# calculate the mean alcohol amounts for each continent
drinks.groupby('continent').mean()
# side-by-side bar plots
drinks.groupby('continent').mean().plot(kind='bar')
# drop the liters column
drinks.groupby('continent').mean().drop('liters', axis=1).plot(kind='bar')
# stacked bar plots
drinks.groupby('continent').mean().drop('liters', axis=1).plot(kind='bar', stacked=True)
###Output
_____no_output_____
###Markdown
Box Plot: show quartiles (and outliers) for one or more numerical variables**Five-number summary:**- min = minimum value- 25% = first quartile (Q1) = median of the lower half of the data- 50% = second quartile (Q2) = median of the data- 75% = third quartile (Q3) = median of the upper half of the data- max = maximum value(More useful than mean and standard deviation for describing skewed distributions)**Interquartile Range (IQR)** = Q3 - Q1**Outliers:**- below Q1 - 1.5 * IQR- above Q3 + 1.5 * IQR
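A small worked example of those fences, using the spirit column and the quartiles that `describe()` reports below:

```python
# IQR-based outlier fences for spirit servings.
q1, q3 = drinks.spirit.quantile([0.25, 0.75])
iqr = q3 - q1
print("IQR:", iqr)
print("lower fence:", q1 - 1.5 * iqr)
print("upper fence:", q3 + 1.5 * iqr)
```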
###Code
# sort the spirit column
drinks.spirit.sort_values().values
# show "five-number summary" for spirit
drinks.spirit.describe()
# compare with box plot
drinks.spirit.plot(kind='box')
# include multiple variables
drinks.drop('liters', axis=1).plot(kind='box')
###Output
_____no_output_____
###Markdown
Line Plot: show the trend of a numerical variable over time
###Code
# read in the ufo data
url = 'https://raw.githubusercontent.com/justmarkham/DAT8/master/data/ufo.csv'
ufo = pd.read_csv(url)
ufo['Time'] = pd.to_datetime(ufo.Time)
ufo['Year'] = ufo.Time.dt.year
# count the number of ufo reports each year (and sort by year)
ufo.Year.value_counts().sort_index()
# compare with line plot
ufo.Year.value_counts().sort_index().plot()
# don't use a line plot when there is no logical ordering
drinks.continent.value_counts().plot()
###Output
_____no_output_____
###Markdown
Grouped Box Plots: show one box plot for each group
###Code
# reminder: box plot of beer servings
drinks.beer.plot(kind='box')
# box plot of beer servings grouped by continent
drinks.boxplot(column='beer', by='continent')
# box plot of all numeric columns grouped by continent
drinks.boxplot(by='continent')
###Output
_____no_output_____
###Markdown
Grouped Histograms: show one histogram for each group
###Code
# reminder: histogram of beer servings
drinks.beer.plot(kind='hist')
# histogram of beer servings grouped by continent
drinks.hist(column='beer', by='continent')
# share the x axes
drinks.hist(column='beer', by='continent', sharex=True)
# share the x and y axes
drinks.hist(column='beer', by='continent', sharex=True, sharey=True)
# change the layout
drinks.hist(column='beer', by='continent', sharex=True, layout=(2, 3))
###Output
_____no_output_____
###Markdown
Assorted Functionality
###Code
# saving a plot to a file
drinks.beer.plot(kind='hist', bins=20, title='Histogram of Beer Servings')
plt.xlabel('Beer Servings')
plt.ylabel('Frequency')
plt.savefig('beer_histogram.png')
# list available plot styles
plt.style.available
# change to a different style
plt.style.use('ggplot')
###Output
_____no_output_____ |
privacy_analytics/attrition-analysis.ipynb | ###Markdown
Transformation Logic

Step 1: Find features that lead to better prediction - f_subset: the subset of features used for task prediction.
Step 2: Compute pairwise distances (pdist) on dataframe(f_subset) to find unique records that could be used to distinguish users (a sketch of this check appears at the end of the code cell below).

@ADVERSARY: a semi-honest adversary who uses all insider knowledge to learn about users' private information; one who is knowledgeable about the data preparation.

Objective 1: Protect identified sensitive attributes (Age, Distance) so the @ADVERSARY cannot re-identify an individual. These are attributes an adversary can use to identify individuals via age, gender, and location (PUBLIC). Through de-identification on the PUBLIC attributes, PRIVATE information such as monthly income, monthly rate, daily rate, percent salary hike, performance rating, etc. is protected.

Objective 2: Protect sensitive hidden inferences from the published data - a case where the same data can be used to predict multiple classes, e.g., using attrition data to predict suicide risk.
###Code
from sklearn.model_selection import train_test_split
#Step 1 using a classifier to predict attrition from input data
feat = ['Age', 'BusinessTravel', 'DailyRate', 'Department',
'DistanceFromHome', 'Education', 'EducationField', 'EmployeeCount',
'EmployeeNumber', 'EnvironmentSatisfaction', 'Gender', 'HourlyRate',
'JobInvolvement', 'JobLevel', 'JobRole', 'JobSatisfaction',
'MaritalStatus', 'MonthlyIncome', 'MonthlyRate', 'NumCompaniesWorked',
'Over18', 'OverTime', 'PercentSalaryHike', 'PerformanceRating',
'RelationshipSatisfaction', 'StandardHours', 'StockOptionLevel',
'TotalWorkingYears', 'TrainingTimesLastYear', 'WorkLifeBalance',
'YearsAtCompany', 'YearsInCurrentRole', 'YearsSinceLastPromotion',
'YearsWithCurrManager']
label = ['Attrition']
X = attrition[feat]
y = attrition[label]
# Split dataset into training set and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2) # 80% training and 20% test
PRIVACY_FLAG = 1
if PRIVACY_FLAG == 1:
for ele in private_attr:
feat.remove(ele)
X_train = X_train[feat]
X_test = X_test[feat]
print(X_train.shape, y_train.shape)
print(X_test.shape, y_test.shape)
#Import Random Forest Model
from sklearn.ensemble import RandomForestClassifier
# Create a Random Forest classifier
clf = RandomForestClassifier(n_estimators=100)
# Train the model on the training set (ravel() gives scikit-learn the expected 1-D label array)
clf.fit(X_train, y_train.values.ravel())
y_pred=clf.predict(X_test)
#Import scikit-learn metrics module for accuracy calculation
from sklearn import metrics
# Model Accuracy, how often is the classifier correct?
print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
plt.barh(feat,clf.feature_importances_)
plt.yticks(fontsize=7)
plt.tight_layout()
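# Added sketch (not in the original notebook): Step 2 from the notes above -- use pairwise
# distances (pdist) on a quasi-identifier subset to count records that are uniquely
# distinguishable. The `attrition` dataframe and the column names come from earlier cells;
# the choice of quasi-identifiers here is an assumption for illustration only.
from scipy.spatial.distance import pdist, squareform
import numpy as np
import pandas as pd
quasi_cols = ['Age', 'Gender', 'DistanceFromHome']
quasi = pd.get_dummies(attrition[quasi_cols]).astype(float)
dist = squareform(pdist(quasi.values))        # pairwise distances between all records
np.fill_diagonal(dist, np.inf)                # ignore each record's distance to itself
n_unique = int((dist.min(axis=1) > 0).sum())  # records with no exact match on the quasi-identifiers
print(f"{n_unique} of {len(quasi)} records are unique on {quasi_cols}")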
###Output
Accuracy: 0.9013605442176871
|
fhdataapi/IMF/read_me.ipynb | ###Markdown
IMF
The `IMF` class allows you to grab macroeconomic and financial data from the [IMF Database](https://www.imf.org/en/Data). This is essentially a wrapper around their data API.
---
Example
To fetch data with this class you need to know the IMF dataset that has your series and the values of the query parameters. The class is built in a way that helps you find the desired series and fill out the query parameters. The workflow below gives an example.
###Code
from fhdataapi import IMF  # package name matching this repository (fhdataapi)
import matplotlib.pyplot as plt
imf = IMF()
###Output
_____no_output_____
###Markdown
First let us look at all the available datasets using the **`dataflow`** method.
###Code
imf.dataflow()
###Output
_____no_output_____
###Markdown
As you can see, there are a lot of datasets, so it is recommended that you find the dataset you need on the IMF's website and then look up its code here.
As an example, let us use the ***Direction of Trade* (DOT)** dataset. Once you have your dataset, we need to check the query parameters for that table and their dimensions. To do this we use the **`data_structure`** method. The output `dim_code` is a list containing the query parameter names, and `dim_codedict` is a dictionary where the keys are the query parameters and the values are dataframes with the available dimensions for each parameter.
###Code
dim_code, dim_codedict = imf.data_structure('DOT')
print('These are the query parameters')
print(dim_code)
for code in dim_codedict.keys():
print('These are the possible values for the', code, 'query parameter')
display(dim_codedict[code])
###Output
These are the possible values for the CL_FREQ query parameter
###Markdown
Finally, now that we have all the elements of the query, we can grab our series. Let's see how much Brazil exported to the US at a yearly frequency.
###Code
query_filter = {'CL_FREQ': 'A',
'CL_AREA_DOT': 'BR',
'CL_INDICATOR_DOT': 'TXG_FOB_USD',
'CL_COUNTERPART_AREA_DOT': 'US'}
df = imf.compact_data('DOT', query_filter, 'BR exports to US')
df.plot(figsize=(13, 8))
###Output
_____no_output_____ |
10 sparsity_and_l1_regularization.ipynb | ###Markdown
Copyright 2017 Google LLC.
###Code
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Sparsity and L1 Regularization
**Learning objectives:**
* Calculate the size of a model
* Apply L1 regularization to increase sparsity and reduce the model's size

One way to reduce complexity is to use a regularization function that drives weights to exactly zero. For linear models (such as linear regression), a weight of zero is equivalent to not using the corresponding feature at all. Besides avoiding overfitting, the resulting model is also more efficient. L1 regularization is a good way to increase sparsity.

Setup: Run the cells below to load the data and create the feature definitions.
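The small sketch below (added for illustration; it is not part of the original exercise) shows why an L1 penalty produces exact zeros while an L2 penalty only shrinks weights: the proximal (soft-thresholding) update of L1 zeroes out any weight smaller than the regularization strength.
```
import numpy as np

weights = np.array([0.80, 0.05, -0.03, 0.40])
reg_strength = 0.1

# L1 proximal step (soft-thresholding): small weights become exactly zero
l1_step = np.sign(weights) * np.maximum(np.abs(weights) - reg_strength, 0.0)
# L2 proximal step (shrinkage): weights get smaller but stay nonzero
l2_step = weights / (1.0 + reg_strength)

print("after L1:", l1_step)   # small entries are driven to exactly 0 -> sparse model
print("after L2:", l2_step)
```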
###Code
from __future__ import print_function
import math
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from sklearn import metrics
import tensorflow as tf
from tensorflow.python.data import Dataset
tf.logging.set_verbosity(tf.logging.ERROR)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format
california_housing_dataframe = pd.read_csv("https://download.mlcc.google.cn/mledu-datasets/california_housing_train.csv", sep=",")
california_housing_dataframe = california_housing_dataframe.reindex(
np.random.permutation(california_housing_dataframe.index))
def preprocess_features(california_housing_dataframe):
"""Prepares input features from California housing data set.
Args:
california_housing_dataframe: A Pandas DataFrame expected to contain data
from the California housing data set.
Returns:
A DataFrame that contains the features to be used for the model, including
synthetic features.
"""
selected_features = california_housing_dataframe[
["latitude",
"longitude",
"housing_median_age",
"total_rooms",
"total_bedrooms",
"population",
"households",
"median_income"]]
processed_features = selected_features.copy()
# Create a synthetic feature.
processed_features["rooms_per_person"] = (
california_housing_dataframe["total_rooms"] /
california_housing_dataframe["population"])
return processed_features
def preprocess_targets(california_housing_dataframe):
"""Prepares target features (i.e., labels) from California housing data set.
Args:
california_housing_dataframe: A Pandas DataFrame expected to contain data
from the California housing data set.
Returns:
A DataFrame that contains the target feature.
"""
output_targets = pd.DataFrame()
# Create a boolean categorical feature representing whether the
# median_house_value is above a set threshold.
output_targets["median_house_value_is_high"] = (
california_housing_dataframe["median_house_value"] > 265000).astype(float)
return output_targets
# Choose the first 12000 (out of 17000) examples for training.
training_examples = preprocess_features(california_housing_dataframe.head(12000))
training_targets = preprocess_targets(california_housing_dataframe.head(12000))
# Choose the last 5000 (out of 17000) examples for validation.
validation_examples = preprocess_features(california_housing_dataframe.tail(5000))
validation_targets = preprocess_targets(california_housing_dataframe.tail(5000))
# Double-check that we've done the right thing.
print("Training examples summary:")
display.display(training_examples.describe())
print("Validation examples summary:")
display.display(validation_examples.describe())
print("Training targets summary:")
display.display(training_targets.describe())
print("Validation targets summary:")
display.display(validation_targets.describe())
def my_input_fn(features, targets, batch_size=1, shuffle=True, num_epochs=None):
"""Trains a linear regression model.
Args:
features: pandas DataFrame of features
targets: pandas DataFrame of targets
batch_size: Size of batches to be passed to the model
shuffle: True or False. Whether to shuffle the data.
num_epochs: Number of epochs for which data should be repeated. None = repeat indefinitely
Returns:
Tuple of (features, labels) for next data batch
"""
# Convert pandas data into a dict of np arrays.
features = {key:np.array(value) for key,value in dict(features).items()}
# Construct a dataset, and configure batching/repeating.
ds = Dataset.from_tensor_slices((features,targets)) # warning: 2GB limit
ds = ds.batch(batch_size).repeat(num_epochs)
# Shuffle the data, if specified.
if shuffle:
ds = ds.shuffle(10000)
# Return the next batch of data.
features, labels = ds.make_one_shot_iterator().get_next()
return features, labels
def get_quantile_based_buckets(feature_values, num_buckets):
quantiles = feature_values.quantile(
[(i+1.)/(num_buckets + 1.) for i in range(num_buckets)])
return [quantiles[q] for q in quantiles.keys()]
def construct_feature_columns():
"""Construct the TensorFlow Feature Columns.
Returns:
A set of feature columns
"""
bucketized_households = tf.feature_column.bucketized_column(
tf.feature_column.numeric_column("households"),
boundaries=get_quantile_based_buckets(training_examples["households"], 10))
bucketized_longitude = tf.feature_column.bucketized_column(
tf.feature_column.numeric_column("longitude"),
boundaries=get_quantile_based_buckets(training_examples["longitude"], 50))
bucketized_latitude = tf.feature_column.bucketized_column(
tf.feature_column.numeric_column("latitude"),
boundaries=get_quantile_based_buckets(training_examples["latitude"], 50))
bucketized_housing_median_age = tf.feature_column.bucketized_column(
tf.feature_column.numeric_column("housing_median_age"),
boundaries=get_quantile_based_buckets(
training_examples["housing_median_age"], 10))
bucketized_total_rooms = tf.feature_column.bucketized_column(
tf.feature_column.numeric_column("total_rooms"),
boundaries=get_quantile_based_buckets(training_examples["total_rooms"], 10))
bucketized_total_bedrooms = tf.feature_column.bucketized_column(
tf.feature_column.numeric_column("total_bedrooms"),
boundaries=get_quantile_based_buckets(training_examples["total_bedrooms"], 10))
bucketized_population = tf.feature_column.bucketized_column(
tf.feature_column.numeric_column("population"),
boundaries=get_quantile_based_buckets(training_examples["population"], 10))
bucketized_median_income = tf.feature_column.bucketized_column(
tf.feature_column.numeric_column("median_income"),
boundaries=get_quantile_based_buckets(training_examples["median_income"], 10))
bucketized_rooms_per_person = tf.feature_column.bucketized_column(
tf.feature_column.numeric_column("rooms_per_person"),
boundaries=get_quantile_based_buckets(
training_examples["rooms_per_person"], 10))
long_x_lat = tf.feature_column.crossed_column(
set([bucketized_longitude, bucketized_latitude]), hash_bucket_size=1000)
feature_columns = set([
long_x_lat,
bucketized_longitude,
bucketized_latitude,
bucketized_housing_median_age,
bucketized_total_rooms,
bucketized_total_bedrooms,
bucketized_population,
bucketized_households,
bucketized_median_income,
bucketized_rooms_per_person])
return feature_columns
###Output
_____no_output_____
###Markdown
Calculate the model size
To calculate the model size, we simply count the number of nonzero parameters. A helper function for this is provided below. The function digs into the Estimator API; don't worry if you don't understand how it works.
###Code
def model_size(estimator):
variables = estimator.get_variable_names()
size = 0
for variable in variables:
if not any(x in variable
for x in ['global_step',
'centered_bias_weight',
'bias_weight',
'Ftrl']
):
size += np.count_nonzero(estimator.get_variable_value(variable))
return size
###Output
_____no_output_____
###Markdown
Reduce the model size
Your team needs to build a highly accurate logistic regression model for the *SmartRing*, a ring so smart that it can sense the demographics of a city block (`median_income`, `avg_rooms`, `households`, and so on) and tell you whether housing in that block is expensive. Because the SmartRing is small, the engineering team has determined that it can only handle a model with **no more than 600 parameters**. On the other hand, the product management team has determined that the model cannot ship unless the **log loss on the held-out test set is below 0.35**. Can you use your secret weapon, L1 regularization, to tune the model so that it satisfies both the size and accuracy constraints?

Task 1: Find a suitable regularization coefficient.
**Find an L1 regularization strength that satisfies both constraints: the model has no more than 600 parameters and the log loss on the validation set is below 0.35.**
The code below will help you get started. There are many ways to apply regularization to a model; in this exercise we use the `FtrlOptimizer`, which is designed to give better results with L1 regularization than standard gradient descent. Once again, the model is trained on the full data set, so expect it to run slower than usual.
###Code
def train_linear_classifier_model(
learning_rate,
regularization_strength,
steps,
batch_size,
feature_columns,
training_examples,
training_targets,
validation_examples,
validation_targets):
"""Trains a linear regression model.
In addition to training, this function also prints training progress information,
as well as a plot of the training and validation loss over time.
Args:
learning_rate: A `float`, the learning rate.
regularization_strength: A `float` that indicates the strength of the L1
regularization. A value of `0.0` means no regularization.
steps: A non-zero `int`, the total number of training steps. A training step
consists of a forward and backward pass using a single batch.
feature_columns: A `set` specifying the input feature columns to use.
training_examples: A `DataFrame` containing one or more columns from
`california_housing_dataframe` to use as input features for training.
training_targets: A `DataFrame` containing exactly one column from
`california_housing_dataframe` to use as target for training.
validation_examples: A `DataFrame` containing one or more columns from
`california_housing_dataframe` to use as input features for validation.
validation_targets: A `DataFrame` containing exactly one column from
`california_housing_dataframe` to use as target for validation.
Returns:
A `LinearClassifier` object trained on the training data.
"""
periods = 7
steps_per_period = steps / periods
# Create a linear classifier object.
my_optimizer = tf.train.FtrlOptimizer(learning_rate=learning_rate, l1_regularization_strength=regularization_strength)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
linear_classifier = tf.estimator.LinearClassifier(
feature_columns=feature_columns,
optimizer=my_optimizer
)
# Create input functions.
training_input_fn = lambda: my_input_fn(training_examples,
training_targets["median_house_value_is_high"],
batch_size=batch_size)
predict_training_input_fn = lambda: my_input_fn(training_examples,
training_targets["median_house_value_is_high"],
num_epochs=1,
shuffle=False)
predict_validation_input_fn = lambda: my_input_fn(validation_examples,
validation_targets["median_house_value_is_high"],
num_epochs=1,
shuffle=False)
# Train the model, but do so inside a loop so that we can periodically assess
# loss metrics.
print("Training model...")
print("LogLoss (on validation data):")
training_log_losses = []
validation_log_losses = []
for period in range (0, periods):
# Train the model, starting from the prior state.
linear_classifier.train(
input_fn=training_input_fn,
steps=steps_per_period
)
# Take a break and compute predictions.
training_probabilities = linear_classifier.predict(input_fn=predict_training_input_fn)
training_probabilities = np.array([item['probabilities'] for item in training_probabilities])
validation_probabilities = linear_classifier.predict(input_fn=predict_validation_input_fn)
validation_probabilities = np.array([item['probabilities'] for item in validation_probabilities])
# Compute training and validation loss.
training_log_loss = metrics.log_loss(training_targets, training_probabilities)
validation_log_loss = metrics.log_loss(validation_targets, validation_probabilities)
# Occasionally print the current loss.
print(" period %02d : %0.2f" % (period, validation_log_loss))
# Add the loss metrics from this period to our list.
training_log_losses.append(training_log_loss)
validation_log_losses.append(validation_log_loss)
print("Model training finished.")
# Output a graph of loss metrics over periods.
plt.ylabel("LogLoss")
plt.xlabel("Periods")
plt.title("LogLoss vs. Periods")
plt.tight_layout()
plt.plot(training_log_losses, label="training")
plt.plot(validation_log_losses, label="validation")
plt.legend()
return linear_classifier
linear_classifier = train_linear_classifier_model(
learning_rate=0.1,
# TWEAK THE REGULARIZATION VALUE BELOW
regularization_strength=0.0,
steps=300,
batch_size=100,
feature_columns=construct_feature_columns(),
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
print("Model size:", model_size(linear_classifier))
###Output
_____no_output_____
###Markdown
Solution
Click below to see a possible solution.
A regularization strength of 0.1 should be sufficient. Note that there is a trade-off: stronger regularization gives us a smaller model, but it can affect the classification loss.
###Code
linear_classifier = train_linear_classifier_model(
learning_rate=0.1,
regularization_strength=0.1,
steps=300,
batch_size=100,
feature_columns=construct_feature_columns(),
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
print("Model size:", model_size(linear_classifier))
###Output
_____no_output_____ |
notebooks/Demo_BMP_API.ipynb | ###Markdown
Best Management Practice (BMP) Application Programming Interface (API)
Academy of Natural Sciences of Drexel University, Drexel University College of Computing and Informatics

The Drexel University College of Computing and Informatics (CCI) and the Academy of Natural Sciences (ANS) of Drexel University have developed Application Programming Interfaces (APIs) which incorporate novel algorithms to apply efficient solutions to complex environmental queries. The APIs are built in Python using a GeoDjango Web framework, Nginx, Docker, PostgreSQL, and Swagger.

The Best Management Practice (BMP) API returns BMP-specific nutrient and sediment reduction estimates for a user-supplied area of interest. The API supports BMPs within the Delaware and Chesapeake Bay basins. In the Delaware, the API uses the Generalized Watershed Loading Function Enhanced (GWLF-E) model outputs at the HUC12 scale. In the Chesapeake, the API relies on the Chesapeake Bay Model scenarios within the Chesapeake Assessment Scenario Tool (CAST), also at the HUC12 scale. The API calculates the watershed boundary and land cover distribution for the BMP area of interest, then re-allocates the loads using BMP-specific modeling approaches and efficiencies. The BMP efficiencies used vary spatially depending on the watershed. This API is being extended to incorporate local factors that affect pollutant transport and BMP efficiency, such as ecoregion or soils.
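As a rough orientation before the full demo below, an efficiency-type reduction estimate can be thought of as a baseline load for the treated area multiplied by a practice-specific removal efficiency. The sketch below is only an illustration with made-up numbers; it is not the API's actual algorithm, which also handles watershed delineation, land cover re-allocation, and spatially varying efficiencies.
```
# Illustrative only -- hypothetical numbers, not values returned by the BMP API
treated_acres = 12.0
baseline_tn_lb_per_acre = 9.5      # assumed nitrogen loading rate for the drainage area
bmp_tn_efficiency = 0.30           # assumed 30% total-nitrogen removal efficiency

tn_reduction_lb = treated_acres * baseline_tn_lb_per_acre * bmp_tn_efficiency
print(round(tn_reduction_lb, 1), "lb TN/yr reduced")   # 34.2
```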
###Code
from ipyleaflet import Map, basemaps, Polygon, DrawControl, WidgetControl, GeoJSON, MeasureControl, basemap_to_tiles, SplitMapControl
import ipywidgets as widgets
from ipywidgets import IntSlider, ColorPicker, jslink, Dropdown, interact, FloatSlider
import requests
import geojson
import json
import shapely
from shapely.geometry import mapping, MultiPolygon, Polygon, LineString, Point
import matplotlib.pyplot as plt
import collections
from math import sqrt
import numpy as np
from shapely.ops import unary_union
# Copy in the lookup table from the BMP API
bmp_lookups = {"": [],
"Agricultural Animal": [
#"",
"Animal Waste Management Systems",
"Waste Storage Facility",
"Waste Storage Pond",
"Waste Storage Structure"
],
"Stream Restoration": [
#"",
"Watering Facility",
"Fence",
"Non-Urban Stream Restoration",
"Stream Channel Stabilization",
"Streambank and Shoreline Protection",
"Urban Stream Restoration"
],
"Land Use Change": [
#"",
"Tree and Shrub Establishment",
"Tree Planting",
"Barnyard Runoff Controls",
"Conservation Easement",
"Heavy Use Area Protection",
"Roof Runoff Management",
"Roof Runoff Structure",
"Roofs and Covers"
],
"Agricultural Land": [
#"",
"Conservation Tillage",
"Reduced Tillage",
"High Residue Tillage Management",
"Soil Conservation and Water Quality Plans",
"Comprehensive Nutrient Management Plan",
"Nutrient Management",
"Conservation Cover",
"Cover Crop",
"Grazing Land Protection",
"Prescribed Grazing"
],
"Urban Stormwater Management": [
#"",
"Constructed Wetland",
"Dry Extended Detention Ponds",
"Wet Pond",
"Wet Ponds & Wetlands",
"Bioretention",
"Bioretention/raingardens - A/B soils no underdrain",
"Bioretention/raingardens - A/B soils no underdrain",
"Bioretention/raingardens - C/D soils underdrain",
"Bioretention/raingardens - C/D soils no underdrain",
"Bioswale",
"Dry Well/Seepage Pit",
"Impervious Surface Reduction",
"Infiltration Practices w/o Sand Veg. - A/B soils no underdrain",
"Permeable Pavement w/o Sand Veg. - A/B soils no underdrain",
"Stormwater Performance Standard-Runoff Reduction",
"Urban Infiltration Practices",
"Stormwater Performance Standard-Stormwater Treatment"
],
"Polygon Drainage": [
#"",
"Forest Buffer",
"Forest Buffer - Narrow",
"Riparian Herbaceous Cover",
"Grass Buffers",
"Grass Buffer - Narrow",
"Grassed Waterway",
"Wetland Creation - Floodplain",
"Wetland Restoration",
"Wetland Restoration - Floodplain"
],
"Exclusion Buffer": [
#"",
"Forest Buffer-Streamside with Exclusion Fencing",
"Forest Buffer-Narrow with Exclusion Fencing",
"Grass Buffer-Streamside with Exclusion Fencing",
"Grass Buffer-Narrow with Exclusion Fencing",
]
}
messages_arch = {"": '',
"Agricultural Animal": 'Draw the point where the farm is located and enter the number of animals treated by the BMP.',
"Stream Restoration": 'Draw the line representing the BMP and (optionally) enter the length of stream treated.',
"Land Use Change": 'Draw the point or polygon representing the BMP and enter the acres treated (optional if a polygon is drawn).',
"Agricultural Land": 'Draw the point or polygon representing the BMP and enter the acres treated (optional if a polygon is drawn).',
"Urban Stormwater Management": 'Draw the polygon for the BMP or the drainage area to the BMP and provide the drainage area (acres, optional), percent impervious (%, optional) and runoff capture (inches, optional).',
"Polygon Drainage": 'Draw the polygon footprint for your BMP.',
"Exclusion Buffer": 'Draw the polygon footprint for your BMP and enter the length of fencing.'
}
# Sort this dictionary for better viewing
bmp_lookups_sort = {}
for arch in sorted(bmp_lookups):
bmp_lookups_sort[arch] = bmp_lookups[arch]
temp = []
for bmp in sorted(bmp_lookups[arch]):
temp.append(bmp)
#print("%s: %s" % (key, bmp_lookups[key]))
bmp_lookups_sort[arch] = temp
bmp_lookups = dict(bmp_lookups_sort)
del(bmp_lookups_sort)
# bmp_lookups.keys()
# Set up all of the different fields to populate
acres_treated = 0.0
percent_impervious = 0.0
runoff_capture = 0.5
stream_feet_treated = 0.0
number_of_units = 1.0
animal_treated = {
"chickens_broilers": 0.0,
"chickens_layers": 0.0,
"cows_beef": 0.0,
"cows_dairy": 0.0,
"horses": 0.0,
"pigs_hogs_swine": 0.0,
"sheep": 0.0,
"turkeys": 0.0
}
acres_treated_text = widgets.BoundedFloatText(
value=acres_treated,
min=0.00,
max=10000.0,
placeholder='Enter Acres Treated:',
description='Acres',
disabled=False
)
drainage_acres_treated_text = widgets.BoundedFloatText(
value=acres_treated,
min=0.00,
max=10000.0,
placeholder='Enter Drainage Area Treated (Ac.):',
description='DA (Ac.)',
disabled=False
)
percent_impervious_text = widgets.BoundedFloatText(
value=percent_impervious,
placeholder='Enter Percent Impervious:',
min=0.00,
max=100.0,
description='% Impervious',
disabled=False
)
runoff_capture_text = widgets.BoundedFloatText(
value=runoff_capture,
min=0.05,
max=2.5,
step=0.1,
placeholder='Enter Runoff Capture (inches):',
description='Capture (in.)',
disabled=False
)
runoff_capture_slider = FloatSlider(min=0.05, max=2.5, step=0.05, value=1)
stream_feet_treated_text = widgets.BoundedFloatText(
value=stream_feet_treated,
min=0.00,
max=10000.0,
placeholder='Enter Stream Feet Treated:',
description='',
disabled=False
)
number_of_units_text = widgets.BoundedFloatText(
value=number_of_units,
min=0.00,
max=10.0,
placeholder='Enter Number of Units:',
description='',
disabled=False
)
# FOR ANIMALS
chickens_broilers_text = widgets.BoundedFloatText(
value=0,
min=0.00,
max=1000.0,
description='Broilers',
disabled=False
)
chickens_layers_text = widgets.BoundedFloatText(
value = None,
min=0.00,
max=1000.0,
description='Layers',
disabled=False
)
cows_beef_text = widgets.BoundedFloatText(
value=0,
min=0.00,
max=1000.0,
description='Beef Cows',
disabled=False
)
cows_dairy_text = widgets.BoundedFloatText(
value=0,
min=0.00,
max=1000.0,
description='Dairy Cows',
disabled=False
)
horses_text = widgets.BoundedFloatText(
value=0,
min=0.00,
max=1000.0,
description='Horses',
disabled=False
)
pigs_hogs_swine_text = widgets.BoundedFloatText(
value=0,
min=0.00,
max=1000.0,
description='Hogs/Swine',
disabled=False
)
sheep_text = widgets.BoundedFloatText(
value=0,
min=0.00,
max=1000.0,
description='Sheep',
disabled=False
)
turkeys_text = widgets.BoundedFloatText(
value=0,
min=0.00,
max=1000.0,
description='Turkeys',
disabled=False
)
# Put in a DropDown Widget to Select a BMP and enter the necessary information
bmps_selected = []
arches_selected = []
def select_bmp(Archetype):
arches_selected.append(Archetype)
bmpW.options = bmp_lookups[Archetype]
def print_bmp(BMP):
if BMP == None:
pass
else:
print('Selected BMP: {}\n'.format(BMP))
print(messages_arch[str(scW.value)])
bmps_selected.append(BMP)
if str(scW.value) == 'Agricultural Animal':
display(chickens_broilers_text)
display(chickens_layers_text)
display(cows_beef_text)
display(cows_dairy_text)
display(horses_text)
display(pigs_hogs_swine_text)
display(sheep_text)
display(turkeys_text)
elif str(scW.value) == 'Agricultural Land':
display(acres_treated_text)
elif str(scW.value) == 'Exclusion Buffer':
display(stream_feet_treated_text)
elif str(scW.value) == 'Land Use Change':
display(acres_treated_text)
elif str(scW.value) == 'Polygon Drainage':
pass
elif str(scW.value) == 'Stream Restoration':
display(stream_feet_treated_text)
elif str(scW.value) == 'Urban Stormwater Management':
display(drainage_acres_treated_text)
display(percent_impervious_text)
display(runoff_capture_text)
scW = widgets.Dropdown(options=bmp_lookups.keys())
init = scW.value
bmpW = widgets.Dropdown(options=bmp_lookups[init])
disp_bmp = widgets.interactive(print_bmp, BMP=bmpW)
disp_arch = widgets.interactive(select_bmp, Archetype=scW)
display(disp_arch)
display(disp_bmp)
# Create the map
# Map centred on Wissahickon Park
displaymap1 = Map(center = (40.050, -75.215), zoom = 12, min_zoom = 1, max_zoom = 20, scroll_wheel_zoom = True,
basemap = basemaps.Esri.WorldStreetMap)
# Enable user to draw polygons and lines
draw_control = DrawControl()
draw_control.polyline = {
"shapeOptions": {
"color": "#6bc2e5",
"weight": 4,
"opacity": 1.0
}
}
draw_control.polygon = {
"shapeOptions": {
"fillColor": "#6be5c3",
"color": "#6be5c3",
"fillOpacity": 0.3
},
"drawError": {
"color": "#dd253b",
"message": "Oups!"
},
"allowIntersection": True,
}
# Save the drawn geometry to a geojson
feature_collection = {
'type': 'FeatureCollection',
'features': []
}
def handle_draw(self, action, geo_json):
# Save the GeoJSON when it's drawn on the map
feature_collection['features'].append(geo_json)
# Add in a zoom slider bar for fun
zoom_slider1 = IntSlider(description='Zoom level:', min=0, max=20, value=12)
jslink((zoom_slider1, 'value'), (displaymap1, 'zoom'))
widget_control1 = WidgetControl(widget=zoom_slider1, position='topright')
# Add in ability to do measurements
measure = MeasureControl(
position='bottomleft',
active_color = 'red',
primary_length_unit = 'feet'
)
displaymap1.add_control(measure)
measure.completed_color = 'red'
measure.add_length_unit('miles', 0.000621371, 1)
measure.secondary_length_unit = 'miles'
measure.add_area_unit('sqkm', 0.0000001, 1)
measure.secondary_area_unit = 'sqkm'
# Add everything into the map
draw_control.on_draw(handle_draw)
displaymap1.add_control(draw_control)
left_layer = basemap_to_tiles(basemaps.Esri.WorldImagery)
right_layer = basemap_to_tiles(basemaps.Esri.WorldStreetMap)
#split_control = SplitMapControl(left_layer=left_layer, right_layer=right_layer)
#displaymap1.add_control(split_control)
# Show the map
displaymap1
# Show the BMP
model = 'gwlfe'
bmp = bmps_selected[len(bmps_selected)-1]
archetype = arches_selected[len(arches_selected)-1]
print('{}: {}'.format(archetype, bmp))
# Show the last drawn shape
last_geom_idx = len(feature_collection['features']) - 1
last_geom = feature_collection['features'][last_geom_idx]['geometry']
print(last_geom)
# Update the paramters with the user supplied input
acres_treated = drainage_acres_treated_text.value
percent_impervious = percent_impervious_text.value
runoff_capture = runoff_capture_text.value
stream_feet_treated = stream_feet_treated_text.value
number_of_units = number_of_units_text.value
animal_treated = {
"chickens_broilers": chickens_broilers_text.value,
"chickens_layers": chickens_layers_text.value,
"cows_beef": cows_beef_text.value,
"cows_dairy": cows_dairy_text.value,
"horses": horses_text.value,
"pigs_hogs_swine": pigs_hogs_swine_text.value,
"sheep": sheep_text.value,
"turkeys": turkeys_text.value
}
###Output
Polygon Drainage: Forest Buffer
{'type': 'Polygon', 'coordinates': [[[-76.039178, 40.049125], [-76.038749, 40.049737], [-76.03795, 40.050129], [-76.037019, 40.050321], [-76.036745, 40.049645], [-76.037711, 40.04939], [-76.038236, 40.04918], [-76.038617, 40.048915], [-76.039178, 40.049125]]]}
###Markdown
Call BMP API
###Code
# Call the Watersheds API with the drawn shape
_x = dict(eval(str(last_geom)))
_payload = {"bmp_geometry": _x,
"bmp_type": bmp,
"bmp_group": archetype,
"animal_treated": animal_treated,
"acres_treated": acres_treated,
"stream_feet_treated": stream_feet_treated,
"percent_impervious": percent_impervious,
"runoff_capture": runoff_capture,
"number_of_units": number_of_units,
"model": model
}
_payload = json.dumps(_payload)
_url = 'https://watersheds.cci.drexel.edu/api/bmp/'
_headers = {'Content-Type': 'application/json'}
_r_bmp = requests.post(_url, data = _payload , headers= _headers, allow_redirects=True, verify=True)
print('{')
for k, v in eval(_payload).items():
print('\t{}: {}'.format(k, v))
print('}')
# print(_r_bmp.text)
parsed = json.loads(_r_bmp.text)
print(json.dumps(parsed, indent=2, sort_keys=False))
# Cache what we want to use later in the visualization
# Save the watershed as a geojson and a shapely geometry object
return_dict = eval(str(json.loads(_r_bmp.text)))
watershed_shapely = shapely.geometry.shape(return_dict['watershed_geometry'])
if watershed_shapely.type == 'Polygon':
watershed_shapely = MultiPolygon([watershed_shapely])
watershed_geojson = geojson.loads(geojson.dumps(mapping(watershed_shapely)))
#watershed_shapely = unary_union(watershed_shapely)
#watershed_geojson = geojson.loads(geojson.dumps(mapping(watershed_shapely)))
# Cache land cover and final recutions
lulcs = return_dict['land_cover_acres']
lulcs_bmp = return_dict['land_cover_acres']
reductions = return_dict['reduction_lbyr']
p_impervious = return_dict['percent_impervious']
lu_total = 0
lu_total_bmp = 0
for lulc, acre in lulcs_bmp.items():
lu_total_bmp += acre
#print(lu_total_bmp)
#print('Input area ammended land cover acres: {}\n'.format(lulcs_bmp))
# In case we get a zero area, double check by re-sending the watershed boundary
# Reset area for providing warnings with urban stormwater management BMPs
try:
if lu_total_bmp >= 0:
lulcs = {}
lu_total = 0
n = 1
# Call the Fast Zonal API with the Watershed Boundary
for geom in watershed_shapely:
print(n, end='')
m = MultiPolygon([geom])
new_watershed_geojson = geojson.loads(geojson.dumps(mapping(m)))
_x = json.dumps(new_watershed_geojson)
_url = 'https://watersheds.cci.drexel.edu/api/fzs/'
_headers = {}
_r = requests.post(_url, data = _x , headers= _headers, allow_redirects=True, verify=True)
lulcs_loop = eval(_r.text)
for lulc, sqm in lulcs_loop.items():
if lulc in lulcs.keys():
_v = sqm / 4046.86
lulcs[lulc] += _v
else:
lulcs[lulc] = sqm / 4046.86
n += 1
for lulc, acre in lulcs.items():
lu_total += acre
#print('\nRaw land cover acres: {}'.format(lulcs))
#print(lu_total)
except Exception as e:
print(e)
###Output
12
###Markdown
Call OSI API
###Code
return_dict['watershed_geometry']
temp = dict({ "type": "MultiPolygon", "coordinates": [ [ [ [ -74.765000308583041, 41.165270858418161 ], [ -74.764334823710072, 41.164560835257987 ], [ -74.764339, 41.16456 ], [ -74.764464, 41.164514 ], [ -74.764618, 41.164457 ], [ -74.764686, 41.164428 ], [ -74.764743, 41.16441 ], [ -74.764858, 41.164383 ], [ -74.764933, 41.164358 ], [ -74.765013, 41.164316 ], [ -74.765302, 41.164165 ], [ -74.76565, 41.163954 ], [ -74.765693, 41.163927 ], [ -74.765866, 41.163825 ], [ -74.766076, 41.163691 ], [ -74.766108, 41.163669 ], [ -74.766473, 41.163412 ], [ -74.766583, 41.163335 ], [ -74.767029, 41.163021 ], [ -74.7671, 41.162971 ], [ -74.767133, 41.162943 ], [ -74.767192, 41.162883 ], [ -74.767241, 41.162856 ], [ -74.767278, 41.162849 ], [ -74.767301, 41.162849 ], [ -74.767323, 41.162858 ], [ -74.767381, 41.162893 ], [ -74.767395, 41.162901 ], [ -74.76741, 41.162911 ], [ -74.767415, 41.162927 ], [ -74.767412, 41.162948 ], [ -74.767339, 41.163179 ], [ -74.767338, 41.163205 ], [ -74.767345, 41.163229 ], [ -74.767373, 41.163261 ], [ -74.767455, 41.163338 ], [ -74.767496, 41.163388 ], [ -74.767518, 41.163417 ], [ -74.767538, 41.163465 ], [ -74.767546, 41.163498 ], [ -74.767549, 41.163541 ], [ -74.76757, 41.163671 ], [ -74.767583, 41.163721 ], [ -74.767609, 41.163759 ], [ -74.767579, 41.163852 ], [ -74.767593, 41.163891 ], [ -74.767677, 41.163957 ], [ -74.767698, 41.164006 ], [ -74.767705, 41.164108 ], [ -74.767777, 41.16415 ], [ -74.767788, 41.164157 ], [ -74.767755, 41.164171 ], [ -74.767567, 41.164222 ], [ -74.767537, 41.164241 ], [ -74.767503, 41.164266 ], [ -74.767467, 41.164302 ], [ -74.767413, 41.164333 ], [ -74.767359, 41.164354 ], [ -74.766062, 41.164732 ], [ -74.765975, 41.164768 ], [ -74.765896, 41.164814 ], [ -74.765824, 41.164859 ], [ -74.765752, 41.164896 ], [ -74.765674, 41.164936 ], [ -74.765000308583041, 41.165270858418161 ] ] ], [ [ [ -74.764258, 41.163513 ], [ -74.764262, 41.16351 ], [ -74.764743, 41.163237 ], [ -74.765621, 41.162742 ], [ -74.766306, 41.162364 ], [ -74.767051, 41.161958 ], [ -74.767202, 41.161883 ], [ -74.767545, 41.162154 ], [ -74.766789, 41.162602 ], [ -74.765475, 41.163396 ], [ -74.764543, 41.16378 ], [ -74.764258, 41.163513 ] ] ], [ [ [ -74.769347, 41.162123 ], [ -74.769394, 41.162069 ], [ -74.769485, 41.161901 ], [ -74.769515, 41.161857 ], [ -74.769507, 41.161819 ], [ -74.769387, 41.161784 ], [ -74.768913, 41.161777 ], [ -74.768695, 41.161803 ], [ -74.768271, 41.161911 ], [ -74.768043, 41.161995 ], [ -74.767962, 41.162014 ], [ -74.767914, 41.162007 ], [ -74.76789, 41.161991 ], [ -74.767881, 41.161958 ], [ -74.767858, 41.161876 ], [ -74.768135, 41.161637 ], [ -74.768412, 41.161474 ], [ -74.768307, 41.161271 ], [ -74.768284203831527, 41.161284166283458 ], [ -74.768311, 41.161268 ], [ -74.768458, 41.161163 ], [ -74.768615, 41.161042 ], [ -74.768747, 41.160919 ], [ -74.768846, 41.16081 ], [ -74.768945, 41.160691 ], [ -74.76905, 41.160547 ], [ -74.769134, 41.160408 ], [ -74.769213, 41.160237 ], [ -74.76931, 41.160032 ], [ -74.769394, 41.159848 ], [ -74.769588, 41.159426 ], [ -74.769894, 41.158774 ], [ -74.770053, 41.158424 ], [ -74.770182, 41.158169 ], [ -74.770586, 41.158284 ], [ -74.770957, 41.157592 ], [ -74.771548, 41.157773 ], [ -74.772174, 41.157017 ], [ -74.776342, 41.158905 ], [ -74.776288, 41.159001 ], [ -74.77594, 41.158985 ], [ -74.775809, 41.159052 ], [ -74.775341, 41.159292 ], [ -74.774598, 41.159671 ], [ -74.773707, 41.160127 ], [ -74.772907, 41.16073 ], [ -74.771895, 41.16007 ], [ -74.771207, 41.160703 ], [ -74.770845, 41.161314 ], [ -74.770646, 41.16165 
], [ -74.770858, 41.162266 ], [ -74.771061, 41.162855 ], [ -74.771157, 41.163137 ], [ -74.771254, 41.163417 ], [ -74.771391, 41.163815 ], [ -74.770645, 41.164433 ], [ -74.770523, 41.164534 ], [ -74.770398, 41.164638 ], [ -74.770151, 41.164842 ], [ -74.769905, 41.165046 ], [ -74.769781, 41.165149 ], [ -74.769661, 41.165252 ], [ -74.769023, 41.165247 ], [ -74.768178, 41.165833 ], [ -74.768325, 41.165971 ], [ -74.766838, 41.166675 ], [ -74.766678520290696, 41.166601426136729 ], [ -74.766701, 41.166596 ], [ -74.766771, 41.166588 ], [ -74.766794, 41.166588 ], [ -74.766844, 41.166601 ], [ -74.766899, 41.166633 ], [ -74.768209, 41.165906 ], [ -74.768088, 41.165821 ], [ -74.768032, 41.165908 ], [ -74.767936, 41.165937 ], [ -74.767854, 41.165933 ], [ -74.767707, 41.165968 ], [ -74.767644, 41.165988 ], [ -74.767589, 41.166004 ], [ -74.767546, 41.166015 ], [ -74.767378, 41.166127 ], [ -74.767222, 41.166193 ], [ -74.767104, 41.16623 ], [ -74.767002, 41.166241 ], [ -74.766976, 41.166223 ], [ -74.766994, 41.166198 ], [ -74.767146, 41.166152 ], [ -74.767291, 41.166087 ], [ -74.767509, 41.165977 ], [ -74.767553, 41.16591 ], [ -74.767629, 41.165856 ], [ -74.767856, 41.165716 ], [ -74.76786, 41.165717 ], [ -74.767878, 41.165725 ], [ -74.767891, 41.165734 ], [ -74.767921, 41.165748 ], [ -74.767939, 41.165747 ], [ -74.768107, 41.165635 ], [ -74.768099, 41.165625 ], [ -74.768099, 41.165623 ], [ -74.768092, 41.165614 ], [ -74.768087, 41.165609 ], [ -74.768022, 41.165547 ], [ -74.76801, 41.165533 ], [ -74.76801, 41.165519 ], [ -74.768015, 41.165512 ], [ -74.768059, 41.165487 ], [ -74.768114, 41.165455 ], [ -74.768139, 41.165446 ], [ -74.768166, 41.165436 ], [ -74.768197, 41.165421 ], [ -74.768219, 41.165406 ], [ -74.768231, 41.165381 ], [ -74.768253, 41.165358 ], [ -74.768271, 41.165344 ], [ -74.768301, 41.165332 ], [ -74.768354, 41.165314 ], [ -74.768388, 41.165302 ], [ -74.768422, 41.165289 ], [ -74.768456, 41.165271 ], [ -74.768466, 41.165264 ], [ -74.768442, 41.165173 ], [ -74.768456, 41.165101 ], [ -74.768525, 41.165019 ], [ -74.768663, 41.164935 ], [ -74.768749, 41.164906 ], [ -74.768771, 41.164949 ], [ -74.768794, 41.164993 ], [ -74.768833, 41.16502 ], [ -74.768883, 41.165011 ], [ -74.769003, 41.164999 ], [ -74.769005, 41.164996 ], [ -74.769005, 41.164986 ], [ -74.769, 41.164979 ], [ -74.768978, 41.164977 ], [ -74.768976, 41.164975 ], [ -74.768955, 41.164975 ], [ -74.768935, 41.164971 ], [ -74.768922, 41.164871 ], [ -74.768923, 41.164853 ], [ -74.768934, 41.164825 ], [ -74.768958, 41.164813 ], [ -74.76904, 41.164784 ], [ -74.769194, 41.164686 ], [ -74.769362, 41.164596 ], [ -74.769406, 41.164476 ], [ -74.7695, 41.164228 ], [ -74.769759, 41.163939 ], [ -74.770006, 41.163661 ], [ -74.770311, 41.163365 ], [ -74.770353, 41.16335 ], [ -74.7704, 41.163362 ], [ -74.770423, 41.163397 ], [ -74.770414, 41.163475 ], [ -74.770343, 41.164008 ], [ -74.77031, 41.164217 ], [ -74.77033, 41.164232 ], [ -74.770353, 41.164232 ], [ -74.770388, 41.164211 ], [ -74.770421, 41.164156 ], [ -74.770518, 41.164107 ], [ -74.770588, 41.164045 ], [ -74.770588, 41.163979 ], [ -74.770579, 41.163849 ], [ -74.77096, 41.162974 ], [ -74.770987, 41.162869 ], [ -74.77099, 41.162722 ], [ -74.770966, 41.162632 ], [ -74.770935, 41.162585 ], [ -74.770881, 41.162538 ], [ -74.770815, 41.1625 ], [ -74.770764, 41.162489 ], [ -74.770706, 41.16248 ], [ -74.770652, 41.16246 ], [ -74.770609, 41.162433 ], [ -74.770562, 41.162387 ], [ -74.770527, 41.162334 ], [ -74.77048, 41.162261 ], [ -74.770403, 41.162182 ], [ -74.770348, 41.162156 ], [ -74.770332, 
41.162153 ], [ -74.770271, 41.162144 ], [ -74.77008, 41.162139 ], [ -74.769947, 41.162104 ], [ -74.769802, 41.162036 ], [ -74.769699, 41.161966 ], [ -74.769611, 41.161909 ], [ -74.769566, 41.161862 ], [ -74.769482, 41.162023 ], [ -74.769431, 41.16207 ], [ -74.769347, 41.162123 ] ] ], [ [ [ -74.766341916752978, 41.166446138528237 ], [ -74.765312, 41.165971 ], [ -74.765442, 41.165892 ], [ -74.765468, 41.165902 ], [ -74.765473, 41.165947 ], [ -74.765599, 41.166014 ], [ -74.765689, 41.166006 ], [ -74.765881, 41.16612 ], [ -74.765927, 41.166186 ], [ -74.766226, 41.166343 ], [ -74.766341916752978, 41.166446138528237 ] ] ], [ [ [ -74.769347, 41.162123 ], [ -74.769333, 41.162206 ], [ -74.769265, 41.162266 ], [ -74.769332, 41.162313 ], [ -74.769404, 41.162315 ], [ -74.769492, 41.162314 ], [ -74.769562, 41.162343 ], [ -74.769631, 41.162381 ], [ -74.769727, 41.162404 ], [ -74.769942, 41.162458 ], [ -74.76997, 41.162465 ], [ -74.770604, 41.162654 ], [ -74.770629, 41.162689 ], [ -74.770629, 41.162735 ], [ -74.770604, 41.162777 ], [ -74.770461, 41.162829 ], [ -74.76992, 41.163023 ], [ -74.769843, 41.163081 ], [ -74.769731, 41.16324 ], [ -74.769436, 41.163708 ], [ -74.768599, 41.164499 ], [ -74.768484, 41.164594 ], [ -74.768351, 41.164743 ], [ -74.768277, 41.164869 ], [ -74.768202, 41.164949 ], [ -74.768064, 41.165057 ], [ -74.767958, 41.165131 ], [ -74.767848, 41.165185 ], [ -74.767781, 41.165233 ], [ -74.767722, 41.165284 ], [ -74.767675, 41.165284 ], [ -74.767627, 41.165278 ], [ -74.76761, 41.165264 ], [ -74.767501, 41.165156 ], [ -74.767407, 41.165107 ], [ -74.7673, 41.165012 ], [ -74.767119, 41.165098 ], [ -74.767017, 41.165303 ], [ -74.766804, 41.165417 ], [ -74.767112, 41.165606 ], [ -74.76709, 41.165692 ], [ -74.766958, 41.165873 ], [ -74.766498, 41.165957 ], [ -74.766031, 41.165855 ], [ -74.765804, 41.165706 ], [ -74.766039, 41.165564 ], [ -74.766309, 41.165442 ], [ -74.766571, 41.165217 ], [ -74.766768, 41.165166 ], [ -74.766887, 41.165045 ], [ -74.767128, 41.164915 ], [ -74.76737, 41.164768 ], [ -74.767586, 41.164715 ], [ -74.767818, 41.164524 ], [ -74.768014, 41.164363 ], [ -74.768148, 41.164273 ], [ -74.768199, 41.164257 ], [ -74.76826, 41.16414 ], [ -74.768298, 41.16399 ], [ -74.76831, 41.163804 ], [ -74.768301, 41.163679 ], [ -74.768135, 41.163527 ], [ -74.768105, 41.163478 ], [ -74.768093, 41.163427 ], [ -74.768093, 41.163353 ], [ -74.768093, 41.163289 ], [ -74.768093, 41.16313 ], [ -74.768119, 41.163125 ], [ -74.768141, 41.1631 ], [ -74.768148, 41.163075 ], [ -74.768151, 41.163057 ], [ -74.768112, 41.16303 ], [ -74.768093, 41.16302 ], [ -74.768069, 41.162982 ], [ -74.768051, 41.162944 ], [ -74.768051, 41.162907 ], [ -74.768047, 41.162866 ], [ -74.768076, 41.162831 ], [ -74.768108, 41.162812 ], [ -74.768137, 41.16275 ], [ -74.768148, 41.162732 ], [ -74.768172, 41.162706 ], [ -74.768227, 41.162657 ], [ -74.768288, 41.162601 ], [ -74.768326, 41.162553 ], [ -74.768352, 41.162493 ], [ -74.768372, 41.162404 ], [ -74.76839, 41.162375 ], [ -74.768422, 41.162349 ], [ -74.768472, 41.162329 ], [ -74.768735, 41.162249 ], [ -74.76879, 41.16224 ], [ -74.768837, 41.162239 ], [ -74.768893, 41.162253 ], [ -74.769019, 41.162312 ], [ -74.769075, 41.162327 ], [ -74.769127, 41.162332 ], [ -74.769148, 41.162328 ], [ -74.769201, 41.162285 ], [ -74.769256, 41.162225 ], [ -74.769292, 41.162186 ], [ -74.769347, 41.162123 ] ] ] ] })
temp
watershed_temp = shapely.geometry.shape(temp)
watershed_temp
try:
n = 1
pwr = {}
for geom in watershed_temp:
print(n, end='')
_x = json.dumps(shapely.geometry.mapping(geom))
_url = 'https://watersheds.cci.drexel.edu/api/osigeo/'
_headers = {}
_r_osi = requests.post(_url, data = _x , headers= _headers, allow_redirects=True, verify=True)
pwr_loop = eval(_r_osi.text)
# Convert to acres / feet
for r, sqm in pwr_loop.items():
if r in pwr.keys():
if r == 'str_bank':
_v = sqm / 1609.34
else:
_v = sqm / 4046.86
pwr[r] += _v
else:
if r == 'str_bank':
pwr[r] = sqm / 1609.34
else:
pwr[r] = sqm / 4046.86
n += 1
# Get PWR percents
pwr_percent = {}
for r, acres in pwr.items():
try:
value = acres / lu_total * 100
if value >= 100:
value = 100
pwr_percent[r] = value
except:
pwr_percent[r] = 0.001
#print('\nValues: {}'.format(pwr))
#print('Percents: {}'.format(pwr_percent))
except Exception as e:
print(e)
print('test')
try:
n = 1
pwr = {}
for geom in watershed_shapely:
print(n, end='')
_x = json.dumps(shapely.geometry.mapping(geom))
_url = 'https://watersheds.cci.drexel.edu/api/osigeo/'
_headers = {}
_r_osi = requests.post(_url, data = _x , headers= _headers, allow_redirects=True, verify=True)
pwr_loop = eval(_r_osi.text)
# Convert to acres / feet
for r, sqm in pwr_loop.items():
if r in pwr.keys():
if r == 'str_bank':
_v = sqm / 1609.34
else:
_v = sqm / 4046.86
pwr[r] += _v
else:
if r == 'str_bank':
pwr[r] = sqm / 1609.34
else:
pwr[r] = sqm / 4046.86
n += 1
# Get PWR percents
pwr_percent = {}
for r, acres in pwr.items():
try:
value = acres / lu_total * 100
if value >= 100:
value = 100
pwr_percent[r] = value
except:
pwr_percent[r] = 0.001
#print('\nValues: {}'.format(pwr))
#print('Percents: {}'.format(pwr_percent))
except Exception as e:
print(e)
print('test')
###Output
12
###Markdown
Create the final visualization
###Code
# Generate the second map
# Show the polygon and its watershed on a new map
try:
zoom_level = round((-0.697 * np.log(lu_total)) + 18.003)
except:
zoom_level = 15.0
displaymap2 = Map(center = (watershed_shapely.centroid.coords[0][1], watershed_shapely.centroid.coords[0][0])
, min_zoom = 1, max_zoom = 20, scroll_wheel_zoom = True,
basemap=basemaps.Esri.WorldStreetMap)
zoom_slider2 = IntSlider(description='Zoom level:', min=0, max=20, value=zoom_level)
jslink((zoom_slider2, 'value'), (displaymap2, 'zoom'))
widget_control2 = WidgetControl(widget=zoom_slider2, position='topright')
watershed = GeoJSON(
data=watershed_geojson,
style={
'opacity': 1, 'dashArray': '2', 'fillOpacity': 0.2, 'weight': 1, 'color': 'blue'
}
)
polygon = GeoJSON(
data=last_geom,
style={
'opacity': 1, 'fillOpacity': 0.1, 'weight': 2, 'color': 'green'
}
)
measure2 = MeasureControl(
position='topright',
active_color = 'red',
primary_length_unit = 'kilometers'
)
measure2.completed_color = 'red'
measure2.add_length_unit('miles', 0.000621371, 1)
measure2.secondary_length_unit = 'miles'
measure2.add_area_unit('sqkm', 0.0000001, 1)
measure2.secondary_area_unit = 'sqkm'
displaymap2.add_layer(polygon)
displaymap2.add_layer(watershed)
# displaymap2.add_control(widget_control2)
left_layer = basemap_to_tiles(basemaps.Esri.WorldImagery)
right_layer = basemap_to_tiles(basemaps.Esri.WorldStreetMap)
split_control = SplitMapControl(left_layer=left_layer, right_layer=right_layer)
displaymap2.add_control(split_control)
displaymap2.add_control(measure2)
# displaymap2
############################
# FINAL VISUALIZATION PAGE #
############################
# Show land cover summary
print('Total Drainage Area Acres: {}'.format(round(lu_total_bmp,2)))
# for lulc, sqm in lulcs.items():
# print('{}: {}'.format(lulc, round(sqm,2)), end =", ")
print('Percent Impervious: {}%'.format(round(p_impervious, 2)))
# Show PWR summary, use the wetlands from the landcover call
pwr_labels = {'str_bank': 'Stream Bank (mi)', 'head_pwr': 'Headwaters (ac)', 'ara_pwr': 'Active River Area (ac)'
, 'wet_pwr': 'Wetlands (ac)', 'tot_pwr': 'Total Priority Water Resources (PWR) (ac)'}
wetlands_acres = 0
for lulc, acre in lulcs.items():
if lulc == '90' or lulc == '95':
wetlands_acres += acre
try:
wetlands_percent = wetlands_acres / lu_total * 100
except:
print("Total Area Returned Zero")
for r, sqm in pwr.items():
if r == 'wet_pwr':
print('{}: {}'.format(pwr_labels[r], round(wetlands_acres,2)))
else:
print('{}: {}'.format(pwr_labels[r], round(sqm,2)))
###############################
# Pie charts for the PWR call #
###############################
labels_head = 'Headwaters', ''
sizes_head = [pwr_percent['head_pwr'], 100 - pwr_percent['head_pwr']]
labels_ara = 'ARA', ''
sizes_ara = [pwr_percent['ara_pwr'], 100 - pwr_percent['ara_pwr']]
labels_wet = 'Wetlands', ''
sizes_wet = [wetlands_percent, 100 - wetlands_percent]
labels_pwr = 'Priority\nWater Resources', ''
sizes_pwr = [pwr_percent['tot_pwr'], 100 - pwr_percent['tot_pwr']]
explode = (0.1, 0.0) # only "explode" the 1st slice
pie_colors = ["lightblue", "lightgrey"]
fig1, (ax1, ax2, ax3, ax4) = plt.subplots(1,4)
fig1.set_size_inches(15,15)
ax1.pie(sizes_head, explode=explode, labels=labels_head, autopct='%1.1f%%',
shadow=True, startangle=90, colors = pie_colors, textprops={'fontsize': 12})
ax2.pie(sizes_ara, explode=explode, labels=labels_ara, autopct='%1.1f%%',
shadow=True, startangle=90, colors = pie_colors, textprops={'fontsize': 12})
ax3.pie(sizes_wet, explode=explode, labels=labels_wet, autopct='%1.1f%%',
shadow=True, startangle=90, colors = pie_colors, textprops={'fontsize': 12})
ax4.pie(sizes_pwr, explode=explode, labels=labels_pwr, autopct='%1.1f%%',
shadow=True, startangle=90, colors = pie_colors, textprops={'fontsize': 12})
plt.show()
##########################
# Create Land Cover Plot #
##########################
plot_length = int(len(lulcs))
if plot_length > 10:
plot_length = 10
fig2 = plt.figure()
fig2.set_size_inches(plot_length,5)
ax2 = fig2.add_axes([0,0,1,1])
colormap = {'11': '#5475a8','21': '#e8d1d1','22': '#e29e8c','23': '#ff0000','24': '#b50000','31': '#d2cdc0',
'41': '#86c77e','42': '#39814e','43': '#d5e7b0','52': '#dcca8f','71': '#fde9aa','81': '#fbf65c',
'82': '#ca9146','90': '#c8e6f8','95': '#64b3d5'}
crosswalk = {'11': 'Open Water',
'21': 'Developed Open Space',
'22': 'Developed Low Intensity',
'23': 'Developed Medium Intensity',
'24': 'Developed High Intensity',
'31': 'Barren Land',
'41': 'Deciduous Forest',
'42': 'Evergreen Forest',
'43': 'Mixed Forest',
'52': 'Shrub/Scrub',
'71': 'Grassland Herbaceous',
'81': 'Pasture Hay',
'82': 'Cultivated Crops',
'90': 'Woody Wetlands',
'95': 'Emergent Herbaceous Wetlands'}
colors = {x: colormap[x] for x in lulcs}
x_labels = {x: crosswalk[x] for x in lulcs}
ax2.bar(list(lulcs.keys()), list(lulcs.values()), align='center', color=colors.values())
#ax.set_xticklabels(x_labels.values())
ax2.set_ylabel('Area (acres)')
ax2.set_title('Watershed Land Cover Distribution (NLCD 2011)')
handles = [plt.Rectangle((0,0),len(lulcs),len(lulcs), color=colors[label]) for label in x_labels]
ax2.legend(handles, x_labels.values(), bbox_to_anchor=(1.02, 1), loc='upper left')
plt.show()
##########################
# Show Reductions #
##########################
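# Note on the conversion constants below (added for clarity; interpretation assumed, not documented in the notebook):
#   6273000      ~ square inches per acre (43,560 sq ft x 144 sq in per sq ft = 6,272,640)
#   0.000578704  = 1/1728, cubic inches -> cubic feet
#   0.0000229569 ~ 1/43,560, cubic feet -> acre-feet
# so volume_cf estimates captured runoff as impervious share x drainage area (converted to sq in) x capture depth (in),
# with p_impervious used directly as the impervious multiplier.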
volume_cf = ((p_impervious * (lu_total_bmp * 6273000)) * runoff_capture) * 0.000578704
volume_acft = volume_cf * 0.0000229569
if reductions['tss'] <= 2000.0:
print('The %s BMP treats %.1f acres (%.0f percent impervious), and is estimated to reduce:\n\tNitrogen: %.2f pounds\n'
'\tPhosphorus: %.2f pounds\n'
'\tSediment: %.2f pounds\n'
'\t*Volume (cubic-ft): %.0f\n'
'\t*Volume (acre-ft): %.2f\n' % (bmp, lu_total_bmp, p_impervious, reductions['tn'],reductions['tp'],reductions['tss'], volume_cf, volume_acft))
else:
print('The %s BMP treats %.1f acres (%.0f percent impervious), and is estimated to reduce:\n\tNitrogen: %.2f pounds\n'
'\tPhosphorus: %.2f pounds\n'
'\tSediment: %.2f tons\n'
'\t*Volume (cubic-ft): %.0f\n'
'\t*Volume (acre-ft): %.2f\n' % (bmp, lu_total_bmp, p_impervious, reductions['tn'],reductions['tp'],reductions['tss'], volume_cf, volume_acft))
print('* Note: Volume reductions are applicable only to urban stormwater management.')
if archetype == 'Urban Stormwater Management':
if acres_treated > lu_total:
print('** Warning: Number of sewershed acres treated given (%.2f) is larger than the expected overland drainage area (%.2f).'%(acres_treated, lu_total))
elif 0 < acres_treated < lu_total:
print('** Warning: Number of sewershed acres treated given (%.2f) is smaller than the expected overland drainage area (%.2f). The calculated drainage area does not consider subsurface pipe networks so this is to be expected with urban stormwater networks.'%(acres_treated, lu_total))
elif archetype == 'Polygon Drainage' or archetype == 'Exclusion Buffer':
if 'Buffer' in bmp and lu_total > 100.0:
print("** Warning: The buffer drains a large area (%.2f acres) and may contain concentrated flow paths, which would reduce estimated reduction."%(lu_total))
################
# Show the map #
################
displaymap2
###Output
Total Drainage Area Acres: 33.8
Percent Impervious: 0.92%
Stream Bank (mi): 0.0
Headwaters (ac): 0.0
Active River Area (ac): 0.0
Wetlands (ac): 0
Total Priority Water Resources (PWR) (ac): 0.0
|
Mahfuzur_Rahman_01_Probability_Basics.ipynb | ###Markdown
**Basics Of Probability** ```
P(A) = (Event Outcomes favorable to A)/Sample space
```
###Code
# probability of getting head or tail while tossing the coin
h = 1/2
t = 1/2
print(f"Probability of getting a head is {h*100} %")
print(f"Probability of getting a tail is {t*100} %")
# Probability of getting " 1 " while rolling the dice
p1 = 1/6
p2 = 1/6
p3 = 1/6
p4 = 1/6
p5 = 1/6
p6 = 1/6
print(f"Probability to get 1 while rolling a dice {p1*100} %")
# Example 3
'''
The data collected by an Advertisement agency has revealed that out of 2800
visitors, 56 visitors clicked on 1 Advertisement, 30 clicked on 2 advertisements
and 14 clicked on 3 advertisements and the remaining did not click on any
advertisement.
Calculate
a) The probability that a visitor to the website will not click on any
advertisement.
b) The probability that a visitor to the website will click on an
advertisement.
c) The probability that a visitor to the website will click on more than
one advertisement.
'''
num_visitors = 2800                       # total visitors, as stated in the problem
num_clicked = 56 + 30 + 14                # visitors who clicked at least one advertisement
num_did_not_click = num_visitors - num_clicked
num_clicked_more_than_one = 30 + 14
PE1 = round(num_did_not_click/num_visitors, 4)
PE2 = round(num_clicked/num_visitors, 4)
PE3 = round(num_clicked_more_than_one/num_visitors, 4)
print(f'a. The probability that a visitor to the website will not click on any advertisement is {PE1}')
print(f'b. The probability that a visitor to the website will click on an advertisement is {PE2}')
print(f'c. The probability that a visitor to the website will click on more than one advertisement is {PE3}')
# Permutations code
import math
n = 8
k = 3
# permutations with repetition: n**k ordered arrangements of k choices from n options
permutations = math.pow(n, k)
print(permutations)
# example 2
n = 5
k = 3
permutations = math.factorial(n)/math.factorial(n-k)
print(permutations,"Possible Ways")
# combinations code
n = 8
k = 3
# determine the combinations (order does not matter): n! / ((n-k)! * k!)
combinations = math.factorial(n)/(math.factorial(n-k)*math.factorial(k))
print(combinations)
###Output
56.0
###Markdown
Joint Probability
###Code
# example
'''
What is the probability of drawing a card that is a "2" and red?
There are 52 cards in a deck, and 4 of them are "2"s. Of those four, only two are red
(the 2 of hearts and the 2 of diamonds).
Probability(2 and red) = 2/52 = 1/26
'''
prob2_red = 2/52
print("Probability for getting 2 red is %1.4f percent" % prob2_red)
# Example 2 (Joint Probability)
'''
At a popular company service center, a total of 100 complaints were received.
80 customers complained about late delivery of the items and 60 complained
about poor product quality.
Calculate the probability that a customer complaint will be about both product
quality and late delivery.
Let
L = Late delivery
Q = Poor quality
n(L) = Number of cases in favour of L = 80
n(Q) = Number of cases in favour of Q = 60
N = Total Number of complaints = 100
'''
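# Added note: this is inclusion-exclusion, n(L and Q) = n(L) + n(Q) - n(L or Q);
# since every one of the 100 complaints concerns at least one of the two issues, n(L or Q) = 100.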
both_complaints = (80 + 60) - 100
total_complaints = 100
PE4 = round(both_complaints/total_complaints,4)
print('Probability that a customer complaint will \n\
be about both product quality and late delivery is %1.3f' % PE4)
###Output
Probability that a customer complaint will 
be about both product quality and late delivery is 0.400
###Markdown
Independent event
Two events, A and B are independent if and only if P(A | B) = P(A),
where
P(A|B) is the conditional probability of A given B
P(A) is the marginal probability of A
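As a quick sanity check of this definition, here is a small added simulation sketch (not part of the original lesson): roll two dice many times and compare the empirical $P(A \mid B)$ with $P(A)$, where $A$ is the event that the second roll shows a 6 and $B$ is the event that the first roll shows a 6. For independent events the two estimates should be close.
```
import random

random.seed(0)
n_trials = 100_000
count_A = 0        # second roll is a 6
count_B = 0        # first roll is a 6
count_A_and_B = 0  # both rolls are 6
for _ in range(n_trials):
    first = random.randint(1, 6)
    second = random.randint(1, 6)
    if second == 6:
        count_A += 1
    if first == 6:
        count_B += 1
        if second == 6:
            count_A_and_B += 1
p_A = count_A / n_trials               # estimate of P(A), about 1/6
p_A_given_B = count_A_and_B / count_B  # estimate of P(A | B)
print(f"P(A)     ~ {p_A:.4f}")
print(f"P(A | B) ~ {p_A_given_B:.4f}")  # close to P(A), consistent with independence
```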
###Code
# example
'''
What is the probability of getting a "6" in two consecutive trials when rolling a dice?
For each roll of a dice:
Favorable events = {"6"}
Total number of outcomes = {"1","2","3","4","5","6"}
Let P1 be the probability of getting a "6" in the first roll of dice . Let P2 be the probability of getting a "6" in the second roll of dice.
Since first roll of dice does not influence the second roll of dice, these events are independent.
'''
first_roll = 1/6
second_roll = 1/6
PE5 = first_roll*second_roll
print('Getting a 6 in two consecutive rolls of dice is %1.4f' % PE5)
###Output
Getting a 6 in two consecutive rolls of dice is 0.0278
|
Data Structures/Recursion/Recurrence Relations.ipynb | ###Markdown
Problem Statement Previously, we considered the following problem:>Given a positive integer `n`, write a function, `print_integers`, that uses recursion to print all numbers from `n` to `1`. >>For example, if `n` is `4`, the function should print `4 3 2 1`. Our solution was:
###Code
def print_integers(n):
if n <= 0:
return
print(n)
print_integers(n - 1)
print_integers(5)
###Output
5
4
3
2
1
###Markdown
We have already discussed that every time a function is called, a new *frame* is created in memory, which is then pushed onto the *call stack*. For the current function, `print_integers`, the call stack with all the frames would look like this: Note that in Python, the stack is displayed in an "upside down" manner. This can be seen in the illustration above: the last frame (i.e. the frame with `n = 0`) lies at the top of the stack (but is displayed last here) and the first frame (i.e., the frame with `n = 5`) lies at the bottom of the stack (but is displayed first). But don't let this confuse you. The frame with `n = 0` is indeed the top of the stack, so it will be discarded first. And the frame with `n = 5` is indeed at the bottom of the stack, so it will be discarded last. We define time complexity as a measure of the amount of time it takes to run an algorithm. Similarly, the time complexity of our function `print_integers(5)` would indicate the amount of time taken to execute our function `print_integers`. But notice how when we call `print_integers()` with a particular value of `n`, it recursively calls itself multiple times. In other words, when we call `print_integers(n)`, it does some simple operations (like checking for the base case and printing the number) and then calls `print_integers(n - 1)`. Therefore, the overall time taken by `print_integers(n)` to execute would be equal to the time taken to execute its own simple operations plus the time taken to execute `print_integers(n - 1)`. Let the time taken to execute the function `print_integers(n)` be $T(n)$. And let the time taken to execute the function's own simple operations be represented by some constant, $k$. In that case, we can say that $$T(n) = T(n - 1) + k$$ where $T(n - 1)$ represents the time taken to execute the function `print_integers(n - 1)`. Similarly, we can represent $T(n - 1)$ as $$T(n - 1) = T(n - 2) + k$$ We can see that a pattern is being formed here: 1. $T(n)\ \ \ \ \ \ \ = T(n - 1) + k$ 2. $T(n - 1) = T(n - 2) + k$ 3. $T(n - 2) = T(n - 3) + k$ 4. $T(n - 3) = T(n - 4) + k$ ... 5. $T(2) = T(1) + k$ 6. $T(1) = T(0) + k$ 7. $T(0) = k_1$ Notice that when `n = 0` we are only checking the base case and then returning. This time can be represented by some other constant, $k_1$. If we add the respective left-hand sides and right-hand sides of all these equations, we get $$T(n) = nk + k_1$$ We know that while calculating time complexity, we tend to ignore these added constants because for large input sizes on the order of $10^5$, these constants become irrelevant. Thus, we can simplify the above to $$T(n) = nk$$ We can see that the time complexity of our function `print_integers(n)` is a linear function of $n$. Hence, we can say that the time complexity of the function is $O(n)$. Binary Search Overview Given a **sorted** list (say `arr`) and a key (say `target`), the binary search algorithm returns the index of the `target` element if it is present in the given `arr` list, else it returns -1. Here is an overview of how the recursive version of the binary search algorithm works: 1. Given a list with the lower bound (`start_index`) and the upper bound (`end_index`). 1. Find the center (say `mid_index`) of the list. 1. Check if the element at the center is your `target`? If yes, return the `mid_index`. 1. Check if the `target` is greater than that element at `mid_index`? If yes, call the same function with the right sub-array w.r.t. the center, i.e., updated indexes as `mid_index + 1` to `end_index` 1. Check if the `target` is less than that element at `mid_index`?
 If yes, call the same function with the left sub-array w.r.t. the center, i.e., updated indexes as `start_index` to `mid_index - 1` 1. Repeat the steps above until you find the target or until the bounds are the same or cross (the upper bound is less than the lower bound). Complexity Analysis Let's look at the time complexity of the recursive version of the binary search algorithm. >Note: The binary search function can also be written iteratively. But for the sake of understanding recurrence relations, we will have a look at the recursive algorithm. Here's the binary search algorithm, coded using recursion:
###Code
def binary_search(arr, target):
return binary_search_func(arr, 0, len(arr) - 1, target)
def binary_search_func(arr, start_index, end_index, target):
if start_index > end_index:
return -1
mid_index = (start_index + end_index)//2
if arr[mid_index] == target:
return mid_index
elif arr[mid_index] > target:
return binary_search_func(arr, start_index, mid_index - 1, target)
else:
return binary_search_func(arr, mid_index + 1, end_index, target)
arr = [0, 1, 2, 3, 4, 5, 6, 7, 8]
print(binary_search(arr, 5))
###Output
5
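###Markdown
A short note to round off the complexity analysis promised above (added here, using the same style of reasoning as the earlier derivation): each call to `binary_search_func` does a constant amount of work, say $k$, and then recurses on at most half of the remaining range, so the recurrence is $$T(n) = T(n/2) + k$$ Unrolling it gives $T(n) = T(n/4) + 2k = T(n/8) + 3k = \dots = T(1) + k\log_2 n$. Dropping the constants, the recursive binary search therefore runs in $O(\log n)$ time.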
|
Section 3/.ipynb_checkpoints/3.3_EstimatingARModel-checkpoint.ipynb | ###Markdown
Estimating an AR Model Introduction to the Autoregression Model An autoregression (AR) model is a regression of a time series on its own past values, shifted by one or more time steps; these shifted copies are called lags. I will demonstrate with five examples on the non-stationarized datasets so that you can see the results on the original data along with the forecasts.
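For reference, here is the model in standard notation (an added formula, not tied to any particular dataset in this notebook): the AR(1) model fitted below has the form $$y_t = c + \phi\, y_{t-1} + \varepsilon_t$$ where $c$ is a constant, $\phi$ is the lag-1 coefficient (reported as `ar.L1` in the statsmodels summaries), and $\varepsilon_t$ is white noise.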
###Code
import pandas as pd
from pandas import read_csv
from matplotlib import pyplot
from pandas.plotting import lag_plot
from statsmodels.graphics.tsaplots import plot_acf
###Output
_____no_output_____
###Markdown
Example 1: Vacation dataset
###Code
# Read in vacation dataset
vacation = read_csv('~/Desktop/section_3/df_vacation.csv', index_col=0, parse_dates=True)
vacation.head()
# Plot the time series against its lag
lag_plot(vacation)
pyplot.show()
from pandas import concat
values = pd.DataFrame(vacation.values)
dataframe = concat([values.shift(1), values], axis=1)
dataframe.columns = ['t-1', 't+1']
result = dataframe.corr()
print(result)
# Plot the autocorrelation of the dataset
from matplotlib import pyplot
from pandas.plotting import autocorrelation_plot
autocorrelation_plot(vacation)
pyplot.show()
# Plot the Autocorrelation Function, using candle sticks
from statsmodels.graphics.tsaplots import plot_acf
plot_acf(vacation, lags=50)
pyplot.show()
# Estimating an AR Model
# Import the ARMA module from statsmodels
from statsmodels.tsa.arima_model import ARMA
# Fit an AR(1) model to the first simulated data
mod = ARMA(vacation, order=(1,0)) # fit data to an AR1 model
res = mod.fit() # use fit() to estimate model
# Print out summary information on the fit
print(res.summary())
print(res.params)
# Estimated parameters are close to true parameters
###Output
ARMA Model Results
===============================================================================
Dep. Variable: Num_Search_Vacation No. Observations: 190
Model: ARMA(1, 0) Log Likelihood -694.604
Method: css-mle S.D. of innovations 9.338
Date: Sun, 17 Nov 2019 AIC 1395.209
Time: 21:02:32 BIC 1404.950
Sample: 01-01-2004 HQIC 1399.155
- 10-01-2019
=============================================================================================
coef std err z P>|z| [0.025 0.975]
---------------------------------------------------------------------------------------------
const 60.4397 3.451 17.512 0.000 53.675 67.204
ar.L1.Num_Search_Vacation 0.8079 0.044 18.352 0.000 0.722 0.894
Roots
=============================================================================
Real Imaginary Modulus Frequency
-----------------------------------------------------------------------------
AR.1 1.2378 +0.0000j 1.2378 0.0000
-----------------------------------------------------------------------------
const 60.439736
ar.L1.Num_Search_Vacation 0.807885
dtype: float64
###Markdown
When comparing candidate models, the best one is the model with the lowest information criterion; here we use the AIC reported in the summary above.
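To illustrate that selection rule, here is a small added sketch (not part of the original analysis) that fits a few candidate orders to the same `vacation` series with the `ARMA` class used throughout this notebook and prints their AIC values; the candidate orders chosen here are arbitrary.
```
from statsmodels.tsa.arima_model import ARMA

candidate_orders = [(1, 0), (2, 0), (1, 1)]
for order in candidate_orders:
    try:
        res = ARMA(vacation, order=order).fit(disp=0)
        print(f"ARMA{order}: AIC = {res.aic:.1f}")
    except Exception as err:  # some orders may fail to converge
        print(f"ARMA{order}: could not be fit ({err})")
```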
###Code
# Forecasting
# Import the ARMA module from statsmodels
from statsmodels.tsa.arima_model import ARMA
# Forecast the first AR(1) model
mod = ARMA(vacation, order=(1,0))
res = mod.fit()
# Plot the fitted model's predictions from 2015 through 2025;
# this range extends past the observed data, so it includes an out-of-sample forecast
res.plot_predict(start='2015', end='2025')
pyplot.show()
###Output
/Users/karenyang/opt/anaconda3/lib/python3.7/site-packages/statsmodels/tsa/base/tsa_model.py:165: ValueWarning: No frequency information was provided, so inferred frequency MS will be used.
% freq, ValueWarning)
###Markdown
Example 2: Furniture dataset
###Code
furn = read_csv('~/Desktop/section_3/df_furniture.csv', index_col=0, parse_dates=True)
furn.head()
# Plot the time series against its lag
lag_plot(furn)
pyplot.show()
from pandas import concat
values = pd.DataFrame(furn.values)
dataframe = concat([values.shift(1), values], axis=1)
dataframe.columns = ['t-1', 't+1']
result = dataframe.corr()
print(result)
# Plot the autocorrelation
from matplotlib import pyplot
from pandas.plotting import autocorrelation_plot
autocorrelation_plot(furn)
pyplot.show()
# Plot the Autocorrelation Function, using candle sticks
from pandas import read_csv
from matplotlib import pyplot
from statsmodels.graphics.tsaplots import plot_acf
plot_acf(furn, lags=50)
pyplot.show()
# Estimating an AR Model
# Import the ARMA module from statsmodels
from statsmodels.tsa.arima_model import ARMA
# Fit an AR(1) model to the first simulated data
mod = ARMA(furn, order=(1,0)) # fit data to an AR1 model
res = mod.fit() # use fit() to estimate model
# Print out summary information on the fit
print(res.summary())
print(res.params)
# Estimated parameters are close to true parameters
# S.D. of innovations is standard deviation of errors
# L1 is lag1
# fitted model parameters
# Import the ARMA module from statsmodels
from statsmodels.tsa.arima_model import ARMA
# Forecast the first AR(1) model
mod = ARMA(furn, order=(1,0))
res = mod.fit()
# Plot the fitted model's predictions from 2015 through 2025;
# this range extends past the observed data, so it includes an out-of-sample forecast
res.plot_predict(start='2015', end='2025')
pyplot.show()
###Output
/Users/karenyang/opt/anaconda3/lib/python3.7/site-packages/statsmodels/tsa/base/tsa_model.py:165: ValueWarning: No frequency information was provided, so inferred frequency MS will be used.
% freq, ValueWarning)
###Markdown
Example 3: Bank of America dataset
###Code
# Read in BOA dataset, this is original with resampling to monthly data
bac= read_csv('~/Desktop/section_3/df_bankofamerica.csv', index_col=0, parse_dates=True)
# convert daily data to monthly
bac= bac.resample(rule='M').last()
bac.head()
# Plot the time series against its lag
lag_plot(bac)
pyplot.show()
from pandas import concat
values = pd.DataFrame(bac.values)
dataframe = concat([values.shift(1), values], axis=1)
dataframe.columns = ['t-1', 't+1']
result = dataframe.corr()
print(result)
# Plot the autocorrelation
from matplotlib import pyplot
from pandas.plotting import autocorrelation_plot
autocorrelation_plot(bac)
pyplot.show()
# Plot the Autocorrelation Function, using candle sticks
from pandas import read_csv
from matplotlib import pyplot
from statsmodels.graphics.tsaplots import plot_acf
plot_acf(bac, lags=50)
pyplot.show()
# Estimating an AR Model
# Import the ARMA module from statsmodels
from statsmodels.tsa.arima_model import ARMA
# Fit an AR(1) model to the first simulated data
mod = ARMA(bac, order=(1,0)) # fit data to an AR1 model
res = mod.fit() # use fit() to estimate model
# Print out summary information on the fit
print(res.summary())
print(res.params)
# Estimated parameters are close to true parameters
# S.D. of innovations is standard deviation of errors
# L1 is lag1
# fitted model parameters
# Import the ARMA module from statsmodels
from statsmodels.tsa.arima_model import ARMA
# Forecast the first AR(1) model
mod = ARMA(bac, order=(1,0))
res = mod.fit()
# Plot the fitted model's predictions from 2015 through 2025;
# this range extends past the observed data, so it includes an out-of-sample forecast
res.plot_predict(start='2015', end='2025')
pyplot.show()
###Output
_____no_output_____
###Markdown
Example 4: J.P. Morgan dataset
###Code
# Read in JPM dataset
jpm = read_csv('~/Desktop/section_3/df_jpmorgan.csv', index_col=0, parse_dates=True)
# Convert the daily data to quarterly
jpm= jpm.resample(rule='Q').last() # resample to quarterly data
jpm.head()
# Plot the time series against its lag
lag_plot(jpm)
pyplot.show()
from pandas import concat
values = pd.DataFrame(jpm.values)
dataframe = concat([values.shift(1), values], axis=1)
dataframe.columns = ['t-1', 't+1']
result = dataframe.corr()
print(result)
# Plot the autocorrelation
from matplotlib import pyplot
from pandas.plotting import autocorrelation_plot
autocorrelation_plot(jpm)
pyplot.show()
# Plot the Autocorrelation Function, using candle sticks
from pandas import read_csv
from matplotlib import pyplot
from statsmodels.graphics.tsaplots import plot_acf
plot_acf(jpm, lags=50)
pyplot.show()
# Estimating an AR Model
# Import the ARMA module from statsmodels
from statsmodels.tsa.arima_model import ARMA
# Fit an AR(1) model to the first simulated data
mod = ARMA(jpm, order=(1,0)) # fit data to an AR1 model
res = mod.fit() # use fit() to estimate model
# Print out summary information on the fit
print(res.summary())
print(res.params)
# Estimated parameters are close to true parameters
# S.D. of innovations is standard deviation of errors
# L1 is lag1
# fitted model parameters
# Import the ARMA module from statsmodels
from statsmodels.tsa.arima_model import ARMA
# Forecast the first AR(1) model
mod = ARMA(jpm, order=(1,0))
res = mod.fit()
# Plot the fitted model's predictions from 2015 through 2025;
# this range extends past the observed data, so it includes an out-of-sample forecast
res.plot_predict(start='2015', end='2025')
pyplot.show()
###Output
_____no_output_____
###Markdown
Example 5: Average Temperature of St. Louis dataset
###Code
# Read in temp dataset
temp = read_csv('~/Desktop/section_3/df_temp.csv', index_col=0, parse_dates=True)
temp.head()
# Plot the time series against its lag
lag_plot(temp)
pyplot.show()
from pandas import concat
values = pd.DataFrame(temp.values)
dataframe = concat([values.shift(1), values], axis=1)
dataframe.columns = ['t-1', 't+1']
result = dataframe.corr()
print(result)
# Plot the autocorrelation
from matplotlib import pyplot
from pandas.plotting import autocorrelation_plot
autocorrelation_plot(temp)
pyplot.show()
# Plot the Autocorrelation Function, using candle sticks
from pandas import read_csv
from matplotlib import pyplot
from statsmodels.graphics.tsaplots import plot_acf
plot_acf(temp, lags=50)
pyplot.show()
# Estimating an AR Model
# Import the ARMA module from statsmodels
from statsmodels.tsa.arima_model import ARMA
# Fit an AR(1) model to the first simulated data
mod = ARMA(temp, order=(1,0)) # fit data to an AR1 model
res = mod.fit() # use fit() to estimate model
# Print out summary information on the fit
print(res.summary())
print(res.params)
# Estimated parameters are close to true parameters
# S.D. of innovations is standard deviation of errors
# L1 is lag1
# fitted model parameters
# Import the ARMA module from statsmodels
from statsmodels.tsa.arima_model import ARMA
# Forecast the first AR(1) model
mod = ARMA(temp, order=(1,0))
res = mod.fit()
# Plot the fitted model's predictions from 2015 through 2025;
# this range extends past the observed data, so it includes an out-of-sample forecast
res.plot_predict(start='2015', end='2025')
pyplot.show()
# end
###Output
_____no_output_____ |
Chapter07/06_ml_for_trading.ipynb | ###Markdown
ML for Trading: How to run an ML algorithm on Quantopian The code in this notebook is written for the Quantopian Research Platform and uses the 'Algorithms' rather than the 'Research' option we used before. To run it, you need to have a free Quantopian account, create a new algorithm and copy the content to the online development environment. Imports & Settings Quantopian Libraries
###Code
from quantopian.algorithm import attach_pipeline, pipeline_output, order_optimal_portfolio
from quantopian.pipeline import Pipeline, factors, filters, classifiers
from quantopian.pipeline.data.builtin import USEquityPricing
from quantopian.pipeline.data import Fundamentals
from quantopian.pipeline.data.psychsignal import stocktwits
from quantopian.pipeline.factors import (Latest,
CustomFactor,
SimpleMovingAverage,
AverageDollarVolume,
Returns,
RSI,
SimpleBeta,
MovingAverageConvergenceDivergenceSignal as MACD)
from quantopian.pipeline.filters import QTradableStocksUS
from quantopian.pipeline.experimental import risk_loading_pipeline, Size, Momentum, Volatility, Value, ShortTermReversal
import quantopian.optimize as opt
from quantopian.optimize.experimental import RiskModelExposure
###Output
_____no_output_____
###Markdown
Other Python Libraries
###Code
from scipy.stats import spearmanr
import talib
import pandas as pd
import numpy as np
from time import time
from collections import OrderedDict
from scipy import stats
from sklearn import linear_model, preprocessing, metrics, cross_validation
from sklearn.pipeline import make_pipeline
###Output
_____no_output_____
###Markdown
Strategy Positions
###Code
# strategy parameters
N_POSITIONS = 100 # Will be split 50% long and 50% short
TRAINING_PERIOD = 126 # past periods for training
HOLDING_PERIOD = 5 # predict returns N days into the future
# How often to trade; for daily trading, the alternative is date_rules.every_day()
TRADE_FREQ = date_rules.week_start()
###Output
_____no_output_____
###Markdown
Custom Universe We define a custom universe to limit duration of training.
###Code
def Q250US():
"""Define custom universe"""
return filters.make_us_equity_universe(
target_size=250,
rankby=factors.AverageDollarVolume(window_length=200),
mask=filters.default_us_equity_universe_mask(),
groupby=classifiers.fundamentals.Sector(),
max_group_weight=0.3,
smoothing_func=lambda f: f.downsample('month_start'),
)
###Output
_____no_output_____
###Markdown
Create Alpha Factors
###Code
def make_alpha_factors():
def PriceToSalesTTM():
"""Last closing price divided by sales per share"""
return Fundamentals.ps_ratio.latest
def PriceToEarningsTTM():
"""Closing price divided by earnings per share (EPS)"""
return Fundamentals.pe_ratio.latest
def DividendYield():
"""Dividends per share divided by closing price"""
return Fundamentals.trailing_dividend_yield.latest
def Capex_To_Cashflows():
return (Fundamentals.capital_expenditure.latest * 4.) / \
(Fundamentals.free_cash_flow.latest * 4.)
def EBITDA_Yield():
return (Fundamentals.ebitda.latest * 4.) / \
USEquityPricing.close.latest
def EBIT_To_Assets():
return (Fundamentals.ebit.latest * 4.) / \
Fundamentals.total_assets.latest
def Return_On_Total_Invest_Capital():
return Fundamentals.roic.latest
class Mean_Reversion_1M(CustomFactor):
inputs = [Returns(window_length=21)]
window_length = 252
def compute(self, today, assets, out, monthly_rets):
out[:] = (monthly_rets[-1] - np.nanmean(monthly_rets, axis=0)) / \
np.nanstd(monthly_rets, axis=0)
def MACD_Signal():
return MACD(fast_period=12, slow_period=26, signal_period=9)
def Net_Income_Margin():
return Fundamentals.net_margin.latest
def Operating_Cashflows_To_Assets():
return (Fundamentals.operating_cash_flow.latest * 4.) / \
Fundamentals.total_assets.latest
def Price_Momentum_3M():
return Returns(window_length=63)
class Price_Oscillator(CustomFactor):
inputs = [USEquityPricing.close]
window_length = 252
def compute(self, today, assets, out, close):
four_week_period = close[-20:]
out[:] = (np.nanmean(four_week_period, axis=0) /
np.nanmean(close, axis=0)) - 1.
def Returns_39W():
return Returns(window_length=215)
class Vol_3M(CustomFactor):
inputs = [Returns(window_length=2)]
window_length = 63
def compute(self, today, assets, out, rets):
out[:] = np.nanstd(rets, axis=0)
def Working_Capital_To_Assets():
return Fundamentals.working_capital.latest / Fundamentals.total_assets.latest
def sentiment():
return SimpleMovingAverage(inputs=[stocktwits.bull_minus_bear],
window_length=5).rank(mask=universe)
class AdvancedMomentum(CustomFactor):
""" Momentum factor """
inputs = [USEquityPricing.close,
Returns(window_length=126)]
window_length = 252
def compute(self, today, assets, out, prices, returns):
out[:] = ((prices[-21] - prices[-252])/prices[-252] -
(prices[-1] - prices[-21])/prices[-21]) / np.nanstd(returns, axis=0)
def SPY_Beta():
return SimpleBeta(target=sid(8554), regression_length=252)
return {
'Price to Sales': PriceToSalesTTM,
'PE Ratio': PriceToEarningsTTM,
'Dividend Yield': DividendYield,
# 'Capex to Cashflows': Capex_To_Cashflows,
# 'EBIT to Assets': EBIT_To_Assets,
# 'EBITDA Yield': EBITDA_Yield,
'MACD Signal Line': MACD_Signal,
'Mean Reversion 1M': Mean_Reversion_1M,
'Net Income Margin': Net_Income_Margin,
# 'Operating Cashflows to Assets': Operating_Cashflows_To_Assets,
'Price Momentum 3M': Price_Momentum_3M,
'Price Oscillator': Price_Oscillator,
# 'Return on Invested Capital': Return_On_Total_Invest_Capital,
'39 Week Returns': Returns_39W,
'Vol 3M': Vol_3M,
'SPY_Beta': SPY_Beta,
'Advanced Momentum': AdvancedMomentum,
'Size': Size,
'Volatitility': Volatility,
'Value': Value,
'Short-Term Reversal': ShortTermReversal,
'Momentum': Momentum,
# 'Materials': materials,
# 'Consumer Discretionary': consumer_discretionary,
# 'Financials': financials,
# 'Real Estate': real_estate,
# 'Consumer Staples': consumer_staples,
# 'Healthcare': health_care,
# 'Utilities': utilities,
# 'Telecom ': telecom,
# 'Energy': energy,
# 'Industrials': industrials,
# 'Technology': technology
}
###Output
_____no_output_____
###Markdown
Custom Machine Learning Factor Here we define a Machine Learning factor which trains a model and predicts forward returns
###Code
class ML(CustomFactor):
init = False
def compute(self, today, assets, out, returns, *inputs):
"""Train the model using
- shifted returns as target, and
- factors in a list of inputs as features;
each factor contains a 2-D array of shape [time x stocks]
"""
if (not self.init) or today.strftime('%A') == 'Monday':
# train on first day then subsequent Mondays (memory)
# get features
features = pd.concat([pd.DataFrame(data, columns=assets).stack().to_frame(i)
for i, data in enumerate(inputs)], axis=1)
# shift returns and align features
target = (pd.DataFrame(returns, columns=assets)
.shift(-HOLDING_PERIOD)
.dropna(how='all')
.stack())
target.index.rename(['date', 'asset'], inplace=True)
features = features.reindex(target.index)
# finalize features
features = (pd.get_dummies(features
.assign(asset=features
.index.get_level_values('asset')),
columns=['asset'],
sparse=True))
# train the model
self.model_pipe = make_pipeline(preprocessing.Imputer(),
preprocessing.MinMaxScaler(),
linear_model.LinearRegression())
# run pipeline and train model
self.model_pipe.fit(X=features, y=target)
self.assets = assets # keep track of assets in model
self.init = True
# predict most recent factor values
features = pd.DataFrame({i: d[-1] for i, d in enumerate(inputs)}, index=assets)
features = features.reindex(index=self.assets).assign(asset=self.assets)
features = pd.get_dummies(features, columns=['asset'])
preds = self.model_pipe.predict(features)
out[:] = pd.Series(preds, index=self.assets).reindex(index=assets)
###Output
_____no_output_____
###Markdown
Create Factor Pipeline Create pipeline with predictive factors and target returns
###Code
def make_ml_pipeline(alpha_factors, universe, lookback=21, lookahead=5):
"""Create pipeline with predictive factors and target returns"""
# set up pipeline
pipe = OrderedDict()
# Returns over lookahead days.
pipe['Returns'] = Returns(inputs=[USEquityPricing.open],
mask=universe,
window_length=lookahead + 1)
# Rank alpha factors:
pipe.update({name: f().rank(mask=universe)
for name, f in alpha_factors.items()})
# ML factor gets `lookback` datapoints on each factor
pipe['ML'] = ML(inputs=pipe.values(),
window_length=lookback + 1,
mask=universe)
return Pipeline(columns=pipe, screen=universe)
###Output
_____no_output_____
###Markdown
Define Algorithm
###Code
def initialize(context):
"""
Called once at the start of the algorithm.
"""
set_slippage(slippage.FixedSlippage(spread=0.00))
set_commission(commission.PerShare(cost=0, min_trade_cost=0))
schedule_function(rebalance_portfolio,
TRADE_FREQ,
time_rules.market_open(minutes=1))
# Record tracking variables at the end of each day.
schedule_function(log_metrics,
date_rules.every_day(),
time_rules.market_close())
# Set up universe
# base_universe = AverageDollarVolume(window_length=63, mask=QTradableStocksUS()).percentile_between(80, 100)
universe = AverageDollarVolume(window_length=63, mask=QTradableStocksUS()).percentile_between(40, 60)
# create alpha factors and machine learning pipline
ml_pipeline = make_ml_pipeline(alpha_factors=make_alpha_factors(),
universe=universe,
lookback=TRAINING_PERIOD,
lookahead=HOLDING_PERIOD)
attach_pipeline(ml_pipeline, 'alpha_model')
attach_pipeline(risk_loading_pipeline(), 'risk_loading_pipeline')
context.past_predictions = {}
context.realized_rmse = 0
context.realized_ic = 0
context.long_short_spread = 0
###Output
_____no_output_____
###Markdown
Evaluate Model Evaluate model performance using past predictions on hold-out data
###Code
def evaluate_past_predictions(context):
"""Evaluate model performance using past predictions on hold-out data"""
# A day has passed, shift days and drop old ones
context.past_predictions = {k-1: v for k, v in context.past_predictions.items() if k-1 >= 0}
if 0 in context.past_predictions:
        # Past predictions for the current day exist, so we can use today's n-back returns to evaluate them
returns = pipeline_output('alpha_model')['Returns'].to_frame('returns')
df = (context
.past_predictions[0]
.to_frame('predictions')
.join(returns, how='inner')
.dropna())
# Compute performance metrics
context.realized_rmse = metrics.mean_squared_error(y_true=df['returns'], y_pred=df.predictions)
context.realized_ic, _ = spearmanr(df['returns'], df.predictions)
log.info('rmse {:.2%} | ic {:.2%}'.format(context.realized_rmse, context.realized_ic))
long_rets = df.loc[df.predictions >= df.predictions.median(), 'returns'].mean()
short_rets = df.loc[df.predictions < df.predictions.median(), 'returns'].mean()
context.long_short_spread = (long_rets - short_rets) * 100
# Store current predictions
context.past_predictions[HOLDING_PERIOD] = context.predictions
###Output
_____no_output_____
###Markdown
Algo Execution Prepare Trades
###Code
def before_trading_start(context, data):
"""
Called every day before market open.
"""
context.predictions = pipeline_output('alpha_model')['ML']
context.predictions.index.rename(['date', 'equity'], inplace=True)
context.risk_loading_pipeline = pipeline_output('risk_loading_pipeline')
evaluate_past_predictions(context)
###Output
_____no_output_____
###Markdown
Rebalance
###Code
def rebalance_portfolio(context, data):
"""
Execute orders according to our schedule_function() timing.
"""
predictions = context.predictions
predictions = predictions.loc[data.can_trade(predictions.index)]
# Select long/short positions
n_positions = int(min(N_POSITIONS, len(predictions)) / 2)
to_trade = (predictions[predictions>0]
.nlargest(n_positions)
.append(predictions[predictions < 0]
.nsmallest(n_positions)))
# Model may produce duplicate predictions
to_trade = to_trade[~to_trade.index.duplicated()]
# Setup Optimization Objective
objective = opt.MaximizeAlpha(to_trade)
# Setup Optimization Constraints
constrain_gross_leverage = opt.MaxGrossExposure(1.0)
constrain_pos_size = opt.PositionConcentration.with_equal_bounds(-.02, .02)
market_neutral = opt.DollarNeutral()
constrain_risk = RiskModelExposure(
risk_model_loadings=context.risk_loading_pipeline,
version=opt.Newest)
# Optimizer calculates portfolio weights and
# moves portfolio toward the target.
order_optimal_portfolio(
objective=objective,
constraints=[
constrain_gross_leverage,
constrain_pos_size,
market_neutral,
constrain_risk
],
)
###Output
_____no_output_____
###Markdown
Track Performance
###Code
def log_metrics(context, data):
"""
Plot variables at the end of each day.
"""
record(leverage=context.account.leverage,
#num_positions=len(context.portfolio.positions),
realized_rmse=context.realized_rmse,
realized_ic=context.realized_ic,
long_short_spread=context.long_short_spread,
)
###Output
_____no_output_____ |
nb/049_submission.ipynb | ###Markdown
Overview- Based on nb046 (cv: 0.94114, sub: 0.945)- Replace the batch7 outlier values analyzed in nb048 with appropriate values Const
###Code
NB = '049'
isSmallSet = False
if isSmallSet:
LENGTH = 7000
else:
LENGTH = 500_000
PATH_TRAIN = './../data/input/train_clean.csv'
PATH_TEST = './../data/input/test_clean.csv'
PATH_SMPLE_SUB = './../data/input/sample_submission.csv'
DIR_OUTPUT = './../data/output/'
DIR_OUTPUT_IGNORE = './../data/output_ignore/'
cp = ['#f8b195', '#f67280', '#c06c84', '#6c5b7b', '#355c7d']
sr = 10*10**3 # 10 kHz
###Output
_____no_output_____
###Markdown
Import everything I need :)
###Code
import warnings
warnings.filterwarnings('ignore')
import time
import gc
import random
import os
import itertools
import multiprocessing
import numpy as np
from scipy import signal
# from pykalman import KalmanFilter
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from fastprogress import progress_bar
from lightgbm import LGBMRegressor
from sklearn.model_selection import KFold, train_test_split, StratifiedKFold, GroupKFold
from sklearn.metrics import f1_score, mean_absolute_error, confusion_matrix
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor
# from sklearn.svm import SVR
from sklearn.linear_model import Lasso
# from dtreeviz.trees import dtreeviz
import tensorflow as tf
from tensorflow.keras.layers import *
from tensorflow.keras.callbacks import Callback, LearningRateScheduler
from tensorflow.keras.losses import categorical_crossentropy
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import backend as K
from tensorflow.keras import losses, models, optimizers
# import tensorflow_addons as tfa
###Output
_____no_output_____
###Markdown
My function
###Code
def f1_macro(true, pred):
return f1_score(true, pred, average='macro')
def get_df_batch(df, batch):
idxs = df['batch'] == batch
    assert any(idxs), 'No such batch exists'
return df[idxs]
def get_signal_mv_mean(df, n=3001):
signal_mv = np.zeros(len(df))
for bt in df['batch'].unique():
idxs = df['batch'] == bt
_signal_mv = df['signal'][idxs].rolling(n, center=True).mean().interpolate('spline', order=5, limit_direction='both').values
signal_mv[idxs] = _signal_mv
return signal_mv
def get_signal_mv_std(df, n=3001):
signal_mv = np.zeros(len(df))
for bt in df['batch'].unique():
idxs = df['batch'] == bt
_signal_mv = df['signal'][idxs].rolling(n, center=True).std().interpolate('spline', order=5, limit_direction='both').values
signal_mv[idxs] = _signal_mv
return signal_mv
def get_signal_mv_min(df, n=3001):
signal_mv = np.zeros(len(df))
for bt in df['batch'].unique():
idxs = df['batch'] == bt
_signal_mv = df['signal'][idxs].rolling(n, center=True).min().interpolate('spline', order=5, limit_direction='both').values
signal_mv[idxs] = _signal_mv
return signal_mv
def get_signal_mv_max(df, n=3001):
signal_mv = np.zeros(len(df))
for bt in df['batch'].unique():
idxs = df['batch'] == bt
_signal_mv = df['signal'][idxs].rolling(n, center=True).max().interpolate('spline', order=5, limit_direction='both').values
signal_mv[idxs] = _signal_mv
return signal_mv
def group_feat_train(_train):
train = _train.copy()
# group init
train['group'] = int(0)
# group 1
idxs = (train['batch'] == 3) | (train['batch'] == 7)
train['group'][idxs] = int(1)
# group 2
idxs = (train['batch'] == 5) | (train['batch'] == 8)
train['group'][idxs] = int(2)
# group 3
idxs = (train['batch'] == 2) | (train['batch'] == 6)
train['group'][idxs] = int(3)
# group 4
idxs = (train['batch'] == 4) | (train['batch'] == 9)
train['group'][idxs] = int(4)
return train[['group']]
def group_feat_test(_test):
test = _test.copy()
# group init
test['group'] = int(0)
x_idx = np.arange(len(test))
# group 1
idxs = (100000<=x_idx) & (x_idx<200000)
test['group'][idxs] = int(1)
idxs = (900000<=x_idx) & (x_idx<=1000000)
test['group'][idxs] = int(1)
# group 2
idxs = (200000<=x_idx) & (x_idx<300000)
test['group'][idxs] = int(2)
idxs = (600000<=x_idx) & (x_idx<700000)
test['group'][idxs] = int(2)
# group 3
idxs = (400000<=x_idx) & (x_idx<500000)
test['group'][idxs] = int(3)
# group 4
idxs = (500000<=x_idx) & (x_idx<600000)
test['group'][idxs] = int(4)
idxs = (700000<=x_idx) & (x_idx<800000)
test['group'][idxs] = int(4)
return test[['group']]
class permutation_importance():
def __init__(self, model, metric):
self.is_computed = False
self.n_feat = 0
self.base_score = 0
self.model = model
self.metric = metric
self.df_result = []
def compute(self, X_valid, y_valid):
self.n_feat = len(X_valid.columns)
if self.metric == 'auc':
y_valid_score = self.model.predict_proba(X_valid)[:, 1]
fpr, tpr, thresholds = roc_curve(y_valid, y_valid_score)
self.base_score = auc(fpr, tpr)
else:
pred = np.round(self.model.predict(X_valid)).astype('int8')
self.base_score = self.metric(y_valid, pred)
self.df_result = pd.DataFrame({'feat': X_valid.columns,
'score': np.zeros(self.n_feat),
'score_diff': np.zeros(self.n_feat)})
# predict
for i, col in enumerate(X_valid.columns):
df_perm = X_valid.copy()
np.random.seed(1)
df_perm[col] = np.random.permutation(df_perm[col])
y_valid_pred = self.model.predict(df_perm)
if self.metric == 'auc':
y_valid_score = self.model.predict_proba(df_perm)[:, 1]
fpr, tpr, thresholds = roc_curve(y_valid, y_valid_score)
score = auc(fpr, tpr)
else:
score = self.metric(y_valid, np.round(y_valid_pred).astype('int8'))
self.df_result['score'][self.df_result['feat']==col] = score
self.df_result['score_diff'][self.df_result['feat']==col] = self.base_score - score
self.is_computed = True
def get_negative_feature(self):
        assert self.is_computed!=False, 'The compute method has not been run yet'
idx = self.df_result['score_diff'] < 0
return self.df_result.loc[idx, 'feat'].values.tolist()
def get_positive_feature(self):
        assert self.is_computed!=False, 'The compute method has not been run yet'
idx = self.df_result['score_diff'] > 0
return self.df_result.loc[idx, 'feat'].values.tolist()
def show_permutation_importance(self, score_type='loss'):
'''score_type = 'loss' or 'accuracy' '''
        assert self.is_computed!=False, 'The compute method has not been run yet'
if score_type=='loss':
ascending = True
elif score_type=='accuracy':
ascending = False
else:
ascending = ''
plt.figure(figsize=(15, int(0.25*self.n_feat)))
sns.barplot(x="score_diff", y="feat", data=self.df_result.sort_values(by="score_diff", ascending=ascending))
plt.title('base_score - permutation_score')
def plot_corr(df, abs_=False, threshold=0.95):
if abs_==True:
corr = df.corr().abs()>threshold
vmin = 0
else:
corr = df.corr()
vmin = -1
# Plot
fig, ax = plt.subplots(figsize=(12, 10), dpi=100)
fig.patch.set_facecolor('white')
sns.heatmap(corr,
xticklabels=df.corr().columns,
yticklabels=df.corr().columns,
vmin=vmin,
vmax=1,
center=0,
annot=False)
# Decorations
ax.set_title('Correlation', fontsize=22)
def get_low_corr_column(df, threshold):
df_corr = df.corr()
df_corr = abs(df_corr)
columns = df_corr.columns
    # set the diagonal values to 0
for i in range(0, len(columns)):
df_corr.iloc[i, i] = 0
while True:
columns = df_corr.columns
max_corr = 0.0
query_column = None
target_column = None
df_max_column_value = df_corr.max()
max_corr = df_max_column_value.max()
query_column = df_max_column_value.idxmax()
target_column = df_corr[query_column].idxmax()
if max_corr < threshold:
            # no pair exceeded the threshold, so stop
break
else:
            # at least one pair exceeded the threshold
delete_column = None
saved_column = None
            # remove the feature whose absolute correlations with the other features are larger overall
if sum(df_corr[query_column]) <= sum(df_corr[target_column]):
delete_column = target_column
saved_column = query_column
else:
delete_column = query_column
saved_column = target_column
            # drop the removed feature from the correlation matrix (both row and column)
df_corr.drop([delete_column], axis=0, inplace=True)
df_corr.drop([delete_column], axis=1, inplace=True)
    return df_corr.columns  # list of column names with the highly correlated features removed
def reduce_mem_usage(df, verbose=True):
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
start_mem = df.memory_usage().sum() / 1024**2
for col in df.columns:
if col!='open_channels':
col_type = df[col].dtypes
if col_type in numerics:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
end_mem = df.memory_usage().sum() / 1024**2
if verbose: print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(end_mem, 100 * (start_mem - end_mem) / start_mem))
return df
def create_signal_mod(train):
left = 3641000
right = 3829000
thresh_dict = {
3: [0.1, 2.0],
2: [-1.1, 0.7],
1: [-2.3, -0.6],
0: [-3.8, -2],
}
train['signal'] = train['signal'].values
for ch in train[train['batch']==7]['open_channels'].unique():
idxs_noisy = (train['open_channels']==ch) & (left<train.index) & (train.index<right)
idxs_not_noisy = (train['open_channels']==ch) & ~idxs_noisy
mean = train[idxs_not_noisy]['signal'].mean()
idxs_outlier = idxs_noisy & (thresh_dict[ch][1]<train['signal'].values)
train['signal'][idxs_outlier] = mean
idxs_outlier = idxs_noisy & (train['signal'].values<thresh_dict[ch][0])
train['signal'][idxs_outlier] = mean
return train
def train_lgbm(X, y, X_te, lgbm_params, random_state=5, n_fold=5, verbose=50, early_stopping_rounds=100, show_fig=True):
# using features
print(f'features({len(X.columns)}): \n{X.columns}') if not verbose==0 else None
# folds = KFold(n_splits=n_fold, shuffle=True, random_state=random_state)
folds = StratifiedKFold(n_splits=n_fold, shuffle=True, random_state=random_state)
scores = []
oof = np.zeros(len(X))
oof_round = np.zeros(len(X))
test_pred = np.zeros(len(X_te))
df_pi = pd.DataFrame(columns=['feat', 'score_diff'])
for fold_n, (train_idx, valid_idx) in enumerate(folds.split(X, y=y)):
if verbose==0:
pass
else:
print('\n------------------')
print(f'- Fold {fold_n + 1}/{N_FOLD} started at {time.ctime()}')
# prepare dataset
X_train, X_valid = X.iloc[train_idx], X.iloc[valid_idx]
y_train, y_valid = y[train_idx], y[valid_idx]
# train
model = LGBMRegressor(**lgbm_params, n_estimators=N_ESTIMATORS)
model.fit(X_train, y_train,
eval_set=[(X_train, y_train), (X_valid, y_valid)],
verbose=verbose,
early_stopping_rounds=early_stopping_rounds)
# pred
y_valid_pred = model.predict(X_valid, model.best_iteration_)
y_valid_pred_round = np.round(y_valid_pred).astype('int8')
_test_pred = model.predict(X_te, model.best_iteration_)
if show_fig==False:
pass
else:
# permutation importance
            pi = permutation_importance(model, f1_macro)  # pass the model and the metric
pi.compute(X_valid, y_valid)
pi_result = pi.df_result
df_pi = pd.concat([df_pi, pi_result[['feat', 'score_diff']]])
# result
oof[valid_idx] = y_valid_pred
oof_round[valid_idx] = y_valid_pred_round
score = f1_score(y_valid, y_valid_pred_round, average='macro')
scores.append(score)
test_pred += _test_pred
if verbose==0:
pass
else:
print(f'---> f1-score(macro) valid: {f1_score(y_valid, y_valid_pred_round, average="macro"):.4f}')
print('')
print('====== finish ======')
print('score list:', scores)
print('CV mean score(f1_macro): {0:.4f}, std: {1:.4f}'.format(np.mean(scores), np.std(scores)))
print(f'oof score(f1_macro): {f1_score(y, oof_round, average="macro"):.4f}')
print('')
if show_fig==False:
pass
else:
# visualization
plt.figure(figsize=(5, 5))
plt.plot([0, 10], [0, 10], color='gray')
plt.scatter(y, oof, alpha=0.05, color=cp[1])
plt.xlabel('true')
plt.ylabel('pred')
plt.show()
# confusion_matrix
plot_confusion_matrix(y, oof_round, classes=np.arange(11))
# permutation importance
plt.figure(figsize=(15, int(0.25*len(X.columns))))
order = df_pi.groupby(["feat"]).mean()['score_diff'].reset_index().sort_values('score_diff', ascending=False)
sns.barplot(x="score_diff", y="feat", data=df_pi, order=order['feat'])
plt.title('base_score - permutation_score')
plt.show()
# submission
test_pred = test_pred/N_FOLD
test_pred_round = np.round(test_pred).astype('int8')
return test_pred_round, test_pred, oof_round, oof
def plot_confusion_matrix(truth, pred, classes, normalize=False, title=''):
cm = confusion_matrix(truth, pred)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
plt.figure(figsize=(10, 10))
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
plt.title('Confusion matrix', size=15)
plt.colorbar(fraction=0.046, pad=0.04)
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.grid(False)
plt.tight_layout()
def train_test_split_lgbm(X, y, X_te, lgbm_params, random_state=5, test_size=0.3, verbose=50, early_stopping_rounds=100, show_fig=True):
# using features
print(f'features({len(X.columns)}): \n{X.columns}') if not verbose==0 else None
# folds = KFold(n_splits=n_fold, shuffle=True, random_state=random_state)
# folds = StratifiedKFold(n_splits=n_fold, shuffle=True, random_state=random_state)
# prepare dataset
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=test_size, random_state=random_state)
# train
model = LGBMRegressor(**lgbm_params, n_estimators=N_ESTIMATORS)
model.fit(X_train, y_train,
eval_set=[(X_train, y_train), (X_valid, y_valid)],
verbose=verbose,
early_stopping_rounds=early_stopping_rounds)
# pred
oof = model.predict(X_valid, model.best_iteration_)
oof_round = np.round(oof).astype('int8')
test_pred = model.predict(X_te, model.best_iteration_)
test_pred_round = np.round(test_pred).astype('int8')
print('====== finish ======')
print(f'oof score(f1_macro): {f1_score(y_valid, oof_round, average="macro"):.4f}')
print('')
if show_fig==False:
pass
else:
# visualization
plt.figure(figsize=(5, 5))
plt.plot([0, 10], [0, 10], color='gray')
plt.scatter(y_valid, oof, alpha=0.05, color=cp[1])
plt.xlabel('true')
plt.ylabel('pred')
plt.show()
# confusion_matrix
plot_confusion_matrix(y_valid, oof_round, classes=np.arange(11))
# permutation importance
        pi = permutation_importance(model, f1_macro)  # pass the model and the metric
pi.compute(X_valid, y_valid)
pi.show_permutation_importance(score_type='accuracy') # loss or accuracy
plt.show()
return test_pred_round, test_pred, oof_round, oof
###Output
_____no_output_____
###Markdown
ref: https://www.kaggle.com/martxelo/fe-and-ensemble-mlp-and-lgbm
###Code
def calc_gradients(s, n_grads=4):
'''
Calculate gradients for a pandas series. Returns the same number of samples
'''
grads = pd.DataFrame()
g = s.values
for i in range(n_grads):
g = np.gradient(g)
grads['grad_' + str(i+1)] = g
return grads
def calc_low_pass(s, n_filts=10):
'''
    Applies low-pass filters to the signal: a causal (left-delayed) version and a zero-phase version
'''
wns = np.logspace(-2, -0.3, n_filts)
# wns = [0.3244]
low_pass = pd.DataFrame()
x = s.values
for wn in wns:
b, a = signal.butter(1, Wn=wn, btype='low')
zi = signal.lfilter_zi(b, a)
low_pass['lowpass_lf_' + str('%.4f' %wn)] = signal.lfilter(b, a, x, zi=zi*x[0])[0]
low_pass['lowpass_ff_' + str('%.4f' %wn)] = signal.filtfilt(b, a, x)
return low_pass
def calc_high_pass(s, n_filts=10):
'''
    Applies high-pass filters to the signal: a causal (left-delayed) version and a zero-phase version
'''
wns = np.logspace(-2, -0.1, n_filts)
# wns = [0.0100, 0.0264, 0.0699, 0.3005, 0.4885, 0.7943]
high_pass = pd.DataFrame()
x = s.values
for wn in wns:
b, a = signal.butter(1, Wn=wn, btype='high')
zi = signal.lfilter_zi(b, a)
high_pass['highpass_lf_' + str('%.4f' %wn)] = signal.lfilter(b, a, x, zi=zi*x[0])[0]
high_pass['highpass_ff_' + str('%.4f' %wn)] = signal.filtfilt(b, a, x)
return high_pass
def calc_roll_stats(s, windows=[10, 50, 100, 500, 1000, 3000]):
'''
Calculates rolling stats like mean, std, min, max...
'''
roll_stats = pd.DataFrame()
for w in windows:
roll_stats['roll_mean_' + str(w)] = s.rolling(window=w, min_periods=1).mean().interpolate('spline', order=5, limit_direction='both')
roll_stats['roll_std_' + str(w)] = s.rolling(window=w, min_periods=1).std().interpolate('spline', order=5, limit_direction='both')
roll_stats['roll_min_' + str(w)] = s.rolling(window=w, min_periods=1).min().interpolate('spline', order=5, limit_direction='both')
roll_stats['roll_max_' + str(w)] = s.rolling(window=w, min_periods=1).max().interpolate('spline', order=5, limit_direction='both')
roll_stats['roll_range_' + str(w)] = roll_stats['roll_max_' + str(w)] - roll_stats['roll_min_' + str(w)]
roll_stats['roll_q10_' + str(w)] = s.rolling(window=w, min_periods=1).quantile(0.10).interpolate('spline', order=5, limit_direction='both')
roll_stats['roll_q25_' + str(w)] = s.rolling(window=w, min_periods=1).quantile(0.25).interpolate('spline', order=5, limit_direction='both')
roll_stats['roll_q50_' + str(w)] = s.rolling(window=w, min_periods=1).quantile(0.50).interpolate('spline', order=5, limit_direction='both')
roll_stats['roll_q75_' + str(w)] = s.rolling(window=w, min_periods=1).quantile(0.75).interpolate('spline', order=5, limit_direction='both')
roll_stats['roll_q90_' + str(w)] = s.rolling(window=w, min_periods=1).quantile(0.90).interpolate('spline', order=5, limit_direction='both')
# add zeros when na values (std)
# roll_stats = roll_stats.fillna(value=0)
return roll_stats
def calc_ewm(s, windows=[10, 50, 100, 500, 1000, 3000]):
'''
Calculates exponential weighted functions
'''
ewm = pd.DataFrame()
for w in windows:
ewm['ewm_mean_' + str(w)] = s.ewm(span=w, min_periods=1).mean()
ewm['ewm_std_' + str(w)] = s.ewm(span=w, min_periods=1).std()
# add zeros when na values (std)
ewm = ewm.fillna(value=0)
return ewm
def divide_and_add_features(s, signal_size=500000):
'''
Divide the signal in bags of "signal_size".
Normalize the data dividing it by 15.0
'''
# normalize
s = s/15.0
ls = []
for i in progress_bar(range(int(s.shape[0]/signal_size))):
sig = s[i*signal_size:(i+1)*signal_size].copy().reset_index(drop=True)
sig_featured = add_features(sig)
ls.append(sig_featured)
return pd.concat(ls, axis=0)
###Output
_____no_output_____
###Markdown
ref: https://www.kaggle.com/nxrprime/single-model-lgbm-kalman-filter-ii
###Code
def Kalman1D(observations,damping=1):
# To return the smoothed time series data
observation_covariance = damping
initial_value_guess = observations[0]
transition_matrix = 1
transition_covariance = 0.1
initial_value_guess
kf = KalmanFilter(
initial_state_mean=initial_value_guess,
initial_state_covariance=observation_covariance,
observation_covariance=observation_covariance,
transition_covariance=transition_covariance,
transition_matrices=transition_matrix
)
pred_state, state_cov = kf.smooth(observations)
return pred_state
###Output
_____no_output_____
###Markdown
Preparation setting
###Code
sns.set()
###Output
_____no_output_____
###Markdown
load dataset
###Code
df_tr = pd.read_csv(PATH_TRAIN)
df_te = pd.read_csv(PATH_TEST)
###Output
_____no_output_____
###Markdown
Assign batch numbers to make the data easier to process
###Code
batch_list = []
for n in range(10):
batchs = np.ones(500000)*n
batch_list.append(batchs.astype(int))
batch_list = np.hstack(batch_list)
df_tr['batch'] = batch_list
batch_list = []
for n in range(4):
batchs = np.ones(500000)*n
batch_list.append(batchs.astype(int))
batch_list = np.hstack(batch_list)
df_te['batch'] = batch_list
###Output
_____no_output_____
###Markdown
smallset?
###Code
if isSmallSet:
print('small set mode')
# train
batchs = df_tr['batch'].values
dfs = []
for i_bt, bt in enumerate(df_tr['batch'].unique()):
idxs = batchs == bt
_df = df_tr[idxs][:LENGTH].copy()
dfs.append(_df)
df_tr = pd.concat(dfs).reset_index(drop=True)
# test
batchs = df_te['batch'].values
dfs = []
for i_bt, bt in enumerate(df_te['batch'].unique()):
idxs = batchs == bt
_df = df_te[idxs][:LENGTH].copy()
dfs.append(_df)
df_te = pd.concat(dfs).reset_index(drop=True)
###Output
_____no_output_____
###Markdown
Train
###Code
# configurations and main hyperparameters
# EPOCHS = 180
EPOCHS = 180
NNBATCHSIZE = 16
GROUP_BATCH_SIZE = 4000
SEED = 321
LR = 0.0015
SPLITS = 5
def seed_everything(seed):
random.seed(seed)
np.random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
# tf.random.set_seed(seed)
# read data
def read_data():
train = pd.read_csv(PATH_TRAIN, dtype={'time': np.float32, 'signal': np.float32, 'open_channels':np.int32})
test = pd.read_csv(PATH_TEST, dtype={'time': np.float32, 'signal': np.float32})
sub = pd.read_csv(PATH_SMPLE_SUB, dtype={'time': np.float32})
# Y_train_proba = np.load('./../data/input/Y_train_proba.npy')
# Y_test_proba = np.load('./../data/input/Y_test_proba.npy')
probas = np.load('./../data/output_ignore/probas_nb044_LGBMClassifier_cv_0.9385.npz')
Y_train_proba = probas['arr_0']
Y_test_proba = probas['arr_1']
for i in range(11):
train[f"proba_{i}"] = Y_train_proba[:, i]
test[f"proba_{i}"] = Y_test_proba[:, i]
    # remove the group1 spikes from df_tr
# batch_list = []
# for n in range(10):
# batchs = np.ones(500000)*n
# batch_list.append(batchs.astype(int))
# batch_list = np.hstack(batch_list)
# train['batch'] = batch_list
# max_ = train.loc[train['batch']==3]['signal'].max()
# min_ = train.loc[train['batch']==3]['signal'].min()
# idxs = (train['batch']==7) & (train['signal']>max_)
# train['signal'][idxs] = max_
# idxs = (train['batch']==7) & (train['signal']<min_)
# train['signal'][idxs] = min_
# train = train.drop(['batch'], axis=1)
# add offset
batch_list = []
for n in range(10):
batchs = np.ones(500000)*n
batch_list.append(batchs.astype(int))
batch_list = np.hstack(batch_list)
train['batch'] = batch_list
batch_list = []
for n in range(4):
batchs = np.ones(500000)*n
batch_list.append(batchs.astype(int))
batch_list = np.hstack(batch_list)
test['batch'] = batch_list
group = group_feat_train(train)
train = pd.concat([train, group], axis=1)
group = group_feat_test(test)
test = pd.concat([test, group], axis=1)
off_set_4 = 0.952472 - (-1.766044)
off_set_9 = 0.952472 - (-1.770441)
# batch4
idxs = train['batch'] == 4
train['signal'][idxs] = train['signal'].values + off_set_4
# batch9
idxs = train['batch'] == 9
train['signal'][idxs] = train['signal'].values + off_set_9
off_set_test = 2.750
# group4
idxs = test['group'] == 4
test['signal'][idxs] = test['signal'][idxs].values + off_set_test
# mod batch7
train = create_signal_mod(train)
plt.figure(figsize=(20, 3))
plt.plot(train['signal'])
plt.show()
plt.figure(figsize=(20, 3))
plt.plot(test['signal'])
plt.show()
train = train.drop(['batch', 'group'], axis=1)
test = test.drop(['batch', 'group'], axis=1)
return train, test, sub
# create batches of 4000 observations
def batching(df, batch_size):
df['group'] = df.groupby(df.index//batch_size, sort=False)['signal'].agg(['ngroup']).values
df['group'] = df['group'].astype(np.uint16)
return df
# normalize the data (standard scaler). We can also try other scalers for a better score!
def normalize(train, test):
train_input_mean = train.signal.mean()
train_input_sigma = train.signal.std()
train['signal'] = (train.signal - train_input_mean) / train_input_sigma
test['signal'] = (test.signal - train_input_mean) / train_input_sigma
return train, test
# get lead and lag features
def lag_with_pct_change(df, windows):
for window in windows:
df['signal_shift_pos_' + str(window)] = df.groupby('group')['signal'].shift(window).fillna(0)
df['signal_shift_neg_' + str(window)] = df.groupby('group')['signal'].shift(-1 * window).fillna(0)
return df
# main module to run feature engineering. Here you may want to try and add other features and check if your score improves :).
def run_feat_engineering(df, batch_size):
# create batches
df = batching(df, batch_size = batch_size)
# create leads and lags (1, 2, 3 making them 6 features)
df = lag_with_pct_change(df, [1, 2, 3])
# create signal ** 2 (this is the new feature)
df['signal_2'] = df['signal'] ** 2
return df
# fillna with the mean and select features for training
def feature_selection(train, test):
features = [col for col in train.columns if col not in ['index', 'group', 'open_channels', 'time']]
train = train.replace([np.inf, -np.inf], np.nan)
test = test.replace([np.inf, -np.inf], np.nan)
for feature in features:
feature_mean = pd.concat([train[feature], test[feature]], axis = 0).mean()
train[feature] = train[feature].fillna(feature_mean)
test[feature] = test[feature].fillna(feature_mean)
return train, test, features
# model function (very important, you can try different architectures to get a better score. I believe that top public leaderboard is a 1D Conv + RNN style)
def Classifier(shape_):
def cbr(x, out_layer, kernel, stride, dilation):
x = Conv1D(out_layer, kernel_size=kernel, dilation_rate=dilation, strides=stride, padding="same")(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
return x
def wave_block(x, filters, kernel_size, n):
dilation_rates = [2**i for i in range(n)]
x = Conv1D(filters = filters,
kernel_size = 1,
padding = 'same')(x)
res_x = x
for dilation_rate in dilation_rates:
tanh_out = Conv1D(filters = filters,
kernel_size = kernel_size,
padding = 'same',
activation = 'tanh',
dilation_rate = dilation_rate)(x)
sigm_out = Conv1D(filters = filters,
kernel_size = kernel_size,
padding = 'same',
activation = 'sigmoid',
dilation_rate = dilation_rate)(x)
x = Multiply()([tanh_out, sigm_out])
x = Conv1D(filters = filters,
kernel_size = 1,
padding = 'same')(x)
res_x = Add()([res_x, x])
return res_x
inp = Input(shape = (shape_))
x = cbr(inp, 64, 7, 1, 1)
x = BatchNormalization()(x)
x = wave_block(x, 16, 3, 12)
x = BatchNormalization()(x)
x = wave_block(x, 32, 3, 8)
x = BatchNormalization()(x)
x = wave_block(x, 64, 3, 4)
x = BatchNormalization()(x)
x = wave_block(x, 128, 3, 1)
x = cbr(x, 32, 7, 1, 1)
x = BatchNormalization()(x)
x = Dropout(0.2)(x)
out = Dense(11, activation = 'softmax', name = 'out')(x)
model = models.Model(inputs = inp, outputs = out)
opt = Adam(lr = LR)
# opt = tfa.optimizers.SWA(opt)
# model.compile(loss = losses.CategoricalCrossentropy(), optimizer = opt, metrics = ['accuracy'])
model.compile(loss = categorical_crossentropy, optimizer = opt, metrics = ['accuracy'])
return model
# function that decreases the learning rate as epochs increase (I also changed this part of the code)
def lr_schedule(epoch):
if epoch < 30:
lr = LR
elif epoch < 40:
lr = LR / 3
elif epoch < 50:
lr = LR / 5
elif epoch < 60:
lr = LR / 7
elif epoch < 70:
lr = LR / 9
elif epoch < 80:
lr = LR / 11
elif epoch < 90:
lr = LR / 13
else:
lr = LR / 100
return lr
# class to get macro f1 score. This is not entirely necessary but it's fun to check the f1 score of each epoch (be careful: if you use this function, the early stopping callback will not work)
class MacroF1(Callback):
def __init__(self, model, inputs, targets):
self.model = model
self.inputs = inputs
self.targets = np.argmax(targets, axis = 2).reshape(-1)
def on_epoch_end(self, epoch, logs):
pred = np.argmax(self.model.predict(self.inputs), axis = 2).reshape(-1)
score = f1_score(self.targets, pred, average = 'macro')
print(f'F1 Macro Score: {score:.5f}')
# main function to perform GroupKFold cross validation (we have 1000 vectors of 4000 rows and 8 features (columns)). Going to make 5 groups with these subgroups.
def run_cv_model_by_batch(train, test, _splits, batch_col, feats, sample_submission, nn_epochs, nn_batch_size):
seed_everything(SEED)
K.clear_session()
# config = tf.compat.v1.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1,
# gpu_options=tf.compat.v1.GPUOptions(
# visible_device_list='4', # specify GPU number
# allow_growth=True
# )
# )
# sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph(), config=config)
# tf.compat.v1.keras.backend.set_session(sess)
# tf.compat.v1 ---> tf (changed from the TensorFlow 2.x API to the TensorFlow 1.x API)
config = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1,
# gpu_options=tf.GPUOptions(
# visible_device_list='4', # specify GPU number
# allow_growth=True
# )
)
sess = tf.Session(graph=tf.get_default_graph(), config=config)
tf.keras.backend.set_session(sess)
oof_ = np.zeros((len(train), 11)) # build out of folds matrix with 11 columns, they represent our target variables classes (from 0 to 10)
preds_ = np.zeros((len(test), 11))
target = ['open_channels']
group = train['group']
kf = GroupKFold(n_splits=_splits)
splits = [x for x in kf.split(train, train[target], group)]
new_splits = []
for sp in splits:
new_split = []
new_split.append(np.unique(group[sp[0]]))
new_split.append(np.unique(group[sp[1]]))
new_split.append(sp[1])
new_splits.append(new_split)
# pivot target columns to turn the net into a multiclass classification structure (you can also leave it as 1 vector with the sparse_categorical_crossentropy loss function)
tr = pd.concat([pd.get_dummies(train.open_channels), train[['group']]], axis=1)
tr.columns = ['target_'+str(i) for i in range(11)] + ['group']
target_cols = ['target_'+str(i) for i in range(11)]
train_tr = np.array(list(tr.groupby('group').apply(lambda x: x[target_cols].values))).astype(np.float32)
train = np.array(list(train.groupby('group').apply(lambda x: x[feats].values)))
test = np.array(list(test.groupby('group').apply(lambda x: x[feats].values)))
for n_fold, (tr_idx, val_idx, val_orig_idx) in enumerate(new_splits[0:], start=0):
train_x, train_y = train[tr_idx], train_tr[tr_idx]
valid_x, valid_y = train[val_idx], train_tr[val_idx]
print(f'Our training dataset shape is {train_x.shape}')
print(f'Our validation dataset shape is {valid_x.shape}')
gc.collect()
shape_ = (None, train_x.shape[2]) # the input shape is the number of features we are using (dimension 2 of axes 0, 1, 2)
model = Classifier(shape_)
# using our lr_schedule function
cb_lr_schedule = LearningRateScheduler(lr_schedule)
model.fit(train_x,train_y,
epochs = nn_epochs,
callbacks = [cb_lr_schedule, MacroF1(model, valid_x, valid_y)], # adding custom evaluation metric for each epoch
batch_size = nn_batch_size,verbose = 2,
validation_data = (valid_x,valid_y))
preds_f = model.predict(valid_x)
f1_score_ = f1_score(np.argmax(valid_y, axis=2).reshape(-1), np.argmax(preds_f, axis=2).reshape(-1), average = 'macro') # need to get the class with the biggest probability
print(f'Training fold {n_fold + 1} completed. macro f1 score : {f1_score_ :1.5f}')
preds_f = preds_f.reshape(-1, preds_f.shape[-1])
oof_[val_orig_idx,:] += preds_f
te_preds = model.predict(test)
te_preds = te_preds.reshape(-1, te_preds.shape[-1])
preds_ += te_preds / _splits
# calculate the oof macro f1_score
f1_score_ = f1_score(np.argmax(train_tr, axis = 2).reshape(-1), np.argmax(oof_, axis = 1), average = 'macro') # axis 2 for the 3-dimensional array and axis 1 for the 2-dimensional array (extracting the best class)
print(f'Training completed. oof macro f1 score : {f1_score_:1.5f}')
# submission
save_path = f'{DIR_OUTPUT}submission_nb{NB}_cv_{f1_score_:.4f}.csv'
print(f'submission save path: {save_path}')
sample_submission['open_channels'] = np.argmax(preds_, axis = 1).astype(int)
sample_submission.to_csv(save_path, index=False, float_format='%.4f')
# probas
save_path = f'{DIR_OUTPUT_IGNORE}probas_nb{NB}_cv_{f1_score_:.4f}'
print(f'probas save path: {save_path}')
np.savez_compressed(save_path, oof_, preds_)
return oof_
%%time
# this function runs our entire program
def run_everything():
print(f'Reading Data Started...({time.ctime()})')
train, test, sample_submission = read_data()
train, test = normalize(train, test)
print(f'Reading and Normalizing Data Completed')
print(f'Creating Features({time.ctime()})')
print(f'Feature Engineering Started...')
train = run_feat_engineering(train, batch_size = GROUP_BATCH_SIZE)
test = run_feat_engineering(test, batch_size = GROUP_BATCH_SIZE)
train, test, features = feature_selection(train, test)
print(f'Feature Engineering Completed...')
print(f'Training Wavenet model with {SPLITS} folds of GroupKFold Started...({time.ctime()})')
oof_ = run_cv_model_by_batch(train, test, SPLITS, 'group', features, sample_submission, EPOCHS, NNBATCHSIZE)
print(f'Training completed...')
return oof_
oof_ = run_everything()
###Output
Reading Data Started...(Mon May 11 10:59:45 2020)
###Markdown
analysis
###Code
df_tr = pd.read_csv(PATH_TRAIN)
batch_list = []
for n in range(10):
batchs = np.ones(500000)*n
batch_list.append(batchs.astype(int))
batch_list = np.hstack(batch_list)
df_tr['batch'] = batch_list
# create the group features
group = group_feat_train(df_tr)
df_tr = pd.concat([df_tr, group], axis=1)
y = df_tr['open_channels'].values
oof = np.argmax(oof_, axis=1).astype(int)
for group in sorted(df_tr['group'].unique()):
idxs = df_tr['group'] == group
oof_grp = oof[idxs].astype(int)
y_grp = y[idxs]
print(f'group_score({group}): {f1_score(y_grp, oof_grp, average="micro"):4f}')
max_ = df_tr.loc[df_tr['batch']==3]['signal'].max()
min_ = df_tr.loc[df_tr['batch']==3]['signal'].min()
idxs = (df_tr['batch']==7) & (df_tr['signal']>max_)
df_tr['signal'][idxs] = max_
idxs = (df_tr['batch']==7) & (df_tr['signal']<min_)
df_tr['signal'][idxs] = min_
###Output
_____no_output_____
###Markdown
Visualization
###Code
x_idx = np.arange(len(df_tr))
idxs = y != oof
failed = np.zeros(len(df_tr))
failed[idxs] = 1
n = 200
b = np.ones(n)/n
failed_move = np.convolve(failed, b, mode='same')
fig, axs = plt.subplots(2, 1, figsize=(20, 6))
axs = axs.ravel()
# fig = plt.figure(figsize=(20, 3))
for i_gr, group in enumerate(sorted(df_tr['group'].unique())):
idxs = df_tr['group'] == group
axs[0].plot(np.arange(len(df_tr))[idxs], df_tr['signal'].values[idxs], color=cp[i_gr], label=f'group={group}')
for x in range(10):
axs[0].axvline(x*500000 + 500000, color='gray')
axs[0].text(x*500000 + 250000, 5, x)
axs[0].plot(x_idx, failed_move*10, '.', color='black', label='failed_mv')
axs[0].set_xlim(0, 5500000)
axs[0].legend()
axs[1].plot(x_idx, y)
axs[1].set_xlim(0, 5500000)
# fig.legend()
###Output
_____no_output_____ |
python-note/python_notebooks-master/modules/logistic_regression_classifier.ipynb | ###Markdown
Logistic Regression Classifier
**Logistic regression** helps model the relationship between input and output variables.
- input variables: independent
- output variables: dependent

The dependent variable is restricted to a fixed set of values, which constitute the classification classes. A _logistic function_ is used to estimate the probabilities relating the input and output variables in order to establish a relationship; it is the _sigmoid curve_ $\sigma(z)=1/(1+e^{-z})$, which maps a linear combination of the inputs to a value in $(0,1)$ that can be read as a class probability. This is related to generalized linear model analysis, where a line is fitted to a series of points so as to minimize the error; here a _logistic_ function is used instead of a _linear_ one. Logistic regression facilitates classification even though it is not, strictly speaking, a classification technique, and it is common in machine learning because of its simplicity.

With [tkinter](https://docs.python.org/2/library/tkinter.html) installed, logistic regression will be used to visualize a classifier below: Import
###Code
import numpy as np
from sklearn import linear_model as lm
import matplotlib.pyplot as plt
from utilities import visualize_classifier as vc
###Output
_____no_output_____
###Markdown
data arrayAssign `numpy` arrays to two variables:
###Code
X = np.array([[3.1, 7.2], [4, 6.7], [2.9, 8], [5.1, 4.5], [6, 5], [5.6, 5], [3.3, 0.4], [3.9, 0.9], [2.8, 1], [0.5, 3.4], [1, 4], [0.6, 4.9]])
y = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3])
###Output
_____no_output_____
###Markdown
create classifier
###Code
c = lm.LogisticRegression(solver = 'liblinear', C = 1)
###Output
_____no_output_____
###Markdown
train classifier
###Code
c.fit(X, y)
###Output
_____no_output_____
###Markdown
visualize classifier
###Code
vc(c, X, y)
###Output
_____no_output_____ |
.ipynb_checkpoints/exercise_210-checkpoint.ipynb | ###Markdown
A Simple Neural NetworkIt is easy to create a single-layer neural network using pytorch.The outputs of the neural network are merely linear combinations of the inputs.
###Code
import torch
inputs = torch.Tensor([[1, 2]])
print("The inputs are:")
print(inputs)
###Output
The inputs are:
1 2
[torch.FloatTensor of size 1x2]
###Markdown
We've used horizontal matrices for the input and output vectors (matrices with 1 row).So, you have to multiply the inputs by the weights using $inputs * weights$ instead of $weights * inputs$.
###Code
weights = torch.Tensor([[3, 7],[4, 1]])
print("The weights are:")
print(weights)
bias = torch.Tensor([[0.5, 0.3]])
print("The biases are:")
print(bias)
###Output
The weights are:
3 7
4 1
[torch.FloatTensor of size 2x2]
The biases are:
0.5000 0.3000
[torch.FloatTensor of size 1x2]
###Markdown
If the input is $f$ and the output is $c$, then $c = f*W + b$
###Code
outputs = inputs.mm(weights) + bias
print("The outputs are:")
print(outputs)
###Output
The outputs are:
11.5000 9.3000
[torch.FloatTensor of size 1x2]
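###Markdown
The same single-layer network can also be written with `torch.nn.Linear`. The sketch below assumes a recent PyTorch API (the notebook above was run with an older version); note that `nn.Linear` stores the weight matrix as (out_features, in_features), i.e. the transpose of the $W$ used above.
###Code
import torch
import torch.nn as nn

# sketch: reproduce c = f*W + b with nn.Linear (recent PyTorch API assumed)
layer = nn.Linear(2, 2)
with torch.no_grad():
    # nn.Linear keeps weights as (out_features, in_features), i.e. W transposed
    layer.weight.copy_(torch.tensor([[3., 4.], [7., 1.]]))
    layer.bias.copy_(torch.tensor([0.5, 0.3]))
print(layer(torch.tensor([[1., 2.]])))  # expected: [[11.5, 9.3]]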
|
paper/Advection_diffusion/AD_artificial/Loop_noise_only.ipynb | ###Markdown
2D Advection-Diffusion equation
In this notebook we provide a simple example of the DeepMoD algorithm and apply it to the 2D advection-diffusion equation.
###Code
# General imports
import numpy as np
import torch
# DeepMoD functions
from deepymod import DeepMoD
from deepymod.model.func_approx import NN
from deepymod.model.library import Library2D_third
from deepymod.model.constraint import LeastSquares
from deepymod.model.sparse_estimators import Threshold,PDEFIND
from deepymod.training import train
from deepymod.training.sparsity_scheduler import TrainTestPeriodic
from scipy.io import loadmat
# Settings for reproducibility
np.random.seed(1)
torch.manual_seed(1)
if torch.cuda.is_available():
device = 'cuda'
else:
device = 'cpu'
###Output
_____no_output_____
###Markdown
Prepare the data Next, we prepare the dataset.
###Code
data = loadmat('Diffusion_2D_space41.mat')
data = np.real(data['Expression1']).reshape((41,41,41,4))[:,:,:,3]
x_dim, y_dim, t_dim = data.shape
time_range = [0.01,0.02,0.05,0.1,0.15,0.2,0.3,0.5,0.75,1,1.5,2,3,4]
for i in time_range:
# Downsample data and prepare data without noise:
down_data= np.take(np.take(np.take(data,np.arange(0,x_dim,4),axis=0),np.arange(0,y_dim,4),axis=1),np.arange(0,t_dim,3),axis=2)
print("Dowmsampled shape:",down_data.shape, "Total number of data points:", np.product(down_data.shape))
index = len(np.arange(0,t_dim,i))
width, width_2, steps = down_data.shape
x_arr, y_arr, t_arr = np.linspace(0,1,width), np.linspace(0,1,width_2), np.linspace(0,1,steps)
x_grid, y_grid, t_grid = np.meshgrid(x_arr, y_arr, t_arr, indexing='ij')
X, y = np.transpose((t_grid.flatten(), x_grid.flatten(), y_grid.flatten())), np.float32(down_data.reshape((down_data.size, 1)))
# Add noise
noise_level = i
y_noisy = y + noise_level * np.std(y) * np.random.randn(y.size, 1)
# Randomize data
idx = np.random.permutation(y.shape[0])
X_train = torch.tensor(X[idx, :], dtype=torch.float32, requires_grad=True).to(device)
y_train = torch.tensor(y_noisy[idx, :], dtype=torch.float32).to(device)
# Configure DeepMoD
network = NN(3, [40, 40, 40, 40], 1)
library = Library2D_third(poly_order=0)
estimator = Threshold(0.05)
sparsity_scheduler = TrainTestPeriodic(periodicity=50, patience=200, delta=1e-5)
constraint = LeastSquares()
model = DeepMoD(network, library, estimator, constraint).to(device)
optimizer = torch.optim.Adam(model.parameters(), betas=(0.99, 0.99), amsgrad=True, lr=2e-3)
logdir='final_runs/Noise_runs_11_11_14/'+str(i)+'/'
train(model, X_train, y_train, optimizer,sparsity_scheduler, log_dir=logdir, split=0.8, max_iterations=50000, delta=1e-6, patience=200)
###Output
Dowmsampled shape: (11, 11, 14) Total number of data points: 1694
49975 MSE: 4.52e-05 Reg: 6.02e-06 L1: 1.01e+00 Algorithm converged. Writing model to disk.
Dowmsampled shape: (11, 11, 14) Total number of data points: 1694
49975 MSE: 2.57e-05 Reg: 2.99e-06 L1: 1.04e+00 Algorithm converged. Writing model to disk.
Dowmsampled shape: (11, 11, 14) Total number of data points: 1694
49975 MSE: 2.85e-05 Reg: 3.00e-06 L1: 1.25e+00 Algorithm converged. Writing model to disk.
Dowmsampled shape: (11, 11, 14) Total number of data points: 1694
49975 MSE: 3.23e-05 Reg: 4.60e-06 L1: 1.01e+00 Algorithm converged. Writing model to disk.
Dowmsampled shape: (11, 11, 14) Total number of data points: 1694
49975 MSE: 5.06e-05 Reg: 2.60e-06 L1: 1.20e+00 Algorithm converged. Writing model to disk.
Dowmsampled shape: (11, 11, 14) Total number of data points: 1694
49025 MSE: 1.60e-04 Reg: 1.06e-05 L1: 1.00e+00 Algorithm converged. Writing model to disk.
Dowmsampled shape: (11, 11, 14) Total number of data points: 1694
49975 MSE: 1.34e-04 Reg: 5.44e-06 L1: 1.82e+00 Algorithm converged. Writing model to disk.
Dowmsampled shape: (11, 11, 14) Total number of data points: 1694
49975 MSE: 3.33e-04 Reg: 3.09e-06 L1: 1.27e+00 Algorithm converged. Writing model to disk.
Dowmsampled shape: (11, 11, 14) Total number of data points: 1694
49975 MSE: 7.10e-04 Reg: 1.01e-05 L1: 1.19e+00 Algorithm converged. Writing model to disk.
Dowmsampled shape: (11, 11, 14) Total number of data points: 1694
49975 MSE: 1.35e-03 Reg: 1.02e-05 L1: 1.24e+00 Algorithm converged. Writing model to disk.
Dowmsampled shape: (11, 11, 14) Total number of data points: 1694
49975 MSE: 2.17e-03 Reg: 5.04e-05 L1: 1.26e+00 Algorithm converged. Writing model to disk.
Dowmsampled shape: (11, 11, 14) Total number of data points: 1694
49975 MSE: 5.47e-03 Reg: 1.96e-04 L1: 1.03e+00 Algorithm converged. Writing model to disk.
Dowmsampled shape: (11, 11, 14) Total number of data points: 1694
49975 MSE: 1.52e-03 Reg: 2.82e-06 L1: 1.28e+00 Algorithm converged. Writing model to disk.
Dowmsampled shape: (11, 11, 14) Total number of data points: 1694
49975 MSE: 1.72e-02 Reg: 2.26e-04 L1: 5.34e+00 Algorithm converged. Writing model to disk.
|
2019/code/04-dqn.ipynb | ###Markdown
Q-learning
This notebook will guide you through the implementation of the vanilla Q-learning algorithm. You need to implement QLearningAgent (follow the instructions for each method) and use it on a number of tests below.
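As a reminder, the update rule you will implement below (it matches the formula given in the `update` method's docstring) is
$$Q(s,a) \leftarrow (1-\alpha)\,Q(s,a) + \alpha\bigl(r + \gamma \max_{a'} Q(s',a')\bigr)$$
where $\alpha$ is the learning rate, $\gamma$ the discount factor, and exploration is $\varepsilon$-greedy: with probability $\varepsilon$ a random action is taken, otherwise the current best action.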
###Code
# In google collab, uncomment this:
# !wget https://bit.ly/2FMJP5K -q -O setup.py
# !bash setup.py 2>&1 1>stdout.log | tee stderr.log
# This code creates a virtual display to draw game images on.
# If you are running locally, just ignore it
# import os
# if type(os.environ.get("DISPLAY")) is not str or len(os.environ.get("DISPLAY")) == 0:
# !bash ../xvfb start
# %env DISPLAY = : 1
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
%load_ext autoreload
%autoreload 2
%%writefile qlearning.py
from collections import defaultdict
import random
import math
import numpy as np
class QLearningAgent:
def __init__(self, alpha, epsilon, discount, get_legal_actions):
"""
Q-Learning Agent
based on https://inst.eecs.berkeley.edu/~cs188/sp19/projects.html
Instance variables you have access to
- self.epsilon (exploration prob)
- self.alpha (learning rate)
- self.discount (discount rate aka gamma)
Functions you should use
- self.get_legal_actions(state) {state, hashable -> list of actions, each is hashable}
which returns legal actions for a state
- self.get_qvalue(state,action)
which returns Q(state,action)
- self.set_qvalue(state,action,value)
which sets Q(state,action) := value
!!!Important!!!
Note: please avoid using self._qValues directly.
There's a special self.get_qvalue/set_qvalue for that.
"""
self.get_legal_actions = get_legal_actions
self._qvalues = defaultdict(lambda: defaultdict(lambda: 0))
self.alpha = alpha
self.epsilon = epsilon
self.discount = discount
def get_qvalue(self, state, action):
""" Returns Q(state,action) """
return self._qvalues[state][action]
def set_qvalue(self, state, action, value):
""" Sets the Qvalue for [state,action] to the given value """
self._qvalues[state][action] = value
#---------------------START OF YOUR CODE---------------------#
def get_value(self, state):
"""
Compute your agent's estimate of V(s) using current q-values
V(s) = max_over_action Q(state,action) over possible actions.
Note: please take into account that q-values can be negative.
"""
possible_actions = self.get_legal_actions(state)
# If there are no legal actions, return 0.0
if len(possible_actions) == 0:
return 0.0
<YOUR CODE HERE >
return value
def update(self, state, action, reward, next_state):
"""
You should do your Q-Value update here:
Q(s,a) := (1 - alpha) * Q(s,a) + alpha * (r + gamma * V(s'))
"""
# agent parameters
gamma = self.discount
learning_rate = self.alpha
<YOUR CODE HERE >
self.set_qvalue(state, action, < YOUR_QVALUE > )
def get_best_action(self, state):
"""
Compute the best action to take in a state (using current q-values).
"""
possible_actions = self.get_legal_actions(state)
# If there are no legal actions, return None
if len(possible_actions) == 0:
return None
<YOUR CODE HERE >
return best_action
def get_action(self, state):
"""
Compute the action to take in the current state, including exploration.
With probability self.epsilon, we should take a random action.
otherwise - the best policy action (self.get_best_action).
Note: To pick randomly from a list, use random.choice(list).
To pick True or False with a given probablity, generate uniform number in [0, 1]
and compare it with your probability
"""
# Pick Action
possible_actions = self.get_legal_actions(state)
action = None
# If there are no legal actions, return None
if len(possible_actions) == 0:
return None
# agent parameters:
epsilon = self.epsilon
<YOUR CODE HERE >
return chosen_action
###Output
_____no_output_____
###Markdown
Try it on taxiHere we use the qlearning agent on taxi env from openai gym.You will need to insert a few agent functions here.
###Code
import gym
env = gym.make("Taxi-v2")
n_actions = env.action_space.n
from qlearning import QLearningAgent
agent = QLearningAgent(
alpha=0.5,
epsilon=0.25,
discount=0.99,
get_legal_actions=lambda s: range(n_actions))
def play_and_train(env, agent, t_max=10**4):
"""
This function should
- run a full game, actions given by agent's e-greedy policy
- train agent using agent.update(...) whenever it is possible
- return total reward
"""
total_reward = 0.0
s = env.reset()
for t in range(t_max):
# get agent to pick action given state s.
a = <YOUR CODE >
next_s, r, done, _ = env.step(a)
# train (update) agent for state s
<YOUR CODE HERE >
s = next_s
total_reward += r
if done:
break
return total_reward
from IPython.display import clear_output
rewards = []
for i in range(1000):
rewards.append(play_and_train(env, agent))
agent.epsilon *= 0.99
if i % 100 == 0:
clear_output(True)
print('eps =', agent.epsilon, 'mean reward =', np.mean(rewards[-10:]))
plt.plot(rewards)
plt.show()
###Output
_____no_output_____
###Markdown
Binarized state spaces
Use the agent to train efficiently on CartPole-v0. This environment has a continuous set of possible states, so you will have to group them into bins somehow. The simplest way is to use `round(x, n_digits)` (or numpy's round) to round a real number to a given number of digits. The tricky part is to get the n_digits right for each state dimension to train effectively. Note that you don't need to convert the state to integers, but to __tuples__ of any kind of values.
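As an illustration only — the per-dimension digit counts below are arbitrary assumptions you will need to tune against the state histograms plotted later — a binarization might look like:
```python
import numpy as np

# sketch: round each CartPole observation (x, x_dot, theta, theta_dot) onto a
# coarse grid and return a hashable tuple; the chosen digits are placeholders
def binarize(state, n_digits=(0, 1, 2, 1)):
    return tuple(np.round(s, d) for s, d in zip(state, n_digits))
```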
###Code
env = gym.make("CartPole-v0")
n_actions = env.action_space.n
print("first state:%s" % (env.reset()))
plt.imshow(env.render('rgb_array'))
###Output
_____no_output_____
###Markdown
Play a few gamesWe need to estimate observation distributions. To do so, we'll play a few games and record all states.
###Code
all_states = []
for _ in range(1000):
all_states.append(env.reset())
done = False
while not done:
s, r, done, _ = env.step(env.action_space.sample())
all_states.append(s)
if done:
break
all_states = np.array(all_states)
for obs_i in range(env.observation_space.shape[0]):
plt.hist(all_states[:, obs_i], bins=20)
plt.show()
###Output
_____no_output_____
###Markdown
Binarize environment
###Code
from gym.core import ObservationWrapper
class Binarizer(ObservationWrapper):
def observation(self, state):
# state = <round state to some amount digits.>
# hint: you can do that with round(x,n_digits)
# you will need to pick a different n_digits for each dimension
return tuple(state)
env = Binarizer(gym.make("CartPole-v0"))
all_states = []
for _ in range(1000):
all_states.append(env.reset())
done = False
while not done:
s, r, done, _ = env.step(env.action_space.sample())
all_states.append(s)
if done:
break
all_states = np.array(all_states)
for obs_i in range(env.observation_space.shape[0]):
plt.hist(all_states[:, obs_i], bins=20)
plt.show()
###Output
_____no_output_____
###Markdown
Learn binarized policy
Now let's train a policy that uses the binarized state space.

__Tips:__
* If your binarization is too coarse, your agent may fail to find the optimal policy. In that case, change the binarization.
* If your binarization is too fine-grained, your agent will take much longer than 1000 steps to converge. You can either increase the number of iterations and decrease the epsilon decay, or change the binarization.
* Having 10^3 ~ 10^4 distinct states is recommended (`len(QLearningAgent._qvalues)`), but not required.
* A reasonable agent should get to an average reward of >=50.
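For example, one possible way to act on these tips (a sketch only: the decay factor and the floor value are assumptions to tune, and `agent` refers to the object created in the next cell):
```python
# sketch: decay exploration after each game and check how fine the binarization is
agent.epsilon = max(0.01, agent.epsilon * 0.995)
print('distinct states seen so far:', len(agent._qvalues))
```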
###Code
agent = QLearningAgent(
alpha=0.5,
epsilon=0.25,
discount=0.99,
get_legal_actions=lambda s: range(n_actions))
rewards = []
for i in range(1000):
rewards.append(play_and_train(env, agent))
# OPTIONAL YOUR CODE: adjust epsilon
if i % 100 == 0:
clear_output(True)
print('eps =', agent.epsilon, 'mean reward =', np.mean(rewards[-10:]))
plt.plot(rewards)
plt.show()
###Output
_____no_output_____ |
notebooks/old_notebooks/datasets_showcase.ipynb | ###Markdown
1-D Three-Region Dataset
###Code
X_train, Y_train, X_val, Y_val, C, R = nregion.load_data(regions=10)
nregion.graph(X_train, Y_train, size = 30)
nregion.graph(X_val, Y_val, size = 30)
X_train.shape
###Output
_____no_output_____
###Markdown
1-D Random Dataset
###Code
X_train, Y_train, X_val, Y_val = nregion.load_random_data(points=100)
nregion.graph(X_train, Y_train, size = 10)
nregion.graph(X_val, Y_val, size = 10)
###Output
_____no_output_____
###Markdown
1-D Random Regions
###Code
X_train, Y_train, X_val, Y_val, C, R = nregion.load_random_regions(regions=9, validation = 0.20, points=50)
nregion.graph(X_train, Y_train, size = 10)
nregion.graph(X_val, Y_val, size = 10)
###Output
_____no_output_____
###Markdown
2D Blobs Dataset
###Code
X, Y = blobs.load_data(size=100)
blobs.graph(X,Y)
###Output
_____no_output_____
###Markdown
2D Circles Dataset
###Code
X, Y = circle.load_data(size=100, factor = 4)
circle.graph(X,Y)
###Output
_____no_output_____ |
notebooks/Introducing Spectacle.ipynb | ###Markdown
Introducing SpectacleSpectacle is an automated line finding and analysis package written in Python. Its primary use revolves around generating spectral models either explicitly, or as a reduction of input spectral data.- GitHub: https://github.com/misty-pipeline/spectacle- Docs: https://spectacle-py.rtfd.io Basic model generationSpectacle provides an easy way to define spectral lines, characterized by their column densities, doppler b parameters, and their offsets. These are added to a spectral model object, containing methods with which the spectrum can be altered (e.g. LSFs, redshift, etc).Ion information like oscillator strength and gamma values are determined by an internal lookup table which Spectacle references by finding the closest matching $\lambda_0$ or ion name defined in the table.
###Code
# Imports
from spectacle.modeling import Spectral1D, OpticalDepth1D
import astropy.units as u
import numpy as np
import matplotlib.pyplot as plt
# Ignore warnings for the notebook demonstration
import warnings
warnings.filterwarnings('ignore')
# Suppress info messages for the notebook demonstration
import logging
logger = logging.getLogger()
logger.setLevel(logging.CRITICAL)
%matplotlib inline
plt.rcParams["figure.figsize"] = [10, 4]
###Output
_____no_output_____
###Markdown
We go ahead and define some ions we wish to use in the spectral model. Note that in the first case, the ion is defined by its $\lambda_0$ value. In the second, we use its lookup table name.
###Code
line1 = OpticalDepth1D(lambda_0=1216 * u.AA, v_doppler=20 * u.km/u.s, column_density=13.5)
line2 = OpticalDepth1D("OVI1038", v_doppler=30 * u.km/u.s, column_density=14)
###Output
_____no_output_____
###Markdown
Spectral models are created with the `Spectral1D` class. We pass in the lines we defined above. Alternatively, we could have defined the lines as a list of names or $\lambda_0$ values and passed them in as the first argument. In that case, the parameters of the individual lines would be implicitly set to the internal defaults.The `Spectral1D` class also handles the input of the redshift at which the output flux values will be shown. Likewise, it is also here that we define the continuum to use for the spectrum. The continuum can be a numerical value, or an Astropy model.The `output` keyword determines whether the resulting data from evaluating the spectral model is flux, flux decrement, or optical depth. Here, we will just use `flux`. The `rest_wavelength` keyword argument is used for cases where multiple ions are given and the input dispersion is in velocity space. This is not necessary if the dispersion is given in wavelength space.Note also that we query the ion lookup table to get the exact value of `HI1216`, users can of course input a specific quantity themselves.
###Code
from spectacle.registries import line_registry
rest_wave = line_registry.with_name("HI1216")['wave']
spec_mod = Spectral1D([line1, line2], z=0, continuum=1, output='flux', rest_wavelength=rest_wave)
###Output
_____no_output_____
###Markdown
Now that we've defined our spectral model, we need only plot it. To do so, we must pass it a dispersion in either wavelength or velocity space. Note that the dispersion must be an Astropy quantity, with proper units.
###Code
# Define dispersion in wavelength space
wav = np.linspace(1000, 1300, 1000) * u.Angstrom
f, ax = plt.subplots()
ax.step(wav, spec_mod(wav))
###Output
_____no_output_____
###Markdown
We can view several lines of the same ion in velocity space. Let's add a new line to the spectral model and focus just on `HI1216`. We'll give it a $\Delta v$ so that the two `HI1216` lines don't appear on top of each other.
###Code
line3 = OpticalDepth1D("HI1216", v_doppler=20 * u.km/u.s, column_density=13, delta_v=40 * u.km/u.s)
spec_mod = spec_mod.with_line(line3)
# Define the dispersion in wavelength space
vel = np.linspace(-100, 100, 100) * u.km/u.s
f, ax = plt.subplots()
ax.step(vel, spec_mod(vel))
###Output
_____no_output_____
###Markdown
We can check out our `OVI1038` line in velocity space as well, by adjusting the `rest_wavelength` on the spectral model.
###Code
new_rest_wave = line_registry.with_name("OVI1038")['wave']
spec_mod.rest_wavelength = new_rest_wave
f, ax = plt.subplots()
ax.step(vel, spec_mod(vel))
###Output
_____no_output_____
###Markdown
Extending the spectral model
The spectral model can be extended in many ways. Below, we show the effect of adding both the HST COS LSF and a Gaussian LSF with $\sigma = 10$.
###Code
# Let's see the effect of the LSFs on the HI1216 lines
spec_mod.rest_wavelength = rest_wave
spec_mod_cos_lsf = spec_mod.with_lsf('cos')
spec_mod_gauss_lsf = spec_mod.with_lsf('gaussian', stddev=10)
vel = np.linspace(-75, 75, 150) * u.km/u.s
flux = spec_mod(vel)
f, ax = plt.subplots()
ax.step(vel, flux, label="No LSF")
ax.step(vel, spec_mod_cos_lsf(vel), label="COS LSF")
ax.step(vel, spec_mod_gauss_lsf(vel), label="Gaussian LSF ($\sigma = 10$)")
plt.legend()
###Output
_____no_output_____
###Markdown
We can also set the spectral model to output values to a particular redshift by setting the `z` argument in the `Spectral1D` class, or by calling the `with_redshift` method on an already created one. We'll focus just on the `OVI` line so that the effect of higher redshifts is more obvious.
###Code
f, ax = plt.subplots(3, 1)
for i, z in enumerate(np.linspace(0.0, 1, 3)):
new_mod = spec_mod.with_redshift(z)
# Adjust the bounds of the dispersion to encompass the new redshift range
xmin = new_mod.lines[1].lambda_0.value / (1/(1 + z)) - 0.5
xmax = new_mod.lines[1].lambda_0.value / (1/(1 + z)) + 0.5
w = np.linspace(xmin, xmax, 100) * u.AA
ax[i].step(w, new_mod(w), label="$z={}$".format(z))
ax[i].legend()
f.set_size_inches(8,8)
f.tight_layout()
###Output
_____no_output_____
###Markdown
Generating models from data
Spectacle can also generate models automatically from input spectral data. This process uses a series of derivatives to find peaks and troughs, as well as bounding information, for each feature present in the spectrum. It then generates a line profile for each identified feature and constructs the `Spectral1D` model automatically. We'll use line parameters from KODIAQ data to construct a spectral model that will serve to generate our initial raw data.
###Code
# Retrieve the data
from astropy.io import ascii
si4k = ascii.read("https://www.dropbox.com/s/sr24x6tdsagj6sn/tabfitsiiv.txt?dl=1")
si4k_los = si4k.group_by('LOS')
this_los = si4k_los.groups[0]
lines = []
for comp in range(len(this_los)):
line_params = {
'name': "SiIV1394",
'delta_v': this_los['dv'][comp] * u.Unit('km/s'),
'column_density': this_los['col'][comp],
'v_doppler': this_los['b'][comp] * u.Unit('km/s')
}
lines.append(OpticalDepth1D(**line_params))
spectrum = Spectral1D(lines, output='flux', continuum=1)
vel = np.arange(-200, 75, 0.5) * u.Unit('km/s')
flux = spectrum(vel)
f, ax = plt.subplots()
ax.step(vel, flux)
ax.set_title("Raw KODIAQ Data")
###Output
_____no_output_____
###Markdown
Now that we have our raw KODIAQ data, we can set up our line finder to help us automatically recover the lines of the spectrum. The `LineFinder1D` class accepts a list of ions that represents a subset of the ion lookup table. This tells the line finder that potential lines can only be one of these ions, which is useful for finding lines in wavelength space, where several different ions may be present. The line finder will pick, from the lookup table subset, the ion information closest to the centroid of each feature. In our case, the entire spectrum is composed of `SiIV1394` and we're doing the line finding in velocity space, so we don't need to worry about that. The `auto_fit` keyword tells the line finder to automatically run a Levenberg-Marquardt LSQ fitter on the resulting spectral model. The `continuum` keyword is the same as the one we use when constructing a `Spectral1D` object. The `output` keyword is similar to that of the `Spectral1D` object and tells the line finder whether the input data is expected to be a flux, flux decrement, or optical depth value. The `threshold` tells the line finding routine how far a dip must extend beyond the continuum before it is considered an absorption feature. Many fitting
###Code
from spectacle.fitting import LineFinder1D
line_finder = LineFinder1D(ions=["SiIV1394"], auto_fit=True, continuum=1, output='flux', threshold=0.05)
result_spec_mod = line_finder(vel, flux)
f, ax = plt.subplots()
ax.step(vel, flux, label='Raw Data')
ax.step(vel, result_spec_mod(vel), linestyle='--', label='Fitted Model')
ax.legend()
###Output
_____no_output_____
###Markdown
As we can see, the line finder did a pretty good job recovering the absorption features of the spectrum. We can show the individual sub-models representing each line to be sure that the correct number of features was identified.
###Code
f, ax = plt.subplots()
ax.step(vel, result_spec_mod(vel), color='tab:orange', label='Fitted Model')
for line in result_spec_mod.lines:
# The `reset` keyword means that the internal list of lists is dumped
# and repopulated by the given line.
line_mod = result_spec_mod.with_line(line, reset=True)
ax.step(vel, line_mod(vel), linestyle='--', color='k', alpha=0.5)
ax.axvline(line_mod.lines[0].delta_v.value, 0.9, 1.05)
ax.legend()
###Output
_____no_output_____
###Markdown
Retrieving fit uncertainties
Both the LSQ `CurveFitter` and the MCMC `EmceeFitter` implemented in Spectacle contain an `uncertainties` attribute, shown below for the line finder's fitter.
###Code
line_finder.fitter.uncertainties
###Output
_____no_output_____
###Markdown
Line and region statistics
We can also retrieve statistical information for the individual lines and the blended regions of the spectral model. For both lines and regions, three statistics are calculated: the equivalent width, the velocity width at 90% of the maximum, and the full width at half maximum. The `line_stats` method takes a dispersion to use for calculating the statistics, and returns a table containing each individual line, its profile parameters, and the resulting statistics.
###Code
# Individual line statistics
line_stats = result_spec_mod.line_stats(vel)
line_stats
###Output
_____no_output_____
###Markdown
The `region_stats` method also takes a dispersion array, as well as a rest wavelength value `rest_wavelength`, which is used to convert the dispersion array between wavelength and velocity space. An additional parameter, `abs_tol`, can be provided; it sets the threshold above the continuum at which the region finder defines a region.
###Code
# Region line statistics
reg_stats = result_spec_mod.region_stats(vel, rest_wavelength=1393.755 * u.Angstrom, abs_tol=0.05)
reg_stats
###Output
_____no_output_____
###Markdown
We can plot the region information to get a better idea of what each found region encompasses.
###Code
f, ax = plt.subplots()
ax.step(vel, flux)
clrs = ['tab:olive', 'tab:orange']
for i, row in enumerate(reg_stats):
ax.fill_between(vel[(vel.value >= row['region_start'].value) & (vel.value <= row['region_end'].value)],
-0.1, 1.5, color=clrs[i], alpha=0.25, label='Region {}'.format(i + 1))
ax.set_ylim(-0.05, 1.05)
ax.legend()
###Output
_____no_output_____ |
codici/old/loss.ipynb | ###Markdown
Risk and minimization

Given any algorithm that, for every input value $x$, produces a prediction $f(x)$, the quality of the predictions provided by the algorithm can be defined by means of a *loss function* $L(x_1, x_2)$, where $x_1$ is the value predicted by the model and $x_2$ is the correct value associated with $x$. In essence, the value of the loss $L(f(x),y)$ measures how much it "costs" (according to the cost model induced by the function itself) to predict, given $x$, the value $f(x)$ instead of the correct value $y$.

Since the cost clearly depends on the pair of values $x,y$, an overall assessment of the quality of the algorithm's predictions can be obtained by considering the expected value of the loss as $x$ and $y$ vary, under the assumption of a joint probability (density) distribution $p(x,y)$ over these values. The distribution $p(x,y)$ therefore gives us the probability that the next point on which a prediction must be made is $x$ and that the correct value to predict is $y$. Note that we do not assume that two different occurrences of $x$ are associated with the same value of $y$: we therefore do not assume a functional relationship, however unknown, between $x$ and $y$, but only a probabilistic relationship $p(y\mid x)$. This makes it possible to account for the presence of noise in the observations.

From the above, denoting by $D_x$ and $D_y$ the domains of definition of $x$ and $y$, and assuming a distribution $p(x,y)$ that provides a statistical model of the context in which the predictions are to be made, the quality of a prediction algorithm computing the function $f(x)$ is given by the *risk*
$$\mathcal{R}(f)=\mathbb{E}_p[L(f(x),y)]=\int_{D_x}\int_{D_y} L(f(x),y)p(x,y)dxdy$$
The risk therefore tells us how much we expect it to cost to predict $f(x)$, assuming that:
1. $x$ is drawn at random from the marginal distribution $$ p(x)=\int_{D_y} p(x,y)dy $$
2. the corresponding correct value to predict is drawn at random from the conditional distribution $$p(y\mid x)=\frac{p(x,y)}{p(x)}$$
3. the cost is represented by the function $L(x_1,x_2)$

Example

Consider the case in which we want to predict the possibility of rain during the day, given the sky conditions in the morning, assuming that the possible observations are "clear" (S), "cloudy" (N) and "overcast" (C), and that the predictions are "rain" (T) and "no rain" (F). The loss function will then be of the type $L:\{T,F\}^2\mapsto\mathbb{R}$. The definition of a particular loss function depends on the user's priorities.
In this specific case, if we consider it equally "unpleasant" to go out with an umbrella (after a prediction T) without then needing it, and to get wet in the rain because we did not take the umbrella (after a prediction F), then the loss function is $L_1(x_1,x_2)$, defined by the following table

| $x_1$/$x_2$ | T | F |
| :---------: | :--: | :--: |
| T | 0 | 1 |
| F | 1 | 0 |

If instead we consider getting wet for not having taken the umbrella much more unpleasant than carrying the umbrella for nothing, then the loss function $L_2(x_1,x_2)$ can be defined as

| $x_1$/$x_2$ | T | F |
| :---------: | :--: | :--: |
| T | 0 | 1 |
| F | 25 | 0 |

If we assume that the joint distribution over $\{S,N,C\}\times\{T,F\}$ is

| $x$/$y$ | T | F |
| :-----: | :--: | :--: |
| S | .05 | .2 |
| N | .25 | .25 |
| C | .2 | .05 |

and we consider two possible prediction functions $f_1(x)$ and $f_2(x)$

| $x$ | $f_1(x)$ | $f_2(x)$ |
| :--: | :------: | :------: |
| S | F | F |
| N | F | T |
| C | T | T |

we can verify that when the loss function is $L_1$ the risk in the two cases is $\mathcal{R}(f_1)=0.65$ and $\mathcal{R}(f_2)=0.4$, so $f_2$ is preferable to $f_1$. Conversely, if the loss function is $L_2$, then $\mathcal{R}(f_1)=1.55$ and $\mathcal{R}(f_2)=7.55$, so that, on the contrary, $f_1$ is preferable to $f_2$.

As we can see, the choice between $f_1(x)$ and $f_2(x)$ therefore depends on the loss function adopted and on the distribution $p(x,y)$, which is instead given and, moreover, unknown. Hence a different distribution could lead to different conclusions even with the same loss function: if, for example, we refer to the loss function $L_1$, then the joint distribution

| $x$/$y$ | T | F |
| :-----: | :--: | :--: |
| S | .05 | .05 |
| N | .05 | .4 |
| C | .05 | .4 |

yields risk values $\mathcal{R}(f_1)=0.6$ and $\mathcal{R}(f_2)=0.9$, now making $f_1$ preferable to $f_2$.

Empirical risk

Since the true distribution $p(x,y)$ is unknown by hypothesis (if it were not, we could always make predictions using the true conditional distribution $p(y\mid x)$), computing the true risk is impossible and approximations must be made on the basis of the available data. In particular, we can apply the standard approach of using the arithmetic mean over a sample as an estimator of the expected value, and consider the *empirical risk*, computed by averaging over the sample provided by the data available in the training set $X=\{(x_1,y_1),\ldots,(x_n,y_n)\}$
$$\overline{\mathcal{R}}(f; X)=\overline{L}(f(x), y; X)=\frac{1}{n}\sum_{i=1}^nL(f(x_i),y_i)$$
The function used for the predictions will then be the one that, within the set of functions considered, minimizes the empirical risk
$$f^*=\underset{f\in F}{\mathrm{argmin}}\;\overline{\mathcal{R}}(f;X)$$
Note that the empirical risk actually depends both on the data in $X$ and on the function $f$: in this sense it is a function with respect to $X$ and a functional with respect to $f$. The search for $f^*$ therefore requires a functional minimization of the empirical risk. In general, this situation is simplified by restricting the search to classes of functions defined by coefficients: in this way the empirical risk can be expressed as a function of the coefficients of the function (in addition to $X$), and the minimization becomes an ordinary function minimization.
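As a small illustration of the empirical risk formula above (a sketch with toy arrays invented here, not data from this notebook), the empirical risk is simply the average of the per-example losses:
```python
import numpy as np

# sketch: empirical risk = mean loss over the training sample (0/1 loss here)
def empirical_risk(f, X, y, loss=lambda yp, yt: float(yp != yt)):
    return np.mean([loss(f(x), t) for x, t in zip(X, y)])

X_toy = np.array([0.2, 0.7, 1.5, 2.1])
y_toy = np.array([0, 0, 1, 1])
f_toy = lambda x: int(x > 1.0)              # a candidate predictor
print(empirical_risk(f_toy, X_toy, y_toy))  # -> 0.0
```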
Clearly, the hope is that minimizing the empirical risk yields results similar to those that would be obtained by minimizing the true risk. This depends, in general, on four factors:
- The size of the training set $X$. As the amount of data grows, $\overline{\mathcal{R}}(f; X)$ tends to $\mathcal{R}(f)$ for every function $f$.
- The true distribution $p(x,y)$. The greater its complexity, the larger the amount of data needed to approximate it well.
- The loss function $L$, which can cause problems if it assigns very high costs to particular, low-probability situations.
- The set $F$ of functions considered. If its size is large and the functions have a complex structure, a larger amount of data is needed to obtain a good approximation.

At the same time, considering a small set of simple functions does make the minimization of the empirical risk over $F$ a good approximation of the minimum true risk over $F$ itself, but it also means that this minimum may be much worse than the one obtainable by considering broader classes of functions.

Minimization of the risk function

In general, the set $F$ of functions is defined parametrically, $F=\{f(x;\theta)\}$, where $\theta\in D_\theta$ is a (typically multidimensional) coefficient that determines, within the class $F$ (usually defined in "structural" terms), the particular function used. A typical example is *linear regression*, in which we want to predict the value of an attribute $y$ with domain $R$ from the values of $m$ other attributes $x_1,\ldots, x_m$ (which for simplicity we also assume to be in $R$): in linear regression, the set of possible functions $f:R^m\mapsto R$ is restricted to the linear functions $f_\mathbf{w}(x)=w_0+w_1x_1+\ldots+w_mx_m$, and the parameter $\theta$ corresponds to the vector $\mathbf{w}=(w_0,\ldots,w_m)$ of coefficients.

In this case, once the family $F$ of functions is fixed, the empirical risk can be seen as a function of $\theta$
$$\overline{\mathcal{R}}(\theta; X)=\overline{L}(f(x;\theta), y; X)=\frac{1}{n}\sum_{i=1}^nL(f(x_i;\theta),y_i)\hspace{2cm}f\in F$$
and the minimization of the empirical risk can be carried out with respect to $\theta$
$$\theta^*=\underset{\theta\in D_\theta}{\mathrm{argmin}}\;\overline{\mathcal{R}}(\theta;X)$$
from which the optimal function (within the family $F$) $f^*=f(x;\theta^*)$ is obtained. The minimization of the risk function takes place over the domain of definition $D_\theta$ of $\theta$, and can be carried out in different ways, depending on the situation and on considerations of computational efficiency and quality of the derived solutions.

Analytic search for the optimum

If the problem is posed as an *unconstrained* minimization, and therefore over $R^m$, a first approach is the standard one from calculus, which consists in looking for values $\overline\theta$ of $\theta$ at which all the partial derivatives $\frac{\partial \overline{\mathcal{R}}(\theta; X)}{\partial \theta_i}$ vanish, i.e. such that, denoting by $m$ the dimension (number of components) of $\theta$, the system of $m$ equations in $m$ unknowns
$$\frac{\partial \overline{\mathcal{R}}(\theta; X)}{\partial \theta_i}\Bigr|_{\theta=\overline\theta}=0\hspace{2cm} i=1,\ldots,m$$
is satisfied.
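For the linear-regression family mentioned above, this system can actually be solved in closed form if we additionally assume the squared loss $L(f(x),y)=(f(x)-y)^2$ (an assumption made here only for illustration): the stationarity conditions become the normal equations. A minimal sketch on synthetic data:
```python
import numpy as np

# sketch: closed-form minimizer of the empirical risk for linear regression
# with squared loss (normal equations), on synthetic data
rng = np.random.default_rng(0)
X = rng.normal(size=(50, 2))
y = 1.0 + 2.0 * X[:, 0] - 3.0 * X[:, 1] + 0.1 * rng.normal(size=50)
A = np.column_stack([np.ones(len(X)), X])   # add the intercept column
w, *_ = np.linalg.lstsq(A, y, rcond=None)   # solves the normal equations
print(w)                                    # approximately [1, 2, -3]
```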
The analytic solution of this system is typically hard or impossible to obtain, so numerical techniques are often adopted instead.

Gradient descent

Gradient descent is one of the most popular optimization techniques, in particular in the field of Machine Learning and Neural Networks. The technique consists in minimizing an objective function $J(\theta)$ defined on the model parameters $\theta\in\mathbb{R}^d$ through successive updates of the value of $\theta$ (starting from an initial value $\theta^{(0)}$) in the direction opposite to that of the current value of the gradient $J'(\theta)=\nabla J(\theta)$. Recall that, given a function $f(x_1,x_2,\ldots,x_d)$, the gradient $\nabla f$ of $f$ is the $d$-dimensional vector of the derivatives of $f$ with respect to the variables $x_1,\ldots, x_d$: that is, the vector such that $[\nabla f]_i=\frac{\partial f}{\partial x_i}$. A parameter $\eta$, called the *learning rate*, determines the scale of the updates, and therefore the size of the steps taken in the direction of a local minimum.

We can interpret the technique as moving on the surface of the function $J(\theta)$, always following the direction of steepest descent, until a point is reached from which it is impossible to descend any further.

Variants of gradient descent

In many cases, and always in the ML setting, the objective function corresponds to the application of a predefined, model-dependent *loss function* to a given set of elements of a dataset $X=(x_1,\ldots, x_n)$ (which, in the case of supervised learning, is a set of pairs $X=((x_1,t_1),\ldots,(x_n,t_n))$): we denote this situation by $J(\theta; X)$. This corresponds to the approximation of the *risk* $$\mathcal{R}(\theta)=\int J(\theta,x)p(x)dx=\mathbb{E}_{p}[J(\theta,x)]$$

In general, the loss function is defined additively with respect to the elements of $X$ (the cost for the set $X$ equals the sum of the costs for its elements), so that $J(\theta;X)=\sum_{i=1}^nJ(\theta;x_i)$ or, preferably, to avoid an excessive dependence on the number of elements, as a mean $$J(\theta;X)=\frac{1}{n}\sum_{i=1}^nJ(\theta;x_i)$$ Note that, by the properties of differentiation, this assumption also implies the additivity of the gradient, so that $$J'(\theta; X)=\sum_{i=1}^nJ'(\theta;x_i)$$ or $$J'(\theta;X)=\frac{1}{n}\sum_{i=1}^nJ'(\theta;x_i)$$

We can then identify three variants of the method, which differ in the number of elements of $X$ used, at each step, to compute the gradient of the objective function. Using more data increases the accuracy of the update, but also the time needed to perform the update itself (in particular, to evaluate the gradient at the current value of $\theta$).

Batch gradient descent

In this case, the gradient is evaluated, each time, considering all the elements in the training set $X$.
Then, at the $k$-th step the following update is performed
$$\theta^{(k+1)}=\theta^{(k)}-\eta\sum_{i=1}^nJ'(\theta^{(k)};x_i)$$
or, for the individual coefficients,
$$\theta_j^{(k+1)}=\theta_j^{(k)}-\eta\sum_{i=1}^n\frac{\partial J(\theta;x_i)}{\partial\theta_j}\Bigr\vert_{\small\theta=\theta^{(k)}}$$
Since this requires, at each iteration, evaluating the gradient (at the current value $\theta^{(k)}$ of all the coefficients) on all the elements of $X$, this approach tends to be very slow, especially with very large datasets, as in the case of complex neural networks and deep learning. Moreover, it becomes completely impractical if the dataset is so large that it does not even fit in memory.

In code, batch gradient descent looks like:
```python
for i in range(n_epochs):
    g = 0
    for k in range(dataset_size):
        g = g + evaluate_gradient(loss_function, theta, X[k])
    theta = theta - eta * g
```
The loop is executed a number of times equal to the number of epochs, where an *epoch* is an iteration over all the elements of $X$. Consequently, the estimate of $\theta$ is updated a number of times equal to the number of epochs. Batch gradient descent certainly converges to the global minimum if the function $J(\theta)$ is convex, and otherwise converges to a local minimum.

Example

Let us apply these ideas to a simple classification problem on a two-dimensional dataset, shown graphically below.
###Code
data = pd.read_csv("../dataset/testSet.txt", delim_whitespace=True, header=None, names=['x1','x2','t'])
plot_ds(data)
n = len(data)
nfeatures = len(data.columns)-1
X = np.array(data[['x1','x2']])
t = np.array(data['t']).reshape(-1,1)
X = np.column_stack((np.ones(n), X))
###Output
_____no_output_____
###Markdown
The classification method considered is *logistic regression*, which determines a separating hyperplane (a line, in this case) by minimizing, with respect to the vector $\theta$ of the coefficients of the hyperplane equation (3 of them in this case), the empirical risk on the dataset associated with the *cross-entropy* loss, for which the cost associated with a single element $x=(x_1,\ldots,x_d)$ is
$$ J(\theta, x)=-\left(t\log y + (1-t)\log (1-y)\right) $$
where $t$ is the target value, i.e. the $0/1$ value of the element's class, and $y\in (0,1)$ is the value predicted by the model, defined as
$$y = \sigma(x) = \frac{1}{1+e^{-(\sum_{i=1}^d\theta_ix_i+\theta_0)}}$$
###Code
def sigma(theta, X):
return sp.expit(np.dot(X, theta))
###Output
_____no_output_____
###Markdown
The empirical risk associated with the whole dataset can then be defined as the corresponding mean
$$J(\theta, X)=-\frac{1}{n}\sum_{i=1}^n \left(t_i\log \sigma(x_i) +(1-t_i)\log (1-\sigma(x_i))\right)$$
###Code
def approx_zero(v):
eps = 1e-50
v[v<eps]=eps
return v
def cost(theta, X, t):
eps = 1e-50
v = sigma(theta,X)
v[v<eps]=eps
term1 = np.dot(np.log(v).T,t)
v = 1.0 - sigma(theta,X)
v[v<eps]=eps
term2 = np.dot(np.log(v).T,1-t)
return ((-term1 - term2) / len(X))[0]
###Output
_____no_output_____
###Markdown
The gradient of the loss function is then
\begin{align*}\frac{\partial J(\theta,x)}{\partial\theta_i}&=-(t-\sigma(x))x_i\hspace{1cm}i=1,\ldots,d\\\frac{\partial J(\theta,x)}{\partial\theta_0}&=-(t-\sigma(x))\end{align*}
and the corresponding gradient of the empirical risk is
\begin{align*}\frac{\partial J(\theta,X)}{\partial\theta_i}&=-\frac{1}{n}\sum_{j=1}^n (t_j-\sigma(x_j))x_{ji}\hspace{1cm}i=1,\ldots,d\\\frac{\partial J(\theta,X)}{\partial\theta_0}&=-\frac{1}{n}\sum_{j=1}^n(t_j-\sigma(x_j))\end{align*}
###Code
def gradient(theta, X, t):
return -np.dot(X.T, (t-sigma(theta, X))) / len(X)
###Output
_____no_output_____
###Markdown
As a consequence, one iteration of BGD corresponds to the updates
\begin{align*}\theta_j^{(k+1)}&=\theta_j^{(k)}-\eta\frac{\partial J(\theta,X)}{\partial\theta_j}\Bigr\vert_{\small\theta=\theta^{(k)}}=\theta_j^{(k)}+\frac{\eta}{n}\sum_{i=1}^n (t_i-\sigma(x_i))x_{ij}\hspace{1cm}j=1,\ldots,d\\\theta_0^{(k+1)}&=\theta_0^{(k)}-\eta\frac{\partial J(\theta,X)}{\partial\theta_0}\Bigr\vert_{\small\theta=\theta^{(k)}}=\theta_0^{(k)}+\frac{\eta}{n}\sum_{i=1}^n(t_i-\sigma(x_i))\end{align*}
###Code
def batch_gd(X, t, eta = 0.1, epochs = 10000):
theta = np.zeros(nfeatures+1).reshape(-1,1)
theta_history = []
cost_history = []
for k in range(epochs):
theta = theta - eta * gradient(theta,X,t)
theta_history.append(theta)
cost_history.append(cost(theta, X, t))
theta_history = np.array(theta_history).reshape(-1,3)
cost_history = np.array(cost_history).reshape(-1,1)
m = -theta_history[:,1]/theta_history[:,2]
q = -theta_history[:,0]/theta_history[:,2]
return cost_history, theta_history, m, q
###Output
_____no_output_____
###Markdown
Applying the method to the dataset, after fixing a value for the parameter $\eta$ and for the number of epochs (where an epoch corresponds to applying the iteration to all the elements of the dataset), we obtain the sequences of costs and of the values of the slope and intercept of the separating line.
###Code
cost_history, theta_history, m, q = batch_gd(X, t, eta = 0.1, epochs = 100000)
###Output
_____no_output_____
###Markdown
The regular convergence of the method is evident in the figure below, which shows a typical behaviour of the cost function with respect to the number of iterations, together with the sequence of values taken by $\theta$, considered two-dimensional.
###Code
low, high, step = 0, 5000, 10
plot_all(cost_history, m, q, low, high, step)
m_star = 0.62595499
q_star = 7.3662299
f = lambda i: np.sqrt((m_star-m[i])**2+(q_star-q[i])**2)
dist = np.array([f(i) for i in range(len(m))])
np.argmin(dist>1e-2)+1
###Output
_____no_output_____
###Markdown
Below, the resulting separating line:
###Code
plot_ds(data,m[-1],q[-1])
###Output
_____no_output_____
###Markdown
Stochastic gradient descent

In stochastic gradient descent, unlike the previous case, the gradient evaluation performed at each iteration refers to a single element $x_i$ of the training set. We therefore have
$$\theta^{(k+1)}=\theta^{(k)}-\eta J'(\theta^{(k)};x_i)$$
and, for the individual coefficients,
$$\theta_j^{(k+1)}=\theta_j^{(k)}-\eta\frac{\partial J(\theta;x_i)}{\partial\theta_j}\Bigr\vert_{\small\theta=\theta^{(k)}}$$

Batch gradient descent evaluates the gradient for all elements, including those that are similar to one another, at every iteration, thus performing a redundant set of operations. SGD avoids this problem by performing a single evaluation, and therefore operates faster.

At the same time, however, while with BGD the values of the cost function decrease regularly towards the local minimum, with SGD the behaviour is much more irregular, with the cost function fluctuating around an overall decreasing trend, but with possibly significant local increases. On the one hand this is not necessarily a drawback, since the local oscillations may allow the search to escape the neighbourhood of a local minimum and continue towards new minima. On the other hand, the local oscillation makes the final convergence to the minimum difficult.

This oscillation is also observed in the behaviour of the coefficient values. Note, however, that by considering the sequence of cost values at the end of each *epoch* (the sequence of iterations that goes through all the elements of the dataset), the underlying decreasing trend emerges.

In code, stochastic gradient descent looks like:
```python
for i in range(n_epochs):
    np.random.shuffle(data)
    for k in range(dataset_size):
        g = evaluate_gradient(loss_function, theta, X[k])
        theta = theta - eta * g
```
In the case of logistic regression, the update at each iteration is therefore
\begin{align*}\theta_j^{(k+1)}&=\theta_j^{(k)}+\eta(t_i-\sigma(x_i))x_{ij}\hspace{1cm}j=1,\ldots,d\\\theta_0^{(k+1)}&=\theta_0^{(k)}+\eta(t_i-\sigma(x_i))\end{align*}
###Code
def stochastic_gd(X, t, eta = 0.01, epochs = 1000):
theta = np.zeros(nfeatures+1).reshape(-1,1)
theta_history = []
cost_history = []
for j in range(epochs):
for i in range(n):
e = (t[i] - sigma(theta, X[i,:]))[0]
theta = theta + eta * e * X[i,:].reshape(-1,1)
theta_history.append(theta)
cost_history.append(cost(theta, X, t))
theta_history = np.array(theta_history).reshape(-1,3)
cost_history = np.array(cost_history).reshape(-1,1)
m = -theta_history[:,1]/theta_history[:,2]
q = -theta_history[:,0]/theta_history[:,2]
return cost_history, theta_history, m, q
###Output
_____no_output_____
###Markdown
To apply the method it is still necessary to specify the value of $\eta$ and the number of epochs. Given the structure of the algorithm, the number of iterations will then be equal to the number of epochs multiplied by the size $n$ of the dataset.
###Code
cost_history, theta_history, m, q = stochastic_gd(X, t, eta = 0.01, epochs = 10000)
low, high, step = 0*n, 150*n, 30
plot_all(cost_history, m, q, low, high, step)
dist = np.array([f(i) for i in range(len(m))])
np.argmin(dist>1e-2)+1
plot_ds(data,m[-1],q[-1])
###Output
_____no_output_____
###Markdown
As can be seen from the figure below, considering the cost and coefficient values only at the end of each epoch reveals a smooth behaviour of those values.
###Code
low, high, step = 0*n, 1000*n, n
plot_all(cost_history, m, q, low, high, step)
###Output
_____no_output_____
###Markdown
Mini-batch gradient descent

This approach lies between the two previous ones, generalizing the SGD idea of considering a single element per iteration to considering different subsets of the dataset. The algorithm works by partitioning, at the beginning of each epoch, the dataset into $\lceil n/s\rceil$ subsets (*mini-batches*) of fixed size $s$, and then performing $\lceil n/s\rceil$ iterations, in each of which the update of $\theta$ is carried out by evaluating the gradient on the $s$ elements of the current mini-batch.

Mini-batch gradient descent is the algorithm typically used to train neural networks, especially deep ones.

If we denote by $X_i\subset X$ the mini-batch currently considered, the update at each iteration is the following
$$\theta^{(k+1)}=\theta^{(k)}-\eta\sum_{x\in X_i}J'(\theta^{(k)};x)$$
or, equivalently,
$$\theta_j^{(k+1)}=\theta_j^{(k)}-\eta\sum_{x\in X_i}\frac{\partial J(\theta;x)}{\partial\theta_j}\Bigr\vert_{\small\theta=\theta^{(k)}}$$
In this way, the variance of the coefficient updates is reduced. Moreover, in practice it is possible to exploit the very efficient implementations of the gradient computation over a mini-batch available in the most recent *deep learning* libraries. Mini-batch sizes typically range between $50$ and $256$.
```python
for i in range(n_epochs):
    np.random.shuffle(data)
    for batch in get_batches(dataset, batch_size):
        g = 0
        for x in batch:
            g = g + evaluate_gradient(loss_function, theta, x)
        theta = theta - eta * g
```
The result is an oscillating behaviour of both the cost function and the estimated coefficient values. Clearly, the oscillation is more pronounced the smaller the mini-batch size, i.e. the closer we get to SGD.

The updates in the case of logistic regression follow immediately from the above
\begin{align*} \theta_j^{(k+1)}&=\theta_j^{(k)}+\eta\sum_{x_i\in MB}( t_i-y_i)x_{ij}\hspace{1cm}j=1,\ldots,d\\ \theta_0^{(k+1)}&=\theta_0^{(k)}+\eta\sum_{x_i\in MB}(t_i-y_i) \end{align*}
###Code
def mb_gd(X, t, eta = 0.01, epochs = 1000, minibatch_size = 5):
mb = int(np.ceil(float(n)/minibatch_size))
idx = np.arange(0,n)
np.random.shuffle(idx)
theta = np.zeros(nfeatures+1).reshape(-1,1)
theta_history = []
cost_history = []
cost_history_iter = []
for j in range(epochs):
for k in range(mb-1):
g = 0
for i in idx[k*minibatch_size:(k+1)*minibatch_size]:
e = (t[i] - sigma(theta, X[i,:]))[0]
g = g + e * X[i,:]
theta = theta + eta * g.reshape(-1,1)
theta_history.append(theta)
cost_history.append(cost(theta, X, t))
g = 0
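# NOTE: the slice idx[k*minibatch_size:n] below re-includes samples already processed above;
# the leftover samples of the epoch start at (mb-1)*minibatch_size.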
for i in idx[k*minibatch_size:n]:
e = (t[i] - sigma(theta, X[i,:]))[0]
g = g + e * X[i,:]
theta = theta + eta * g.reshape(-1,1)
theta_history.append(theta)
cost_history.append(cost(theta, X, t))
theta_history = np.array(theta_history).reshape(-1,3)
cost_history = np.array(cost_history).reshape(-1,1)
m = -theta_history[:,1]/theta_history[:,2]
q = -theta_history[:,0]/theta_history[:,2]
return cost_history, m, q
cost_history, m, q = mb_gd(X, t, eta = 0.01, epochs = 10000, minibatch_size = 5)
low, high, step = 0, 5000, 10
plot_all(cost_history, m, q, low, high, step)
dist = np.array([f(i) for i in range(len(m))])
np.argmin(dist>1e-2)+1
plot_ds(data,m[-1],q[-1])
###Output
_____no_output_____
###Markdown
Critical issues

The elementary gradient descent methods illustrated above do not, in general, guarantee fast convergence. Moreover, their use raises a number of issues:
- choosing the value of the learning rate $\eta$ can be difficult. Too small a value may lead to excessively slow convergence, while too large a value may cause oscillations around the minimum, or even divergence
- to mitigate this problem one can adjust $\eta$ over time, for example reducing it according to a predefined schedule, or whenever the decrease of the cost function between two consecutive epochs falls below a given threshold. Both the schedules and the thresholds, however, must be fixed in advance and therefore cannot adapt to the characteristics of the dataset
- the same learning rate is applied to the update of all coefficients
- in many cases the cost function, especially when dealing with neural networks, is strongly non-convex, and is therefore characterized by many local minima and saddle points. The methods considered may struggle to escape such situations, and in particular saddle points, which are often surrounded by regions with a very small gradient.

Momentum

The previous methods are inefficient in situations where the cost function varies very differently depending on the direction considered (for example, valleys that descend slowly and have steep side walls). In this case the previous algorithms proceed very slowly towards the minimum while oscillating substantially in the transverse direction: this situation is illustrated on the left of the figure below.

The *momentum method* relies on a physical interpretation of the optimization process, in which gradient descent is seen as the motion of a body of mass $m=1$ moving on the surface of the cost function $J(\theta)$, subject to a weight force $F(\theta)=-\nabla U(\theta)$, where $U(\theta)=\eta h(\theta)=\eta J(\theta)$ is the potential energy of the body at position $\theta$ (the physical constant $g$ of the weight force $F=-mgh$ is thus assumed equal to $\eta$). In this model, the negative gradient $-\eta J'(\theta)$ equals the force (and acceleration, since $a=\frac{F}{m}$) acting on the body at the point $\theta$. In gradient descent, the displacement of the body at a given point $\theta$ is determined by the acceleration computed at that same point, and hence by the gradient $J'(\theta)$, since the update rule $\theta^{(k+1)}=\theta^{(k)}-\eta J'(\theta^{(k)})$ holds. The momentum method refers instead to a model more consistent with the physics of a body moving on a surface under gravity, which requires the notion of velocity $v(\theta)$. In this model, the displacement of the body from a point $\theta$ is determined by the velocity computed at that point, $\theta^{(k+1)}=\theta^{(k)}+v^{(k+1)}$, where the change in velocity is given by the acceleration, $v^{(k+1)}=v^{(k)}-\eta J'(\theta^{(k)})$.
As can be observed,
\begin{align*}v^{(k+1)}&=-\eta J'(\theta^{(k)})+v^{(k)}=-\eta J'(\theta^{(k)})-\eta J'(\theta^{(k-1)})+v^{(k-1)}=\cdots=-\eta\sum_{i=0}^kJ'(\theta^{(i)})+v^{(0)}\\\theta^{(k+1)}&=\theta^{(k)}+v^{(k+1)}=\theta^{(k)}-\eta\sum_{i=0}^kJ'(\theta^{(i)})+v^{(0)}\end{align*}
which corresponds to associating the displacement with the sum (an integral, in the physical setting) of the past accelerations. Referring to this model leads the algorithm, at every step, to preserve, at least in part, the direction of the previous step (since $v^{(k+1)}=-\eta J'(\theta^{(k)})+v^{(k)}$), rewarding directions that appear consistently over a sequence of steps. This produces the behaviour shown on the right of the previous figure, where the inertia along the direction of the minimum limits the oscillations.

Note that this does not happen in plain gradient descent, where $v^{(k+1)}=-\eta J'(\theta^{(k)})$.

Mathematically, the inertia effect is obtained by subtracting from the (vector) velocity computed at the previous step the gradient evaluated at the corresponding position. The gradient is subtracted because, keeping the analogy with mechanics, a positive gradient tends to reduce the velocity.

The momentum method typically uses a second parameter $\gamma$, which determines the fraction of $v^{(k)}$ that survives in the definition of $v^{(k+1)}$ and plays (physically) the role of a friction coefficient. This gives the formulation:
\begin{align*}v^{(k+1)}&=\gamma v^{(k)} -\eta\sum_{i=1}^nJ'(\theta^{(k)};x_i)\\\theta^{(k+1)}&=\theta^{(k)}+v^{(k+1)}\end{align*}
At each step, the momentum method first determines the current displacement vector from the one at the previous step and from the gradient at $\theta$: the relative contribution of the two terms is weighted by the pair of parameters $\gamma$ and $\eta$. The computed displacement is then applied to the current value of $\theta$ (the minus sign comes, as always, from the fact that we are looking for a local minimum).

If the gradient points in the same direction as the current velocity, the velocity is increased, so the update of $\theta$ becomes larger, growing as long as the direction of motion stays consistent with the gradient at the points crossed.

```python
v = 0
for i in range(n_epochs):
    g = 0
    for k in range(dataset_size):
        g = g + evaluate_gradient(loss_function, theta, X[k])
    v = gamma*v - eta*g
    theta = theta + v
```

As can be seen, while $\theta^{(k)}=(\theta_1^{(k)},\ldots,\theta_d^{(k)})^T$ is the estimate of the optimal solution at step $k$, $v^{(k)}=(v_1^{(k)},\ldots,v_d^{(k)})^T$ is the update applied to that value to obtain $\theta^{(k+1)}$: we can therefore view $v$ as the velocity vector of $\theta$ in the solution space.

As already shown above, we can express the update as follows, highlighting how it depends on the gradient computed at all previously visited positions, with an effect that decays exponentially with $\gamma$ as we move back into the past.
Assuming $v^{(0)}=0$:
\begin{align*}\theta^{(k+1)}&=\theta^{(k)}+v^{(k+1)}= \theta^{(k)}+\gamma v^{(k)}-\eta\sum_{i=1}^nJ'(\theta^{(k)};x_i)=\theta^{(k)}+\gamma^2 v^{(k-1)}-\gamma\eta\sum_{i=1}^nJ'(\theta^{(k-1)};x_i) -\eta\sum_{i=1}^nJ'(\theta^{(k)};x_i)\\&=\theta^{(k)}+\gamma^2 v^{(k-1)}-\eta\left(\sum_{i=1}^nJ'(\theta^{(k)};x_i)+\gamma\sum_{i=1}^nJ'(\theta^{(k-1)};x_i)\right)=\cdots=\theta^{(k)}-\eta\left(\sum_{j=0}^k\gamma^j\sum_{i=1}^nJ'(\theta^{(k-j)};x_i)\right)\end{align*}
The updates for logistic regression follow immediately
\begin{align*} v_j^{(k+1)}&=\gamma v_j^{(k)}+\frac{\eta}{n}\sum_{i=1}^n( t_i-\sigma(x_i))x_{ij}\hspace{1cm}j=1,\ldots,d\\ v_0^{(k+1)}&=\gamma v_0^{(k)}+\frac{\eta}{n}\sum_{i=1}^n(t_i-\sigma(x_i)) \\\theta_j^{(k+1)}&=\theta_j^{(k)}+v_j^{(k+1)}\hspace{1cm}j=0,\ldots,d\end{align*}
###Code
def momentum_gd(X,t, eta = 0.1, gamma = 0.97, epochs = 1000):
theta = np.zeros(nfeatures+1).reshape(-1,1)
v = np.zeros(nfeatures+1).reshape(-1,1)
theta_history = []
cost_history = []
for k in range(epochs):
v = gamma*v - eta * gradient(theta,X,t)
theta = theta + v
theta_history.append(theta)
cost_history.append(cost(theta, X, t))
theta_history = np.array(theta_history).reshape(-1,3)
cost_history = np.array(cost_history).reshape(-1,1)
m = -theta_history[:,1]/theta_history[:,2]
q = -theta_history[:,0]/theta_history[:,2]
return cost_history, m, q
cost_history, m, q = momentum_gd(X, t, eta = 0.1, gamma = 0.97, epochs = 10000)
low, high, step = 0, 5000, 10
plot_all(cost_history, m, q, low, high, step)
dist = np.array([f(i) for i in range(len(m))])
np.argmin(dist>1e-2)+1
plot_ds(data,m[-1],q[-1])
###Output
_____no_output_____
###Markdown
Nesterov accelerated gradient

In the momentum method, knowing $\theta^{(k)}$ and $v^{(k)}$ at step $k$ allows us, without computing the gradient, to obtain an approximate estimate $\tilde{\theta}^{(k+1)}=\theta^{(k)}+\gamma v^{(k)}$ of
$$\theta^{(k+1)}=\theta^{(k)}+v^{(k+1)}=\theta^{(k)}+\gamma v^{(k)}-\eta\sum_{i=1}^nJ'(\theta^{(k)};x_i)=\tilde{\theta}^{(k+1)}-\eta\sum_{i=1}^nJ'(\theta^{(k)};x_i)$$
Nesterov's method follows the same approach as the momentum method, with the difference that, at each step, the gradient is evaluated, with an approximate *look-ahead*, not at the current point $\theta^{(k)}$ of the solution space but, roughly, at the next point $\theta^{(k+1)}$ (approximated by $\tilde{\theta}^{(k+1)}$). In this way, the changes of $v$ (and hence of $\theta$) are anticipated with respect to the momentum method.
\begin{align*}v^{(k+1)}&=\gamma v^{(k)} -\eta\sum_{i=1}^nJ'(\tilde{\theta}^{(k+1)};x_i)=\gamma v^{(k)} -\eta\sum_{i=1}^nJ'(\theta^{(k)}+\gamma v^{(k)};x_i)\\\theta^{(k+1)}&=\theta^{(k)}+v^{(k+1)}\end{align*}

```python
v = 0
for i in range(n_epochs):
    g = 0
    theta_approx = theta + gamma*v
    for k in range(dataset_size):
        g = g + evaluate_gradient(loss_function, theta_approx, X[k])
    v = gamma*v - eta*g
    theta = theta + v
```
###Code
def nesterov_gd(X,t, eta = 0.1, gamma = 0.97, epochs = 1000):
theta = np.zeros(nfeatures+1).reshape(-1,1)
v = np.zeros(nfeatures+1).reshape(-1,1)
theta_history = []
cost_history = []
for k in range(epochs):
v = gamma*v - eta * gradient(theta+gamma*v,X,t)
theta = theta + v
theta_history.append(theta)
cost_history.append(cost(theta, X, t))
theta_history = np.array(theta_history).reshape(-1,3)
cost_history = np.array(cost_history).reshape(-1,1)
m = -theta_history[:,1]/theta_history[:,2]
q = -theta_history[:,0]/theta_history[:,2]
return cost_history, m, q
cost_history, m, q = nesterov_gd(X, t, eta = 0.1, gamma = 0.97, epochs = 10000)
low, high, step = 0, 5000, 10
plot_all(cost_history, m, q, low, high, step)
dist = np.array([f(i) for i in range(len(m))])
np.argmin(dist>1e-2)+1
plot_ds(data,m[-1],q[-1])
###Output
_____no_output_____ |
uploads/EDA.ipynb | ###Markdown
Exploratory Data Analysis - EDA

This notebook reads in data created by INPUT_NN.ipynb and performs integrity checks as well as some simple exploration prior to building a Neural Network.

Two files are read in. The first holds the inputs, which contain London and Tokyo index data; the Nasdaq data is used as the target data.

From INPUT_NN we know that the first two columns of nn_inputs correspond to LONDON exchange data: the first column is the price change and the second column is the trade volume change. The third and fourth columns correspond to TOKYO exchange data: similarly, the 3rd column is the price change and the 4th column is the associated trade volume change.

The target contains only one data type - price change (it is the only thing we're predicting).
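As a quick optional sanity check, the two arrays can be put side by side with the column names used later in this notebook (a sketch only; it assumes the .npy files and column order described above):

```python
# optional sanity check of the saved arrays (sketch)
import numpy as np
import pandas as pd

cols = ["Change_LONDON", "Vol_change_LONDON", "Change_TOKYO", "Vol_change_TOKYO"]
check_df = pd.DataFrame(np.load("nn_inputs.npy"), columns=cols)
check_df["Target_change_NSDQ"] = np.load("nn_targets.npy")
print(check_df.describe())
```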
###Code
%autosave 0
import os
print(os.getcwd())
from collections import Counter
from datetime import datetime
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
np.set_printoptions(threshold=np.inf)
# read output from INPUT_NN.ipynb
inputs=np.load("nn_inputs" + '.npy')
num_input_samples =inputs.shape[0]
print("Number of samples in the input: %d" % num_input_samples)
num_inputAttributes=inputs.shape[1]
print("Number of Attributes in the input: %d" % num_inputAttributes)
print("===========================")
'''
for row in inputs:
print(row)
''';
targets=np.load("nn_targets" + '.npy')
num_target_samples = targets.shape[0]
print("Number of Samples in the Target file: %d" % num_target_samples)
print("Note the shape of the target - it's a vector:")
print(targets.shape)
#print(targets)
if num_target_samples != num_input_samples:
print("ERROR: Number of input samples MUST equal the number of target samples to run the Neural Network! ")
plt.hist(df_zip['price'], bins=20)
plt.show()
if include_volume:
Symbol=INPUTS[0]
test_volume_change= ('Vol_change_%s' % Symbol['label'])
plt.plot(df_nn[test_volume_change])
plt.show()
# Print info on the dataframe
df_nn.info()
# Fill in missing input data as 0s
df_nn.fillna(value=0, inplace=True)
df_nn.info()
print(df_nn.columns)
###Output
Index(['Change_LONDON', 'Vol_change_LONDON', 'Change_TOKYO',
'Vol_change_TOKYO', 'Target_change_NSDQ'],
dtype='object')
###Markdown
Now we're ready to convert our DataFrames into numpy arrays (matrices) and then plug them into a neural net.
###Code
targets = df_nn[target_priceChange].values
print("targets type is: ", type(targets) )
print(targets.shape)
if target_priceChange in df_nn.columns:
df_nn.drop(target_priceChange, axis=1, inplace=True)
inputs = df_nn.values
print(inputs.shape)
print(inputs[0:5])
# Write them out...then read them back in to be sure.
print("Writing out the following INPUT array: %s" % str(inputs.shape) )
np.save("nn_inputs", inputs)
del inputs
inputs=np.load("nn_inputs" + '.npy')
print("Read Back: %s" % str(inputs.shape) )
print("Writing out the following TARGET array: %s" % str(targets.shape))
np.save("nn_targets", targets)
del targets
targets=np.load("nn_targets" + '.npy')
print("Read Back: %s" % str(targets.shape) )
print("Done")
###Output
Done
|
kaggle/notebooks/jsmp-basic-eda-starter.ipynb | ###Markdown
Data Loading
###Code
%%time
trainDf = pd.read_csv('/kaggle/input/jane-street-market-prediction/train.csv')
###Output
_____no_output_____
###Markdown
Reducing Memory Usage
I had trouble using the dataset because it consumed around 5 GB of RAM just after being loaded. I found this function from [sbunzini](https://www.kaggle.com/sbunzini/reduce-memory-usage-by-75) to mitigate the issue.
###Code
def reduce_memory_usage(df):
start_memory = df.memory_usage().sum() / 1024**2
print(f"Memory usage of dataframe is {start_memory} MB")
for col in df.columns:
col_type = df[col].dtype
if col_type != 'object':
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
pass
else:
df[col] = df[col].astype('category')
end_memory = df.memory_usage().sum() / 1024**2
print(f"Memory usage of dataframe after reduction {end_memory} MB")
print(f"Reduced by {100 * (start_memory - end_memory) / start_memory} % ")
return df
trainDf = reduce_memory_usage(trainDf)
###Output
_____no_output_____
###Markdown
Data Preparation

Removing columns that will not be used as features for the training phase.
###Code
dropCols = ["resp", "resp_1", "resp_2", "resp_3", "resp_4", "ts_id"]
trainDf = trainDf.drop(columns=dropCols)
###Output
_____no_output_____
###Markdown
Filling "na" with 0 for starter. It might be wiser to use some other techniques (imputing, mean, ...) but for a first version, this will do the job.
###Code
trainDf.isnull().sum()
trainDf.fillna(0, inplace=True)
###Output
_____no_output_____
###Markdown
According to the data tab of the competition :> Trades with weight = 0 were intentionally included in the dataset for completeness, although such trades will not contribute towards the scoring evaluation.So, I make a slice without those rows before looking into the details.
###Code
trainDfW = trainDf[trainDf["weight"] > 0]
trainDfW.head()
###Output
_____no_output_____
###Markdown
Data Exploration Basics
###Code
trainDf.shape
trainDfW.shape
trainDfW.head()
trainDfW.describe()
###Output
_____no_output_____
###Markdown
Data Understanding

Correlation Matrix
###Code
%%time
corrDfW = trainDfW.corr()
fig, ax = plt.subplots(figsize=(25,25))
sn.heatmap(corrDfW, linewidths=.5, annot=False, ax=ax)
plt.show()
###Output
_____no_output_____
###Markdown
Although the matrix is quite heavy, it allows us to identify some interesting clusters. Some features seem highly (positively or negatively) correlated. The next step would be to pinpoint those features and check in features.csv whether they share the same tags... ToDo (a small sketch for this follow-up is given below).

PCA
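A possible starting point for that follow-up, reusing `corrDfW` and `np` from the cells above (sketch):

```python
# list the most strongly correlated feature pairs from the correlation matrix (sketch)
upper = corrDfW.where(np.triu(np.ones(corrDfW.shape, dtype=bool), k=1))
pairs = upper.stack()
print(pairs.reindex(pairs.abs().sort_values(ascending=False).index).head(20))
```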
###Code
%%time
scaler = MinMaxScaler()
scaledTrain = scaler.fit_transform(trainDfW)
pca = PCA().fit(scaledTrain)
exCumul = np.cumsum(pca.explained_variance_ratio_)
px.area(
x=range(1, exCumul.shape[0] + 1),
y=exCumul,
labels={"x": "# Components", "y": "Explained Variance"}
)
###Output
_____no_output_____
###Markdown
Here, we can see that:
* One of the components alone accounts for roughly a third (36.9%) of the total variance
* The 90% variance threshold is reached with 8 components
* The 95% variance threshold is reached with 11 components
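These counts can be read directly off `exCumul` from the previous cell (sketch):

```python
# number of components needed to reach a given explained-variance threshold (sketch)
print(np.argmax(exCumul >= 0.90) + 1, "components reach 90% explained variance")
print(np.argmax(exCumul >= 0.95) + 1, "components reach 95% explained variance")
```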
###Code
pca = PCA(n_components=2)
dfComp = pca.fit_transform(scaledTrain)
total_var = pca.explained_variance_ratio_.sum() * 100
fig = px.scatter(dfComp, x=0, y=1, color=trainDfW['weight'], title=f'Total Explained Variance: {total_var:.3f}%', labels={'0': 'PC 1', '1': 'PC 2'})
fig.show()
###Output
_____no_output_____
###Markdown
Now let's take a look at the two major principal components when we remove feature_0 from the dataset.
###Code
dfNoF0 = trainDfW.drop("feature_0", 1)
scaledTrainNoF0 = scaler.fit_transform(dfNoF0)
pca = PCA(n_components=2)
dfComp = pca.fit_transform(scaledTrainNoF0)
total_var = pca.explained_variance_ratio_.sum() * 100
fig = px.scatter(dfComp, x=0, y=1, color=trainDfW['weight'], title=f'Total Explained Variance: {total_var:.3f}%', labels={'0': 'PC 1', '1': 'PC 2'})
fig.show()
###Output
_____no_output_____ |
.ipynb_checkpoints/monkeyBread-checkpoint.ipynb | ###Markdown
Monkey Bread
###Code
from SpectralCV import ecog_pipe as ep
import numpy as np
import scipy as sp
import scipy.io as io
import scipy.signal as sig
import math as math
import random
from scipy import integrate
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
%matplotlib inline
plt.style.use('seaborn-colorblind')
plt.rcParams['image.cmap'] = 'RdBu'
###Output
_____no_output_____
###Markdown
Baking all 4 loaves
###Code
data_path1 ="\\Users\\Lauren\\Data\\NeuroTycho\\20120730PF_Anesthesia+and+Sleep_Chibi_Toru+Yanagawa_mat_ECoG128\\Session%d\\"
data_path2 ="\\Users\\Lauren\\Data\\NeuroTycho\\20120802PF_Anesthesia+and+Sleep_Chibi_Toru+Yanagawa_mat_ECoG128\\Session%d\\"
data_path3 ="\\Users\\Lauren\\Data\\NeuroTycho\\20120731PF_Anesthesia+and+Sleep_George_Toru+Yanagawa_mat_ECoG128\\Session%d\\"
data_path4 ="\\Users\\Lauren\\Data\\NeuroTycho\\20120803PF_Anesthesia+and+Sleep_George_Toru+Yanagawa_mat_ECoG128\\Session%d\\"
chan = 129
fs = 1000
nperseg = 500
noverlap = nperseg/2
#data1 = ep.monkeyBread(data_path1, chan, fs, nperseg, noverlap)
#create h5py path to chibi bread
import h5py
scvh5 = h5py.File('scv.h5', 'a')
monkey = scvh5['monkey']
monkey.create_dataset('chibiPF0730_2', data=ep.monkeyBread(data_path1, chan, fs, nperseg, noverlap))
monkey.create_dataset('chibiPF0802_2', data=ep.monkeyBread(data_path2, chan, fs, nperseg, noverlap))
monkey.create_dataset('georgePF0731_2', data=ep.monkeyBread(data_path3, chan, fs, nperseg, noverlap))
monkey.create_dataset('georgePF0803_2', data=ep.monkeyBread(data_path4, chan, fs, nperseg, noverlap))
scvh5.close()
###Output
_____no_output_____
###Markdown
Using the data
###Code
import matplotlib.pyplot as plt
import h5py
from SpectralCV import ecog_pipe as ep
#load data from h5
h5_file = '../Voytek/scv.h5'
def addattrs(dset, fs, nperseg, noverlap):
dset.attrs['fs'] = fs
dset.attrs['nperseg'] = nperseg
dset.attrs['noverlap'] = noverlap
with h5py.File(h5_file, 'a') as h5:
dset = h5['monkey/chibiPF0730_05']
addattrs(dset, fs, nperseg, noverlap)
dset = h5['monkey/chibiPF0802_05']
addattrs(dset, fs, nperseg, noverlap)
dset = h5['monkey/georgePF0731_05']
addattrs(dset, fs, nperseg, noverlap)
dset = h5['monkey/georgePF0803_05']
addattrs(dset, fs, nperseg, noverlap)
with h5py.File(h5_file, 'a') as h5:
print(h5['monkey/georgePF0803_2'].attrs['nperseg'])
# plotting
with h5py.File(h5_file, 'r') as h5:
bread = h5['monkey/chibiPF0730_05k']
print(bread.shape)
print(bread[0][1][:])
#for i in range(5):
# plt.figure(i+1)
# xaxis = np.arange(0,501,2)
# plt.loglog(xaxis, bread[i][:][:].T)
with h5py.File(h5_file, 'a') as h5:
print([k for k in h5['monkey'].items()])
with h5py.File(h5_file, 'a') as h5:
#del h5['monkey/georgePF0731_2']
#del h5['monkey/georgePF0731_05']
h5['monkey/georgePF0803_05'] = h5['monkey/georgePF0803_05k']
del h5['monkey/georgePF0803_05k']
###Output
_____no_output_____ |
analyses/SERV-1/Art-5min-ARIMA-p99.ipynb | ###Markdown
Time series forecasting using ARIMA Import necessary libraries
###Code
%matplotlib notebook
import numpy
import pandas
import datetime
import sys
import time
import matplotlib.pyplot as ma
import statsmodels.tsa.seasonal as st
import statsmodels.tsa.arima_model as arima
import statsmodels.tsa.stattools as tools
###Output
/usr/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88
return f(*args, **kwds)
###Markdown
Load necessary CSV file
###Code
try:
ts = pandas.read_csv('../../datasets/srv-1-art-5m.csv')
except:
print("I am unable to connect to read .csv file", sep=',', header=1)
ts.index = pandas.to_datetime(ts['ts'])
# delete unnecessary columns
del ts['id']
del ts['ts']
del ts['min']
del ts['max']
del ts['sum']
del ts['cnt']
del ts['p50']
del ts['p95']
del ts['avg']
# print table info
ts.info()
###Output
<class 'pandas.core.frame.DataFrame'>
DatetimeIndex: 23727 entries, 2018-04-11 19:10:00 to 2018-07-16 11:45:00
Data columns (total 1 columns):
p99 23727 non-null int64
dtypes: int64(1)
memory usage: 370.7 KB
###Markdown
Get values from specified range
###Code
ts = ts['2018-06-16':'2018-07-15']
###Output
_____no_output_____
###Markdown
Remove possible zero and NA values (by interpolation)
We are using the MAPE formula to compute the final score, so no zero values can occur in the time series. We replace them with NA values. NA values are later explicitly removed by linear interpolation.
###Code
def print_values_stats():
print("Zero Values:\n",sum([(1 if x == 0 else 0) for x in ts.values]),"\n\nMissing Values:\n",ts.isnull().sum(),"\n\nFilled in Values:\n",ts.notnull().sum(), "\n")
idx = pandas.date_range(ts.index.min(), ts.index.max(), freq="5min")
ts = ts.reindex(idx, fill_value=None)
print("Before interpolation:\n")
print_values_stats()
ts = ts.replace(0, numpy.nan)
ts = ts.interpolate(limit_direction="both")
print("After interpolation:\n")
print_values_stats()
###Output
Before interpolation:
Zero Values:
0
Missing Values:
p99 99
dtype: int64
Filled in Values:
p99 8541
dtype: int64
After interpolation:
Zero Values:
0
Missing Values:
p99 0
dtype: int64
Filled in Values:
p99 8640
dtype: int64
###Markdown
Plot values
###Code
# Idea: Plot figure now and do not wait on ma.show() at the end of the notebook
ma.ion()
ma.show()
fig1 = ma.figure(1)
ma.plot(ts, color="blue")
ma.draw()
try:
ma.pause(0.001) # throws NotImplementedError, ignore it
except:
pass
###Output
_____no_output_____
###Markdown
Ignore timestamps, make the time series single-dimensional
From now on the time series is represented as a plain one-dimensional Python list. ARIMA does not need timestamps or any other irrelevant data.
###Code
dates = ts.index # save dates for further use
ts = [x[0] for x in ts.values]
###Output
_____no_output_____
###Markdown
Split time series into train and test series
We split the series so that the first week of data (12·24·7 samples at 5-minute resolution) is used for training and the remainder for testing.
###Code
train_data_length = 12*24*7
ts_train = ts[:train_data_length]
ts_test = ts[train_data_length+1:]
###Output
_____no_output_____
###Markdown
Estimate integrated (I) parameter
Check time series stationarity and estimate its integration order (the maximum allowed value is 2). The series itself is highly seasonal, so we can assume that it is not stationary.
###Code
def check_stationarity(ts, critic_value = 0.05):
try:
result = tools.adfuller(ts)
return result[0] < 0.0 and result[1] < critic_value
except:
# Program may raise an exception when there are NA values in TS
return False
integrate_param = 0
ts_copy = pandas.Series(ts_train, copy=True) # Create copy for stationarizing
while not check_stationarity(ts_copy) and integrate_param < 2:
integrate_param += 1
ts_copy = ts_copy - ts_copy.shift()
ts_copy.dropna(inplace=True) # Remove initial NA values
print("Estimated integrated (I) parameter: ", integrate_param, "\n")
###Output
Estimated integrated (I) parameter: 0
###Markdown
Print ACF and PACF graphs for AR(p) and MA(q) order estimation
The AutoCorrelation and Partial AutoCorrelation Functions are needed for ARMA order estimation. Configure the *NlagsACF* and *NLagsPACF* variables to set the number of lagged values in the ACF and PACF graphs.
###Code
def plot_bar(ts, horizontal_line=None):
ma.bar(range(0, len(ts)), ts, width=0.5)
ma.axhline(0)
if horizontal_line != None:
ma.axhline(horizontal_line, linestyle="-")
ma.axhline(-horizontal_line, linestyle="-")
ma.draw()
try:
ma.pause(0.001) # throws NotImplementedError, ignore it
except:
pass
NlagsACF = 100
NLagsPACF = 50
# ACF
ma.figure(2)
plot_bar(tools.acf(ts_train, nlags=NlagsACF), 1.96 / numpy.sqrt(len(ts)))
# PACF
ma.figure(3)
plot_bar(tools.pacf(ts_train, nlags=NLagsPACF), 1.96 / numpy.sqrt(len(ts)))
###Output
_____no_output_____
###Markdown
ARIMA order estimation

According to the Box-Jenkins methodology (https://www.itl.nist.gov/div898/handbook/pmc/section4/pmc446.htm) we assumed that this time series follows an AR(p) model: the ACF graph decays exponentially, mixed with a sinusoidal pattern, and the PACF graph shows one significant spike at index 0. We can also see large anomalies in the time series, so in my opinion we should consider an ARIMA(1,1,0) model, even though the series should already be stationary (we are aware of the over-differencing problem).

You can specify how many values you want to use for ARIMA model fitting (by setting the *M_train_data* variable) and how many new values you want to predict in a single step (by setting the *N_values_to_forecast* variable).

Prediction configuration
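Before fixing the order below, a quick AIC comparison over a few candidate orders can support the choice (a sketch only, reusing `arima` and `ts_train` as defined above):

```python
# compare AIC for a few candidate ARIMA orders (sketch)
for order in [(1, 1, 0), (0, 1, 1), (1, 1, 1), (2, 1, 0)]:
    try:
        print(order, arima.ARIMA(ts_train, order=order).fit(disp=0).aic)
    except Exception as err:
        print(order, "failed:", err)
```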
###Code
ARIMA_order = (1,1,0)
M_train_data = sys.maxsize
N_values_to_forecast = 1
###Output
_____no_output_____
###Markdown
Forecast new values
We have a very large time series (over 8 thousand samples), so the forecasting takes a long time.
###Code
predictions = []
confidence = []
print("Forecasting started...")
start_time = time.time()
ts_len = len(ts)
for i in range(train_data_length+1, ts_len, N_values_to_forecast):
try:
start = i-M_train_data if i-M_train_data >= 0 else 0
arima_model = arima.ARIMA(ts[start:i], order=ARIMA_order).fit(disp=0)
forecast = arima_model.forecast(steps=N_values_to_forecast)
for j in range(0, N_values_to_forecast):
predictions.append(forecast[0][j])
confidence.append(forecast[2][j])
except:
print("Error during forecast: ", i, i+N_values_to_forecast)
# Push back last successful predictions
for j in range(0, N_values_to_forecast):
predictions.append(predictions[-1] if len(predictions) > 0 else 0)
confidence.append(confidence[-1] if len(confidence) > 0 else 0)
print("Forecasting finished")
print("Time elapsed: ", time.time() - start_time)
###Output
/home/stepan/.local/lib/python3.6/site-packages/scipy/signal/signaltools.py:1333: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.
out_full[ind] += zi
/home/stepan/.local/lib/python3.6/site-packages/scipy/signal/signaltools.py:1336: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.
out = out_full[ind]
/home/stepan/.local/lib/python3.6/site-packages/scipy/signal/signaltools.py:1342: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.
zf = out_full[ind]
###Markdown
Compute the mean absolute percentage error
We use MAPE (https://www.forecastpro.com/Trends/forecasting101August2011.html) instead of MSE because the MAPE result does not depend on the scale of the values.
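For reference, the quantity computed in the next cell is
$$\mathrm{MAPE}=\frac{100\%}{n}\sum_{t=1}^{n}\left|\frac{A_t-F_t}{A_t}\right|$$
where $A_t$ is the actual and $F_t$ the forecasted value.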
###Code
values_sum = 0
for value in zip(ts_test, predictions):
actual = value[0]
predicted = value[1]
values_sum += abs((actual - predicted) / actual)
values_sum *= 100/len(predictions)
print("MAPE: ", values_sum, "%\n")
###Output
MAPE: 250.4248669381217 %
###Markdown
Plot forecasted values
###Code
fig2 = ma.figure(4)
ma.plot(ts_test, color="blue")
ma.plot(predictions, color="red")
ts_len = len(ts)
date_offset_indices = ts_len // 6
num_date_ticks = ts_len // date_offset_indices + 1
ma.xticks(range(0, ts_len, date_offset_indices), [x.date().strftime('%Y-%m-%d') for x in dates[::date_offset_indices]])
ma.draw()
###Output
_____no_output_____ |
src/benchmark/Kim_Breast_Science_2021/PrepareInterfaceEntries.ipynb | ###Markdown
Prepare Interface Entries
###Code
import os
import os.path as op
from pathlib import Path
import sys
import pandas as pd
sys.path += ['../../..']
from src.benchmark.benchmark_utils import ReadyResults
interface_entries_s3 = ReadyResults("ELASPIC_Output/allresults_s3.txt").get_interface_ready_results()
interface_entries_s4 = ReadyResults("ELASPIC_Output/allresults_s4.txt").get_interface_ready_results()
###Output
Separating Core and Interface entries ..
Core data dimensions: (5, 103)
Core data preview:
UniProt_ID Mutation Interactor_UniProt_ID
0 P04637 R175H -
1 P38398 M1775R -
2 P38398 S1655F -
Interface data dimensions: (83, 103)
Interface data preview:
UniProt_ID Mutation Interactor_UniProt_ID
0 O96017 K373E O96017-12
1 O96017 K373E O43293
2 O96017 K373E O43293-2
Dropping duplicated entries ..
Size of dataframe before dropping duplicated entries: (5, 103)
Size of dataframe after dropping duplicated entries: (5, 103)
Dropping self interactions ..
Dropping duplicated entries ..
Size of dataframe before dropping duplicated entries: (76, 103)
Size of dataframe after dropping duplicated entries: (76, 103)
Separating Core and Interface entries ..
Core data dimensions: (2, 103)
Core data preview:
UniProt_ID Mutation Interactor_UniProt_ID
0 P04637 R175H -
1 Q9BX63 A745T -
Interface data dimensions: (76, 103)
Interface data preview:
UniProt_ID Mutation Interactor_UniProt_ID
0 O96017 K373E O96017-12
1 O96017 K373E O43293
2 O96017 K373E O43293-2
Dropping duplicated entries ..
Size of dataframe before dropping duplicated entries: (2, 103)
Size of dataframe after dropping duplicated entries: (2, 103)
Dropping self interactions ..
Dropping duplicated entries ..
Size of dataframe before dropping duplicated entries: (71, 103)
Size of dataframe after dropping duplicated entries: (71, 103)
###Markdown
(Try to) find ELASPIC errors for S3 and S4 in older results.
###Code
def load_inputs_pairs(file_path):
with open(file_path) as file:
pairs = [tuple(line.strip().split('.')) for line in file.readlines()]
return pairs
inputs_pairs = load_inputs_pairs("ELASPIC_Input/input_pairs_20.txt")
oldresults_path = r"C:/Users/ibrah/Documents/GitHub/My-ELASPIC-Web-API/Elaspic_Results/Vault/old_merged_core_interface_vault_2021-11-17.txt"
def find_in_oldresults_data(old_data, protein, mutation):
query = old_data[
(old_data["Type"] == "interface") &
(old_data["UniProt_ID"] == protein) &
(old_data["Mutation"] == mutation)
].copy()
return query
oldresults_data = pd.read_csv(oldresults_path, sep='\t', low_memory=False)
oldresults_data.head()
rescued_dataframes = []
for pair in inputs_pairs:
rescued_dataframes.append(find_in_oldresults_data(oldresults_data, pair[0], pair[1]))
rescued_data = pd.concat(rescued_dataframes)
rescued_data
###Output
_____no_output_____
###Markdown
Merge and export
###Code
concated_interface_results = pd.concat(
[
interface_entries_s3,
interface_entries_s4,
rescued_data,
]
)
concated_interface_results = concated_interface_results.drop_duplicates()
print(concated_interface_results.shape)
concated_interface_results.head()
concated_interface_results.to_csv("Kim_et_at_elaspic_results_interface.txt", sep='\t', index=False)
###Output
_____no_output_____ |
modeling_node-only.ipynb | ###Markdown
Non-Graph Modeling

Some modeling done purely on node features (i.e. no network yet)
###Code
import pandas as pd
import numpy as np
###Output
_____no_output_____
###Markdown
Data Prep Note: Here, we use data from the Human Protein Atlas along with labels based on OMIM searches. The labels implicityly assume that if a gene does not come up as positive in a search then it is not associated with LUAD. This assumption is not correct. A more careful labeling method is required. Thus, we can't draw any conclusions from the results of this notebook; we merely hope to gain some insights on the node features.
###Code
HPA_data = pd.read_csv('data/HPA_Complete_v1.csv', index_col=0)
HPA_data.set_index('Ensembl', inplace=True)
HPA_data.head()
label_col = 'Total_pos'
# TODO: shuffle HPA data
filt = HPA_data[label_col] == 1
pos_data = HPA_data[filt]
num_pos = len(pos_data)
neg_data = HPA_data[~filt].iloc[:num_pos]
train_test_data = pos_data.append(neg_data)
train_test_data = train_test_data.sample(frac=1)
train_test_data.head()
labels = train_test_data[label_col]
feature_cols = [str(i) for i in range(100)]
feat_data = train_test_data[feature_cols]
feat_data.head()
print('n_samples: ', len(feat_data))
# Train-Test Split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(feat_data, labels, test_size=0.2)
###Output
_____no_output_____
###Markdown
Modeling Random Forest
###Code
# random forest
from sklearn.ensemble import RandomForestClassifier
# define RF classifier
rf_clf = RandomForestClassifier(n_estimators=100)#, max_depth=5)
# fit classifier
rf_clf.fit(X_train, y_train)
rf_clf.score(X_train, y_train)
rf_clf.score(X_test, y_test)
###Output
_____no_output_____
###Markdown
Adaboost
###Code
from sklearn.ensemble import AdaBoostClassifier
# define Adaboost classifier
ada_clf = AdaBoostClassifier(n_estimators=100, algorithm='SAMME.R')
# fit classifier
ada_clf.fit(X_train, y_train)
ada_clf.score(X_train, y_train)
ada_clf.score(X_test, y_test)
###Output
_____no_output_____
###Markdown
GradBoost
###Code
from sklearn.ensemble import GradientBoostingClassifier
# define GradBoost classifier
gb_clf = GradientBoostingClassifier(loss='deviance', n_estimators=100, criterion='friedman_mse')
# fit classifier
gb_clf.fit(X_train, y_train)
gb_clf.score(X_train, y_train)
gb_clf.score(X_test, y_test)
###Output
_____no_output_____
###Markdown
Grid Search over Parameters
###Code
from sklearn.model_selection import GridSearchCV
gradboost_clf = GradientBoostingClassifier()
params = {
'n_estimators': [20, 50, 100, 150],
'learning_rate': [0.01, 0.05, 0.1, 0.2],
'loss': ['deviance', 'exponential'],
'criterion':['friedman_mse']
}
# n_jobs=-1 => use all processors
gridsearch_clf = GridSearchCV(gradboost_clf, param_grid=params, refit=True, cv=5, n_jobs=-1, verbose=4)
gridsearch_clf.fit(X_train, y_train)
gridsearch_clf.best_params_
gridsearch_clf.best_score_
gridsearch_clf.score(X_test, y_test)
###Output
_____no_output_____ |
baseline_models/DLT/DLT.ipynb | ###Markdown
ARIMA Baseline Model
###Code
# imports needed by the cells in this notebook (df and target_column are assumed to be
# defined by an earlier data-loading step that is not shown here)
import sys
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pmdarima as pm
from scipy.stats import norm
from sklearn.linear_model import LinearRegression
from statsmodels.tsa.stattools import acf
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.holtwinters import SimpleExpSmoothing, ExponentialSmoothing

data = df[target_column]
def mean_absolute_percent_error(y_true, y_pred):
pct_error = abs(y_true - y_pred) / abs(y_true)
return pct_error.mean(axis=0) * 100
# 1 ARIMA Baseline Model
def ARIMA_Model(holdout,dataset):
# Fit a simple auto_arima model
modl = pm.auto_arima(dataset, start_p=0, start_q=0, start_P=0, start_Q=0,
max_p=5, max_q=5, max_P=5, max_Q=5, seasonal=True,
stepwise=True, suppress_warnings=True, D=10, max_D=10,
error_action='ignore')
# Create predictions for the future, evaluate on test
preds, conf_int = modl.predict(holdout, return_conf_int=True)
return preds, conf_int
# Validating the model (Sliding Window)
loop_value = int(len(data)/100)
train_window_size = 100
test_window_size = 10
step_size = train_window_size + test_window_size
arima_prediction = []
for i in range(0,loop_value):
arima_pred, arima_config = ARIMA_Model(test_window_size,data.iloc[i*train_window_size:(i+1)*train_window_size])
arima_prediction.append(arima_pred)
# Compute Real Values every 100 hours
r_value=[]
for i in range(1,loop_value+1):
v= data.iloc[i*100:i*train_window_size + test_window_size]
r_value.append(v)
# Computing metrics (MAPE)
arima_mape_list=[]
for i in range(0,len(r_value)):
mape=mean_absolute_percent_error(r_value[i],arima_prediction[i])
arima_mape_list.append(mape)
# Mean Value of MAPE
arima_MAPE = sum(arima_mape_list)/len(arima_mape_list)
# Print MAPE
print("The Mean Absolute Percentage Error in ARIMA Model is equal to",round(arima_MAPE,2))
# Train-test Split
train = data[10:]
test = data.tail(10)
# Forecasting t+10 timesteps
arima_forecast, arima_config = ARIMA_Model(10,train)
# Plot Forecasting Values
fig, ax = plt.subplots(figsize=(16, 10))
ax.plot(train[2100:].index, train.values[2100:]);
ax.plot(test.index, test.values, label='truth');
ax.plot(test.index, arima_forecast, linestyle='--', color='#ff7823');
ax.set_title("ARIMA t+10 Forecasting");
plt.savefig('ARIMA t+10 Forecasting.png')
###Output
_____no_output_____
###Markdown
Theta Baseline Model
###Code
# 2 Theta Baseline Model
# Step 1: Check for seasonality
# Step 2: Decompose Seasonality if it is deemed seasonal
# Step 3: Applying Theta Method
# Step 4: Reseasonalize the resulting forecast
def sesThetaF(y, s_period , h = 10, level = np.array([90,95,99])):
"""
@param y : array-like time series data
@param s_period : the no. of observations before seasonal pattern repeats
@param h : number of period for forcasting
@param level: confidence levels for prediction intervals
"""
if not s_period:
print('ERROR: s_period variable only accepts positive integer.')
sys.exit()
fcast = {} # store result
# Check seasonality
x = y.copy()
n = y.index.size
m = s_period
if m > 1 and n > 2 * m:
r = (acf(x, nlags = m))[1:]
temp = np.delete(r, m-1)
stat = np.sqrt((1+ 2 * np.sum(np.square(temp))) / n)
seasonal = (abs(r[m-1])/stat) > norm.cdf(0.95)
else:
seasonal = False
# Seasonal Decomposition
origx = x.copy()
if seasonal:
decomp = seasonal_decompose(x, model = 'multiplicative')
if decomp.seasonal < 1e-10 :
warnings.warn('Seasonal indexes equal to zero. Using non-seasonal Theta method')
else:
x = decomp.observed/decomp.seasonal
# Find theta lines
model = SimpleExpSmoothing(x).fit()
fcast['mean'] = model.forecast(h)
num = np.array(range(0,n))
temp = LinearRegression().fit(num.reshape(-1,1),x).coef_
temp = temp/2
alpha = np.maximum(1e-10, model.params['smoothing_level'])
fcast['mean'] = fcast['mean'] + temp * (np.array(range(0,h)) + (1 - (1 - alpha)**n)/alpha)
# Reseasonalize
if seasonal:
fcast['mean'] = fcast['mean'] * np.repeat(decomp.seasonal[-m:], (1 + h//m))[:h]
fcast['fitted'] = model.predict(x.index[0], x.index[n-1]) * decomp.seasonal
else:
fcast['fitted'] = model.predict(x.index[0], x.index[n-1])
fcast['residuals'] = origx - fcast['fitted']
return fcast
# Prediction Intervals
data = pd.Series(df['close']).asfreq("H")
data.fillna(method='ffill', inplace=True)
np.all(np.isfinite(data))
# Validating the model (Sliding Window)
theta_pred_list=[]
for i in range(0,loop_value):
theta_pred = sesThetaF(data[i*100:(i+1)*100],s_period=1,h = 10)
theta_pred_list.append(theta_pred['mean'])
r_value=[]
for i in range(1,loop_value+1):
v= data.iloc[i*100:i*train_window_size + test_window_size]
r_value.append(v)
# Computing metrics (MAPE)
theta_mape_list=[]
for i in range(0,len(r_value)):
mape=mean_absolute_percent_error(r_value[i],theta_pred_list[i])
theta_mape_list.append(mape)
# Mean Value of MAPE
theta_MAPE = sum(theta_mape_list)/len(theta_mape_list)
# Print MAPE
print("The Mean Absolute Percentage Error in Theta Model is equal to",round(theta_MAPE,2))
# Forecasting t+10 timesteps
theta_conf = sesThetaF(data,s_period=1,h = 10)
# Plot Forecasting Values
mean = theta_conf['mean']
fitted = theta_conf['fitted']
residuals = theta_conf['residuals']
plt.figure(figsize = (16,10))
plt.plot(fitted, marker = '.', color = 'red', label = 'In-sample Fitted')
plt.plot(mean, marker = '*', color = 'blue', label = 'Forecast')
plt.plot(residuals, marker = '', color = 'green', label = 'Residuals')
plt.title('Standard Theta Model')
plt.legend()
plt.show()
plt.savefig('Standard Theta Model t+10 Forecasting.png')
###Output
_____no_output_____
###Markdown
HW Exponential Smoothing Baseline Model
###Code
# Dataset pre-processing
data = df[target_column]
data = pd.Series(df['close']).asfreq("H")
np.all(np.isfinite(data))
data.fillna(method='ffill', inplace=True)
np.all(np.isfinite(data))
# 3 HWES Baseline Model
exp_smooth_pred_list=[]
for i in range(0,loop_value):
model = ExponentialSmoothing(data[i*100:(i+1)*100],freq="H")
model_fit = model.fit()
# make prediction
yhat = model_fit.predict(100, 109)
exp_smooth_pred_list.append(yhat)
exp_smooth_mape_list=[]
for i in range(0,len(r_value)):
mape=mean_absolute_percent_error(r_value[i],exp_smooth_pred_list[i])
exp_smooth_mape_list.append(mape)
exp_smooth_MAPE = sum(exp_smooth_mape_list)/len(exp_smooth_mape_list)
# Print MAPE
print("The Mean Absolute Percentage Error in Exponential Smoothing Method is equal to",round(exp_smooth_MAPE,2))
# Train-test Split
train = data[10:]
test = data.tail(10)
# Forecasting t+10 timesteps
model = ExponentialSmoothing(train,freq="H")
model_fit = model.fit()
# make prediction
yhat = model_fit.predict(len(train), len(train)+9)
# Plot Forecasting Values
fig, ax = plt.subplots(figsize=(16, 10))
ax.plot(train[2100:].index, train.values[2100:]);
ax.plot(test.index, test.values, label='truth');
# ax.plot(test.index, yhat, linestyle='--', color='#ff7823');
ax.set_title("Holt-Winter's Seasonal Smoothing");
plt.savefig("Holt-Winter's Seasonal Smoothing t+10 Forecasting.png")
###Output
_____no_output_____ |
DNA Embeddings/DNA_embedddings_INSECT.ipynb | ###Markdown
Classic CNN Model
###Code
# imports needed below (the training arrays X_train, y_train, X_test, y_test, allX and the
# sequence length sl are assumed to come from earlier preprocessing cells)
import numpy as np
import tensorflow as tf
from tensorflow.keras import datasets, layers, models, optimizers, callbacks
# CNN model architecture
model = models.Sequential()
model.add(layers.Conv2D(64, (3,3), activation='relu', input_shape=(sl, 5,1),padding="SAME"))
model.add(layers.BatchNormalization())
model.add(layers.MaxPooling2D((3,1)))
model.add(layers.Conv2D(32, (3,3), activation='relu',padding="SAME"))
model.add(layers.BatchNormalization())
model.add(layers.MaxPooling2D((3,1)))
model.add(layers.Conv2D(16, (3,3), activation='relu',padding="SAME"))
model.add(layers.BatchNormalization())
model.add(layers.MaxPooling2D((3,1)))
# model.add(layers.Conv2D(16, (3,3), activation='relu',padding="SAME"))
# model.add(layers.BatchNormalization())
# model.add(layers.MaxPooling2D((3,1)))
model.add(layers.Flatten())
model.add(layers.BatchNormalization())
model.add(layers.Dense(500, activation='tanh'))
model.add(layers.Dense(y_train.shape[1]))
model.summary()
# Step-decay learning rate scheduler
def step_decay(epoch):
initial_lrate = 0.001
drop = 0.5
epochs_drop = 2.0
lrate = initial_lrate * np.power(drop, np.floor((1+epoch)/epochs_drop))
return lrate
class LossHistory(callbacks.Callback):
def on_train_begin(self, logs={}):
self.losses = []
self.lr = []
def on_epoch_end(self, batch, logs={}):
self.losses.append(logs.get('loss'))
self.lr.append(step_decay(len(self.losses)))
loss_history = LossHistory()
lrate = callbacks.LearningRateScheduler(step_decay)
callbacks_list = [loss_history, lrate]
from tensorflow.keras.metrics import top_k_categorical_accuracy
#opt = tf.keras.optimizers.SGD(momentum=0.9, nesterov=True)
opt = tf.keras.optimizers.Adam()
model.compile(optimizer=opt, loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
metrics=['accuracy','top_k_categorical_accuracy'])
# Validation time
history = model.fit(X_train, y_train, epochs=5, batch_size = 32, validation_data=(X_test, y_test), callbacks=callbacks_list, verbose=1)
# Final Test time
#history = model.fit(trainX, trainY, epochs=5, callbacks=callbacks_list)
print('Learning rates through epochs:', loss_history.lr)
from tensorflow.keras.models import Model
# Getting the DNA embeddings of all data from the last dense layer
layer_name = 'dense'
model2= Model(inputs=model.input, outputs=model.get_layer(layer_name).output)
dna_embeddings=model2.predict(allX)
print(dna_embeddings.shape)
# saving the results into csv file to be later read into matlab
np.savetxt("DNA_CNN_embeddings_500_5e_adam.csv", dna_embeddings, delimiter=",")
###Output
_____no_output_____ |
Split Image Sets.ipynb | ###Markdown
Make data set:
###Code
# imports needed by the cells below
import os
from random import shuffle

#Setting path to images to the variable 'path'
path="../Data/"
#Getting the list of subdirectories in 'train', saved to 'categories'
categories=os.listdir(path+'train')
#Check
categories
#Making a test directory to move images to
os.mkdir(path+'test')
#Using a loop to move images from each class into a test set
for each_category in categories:
#Making the test folder with each subfolders
os.mkdir(path + f'test/{each_category}')
#Using list comprehension to get a list of images in each folder
list_images = [file for file in os.listdir(path + f'train/{each_category}') if file.endswith('.bmp')]
#Randomly shuffling the order
shuffle(list_images)
#Getting the names of the first 20% of images
num_test_images = int(len(list_images)*0.2)
for_test = list_images[:num_test_images]
#Looping through each image that was split off
for each_image in for_test:
#Renaming the file path to move those images into the test set
os.rename(path + f'train/{each_category}/{each_image}',
path + f'test/{each_category}/{each_image}')
#Checking that images have been moved
print(f'{each_image} moved to "{each_category}" in test.')
#Checking when images from a folder have finished moving
print(f'{each_category} completed.')
train_count=0
test_count=0
#Looping through each subdirectory in train and test
for each_category in categories:
#Get the list of images in each subdirectory
train_images = [file for file in os.listdir(path + f'train/{each_category}')]
#Add up the count of items in each subdirectory
train_count+=len(train_images)
test_images = [file for file in os.listdir(path + f'test/{each_category}')]
test_count+=len(test_images)
#Check
print(f'Number of train images: {train_count} \nNumber of test images: {test_count}')
###Output
Number of train images: 585
Number of test images: 145
|
_site/pages/clustering/Multivariate analysis.ipynb | ###Markdown
Data screening, multi-variate analysis and clustering

In this chapter, you will analyze data from a large study investigating breast cancer subtypes (https://www.nature.com/articles/s41467-019-09018-y).

The following topics are covered in this Jupyter notebook:
- Reading data into R
- Basic visualization of relative protein abundances
- General data properties
- Principal component analysis to identify general trends
- Hierarchical clustering to group co-regulated proteins
- Variance-sensitive cluster analysis using the VSClust app

For some accompanying slides, see http://computproteomics.bmb.sdu.dk/tmp/QuantWorkshop/48

Reading the data

We will start by loading the data and visualizing it carefully, also to find out how to proceed with the analysis.
The next code fragments help you to read the file _PMC6453966.csv_ into R and look at its content. It is located in the subfolder _resources/data_.
Also take a look at the original file of the paper: Supplementary Table 1 in https://www.nature.com/articles/s41467-019-09018-y

Task: 👨‍💻 Find the experimental design in the paper and/or data that allows assigning channels to tumor types.
###Code
rQuantTable <-read.csv("AddTheCorrectFolderAndFile",row.names=1)
# Which columns are there?
colnames(rQuantTable)
options(repr.matrix.max.cols = 100)
head(rQuantTable,n = )
#Take only protein abundances
# Add correct numbers for first_col and last_col
first_col <- 1
last_col <-1
QuantTable <- rQuantTable[, first_col:last_col]
# Assign tumor types by reading the file PMC6453966TumorTypes.csv
TCateg <- read.csv("AddTheCorrectFolderAndFile")
colnames(TCateg)
colnames(QuantTable) <- paste(TCateg$PAM50.subtype, "Sample", 1:45)
QuantTable <- QuantTable[,sort(colnames(QuantTable))]
head(QuantTable)
###Output
_____no_output_____
###Markdown
Add your answers here
(double-click here to edit the cell)

❔ Question I: What do the different columns contain? How many cancer subtypes and how many replicates?

_Answer_

❔ Question II: What is in channel 131?

_Answer_

❔ Question III: Is the data log-transformed? How would you check that?

_Answer_

Visualization

We want to understand the data better through visualization. This also helps to identify important properties that might not be clear from the data description.

[Boxplots](https://en.wikipedia.org/wiki/Box_plot) are useful for roughly comparing the distributions of the different samples. For an even nicer visualization, use violin plots. A [histogram](https://en.wikipedia.org/wiki/Histogram) shows the distribution of values by binning them. Scatter plots allow direct comparison of the values between two samples.

_Tasks:_
👨‍💻 Make a boxplot and discuss how and why it looks like that.
👨‍💻 Visualize the distribution of some of the channels and also compare them via a scatter plot.
👨‍💻 Repeat the same for the log-transformed data.
👨‍💻 Calculate the dynamic range of all values.
###Code
# some figures
boxplot(QuantTable)
# Change the number of bins to find a nicer visualization
numBin <- 10
hist(QuantTable[,1],numBin, main="Distribution of values")
# Why is the distribution so asymmetric? Take the log?
hist(log(QuantTable[,1]),numBin)
# log-trafo
lQuantTable <- log2(QuantTable)
boxplot(lQuantTable)
# scatter plot
plot(lQuantTable$Set.1.TMT10Tag_126, lQuantTable$Set.1.TMT10Tag_129C, pch=15, col="#33333366")
###Output
_____no_output_____
###Markdown
Add your answers here
(double-click here to edit the cell)

❔ Question I: The data seems to be centered. Around which value?

_Answer_

❔ Question II: Which normalization was used by the authors of the paper? On which data level (PSMs, peptides, proteins)? How is that visible in the plots?

_Answer_

❔ Question III: Any idea why the distribution is so sharp?

_Answer_

❔ Question IV: Why can't you see any linear relationship between the samples?

_Answer_

Dynamic range, similarity and missing values

In the following, we will calculate the dynamic range of the full data and get more insight into data similarity by calculating correlations. We also assess the number of missing values.

Missing values in proteomics data are a big problem. Value imputation, particularly wrong imputation, will create bias in the statistical analysis.
###Code
# What is the dynamic range of the data?
# the functions min and max calculate the smallest (largest) value of an entire table.
Add_your_code_here
# Correlations between samples, replicates should show higher correlations
library(lattice)
levelplot(cor(lQuantTable))
# Number of missing values per column
table(rowSums(is.nan(as.matrix(lQuantTable))))
###Output
_____no_output_____
###Markdown
Add your answers here
(double-click here to edit the cell)

❔ Question I: What does the dynamic range mean? Is it larger or smaller than expected? Take a look at https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2938101/ to find more explanations.

_Answer_

❔ Question II: Do you find similarity between replicates? Is there a sample that seems to be displaced? How are the types distributed over the TMT runs?

_Answer_

❔ Question III: How many missing values does the data have per column? Why so many/few? Would you analyze the data differently?

_Answer_

Principal component analysis

PCA is a way to plot multi-dimensional data via projection onto the so-called principal components. Simple explanation: a multi-dimensional data set is projected onto the coordinates that correspond to most of the variance. This means that we get a 2-dimensional projection when plotting the first 2 principal components.
There is much more to it; you can get some more intuition [here](http://setosa.io/ev/principal-component-analysis/).

We will use only the components of the new coordinates (also called loadings) to see how the different cancer subtypes and replicates compare to each other.
###Code
# write into file for usage in VSClust later on
write.csv(lQuantTable,"FolderOfYourChoice/PMC6453966_VSClust_in.csv")
# PCA for some testing
pca.out <- princomp(QuantTable, cor = T)
#scoring plot
plot(pca.out$loadings, pch=16, col=rep(1:5,each=9) )
text(pca.out$loadings, pos=2, labels=colnames(lQuantTable),cex=0.5)
###Output
_____no_output_____
###Markdown
Add your answers here
(double-click here to edit the cell)

❔ Question I: How would you interpret a PCA loading plot?

_Answer_

❔ Question II: Can the different cancer subtypes be distinguished? If yes/no, what could be the biological reason?

_Answer_

❔ Question III: Can you predict which subtype should show most of the differences?

_Answer_

Hierarchical clustering

This cluster analysis is super-easy in R and provides a nice view of common changes within the proteins. It is used everywhere.
The disadvantage of this clustering method is its limitation to smaller data sets, as the plots get very messy for thousands of proteins. Therefore, we will target a subset and cluster it in some examples.

Note: The metric is based on a distance matrix, which requires calculating the distances between each pair of features (here proteins). This becomes computationally expensive for very large data sets.

👨‍💻 You will retrieve the _100 proteins with the largest changes_ of their averaged abundance between the two cancer subtypes. The clustering will be carried out for the averaged and the full data. Basing the analysis on averaged values only would neglect biological variance, and thus most likely lead to erroneous interpretations.
###Code
# heat map of the most changing proteins between the first 2 conditions
# Calculate means of each cancer subtype
AvQuant <- NULL
for (i in 1:5)
AvQuant <- cbind(AvQuant, rowMeans(lQuantTable[,(i-1)*9+(1:9)]))
# Make a histogram of changes between the averaged abundances
hist(...)
# What happens with the 100 most different proteins?
mostDiffInd <- names(sort(abs(AvQuant[,1] - AvQuant[,2]), decreasing = T)[1:100])
heatmap(AvQuant[mostDiffInd,], Colv=NA)
# Now look on the replicate level (full data). Do you still have the same impression about the significant changes?
heatmap(as.matrix(lQuantTable[mostDiffInd,]), Colv = NA,cexRow = 0.3)
###Output
_____no_output_____
###Markdown
Add your answers here
(double-click here to edit the cell)

❔ Question I: What is the expected fold-change of the most changing proteins?

_Answer_

❔ Question II: Is there a difference in the number of proteins increasing/decreasing in the second condition (within the 100 most changing ones)?

_Answer_

❔ Question III: What does this command do? `names(sort(abs(AvQuant[,1] - AvQuant[,2]), decreasing = T)[1:100])`

_Answer_

❔ Question IV: Would you still consider the top 100 proteins to show significant changes between the first 2 conditions? Assume an FDR of 1% or 5%.

_Answer_

Digging deeper into the behavior of one protein

Let's take one of the top 100 proteins (CRABP1) and check its quantitative values as well as its biological function. Will it be related to breast cancer? What does a simple Google search tell us?

👨‍💻 Plot its abundance over all replicates and conditions using colors for the different cancer subtypes.
👨‍💻 Search the web for the function of CRABP1 and whether it is related to any cancer. Use uniprot.org as a starting point.
👨‍💻 Now search Google for _breast cancer_ and _CRABP1_.
👨‍💻 Get a random human protein from UniProt: https://www.uniprot.org/uniprot/?query=reviewed:yes+AND+organism:9606&random=yes
👨‍💻 + 📓 Look it up again and also do the cancer search on Google. Get the gene name and also plot its quantitative changes.
###Code
# Plot CRABP1 here; swap in another gene (e.g. MUC5B) for the random-protein task
plot(t(lQuantTable["CRABP1",]), pch=16, col=rep(1:5,each=9))
###Output
_____no_output_____ |
backend/Nobel_data.ipynb | ###Markdown
``` sql
CREATE TABLE usanobel (
    id FLOAT,
    nobelwinners VARCHAR,
    category VARCHAR,
    year INT,
    name VARCHAR,
    organization VARCHAR,
    city VARCHAR,
    state VARCHAR,
    country VARCHAR,
    orgcount VARCHAR,
    gender VARCHAR,
    multiorg INT,
    lat FLOAT,
    lon FLOAT
);
```
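Once the dataframe has been written (next cell), a quick sanity check is to read the table back into pandas; a minimal sketch, assuming the same local `nobel_winners` database and credentials used below:

```python
import pandas as pd
from sqlalchemy import create_engine

engine = create_engine('postgres://postgres:2903@localhost:5432/nobel_winners')
# count rows per prize category to confirm the table was populated
check_df = pd.read_sql('SELECT category, COUNT(*) AS n FROM usanobel GROUP BY category', engine)
print(check_df)
```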
###Code
import pandas as pd
from sqlalchemy import create_engine
engine = create_engine('postgres://postgres:2903@localhost:5432/nobel_winners')
engine.table_names()
usa_nobel_df.to_sql('usanobel', engine, if_exists='append', index=False)
ncses_df = pd.read_csv("./ncsesdata.csv")
ncsesF_df = ncses_df[(ncses_df['state']!= 'DC')]
ncsesF_df
!pip install chart_studio
# Test out slider for one year of data
fig = go.Figure(data=go.Choropleth(
locations=ncsesF_df['state'],
z = ncsesF_df['se_jobs'].astype(float),
locationmode = 'USA-states',
colorscale = 'Blues',
colorbar_title = "% of SE Jobs",
))
fig.update_layout(
title_text = '2008 Science & Engineering Jobs by State',
geo_scope = 'usa',
)
fig.show()
import numpy as np
import pandas as pd
import plotly.graph_objs as go
import chart_studio.plotly as py
# min year in your dataset
year = 2008
# your color-scale
scl = [[0.0, '#ecf2f9'],[0.2, '#c6d9ec'],[0.4, '#9fbfdf'],
[0.6, '#6699cc'],[0.8, '#336699'],[1.0, '#204060']]
data_slider = []
for year in ncsesF_df['year'].unique():
df_segmented = ncsesF_df[(ncsesF_df['year'] == year)].copy()
for col in df_segmented.columns:
df_segmented[col] = df_segmented[col].astype(str)
df_segmented['text'] = df_segmented['se']+ ' (S&E Jobs) '+ df_segmented['jobs'] + ' (All Jobs)'
data_each_yr = dict(
type='choropleth',
locations = df_segmented['state'],
z=df_segmented['se_jobs'].astype(float),
locationmode='USA-states',
colorscale = scl,
text = df_segmented['text'],
colorbar= {'title':'% S&E Jobs'})
data_slider.append(data_each_yr)
steps = []
for index in range(len(data_slider)):
step = dict(method='restyle',
args=['visible', [False] * len(data_slider)],
label='Year {}'.format(index + 2008))
step['args'][1][index] = True
steps.append(step)
sliders = [dict(active=0, pad={"t": 1}, steps=steps)]
layout = dict(title ='Science & Engineering Share of Jobs', geo=dict(scope='usa',
projection={'type': 'albers usa'}),
sliders=sliders)
fig = go.Figure(data=data_slider, layout=layout)
fig.show()
# API returns {'data': data_slider, 'layout':layout}
# JS: response
# let data = response['data']
# let layout = response['layout']
# Plotly.newPlot('plot3', data, layout)
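# A possible backend sketch (framework and endpoint name are assumptions, not part of this notebook):
# serialize the figure with plotly and return it to the JS snippet above, e.g.
#
#   from flask import Flask, Response
#   app = Flask(__name__)
#
#   @app.route('/api/se_jobs')
#   def se_jobs():
#       # fig.to_json() handles the numpy/pandas values that plain json.dumps would reject
#       return Response(fig.to_json(), mimetype='application/json')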
import numpy as np
import pandas as pd
import plotly.graph_objs as go
import chart_studio.plotly as py
# min year in your dataset
year = 2008
# your color-scale
scl = [[0.0, '#ffffff'],[0.2, '#e6f2ff'],[0.4, '#99ccff'],
       [0.6, '#4da6ff'],[0.8, '#0066cc'],[1.0, '#004080']] # blues
data_slider = []
for year in ncsesF_df['year'].unique():
df_segmented = ncsesF_df[(ncsesF_df['year'] == year)].copy()
for col in df_segmented.columns:
df_segmented[col] = df_segmented[col].astype(str)
df_segmented['text'] = df_segmented['rd']+ '(R&D) '+ df_segmented['gdp'] + ' (GDP)'
data_each_yr = dict(
type='choropleth',
locations = df_segmented['state'],
z=df_segmented['rd_gdp'].astype(float),
locationmode='USA-states',
colorscale = scl,
text = df_segmented['text'],
                        colorbar= {'title':'R&D as % of GDP'})
data_slider.append(data_each_yr)
steps = []
for index in range(len(data_slider)):
step = dict(method='restyle',
args=['visible', [False] * len(data_slider)],
label='Year {}'.format(index + 2008))
step['args'][1][index] = True
steps.append(step)
sliders = [dict(active=0, pad={"t": 1}, steps=steps)]
layout = dict(title ='Research & Development of GDP by state (USD Millions)', geo=dict(scope='usa',
projection={'type': 'albers usa'}),
sliders=sliders)
fig = go.Figure(data=data_slider, layout=layout)
fig.show()
###Output
_____no_output_____ |
Notebook-Class-exercises/Step-3-Prepare-Data-Task-7-Merge-Datasets.ipynb | ###Markdown
Step 3 - Prepare Data - Task 7 - Merge Dataset Import libraries
###Code
import pandas as pd
from datetime import date
###Output
_____no_output_____
###Markdown
Set up environment flag
###Code
using_Google_colab = False
using_Anaconda_on_Mac_or_Linux = True
using_Anaconda_on_windows = False
###Output
_____no_output_____
###Markdown
If using Google colab, get connected to google drive
###Code
if using_Google_colab:
from google.colab import drive
drive.mount('/content/drive')
###Output
_____no_output_____
###Markdown
PD 7.1 Activity 1 - Upload county level covid cases data by county
###Code
if using_Google_colab:
df_sorted_confirmed_cases_county = pd.read_csv('/content/drive/MyDrive/COVID_Project/output/confirmed_cases_by_county.csv')
if using_Anaconda_on_Mac_or_Linux:
df_sorted_confirmed_cases_county = pd.read_csv('../output/confirmed_cases_by_county.csv')
if using_Anaconda_on_windows:
df_sorted_confirmed_cases_county = pd.read_csv(r'../output/confirmed_cases_by_county.csv')
df_sorted_confirmed_cases_county = df_sorted_confirmed_cases_county.astype({'countyFIPS': int,
'stateFIPS': int,
'Date': 'datetime64[ns]'})
df_sorted_confirmed_cases_county
###Output
_____no_output_____
###Markdown
PD 7.1 Activity 2 - Upload county level covid deaths by county
###Code
if using_Google_colab:
df_sorted_covid_deaths_county = pd.read_csv('/content/drive/MyDrive/COVID_Project/output/covid_deaths_by_county.csv')
if using_Anaconda_on_Mac_or_Linux:
df_sorted_covid_deaths_county = pd.read_csv('../output/covid_deaths_by_county.csv')
if using_Anaconda_on_windows:
df_sorted_covid_deaths_county = pd.read_csv(r'../output/covid_deaths_by_county.csv')
df_sorted_covid_deaths_county = df_sorted_covid_deaths_county.astype({'countyFIPS': int,
'stateFIPS': int,
'Date': 'datetime64[ns]'})
df_sorted_covid_deaths_county
###Output
_____no_output_____
###Markdown
PD 7.2 Activity 3 - Merge covid cases and deaths data for each county and date
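The merge keeps a single copy of the columns that appear in both frames: overlapping right-hand columns are tagged with a `_DROP` suffix and then removed with a negative-lookahead regex. A minimal sketch of the pattern on toy data (the column names here are made up for illustration):

```python
import pandas as pd

left = pd.DataFrame({'key': [1, 2], 'value': ['a', 'b']})
right = pd.DataFrame({'key': [1, 2], 'value': ['a', 'b'], 'extra': [10, 20]})

merged = pd.merge(left, right, on='key', suffixes=('', '_DROP'),
                  how='inner').filter(regex='^(?!.*_DROP)')
print(merged.columns.tolist())  # ['key', 'value', 'extra'] -- the duplicated 'value' column is dropped
```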
###Code
df_partial_abt_by_county = pd.merge(df_sorted_confirmed_cases_county, df_sorted_covid_deaths_county,
on=['stateFIPS','countyFIPS', 'Date'],
suffixes=('', '_DROP'),
how='inner').filter(regex='^(?!.*_DROP)')
df_partial_abt_by_county
###Output
_____no_output_____
###Markdown
PD 7.3 Activity 4 - Upload county level Google social mobility data
###Code
if using_Google_colab:
df_google_mobility_data = pd.read_csv('/content/drive/MyDrive/COVID_Project/input/Google/Region_Mobility_Report_CSVs/2020_US_Region_Mobility_Report.csv')
if using_Anaconda_on_Mac_or_Linux:
df_google_mobility_data = pd.read_csv('../input/Google/Region_Mobility_Report_CSVs/2020_US_Region_Mobility_Report.csv')
if using_Anaconda_on_windows:
    df_google_mobility_data = pd.read_csv(r'..\input\Google\Region_Mobility_Report_CSVs\2020_US_Region_Mobility_Report.csv')
df_google_mobility_data = df_google_mobility_data.astype({'date': 'datetime64[ns]'})
df_google_mobility_data
###Output
_____no_output_____
###Markdown
PD 7.3 Activity 5 - Understand Google Mobility data
###Code
df_google_mobility_data.columns
df_partial_abt_by_county[df_partial_abt_by_county['County Name'] == 'Los Angeles County'].count()
df_google_mobility_data[df_google_mobility_data['sub_region_2'] == 'Los Angeles County'].count()
df_google_mobility_data[df_google_mobility_data['sub_region_2'] == 'Los Angeles County'].date.min()
df_google_mobility_data[df_google_mobility_data['sub_region_2'] == 'Los Angeles County'].date.max()
df_partial_abt_by_county[df_partial_abt_by_county['County Name'] == 'Los Angeles County'].Date.min()
df_partial_abt_by_county[df_partial_abt_by_county['County Name'] == 'Los Angeles County'].Date.max()
###Output
_____no_output_____
###Markdown
PD 7.3 Activity 6: Reformat Google Mobility data
###Code
df_google_mobility_data_clean = df_google_mobility_data.dropna(subset=['census_fips_code'])
df_google_mobility_data_clean = df_google_mobility_data_clean.astype({'census_fips_code': int})
df_google_mobility_data_clean
###Output
_____no_output_____
###Markdown
PD 7.4 Activity 7 - Merge Covid Cases, deaths and Google Data
###Code
df_abt_by_county = pd.merge(df_partial_abt_by_county,
df_google_mobility_data, left_on=['countyFIPS', 'Date'],
right_on=['census_fips_code', 'date'],
suffixes=('', '_DROP'),
how='left').filter(regex='^(?!.*_DROP)')
df_abt_by_county
###Output
_____no_output_____
###Markdown
PD7.4 Activity 8: Save County Data as Analytics Base Table
###Code
if using_Google_colab:
df_abt_by_county.to_csv('/content/drive/MyDrive/COVID_Project/output/abt_by_county.csv')
if using_Anaconda_on_Mac_or_Linux:
df_abt_by_county.to_csv('../output/abt_by_county.csv')
if using_Anaconda_on_windows:
    df_abt_by_county.to_csv(r'..\output\abt_by_county.csv')
###Output
_____no_output_____ |
notebooks/NUTS_schools.ipynb | ###Markdown
8 schools data
###Code
from numpyro.infer import Predictive
from numpyro.infer.reparam import TransformReparam, LocScaleReparam
from jax import random
from numpyro.infer import MCMC, NUTS, HMC
import numpyro.distributions as dist
import numpyro
import numpy as np
###Output
_____no_output_____
###Markdown
Let us explore NumPyro using a simple example. We will use the eight schools example from Gelman et al., Bayesian Data Analysis: Sec. 5.5, 2003, which studies the effect of coaching on SAT performance in eight schools.

The data is given by:
###Code
J = 8
y = np.array([28.0, 8.0, -3.0, 7.0, -1.0, 1.0, 18.0, 12.0])
sigma = np.array([15.0, 10.0, 16.0, 11.0, 9.0, 11.0, 10.0, 18.0])
###Output
_____no_output_____
###Markdown
where `y` are the treatment effects and `sigma` the standard error. We build a hierarchical model for the study where we assume that the group-level parameters `theta` for each school are sampled from a Normal distribution with unknown mean `mu` and standard deviation `tau`, while the observed data are in turn generated from a Normal distribution with mean and standard deviation given by `theta` (true effect) and `sigma`, respectively. This allows us to estimate the population-level parameters `mu` and `tau` by pooling from all the observations, while still allowing for individual variation amongst the schools using the group-level `theta` parameters.This is written in `numpyro` using:
###Code
def eight_schools(J, sigma, y=None):
mu = numpyro.sample('mu', dist.Normal(0, 5))
tau = numpyro.sample('tau', dist.HalfCauchy(5))
with numpyro.plate('J', J):
theta = numpyro.sample('theta', dist.Normal(mu, tau))
numpyro.sample('obs', dist.Normal(theta, sigma), obs=y)
###Output
_____no_output_____
###Markdown
Let us infer the values of the unknown parameters in our model by running MCMC, here with the Hamiltonian Monte Carlo (HMC) kernel (we switch to the No-U-Turn Sampler, NUTS, further below). Note the usage of the extra_fields argument in MCMC.run. By default, we only collect samples from the target (posterior) distribution when we run inference using MCMC. However, collecting additional fields like potential energy or the acceptance probability of a sample can be easily achieved by using the extra_fields argument. For a list of possible fields that can be collected, see the `HMCState` object. In this example, we will additionally collect the `potential_energy` for each sample.
###Code
kernel = HMC(eight_schools)
mcmc = MCMC(kernel, num_warmup=500, num_samples=1000)
rng_key = random.PRNGKey(0)
mcmc.run(rng_key, J, sigma, y=y, extra_fields=('potential_energy',))
mcmc.print_summary()
pe = mcmc.get_extra_fields()['potential_energy']
print('Expected log joint density: {:.2f}'.format(np.mean(-pe)))
###Output
Expected log joint density: -55.26
###Markdown
The values above 1 for the split Gelman Rubin diagnostic `r_hat` indicate that the chain has not fully converged. The low value for the effective sample size `n_eff`, particularly for `tau`, and the number of divergent transitions looks problematic. Fortunately, this is a common pathology that can be rectified by using a non-centered parameterization for `tau` in our model. This is straightforward to do in `numpyro` by using a `TransformedDistribution` instance together with a "reparameterization effect handler". Let us rewrite the same model but instead of sampling `theta` from a Normal(`mu`, `tau`), we will instead sample it from a base Normal(0, 1) distribution that is transformed using an `AffineTransform`. Note that by doing so, `numpyro` runs HMC by generating samples `theta_base` for the base Normal(0, 1) distribution instead. We see that the resulting chain does not suffer from the same pathology: the Gelman Rubin diagnostic is 1 for all the parameters and the effective sample size looks quite good!
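As an aside, `numpyro` also provides a `LocScaleReparam` handler (imported at the top of this notebook) that performs the same decentering without writing the affine transform by hand; a sketch of that variant (not run here):

```python
def eight_schools_locscale(J, sigma, y=None):
    mu = numpyro.sample('mu', dist.Normal(0, 5))
    tau = numpyro.sample('tau', dist.HalfCauchy(5))
    with numpyro.plate('J', J):
        # centered=0 requests the fully non-centered parameterization
        with numpyro.handlers.reparam(config={'theta': LocScaleReparam(centered=0)}):
            theta = numpyro.sample('theta', dist.Normal(mu, tau))
        numpyro.sample('obs', dist.Normal(theta, sigma), obs=y)
```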
###Code
def eight_schools_noncentered(J, sigma, y=None):
mu = numpyro.sample('mu', dist.Normal(0, 5))
tau = numpyro.sample('tau', dist.HalfCauchy(5))
with numpyro.plate('J', J):
with numpyro.handlers.reparam(config={'theta': TransformReparam()}):
theta = numpyro.sample(
'theta',
dist.TransformedDistribution(dist.Normal(0., 1.),
dist.transforms.AffineTransform(mu, tau)))
numpyro.sample('obs', dist.Normal(theta, sigma), obs=y)
nuts_kernel = NUTS(eight_schools_noncentered)
mcmc = MCMC(nuts_kernel, num_warmup=500, num_samples=1000)
rng_key = random.PRNGKey(0)
mcmc.run(rng_key, J, sigma, y=y, extra_fields=('potential_energy',))
mcmc.print_summary(exclude_deterministic=False)
pe = mcmc.get_extra_fields()['potential_energy']
print('Expected log joint density: {:.2f}'.format(np.mean(-pe)))
###Output
sample: 100%|██████████| 1500/1500 [00:04<00:00, 301.80it/s, 15 steps of size 3.87e-01. acc. prob=0.93]
###Markdown
Now, assume that we have a new school for which we have not observed any test scores, but we would like to generate predictions. `numpyro` provides a `Predictive` class for such a purpose. Note that in the absence of any observed data, we simply use the population-level parameters to generate predictions. The `Predictive` utility conditions the unobserved `mu` and `tau` sites to values drawn from the posterior distribution from our last MCMC run, and runs the model forward to generate predictions.
###Code
def new_school():
mu = numpyro.sample('mu', dist.Normal(0, 5))
tau = numpyro.sample('tau', dist.HalfCauchy(5))
return numpyro.sample('obs', dist.Normal(mu, tau))
predictive = Predictive(new_school, mcmc.get_samples())
samples_predictive = predictive(random.PRNGKey(1))
print(np.mean(samples_predictive['obs']))
###Output
4.0959787
|
Keras/02_Deep_Learning_With_MNIST.ipynb | ###Markdown
Run Now in Colab View source on GitHub Deep Learning With MNIST Dataset
###Code
import keras
keras.__version__
import numpy as np
from keras import models
from keras import layers
from keras.utils import to_categorical
from keras.datasets import mnist
(train_X, train_y), (test_X, test_y) = mnist.load_data()
type(train_X)
print("Training Data shape",train_X.shape,"Training Label shape",train_y.shape)
print("Test Data shape",test_X.shape,"Training Label shape",test_y.shape)
train_X[0]
train_y[0]
import matplotlib.pyplot as plt
plt.imshow(train_X[0], cmap=plt.cm.binary)
plt.show()
from keras import models
from keras import layers
nn = models.Sequential()
nn.add(layers.Dense(512, activation='relu', input_shape=(28 * 28,)))
nn.add(layers.Dense(10, activation='softmax'))
nn.summary()
nn.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
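# flatten each 28x28 image into a 784-dimensional vector and rescale pixel values from [0, 255] to [0, 1]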
train_X = train_X.reshape((60000, 28 * 28))
train_X = train_X.astype('float32') / 255
test_X = test_X.reshape((10000, 28 * 28))
test_X = test_X.astype('float32') / 255
from keras.utils import to_categorical
train_y = to_categorical(train_y)
test_y = to_categorical(test_y)
nn.fit(train_X, train_y, epochs=5, batch_size=128)
test_loss, test_acc = nn.evaluate(test_X, test_y)
print("Test Loss", test_loss * 100 ,"%")
print('Test Accuracy:', test_acc * 100 ,"%")
predicts = nn.predict([test_X])
target = 43
print(predicts[target])
test_y[target]
np.argmax(predicts[target])
digit = test_X[target].reshape(28,28)
plt.imshow(digit, cmap=plt.cm.binary)
plt.show()
np.argmax(predicts[target]) == np.argmax(test_y[target])
###Output
_____no_output_____ |
modeling_final.ipynb | ###Markdown
Data Load
###Code
from google.colab import drive
drive.mount('/content/drive')
pip install catboost
#Library Imports
import os # change the working directory
import copy
import numpy as np # numpy
import pandas as pd # pandas
import matplotlib.pyplot as plt
plt.style.use('seaborn')
import seaborn as sns
%matplotlib inline
import statsmodels.api as sm
import scipy.stats
from scipy.stats import skew
from scipy.stats import spearmanr
import warnings
warnings.filterwarnings('ignore')
from tqdm import tqdm
# Learning algorithms
import sklearn
from sklearn.linear_model import *
from sklearn.svm import SVR
from sklearn.cluster import KMeans
import lightgbm as lgb
from lightgbm import LGBMRegressor
import catboost
from catboost import CatBoostRegressor
from xgboost import XGBRegressor
# model validation
from sklearn.model_selection import KFold
from sklearn.model_selection import GridSearchCV # takes the guesswork out of parameter tuning
from sklearn.metrics import make_scorer # for customizing the loss function
os.chdir('/content/drive/MyDrive/Colab Notebooks/dacon/energy')
# load the data (encoding is euc-kr)
train_df = pd.read_csv('train.csv', encoding='euc-kr')
test_df = pd.read_csv('test.csv', encoding='euc-kr')
submission = pd.read_csv('sample_submission.csv', encoding='euc-kr')
# renaming columns
# original train columns: num, date_time, 전력사용량(kWh) [power usage], 기온(°C) [temperature], 풍속(m/s) [wind speed], 습도(%) [humidity], 강수량(mm) [precipitation], 일조(hr) [sunshine], 비전기냉방설비운영 [non-electric cooling equipment], 태양광보유 [has solar panels]
# original test columns: num, date_time, 기온(°C), 풍속(m/s), 습도(%), 강수량(mm), 일조(hr), 비전기냉방설비운영, 태양광보유
train_df.columns = ['num','datetime','target','temperature','windspeed','humidity','precipitation','insolation','nelec_cool_flag','solar_flag']
test_df.columns = ['num','datetime','temperature','windspeed','humidity','precipitation','insolation','nelec_cool_flag','solar_flag']
###Output
_____no_output_____
###Markdown
Feature Engineering Train Feature Engineering
###Code
train_df['datetime'] = pd.to_datetime(train_df['datetime'])
# optionally drop June
#train_df['month'] = train_df['datetime'].dt.month # month (numeric)
#train_df = train_df.loc[train_df['month']!=6,:].reset_index(drop=True)
#train_df = train_df.drop(columns=['month']) # drop the now-unused feature
train_df['dayofyear'] = train_df['datetime'].dt.dayofyear
train_df['hour'] = train_df['datetime'].dt.hour
train_df['weekday'] = train_df['datetime'].dt.weekday #time feature
train_df['hour_te'] = np.sin(2*np.pi*(train_df['hour'])/23) #time encoding hour
train_df['hour_te1'] = np.cos(2*np.pi*(train_df['hour'])/23) #time encoding hour
# apparent (felt) temperature
train_df['더위체감지수']=13.12+0.6215*train_df['temperature']-13.947*train_df['windspeed']**0.16+0.486*train_df['temperature']*train_df['windspeed']**0.16
train_df['더위체감지수']=pd.cut(train_df['더위체감지수'], bins=[0, 21, 25, 28, 31, 50], labels=[1,2,3,4,5])
# discomfort index
t = 9/5*train_df['temperature']
train_df['불쾌지수'] = t - 0.55*(1-train_df['humidity']/100)*(t-26)+32
train_df['불쾌지수'] = pd.cut(train_df['불쾌지수'], bins = [0, 68, 75, 80, 200], labels = [1,2,3,4]) # binning the discomfort index into categories helps performance
train_dfs = []
for i in range(1,61):
train_dfs.append(train_df[train_df['num']==i])
for i in range(len(train_dfs)):
train_dfs[i] = train_dfs[i].drop(columns=['windspeed','precipitation','insolation','num',
                                              'datetime','nelec_cool_flag','solar_flag']) # drop unused features
###Output
_____no_output_____
###Markdown
Test Feature Engineering
###Code
for i in range(1,61):
    test_df[test_df['num']==i] = test_df[test_df['num']==i].interpolate() # interpolate the missing weather-forecast values
test_df['datetime'] = pd.to_datetime(test_df['datetime'])
test_df['dayofyear'] = test_df['datetime'].dt.dayofyear
test_df['hour'] = test_df['datetime'].dt.hour
test_df['weekday'] = test_df['datetime'].dt.weekday #time feature
test_df['hour_te'] = np.sin(2*np.pi*(test_df['hour'])/23) #time encoding hour
test_df['hour_te1'] = np.cos(2*np.pi*(test_df['hour'])/23) #time encoding hour
# apparent (felt) temperature
test_df['더위체감지수']=13.12+0.6215*test_df['temperature']-13.947*test_df['windspeed']**0.16+0.486*test_df['temperature']*test_df['windspeed']**0.16
test_df['더위체감지수']=pd.cut(test_df['더위체감지수'], bins=[0, 21, 25, 28, 31, 50], labels=[1,2,3,4,5])
# discomfort index
t = 9/5*test_df['temperature']
test_df['불쾌지수'] = t - 0.55*(1-test_df['humidity']/100)*(t-26)+32
test_df['불쾌지수'] = pd.cut(test_df['불쾌지수'], bins = [0, 68, 75, 80, 200], labels = [1,2,3,4]) # binning the discomfort index into categories helps performance
test_dfs = []
for i in range(1,61):
test_dfs.append(test_df[test_df['num']==i])
for i in range(len(test_dfs)):
test_dfs[i] = test_dfs[i].drop(columns=['windspeed','precipitation','insolation','num',
                                             'datetime','nelec_cool_flag','solar_flag']) # drop unused features
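# Cooling Degree Hours (CDH): for each hour, the sum of (temperature - 26°C) over that hour and the
# preceding 11 hours (a shorter window at the very start of the series) -- a rolling measure of cooling demand.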
def CDH(xs): # compute cooling degree hours
ys = []
for i in range(len(xs)):
if i < 11:
ys.append(np.sum(xs[:(i+1)]-26))
else:
ys.append(np.sum(xs[(i-11):(i+1)]-26))
return np.array(ys)
for i in range(60): # add the CDH feature
train_dfs[i]['cdh'] = CDH(np.concatenate([train_dfs[i]['temperature'].values,test_dfs[i]['temperature'].values]))[:-len(test_dfs[i])]
test_dfs[i]['cdh'] = CDH(np.concatenate([train_dfs[i]['temperature'].values,test_dfs[i]['temperature'].values]))[-len(test_dfs[i]):]
train_dfs[0]
# on hold for now
#def detect_outliers(df,ratio): # IQR-based outlier removal
# outlier_indices = []
# Q1 = np.percentile(df, 25)
# Q3 = np.percentile(df, 75)
# IQR = Q3 - Q1
# outlier_step = ratio * IQR
#    return ~((df < Q1 - outlier_step) | (df > Q3 + outlier_step))
# outlier removal; the IQR multiplier is 1.25
#for i in range(60):
# idx = detect_outliers(train_y[i],1.25)
# train_y[i] = train_y[i][idx]
# train_x[i] = train_x[i][idx]
# split into train_x and train_y
train_x = []
train_y = []
for i in range(len(train_dfs)):
train_x.append(copy.deepcopy(train_dfs[i][train_dfs[i].columns[1:]]))
train_y.append(copy.deepcopy(train_dfs[i][train_dfs[i].columns[0]]))
###Output
_____no_output_____
###Markdown
Hyperparameter Tuning

Modeling after hyperparameter tuning for each building
* skipped here because it takes a very long time to run
###Code
# loss function : define SMAPE
# from sklearn.metrics import mean_absolute_error
#def smape(true, pred):
# true = np.array(true) # np.array로 바꿔야 에러 없음
# pred = np.array(pred)
# return np.mean((np.abs(true-pred))/(np.abs(true) + np.abs(pred))) # *2 , *100은 상수이므로 생략
#SMAPE = make_scorer(smape, greater_is_better=False) # smape 값이 작아져야하므로 False
# function that runs the parameter search and returns the best model
#def get_best_params(model, params, i):
# grid_model = GridSearchCV(
# model,
# param_grid = params, # 파라미터
# cv=3, # Kfold : 5
# scoring= SMAPE) #loss function
#
# grid_model.fit(train_x[i], train_y[i], verbose=100)
# scr = grid_model.best_score_
#
#    print('Best hyperparameters:\n', grid_model.best_params_)
#    print(f'{model.__class__.__name__} best score: {scr}\n\n')
#
# return grid_model.best_estimator_
# candidate parameter grid
# if you are unsure which parameter values to use, put all the candidates in the lists and GridSearchCV will pick for you.
# as an example, I only asked it to choose the better learning_rate out of 0.1 and 0.01.
#params = {
# 'boosting_type':['goss'],
# 'objective' : ['MAE'],
# 'n_estimators' : [10000, 12000],
# 'learning_rate' : [0.1, 0.01],
# 'num_leaves' : [37, 39, 41],
# 'subsample' : [1]
#}
# define the model
#model=LGBMRegressor(params)
#
#best_lgbm = []
#for i in range(len(train_dfs)):
#    print(str(i)+' building: optimizing hyperparameters with GridSearchCV')
#    # run the search
# best_lgbm.append(get_best_params(model, params, i))
# best_lgbm
#for dc in data_cols: # dc: the set of features to drop for this run
#    for k in kfold_split: # k: n_splits value for the KFold
# folds = []
# for i in range(len(train_dfs)):
# cross=KFold(n_splits=k, shuffle=True, random_state=random_seed)
# fold=[]
# for train_idx, valid_idx in cross.split(train_x[i], train_y[i]):
# fold.append((train_idx, valid_idx))
# folds.append(fold)
#
# for i in range(len(train_dfs)):
# for fold in range(k):
# print(dc,random_seed,k,i)
# train_idx, valid_idx = folds[i][fold]
# X_train=np.array(train_x[i].drop(columns=dc).iloc[train_idx])
# y_train=np.array(train_y[i].iloc[train_idx])
# X_valid=np.array(train_x[i].drop(columns=dc).iloc[valid_idx])
# y_valid=np.array(train_y[i].iloc[valid_idx])
#
# model=best_lgbm[0]
# model.fit(X_train, y_train, eval_set=[(X_valid, y_valid)], verbose=100)
# v = model.predict(np.array(test_dfs[i][train_x[i].drop(columns=dc).columns]))
#
###Output
_____no_output_____
###Markdown
After setting model parameters, model each building
###Code
# repeat over several K-fold configurations to reduce overfitting
# dropping some of the features can improve performance and also helps prevent overfitting
#random_seed = 0
#dcs = [[],['temperature'], ['humidity'], ['hour_te','hour_te1'], ['불쾌지수'], ['cdh']]
#ks = [2,3,4,5,6,7,8,9,10,4]
# repeat over several K-fold configurations to reduce overfitting
# dropping some of the features can improve performance and also helps prevent overfitting
random_seed = 0
data_cols = [[]]
kfold_split = [5]
# final parameters used
cat_mae_params = {
'objective': 'MAE',
'n_estimators': 10000,
'early_stopping_rounds': 4,
} #catboost hyper parameter
lgbm_mae_params = {
'objective': 'MAE',
'boosting_type': 'goss',
'n_estimators': 11000,
'early_stopping_round': 15,
'num_leaves': 39,
} #lightgbm hyper parameter
xgb_mae_params = {
'objective': 'reg:squarederror',
'n_estimators': 20000,
'max_depth': 8,
'learning_rate': 0.03,
'colsample_bytree': 0.9,
'subsample': 0.7,
'reg_alpha': 0.01,
'reg_lambda': 0.01,
'n_jobs': -1,
'early_stoppings': 100
} #eXtreme Gradient Boosting hyper parameter
for dc in data_cols: # dc: the set of features to drop for this run
    for k in kfold_split: # k: n_splits value for the KFold
folds = []
for i in range(len(train_dfs)):
cross=KFold(n_splits=k, shuffle=True, random_state=random_seed)
fold=[]
for train_idx, valid_idx in cross.split(train_x[i], train_y[i]):
fold.append((train_idx, valid_idx))
folds.append(fold)
for i in range(len(train_dfs)):
for fold in range(k):
print(dc,random_seed,k,i)
train_idx, valid_idx = folds[i][fold]
X_train=np.array(train_x[i].drop(columns=dc).iloc[train_idx])
y_train=np.array(train_y[i].iloc[train_idx])
X_valid=np.array(train_x[i].drop(columns=dc).iloc[valid_idx])
y_valid=np.array(train_y[i].iloc[valid_idx])
                # train CatBoost
#model=CatBoostRegressor(**cat_mae_params)
#model.fit(X_train, y_train, eval_set=[(X_valid, y_valid)], verbose=100)
#v = model.predict(np.array(test_dfs[i][train_x[i].drop(columns=dc).columns]))
                # train LightGBM
model=LGBMRegressor(**lgbm_mae_params)
model.fit(X_train, y_train, eval_set=[(X_valid, y_valid)], verbose=100)
v = model.predict(np.array(test_dfs[i][train_x[i].drop(columns=dc).columns]))
submission['answer'].iloc[(i)*168:(i+1)*168] += v/(len(kfold_split)*k*len(data_cols))
random_seed += 1
submission.to_csv('submission_out_month6.csv', index=False)
submission
###Output
_____no_output_____
###Markdown
XGBoost experiment (discarded)
###Code
for dc in data_cols: # dc: the set of features to drop for this run
    for k in kfold_split: # k: n_splits value for the KFold
folds = []
for i in range(len(train_dfs)):
cross=KFold(n_splits=k, shuffle=True, random_state=random_seed)
fold=[]
for train_idx, valid_idx in cross.split(train_x[i], train_y[i]):
fold.append((train_idx, valid_idx))
folds.append(fold)
for i in range(len(train_dfs)):
for fold in range(k):
print(dc,random_seed,k,i)
train_idx, valid_idx = folds[i][fold]
X_train=np.array(train_x[i].drop(columns=dc).iloc[train_idx])
y_train=np.array(train_y[i].iloc[train_idx])
X_valid=np.array(train_x[i].drop(columns=dc).iloc[valid_idx])
y_valid=np.array(train_y[i].iloc[valid_idx])
                # train CatBoost
#model=CatBoostRegressor(**cat_mae_params)
#model.fit(X_train, y_train, eval_set=[(X_valid, y_valid)], verbose=100)
#v = model.predict(np.array(test_dfs[i][train_x[i].drop(columns=dc).columns]))
                # train LightGBM
#model=LGBMRegressor(**lgbm_mae_params)
#model.fit(X_train, y_train, eval_set=[(X_valid, y_valid)], verbose=100)
#v = model.predict(np.array(test_dfs[i][train_x[i].drop(columns=dc).columns]))
                # train XGBoost
model=XGBRegressor(**xgb_mae_params)
model.fit(X_train, y_train, eval_set=[(X_valid, y_valid)], verbose=100)
v = model.predict(np.array(test_dfs[i][train_x[i].drop(columns=dc).columns]))
submission['answer'].iloc[(i)*168:(i+1)*168] += v/(len(kfold_split)*k*len(data_cols))
random_seed += 1
submission.to_csv('submission_xgb_ver1.csv', index=False)
submission
###Output
_____no_output_____ |
0008/eval example.ipynb | ###Markdown
```julia
using RCall
x = 1
f(var::String) = eval(:(@rput($(Symbol(var)))))

module M2
using RCall
x = 2
f(var::String) = eval(:(@rput($(Symbol(var)))))
g(var::String) = Main.eval(:(@rput($(Symbol(var)))))
g(m::Module, var::String) = Core.eval(m, :(@rput($(Symbol(var)))))
end
```

```julia
julia> M2.f("x")
2

julia> R"x"
RObject{IntSxp}
[1] 2
```

```julia
julia> M2.g("x")
1

julia> R"x"
RObject{IntSxp}
[1] 1
```

```julia
julia> M2.g(M2, "x") # equivalent to M2.f("x")
2

julia> R"x"
RObject{IntSxp}
[1] 2
```
###Code
using RCall
x = 1
f(var::String) = eval(:(@rput($(Symbol(var)))))
module M2
using RCall
x = 2
f(var::String) = eval(:(@rput($(Symbol(var)))))
g(var::String) = Main.eval(:(@rput($(Symbol(var)))))
g(m::Module, var::String) = Core.eval(m, :(@rput($(Symbol(var)))))
end
using RCall
x = 1
f(var::String) = eval(:(@rput($(Symbol(var)))))
module M2
using RCall
x = 2
f(var::String) = eval(:(@rput($(Symbol(var)))))
g(var::String) = Main.eval(:(@rput($(Symbol(var)))))
g(m::Module, var::String) = Core.eval(m, :(@rput($(Symbol(var)))))
end
###Output
WARNING: replacing module M2.
|
Lecture 46 Dot Product and Angle between 2 Vectors.ipynb | ###Markdown
The dot product
###Code
## many ways to compute the dot product
import math

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # registers the 3-D projection used further below
from IPython.display import Image
np.random.seed(100)
v1 = np.random.rand(2)
v2 = np.random.rand(2)
# method 1
dp = sum( np.multiply(v1,v2) )
# method 2
dp = np.dot( v1,v2 )
# method 3
dp = np.matmul( v1,v2 )
def dot_product(v1, v2):
return np.dot(v1, v2)
w = dot_product(v1, v2)
print(w)
X = np.zeros(2)
x,y = zip(X, v1)
plt.plot(x, y, label='v1')
x,y = zip(X, v2)
plt.plot(x,y, label='v2')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
dot product : the geometric perspective

$$\alpha = ||a||\,||b||\cos\theta$$

Angle between two vectors
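Solving for the angle gives

$$\theta = \arccos\left(\frac{a\cdot b}{||a||\,||b||}\right),$$

which is exactly what the `angle` helper below computes. For example, $a=(1,0)$ and $b=(1,1)$ give $a\cdot b=1$, $||a||=1$, $||b||=\sqrt{2}$, so $\theta=\arccos(1/\sqrt{2})=\pi/4\approx 0.785$ radians.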
###Code
Image(url= "https://i.imgur.com/zvFu6Du.png", width=400)
def angle(u,v, degree=False):
u_norm = np.linalg.norm(u, 2)
v_norm = np.linalg.norm(v, 2)
if degree:
return np.degrees(math.acos(np.dot(u,v) * 1/u_norm * 1/v_norm))
else:
return math.acos(np.dot(u,v) * 1/u_norm * 1/v_norm)
np.random.seed(10)
u = np.random.rand(2)
v = np.random.rand(2)
theta = angle(u,v)
print(theta)
v1 = np.array([2, 4, -3])
v2 = np.array([0,-3, -3])
theta = angle(v1,v2)
print(round(theta, 3))
X = np.zeros(3)
x,y,z = zip(X, v1)
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot(x,y,z, label='v1')
x,y,z = zip(X, v2)
ax.plot(x,y,z, label='v2')
plt.legend()
plt.show()
###Output
_____no_output_____ |
VAE in Pytorch.ipynb | ###Markdown
Test the VAE architecture with a simple example: a curve with Gaussian noise
###Code
import torch as t
import torch.nn.functional as F
import matplotlib.pyplot as plt

t.manual_seed(10)
N = 1000
X1 = t.rand(N, requires_grad = True)
X = t.transpose(t.stack((X1, -1.0*t.sqrt(0.25 - (X1-0.5)**2) +0.6 + 0.1*t.rand(N)), dim = 0 ), 0, 1)
plt.figure(figsize = (8,4))
#plt.plot(X[:, 0].data.numpy(), X[:, 1].data.numpy())
plt.scatter(X[:, 0].data.numpy(), X[:, 1].data.numpy(), linewidths =.3, s=3, cmap=plt.cm.cool)
plt.axis([0, 1, 0, 2])
plt.show()
#X.shape[0]
# declare the model
enc_layer_sizes = [X.shape[1], 10, 10, 10, 1]
enc_activations = [None, F.tanh, F.tanh, None, None]
dec_layer_sizes = [enc_layer_sizes[-1], 10, 10, 10, X.shape[1]]
dec_activations = [None, None, F.tanh, F.tanh, None]
model = VAE(enc_layer_sizes, dec_layer_sizes, enc_activations, dec_activations)
# parameters
lr = 0.001
batch_size = 100
epochs = 1000
num_batch = X.shape[0]/ batch_size
# optimizer
optimizer = t.optim.Adam(model.parameters(), lr=lr)
# training loop
for i in range(int(epochs * num_batch)):
k = i % num_batch
x = X[int(k * batch_size):int((k+1) * batch_size), :]
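    # eps: fresh per-sample noise handed to the encoder (the reparameterization input of a standard VAE)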
eps = t.rand(x.shape[0],enc_layer_sizes[-1], requires_grad = True)
# loss function
loss = model.forward(x, eps)
# backpropagation
optimizer.zero_grad()
loss.backward(retain_graph=True)
optimizer.step()
if i%200 ==0:
print(loss.data)
# reconstruct the data with trained model
eps = t.rand(X.shape[0],enc_layer_sizes[-1], requires_grad = True)
Z = model.enc_mlp(X, eps)[0]
X_projected = model.dec_mlp(Z, X)[0]
plt.figure(figsize = (8,4))
plt.scatter(X[:, 0].data.numpy(), X[:, 1].data.numpy(), c='blue', lw=.3, s=3)
plt.scatter(X_projected[:, 0].data.numpy(), X_projected[:, 1].data.numpy(), c='red', lw=.3, s=3)
plt.axis([0, 1, 0, 1])
plt.show()
###Output
_____no_output_____ |
1_Titanic_Exploratory_Analysis.ipynb | ###Markdown
Kaggle Training : Titanic

Here we perform an exploratory analysis of the datasets for:
* Titanic Kaggle competition
###Code
# Required Libraries
import pandas as pd
import numpy as np
%matplotlib inline
###Output
_____no_output_____
###Markdown
Load the data
###Code
# Path of dataset
datasetFolder = 'datasets/'
# Load datasets
df_train = pd.read_csv(datasetFolder + 'train.csv')
df_test = pd.read_csv(datasetFolder + 'test.csv')
df_gendersub = pd.read_csv(datasetFolder + 'gender_submission.csv')
###Output
_____no_output_____
###Markdown
Describe datasets
###Code
# Datasets sizes
print ("Train : ", df_train.shape)
print ("Test : ", df_test.shape)
print ("GenderSub : ", df_gendersub.shape)
# Let's review the first records of the dataset
df_train.head()
# # Select just some rows
# df_train[1:10]
# # Select some cols
# df_train[['PassengerId', 'Sex']]
# # Select specific rows and columns
# df_train.loc[0:10, ['PassengerId', 'Sex']]
# Data types per column
df_train.dtypes
# Basic Statistics
df_train.describe()
df_test.describe()
# Please note that We don't have the column Survived here
df_gendersub.head()
# This file is just an example, about how we should submit our results
###Output
_____no_output_____
###Markdown
Let's review the data
###Code
df_gendersub.groupby("Survived").count()
df_train.groupby("Sex").count()
df_train[['Survived', 'Sex', 'PassengerId']].pivot_table(index=['Survived', 'Sex'], aggfunc='count')
df_train.groupby(['Survived', 'Sex'])['PassengerId'].count().unstack(0).plot.bar()
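# A natural follow-up (a sketch, not part of the original analysis): the survival *rate* by sex,
# which the raw counts above only hint at.
df_train.groupby('Sex')['Survived'].mean()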
###Output
_____no_output_____ |
Courses/Machine Learning Regression/Regression Week 4 Ridge Regression (gradient descent).ipynb | ###Markdown
Regression Week 4: Ridge Regression (gradient descent)

In this notebook, you will implement ridge regression via gradient descent. You will:
* Convert an SFrame into a Numpy array
* Write a Numpy function to compute the derivative of the regression weights with respect to a single feature
* Write a gradient descent function to compute the regression weights given an initial weight vector, step size, tolerance, and L2 penalty

Fire up Turi Create

Make sure you have the latest version of Turi Create
###Code
import turicreate
###Output
The history saving thread hit an unexpected error (DatabaseError('database disk image is malformed',)).History will not be written to the database.
###Markdown
Load in house sales data

Dataset is from house sales in King County, the region where the city of Seattle, WA is located.
###Code
sales = turicreate.SFrame('home_data.sframe/')
###Output
_____no_output_____
###Markdown
If we want to do any "feature engineering" like creating new features or adjusting existing ones we should do this directly using the SFrames as seen in the first notebook of Week 2. For this notebook, however, we will work with the existing features. Import useful functions from previous notebook As in Week 2, we convert the SFrame into a 2D Numpy array. Copy and paste `get_numpy_data()` from the second notebook of Week 2.
###Code
import numpy as np # note this allows us to refer to numpy as np instead
def get_numpy_data(data_sframe, features, output):
data_sframe['constant'] = 1 # this is how you add a constant column to an SFrame
# add the column 'constant' to the front of the features list so that we can extract it along with the others:
features = ['constant'] + features # this is how you combine two lists
# select the columns of data_SFrame given by the features list into the SFrame features_sframe (now including constant):
features_sframe = data_sframe[features]
# the following line will convert the features_SFrame into a numpy matrix:
feature_matrix = features_sframe.to_numpy()
# assign the column of data_sframe associated with the output to the SArray output_sarray
output_sarray = data_sframe[output]
# the following will convert the SArray into a numpy array by first converting it to a list
output_array = output_sarray.to_numpy()
return(feature_matrix, output_array)
###Output
_____no_output_____
###Markdown
Also, copy and paste the `predict_output()` function to compute the predictions for an entire matrix of features given the matrix and the weights:
###Code
def predict_output(feature_matrix, weights):
# assume feature_matrix is a numpy matrix containing the features as columns and weights is a corresponding numpy array
# create the predictions vector by using np.dot()
predictions = np.dot(feature_matrix, weights)
return(predictions)
###Output
_____no_output_____
###Markdown
Computing the Derivative

We are now going to move to computing the derivative of the regression cost function. Recall that the cost function is the sum over the data points of the squared difference between an observed output and a predicted output, plus the L2 penalty term.
```
Cost(w) = SUM[ (prediction - output)^2 ] + l2_penalty*(w[0]^2 + w[1]^2 + ... + w[k]^2).
```
Since the derivative of a sum is the sum of the derivatives, we can take the derivative of the first part (the RSS) as we did in the notebook for the unregularized case in Week 2 and add the derivative of the regularization part. As we saw, the derivative of the RSS with respect to `w[i]` can be written as:
```
2*SUM[ error*[feature_i] ].
```
The derivative of the regularization term with respect to `w[i]` is:
```
2*l2_penalty*w[i].
```
Summing both, we get
```
2*SUM[ error*[feature_i] ] + 2*l2_penalty*w[i].
```
That is, the derivative for the weight for feature i is the sum (over data points) of 2 times the product of the error and the feature itself, plus `2*l2_penalty*w[i]`. **We will not regularize the constant.** Thus, in the case of the constant, the derivative is just twice the sum of the errors (without the `2*l2_penalty*w[0]` term).

Recall that twice the sum of the product of two vectors is just twice the dot product of the two vectors. Therefore the derivative for the weight for feature_i is just two times the dot product between the values of feature_i and the current errors, plus `2*l2_penalty*w[i]`.

With this in mind, complete the following derivative function which computes the derivative of the weight given the value of the feature (over all data points) and the errors (over all data points). To decide when we are dealing with the constant (so we don't regularize it) we added the extra parameter `feature_is_constant` to the call, which you should set to `True` when computing the derivative of the constant and `False` otherwise.
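In equation form, writing $\lambda$ for `l2_penalty`, $h_i(x_j)$ for the value of feature $i$ on data point $j$, and $e_j$ for the error (prediction minus output) on data point $j$:

$$\frac{\partial\,\mathrm{Cost}(w)}{\partial w_i} = 2\sum_j e_j\, h_i(x_j) + 2\lambda w_i \quad (i \neq 0),$$

with the $2\lambda w_0$ term dropped for the constant.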
###Code
def feature_derivative_ridge(errors, feature, weight, l2_penalty, feature_is_constant):
if feature_is_constant == True:
derivative = 2 * np.dot(errors, feature)
# Otherwise, derivative is twice the dot product plus 2*l2_penalty*weight
else:
derivative = 2 * np.dot(errors, feature) + 2 * l2_penalty * weight
return derivative
###Output
_____no_output_____
###Markdown
To test your feature derivative run the following:
###Code
(example_features, example_output) = get_numpy_data(sales, ['sqft_living'], 'price')
my_weights = np.array([1., 10.])
test_predictions = predict_output(example_features, my_weights)
errors = test_predictions - example_output # prediction errors
# next two lines should print the same values
print (feature_derivative_ridge(errors, example_features[:,1], my_weights[1], 1, False))
print (np.sum(errors*example_features[:,1])*2+20.)
print ('')
# next two lines should print the same values
print (feature_derivative_ridge(errors, example_features[:,0], my_weights[0], 1, True))
print (np.sum(errors)*2.)
###Output
-56554166782350.0
-56554166782350.0
-22446749336.0
-22446749336.0
###Markdown
Gradient Descent

Now we will write a function that performs gradient descent. The basic premise is simple. Given a starting point we update the current weights by moving in the negative gradient direction. Recall that the gradient is the direction of *increase* and therefore the negative gradient is the direction of *decrease* and we're trying to *minimize* a cost function. The amount by which we move in the negative gradient *direction* is called the 'step size'. We stop when we are 'sufficiently close' to the optimum. Unlike in Week 2, this time we will set a **maximum number of iterations** and take gradient steps until we reach this maximum number. If no maximum number is supplied, the maximum should be set to 100 by default. (Use default parameter values in Python.)

With this in mind, complete the following gradient descent function below using your derivative function above. For each step in the gradient descent, we update the weight for each feature before computing our stopping criteria.
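Each iteration therefore applies the update

$$w_i \leftarrow w_i - \eta\,\frac{\partial\,\mathrm{Cost}(w)}{\partial w_i},$$

where $\eta$ is the step size, until `max_iterations` is reached.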
###Code
def ridge_regression_gradient_descent(feature_matrix, output, initial_weights, step_size, l2_penalty, max_iterations=100):
print ('Starting gradient descent with l2_penalty = ' + str(l2_penalty))
weights = np.array(initial_weights) # make sure it's a numpy array
iteration = 0 # iteration counter
print_frequency = 1 # for adjusting frequency of debugging output
#while not reached maximum number of iterations:
while iteration <= max_iterations:
iteration += 1 # increment iteration counter
### === code section for adjusting frequency of debugging output. ===
if iteration == 10:
print_frequency = 10
if iteration == 100:
print_frequency = 100
if iteration%print_frequency==0:
print('Iteration = ' + str(iteration))
### === end code section ===
# compute the predictions based on feature_matrix and weights using your predict_output() function
predictions = predict_output(feature_matrix, weights)
# compute the errors as predictions - output
errors = predictions - output
# from time to time, print the value of the cost function
if iteration%print_frequency==0:
print ('Cost function = ', str(np.dot(errors,errors) + l2_penalty*(np.dot(weights,weights) - weights[0]**2)))
for i in range(len(weights)): # loop over each weight
# Recall that feature_matrix[:,i] is the feature column associated with weights[i]
# compute the derivative for weight[i].
#(Remember: when i=0, you are computing the derivative of the constant!)
if i == 0:
derivative = feature_derivative_ridge(errors, feature_matrix[:, i], weights[i], 0.0, True)
else:
derivative = feature_derivative_ridge(errors, feature_matrix[:, i], weights[i], l2_penalty, False)
# subtract the step size times the derivative from the current weight
weights[i] -= step_size*derivative
print ('Done with gradient descent at iteration ', iteration)
print ('Learned weights = ', str(weights))
return weights
###Output
_____no_output_____
###Markdown
Visualizing effect of L2 penalty

The L2 penalty gets its name because it causes weights to have smaller L2 norms than they would otherwise. Let's see how large weights get penalized. Let us consider a simple model with 1 feature:
###Code
simple_features = ['sqft_living']
my_output = 'price'
###Output
_____no_output_____
###Markdown
Let us split the dataset into training set and test set. Make sure to use `seed=0`:
###Code
train_data,test_data = sales.random_split(.8,seed=0)
###Output
_____no_output_____
###Markdown
In this part, we will only use `'sqft_living'` to predict `'price'`. Use the `get_numpy_data` function to get a Numpy versions of your data with only this feature, for both the `train_data` and the `test_data`.
###Code
(simple_feature_matrix, output) = get_numpy_data(train_data, simple_features, my_output)
(simple_test_feature_matrix, test_output) = get_numpy_data(test_data, simple_features, my_output)
###Output
_____no_output_____
###Markdown
Let's set the parameters for our optimization:
###Code
initial_weights = np.array([0., 0.])
step_size = 1e-12
max_iterations=1000
###Output
_____no_output_____
###Markdown
First, let's consider no regularization. Set the `l2_penalty` to `0.0` and run your ridge regression algorithm to learn the weights of your model. Call your weights `simple_weights_0_penalty`; we'll use them later.
###Code
simple_weights_0_penalty = ridge_regression_gradient_descent(simple_feature_matrix,
output, initial_weights,
step_size, 0.0, max_iterations = 100)
simple_weights_0_penalty
###Output
Starting gradient descent with l2_penalty = 0.0
Iteration = 1
Cost function = 7433051851026171.0
Iteration = 2
Cost function = 5394267213135526.0
Iteration = 3
Cost function = 4023237736501159.0
Iteration = 4
Cost function = 3101256183922414.5
Iteration = 5
Cost function = 2481247644505113.5
Iteration = 6
Cost function = 2064308077891941.5
Iteration = 7
Cost function = 1783927097372279.5
Iteration = 8
Cost function = 1595378203154871.8
Iteration = 9
Cost function = 1468583991054997.2
Iteration = 10
Cost function = 1383318191484981.8
Iteration = 20
Cost function = 1211562140496239.0
Iteration = 30
Cost function = 1208313762678823.0
Iteration = 40
Cost function = 1208252326252869.8
Iteration = 50
Cost function = 1208251163612919.5
Iteration = 60
Cost function = 1208251140915263.0
Iteration = 70
Cost function = 1208251139777036.0
Iteration = 80
Cost function = 1208251139046557.0
Iteration = 90
Cost function = 1208251138323789.0
Iteration = 100
Cost function = 1208251137601168.0
Done with gradient descent at iteration 101
Learned weights = [7.85511563e-02 2.63024271e+02]
###Markdown
Next, let's consider high regularization. Set the `l2_penalty` to `1e11` and run your ridge regression algorithm to learn the weights of your model. Call your weights `simple_weights_high_penalty`; we'll use them later.
###Code
simple_weights_high_penalty = ridge_regression_gradient_descent(simple_feature_matrix,
output, initial_weights,
step_size, 1e11, max_iterations = 100)
simple_weights_high_penalty
###Output
Starting gradient descent with l2_penalty = 100000000000.0
Iteration = 1
Cost function = 7433051851026171.0
Iteration = 2
Cost function = 5618303898412631.0
Iteration = 3
Cost function = 4920613278115385.0
Iteration = 4
Cost function = 4652381942612294.0
Iteration = 5
Cost function = 4549258764014158.0
Iteration = 6
Cost function = 4509612390882265.0
Iteration = 7
Cost function = 4494370050281118.0
Iteration = 8
Cost function = 4488509984030220.5
Iteration = 9
Cost function = 4486256988531770.0
Iteration = 10
Cost function = 4485390752674688.0
Iteration = 20
Cost function = 4484848868034299.0
Iteration = 30
Cost function = 4484847880479027.0
Iteration = 40
Cost function = 4484846931081658.0
Iteration = 50
Cost function = 4484845981687379.5
Iteration = 60
Cost function = 4484845032293499.5
Iteration = 70
Cost function = 4484844082900019.0
Iteration = 80
Cost function = 4484843133506937.0
Iteration = 90
Cost function = 4484842184114254.5
Iteration = 100
Cost function = 4484841234721970.0
Done with gradient descent at iteration 101
Learned weights = [ 1.00782291 124.57384288]
###Markdown
This code will plot the two learned models. (The blue line is for the model with no regularization and the red line is for the one with high regularization.)
###Code
import matplotlib.pyplot as plt
%matplotlib inline
plt.plot(simple_feature_matrix,output,'k.',
simple_feature_matrix,predict_output(simple_feature_matrix, simple_weights_0_penalty),'b-',
simple_feature_matrix,predict_output(simple_feature_matrix, simple_weights_high_penalty),'r-')
###Output
_____no_output_____
###Markdown
Compute the RSS on the TEST data for the following three sets of weights:
1. The initial weights (all zeros)
2. The weights learned with no regularization
3. The weights learned with high regularization

Which weights perform best?
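If you like, a tiny helper (the function name is ours, not part of the assignment) can be used for these comparisons; it just wraps `predict_output` from above:

```python
def rss(feature_matrix, weights, output):
    # residual sum of squares for a given set of weights
    errors = predict_output(feature_matrix, weights) - output
    return (errors ** 2).sum()
```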
###Code
print (((test_output - predict_output(simple_test_feature_matrix, initial_weights))**2).sum())
print (predict_output(simple_test_feature_matrix, initial_weights)[0])
print (((test_output - predict_output(simple_test_feature_matrix, simple_weights_0_penalty))**2).sum())
print (predict_output(simple_test_feature_matrix, simple_weights_0_penalty)[0])
print (((test_output - predict_output(simple_test_feature_matrix, simple_weights_high_penalty))**2).sum())
print (predict_output(simple_test_feature_matrix, simple_weights_high_penalty)[0])
###Output
694653077641343.2
178141.60313616588
###Markdown
***QUIZ QUESTIONS***
1. What is the value of the coefficient for `sqft_living` that you learned with no regularization, rounded to 1 decimal place? What about the one with high regularization?
2. Comparing the lines you fit with no regularization versus high regularization, which one is steeper? No regularization was steeper.
3. What are the RSS on the test data for each of the sets of weights above (initial, no regularization, high regularization)?
   - initial: 1784273282524564.0
   - no regularization: 275723643923134.44
   - high regularization: 694653077641343.2

Running a multiple regression with L2 penalty

Let us now consider a model with 2 features: `['sqft_living', 'sqft_living15']`. First, create Numpy versions of your training and test data with these two features.
###Code
model_features = ['sqft_living', 'sqft_living15'] # sqft_living15 is the average squarefeet for the nearest 15 neighbors.
my_output = 'price'
(feature_matrix, output) = get_numpy_data(train_data, model_features, my_output)
(test_feature_matrix, test_output) = get_numpy_data(test_data, model_features, my_output)
###Output
_____no_output_____
###Markdown
We need to re-initialize the weights, since we have one extra parameter. Let us also set the step size and maximum number of iterations.
###Code
initial_weights = np.array([0.0,0.0,0.0])
step_size = 1e-12
max_iterations = 1000
###Output
_____no_output_____
###Markdown
First, let's consider no regularization. Set the `l2_penalty` to `0.0` and run your ridge regression algorithm to learn the weights of your model. Call your weights `multiple_weights_0_penalty`.
###Code
multiple_weights_0_penalty = ridge_regression_gradient_descent(feature_matrix,
output, initial_weights,
step_size, 0.0, max_iterations)
multiple_weights_0_penalty
###Output
Starting gradient descent with l2_penalty = 0.0
Iteration = 1
Cost function = 7433051851026171.0
Iteration = 2
Cost function = 4056752331500973.0
Iteration = 3
Cost function = 2529565114333592.0
Iteration = 4
Cost function = 1838556694275926.8
Iteration = 5
Cost function = 1525675575208603.5
Iteration = 6
Cost function = 1383789498674793.8
Iteration = 7
Cost function = 1319232606276634.5
Iteration = 8
Cost function = 1289648872028920.8
Iteration = 9
Cost function = 1275884724079266.8
Iteration = 10
Cost function = 1269278807577156.8
Iteration = 20
Cost function = 1257812386316614.8
Iteration = 30
Cost function = 1251954571266786.0
Iteration = 40
Cost function = 1246755423155437.5
Iteration = 50
Cost function = 1242139508748821.0
Iteration = 60
Cost function = 1238041401137188.0
Iteration = 70
Cost function = 1234403013463993.5
Iteration = 80
Cost function = 1231172774976820.2
Iteration = 90
Cost function = 1228304900059555.0
Iteration = 100
Cost function = 1225758739263725.8
Iteration = 200
Cost function = 1211738881421532.5
Iteration = 300
Cost function = 1207473080962631.5
Iteration = 400
Cost function = 1206175125770960.0
Iteration = 500
Cost function = 1205780190233996.0
Iteration = 600
Cost function = 1205660014471676.0
Iteration = 700
Cost function = 1205623439252682.0
Iteration = 800
Cost function = 1205612300984401.0
Iteration = 900
Cost function = 1205608902360341.5
Iteration = 1000
Cost function = 1205607858660559.5
Done with gradient descent at iteration 1001
Learned weights = [ -0.35780713 243.0557255 22.41312582]
###Markdown
Next, let's consider high regularization. Set the `l2_penalty` to `1e11` and run your ridge regression algorithm to learn the weights of your model. Call your weights `multiple_weights_high_penalty`.
###Code
multiple_weights_high_penalty = ridge_regression_gradient_descent(feature_matrix,
output, initial_weights,
step_size, 1e11, max_iterations)
multiple_weights_high_penalty
###Output
Starting gradient descent with l2_penalty = 100000000000.0
Iteration = 1
Cost function = 7433051851026171.0
Iteration = 2
Cost function = 4460489790285892.0
Iteration = 3
Cost function = 3796674468844608.5
Iteration = 4
Cost function = 3648319530437361.0
Iteration = 5
Cost function = 3615091103216103.0
Iteration = 6
Cost function = 3607602742514732.0
Iteration = 7
Cost function = 3605886322161656.0
Iteration = 8
Cost function = 3605474874533295.5
Iteration = 9
Cost function = 3605365167765576.0
Iteration = 10
Cost function = 3605329402184649.0
Iteration = 20
Cost function = 3605294281022695.0
Iteration = 30
Cost function = 3605293537267099.5
Iteration = 40
Cost function = 3605293082749905.0
Iteration = 50
Cost function = 3605292631106358.0
Iteration = 60
Cost function = 3605292179491500.5
Iteration = 70
Cost function = 3605291727877070.0
Iteration = 80
Cost function = 3605291276262785.0
Iteration = 90
Cost function = 3605290824648642.5
Iteration = 100
Cost function = 3605290373034643.5
Iteration = 200
Cost function = 3605285856902500.0
Iteration = 300
Cost function = 3605281340784634.5
Iteration = 400
Cost function = 3605276824681046.0
Iteration = 500
Cost function = 3605272308591734.5
Iteration = 600
Cost function = 3605267792516700.0
Iteration = 700
Cost function = 3605263276455942.0
Iteration = 800
Cost function = 3605258760409461.0
Iteration = 900
Cost function = 3605254244377257.0
Iteration = 1000
Cost function = 3605249728359329.0
Done with gradient descent at iteration 1001
Learned weights = [ 6.74968593 91.48927271 78.43658678]
###Markdown
Compute the RSS on the TEST data for the following three sets of weights:
1. The initial weights (all zeros)
2. The weights learned with no regularization
3. The weights learned with high regularization

Which weights perform best?
###Code
((test_output - predict_output(test_feature_matrix, initial_weights))**2).sum()
((test_output - predict_output(test_feature_matrix, multiple_weights_0_penalty))**2).sum()
((test_output - predict_output(test_feature_matrix, multiple_weights_high_penalty))**2).sum()
###Output
_____no_output_____
###Markdown
Predict the house price for the 1st house in the test set using the no regularization and high regularization models. (Remember that python starts indexing from 0.) How far is the prediction from the actual price? Which weights perform best for the 1st house?
###Code
test_output[0]
mult_0_predictions_test = predict_output(test_feature_matrix, multiple_weights_0_penalty)
mult_0_predictions_test[0]
mult_high_predictions_test = predict_output(test_feature_matrix, multiple_weights_high_penalty)
mult_high_predictions_test[0]
###Output
_____no_output_____ |
notebooks/Parallel.ipynb | ###Markdown
Testing different libraries for parallel processing in Python
###Code
import numpy as np
# Different ways to speed up your computations using multiple cpu cores
def slow_function(n=1000):
total = 0.0
for i, _ in enumerate(range(n)):
for j, _ in enumerate(range(1, n)):
total += (i * j)
return total
data = range(100)
###Output
_____no_output_____
###Markdown
Option 0: sequential loop
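To actually compare the options, you can wrap each variant in a simple timer; a minimal sketch using only the standard library (the label string is ours):

```python
import time

start = time.perf_counter()
results = [slow_function() for _ in data]
print(f"sequential: {time.perf_counter() - start:.2f}s")
```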
###Code
results = []
for _ in data:
results.append(slow_function())
print(results[:10])
###Output
[249001249500.0, 249001249500.0, 249001249500.0, 249001249500.0, 249001249500.0, 249001249500.0, 249001249500.0, 249001249500.0, 249001249500.0, 249001249500.0]
###Markdown
Option 1: Multiprocessing
- Advantage: native python library
- Disadvantage: verbose
###Code
import multiprocessing as mp
pool = mp.Pool(mp.cpu_count())
results = [pool.apply_async(slow_function, args=()) for row in data]
pool.close()
pool.join()
results = [r.get() for r in results]
print(results[:10])
###Output
[249001249500.0, 249001249500.0, 249001249500.0, 249001249500.0, 249001249500.0, 249001249500.0, 249001249500.0, 249001249500.0, 249001249500.0, 249001249500.0]
###Markdown
Option 2: Ray
- Advantage: one of the least verbose libraries I'm aware of
- Disadvantage: NOT a native python library
- More:
    * Docs: https://docs.ray.io/en/latest/index.html
    * Github: https://github.com/ray-project/ray (14.4k stars)
    * Install it first: `pip install ray`.
    * Bunch of useful tips: https://docs.ray.io/en/latest/auto_examples/tips-for-first-time.html
###Code
import ray
ray.init()
@ray.remote
def paralel_slow_function(x=1000):
return slow_function(x)
futures = [paralel_slow_function.remote() for _ in data]
print(ray.get(futures[:10]))
#ray.shutdown()
###Output
2021-01-09 21:49:39,719 INFO services.py:1090 -- View the Ray dashboard at [1m[32mhttp://127.0.0.1:8265[39m[22m
###Markdown
Option 4: pandarallel
- Advantage: Do not need anything else if you are doing your work on pandas
- Disadvantage: only works with pandas
- More:
    * Docs:
    * Github: https://github.com/nalepae/pandarallel (1.3K stars)
    * Install it first: `pip install pandarallel`.
    * Bunch of useful tips: https://github.com/nalepae/pandarallel/blob/master/docs/examples.ipynb
###Code
import pandas as pd
s = pd.Series(data)
s.head()
# Usual way to apply a function with Pandas. Applying the `slow_function`.
# Got the similar running time as shown above.
s.apply(lambda x: slow_function())
from pandarallel import pandarallel
pandarallel.initialize(progress_bar=False) # You can specify number of cores, memory, progress_bar
s.parallel_apply(lambda x: slow_function())
###Output
_____no_output_____
###Markdown
Option 5: Dask
- Advantage: It is fast and provides parallel implementations for numpy/pandas/sklearn...
- Disadvantage: implementation is similar to native numpy/pandas/sklearn but not always the same
- More:
    * Docs: https://docs.dask.org/en/latest/
    * Github: https://github.com/dask/dask (7.7K stars)
    * Install it first: `pip install dask`.
    * Bunch of useful tips: https://mybinder.org/v2/gh/dask/dask-examples/master?urlpath=lab
###Code
import dask.dataframe as dd
import pandas as pd
s = pd.Series(data)
ds = dd.from_pandas(s, 12)
ds.apply(lambda x: slow_function(), meta=('float64')).head(10)
###Output
/home/palotti/.conda/envs/cp38/lib/python3.8/site-packages/dask/dataframe/core.py:6194: UserWarning: Insufficient elements for `head`. 10 elements requested, only 9 elements available. Try passing larger `npartitions` to `head`.
warnings.warn(msg.format(n, len(r)))
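###Markdown
Dask can also parallelize plain Python functions (not just dataframes) via `dask.delayed` — a minimal sketch reusing `slow_function` and `data` from above:
###Code
import dask
lazy_results = [dask.delayed(slow_function)() for _ in data]
results = dask.compute(*lazy_results)
print(results[:10])
###Output
_____no_output_____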
|
Investigate a dataset/.ipynb_checkpoints/investigate-a-dataset-template-checkpoint.ipynb | ###Markdown
> **Tip**: Welcome to the Investigate a Dataset project! You will find tips in quoted sections like this to help organize your approach to your investigation. Before submitting your project, it will be a good idea to go back through your report and remove these sections to make the presentation of your work as tidy as possible. First things first, you might want to double-click this Markdown cell and change the title so that it reflects your dataset and investigation. Project: Investigate a Dataset (Replace this with something more specific!) Table of ContentsIntroductionData WranglingExploratory Data AnalysisConclusions Introduction> **Tip**: In this section of the report, provide a brief introduction to the dataset you've selected for analysis. At the end of this section, describe the questions that you plan on exploring over the course of the report. Try to build your report around the analysis of at least one dependent variable and three independent variables.>> If you haven't yet selected and downloaded your data, make sure you do that first before coming back here. If you're not sure what questions to ask right now, then make sure you familiarize yourself with the variables and the dataset context for ideas of what to explore.
###Code
# Use this cell to set up import statements for all of the packages that you
# plan to use.
# Remember to include a 'magic word' so that your visualizations are plotted
# inline with the notebook. See this page for more:
# http://ipython.readthedocs.io/en/stable/interactive/magics.html
###Output
_____no_output_____
###Markdown
Data Wrangling> **Tip**: In this section of the report, you will load in the data, check for cleanliness, and then trim and clean your dataset for analysis. Make sure that you document your steps carefully and justify your cleaning decisions. General Properties
###Code
# Load your data and print out a few lines. Perform operations to inspect data
# types and look for instances of missing or possibly errant data.
###Output
_____no_output_____
###Markdown
> **Tip**: You should _not_ perform too many operations in each cell. Create cells freely to explore your data. One option that you can take with this project is to do a lot of explorations in an initial notebook. These don't have to be organized, but make sure you use enough comments to understand the purpose of each code cell. Then, after you're done with your analysis, create a duplicate notebook where you will trim the excess and organize your steps so that you have a flowing, cohesive report.> **Tip**: Make sure that you keep your reader informed on the steps that you are taking in your investigation. Follow every code cell, or every set of related code cells, with a markdown cell to describe to the reader what was found in the preceding cell(s). Try to make it so that the reader can then understand what they will be seeing in the following cell(s). Data Cleaning (Replace this with more specific notes!)
###Code
# After discussing the structure of the data and any problems that need to be
# cleaned, perform those cleaning steps in the second part of this section.
###Output
_____no_output_____
###Markdown
Exploratory Data Analysis> **Tip**: Now that you've trimmed and cleaned your data, you're ready to move on to exploration. Compute statistics and create visualizations with the goal of addressing the research questions that you posed in the Introduction section. It is recommended that you be systematic with your approach. Look at one variable at a time, and then follow it up by looking at relationships between variables. Research Question 1 (Replace this header name!)
###Code
# Use this, and more code cells, to explore your data. Don't forget to add
# Markdown cells to document your observations and findings.
###Output
_____no_output_____
###Markdown
Research Question 2 (Replace this header name!)
###Code
# Continue to explore the data to address your additional research
# questions. Add more headers as needed if you have more questions to
# investigate.
###Output
_____no_output_____ |
labs/lab_01_Moreno.ipynb | ###Markdown
MAT281 - Lab N°01 Problem 01 a) Computing the number $\pi$In the 17th and 18th centuries, James Gregory and Gottfried Leibniz discovered an infinite series that can be used to compute $\pi$:$$\displaystyle \pi = 4 \sum_{k=1}^{\infty}\dfrac{(-1)^{k+1}}{2k-1} = 4(1-\dfrac{1}{3}+\dfrac{1}{5}-\dfrac{1}{7} + ...) $$Write a program that estimates the value of $\pi$ using Leibniz's method, where the input must be an integer $n$ indicating how many terms of the sum to use.* **Example**: *calcular_pi(3)* = 3.466666666666667, *calcular_pi(1000)* = 3.140592653839794 Define the function
###Code
def calcular_pi(n):
"""
calcular_pi(n)
Aproximacion del valor de pi mediante el método de Leibniz
Parameters
----------
n : int
Numero de terminos de la suma que se usarán
Returns
-------
output : float
Valor aproximado de pi.
Examples
--------
>>> calcular_pi(3)
3.466666666666667
>>> calcular_pi(1000)
3.140592653839794
"""
pi = 0 # valor incial
for k in range(1,n+1):
numerador = (-1)**(k+1) # numerador de la iteracion i
denominador = 2*k-1 # denominador de la iteracion i
pi+=numerador/denominador # suma hasta el i-esimo termino
return 4*pi
# Acceso a la documentación
help(calcular_pi)
###Output
Help on function calcular_pi in module __main__:
calcular_pi(n)
calcular_pi(n)
Aproximacion del valor de pi mediante el método de Leibniz
Parameters
----------
n : int
Numero de terminos de la suma que se usarán
Returns
-------
output : float
Valor aproximado de pi.
Examples
--------
>>> calcular_pi(3)
3.466666666666667
>>> calcular_pi(1000)
3.140592653839794
###Markdown
Verify the examples
###Code
# ejemplo 01
assert calcular_pi(3) == 3.466666666666667, "ejemplo 01 incorrecto"
# ejemplo 02
assert calcular_pi(1000) == 3.140592653839794, "ejemplo 02 incorrecto"
###Output
_____no_output_____
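###Markdown
As an aside (not part of the assignment), the same Leibniz partial sum can be written in vectorized form with NumPy:
###Code
import numpy as np
k = np.arange(1, 1001)
4 * np.sum((-1.0) ** (k + 1) / (2 * k - 1))
###Output
_____no_output_____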
###Markdown
**Note**:* Note that running `calcular_pi(3.0)` raises an error ... why?* In the labs you are not required to be this meticulous with the documentation.* First define the code and run the examples, then document it properly. b) Computing the number $e$Euler made several contributions related to $e$, but it was not until 1748, when he published his **Introductio in analysin infinitorum**, that he gave a definitive treatment of the ideas about $e$. There he showed that:$$\displaystyle e = \sum_{k=0}^{\infty}\dfrac{1}{k!} = 1+\dfrac{1}{2!}+\dfrac{1}{3!}+\dfrac{1}{4!} + ... $$Write a program that estimates the value of $e$ using Euler's method, where the input must be an integer $n$ indicating how many terms of the sum to use.* **Example**: *calcular_e(3)* = 2.5, *calcular_e(1000)* = 2.7182818284590455 Define the function
###Code
from math import factorial
def calcular_e(n):
"""
calcular_e(n)
Aproximacion del valor de e mediante el método de Euler
Parameters
----------
n : int
Numero de terminos de la suma que se usarán
Returns
-------
output : float
Valor aproximado de e.
Examples
--------
>>> calcular e(3)
2.5
>>> calcular_e(1000)
2.7182818284590455
"""
euler = 0
for k in range(0, n):
numerador = 1
denominador = factorial(k)
euler += numerador / denominador
return euler
###Output
_____no_output_____
###Markdown
Verify the examples
###Code
# ejemplo 01
assert calcular_e(3) == 2.5, "ejemplo 01 incorrecto"
# ejemplo 02
assert calcular_e(1000) == 2.7182818284590455, "ejemplo 02 incorrecto"
###Output
_____no_output_____
###Markdown
Problem 02Let $\sigma(n)$ be defined as the sum of the proper divisors of $n$ (numbers smaller than $n$ that divide $n$).[Amicable numbers](https://en.wikipedia.org/wiki/Amicable_numbers) are positive integers $n_1$ and $n_2$ such that the sum of the proper divisors of one equals the other number and vice versa, that is, $\sigma(n_1)=n_2$ and $\sigma(n_2)=n_1$.For example, 220 and 284 are amicable numbers.* the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55 and 110; therefore $\sigma(220) = 284$. * the proper divisors of 284 are 1, 2, 4, 71 and 142; hence $\sigma(284) = 220$.Implement a function called `amigos` whose input is two natural numbers $n_1$ and $n_2$ and whose output states whether the numbers are amicable or not.* **Example**: *amigos(220,284)* = True, *amigos(6,5)* = False Define the function
###Code
def amigos (n1,n2):
"""
amigos (n1,n2)
función para ver si dos números son amigos
(suma de los divisores propios de uno es igual al otro número y viceversa).
Parameters
----------
n1 : int
Primer número entero positivo
n2 : int
Segundo número entero positivo
Returns
-------
output: "Error: usted ha ingresado un parámetro negativo"
Uno o ambos parámetros ingresados es un entero negativo.
output : True
Los dos enteros positivos son amigos
output: False
Los dos enteros positivos no son amigos
Examples
--------
>>> amigos(220,284) == True
>>> amigos(6,5) == False
"""
if (n1<0):
return("Error: usted ha ingresado un parámetro negativo")
if (n2<0):
return("Error: usted ha ingresado un parámetro negativo")
else:
suma1=0
suma2=0
i=1
j=1
while i<n1:
if n1%i==0:
suma1+=i
i+=1
while j<n2:
if n2%j==0:
suma2+=j
j+=1
if(suma1==n2) and (suma2==n1):
return True
else:
return False
###Output
_____no_output_____
###Markdown
Verify the examples
###Code
# ejemplo 01
assert amigos(220,284) == True, "ejemplo 01 incorrecto"
# ejemplo 02
assert amigos(6,5) == False, "ejemplo 02 incorrecto"
###Output
_____no_output_____
###Markdown
Problem 03The [Collatz conjecture](https://en.wikipedia.org/wiki/Collatz_conjecture), also known as the $3n+1$ conjecture or Ulam's conjecture (among other names), was stated by the mathematician Lothar Collatz in 1937 and remains unresolved to this day.Consider the following operation, applicable to any positive integer:* If the number is even, divide it by 2.* If the number is odd, multiply it by 3 and add 1.The conjecture says that we always reach 1 (and therefore the cycle 4, 2, 1) no matter which number we start from. Implement a function called `collatz` whose input is a positive natural number $N$ and whose output is the sequence of numbers until reaching 1.* **Example**: *collatz(9)* = [9, 28, 14, 7, 22, 11, 34, 17, 52, 26, 13, 40, 20, 10, 5, 16, 8, 4, 2, 1] Define the function
###Code
def collatz(N):
"""
collatz (N)
función para comprobar la conjetura de Collatz
Parameters
----------
N : int
Numero entero con el cual se comprueba la conjetura de Collatz.
Returns
-------
output : list
lista con los números de la secuencia de la conjetura de Collatz del entero parámetro.
Examples
--------
>>> collatz(9) == [9, 28, 14, 7, 22, 11, 34, 17, 52, 26, 13, 40, 20, 10, 5, 16, 8, 4, 2, 1]
"""
lista=list()
lista.append(N)
numero=0
while (numero!=1):
if N%2==0:
numero=N/2
lista.append(int(numero))
N=numero
else:
numero=N*3+1
lista.append(int(numero))
N=numero
return lista
###Output
_____no_output_____
###Markdown
Verify the examples
###Code
# ejemplo 01
assert collatz(9) == [9, 28, 14, 7, 22, 11, 34, 17, 52, 26, 13, 40, 20, 10, 5, 16, 8, 4, 2, 1], "ejemplo 01 incorrecto"
###Output
_____no_output_____
###Markdown
Problem 04The [Goldbach conjecture](https://en.wikipedia.org/wiki/Goldbach%27s_conjecture) is one of the oldest open problems in mathematics. In fact, in his famous 1921 address to the Copenhagen Mathematical Society, G.H. Hardy remarked that the Goldbach conjecture is probably not only one of the hardest unsolved problems in number theory, but in all of mathematics. Its statement is the following:$$\textrm{Every even number greater than 2 can be written as the sum of two primes - Christian Goldbach (1742)}$$Implement a function called `goldbash` whose input is a positive natural number $N$ and whose output is two primes ($N1$ and $N2$) such that $N1+N2=N$. * **Example**: goldbash(4) = (2,2), goldbash(6) = (3,3), goldbash(8) = (3,5) Define the function
###Code
def primo(N):
"""
Primo(N)
función para comprobar si un número es primo
Parameters
----------
N : int
número entero para el cual queremos saber si es primo o no.
Returns
-------
output : True
el entero N es primo
output: False
el entero N no es primo
Examples
--------
>>> primo (2)=True
"""
if N<2:
return False
for i in range (2,N):
if N%i==0:
return False
return True
def goldbash(N):
"""
goldbash (N)
función para comprobar la conjetura de Goldbash
Parameters
----------
N : int
Numero entero con el cual se comprueba la conjetura de Goldbash.
Returns
-------
output : tuple
tupla con dos números enteros cuya suma es N.
Examples
--------
>>> goldbash(4) == (2,2)
>>> goldbash(6) == (3,3)
>>> goldbash(8) == (3,5)
"""
tupla=tuple()
lista=list()
for i in range (2,N):
if primo(i)==True:
lista.append(i)
i+=1
a=len(lista)
lista2=list()
for j in range(0,a):
for k in range (0,a):
if (lista[j]+lista[k])==N:
lista2.append((lista[j],lista[k]))
k+=1
j+=1
hola=(tuple(lista2))
return(hola[0])
###Output
_____no_output_____
###Markdown
Verify the examples
###Code
# ejemplo 01
assert goldbash(4) == (2,2), "ejemplo 01 incorrecto"
# ejemplo 02
assert goldbash(6) == (3,3), "ejemplo 02 incorrecto"
# ejemplo 03
assert goldbash(8) == (3,5), "ejemplo 03 incorrecto"
###Output
_____no_output_____ |
ProyectoFinal_TwitterAnalysis.ipynb | ###Markdown
###Code
# Tweepy - Python library for accessing the Twitter API.
import tweepy
# TextBlob - Python library for processing textual data
from textblob import TextBlob
# WordCloud - Python library for creating image wordclouds
from wordcloud import WordCloud
# Pandas - Data manipulation and analysis library
import pandas as pd
# NumPy - mathematical functions on multi-dimensional arrays and matrices
import numpy as np
# Regular Expression Python module
import re
# Matplotlib - plotting library to create graphs and charts
import matplotlib.pyplot as plt
# Settings for Matplotlib graphs and charts
from pylab import rcParams
rcParams['figure.figsize'] = 12, 8
config = pd.read_csv("./config.csv")
# Twitter API config
twitterApiKey = config['twitterApiKey'][0]
twitterApiSecret = config['twitterApiSecret'][0]
twitterApiAccessToken = config['twitterApiAccessToken'][0]
twitterApiAccessTokenSecret = config['twitterApiAccessTokenSecret'][0]
# Authenticate
auth = tweepy.OAuthHandler(twitterApiKey, twitterApiSecret)
auth.set_access_token(twitterApiAccessToken, twitterApiAccessTokenSecret)
twetterApi = tweepy.API(auth, wait_on_rate_limit = True)
twitterAccount = "joeBiden"
tweets = tweepy.Cursor(twetterApi.user_timeline,
screen_name=twitterAccount,
count=None,
since_id=None,
max_id=None,
trim_user=True,
exclude_replies=True,
contributor_details=False,
include_entities=False
).items(50);
df = pd.DataFrame(data=[tweet.text for tweet in tweets], columns=['Tweet'])
df.head()
# Cleaning the tweets
def cleanUpTweet(txt):
# Remove mentions
txt = re.sub(r'@[A-Za-z0-9_]+', '', txt)
# Remove hashtags
txt = re.sub(r'#', '', txt)
# Remove retweets:
txt = re.sub(r'RT : ', '', txt)
# Remove urls
txt = re.sub(r'https?:\/\/[A-Za-z0-9\.\/]+', '', txt)
return txt
df['Tweet'] = df['Tweet'].apply(cleanUpTweet)
def getTextSubjectivity(txt):
return TextBlob(txt).sentiment.subjectivity
def getTextPolarity(txt):
return TextBlob(txt).sentiment.polarity
df['Subjectivity'] = df['Tweet'].apply(getTextSubjectivity)
df['Polarity'] = df['Tweet'].apply(getTextPolarity)
df.head(50)
df = df.drop(df[df['Tweet'] == ''].index)
df.head(50)
# negative, neutral, positive analysis
def getTextAnalysis(a):
if a < 0:
return "Negative"
elif a == 0:
return "Neutral"
else:
return "Positive"
df['Score'] = df['Polarity'].apply(getTextAnalysis)
df.head(50)
positive = df[df['Score'] == 'Positive']
print(str(positive.shape[0]/(df.shape[0])*100) + " % of positive tweets")
labels = df.groupby('Score').count().index.values
values = df.groupby('Score').size().values
plt.bar(labels, values)
for index, row in df.iterrows():
if row['Score'] == 'Positive':
plt.scatter(row['Polarity'], row['Subjectivity'], color="green")
elif row['Score'] == 'Negative':
plt.scatter(row['Polarity'], row['Subjectivity'], color="red")
elif row['Score'] == 'Neutral':
plt.scatter(row['Polarity'], row['Subjectivity'], color="blue")
plt.title('Twitter Sentiment Analysis')
plt.xlabel('Polarity')
plt.ylabel('Subjectivity')
# add legend
plt.show()
objective = df[df['Subjectivity'] == 0]
print(str(objective.shape[0]/(df.shape[0])*100) + " % of objective tweets")
# Creating a word cloud
words = ' '.join([tweet for tweet in df['Tweet']])
wordCloud = WordCloud(width=600, height=400).generate(words)
plt.imshow(wordCloud)
plt.show()
###Output
_____no_output_____ |
Model/ResNet50 - 50 epochs.ipynb | ###Markdown
###Code
from google.colab import drive
drive.mount('/content/drive')
train_df = pd.read_csv('/content/drive/MyDrive/COURSES/CS231/train_split.txt', sep=" ", header=None)
train_df.columns = ['patient id', 'file_paths', 'labels', 'data source']
train_df = train_df.drop(['patient id', 'data source'], axis=1)
train_df.head()
test_df = pd.read_csv('/content/drive/MyDrive/COURSES/CS231/test_split.txt', sep=" ", header=None)
test_df.columns = ['patient id', 'file_paths', 'labels', 'data source']
test_df = test_df.drop(['patient id', 'data source'], axis=1)
test_df.head()
TRAIN_PATH = "/content/drive/MyDrive/COURSES/CS231/data/train"
TEST_PATH = "/content/drive/MyDrive/COURSES/CS231/data/test"
###Output
_____no_output_____
###Markdown
Balancing Classes
###Code
train_df['labels'].value_counts()
file_count = 4649
samples = []
for category in train_df['labels'].unique():
category_slice = train_df.query("labels == @category")
samples.append(category_slice.sample(file_count, replace=False, random_state=1))
train_df = pd.concat(samples, axis=0).sample(frac=1.0, random_state=1).reset_index(drop=True)
print(train_df['labels'].value_counts())
print(len(train_df))
###Output
normal 4649
COVID-19 4649
pneumonia 4649
Name: labels, dtype: int64
13947
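###Markdown
The same undersampling can be written more compactly with pandas grouped sampling — a sketch using the `file_count` and random state from above (it does not modify `train_df`):
###Code
balanced_check = (train_df.groupby('labels', group_keys=False)
                  .apply(lambda g: g.sample(file_count, random_state=1)))
balanced_check['labels'].value_counts()
###Output
_____no_output_____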
###Markdown
Splitting train_df into train_df and valid_df
###Code
train_df, valid_df = train_test_split(train_df, train_size=0.9, random_state=0)
print(train_df.labels.value_counts())
print(valid_df.labels.value_counts())
print(test_df.labels.value_counts())
###Output
COVID-19 4213
normal 4189
pneumonia 4150
Name: labels, dtype: int64
pneumonia 499
normal 460
COVID-19 436
Name: labels, dtype: int64
COVID-19 274
pneumonia 105
normal 100
Name: labels, dtype: int64
###Markdown
Image Data Generators
###Code
batch_size = 32
img_height = 224
img_width = 224
target_size = (img_height, img_width)
train_datagen = ImageDataGenerator(preprocessing_function=tf.keras.applications.resnet_v2.preprocess_input,
horizontal_flip=True, zoom_range=0.1)
test_datagen = ImageDataGenerator(preprocessing_function=tf.keras.applications.resnet_v2.preprocess_input)
train_generator = train_datagen.flow_from_dataframe(train_df, directory=TRAIN_PATH, x_col='file_paths', y_col='labels',
target_size=target_size, batch_size=batch_size, color_mode='rgb', class_mode='categorical')
valid_generator = test_datagen.flow_from_dataframe(valid_df, directory=TRAIN_PATH, x_col='file_paths', y_col='labels',
target_size=target_size, batch_size=batch_size, color_mode='rgb', class_mode='categorical')
test_generator = test_datagen.flow_from_dataframe(test_df, directory=TEST_PATH, x_col='file_paths', y_col='labels',
target_size=target_size, batch_size=batch_size, color_mode='rgb', class_mode='categorical', shuffle = False)
###Output
Found 12552 validated image filenames belonging to 3 classes.
Found 1395 validated image filenames belonging to 3 classes.
Found 479 validated image filenames belonging to 3 classes.
###Markdown
Create Model
###Code
base_model = ResNet50V2(include_top=False, weights="imagenet", input_shape=(img_height, img_width, 3))
for layer in base_model.layers[:190]:
layer.trainable = False
for i, layer in enumerate(base_model.layers):
print(i, layer.name, "-", layer.trainable)
model = tf.keras.Sequential([
base_model,
Flatten(),
BatchNormalization(),
Dense(256, activation='relu'),
Dropout(0.5),
BatchNormalization(),
Dense(128, activation='relu'),
Dropout(0.5),
BatchNormalization(),
Dense(64, activation='relu'),
Dropout(0.5),
BatchNormalization(),
Dense(3, activation='softmax'),
])
lr = 5e-3
model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate=lr), metrics=['accuracy'])
model.summary()
###Output
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
resnet50v2 (Functional) (None, 7, 7, 2048) 23564800
_________________________________________________________________
flatten (Flatten) (None, 100352) 0
_________________________________________________________________
batch_normalization (BatchNo (None, 100352) 401408
_________________________________________________________________
dense (Dense) (None, 256) 25690368
_________________________________________________________________
dropout (Dropout) (None, 256) 0
_________________________________________________________________
batch_normalization_1 (Batch (None, 256) 1024
_________________________________________________________________
dense_1 (Dense) (None, 128) 32896
_________________________________________________________________
dropout_1 (Dropout) (None, 128) 0
_________________________________________________________________
batch_normalization_2 (Batch (None, 128) 512
_________________________________________________________________
dense_2 (Dense) (None, 64) 8256
_________________________________________________________________
dropout_2 (Dropout) (None, 64) 0
_________________________________________________________________
batch_normalization_3 (Batch (None, 64) 256
_________________________________________________________________
dense_3 (Dense) (None, 3) 195
=================================================================
Total params: 49,699,715
Trainable params: 25,933,315
Non-trainable params: 23,766,400
_________________________________________________________________
###Markdown
Callbacks
###Code
patience = 10
# stop_patience = 10
factor = 0.1
callbacks = [
ModelCheckpoint("resnet50v2-final.h5", save_best_only=True, verbose = 0),
# EarlyStopping(patience=stop_patience, monitor='val_loss', verbose=1),
ReduceLROnPlateau(monitor='val_loss', factor=factor, patience=patience, min_lr=1e-6, verbose=1)
]
###Output
_____no_output_____
###Markdown
Model Training
###Code
epochs = 50
history = model.fit(train_generator, validation_data=valid_generator, epochs=epochs, callbacks=callbacks, verbose=1)
train_loss = [0.6487, 0.4469, 0.4074, 0.3849, 0.3576, 0.3427, 0.3471, 0.3380, 0.3410, 0.3383, 0.3361, 0.2940, 0.2783, 0.2717, 0.26, 0.2624, 0.2369, 0.2470, 0.2358, 0.2311, 0.2263, 0.2218, 0.2233, 0.2167, 0.2231, 0.2227, 0.2213, 0.2096, 0.2241, 0.2239, 0.2176, 0.2176, 0.2072, 0.2219, 0.2164, 0.2101, 0.2049, 0.2178, 0.2090, 0.2152, 0.2185, 0.2181, 0.2128, 0.2176, 0.2096, 0.2130, 0.2160, 0.2083, 0.2108, 0.2143]
val_loss = [0.3612, 0.3654, 0.6374, 0.3819, 0.5943, 1.1585, 0.4505, 0.4302, 0.5506, 0.6574, 1.1695, 1.3079, 1.7884, 3.1584, 5.1392, 4.6225, 4.8016, 4.9733, 4.8234, 5.7820, 6.4980, 4.4179, 4.2063, 4.1806, 4.2003, 5.5932, 1.5663, 1.1069, 3.2203, 2.6253, 3.3542, 4.0708, 4.2337, 5.4792, 4.8195, 3.8897, 4.0073, 4.3476, 5.2787, 5.0320, 5.5412, 3.6614, 3.8046, 4.0843, 3.6718, 3.9051, 4.3147, 4.5132, 6.02, 4.8454]
plt.plot(train_loss, label='Loss (training data)')
plt.plot(val_loss, label='Loss (validation data)')
plt.title('Loss for Training')
plt.ylabel('Loss')
plt.xlabel('No. epoch')
plt.legend(['train', 'validation'], loc="upper left")
plt.savefig('/content/drive/MyDrive/COURSES/CS231/results/resnet50_50-1')
plt.show()
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.savefig("plot/resnet50_plot.png")
plt.show()
###Output
_____no_output_____
###Markdown
Predictions on Test Set
###Code
best_model = model
best_model.load_weights('/content/drive/MyDrive/COURSES/CS231/resnet50v2-final.h5')
best_model.evaluate(test_generator)
preds = best_model.predict(test_generator)
def print_info( test_gen, preds, print_code, save_dir, subject ):
class_dict=test_gen.class_indices
labels= test_gen.labels
file_names= test_gen.filenames
error_list=[]
true_class=[]
pred_class=[]
prob_list=[]
new_dict={}
error_indices=[]
y_pred=[]
for key,value in class_dict.items():
new_dict[value]=key # dictionary {integer of class number: string of class name}
# store new_dict as a text fine in the save_dir
classes=list(new_dict.values()) # list of string of class names
dict_as_text=str(new_dict)
dict_name= subject + '-' +str(len(classes)) +'.txt'
dict_path=os.path.join(save_dir, dict_name)
with open(dict_path, 'w') as x_file:
x_file.write(dict_as_text)
errors=0
for i, p in enumerate(preds):
pred_index=np.argmax(p)
true_index=labels[i] # labels are integer values
if pred_index != true_index: # a misclassification has occurred
error_list.append(file_names[i])
true_class.append(new_dict[true_index])
pred_class.append(new_dict[pred_index])
prob_list.append(p[pred_index])
error_indices.append(true_index)
errors=errors + 1
y_pred.append(pred_index)
if print_code !=0:
if errors>0:
if print_code>errors:
r=errors
else:
r=print_code
msg='{0:^28s}{1:^28s}{2:^28s}{3:^16s}'.format('Filename', 'Predicted Class' , 'True Class', 'Probability')
print_in_color(msg, (0,255,0),(55,65,80))
for i in range(r):
msg='{0:^28s}{1:^28s}{2:^28s}{3:4s}{4:^6.4f}'.format(error_list[i], pred_class[i],true_class[i], ' ', prob_list[i])
print_in_color(msg, (255,255,255), (55,65,60))
#print(error_list[i] , pred_class[i], true_class[i], prob_list[i])
else:
msg='With accuracy of 100 % there are no errors to print'
print_in_color(msg, (0,255,0),(55,65,80))
if errors>0:
plot_bar=[]
plot_class=[]
for key, value in new_dict.items():
count=error_indices.count(key)
if count!=0:
plot_bar.append(count) # list containg how many times a class c had an error
plot_class.append(value) # stores the class
fig1=plt.figure()
fig1.set_figheight(len(plot_class)/3)
fig1.set_figwidth(10)
plt.style.use('fivethirtyeight')
for i in range(0, len(plot_class)):
c=plot_class[i]
x=plot_bar[i]
plt.barh(c, x, )
plt.title( ' Errors by Class on Test Set')
if len(classes)<= 30:
# create a confusion matrix and a test report
y_true= np.array(labels)
y_pred=np.array(y_pred)
cm = confusion_matrix(y_true, y_pred )
clr = classification_report(y_true, y_pred, target_names=classes)
length=len(classes)
if length<8:
fig_width=8
fig_height=8
else:
fig_width= int(length * .5)
fig_height= int(length * .5)
fig2 = plt.figure(figsize=(fig_width, fig_height))
sns.heatmap(cm, annot=True, vmin=0, fmt='g', cmap='Blues', cbar=False)
plt.xticks(np.arange(length)+.5, classes, rotation= 90)
plt.yticks(np.arange(length)+.5, classes, rotation=0)
plt.xlabel("Predicted")
plt.ylabel("Actual")
plt.title("Confusion Matrix")
plt.savefig("/content/drive/MyDrive/COURSES/CS231/results/resnet50_50-4.png", dpi = 100)
plt.show()
print("Classification Report:\n----------------------\n", clr)
fig1.savefig("/content/drive/MyDrive/COURSES/CS231/results/resnet50_50-3.png", dpi = 100)
save_dir = '/content/drive/MyDrive/COURSES/CS231'
subject = "kq"
print_code = 0
print_info(test_generator, preds, print_code, save_dir, subject)
###Output
_____no_output_____ |
AWS Machine Learning Foundations Course/Object_Oriented_Programming/shirt_exercise/Shirt_exercise.ipynb | ###Markdown
Use the Shirt Class You've seen what a class looks like and how to instantiate an object. Now it's your turn to write code that instantiates a shirt object. Explanation of the Code This exercise, which uses a Jupyter notebook, includes three files:- shirt_exercise.ipynb, which is the file you are currently looking at- answer.py containing answers to the exercise- tests.py, tests for checking your code - you can run these tests using the last code cell at the bottom of this notebook Your Task The shirt_exercise.ipynb file, which you are currently looking at if you are reading this, has an exercise to help guide you through coding with an object in Python. Fill out the TODOs in each section of the Jupyter notebook. You can find a solution in the answer.py file. First, run this code cell below to load the Shirt class.
###Code
class Shirt:
def __init__(self, shirt_color, shirt_size, shirt_style, shirt_price):
self.color = shirt_color
self.size = shirt_size
self.style = shirt_style
self.price = shirt_price
def change_price(self, new_price):
self.price = new_price
def discount(self, discount):
return self.price * (1 - discount)
### TODO:
# - insantiate a shirt object with the following characteristics:
# - color red, size S, style long-sleeve, and price 25
# - store the object in a variable called shirt_one
#
#
###
Shirt('red', 'S', 'long-sleeve', 25)
shirt_one = Shirt('red', 'S', 'long-sleeve', 25)
### TODO:
# - print the price of the shirt using the price attribute
# - use the change_price method to change the price of the shirt to 10
# - print the price of the shirt using the price attribute
# - use the discount method to print the price of the shirt with a 12% discount
#
###
print(shirt_one.price)
shirt_one.change_price(10)
print(shirt_one.price)
print(shirt_one.discount(.12))
### TODO:
#
# - instantiate another object with the following characteristics:
# . - color orange, size L, style short-sleeve, and price 10
# - store the object in a variable called shirt_two
#
###
shirt_two = Shirt('orange', 'L', 'short-sleeve', 10)
### TODO:
#
# - calculate the total cost of shirt_one and shirt_two
# - store the results in a variable called total
#
###
total = shirt_one.price + shirt_two.price
#print(total)
### TODO:
#
# - use the shirt discount method to calculate the total cost if
# shirt_one has a discount of 14% and shirt_two has a discount
# of 6%
# - store the results in a variable called total_discount
###
total_discount = shirt_one.discount(.14) + shirt_two.discount(.06)
#print(total_discount)
###Output
_____no_output_____
###Markdown
Test your Code The following code cell tests your code. There is a file called tests.py containing a function called run_tests(). The run_tests() function executes a handful of assert statements to check your work. You can see this file if you go to the Jupyter Notebook menu and click on "File->Open" and then open the tests.py file. Execute the next code cell. The code will produce an error if your answers in this exercise are not what was expected. Keep working on your code until all tests are passing. If you run the code cell and there is no output, then you passed all the tests! As mentioned previously, there's also a file with a solution. To find the solution, click on the Jupyter logo at the top of the workspace, and then enter the folder titled 1.OOP_syntax_shirt_practice
###Code
# Unit tests to check your solution
from tests import run_tests
run_tests(shirt_one, shirt_two, total, total_discount)
###Output
_____no_output_____ |
1b_step_functions_sagemaker/sagemaker-custom/2_pipeline/wip/3_ml-pipeline-add.ipynb | ###Markdown
TODO: As of 08/11/20 the SageMaker Python SDK doesn't support scaling policies: https://github.com/aws/sagemaker-python-sdk/issues/1123 Add Model Monitor. This might help: https://github.com/aws-samples/reinvent2019-aim362-sagemaker-debugger-model-monitor/blob/master/02_deploy_and_monitor/deploy_and_monitor.ipynb and https://github.com/aws-samples/reinvent2019-aim362-sagemaker-debugger-model-monitor/blob/master/02_deploy_and_monitor/monitoringjob_utils.py
###Code
from stress import stress_button
stress_button
###Output
_____no_output_____ |
new_toxic_predict.ipynb | ###Markdown
###Code
!pip install transformers
!git clone https://github.com/NVIDIA/apex
%cd apex
!pip install -v --disable-pip-version-check --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./
!pip install fast-bert
!pip install torch
from transformers import BertTokenizer
from pathlib import Path
import torch
from box import Box
import pandas as pd
import collections
import os
from tqdm import tqdm, trange
import sys
import random
import numpy as np
import apex
from sklearn.model_selection import train_test_split
import datetime
from fast_bert.modeling import BertForMultiLabelSequenceClassification
from fast_bert.data_cls import BertDataBunch, InputExample, InputFeatures, MultiLabelTextProcessor, convert_examples_to_features
from fast_bert.learner_cls import BertLearner
from fast_bert.metrics import accuracy_multilabel, accuracy_thresh, fbeta, roc_auc
torch.cuda.empty_cache()
pd.set_option('display.max_colwidth', -1)
run_start_time = datetime.datetime.today().strftime('%Y-%m-%d_%H-%M-%S')
DATA_PATH = Path('../data/')
LABEL_PATH = Path('../labels/')
AUG_DATA_PATH = Path('../data/data_augmentation/')
MODEL_PATH=Path('../models/')
LOG_PATH=Path('../logs/')
MODEL_PATH.mkdir(exist_ok=True)
model_state_dict = None
# BERT_PRETRAINED_PATH = Path('../../bert_models/pretrained-weights/cased_L-12_H-768_A-12/')
BERT_PRETRAINED_PATH = Path('../../bert_models/pretrained-weights/uncased_L-12_H-768_A-12/')
# BERT_PRETRAINED_PATH = Path('../../bert_fastai/pretrained-weights/uncased_L-24_H-1024_A-16/')
# FINETUNED_PATH = Path('../models/finetuned_model.bin')
FINETUNED_PATH = None
# model_state_dict = torch.load(FINETUNED_PATH)
LOG_PATH.mkdir(exist_ok=True)
OUTPUT_PATH = MODEL_PATH/'output'
OUTPUT_PATH.mkdir(exist_ok=True)
args = Box({
"run_text": "multilabel toxic comments with freezable layers",
"train_size": -1,
"val_size": -1,
"log_path": LOG_PATH,
"full_data_dir": DATA_PATH,
"data_dir": DATA_PATH,
"task_name": "toxic_classification_lib",
"no_cuda": False,
"bert_model": BERT_PRETRAINED_PATH,
"output_dir": OUTPUT_PATH,
"max_seq_length": 512,
"do_train": True,
"do_eval": True,
"do_lower_case": True,
"train_batch_size": 8,
"eval_batch_size": 16,
"learning_rate": 5e-5,
"num_train_epochs": 6,
"warmup_proportion": 0.0,
"no_cuda": False,
"local_rank": -1,
"seed": 42,
"gradient_accumulation_steps": 1,
"optimize_on_cpu": False,
"fp16": True,
"fp16_opt_level": "O1",
"weight_decay": 0.0,
"adam_epsilon": 1e-8,
"max_grad_norm": 1.0,
"max_steps": -1,
"warmup_steps": 500,
"logging_steps": 50,
"eval_all_checkpoints": True,
"overwrite_output_dir": True,
"overwrite_cache": False,
"seed": 42,
"loss_scale": 128,
"task_name": 'intent',
"model_name": 'xlnet-base-cased',
"model_type": 'xlnet'
})
import logging
logfile = str(LOG_PATH/'log-{}-{}.txt'.format(run_start_time, args["run_text"]))
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
handlers=[
logging.FileHandler(logfile),
logging.StreamHandler(sys.stdout)
])
logger = logging.getLogger()
logger.info(args)
# tokenizer = BertTokenizer.from_pretrained(BERT_PRETRAINED_PATH, do_lower_case=args['do_lower_case'])
device = torch.device('cuda')
if torch.cuda.device_count() > 1:
args.multi_gpu = True
else:
args.multi_gpu = False
label_cols = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"]
from fast_bert.prediction import BertClassificationPredictor
predictor = BertClassificationPredictor(args.output_dir/'model_out', args.output_dir, LABEL_PATH,
multi_label=True, model_type='xlnet', do_lower_case=False)
predictor = BertClassificationPredictor('../models/output/model_out', args.output_dir, LABEL_PATH, model_type='xlnet', do_lower_case=False)
args.output_dir
output = predictor.predict_batch(list(pd.read_csv("../data/test.csv")['comment_text'].values))
pd.DataFrame(output).to_csv('../data/output_xlnet.csv')
results = pd.read_csv('../data/output_xlnet.csv')
preds = pd.DataFrame([{item[0]: item[1] for item in pred} for pred in output])
preds.head()
test_df = pd.read_csv("../data/test.csv")  # fixed: the predictions above were made on test.csv, so the ids must come from it (was train.csv)
test_df.head()
output_df = pd.merge(test_df, preds, how='left', left_index=True, right_index=True)
del output_df['comment_text']
columns = ['id','toxic','severe_toxic','obscene','threat','insult','identity_hate']
output_df = output_df[columns]
output_df.to_csv('../data/output_xlnet.csv', index=None)
pd.read_csv('../data/output_xlnet.csv', index_col='id')
###Output
_____no_output_____ |
3_classification.ipynb | ###Markdown
MNIST data set
###Code
from sklearn.datasets import fetch_openml
mnist = fetch_openml('mnist_784', version=1)
type(mnist)
X, y = mnist['data'], mnist['target']
X.shape, y.shape
digit = X[0]
plt.imshow(digit.reshape(28, 28), cmap='binary');
y[0]
y = y.astype(int)
###Output
_____no_output_____
###Markdown
Figure 3-1. Sample digits from the MNIST data set
###Code
rng = np.random.default_rng(42)
fig, axs = plt.subplots(10, 10, figsize=(16, 16), sharex=True, sharey=True)
for i in range(10):
X_new = X[y == i]
selected = rng.choice(X_new, size=10, replace=False)
for j in range(10):
axs[i, j].imshow(selected[j].reshape(28, 28), cmap='binary')
axs[i, j].axis('off')
split = 60000
X_train, X_test, y_train, y_test = X[:split], X[split:], y[:split], y[split:]
X_train.shape, y_test.shape
###Output
_____no_output_____
###Markdown
Binary classifier to detect 5
###Code
from sklearn.linear_model import SGDClassifier
y_train_5 = (y_train == 5)
y_test_5 = (y_test == 5)
sgd_clf = SGDClassifier(random_state=42)
sgd_clf.fit(X_train, y_train_5) # only classify 5's from others
sgd_clf.predict([digit]) # True if digit is 5, else False
from sklearn.model_selection import cross_val_score
scores_5 = cross_val_score(sgd_clf, X_train, y_train_5, cv=3, scoring='accuracy')
scores_5
###Output
_____no_output_____
###Markdown
Confusion matrix NOTE: below, the confusion matrix computed from cross_val_predict is more accurate than the one computed on the full training set. cross_val_predict splits the training data into folds and predicts each validation fold with a model trained only on the remaining folds, so the estimator never sees a sample before predicting it. This gives an out-of-sample, less optimistic estimate of the errors.
###Code
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import confusion_matrix
y_train_pred = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3)
confusion_matrix(y_train_5, y_train_pred)
confusion_matrix(y_train_5, sgd_clf.predict(X_train))
###Output
_____no_output_____
###Markdown
precision & recall In a binary classification setting:1. recall is the fraction of actual positives the model correctly identifies. Also known as sensitivity or true positive rate (TPR). - tp / (tp + fn)2. precision is the fraction of positive predictions that are correct. - tp / (tp + fp)3. specificity is the fraction of actual negatives that are correctly identified. - tn / (tn + fp)
###Code
from sklearn.metrics import plot_precision_recall_curve
disp = plot_precision_recall_curve(sgd_clf, X_train, y_train_5, response_method='decision_function')
y_scores = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3,
method='decision_function') # use decision_function (instead of predict) to return scores
print(y_scores[:5])
print(y_train_pred[:5])
# scores above 0 are classified as positive class, else negative class
from sklearn.metrics import precision_recall_curve
precision, recall, threshold = precision_recall_curve(y_train_5, y_scores)
print(len(precision), len(recall), len(threshold)) # p & r one more than t
plt.plot(threshold, precision[:-1], 'b--', label='precision')
plt.plot(threshold, recall[:-1], 'g-', label='recall')
plt.xlabel('Threshold')
plt.legend()
plt.xlim([-50000, 50000])
plt.show();
from sklearn.metrics import precision_score, recall_score
# scores at default threshold of 0
print(precision_score(y_train_5, y_train_pred))
print(recall_score(y_train_5, y_train_pred))
###Output
0.8370879772350012
0.6511713705958311
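###Markdown
Precision and recall are often combined into a single number, the F1 score (their harmonic mean) — a quick check at the default threshold, added for reference:
###Code
from sklearn.metrics import f1_score
f1_score(y_train_5, y_train_pred)
###Output
_____no_output_____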
###Markdown
NOTE that pr curves computed on the full training set are more optimistic than those from cross-validation. The cross-validated pr curve is a more accurate representation of the model's ability to generalize to unseen data.
###Code
plt.plot(recall[:-1], precision[:-1]);
###Output
_____no_output_____
###Markdown
can we make our own precision-recall curves?
###Code
def my_pr_curve(labels, scores):
my_p, my_r, my_t = [], [], []
sorted_scores = np.sort(scores)
for thresh in sorted_scores:
preds = (scores >= thresh)
tn, fp, fn, tp = confusion_matrix(labels, preds).ravel()
my_p.append(tp / (tp + fp))
my_r.append(tp / (tp + fn))
my_t.append(thresh)
return (my_p, my_r, my_t)
# my_p, my_r, my_t = my_pr_curve(y_train_5, y_scores)
# print(len(my_p), len(my_r), len(my_t))
# plt.plot(my_t, my_p, 'b--', label='precision')
# plt.plot(my_t, my_r, 'g-', label='recall')
# plt.xlabel('Threshold')
# plt.legend()
# plt.xlim([-50000, 50000])
# plt.savefig('./figures/3_clf/my_precision_recall_curve.png', dpi=200)
# plt.show();
###Output
60000 60000 60000
###Markdown
Yes, we can make our own pr curve, but it is INSANELY slow compared to sklearn's: my_pr_curve recomputes a full confusion matrix at every one of the 60,000 thresholds (roughly O(n²) work overall), whereas sklearn sorts the scores once and accumulates the counts. Below is the picture generated by 'my_pr_curve'
###Code
from sklearn.metrics import roc_curve, plot_roc_curve
fpr, tpr, thresholds = roc_curve(y_train_5, y_scores)
plt.plot(fpr, tpr)
plt.plot([0, 1], [0, 1], 'k--', alpha=0.5)
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.show();
###Output
_____no_output_____
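###Markdown
The ROC curve is usually summarized by the area under it (AUC); a quick check with the same cross-validated scores, added for reference:
###Code
from sklearn.metrics import roc_auc_score
roc_auc_score(y_train_5, y_scores)
###Output
_____no_output_____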
###Markdown
Multiclass classification Inherently, some classifiers are binary; others are multiclass. Examples: - binary: SGD, SVM - multiclass: logistic regression, LDA, QDA, RandomForest, naive Bayes (NB), knn. However, sklearn allows using binary classifiers for multiclass classification. Internally, sklearn uses ovr or ovo strategies when a binary classifier is given a multiclass task. - ovr (one-vs-rest): train one binary classifier per class; choose the class with the maximum score. - ovo (one-vs-one): train a binary classifier for each pair of classes; choose the class that wins the most duels. sklearn automatically chooses either ovr or ovo based on the estimator. We can explicitly control which of the two strategies is used with OneVsRestClassifier & OneVsOneClassifier
###Code
%%timeit -n 1 -r 1
# train a binary classifier (SGD) on multiclass
# sklearn automatically chooses either ovr or ovo strategy
from sklearn.linear_model import SGDClassifier
sgd_clf = SGDClassifier()
sgd_clf.fit(X_train, y_train) # NOT y_train_5
print(sgd_clf.predict([digit])) # true class is 5
###Output
[5]
312 µs ± 0 ns per loop (mean ± std. dev. of 1 run, 1 loop each)
###Markdown
ovr & ovo classifiers However, we can specify the strategy explicitly.
###Code
from sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier
ovr_clf = OneVsRestClassifier(SGDClassifier())
ovo_clf = OneVsOneClassifier(SGDClassifier())
ovr_clf.fit(X_train, y_train)
len(ovr_clf.estimators_) # equal to n_classes
ovr_clf.decision_function([digit])
ovr_clf.estimators_[0].decision_function([digit])
sgd_clf.decision_function([digit])
ovo_clf.fit(X_train, y_train)
len(ovo_clf.estimators_)
ovo_clf.decision_function([digit])
ovo_clf.predict([digit])
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis
from sklearn.neighbors import KNeighborsClassifier
# scale training data
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
# let's fit a few models
sgd_clf = SGDClassifier()
svm_clf = SVC()
log_clf = LogisticRegression()
lda_clf = LinearDiscriminantAnalysis()
qda_clf = QuadraticDiscriminantAnalysis()
knn_clf = KNeighborsClassifier()
%time sgd_scores = cross_val_score(sgd_clf, X_train_scaled, y_train, cv=3, scoring='accuracy')
%time svm_scores = cross_val_score(svm_clf, X_train_scaled, y_train, cv=3, scoring='accuracy')
%time log_scores = cross_val_score(log_clf, X_train_scaled, y_train, cv=3, scoring='accuracy')
%time lda_scores = cross_val_score(lda_clf, X_train_scaled, y_train, cv=3, scoring='accuracy')
%time qda_scores = cross_val_score(qda_clf, X_train_scaled, y_train, cv=3, scoring='accuracy')
%time knn_scores = cross_val_score(knn_clf, X_train_scaled, y_train, cv=3, scoring='accuracy')
print(sgd_scores.mean(), sgd_scores.std())
print(svm_scores.mean(), svm_scores.std())
print(log_scores.mean(), log_scores.std())
print(lda_scores.mean(), lda_scores.std())
print(qda_scores.mean(), qda_scores.std())
print(knn_scores.mean(), knn_scores.std())
###Output
0.9007333333333333 0.007053879941012768
0.9602666666666666 0.000573488351136155
0.9080333333333334 0.0016744816776808513
0.33349999999999996 0.28691871090374477
0.5289833333333335 0.009337766804160881
0.9403666666666667 0.002027039439401452
###Markdown
Error analysis
###Code
sgd_clf = SGDClassifier()
sgd_clf.fit(X_train_scaled, y_train)  # refit on the scaled features; cross_val_score above does not fit sgd_clf in place
y_train_pred = sgd_clf.predict(X_train_scaled);
sns.heatmap(confusion_matrix(y_train, y_train_pred, normalize='true'), annot=False);
confusion_matrix(y_train, y_train_pred, normalize='true').round(2)
from sklearn.metrics import plot_confusion_matrix
fig, ax = plt.subplots(1, 1, figsize=(10, 10))
plot_confusion_matrix(sgd_clf, X_train_scaled, y_train, normalize='true', ax=ax);
# ugh, that looks ugly!!
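# Added view: zero out the diagonal of the row-normalized confusion matrix so
# that only the misclassifications stand out.
norm_cm = confusion_matrix(y_train, y_train_pred, normalize='true')
np.fill_diagonal(norm_cm, 0)
sns.heatmap(norm_cm, annot=False);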
###Output
_____no_output_____ |
paper/Advection_diffusion/AD_artificial/Testing/41_41_2.ipynb | ###Markdown
2D Advection-Diffusion equation In this notebook we provide a simple example of the DeepMoD algorithm and apply it to the 2D advection-diffusion equation.
###Code
# General imports
import numpy as np
import torch
import matplotlib.pylab as plt
# DeepMoD functions
from deepymod import DeepMoD
from deepymod.model.func_approx import NN
from deepymod.model.library import Library2D_third
from deepymod.model.constraint import LeastSquares
from deepymod.model.sparse_estimators import Threshold,PDEFIND
from deepymod.training import train
from deepymod.training.sparsity_scheduler import TrainTestPeriodic
from scipy.io import loadmat
# Settings for reproducibility
np.random.seed(1)
torch.manual_seed(1)
if torch.cuda.is_available():
device = 'cuda'
else:
device = 'cpu'
%load_ext autoreload
%autoreload 2
###Output
_____no_output_____
###Markdown
Prepare the data Next, we prepare the dataset.
###Code
data = loadmat('Diffusion_2D_space41.mat')
data = np.real(data['Expression1']).reshape((41,41,41,4))[:,:,:,3]
down_data= np.take(np.take(np.take(data,np.arange(0,data.shape[0],1),axis=0),np.arange(0,data.shape[1],1),axis=1),np.arange(0,data.shape[2],22),axis=2)
print("Dowmsampled shape:",down_data.shape)
width, width_2, steps = down_data.shape
x_arr = np.linspace(0,1,width)
y_arr = np.linspace(0,1,width_2)
t_arr = np.linspace(0,1,steps)
x_grid, y_grid, t_grid = np.meshgrid(x_arr, y_arr, t_arr, indexing='ij')
X = np.transpose((t_grid.flatten(), x_grid.flatten(), y_grid.flatten()))
y = np.float32(down_data.reshape((down_data.size, 1)))
###Output
_____no_output_____
###Markdown
We select the noise level to add to the dataset.
###Code
noise_level = 0.0
y_noisy = y + noise_level * np.std(y) * np.random.randn(y.size, 1)
###Output
_____no_output_____
###Markdown
Select the number of samples:
###Code
y_noisy.shape
number_of_samples = 3362
idx = np.random.permutation(y.shape[0])
X_train = torch.tensor(X[idx, :][:number_of_samples], dtype=torch.float32, requires_grad=True).to(device)
y_train = torch.tensor(y_noisy[idx, :][:number_of_samples], dtype=torch.float32).to(device)
###Output
_____no_output_____
###Markdown
Configuration of DeepMoD Configuration of the function approximator: here the first argument is the number of network inputs, the list gives the sizes of the hidden layers, and the last argument is the number of outputs.
###Code
network = NN(3, [40, 40, 40, 40], 1)
###Output
_____no_output_____
###Markdown
Configuration of the library function: We select the library with a 2D spatial input. Note that the max differential order has been pre-determined here out of convenience. So, for poly_order 1 the library contains the following 12 terms:* [$1, u_x, u_y, u_{xx}, u_{yy}, u_{xy}, u, u u_x, u u_y, u u_{xx}, u u_{yy}, u u_{xy}$]
###Code
library = Library2D_third(poly_order=0)
###Output
_____no_output_____
###Markdown
Configuration of the sparsity estimator and sparsity scheduler used. In this case we use the most basic threshold-based Lasso estimator and a scheduler that assesses the validation loss after a given patience. If that value is smaller than 1e-5, the algorithm is considered converged.
###Code
estimator = Threshold(0.05)
sparsity_scheduler = TrainTestPeriodic(periodicity=50, patience=200, delta=1e-5)
###Output
_____no_output_____
###Markdown
Configuration of the constraint
###Code
constraint = LeastSquares()
# The LeastSquares constraint fits the coefficients of the active library terms
###Output
_____no_output_____
###Markdown
Now we instantiate the model and select the optimizer
###Code
model = DeepMoD(network, library, estimator, constraint).to(device)
# Defining optimizer
optimizer = torch.optim.Adam(model.parameters(), betas=(0.99, 0.99), amsgrad=True, lr=2e-3)
###Output
_____no_output_____
###Markdown
Run DeepMoD We can now run DeepMoD using all the options we have set and the training data:* The directory where the tensorboard file is written (log_dir)* The ratio of train/test set used (split)* The maximum number of iterations performed (max_iterations)* The absolute change in L1 norm considered converged (delta)* The number of epochs over which the absolute change in L1 norm is calculated (patience)
###Code
train(model, X_train, y_train, optimizer,sparsity_scheduler, log_dir='runs/no_noise_2/', split=0.8, max_iterations=100000, delta=1e-6, patience=200)
###Output
8650 MSE: 1.05e-05 Reg: 2.85e-06 L1: 1.36e+00
###Markdown
Sparsity masks provide the active and non-active terms in the PDE:
###Code
model.sparsity_masks
###Output
_____no_output_____
###Markdown
estimator_coeffs gives the magnitude of the active terms:
###Code
print(model.estimator_coeffs())
data = loadmat('Diffusion_2D.mat')
usol = np.real(data['Expression1'])
usol= usol.reshape((51,51,41,4))
data_tot = usol[:,:,:,3]
print("Total data shape:",data_tot.shape)
width_tot, width_2_tot, steps_tot = data_tot.shape
x_tot = np.linspace(0,1,width_tot)
y_tot = np.linspace(0,1,width_2_tot)
t_tot = np.linspace(0,1,steps_tot)
x_grid_tot, y_grid_tot, t_grid_tot = np.meshgrid(x_tot, y_tot, t_tot, indexing='ij')
X_tot = np.transpose((t_grid_tot.flatten(), x_grid_tot.flatten(), y_grid_tot.flatten()))
noisy_sol = y_noisy.reshape(down_data.shape)
solution = model(torch.tensor(X_tot, dtype=torch.float32))
sol = solution[0].reshape(data_tot.shape).detach().numpy()
ux = solution[2][0][:,1].reshape(data_tot.shape).detach().numpy()
uy = solution[2][0][:,2].reshape(data_tot.shape).detach().numpy()
ut = solution[1][0].reshape(data_tot.shape).detach().numpy()
uxx = solution[2][0][:,3].reshape(data_tot.shape).detach().numpy()
uyy = solution[2][0][:,4].reshape(data_tot.shape).detach().numpy()
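# Quick quantitative check (added; not in the original notebook): relative L2
# error of the reconstructed field against the full-resolution ground truth.
print("relative L2 error:", np.linalg.norm(sol - data_tot) / np.linalg.norm(data_tot))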
import pysindy as ps
fd_spline = ps.SINDyDerivative(kind='spline', s=1e-2)
fd_spectral = ps.SINDyDerivative(kind='spectral')
fd_sg = ps.SINDyDerivative(kind='savitzky_golay', left=0.5, right=0.5, order=3)
dim_w = 3
denoised_sol = []
for i in np.arange(down_data.shape[2]):
uwn,sigmawn,vwn= np.linalg.svd(down_data[:,:,i])
vwn = vwn.T
denoised_sol.append(uwn[:,0:dim_w].dot(np.diag(sigmawn[0:dim_w]).dot(vwn[:,0:dim_w].T)))
denoised_sol = np.array(denoised_sol).T
denoised_sol= np.transpose(denoised_sol,axes=(1,0,2))
data_tot.shape
plt.imshow(noisy_sol[:,:,10])
plt.plot(y_arr,noisy_sol[5,:,30], 'ro')
plt.plot(y_tot,data_tot[25,:,30], 'go--')
plt.plot(y_tot,sol[25,:,30],'g', label='t = 5',linewidth=3)
plt.plot(y_arr,noisy_sol[5,:,5], 'ro')
plt.plot(y_tot,data_tot[25,:,5], 'go--')
plt.plot(y_tot,sol[25,:,5],'g', label='t = 5',linewidth=3)
plt.plot(y_tot,data_tot[:,25,2], 'go--')
plt.plot(y_tot,sol[:,25,2],'g', label='t = 5',linewidth=3)
plt.plot(y_arr,noisy_sol[:,5,2], 'ro')
plt.plot(y_tot,data_tot[25,:,1], 'bo--')
plt.plot(y_tot,sol[25,:,1],'b', label='t = 1',linewidth=3)
plt.plot(y_arr,noisy_sol[5,:,1], 'o')
plt.plot(y_tot,data_tot[25,:,30], 'go--')
plt.plot(y_tot,sol[25,:,30],'g', label='t = 5',linewidth=3)
plt.plot(y_arr,noisy_sol[5,:,30], 'o')
plt.plot(y_tot,data_tot[25,:,10], 'ro--')
plt.plot(y_tot,sol[25,:,10],'r', label='t = 10',linewidth=3)
plt.legend()
y = down_data[5,:,1]
x = y_arr
plt.plot(x,fd_spline(y,x), 'bo--')
plt.plot(x_tot,uy[25,:,1]*np.max(data_tot)/np.max(y_tot),'b', label='x = 1',linewidth=3)
plt.plot(x_tot,fd_spectral(data_tot[25,:,1],x_tot)*np.max(data_tot)/np.max(y_tot),'r', label='x = 1',linewidth=3)
y = down_data[5,:,1]
x = y_arr
plt.plot(x,fd_spline(fd_spline(y,x),x), 'bo--')
plt.plot(x_tot,uyy[25,:,1]*np.max(data_tot)/np.max(y_tot),'b', label='x = 1',linewidth=3)
plt.plot(x_tot,fd_spectral(fd_spectral(data_tot[25,:,1],x_tot),x_tot),'r', label='x = 1',linewidth=3)
y = down_data[5,:,1]
x = y_arr
plt.plot(x,fd_spline(y,x), 'bo--')
plt.plot(x_tot,uy[25,:,1]*np.max(data_tot)/np.max(y_tot),'b', label='x = 1',linewidth=3)
y = down_data[5,:,1]
x = y_arr
plt.plot(x,fd_spline(y,x), 'bo--')
plt.plot(x,uy[5,:,1]*np.max(data_tot)/np.max(y_tot),'b', label='x = 1',linewidth=3)
y = down_data[5,:,2]
x = y_arr
plt.plot(x,fd_spline(y,x), 'go--')
plt.plot(x,uy[5,:,2]*np.max(down_data)/np.max(y_grid),'g', label='x = 5',linewidth=3)
y = down_data[5,:,4]
x = y_arr
plt.plot(x,fd_spline(y,x), 'ro--')
plt.plot(x,uy[5,:,4]*np.max(down_data)/np.max(y_grid),'r', label='x = 10',linewidth=3)
plt.legend()
t = t_tot
down_data[5,2,:].shape
y = down_data[5,2,:]
t = t_arr
plt.plot(t_tot,fd_sg(y_tot,t_tot), 'bo--')
plt.plot(t,ut[5,2,:]*np.max(down_data)/np.max(t_grid),'b', label='y = 12',linewidth=3)
y = down_data[5,5,:]
t = t_arr
plt.plot(t,fd_sg(y,t), 'go--')
plt.plot(t,ut[5,5,:]*np.max(down_data)/np.max(t_grid),'g', label='y = 6',linewidth=3)
y = down_data[5,8,:]
t = t_arr
plt.plot(t,fd_sg(y,t), 'ro--')
plt.plot(t,ut[5,8,:]*np.max(down_data)/np.max(t_grid),'r', label='y = 18',linewidth=3)
plt.legend()
plt.style.use('seaborn-paper')
fig = plt.figure(figsize=(9,6))
plt.subplot(2,2, 1)
y = down_data[5,2,:]
t = t_arr
plt.plot(t,fd_sg(y,t), 'bo--')
plt.plot(t,ut[5,2,:],'b', label='y = 12',linewidth=3)
y = down_data[5,5,:]
t = t_arr
plt.plot(t,fd_sg(y,t), 'go--')
plt.plot(t,ut[5,5,:],'g', label='y = 6',linewidth=3)
y = down_data[5,8,:]
t = t_arr
plt.plot(t,fd_sg(y,t), 'ro--')
plt.plot(t,ut[5,8,:],'r', label='y = 18',linewidth=3)
plt.legend()
plt.subplot(2,2, 2)
y = down_data[5,:,1]
x = y_arr
plt.plot(x,y, 'bo--')
plt.plot(x,sol[5,:,1],'b', label='t = 1',linewidth=3)
y = down_data[5,:,2]
x = y_arr
plt.plot(x,y, 'go--')
plt.plot(x,sol[5,:,2],'g', label='t = 5',linewidth=3)
y = down_data[5,:,4]
x = y_arr
plt.plot(x,y, 'ro--')
plt.plot(x,sol[5,:,4],'r', label='t = 10',linewidth=3)
plt.legend()
plt.subplot(2,2, 3)
y = down_data[5,:,1]
x = y_arr
plt.plot(x,fd_spline(y,x), 'bo--')
plt.plot(x,uy[5,:,1]*np.max(down_data)/np.max(y_grid),'b', label='x = 1',linewidth=3)
y = down_data[5,:,2]
x = y_arr
plt.plot(x,fd_spline(y,x), 'go--')
plt.plot(x,uy[5,:,2]*np.max(down_data)/np.max(y_grid),'g', label='x = 5',linewidth=3)
y = down_data[5,:,4]
x = y_arr
plt.plot(x,fd_spline(y,x), 'ro--')
plt.plot(x,uy[5,:,4]*np.max(down_data)/np.max(y_grid),'r', label='x = 10',linewidth=3)
plt.legend()
plt.subplot(2,2,4)
y = down_data[5,:,1]
x = y_arr
plt.plot(x,fd_spline(fd_spline(y,x)), 'bo--')
plt.plot(x,uyy[5,:,1]*np.max(down_data)/(np.max(y_grid)*np.max(y_grid)),'b',label='x = 1',linewidth=3)
y = down_data[5,:,2]
x = y_arr
plt.plot(x,fd_spline(fd_spline(y,x)), 'go--')
plt.plot(x,uyy[5,:,2]*np.max(down_data)/(np.max(y_grid)*np.max(y_grid)),'g',label='x = 5',linewidth=3)
y = down_data[5,:,4]
x = y_arr
plt.plot(x,fd_spline(fd_spline(y,x)), 'ro--')
plt.plot(x,uyy[5,:,4]*np.max(down_data)/(np.max(y_grid)*np.max(y_grid)),'r',label='x = 10',linewidth=3)
plt.ylim(-10,10)
plt.legend()
#plt.savefig('derivatives.pdf')
plt.style.use('seaborn-paper')
fig = plt.figure(figsize=(13,9))
plt.subplot(2,2, 1)
y = denoised_sol[10,12,:]
t = t_arr
plt.plot(t,fd_sg(y,t), 'bo--')
plt.plot(t,ut[10,12,:]*np.max(down_data)/np.max(t_grid),'b', label='y = 12',linewidth=3)
y = denoised_sol[10,6,:]
t = t_arr
plt.plot(t,fd_sg(y,t), 'go--')
plt.plot(t,ut[10,6,:]*np.max(down_data)/np.max(t_grid),'g', label='y = 6',linewidth=3)
y = denoised_sol[10,18,:]
t = t_arr
plt.plot(t,fd_sg(y,t), 'ro--')
plt.plot(t,ut[10,18,:]*np.max(down_data)/np.max(t_grid),'r', label='y = 18',linewidth=3)
plt.legend()
plt.subplot(2,2, 2)
y = denoised_sol[10,:,1]
x = y_arr
plt.plot(x,y, 'bo--')
plt.plot(x,sol[10,:,1]*np.max(down_data),'b', label='t = 1',linewidth=3)
y = denoised_sol[10,:,2]
x = y_arr
plt.plot(x,y, 'go--')
plt.plot(x,sol[10,:,2]*np.max(down_data),'g', label='t = 5',linewidth=3)
y = denoised_sol[10,:,4]
x = y_arr
plt.plot(x,y, 'ro--')
plt.plot(x,sol[10,:,4]*np.max(down_data),'r', label='t = 10',linewidth=3)
plt.legend()
plt.subplot(2,2, 3)
y = denoised_sol[10,:,1]
x = y_arr
plt.plot(x,fd_spline(y,x), 'bo--')
plt.plot(x,uy[10,:,1]*np.max(down_data)/np.max(y_grid),'b', label='x = 1',linewidth=3)
y = denoised_sol[10,:,2]
x = y_arr
plt.plot(x,fd_spline(y,x), 'go--')
plt.plot(x,uy[10,:,2]*np.max(down_data)/np.max(y_grid),'g', label='x = 5',linewidth=3)
y = denoised_sol[10,:,4]
x = y_arr
plt.plot(x,fd_spline(y,x), 'ro--')
plt.plot(x,uy[10,:,4]*np.max(down_data)/np.max(y_grid),'r', label='x = 10',linewidth=3)
plt.legend()
plt.subplot(2,2,4)
y = down_data[10,:,1]
x = y_arr
plt.plot(x,fd_spline(fd_spline(y,x),x), 'bo--')
plt.plot(x,uyy[10,:,1]*np.max(down_data)/(np.max(y_grid)*np.max(y_grid)),'b',label='x = 1',linewidth=3)
y = denoised_sol[10,:,2]
x = y_arr
plt.plot(x,fd_spline(fd_spline(y,x),x), 'go--')
plt.plot(x,uyy[10,:,2]*np.max(down_data)/(np.max(y_grid)*np.max(y_grid)),'g',label='x = 5',linewidth=3)
y = denoised_sol[10,:,4]
x = y_arr
plt.plot(x,fd_spline(fd_spline(y,x),x), 'ro--')
plt.plot(x,uyy[10,:,4]*np.max(down_data)/(np.max(y_grid)*np.max(y_grid)),'r',label='x = 10',linewidth=3)
plt.ylim(-10,10)
plt.legend()
#plt.savefig('derivatives.pdf')
###Output
_____no_output_____ |
03_pd.ipynb | ###Markdown
pd: Pandas-related BigQuery functionality.
###Code
# export
def clean_colnames(df: pd.DataFrame, char_default: str = '_', bad_chars: str = '#:!. -') -> pd.DataFrame:
cols_to_rename = {}
for col in df.columns:
if type(col) != str:
cols_to_rename[col] = f"{char_default}{col}"
if len(cols_to_rename) > 0:
df = df.rename(columns=cols_to_rename)
df.columns = df.columns.str.replace(f'[{bad_chars}]', char_default)
return df
# hide
# tests
df = pd.DataFrame([[1, 2]], columns=[0, 'col2'])
df = clean_colnames(df)
assert list(df.columns) == ['_0', 'col2']
# hide
from nbdev.showdoc import *
# export
def cols_to_str(df: pd.DataFrame) -> pd.DataFrame:
"""
Convert all columns in df to string.
"""
for col, dtype in df.dtypes.iteritems():
if dtype != 'object':
df[col] = df[col].astype(str)
return df
# hide
# tests
df = pd.DataFrame([[1, 'b0'],[2, 'b1']], columns=['col_a_int', 'col_b'])
df = cols_to_str(df)
assert str(df.dtypes) == 'col_a_int object\ncol_b object\ndtype: object'
# export
def df_to_gbq(
df: pd.DataFrame, destination_table: str, project_id: str, if_exists: str = 'append',
print_info: bool = True, mode: str = 'pandas', cols_as_str: bool = False, clean_col_names: bool = True) -> pd.DataFrame:
"""
Save df to BigQuery enforcing schema consistency between df and destination table if it exists.
"""
# remove bad chars that are not allowed in field names in bq
if clean_col_names:
df = clean_colnames(df)
# only do anything if mode set to wrangle, otherwise just use pandas
if mode == 'wrangle':
table_id = f'{project_id}.{destination_table}'
bq_client = bigquery.Client()
# only need to handle schema's if table already exists and if_exists != 'replace'
if does_table_exist(bq_client, table_id) and if_exists != 'replace' :
old_schema = get_schema(table_id)
new_schema = df_to_bq_schema(df)
diffs = schema_diff(old_schema, new_schema)
if len(diffs) > 0:
# update the table schema in BigQuery
update_bq_schema(bq_client, table_id, diffs, print_info=print_info)
# update the df schema to be as expected by BigQuery
df = update_df_schema(bq_client, table_id, diffs, df, print_info=print_info)
if cols_as_str:
df = cols_to_str(df)
# load to BigQuery with a retry
try:
#print(f'... loading to {project_id}:{destination_table} (if_exists={if_exists})')
df.to_gbq(destination_table, project_id=project_id, if_exists=if_exists)
except Exception as e:
print(e)
print(f'... retry loading to {project_id}:{destination_table} (if_exists={if_exists})')
df.to_gbq(destination_table, project_id=project_id, if_exists=if_exists)
return df
# hide
# tests
# make a dummy df
df = pd.DataFrame([['a0', 'b0'],['a1', 'b1']], columns=['col_a', 'col_b'])
# send to bq
df = df_to_gbq(df, 'tmp.tmp', project_id=bq_project_id, if_exists='replace', mode='wrangle')
# read back from bq
df_bq = pd.read_gbq("select * from tmp.tmp")
assert str(df) == str(df_bq)
# add a new col to df
df['col_c'] = ['c0', 'c1']
# drop col_b
df = df.drop(['col_b'], axis=1)
# save to bq
df = df_to_gbq(df, 'tmp.tmp', project_id=bq_project_id, if_exists='append', print_info=False, mode='wrangle')
# read back from bq
df_bq = pd.read_gbq("select * from tmp.tmp order by 1,2,3")
assert str(df_bq) == ' col_a col_b col_c\n0 a0 None c0\n1 a0 b0 None\n2 a1 None c1\n3 a1 b1 None'
# hide
# tests
# make a dummy df
df = pd.DataFrame([['a0', 'b0'],['a1', 'b1']], columns=['col_a', 'col_b'])
# send to bq
df = df_to_gbq(df, 'tmp.tmp', project_id=bq_project_id, if_exists='replace', mode='wrangle')
# read back from bq
df_bq = pd.read_gbq("select * from tmp.tmp")
assert str(df) == str(df_bq)
# add two new cols to df
df['col_c'] = ['c0', 'c1']
df['col_d'] = ['d0', 'd1']
# save to bq
df = df_to_gbq(df, 'tmp.tmp', project_id=bq_project_id, if_exists='append', print_info=False, mode='wrangle')
# read back from bq
df_bq = pd.read_gbq("select * from tmp.tmp order by 1,2,3,4")
assert str(df_bq) == ' col_a col_b col_c col_d\n0 a0 b0 None None\n1 a0 b0 c0 d0\n2 a1 b1 None None\n3 a1 b1 c1 d1'
###Output
1it [00:03, 3.18s/it]
Downloading: 100%|██████████| 2/2 [00:00<00:00, 13.11rows/s]
1it [00:02, 2.25s/it]
Downloading: 100%|██████████| 4/4 [00:00<00:00, 21.47rows/s]
|
UTSA/Lab2b-Pytorch_tutorial.ipynb | ###Markdown
What is PyTorch? It's a Python-based scientific computing package targeted at two sets of audiences: a replacement for NumPy that uses the power of GPUs, and a deep learning research platform that provides maximum flexibility and speed. Getting Started: Tensors. Tensors are similar to NumPy's ndarrays, with the addition being that Tensors can also be used on a GPU to accelerate computing. **Here are some high frequency operations you should get used to.**
###Code
import cv2
import numpy as np
%matplotlib inline
#The line above is necessary to show Matplotlib's plots inside a Jupyter Notebook
from matplotlib import pyplot as plt
from __future__ import print_function
import torch
###Output
_____no_output_____
###Markdown
Construct a 5x3 matrix, uninitialized, using torch.empty
###Code
x = torch.empty(5, 3)
print(x)
# other examples
torch.normal(0,1,[2,2])
torch.randperm(10)
torch.linspace(1,10,10)
###Output
_____no_output_____
###Markdown
Print out the size of a tensor. You will be doing this frequently when developing/debugging a neural network
###Code
x.size()
###Output
_____no_output_____
###Markdown
Construct a matrix filled with zeros. Here is a link to the available [types](https://pytorch.org/docs/stable/tensor_attributes.html#torch.torch.dtype). Can you change long to floating point 16 below? Hint: torch.zeros(5, 3, dtype=torch.float16)
###Code
x = torch.zeros(5, 3, dtype=torch.long)
print(x)
###Output
_____no_output_____
###Markdown
Element operations: examples of element-wise operations. Do an element-wise add of A and B
###Code
A = torch.rand(5, 3)
B = torch.rand(5, 3)
print(A)
print(B)
print(A + B)
###Output
tensor([[0.4264, 0.2224, 0.0761],
[0.8171, 0.4646, 0.7379],
[0.7076, 0.6894, 0.8530],
[0.9537, 0.5361, 0.7773],
[0.5330, 0.5835, 0.0498]])
tensor([[0.4726, 0.2186, 0.3730],
[0.8941, 0.0974, 0.9308],
[0.5303, 0.2047, 0.1537],
[0.4307, 0.0230, 0.6193],
[0.7310, 0.4267, 0.3674]])
tensor([[0.8989, 0.4410, 0.4491],
[1.7112, 0.5620, 1.6686],
[1.2378, 0.8941, 1.0066],
[1.3844, 0.5591, 1.3965],
[1.2640, 1.0103, 0.4172]])
###Markdown
Alternate method using torch.add
###Code
# more than one way to do it [operator overloading]
torch.add(A, B)
A.add(B)
###Output
tensor([[0.8989, 0.4410, 0.4491],
[1.7112, 0.5620, 1.6686],
[1.2378, 0.8941, 1.0066],
[1.3844, 0.5591, 1.3965],
[1.2640, 1.0103, 0.4172]])
###Markdown
Addition: providing an output tensor as argument
###Code
result = torch.empty(5, 3)
torch.add(A, B, out=result)
print(result)
###Output
_____no_output_____
###Markdown
Addition: in-place
###Code
#### adds A to B in-place
B.add_(A)
print(B)
###Output
_____no_output_____
###Markdown
Note: Any operation that mutates a tensor in-place is post-fixed with an ``_``. For example: ``x.copy_(y)``, ``x.t_()``, will change ``x``. Linear Alg operations: Matrix Multiply Example
###Code
a = torch.randint(4,(2,3))
b = torch.randint(4,(3,2))
print(a)
print(b)
# all equivalent!
# 2x3 @ 3x2 ~ 2x2
a.mm(b)
torch.matmul(a,b)
torch.mm(a,b)
a.T.mm(a)
###Output
_____no_output_____
###Markdown
Create a one-hot vector
###Code
batch_size = 5
nb_digits = 10
# Dummy input that HAS to be 2D for the scatter (you can use view(-1,1) if needed)
y = torch.LongTensor(batch_size,1).random_() % nb_digits
# One hot encoding buffer that you create out of the loop and just keep reusing
y_onehot = torch.FloatTensor(batch_size, nb_digits)
# In your for loop
y_onehot.zero_()
y_onehot.scatter_(1, y, 1)
print(y)
print(y_onehot)
###Output
tensor([[ 8],
[ 1],
[ 4],
[ 5],
[ 7]])
tensor([[ 0., 0., 0., 0., 0., 0., 0., 0., 1., 0.],
[ 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 1., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 1., 0., 0.]])
###Markdown
Use argmax to grab the index of the highest value
###Code
A = torch.rand(3,4,5)
print(A)
A.argmax(dim=2)
###Output
tensor([[[0.4791, 0.6427, 0.4850, 0.8918, 0.5046],
[0.3802, 0.1974, 0.8807, 0.3322, 0.3854],
[0.1450, 0.6710, 0.1567, 0.8760, 0.7838],
[0.9900, 0.4326, 0.6907, 0.3431, 0.7704]],
[[0.6741, 0.7869, 0.0707, 0.4947, 0.5580],
[0.4630, 0.4901, 0.9552, 0.2336, 0.9802],
[0.4147, 0.4719, 0.8765, 0.7198, 0.9070],
[0.8264, 0.2374, 0.0944, 0.4895, 0.1953]],
[[0.9034, 0.9578, 0.1266, 0.4522, 0.4032],
[0.2456, 0.0185, 0.1444, 0.8930, 0.6270],
[0.8786, 0.2491, 0.2291, 0.0037, 0.8230],
[0.9096, 0.9918, 0.5614, 0.6948, 0.9402]]])
###Markdown
Aggregation over a dimension
###Code
x = torch.ones([2,3,4])
# inplace multiply a selected column
x[0,:,0].mul_(30)
x
#Suppose the shape of the input is (m, n, k)
#If dim=0 is specified, the shape of the output is (1, n, k) or (n, k)
#If dim=1 is specified, the shape of the output is (m, 1, k) or (m, k)
#If dim=2 is specified, the shape of the output is (m, n, 1) or (m, n)
x.sum(dim=1)
###Output
_____no_output_____
###Markdown
Broadcasting
###Code
x = torch.ones([10,10])
y = torch.linspace(1,10,10)
print(x.size())
print(y.size())
z = x + y
### Masking
mask = z>4
print(mask.size())
mask
# Apply mask, but observe dim change
new =z[z>4]
print(new.size())
new
###Output
torch.Size([70])
###Markdown
You can use standard NumPy-like indexing with all bells and whistles! Example: Grab the middle column of A (index = 1)
###Code
A = torch.rand(3,3)
print(A)
print(A[:, 1])
###Output
tensor([[ 0.0591, 0.6838, 0.4621],
[ 0.7117, 0.8484, 0.3358],
[ 0.4537, 0.3042, 0.0450]])
tensor([ 0.6838, 0.8484, 0.3042])
###Markdown
Resizing: If you want to resize/reshape a tensor, you can use ``torch.view``:
###Code
x = torch.randn(4, 4)
y = x.view(16)
z = x.view(-1, 8) # the size -1 is inferred from other dimensions
print(x.size(), y.size(), z.size())
###Output
_____no_output_____
###Markdown
If you have a one element tensor, use ``.item()`` to get the value as a Python number
###Code
x = torch.randn(1)
print(x)
print(x.item())
###Output
_____no_output_____
###Markdown
**Read later:** 100+ Tensor operations, including transposing, indexing, slicing, mathematical operations, linear algebra, random numbers, etc., are described in the PyTorch docs. NumPy Bridge: Converting a Torch Tensor to a NumPy array and vice versa is a breeze. The Torch Tensor and NumPy array will share their underlying memory locations, and changing one will change the other. Converting a Torch Tensor to a NumPy Array
###Code
a = torch.ones(5)
print(a)
b = a.numpy()
print(b)
###Output
_____no_output_____
###Markdown
See how the numpy array changed in value.
###Code
a.add_(1)
print(a)
print(b)
###Output
_____no_output_____
###Markdown
Converting a NumPy Array to a Torch Tensor: see how changing the np array changes the Torch Tensor automatically
###Code
import numpy as np
a = np.ones(5)
b = torch.from_numpy(a)
np.add(a, 1, out=a)
print(a)
print(b)
###Output
_____no_output_____
###Markdown
All the Tensors on the CPU except a CharTensor support converting to NumPy and back. CUDA Tensors: Tensors can be moved onto any device using the ``.to`` method.
###Code
# let us run this cell only if CUDA is available
# We will use ``torch.device`` objects to move tensors in and out of GPU
x = torch.rand(2,2,2)
if torch.cuda.is_available():
device = torch.device("cuda") # a CUDA device object
y = torch.ones_like(x, device=device) # directly create a tensor on GPU
x = x.to(device) # or just use strings ``.to("cuda")``
z = x + y
print(z)
print(z.to("cpu", torch.double)) # ``.to`` can also change dtype together!
###Output
tensor([[[1.9808, 1.5752],
[1.1940, 1.8054]],
[[1.0169, 1.5552],
[1.6047, 1.3726]]], device='cuda:0')
tensor([[[1.9808, 1.5752],
[1.1940, 1.8054]],
[[1.0169, 1.5552],
[1.6047, 1.3726]]], dtype=torch.float64)
###Markdown
ND Tensors: When working with neural networks, you are always dealing with multidimensional arrays. Here are some quick tricks. Assume A is a 32x32 RGB image.
###Code
## 3D Tensors
import torch
A = torch.rand(32,32,3)
plt.imshow(A)
###Output
_____no_output_____
###Markdown
Slicing Tensors - grab 'RED' dimension
###Code
red_data = A[:,:,0] #0 represents the first channel of RGB
red_data.size()
###Output
_____no_output_____
###Markdown
Swap the RGB dimension and make the tensor a 3x32x32 tensor
###Code
A_rgb_first = A.permute(2,0,1)
print(A_rgb_first.size())
###Output
torch.Size([3, 32, 32])
###Markdown
Add a batch size dimension to our image tensor. Usually you need to do this to run inference on your trained model.
###Code
Anew = A.unsqueeze(0)
print(Anew.size())
###Output
torch.Size([1, 32, 32, 3])
torch.Size([32, 32, 3])
###Markdown
Drop the tensor dimension. Sometimes, like in the example above, you might have a tensor with one of the dimensions equal to one. Use **squeeze()** to drop that dimension.
###Code
print(Anew.squeeze(0).size())
###Output
torch.Size([32, 32, 3])
|
Repo-Ayudante/clases/clase_28_05_20_tp3/clase.ipynb | ###Markdown
Class 28/05/2020: TP3 exercises. Exercise 2: Obtain the Z(s) that corresponds to the following phase function: $ \phi_{(w)} = \tan^{-1} \frac{-w^5 + 5 w^3 - 2w}{2 w^4 - w^2 + 5}$ Hints: - Review the [campus notes](https://www.campusvirtual.frba.utn.edu.ar/especialidad/pluginfile.php/61513/mod_resource/content/2/FASC03conFiguras.pdf) and the [notebook](https://nbviewer.jupyter.org/github/agalbachicar/tc2/blob/master/notebooks/parte_de_funcion.ipynb) on extracting part of a function. - Be careful when going from $w$ to $s$! Exercise 5: The following Bode plot corresponds to the magnitude response of the transfer function of an emphasis network used in an FM broadcasting transmitter. Design the circuit and verify it by simulation. Hints: - What is an octave? - Which structure can be used to implement a bilinear transfer function? And a biquadratic one? Solution Ex. 2
###Code
# Insert an image or code here!
# For an image: <img src='path_to_my_image'>
# For Python code simply write it; if it is Markdown, change the cell type.
###Output
_____no_output_____
###Markdown
Solution Ex. 5
###Code
# Insert an image or code here!
# For an image: <img src='path_to_my_image'>
# For Python code simply write it; if it is Markdown, change the cell type.
###Output
_____no_output_____ |
project_eular.ipynb | ###Markdown
Problem 2
###Code
def solution(limit = 4000000):
s = 2
m = 1
n = 2
x = 0
while x <= limit:
x = m+n
if x%2 == 0:
s+=x
m = n
n = x
return s
###Output
_____no_output_____
###Markdown
Problem 3
###Code
def is_prime(s):
mm = s-1
while mm > 1:
if s%mm == 0:
return False
mm -= 1
return True
n = 1
x = 600851475143
while x != 1:
n+=1
while is_prime(n)==False:
n+=1
if x%n == 0:
x = x/n
print('factor',n)
print("largest prime factor is: {}".format(n))
###Output
factor 71
factor 839
factor 1471
factor 6857
largest prime factor is: 6857
###Markdown
Problem 4
###Code
x = 999
y = 999
largest = 0
while x>499:
while y>1:
n = x*y
if str(n) == str(n)[::-1]:
if n>largest:
largest = n
print(x,y,largest)
break
y-=1
y=999
x-=1
###Output
999 91 90909
995 583 580085
993 913 906609
###Markdown
Problem 5
###Code
def not_evenly_divisible_20(x):
if sum(x%y for y in [20,19,18,17,16,15,14,13,11])==0:
return False
return True
x = 2520
while not_evenly_divisible_20(x):
x+=20
x
###Output
_____no_output_____
###Markdown
Problem 6
###Code
sum([x for x in range(1,101)])**2 - sum([x**2 for x in range(1, 101)])
###Output
_____no_output_____
###Markdown
Problem 7
###Code
i = 1
x = 2
while i != 10001:
x+=1
if str(x)[-1] in ['2', '4', '6', '8', '0']:
continue
else:
if is_prime(x):
i+=1
if i % 1000 == 0:
print(x,i)
x, i
def get_nth_prime(n):
"""
The point here is: if a number `x` can't be evenly divided by any prime numbers that's smaller than `x`, then `x` is prime.
"""
prime_list = [2] # initiate the list with the first prime number
x = 3
while len(prime_list) < n:
prime = True
for i in prime_list:
if x % i == 0:
prime = False
x += 2 # even numbers>2 can't be prime
break # we don't want to waste time checking the rest
if prime:
prime_list.append(x)
x += 2
return prime_list
get_nth_prime(10001)[-1]
def get_n_prime(limit):
p_no = [2,3]
i = 3
n = p_no[-1] + 2
while len(p_no) <= limit:
prime = True
for i in p_no:
if n % i == 0:
prime = False
break
if prime: p_no.append(n)
n += 2
return p_no[-1]
get_n_prime(10000)
def str_prod(x):
n="""73167176531330624919225119674426574742355349194934
96983520312774506326239578318016984801869478851843
85861560789112949495459501737958331952853208805511
12540698747158523863050715693290963295227443043557
66896648950445244523161731856403098711121722383113
62229893423380308135336276614282806444486645238749
30358907296290491560440772390713810515859307960866
70172427121883998797908792274921901699720888093776
65727333001053367881220235421809751254540594752243
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725
71636269561882670428252483600823257530420752963450
"""
n = n.replace('\n', '')
n_proc = [x for x in n.split('0') if len(x) >=13]
p_max = 0
record = ''
for x in n_proc:
for i in range(len(x)-12):
slis = x[i: i+13]
p = 1
for e in slis:
p *= int(e)
if p > p_max:
p_max,record = p,slis
p_max, record
n = n.replace('\n', '')
p_max = 0
record = ''
for i in range(len(n)-13-1):
slis = n[i: i+13]
p = 1
for x in slis:
p *= int(x)
if p > p_max:
p_max,record = p,slis
p_max, record
[record in x for x in n_proc]
###Output
_____no_output_____
###Markdown
Problem 10
###Code
def get_limit_prime(n):
"""
The point here is: if a number `x` can't be evenly divided by any prime numbers that's smaller than `x`, then `x` is prime.
"""
prime_list = [2] # initiate the list with the first prime number
x = 3
while x < n:
prime = True
for i in prime_list:
if x % i == 0:
prime = False
x += 2 # even numbers>2 can't be prime
break # we don't want to waste time checking the rest
if prime:
prime_list.append(x)
x += 2
# if x % 50000 == 1:
# print(x)
return prime_list
n = 2000000
l = get_limit_prime(n)
sum(l)
###Output
_____no_output_____
###Markdown
Filter by correlation
###Code
def ftr_select_corr(X, corr_bar, iv_df):
"""
Drop features that have correlation higher than `corr_bar` and keep whichever has the higher iv according to `auc_df`.
"""
cols = list(X.columns)
cols_drop = []
corr_df = X.corr()
# print("corr_df created.")
i = 0
while i < len(cols) - 1:
col_1 = cols[i]
j = i + 1
while j < len(cols):
col_2 = cols[j]
corr = corr_df.loc[col_2, col_1]
if abs(corr) > corr_bar:
iv_1 = iv_df.query('feature == @col_1')['iv'].squeeze()
iv_2 = iv_df.query('feature == @col_2')['iv'].squeeze()
col_drop = col_2 if iv_1 > iv_2 else col_1
cols_drop.append(col_drop)
cols.remove(col_drop)
X = X.drop(col_drop, axis=1)
j -= 1
if col_drop == col_1:
i -= 1
break
j += 1
i += 1
return [X, cols_drop]
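# Minimal usage sketch on toy data (illustrative values only): iv_df is assumed to
# have 'feature' and 'iv' columns, matching the .query() calls above.
import numpy as np
import pandas as pd
X_demo = pd.DataFrame({'f1': np.arange(10.0),
                       'f2': np.arange(10.0) * 2 + 0.1,  # almost perfectly correlated with f1
                       'f3': [0.2, 0.8, 0.1, 0.9, 0.3, 0.7, 0.05, 0.95, 0.4, 0.6]})
iv_demo = pd.DataFrame({'feature': ['f1', 'f2', 'f3'], 'iv': [0.30, 0.10, 0.05]})
X_kept, dropped = ftr_select_corr(X_demo, corr_bar=0.9, iv_df=iv_demo)
dropped  # expected: ['f2'], since it correlates with f1 but has the lower iv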
###Output
_____no_output_____
###Markdown
1402. Reducing Dishes
###Code
def maxSatisfaction(satisfaction):
"""
Early stopping could be added to make the method more efficient.
"""
max_coef = 0
best_combo = []
satisfaction_sorted = sorted(satisfaction)
for n in range(1, len(satisfaction)+1):
coef = sum([satisfaction_sorted[-n:][i] * (i+1) for i in range(n)])
if coef > max_coef:
max_coef = coef
best_combo = satisfaction_sorted[-n:]
return max_coef
###Output
_____no_output_____
###Markdown
Recover a Tree From Preorder Traversal
###Code
s = "1-2--3---4-5--6---7"
s.split('-')
a,b = get_num_and_level(s)
c = get_parents(a,b)
a,b,c
def get_num_and_level(s):
"""
Given a string of the format, return the list of numbers contained in it, and the list of the levels of the numbers.
"""
s_split = s.split('-')
numbers = [int(s_split[0])]
levels = [0]
level_count = 1
for x in s_split[1:]:
try:
numbers.append(int(x))
levels.append(level_count)
level_count = 1
except:
level_count += 1
return numbers, levels
def get_parents(numbers, levels):
"""
Given the numbers and their corresponding levels, get the list of the parents of each number.
"""
parents = [numbers[0], numbers[0]]
for n in range(2,len(numbers)):
numbers_sub = numbers[:n+1]
levels_sub = levels[:n+1]
level_dict = dict(zip(numbers_sub, levels_sub))
x = numbers[n]
parents.append([y for y in numbers_sub if level_dict[y] < level_dict[x]][-1])
return parents
def convert_func(n_l, n_p_dict, l_max):
"""
Given the subset of numbers of a certain level, and the n_p_dict, covert the numbers to the desired list format.
"""
s = []
if len(n_l) == 1:
s = n_l
if l < l_max:
s.append('null')
else:
while len(n_l) > 1:
a = n_l[0]
b = n_l[1]
if n_p_dict[a] == n_p_dict[b]:
s.append(a)
s.append(b)
n_l.remove(a)
n_l.remove(b)
else:
s.append(a)
s.append('null')
n_l.remove(a)
if len(n_l) == 1:
s.append(n_l[0])
if l < l_max:
s.append('null')
return s
numbers, levels = get_num_and_level("1-2--3--4-5--6--7")
parents = get_parents(numbers,levels)
n_p_dict = dict(zip(numbers, parents))
n_l_dict = dict(zip(numbers, levels))
l_max = max(levels)
result = [numbers[0]]
for l in set(levels):
if l!=0:
n_l = [x for x in numbers if n_l_dict[x] == l]
result.extend(convert_func(n_l, n_p_dict, l_max))
result
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution(object):
def get_num_and_level(self, s):
"""
Given a string of the format, return the list of numbers contained in it, and the list of the levels of the numbers.
"""
s_split = s.split('-')
numbers = [int(s_split[0])]
levels = [0]
level_count = 1
for x in s_split[1:]:
try:
numbers.append(int(x))
levels.append(level_count)
level_count = 1
except:
level_count += 1
return numbers, levels
def get_parents(self, numbers, levels):
"""
Given the numbers and their corresponding levels, get the list of the parents of each number.
"""
parents = [numbers[0], numbers[0]]
for n in range(2,len(numbers)):
numbers_sub = numbers[:n+1]
levels_sub = levels[:n+1]
level_dict = dict(zip(numbers_sub, levels_sub))
x = numbers[n]
parents.append([y for y in numbers_sub if level_dict[y] < level_dict[x]][-1])
return parents
def convert_func(self, n_l, n_p_dict, l_max):
"""
Given the subset of numbers of a certain level, and the n_p_dict, covert the numbers to the desired list format.
"""
s = []
if len(n_l) == 1:
s = n_l
if l < l_max:
s.append('null')
else:
while len(n_l) > 1:
a = n_l[0]
b = n_l[1]
if n_p_dict[a] == n_p_dict[b]:
s.append(a)
s.append(b)
n_l.remove(a)
n_l.remove(b)
else:
s.append(a)
s.append('null')
n_l.remove(a)
if len(n_l) == 1:
s.append(n_l[0])
if l < l_max:
s.append('null')
return s
def recoverFromPreorder(self, S):
"""
:type S: str
:rtype: TreeNode
"""
numbers, levels = self.get_num_and_level(S)
if len(numbers)==1:
return numbers
if len(numbers)==2:
numbers.append('null')
return numbers
parents = self.get_parents(numbers,levels)
n_p_dict = dict(zip(numbers, parents))
n_l_dict = dict(zip(numbers, levels))
l_max = max(levels)
result = [numbers[0]]
for l in set(levels):
if l!=0:
n_l = [x for x in numbers if n_l_dict[x] == l]
result.extend(self.convert_func(n_l, n_p_dict, l_max))
return result
###Output
_____no_output_____
###Markdown
1375. Bulb Switcher III
###Code
light = [2,1,3,5,4]
for a,b in enumerate(light):
print(a,b)
def numTimesAllBlue(light):
n = len(light)
lit = [0] * n
blue = 0
for i in range(n):
x = light[i]
lit[x-1] = 1
if i>=1:
if x <= max(light[:i])+1:
if sum(lit[:i+1]) == i+1:
blue += 1
else:
if light[i] == 1:
blue +=1
return blue
numTimesAllBlue([4,1,2,3])
[1, 1, 0, 0, 0]
# def numTimesAllBlue(light):
# """
# :type light: List[int]
# :rtype: int
# """
# n = len(light)
# lit = [0] * n
# blue = 0
# for i in range(n):
# x = light[i]
# lit[x-1] = 1
# if sum(lit[:i+1]) == i+1:
# blue += 1
# return blue
###Output
_____no_output_____
###Markdown
1094. Car Pooling
###Code
trips = [[7,5,6],[6,7,8],[10,1,6]]
capacity = 16
pick_up = [x[1] for x in trips]
drop_off = [x[-1] for x in trips]
n = [x[0] for x in trips]
stops_on = dict(zip(pick_up, n))
stops_off = dict(zip(drop_off, n))
stops = set(pick_up + drop_off)
p = 0
for x in stops:
try:
p += stops_on[x]
except:
pass
try:
p -= stops_off[x]
except:
pass
if p > capacity:
print(False)
print(True)
stops = [0] * max(drop_off)
for i, x in enumerate(pick_up):
stops[x-1] += n[i]
for i, x in enumerate(drop_off):
stops[x-1] -= n[i]
stops
def carPooling(trips, capacity):
"""
:type trips: List[List[int]]
:type capacity: int
:rtype: bool
"""
pick_up = [x[1] for x in trips]
drop_off = [x[-1] for x in trips]
n = [x[0] for x in trips]
stops_on = dict(zip(pick_up, n))
stops_off = dict(zip(drop_off, n))
stops = set(pick_up + drop_off)
p = 0
for x in stops:
try:
p += stops_on[x]
except:
pass
try:
p -= stops_off[x]
except:
pass
if p > capacity:
print(x,p)
return False
return True
carPooling([[7,5,6],[6,7,8],[10,1,6]], 16)
###Output
5 17
###Markdown
679. 24 Game
###Code
6*6*2*6*1*6
a = 2
b = 3
def comb(a, b):
if a == 0:
return list(set([b, -b, 0]))
if b == 0:
return list(set([a, -a, 0]))
return list(set([a+b, a-b, a*b, a/b, b-a, b/a]))
i = [3,33,2,8]
results = []
for x in i:
i_1 = i.copy()
i_1.remove(x)
for y in i_1:
r_1 = comb(x, y)
for z in r_1:
i_2 = i_1.copy()
i_2.remove(y)
for a in i_2:
i_3 = i_2.copy()
i_3.remove(a)
i_3 = i_3[0]
r_2 = comb(z, a)
for b in r_2:
r_3 = comb(b, i_3)
results.extend(r_3)
for x in i:
i_1 = i.copy()
i_1.remove(x)
for y in i_1:
r_1 = comb(x, y)
for z in r_1:
i_2 = i_1.copy()
i_2.remove(y)
r_2 = comb(i_2[0], i_2[1])
for a in r_2:
r_3 = comb(z, a)
results.extend(r_3)
24 in set([round(x, 4) for x in results])
a = [2,1]
###Output
_____no_output_____
###Markdown
51. N-Queens
###Code
n = 4
def create_remove(remove_all, i):
remove_all.append(i)
remove_next = [i-1, i+1]
remove_next = [x for x in remove_next if (x>=0 and x<n)]
remove = list(set(remove_all + remove_next))
print('all,r',remove_all, remove)
return remove_all, remove
remove_all = []
remove_next = []
level = list(range(n))
solution = []
# for i_1 in level:
i_1 = 0
solution_i = [i_1]
remove_all, remove = create_remove(remove_all, i_1)
m = n
while m>0:
m-=1
temp = level.copy()
for x in remove:
temp.remove(x)
print('temp:', temp)
if len(temp)>0:
for i_m in temp:
remove_all, remove = create_remove(remove_all, i_m)
solution_i.append(i_m)
if len(solution_i) == n:
solution.append(solution_i)
i_1 = 0
solution_i = [i_1]
remove_all, remove = create_remove(remove_all, i_1)
while len(remove) < 4:
temp = level.copy()
for x in remove:
temp.remove(x)
remove_all, remove = create_remove(remove_all, i_m)
solution
n = 4
i = 0
space = [space]
possible = [[], [], [], []]
i = 0
for x in possible[i]:
solution.append(x)
possible_temp = possible.copy()
possible_temp = update_possible()
while i <= n:
i += 1
if len(solution) == n:
solutions.append(solution)
break
if len(possible[i]) == 0:
break
for y in possible_temp[i]:
solution.append(y)
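# A minimal, self-contained backtracking sketch for N-Queens (a different approach
# from the partial exploration above); each solution is returned as a list of
# column indices, one per row.
def solve_n_queens(n):
    solutions = []

    def backtrack(cols):
        row = len(cols)
        if row == n:
            solutions.append(cols[:])
            return
        for col in range(n):
            # a placement is safe if no earlier queen shares this column or a diagonal
            if all(col != c and abs(col - c) != row - r for r, c in enumerate(cols)):
                cols.append(col)
                backtrack(cols)
                cols.pop()

    backtrack([])
    return solutions

len(solve_n_queens(4))  # expected: 2 distinct solutions on a 4x4 board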
###Output
_____no_output_____ |
practices/MLP_MNIST.ipynb | ###Markdown
Data preprocessing
###Code
from keras.utils import np_utils
import numpy as np
np.random.seed(10)
from keras.datasets import mnist
(x_train_image, y_train_label), \
(x_test_image, y_test_label) = mnist.load_data()
x_Train = x_train_image.reshape(60000, 784).astype('float32')
x_Test = x_test_image.reshape(10000, 784).astype('float32')
x_Train_normalize = x_Train / 255
x_Test_normalize = x_Test / 255
###Output
_____no_output_____
###Markdown
Build the model
###Code
y_Train_OneHot = np_utils.to_categorical(y_train_label)
y_Test_OneHot = np_utils.to_categorical(y_test_label)
from keras.models import Sequential
from keras.layers import Dense
model = Sequential()
model.add(Dense(units=256,
input_dim=784,
kernel_initializer='normal',
activation='relu'))
model.add(Dense(units=10,
kernel_initializer='normal',
activation='softmax'))
print(model.summary())
###Output
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dense_1 (Dense) (None, 256) 200960
_________________________________________________________________
dense_2 (Dense) (None, 10) 2570
=================================================================
Total params: 203,530
Trainable params: 203,530
Non-trainable params: 0
_________________________________________________________________
None
###Markdown
Train the model
###Code
model.compile(loss='categorical_crossentropy',
optimizer='adam', metrics=['accuracy'])
train_history = model.fit(x = x_Train_normalize,
y = y_Train_OneHot,
validation_split = 0.2,
epochs = 10,
batch_size = 200,
verbose = 2)
import matplotlib.pyplot as plt
def show_train_history(train_history,
train,
validation):
plt.plot(train_history.history[train])
plt.plot(train_history.history[validation])
plt.title('Train History')
plt.ylabel(train)
plt.xlabel('Epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
show_train_history(train_history, 'acc', 'val_acc')
show_train_history(train_history, 'loss', 'val_loss')
###Output
_____no_output_____
###Markdown
Evaluate model accuracy on the test data
###Code
score = model.evaluate(x_Test_normalize, y_Test_OneHot)
print()
print('accuracy =', score[1])
###Output
10000/10000 [==============================] - 1s 54us/step
accuracy = 0.9757
###Markdown
Make predictions
###Code
prediction = model.predict_classes(x_Test)
prediction
def plot_images_labels_prediction(images,
labels,
prediction,
idx,
num = 10):
fig = plt.gcf()
fig.set_size_inches(12, 14)
if num > 25:
num = 25
for i in range(0, num):
ax = plt.subplot(5, 5, i+1)
ax.imshow(images[idx], cmap = 'binary')
title = f"label = {labels[idx]}"
if len(prediction) > 0:
title += f", predict = {prediction[idx]}"
ax.set_title(title, fontsize = 12)
ax.set_xticks([]); ax.set_yticks([])
idx += 1
plt.show()
plot_images_labels_prediction(x_test_image,
y_test_label,
prediction,
idx = 340)
###Output
_____no_output_____
###Markdown
Display the confusion matrix
###Code
import pandas as pd
pd.crosstab(y_test_label,
prediction,
rownames = ['label'],
colnames=['predic'])
df = pd.DataFrame({'label': y_test_label, 'predict': prediction})
df[:2]
df[(df.label == 5) & (df.predict == 3)]
plot_images_labels_prediction(x_test_image,
y_test_label,
prediction,
idx = 340,
num = 1)
del model
model = Sequential()
model.add(Dense(units = 1000,
input_dim = 784,
kernel_initializer = 'normal',
activation = 'relu'))
model.add(Dense(units=10,
kernel_initializer = 'normal',
activation = 'softmax'))
print(model.summary())
model.compile(loss='categorical_crossentropy',
optimizer='adam', metrics=['accuracy'])
train_history = model.fit(x = x_Train_normalize,
y = y_Train_OneHot,
validation_split = 0.2,
epochs = 10,
batch_size = 200,
verbose = 2)
show_train_history(train_history, 'acc', 'val_acc')
score = model.evaluate(x_Test_normalize,
y_Test_OneHot)
print()
print('accuracy =', score[1])
###Output
10000/10000 [==============================] - 1s 95us/step
accuracy = 0.9796
###Markdown
Add Dropout to the multilayer perceptron to avoid overfitting
###Code
from keras.layers import Dropout
del model
model = Sequential()
model.add(Dense(units = 1000,
input_dim = 784,
kernel_initializer = 'normal',
activation = 'relu'))
model.add(Dropout(0.5))
model.add(Dense(units = 10,
kernel_initializer = 'normal',
activation = 'softmax'))
print(model.summary())
model.compile(loss='categorical_crossentropy',
optimizer='adam', metrics=['accuracy'])
train_history = model.fit(x = x_Train_normalize,
y = y_Train_OneHot,
validation_split = 0.2,
epochs = 10,
batch_size = 200,
verbose = 2)
show_train_history(train_history, 'acc', 'val_acc')
score = model.evaluate(x_Test_normalize, y_Test_OneHot)
print()
print('accuracy =', score[1])
###Output
10000/10000 [==============================] - 1s 108us/step
accuracy = 0.9812
###Markdown
Build a multilayer perceptron model with two hidden layers
###Code
del model
model = Sequential()
model.add(Dense(units = 1000,
input_dim = 784,
kernel_initializer = 'normal',
activation = 'relu'))
model.add(Dropout(0.5))
model.add(Dense(units = 1000,
kernel_initializer = 'normal',
activation = 'relu'))
model.add(Dropout(0.5))
model.add(Dense(units = 10,
kernel_initializer = 'normal',
activation = 'softmax'))
print(model.summary())
model.compile(loss='categorical_crossentropy',
optimizer='adam', metrics=['accuracy'])
train_history = model.fit(x = x_Train_normalize,
y = y_Train_OneHot,
validation_split = 0.2,
epochs = 10,
batch_size = 200,
verbose = 2)
show_train_history(train_history, 'acc', 'val_acc')
score = model.evaluate(x_Test_normalize, y_Test_OneHot)
print()
print('accuracy =', score[1])
###Output
10000/10000 [==============================] - 2s 168us/step
accuracy = 0.9803
|
diet_optimizer_notebook.ipynb | ###Markdown
https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linprog.html
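For reference, `scipy.optimize.linprog` solves linear programs of the standard form below; the `c`, `A_ub`/`b_ub`, `A_eq`/`b_eq`, and `bounds` objects built in this notebook map directly onto these terms: $$\min_x \; c^T x \quad \text{subject to} \quad A_{ub} x \le b_{ub}, \quad A_{eq} x = b_{eq}, \quad l \le x \le u$$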
###Code
food_list = []
serving_list = []
price_list = []
calorie_list = []
carb_list = []
protein_list = []
fat_list = []
def add_food(name, serving, price, carbs, protein, fat):
food_list.append(name)
serving_list.append(serving)
price_list.append(price)
calorie_list.append(carbs*4 + protein*4 + fat*9)
carb_list.append(carbs)
protein_list.append(protein)
fat_list.append(fat)
###Output
_____no_output_____
###Markdown
ADD FOODS HERE
###Code
add_food("Quark (Magerstuffe Aldi)",100,0.60/5,4.1,12,0.2)
add_food("Berries (Aldi)",100,3/5,9.3,1.7,0.4)
add_food("Almonds (Aldi)",100,2.60/2,5.3,18,0.54)
###Output
_____no_output_____
###Markdown
===========
###Code
d = {'Food': food_list, 'Serving': serving_list,'Price':price_list,"Calories":calorie_list,"Carbs":carb_list,"Proteins":protein_list,"Fats":fat_list}
df = pd.DataFrame(data=d)
daily_calories = 469
daily_carbs_percentage = 14
daily_protein_percentage = 17
daily_fat_percentage = 69
daily_carbs = daily_carbs_percentage * daily_calories / 4
daily_protein = daily_protein_percentage * daily_calories / 4
daily_fat = daily_fat_percentage * daily_calories / 9
A_eq = []
b_eq = []
def add_equality_constrain(params, const):
A_eq.append(params)
b_eq.append(const)
A_ub = []
b_ub = []
def add_inequality_constrain(params, const):
A_ub.append(params)
b_ub.append(const)
bounds = []
for i in range(df.shape[0]):
bounds.append((0,None))
def add_bounds(food_index, min_servings, max_servings):
bounds[food_index] = (min_servings,max_servings)
df
###Output
_____no_output_____
###Markdown
ADD CONSTRAINTS HERE
###Code
c = list(df.Price)
#add_equality_constrain(list(df.Calories), daily_calories)
add_equality_constrain(list(df.Proteins), daily_protein)
add_equality_constrain(list(df.Carbs), daily_carbs)
add_equality_constrain(list(df.Fats), daily_fat)
#add_bounds(0, 0.5, None)
#add_bounds(1, 1, None)
#add_bounds(2, 0.5, None)
###Output
_____no_output_____
###Markdown
===============
###Code
if(len(A_eq) == 0):
A_eq = None
b_eq = None
print("A_eq is empty")
if(len(A_ub) == 0):
A_ub = None
b_ub = None
print("A_ub is empty")
res = linprog(c, A_eq=A_eq, b_eq=b_eq, A_ub=A_ub, b_ub=b_ub, method='revised simplex', options={'tol':1e+04})
if(res.success):
food_amount = round(res.x * df.Serving,4)
res_calories = round(res.x * df.Calories,4)
res_carbs = round(res.x * df.Carbs,4)
res_proteins = round(res.x * df.Proteins,4)
res_fats = round(res.x * df.Fats,4)
out = {'Food':df.Food, "Servings":res.x,'Amount g':food_amount,'Carbs':res_carbs,'Protein':res_proteins,'Fat':res_fats,'Calories':res_calories}
out_df = pd.DataFrame(data=out)
print(out_df)
print(100*"=")
total_price = round(sum(res.x * df.Price),2)
total_calories = round(sum(res.x * df.Calories),2)
total_carbs = round(sum(res.x * df.Carbs),2)
total_protein = round(sum(res.x * df.Proteins),2)
total_fat = round(sum(res.x * df.Fats),2)
print("Total price:",total_price,"Eur")
print("Total calories:",total_calories,'g of',daily_calories, 'g ---> Delta is', daily_calories - total_calories, 'g')
print("Total carbs:",total_carbs,'g of',daily_carbs, 'g ---> Delta is', daily_carbs - total_carbs, 'g')
print("Total protein:",total_protein,'g of',daily_carbs, 'g ---> Delta is', daily_protein - total_protein, 'g')
print("Total fat:",total_fat,'g of',daily_carbs, 'g ---> Delta is', daily_fat - total_fat, 'g')
else:
print("There is no solution")
print(res)
###Output
_____no_output_____ |
16_Likelihood_Method.ipynb | ###Markdown
Autograd
###Code
# Imports assumed by this notebook; autograd's numpy wrapper is used so that
# grad/jacobian/hessian can differentiate through the numpy operations below.
# negativeLogLikelihood, x, and muStar are assumed to be defined elsewhere.
import autograd.numpy as np
from autograd import grad, jacobian, hessian
import matplotlib.pyplot as plt
import scipy.stats as ss
from tqdm import tqdm
from glob import glob

# theta = np.random.beta(a=1,b=1,size=(4,))
theta = np.array([0.0, 1.0,
0.0,1.0, 0.0,1.0,
0.0,1.0, 0.0,1.0, 0.0,1.0, 0.0,1.0])
gradTheta = grad(negativeLogLikelihood(x),)
maes = []
for i in range(1500):
theta = theta - 0.001 * gradTheta(theta)
aes = 0
for i in range(int(len(theta)/2)):
aes += np.abs(theta[2*i] - muStar[i])
maes.append(aes/(len(theta)/2))
# jacobian_ = jacobian(negativeLogLikelihood(x))
# hessian_ = hessian(negativeLogLikelihood(x))
# for i in range(1000):
# j = jacobian_(theta)
# h = hessian_(theta)
# theta = theta + 0.001 * np.linalg.inv(h) @ j
# aes = np.abs(theta[0] - muStar[0]) + np.abs(theta[2] - muStar[1])
# maes.append(aes)
plt.plot(maes)
###Output
_____no_output_____
###Markdown
In terms of Bag Estimates
###Code
def logLikelihood(xi,mu,sigma,normalize):
LL = (-len(xi)/2 * np.log(2*np.pi*(sigma + 1e-8)**2) - (1/(2*(sigma + 1e-8)**2)) * np.sum((xi - mu)**2))
if normalize:
LL = LL * (1/len(xi))
return LL
def getChildren(idx,N):
if idx > N - 1:
return np.array([idx])
left = 2 * idx + 1
right = left + 1
return np.concatenate([getChildren(left,N),getChildren(right,N)])
def treeNegativeLogLikelihood(x,leafN,normalize=True):
def LL(leafMeans,bagSigma):
NBags = len(bagSigma)
NInternal_Nodes = np.floor(NBags/2)
ll = 0
for idx in range(NBags):
leafIndices = (getChildren(idx, NInternal_Nodes) - NInternal_Nodes).astype(int)
ln = leafN[leafIndices]
mu = np.dot(leafMeans[leafIndices],ln)/np.sum(ln)
sigma = bagSigma[idx]
ll = ll + logLikelihood(x[idx],mu,sigma,normalize)
return -1 * ll
return LL
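# Quick sanity check of the heap-style tree indexing (assuming 3 internal nodes and
# 4 leaves): node 0 is the root, so its leaf descendants are nodes 3..6.
getChildren(0, 3)  # expected: array([3, 4, 5, 6])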
###Output
_____no_output_____
###Markdown
Right now I'm assuming N = $2^j$ for some j. Generate Data
###Code
N = 7
N_Internal = int(np.floor((N)/2))
NLeaves = int(N - N_Internal)
bagMuStar = np.random.normal(loc=0,scale=10,size=NLeaves)
bagN = np.random.poisson(lam=10,size=NLeaves)
X = []
for level in range(3):
NBagsInLevel = 2**level
start = 2**level - 1
for bagNum in range(start,start+NBagsInLevel):
childrenIndices = (getChildren(bagNum,N_Internal) - N_Internal).astype(int)
childrenMus = bagMuStar[childrenIndices]
childrenNs = bagN[childrenIndices]
loc = np.dot(childrenMus, childrenNs) / np.sum(childrenNs)
scale = 2**level
X.append(np.random.normal(loc=loc,scale=scale,size=np.sum(childrenNs)))
###Output
_____no_output_____
###Markdown
Initialize as local estimates
###Code
mu = np.zeros(bagMuStar.shape)
sigma = np.ones(len(X))
for leafNum in range(NLeaves):
idx = N_Internal + leafNum
xi = X[idx]
mu[leafNum],sigma[idx] = ss.norm.fit(xi)
###Output
_____no_output_____
###Markdown
Run Algorithm
###Code
maes = []
gradNLL_mu = grad(treeNegativeLogLikelihood(X,bagN),0)
gradNLL_sigma = grad(treeNegativeLogLikelihood(X,bagN),1)
NIter= 1000
lr = 0.01
for i in tqdm(range(NIter),total=NIter):
if not i % 5000:
lr = lr * .5
deltaMu = gradNLL_mu(mu,sigma)
deltaSigma = gradNLL_sigma(mu,sigma)
mu = mu - lr * deltaMu
sigma = sigma - lr * deltaSigma
maes.append(np.mean(np.abs(mu - bagMuStar)))
plt.plot(maes)
maes = []
gradNLL_mu = grad(treeNegativeLogLikelihood(X,bagN,normalize=False),0)
gradNLL_sigma = grad(treeNegativeLogLikelihood(X,bagN,normalize=False),1)
NIter= 5000
lr = 0.01
for i in tqdm(range(NIter),total=NIter):
if not i % 5000:
lr = lr * .5
deltaMu = gradNLL_mu(mu,sigma)
deltaSigma = gradNLL_sigma(mu,sigma)
mu = mu - lr * deltaMu
sigma = sigma - lr * deltaSigma
maes.append(np.mean(np.abs(mu - bagMuStar)))
plt.plot(maes)
###Output
_____no_output_____
###Markdown
Try on real data
###Code
from multiinstance.data.realData import buildDataset
from multiinstance.utils import *
from multiinstance.agglomerative_clustering import AgglomerativeClustering
absErrs = {"local":[],
"global":[],
"likelihood":[]}
fileNames = glob("/data/dzeiberg/ClassPriorEstimation/rawDatasets/*.mat")
for fileName in tqdm(fileNames,total=len(fileNames)):
dsi = buildDataset(fileName,4,
alphaDistr=lambda: np.random.uniform(.01,.95),
nPDistr=lambda: 1 + np.random.poisson(100),
nUDistr=lambda: 1 + np.random.poisson(5000))
dsi = addTransformScores(dsi)
dsi = addGlobalEsts(dsi)
dsi.alphaHats,dsi.curves = getBagAlphaHats(dsi,numbootstraps=50)
dsi.numLeaves = dsi.alphaHats.shape[0]
dsi.numNodes = dsi.numLeaves + (dsi.numLeaves - 1)
dsi.numInternal = dsi.numNodes - dsi.numLeaves
dsi.mu = np.zeros(dsi.alphaHats.shape[0])
dsi.sigma = np.ones(dsi.numNodes)
dsi.leafN = np.ones_like(dsi.mu) * dsi.alphaHats.shape[1]
dsi.treeAlphaHats = [[] for _ in range(dsi.numNodes)]
for nodeNum in range(dsi.numInternal):
children = getChildren(nodeNum, dsi.numInternal)
leafNums = children - dsi.numInternal
pos,unlabeled = list(zip(*[getTransformScores(dsi,n) for n in leafNums]))
pos = np.concatenate(pos).reshape((-1,1))
unlabeled = np.concatenate(unlabeled).reshape((-1,1))
NEstimates = int(np.sum([dsi.leafN[l] for l in leafNums]))
dsi.treeAlphaHats[nodeNum],_ = getEsts(pos, unlabeled, NEstimates)
for leafNum in range(dsi.numLeaves):
nodeNum = leafNum + dsi.numInternal
dsi.treeAlphaHats[nodeNum] = dsi.alphaHats[leafNum]
dsi.mu[leafNum],dsi.sigma[nodeNum] = ss.norm.fit(dsi.treeAlphaHats[nodeNum])
maes = [np.mean(np.abs(dsi.mu - dsi.trueAlphas.flatten()))]
lr = 0.001
gradNLL_mu = grad(treeNegativeLogLikelihood(dsi.treeAlphaHats,dsi.leafN),0)
gradNLL_sigma = grad(treeNegativeLogLikelihood(dsi.treeAlphaHats,dsi.leafN),1)
NIter= 5000
for i in tqdm(range(NIter),total=NIter):
if not i % 1500:
lr = lr * .5
deltaMu = gradNLL_mu(dsi.mu,dsi.sigma)
deltaSigma = gradNLL_sigma(dsi.mu,dsi.sigma)
dsi.mu = dsi.mu - lr * deltaMu
dsi.sigma = dsi.sigma - lr * deltaSigma
maes.append(np.mean(np.abs(dsi.mu - dsi.trueAlphas.flatten())))
absErrs["local"].append(maes[0])
absErrs["likelihood"].append(maes[-1])
absErrs["global"].append(np.mean(np.abs(dsi.globalAlphaHats.mean() - dsi.trueAlphas.flatten())))
plt.plot(maes)
plt.hlines(absErrs["global"][-1],0,len(maes),color="black")
plt.title(fileName.split("/")[-1])
plt.show()
dsi.globalAlphaHats.mean()
dsi.sigma
dsi.curves.shape
plt.plot(dsi.curves[2,0])
dsi.alphaHats
dsi.trueAlphas[0]
###Output
_____no_output_____
###Markdown
Final Results
###Code
for k,v in absErrs.items():
print(k, "{:.3f}".format(np.mean(v)))
###Output
_____no_output_____ |
nlp_with_python_for_ml/Exercise Files/Ch05/05_10/Start/.ipynb_checkpoints/05_09-checkpoint.ipynb | ###Markdown
Building Machine Learning Classifiers: Evaluate Gradient Boosting with GridSearchCV **Grid-search:** Exhaustively search all parameter combinations in a given grid to determine the best model.**Cross-validation:** Divide a dataset into k subsets and repeat the holdout method k times where a different subset is used as the holdout set in each iteration. Read in text
###Code
import nltk
import pandas as pd
import re
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
import string
stopwords = nltk.corpus.stopwords.words('english')
ps = nltk.PorterStemmer()
data = pd.read_csv("SMSSpamCollection.tsv", sep='\t')
data.columns = ['label', 'body_text']
def count_punct(text):
count = sum([1 for char in text if char in string.punctuation])
return round(count/(len(text) - text.count(" ")), 3)
data['body_len'] = data['body_text'].apply(lambda x: len(x) - x.count(" "))
data['punct%'] = data['body_text'].apply(lambda x: count_punct(x))
def clean_text(text):
text = "".join([word.lower() for word in text if word not in string.punctuation])
tokens = re.split(r'\W+', text)
text = [ps.stem(word) for word in tokens if word not in stopwords]
return text
# TF-IDF
tfidf_vect = TfidfVectorizer(analyzer=clean_text)
X_tfidf = tfidf_vect.fit_transform(data['body_text'])
X_tfidf_feat = pd.concat([data['body_len'], data['punct%'], pd.DataFrame(X_tfidf.toarray())], axis=1)
# CountVectorizer
count_vect = CountVectorizer(analyzer=clean_text)
X_count = count_vect.fit_transform(data['body_text'])
X_count_feat = pd.concat([data['body_len'], data['punct%'], pd.DataFrame(X_count.toarray())], axis=1)
X_count_feat.head()
###Output
_____no_output_____
###Markdown
Exploring parameter settings using GridSearchCV
###Code
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import GridSearchCV
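# A minimal sketch of the grid search; the hyperparameter values below are
# illustrative assumptions, not a prescribed grid. Every combination is fit with
# 5-fold cross-validation and the top settings are reported.
gb = GradientBoostingClassifier()
param_grid = {
    'n_estimators': [100, 150],
    'max_depth': [7, 11, 15],
    'learning_rate': [0.1]
}
gs = GridSearchCV(gb, param_grid, cv=5, n_jobs=-1)
gs_fit = gs.fit(X_tfidf_feat, data['label'])
pd.DataFrame(gs_fit.cv_results_).sort_values('mean_test_score', ascending=False)[0:5]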
###Output
_____no_output_____ |
01.air_quality_prediction/03.Solution_with_LSTM.ipynb | ###Markdown
Import Libraries
###Code
#from __future__ import print_function
from pandas import read_csv
from pandas import DataFrame
from pandas import concat
from datetime import datetime
from matplotlib import pyplot
from math import sqrt
from numpy import concatenate
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error
from keras.layers import Dense, LSTM
from keras.models import Sequential
###Output
Using TensorFlow backend.
/home/burak/anaconda3/envs/intro_to_pytorch/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:526: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint8 = np.dtype([("qint8", np.int8, 1)])
/home/burak/anaconda3/envs/intro_to_pytorch/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:527: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint8 = np.dtype([("quint8", np.uint8, 1)])
/home/burak/anaconda3/envs/intro_to_pytorch/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:528: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint16 = np.dtype([("qint16", np.int16, 1)])
/home/burak/anaconda3/envs/intro_to_pytorch/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:529: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint16 = np.dtype([("quint16", np.uint16, 1)])
/home/burak/anaconda3/envs/intro_to_pytorch/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:530: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint32 = np.dtype([("qint32", np.int32, 1)])
/home/burak/anaconda3/envs/intro_to_pytorch/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:535: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
np_resource = np.dtype([("resource", np.ubyte, 1)])
###Markdown
Preprocessing
###Code
inputFile = 'data.csv'
def parse(x):
#function for parsing data into required format
return datetime.strptime(x, '%Y %m %d %H')
df = read_csv(inputFile,
parse_dates = [['year', 'month', 'day', 'hour']],
index_col=0,
date_parser=parse)
df.head()
df.drop('No', axis=1, inplace=True)
df.head()
# manually specify column names
df.columns = ['pollution', 'dew', 'temp', 'press', 'wnd_dir', 'wnd_spd', 'snow', 'rain']
df.index.name = 'date'
df.head()
# mark all NA values with 0
df['pollution'].fillna(0, inplace=True)
df.head()
# drop the first 24 hours as it has 0 value
df = df[24:]
df.head()
def visualize(df,groups):
# plot columns definded in "groups" from inputFile
values = df.values
i = 1
# plot each column
pyplot.figure()
for group in groups:
pyplot.subplot(len(groups), 1, i)
pyplot.plot(values[:, group])
pyplot.title(df.columns[group], y=0.5, loc='right')
i += 1
pyplot.show()
#call function for visualizing data
visualize(df,[0, 1, 2, 3, 5, 6, 7])
def convert_timeseries(data, n_in=1, n_out=1, dropnan=True):
#convert timeseries data to t-n to t-1 form
#n defines how many previous values should be taken into consideration
n_vars = 1 if type(data) is list else data.shape[1]
df = DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
agg = concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
###Output
_____no_output_____
###Markdown
Actual code starts here
###Code
# load dataset
values = df.values
print(values)
# encode direction into integer
encoder = LabelEncoder()
values[:,4] = encoder.fit_transform(values[:,4])
print(values[:,4])
# ensure all data is float
values = values.astype('float32')
print(values)
# normalize features
scaler = MinMaxScaler(feature_range=(0, 1))
scaled = scaler.fit_transform(values)
print(scaled)
# frame as supervised learning
reframed = convert_timeseries(scaled, 1, 1)
print(reframed.head())
# drop columns we don't want to predict
# need to change this if we change N or change dataset
reframed.drop(reframed.columns[[9,10,11,12,13,14,15]], axis=1, inplace=True)
print(reframed.head())
###Output
var1(t-1) var2(t-1) var3(t-1) var4(t-1) var5(t-1) var6(t-1) \
1 0.129779 0.352941 0.245902 0.527273 0.666667 0.002290
2 0.148893 0.367647 0.245902 0.527273 0.666667 0.003811
3 0.159960 0.426471 0.229508 0.545454 0.666667 0.005332
4 0.182093 0.485294 0.229508 0.563637 0.666667 0.008391
5 0.138833 0.485294 0.229508 0.563637 0.666667 0.009912
var7(t-1) var8(t-1) var1(t)
1 0.000000 0.0 0.148893
2 0.000000 0.0 0.159960
3 0.000000 0.0 0.182093
4 0.037037 0.0 0.138833
5 0.074074 0.0 0.109658
###Markdown
Splitting Dataset
###Code
# split into train and test sets
values = reframed.values
print(values)
n_train_hours = 365 * 24 #1 year
train = values[:n_train_hours, :]
test = values[n_train_hours:, :]
print('train shape: ', train.shape)
print('test shape: ',test.shape)
# split into input and outputs
train_X= train[:, :-1]
train_y= train[:, -1]
test_X= test[:, :-1]
test_y= test[:, -1]
print('train_X shape: ', train_X.shape)
print('test_X shape: ',test_X.shape)
print('train_y shape: ', train_y.shape)
print('test_y shape: ',test_y.shape)
print(train_X)
# reshape input to be 3D [samples, timesteps, features]
train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)
###Output
(8760, 1, 8) (8760,) (35039, 1, 8) (35039,)
###Markdown
Create the LSTM Model
###Code
# design network
model = Sequential()
model.add(LSTM(50, input_shape=(train_X.shape[1], train_X.shape[2])))
model.add(Dense(1))
model.compile(loss='mae', optimizer='adam')
# fit network
history = model.fit(train_X,
train_y,
epochs=50,
batch_size=72,
validation_data=(test_X, test_y),
verbose=2,
shuffle=False)
# plot history
pyplot.plot(history.history['loss'], label='Training Loss')
pyplot.plot(history.history['val_loss'], label='Validation Loss')
pyplot.legend()
pyplot.show()
###Output
_____no_output_____
###Markdown
Make a Prediction
###Code
# make a prediction
yhat = model.predict(test_X)
test_X = test_X.reshape((test_X.shape[0], test_X.shape[2]))
print(yhat)
print(yhat.shape)
print(test_X)
print(test_X.shape)
# invert scaling for forecast to revert data into original form
inv_yhat = concatenate((yhat, test_X[:, 1:]), axis=1)
print(inv_yhat)
inv_yhat = scaler.inverse_transform(inv_yhat)
print(inv_yhat)
# Actual input
inv_xp=inv_yhat[:,1:]
#predicted output
inv_yhat = inv_yhat[:,0]
print('inv_xp: ', inv_xp)
print('inv_yhat: ', inv_yhat)
# invert scaling for actual
test_y = test_y.reshape((len(test_y), 1))
inv_y = concatenate((test_y, test_X[:, 1:]), axis=1)
inv_y = scaler.inverse_transform(inv_y)
#Actual output
inv_y = inv_y[:,0]
print("Actual Input:")
print(inv_xp)
print("Actual Output:")
print(inv_y)
#predicted output will be offset by 1
print("Predicted Output:")
print(inv_yhat)
# calculate RMSE
rmse = sqrt(mean_squared_error(inv_y, inv_yhat))
print('Test RMSE: %.3f' % rmse)
###Output
Test RMSE: 26.630
|
SAS_Viya_Explainable_ML.ipynb | ###Markdown
This is the notebook associated with the blog post titled Interactive Explainable Machine Learning with SAS Viya, Streamlit and Docker. Install SWAT if you haven't done so already. Import the required modules
###Code
#!pip install swat
from swat import CAS, options
import pandas as pd
import numpy as np
###Output
_____no_output_____
###Markdown
Connect to CAS and load the required action sets
###Code
host = ""
port = ""
username = ""
password = ""
s = CAS(host, port, username, password)
s.loadActionSet('autotune')
s.loadactionset('aStore')
s.loadactionset('decisionTree')
s.loadactionset("explainModel")
s.loadactionset('table')
###Output
NOTE: Added action set 'autotune'.
NOTE: Added action set 'aStore'.
NOTE: Added action set 'decisionTree'.
NOTE: Added action set 'explainModel'.
NOTE: Added action set 'table'.
###Markdown
Load and inspect the dataset
###Code
hmeq = pd.read_csv('hmeq.csv')
hmeq
###Output
_____no_output_____
###Markdown
Load the dataframe into a CAS table, then train a model and perform hyperparameter optimization
###Code
s.upload(hmeq,casout={'name' : 'hmeqTest', 'caslib' : 'public','replace' : True})
result = s.autotune.tuneGradientBoostTree(
trainOptions = {
"table" : {"name":'hmeqTest', 'caslib' : 'public'},
"inputs" : {'LOAN','MORTDUE','VALUE','YOJ','DEROG','DELINQ','CLAGE','NINQ','CLNO','DEBTINC','REASON', 'JOB'},
"target" : 'BAD',
"nominal" : {'BAD','REASON', 'JOB'},
"casout" : {"name":"gradboosthmeqtest", "caslib":"public",'replace':True},
"varImp" : True
},
tunerOptions={"seed":12345, "maxTime":60}
)
###Output
WARNING: The table HMEQTEST exists as a global table in caslib public. By adding a session table with the same name, the session-scope table takes precedence over the global-scope table.
NOTE: Cloud Analytic Services made the uploaded file available as table HMEQTEST in caslib public.
NOTE: The table HMEQTEST has been created in caslib public from binary data uploaded to Cloud Analytic Services.
NOTE: Autotune is started for 'Gradient Boosting Tree' model.
NOTE: Autotune option SEARCHMETHOD='GA'.
NOTE: Autotune option MAXTIME=60 (sec.).
NOTE: Autotune option SEED=12345.
NOTE: Autotune objective is 'Misclassification Error Percentage'.
NOTE: Early stopping is activated; 'NTREE' will not be tuned.
NOTE: Autotune number of parallel evaluations is set to 4, each using 0 worker nodes.
NOTE: Automatic early stopping is activated with STAGNATION=4; set EARLYSTOP=false to deactivate.
Iteration Evals Best Objective Elapsed Time
0 1 19.966 1.08
1 25 7.6063 17.50
2 47 7.047 39.90
3 68 6.5996 60.00
NOTE: Autotune process reached maximum tuning time.
WARNING: Objective evaluation 68 was terminated.
WARNING: Objective evaluation 66 was terminated.
WARNING: Objective evaluation 67 was terminated.
NOTE: Data was partitioned during tuning, to tune based on validation score; the final model is trained and scored on all data.
NOTE: The number of trees used in the final model is 30.
NOTE: Autotune time is 64.60 seconds.
###Markdown
Promote the table with the training data, export the astore, and promote the astore to global scope so that both remain available outside this session. This is important for the Streamlit portion.
###Code
s.table.promote(name="hmeqTest", caslib='public',target="hmeqTest",targetLib='public')
modelAstore = s.decisionTree.dtreeExportModel(modelTable = {"caslib":"public","name":"gradboosthmeqtest" },
casOut = {"caslib":"public","name":'hmeqTestAstore','replace':True})
s.table.promote(name='hmeqTestAstore', caslib='public',target='hmeqTestAstore',targetLib='public')
###Output
_____no_output_____
###Markdown
Let's test out the model. Create a sample observation, convert it to a pandas DataFrame and then to a CAS table, and score it against the model.
###Code
#Convert dictionary of input data to a pandas DataFrame (a tabular data format for scoring)
datadict = {'LOAN':140,'MORTDUE':3000, 'VALUE':40000, 'REASON':'HomeImp','JOB':'Other','YOJ':12,
'DEROG':0.0,'DELINQ':0.0, 'CLAGE':89,'NINQ':1.0, 'CLNO':10.0, 'DEBTINC':0.05}
###Output
_____no_output_____
###Markdown
Create a small helper function to convert the Python dictionary to a pandas DataFrame. This could be done with a single line of code, but the data types end up changing; hence this slightly verbose function.
###Code
def dicttopd(datadict):
for key in datadict:
datadict[key] = [datadict[key]]
return pd.DataFrame.from_dict(datadict)
samplepd = dicttopd(datadict)
samplepd
###Output
_____no_output_____
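###Markdown
As a point of comparison (an illustrative sketch only, not what the notebook uses), a single-row DataFrame can also be built in one line; whether the column dtypes come out as intended depends on the construction, which is the issue the helper above works around.
###Code
# hypothetical record for illustration; compare alt.dtypes with samplepd.dtypes
alt_record = {'LOAN': 140, 'MORTDUE': 3000, 'REASON': 'HomeImp'}
alt = pd.DataFrame([alt_record])  # one-line, row-wise construction
alt.dtypes
###Output
_____no_output_____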
###Markdown
score this against the model
###Code
s.upload(samplepd,casout={'name' : 'realtime', 'caslib' : 'public','replace' : True})
s.aStore.score(rstore = {"caslib":"public","name":"hmeqTestAstore"},
table = {"caslib":'public',"name":'realtime'},
out = {"caslib":'public',"name":'realscore', 'replace':True})
###Output
NOTE: Cloud Analytic Services made the uploaded file available as table REALTIME in caslib public.
NOTE: The table REALTIME has been created in caslib public from binary data uploaded to Cloud Analytic Services.
###Markdown
Inspect the scores
###Code
scoredData = s.CASTable(name='realscore',caslib='public')
datasetDict = scoredData.to_dict()
scores = pd.DataFrame(datasetDict, index=[0])
scores
###Output
_____no_output_____
###Markdown
Convert this to a neat little function for later use in the app
###Code
def score(samplepd):
s.upload(samplepd,casout={'name' : 'realtime', 'caslib' : 'public','replace' : True})
s.aStore.score(rstore = {"caslib":"public","name":"hmeqTestAstore"},
table = {"caslib":'public',"name":'realtime'},
out = {"caslib":'public',"name":'realscore', 'replace':True})
#scoretable2= s.table.fetch(score_tableName)
scoredData = s.CASTable(name='realscore',caslib='public')
datasetDict = scoredData.to_dict()
scores = pd.DataFrame(datasetDict, index=[0])
return scores
###Output
_____no_output_____
###Markdown
Test to make sure this works
###Code
score(samplepd)
###Output
NOTE: Cloud Analytic Services made the uploaded file available as table REALTIME in caslib public.
NOTE: The table REALTIME has been created in caslib public from binary data uploaded to Cloud Analytic Services.
###Markdown
Let's add the I_BAD value to the 'BAD' field in sample pd
###Code
samplepd['BAD'] = scores.I_BAD.to_list()
samplepd
###Output
_____no_output_____
###Markdown
Get interpretability scores with the linearExplainer action (loaded as part of the explainModel action set), using its Kernel SHAP preset
###Code
s.upload(samplepd,casout={'name' : 'realtime', 'caslib' : 'public','replace' : True})
shapvals = s.linearExplainer(
table = {"name" : 'hmeqTest','caslib':'public'},
query = {"name" : 'realtime','caslib':'public'},
modelTable = {"name" :"hmeqTestAstore",'caslib':'public'},
modelTableType = "ASTORE",
predictedTarget = 'P_BAD1',
seed = 1234,
preset = "KERNELSHAP",
inputs = ['LOAN','MORTDUE','VALUE','YOJ','DEROG','DELINQ','CLAGE','NINQ','CLNO','DEBTINC','REASON', 'JOB','BAD'],
nominals = ['REASON', 'JOB','BAD']
)
shap1 = shapvals['ParameterEstimates']
shap = shap1[['Variable','Estimate']][0:10]
###Output
NOTE: Cloud Analytic Services made the uploaded file available as table REALTIME in caslib public.
NOTE: The table REALTIME has been created in caslib public from binary data uploaded to Cloud Analytic Services.
NOTE: Starting the Linear Explainer action.
WARNING: Unseen level in query variable 'BAD'.
NOTE: The generated number of samples is automatically set to 6500.
NOTE: Generating kernel weights.
NOTE: Kernel weights generated.
###Markdown
Inspect the results
###Code
shap
!pip install altair
import altair as alt
alt.Chart(shap).mark_bar().encode(
x='Variable',
y='Estimate'
)
###Output
_____no_output_____ |
Titanic Clean.ipynb | ###Markdown
Load data
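The cells below assume that pandas, numpy, matplotlib/seaborn and several scikit-learn utilities have already been imported; a minimal setup cell along these lines is assumed.
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.metrics import (confusion_matrix, precision_score, recall_score,
                             precision_recall_curve, roc_auc_score)
###Output
_____no_output_____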
###Code
df = pd.read_csv('train.csv')
df_predict = pd.read_csv('predict.csv')
###Output
_____no_output_____
###Markdown
Split data into training and testing sets
###Code
#all columns except PassengerId and Survived
X = df.iloc[:,2:]
# only survived column
y = pd.DataFrame(df['Survived'])
Xtrain1, Xtest1, ytrain1,ytest1 = train_test_split(X,y, test_size = 0.2, random_state = 42, stratify =y)
df_train = pd.merge(Xtrain1, ytrain1, left_index= True, right_index = True)
df_test = pd.merge(Xtest1, ytest1, left_index= True, right_index = True)
df_train.shape, df_test.shape
df_train.head(3)
###Output
_____no_output_____
###Markdown
Visualization
###Code
# Gender
pivot_gender = df_train.pivot_table(index="Sex",values="Survived")
pivot_gender.plot.bar()
plt.show()
# Gender & Class
pivot_gender = df_train.pivot_table(index=["Pclass","Sex"],values="Survived")
pivot_gender.plot.bar()
plt.show()
# Agegroup
df_train['Age_group'] =(df_train['Age'] // 10*10)
pivot_age = df_train.pivot_table(index="Age_group",values="Survived")
pivot_age.plot.bar()
plt.show()
# Class
pivot_class= df_train.pivot_table(index="Pclass",values="Survived")
pivot_class.plot.bar()
plt.show()
# Title
df_train['Titles'] = df_train['Name'].str.split(r'\s*,\s*|\s*\.\s*').str[1]
pivot_class= df_train.pivot_table(index="Titles",values="Survived")
pivot_class.plot.bar()
plt.show()
df_train.drop(['Titles'], axis=1, inplace=True)
###Output
_____no_output_____
###Markdown
Clean Data
###Code
# missing data
miss = df_train.isna().sum()
miss_perc =(1-df_train.notnull().mean())*100
pd.concat([miss,round(miss_perc)], axis=1, keys=['missing', 'in %'])
def cleaning(dataframe):
### Age
#for dataset in dataframe:
mean = dataframe['Age'].mean()
std = dataframe['Age'].std()
is_null = dataframe['Age'].isnull().sum()
# compute random numbers
rand_age = np.random.randint(mean - std, mean + std, size = is_null)
# fill NaN values in Age column
age_slice = dataframe['Age'].copy()
age_slice[np.isnan(age_slice)] = rand_age
dataframe['Age'] = age_slice
dataframe['Age'] = dataframe['Age'].astype(int)
    ### recode gender as a numeric column (female = 1, male = 0)
f = dataframe['Sex'] == 'female'
dataframe['Gender'] =f.astype(int)
### defining age groups
def agegroup(row):
# age 0-5
if row < 6:
return 1
# age 6-14
elif row < 15:
return 2
# age 15-36
elif row < 37:
return 3
# age 37-55
elif row < 56:
return 4
# 56-80
elif row < 81:
return 5
else:
return 6
dataframe['Agegroup'] = dataframe['Age'].apply(agegroup)
### recoding the cabin into floor
# to get starting letter: df_train['Cabin'].str[:1].unique()
characters = ('N','A','B','C','D','E','F','G','T')
numbers = ('0','1','2','3','4','5','6','7','8')
df_deck = pd.DataFrame(
{'Deck': characters,
'Deck_No': numbers})
#replace NaN with N as cabin
dataframe['Cabin'].fillna('N', inplace=True)
#Cabin indicator as column
dataframe['Deck'] = dataframe['Cabin'].str[:1]
#merge with lookup of level aka deck no
dataframe = pd.merge(dataframe,df_deck,
on='Deck',
how='left')
### Embarking ports
port_dict = {'Q': 1, 'C': 2, 'S':3 }
dataframe['Ports'] = dataframe['Embarked'].map(port_dict)
dataframe['Ports'] = dataframe['Ports'].fillna(1)
### titles
    # how it works: split the name on the comma and period separators and take the
    # second piece, e.g. 'Braund, Mr. Owen Harris' -> 'Mr'
dataframe['Title'] = dataframe['Name'].str.split(r'\s*,\s*|\s*\.\s*').str[1]
titles_dummies = pd.get_dummies(dataframe['Title'], prefix='Title')
dataframe = pd.concat([dataframe, titles_dummies], axis=1)
### "Women and children first"
dataframe.loc[( (dataframe['Sex'] == 'female') & (dataframe['Age'] >= 15) ), 'Category_Code'] = 1
dataframe.loc[( (dataframe['Sex'] == 'male') & (dataframe['Age'] >= 15) ), 'Category_Code'] = 2
dataframe.loc[( dataframe['Age'] < 15 ), 'Category_Code'] = 3
return dataframe
df_train = cleaning(df_train)
df_test = cleaning(df_test)
# dropping unused columns
def dropping(dataframe):
dataframe.drop(['Sex','Name','Ticket','Cabin','Deck','Embarked'], axis=1, inplace=True)
return dataframe
df_train = dropping(df_train)
df_test = dropping(df_test)
###Output
_____no_output_____
###Markdown
Build a Logistic Regression model
###Code
# Define X and Y for train data
y_train = df_train['Survived']
X_train = df_train[['Age', 'Pclass', 'SibSp', 'Parch', 'Fare','Deck_No','Gender',
'Title_Master','Title_Miss','Title_Mr','Title_Mrs','Title_Rev',
'Agegroup','Ports','Category_Code'
]]
m = LogisticRegression()
m.fit(X_train,y_train)
m.intercept_, m.coef_
m.score(X_train,y_train)
# Define X and Y for test data
y_test = df_test['Survived']
X_test = df_test[['Age', 'Pclass', 'SibSp', 'Parch', 'Fare','Deck_No','Gender',
'Title_Master','Title_Miss','Title_Mr','Title_Mrs','Title_Rev',
'Agegroup','Ports','Category_Code'
]]
m.score(X_test,y_test)
df_predict=pd.DataFrame(m.predict_proba(X_train), columns = ['Survived_No', 'Survived_Yes'])
df_predict.round(decimals=2)
df_predict.head(3)
###Output
_____no_output_____
###Markdown
Merge Data
###Code
df_predict2 = pd.merge(df_test,df_predict, left_index= True, right_index = True)
#df_predict2.head(3)
y_pred =m.predict(X_train)
confusion_matrix(y_pred, y_train)
precision_score(y_pred=y_pred, y_true=y_train)
recall_score(y_true=y_train,y_pred=y_pred)
###Output
_____no_output_____
###Markdown
Precision-Recall-Curve
###Code
y_pred_prob = m.predict_proba(X_train)[:,1]
precision, recall, thresholds = precision_recall_curve(y_train, y_pred_prob)
plt.plot(precision, recall)
plt.xlabel('precision')
plt.ylabel('recall')
plt.title('Precision-Recall-Curve')
plt.show()
###Output
_____no_output_____
###Markdown
Random Forest
###Code
rf_model = RandomForestRegressor (n_estimators=100, oob_score=True, random_state=42 )
rf_model.fit(X_test,y_test)
rf_model.oob_score_
y_oob = rf_model.oob_prediction_
"C-stat: ", roc_auc_score(y_test, y_oob)
###Output
_____no_output_____
###Markdown
Crossvalidation
###Code
scores = cross_val_score(rf_model, X_test,y_test, cv=5)
print(scores)
print(sum(scores)/5)
for ntrees in range(1,20,3):
for depth in range (1,11):
rf_model = RandomForestClassifier(max_depth=depth, n_estimators=ntrees)
scores = cross_val_score(rf_model, X_test,y_test, cv=5)
print(ntrees, depth, sum(scores)/5)
rf_model = RandomForestClassifier(random_state=42)
params = {
'max_depth':[2,3,4,5,6],
'n_estimators':[1,3,5,7,10,15,20]
}
g = GridSearchCV(rf_model, param_grid=params)
g.fit (X_test,y_test)
g.score(X_test,y_test)
g.best_params_
g.cv_results_['mean_test_score'].reshape((5,7))
mtx= g.cv_results_['mean_test_score'].reshape((5,7))
sns.heatmap(mtx)
###Output
_____no_output_____ |
Visualization/Lux_college_demo.ipynb | ###Markdown
This dataset contains 1295 records of American colleges and their properties, collected by the [US Department of Education](https://collegescorecard.ed.gov/data/documentation/).
###Code
import pandas as pd
import lux
# Collecting basic usage statistics for Lux (For more information, see: https://tinyurl.com/logging-consent)
#lux.logger = True # Remove this line if you do not want your interactions recorded
df = pd.read_csv("college.csv")
df
###Output
_____no_output_____
###Markdown
We see that ACTMedian and SATAverage are very strongly correlated. This means that we could probably keep just one of the columns and still get about the same information. So let's drop the ACTMedian column.
###Code
df = df.drop(columns=["ACTMedian"])
df
###Output
_____no_output_____
###Markdown
From the Category tab, we see that there are few records where `PredominantDegree` is "Certificate". In addition, there are not a lot of colleges with "Private For-Profit" as `FundingModel`. We can take a look at this by inspecting the `Series` corresponding to the column `PredominantDegree`. Note that Lux not only helps with visualizing dataframes, but also displays visualizations of Series objects.
###Code
df["PredominantDegree"]
df[df["PredominantDegree"]=="Certificate"].to_pandas()
###Output
_____no_output_____
###Markdown
Upon inspection, there is only a single record for Certificate. Looking at the [webpage for programs offered at Cleveland State Community College](http://catalog.clevelandstatecc.edu/content.php?catoid=2&navoid=90), it offers a large number of associate as well as certificate degrees, so this record is more appropriately labelled as "Associate" for the `PredominantDegree` field.
###Code
df.loc[df["PredominantDegree"]=="Certificate","PredominantDegree"] = "Associate"
###Output
_____no_output_____
###Markdown
By inspecting the subset of 9 colleges that are "Private For-Profit", we do not find any commonalities across them, so we can just leave the data as-is for now.
###Code
df[df["FundingModel"]=="Private For-Profit"]
###Output
_____no_output_____
###Markdown
Back to looking at the entire dataset:
###Code
df
###Output
_____no_output_____
###Markdown
We are interested in picking a college to attend and want to understand the `AverageCost` of attending different colleges and how that relates to other information in the dataset.
###Code
df.intent = ["AverageCost"]
df
###Output
_____no_output_____
###Markdown
We see that there are a large number of colleges that cost around $20000 per year. We also see that Bachelor degree colleges, and colleges in New England and large cities, tend to have a higher `AverageCost` than their counterparts. We are interested in the trend of `AverageCost` vs. `SATAverage`, since there is a rough upwards relationship above an `AverageCost` of $30000, but below that the trend is less clear.
###Code
df.intent = ["AverageCost","SATAverage"]
df
###Output
_____no_output_____ |
backend_tests/.ipynb_checkpoints/test-checkpoint.ipynb | ###Markdown
My first automatic Jupyter Notebook. This is an auto-generated notebook.
###Code
%pylab inline
hist(normal(size=2000), bins=50);
###Output
_____no_output_____ |
notebooks/Calculating radial coordinates.ipynb | ###Markdown
The InfiniteHMM class is capable of reading a GROMACS trajectory file and converting the xy coordinates to radial coordinates with respect to the pore centers. This is all done in the __init__ function. This notebook outlines how the radial coordinates are calculated.
###Code
import hdphmm
import mdtraj as md
###Output
_____no_output_____
###Markdown
First, let's load the trajectory.
###Code
traj = '5ms_nojump.xtc'
gro = 'em.gro'
first_frame = 7000
t = md.load(traj, top=gro)[first_frame:]
###Output
_____no_output_____
###Markdown
Now we will calculate the center of mass of the residue whose coordinates are being tracked
###Code
from hdphmm.utils import physical_properties
res = 'MET'
residue = physical_properties.Residue(res)
ndx = [a.index for a in t.topology.atoms if a.residue.name == res]
names = [a.name for a in t.topology.atoms if a.residue.name == res][:residue.natoms] # names of atoms in one residue
mass = [residue.mass[x] for x in names]
com = physical_properties.center_of_mass(t.xyz[:, ndx, :], mass)
###Output
_____no_output_____
###Markdown
Now we need to locate the pore centers. The pores are not perfectly straight, so we create a spline that runs through them and is a function of z.
###Code
monomer = physical_properties.Residue('NAcarb11V')
pore_atoms = [a.index for a in t.topology.atoms if a.name in monomer.pore_defining_atoms and
a.residue.name in monomer.residues]
spline_params = {'npts_spline': 10, 'save': True, 'savename': 'test_spline.pl'}
spline = physical_properties.trace_pores(t.xyz[:, pore_atoms, :], t.unitcell_vectors,
spline_params['npts_spline'], save=spline_params['save'], savename=spline_params['savename'])[0]
###Output
Attempting to load spline ... Success!
###Markdown
The physical_properties module can write out the coordinates of the spline in .gro format if you'd like to compare it to the actual system.
###Code
physical_properties.write_spline_coordinates(spline)
###Output
_____no_output_____
###Markdown
Now we just need to calculate the distance between each solute center of mass and the closest spline
###Code
import numpy as np
import tqdm
nres = com.shape[1]
radial_distances = np.zeros([t.n_frames, nres])
npores = 4
for f in tqdm.tqdm(range(t.n_frames), unit=' Frames'):
d = np.zeros([npores, nres])
for p in range(npores):
# calculate radial distance between each solute and all splines
d[p, :] = physical_properties.radial_distance_spline(spline[f, p, ...], com[f, ...], t.unitcell_vectors[f, ...])
# record distance to closest pore center
radial_distances[f, :] = d[np.argmin(d, axis=0), np.arange(nres)]
radial_distances.shape
import matplotlib.pyplot as plt
traj_no = 2
rd = radial_distances[:, traj_no]
diff = rd[1:] - rd[:-1]
fig, ax = plt.subplots(2, 1, figsize=(12, 5))
ax[0].plot(rd)
ax[1].plot(diff)
plt.show()
###Output
_____no_output_____ |
vertex_metrics_experiment/chalboards/nlp_chalkboard.ipynb | ###Markdown
load similarity matrix
###Code
similarity_matrix = load_sparse_csr(filename=experiment_data_dir + 'cosine_sims.npz')
with open(experiment_data_dir + 'CLid_to_index.p', 'rb') as f:
CLid_to_index = pickle.load(f)
###Output
_____no_output_____
###Markdown
Look at similarities
###Code
def get_similarities(similarity_matrix, CLid_A, CLid_B, CLid_to_index):
"""
    Returns the similarities for cases indexed by CL ids as a list
Parameters
----------
similarity_matrix: precomputed similarity matrix
CLid_A, CLid_B: two lists of CL ids whose similarities we want
CLid_to_index: dict that maps CL ids to similarity_matrix indices
"""
if len(CLid_A) != len(CLid_B):
raise ValueError('lists not the same length')
else:
N = len(CLid_A)
# list to return
similarities = [0] * N
# grab each entry
for i in range(N):
try:
            # convert CL id to matrix index
idA = CLid_to_index[CLid_A[i]]
idB = CLid_to_index[CLid_B[i]]
similarities[i] = similarity_matrix[idA, idB]
except KeyError:
# if one of the CLid's is not in the similarity matrix return nan
similarities[i] = np.nan
return similarities
def save_similarity_matrix(experiment_data_dir, similarity_matrix, CLid_to_index):
"""
saves similarity matrix and CLid_to_index dict
"""
# save similarity matrix
save_sparse_csr(filename=experiment_data_dir + 'cosine_sims',
array=S)
# save clid to index map
with open(experiment_data_dir + 'CLid_to_index.p', 'wb') as fp:
pickle.dump(CLid_to_index, fp)
def load_similarity_matrix(experiment_data_dir):
"""
Load similarity matrix and CLid_to_index dict
Parameters
----------
experiment_data_dir:
Output
------
similarity_matrix, CLid_to_index
"""
similarity_matrix = load_sparse_csr(filename=experiment_data_dir + 'cosine_sims.npz')
with open(experiment_data_dir + 'CLid_to_index.p', 'rb') as f:
CLid_to_index = pickle.load(f)
return similarity_matrix, CLid_to_index
CLid_ing = []
CLid_ed = []
for e in G.es:
CLid_ing.append(G.vs[e.source]['name'])
CLid_ed.append(G.vs[e.target]['name'])
start = time.time()
sims = get_similarities(S, CLid_ing, CLid_ed, CLid_to_index)
runtime = time.time() - start
###Output
_____no_output_____
###Markdown
surgery
###Code
len(CLid_to_index.keys())
map_clids = CLid_to_index.keys()
print 'there are %d keys' % len(CLid_to_index.keys())
len(G.vs)
G_clids = G.vs['name']
print 'there are %d vertices in the graph' % len(G.vs)
set(G_clids).difference(set(map_clids))
len(os.listdir(experiment_data_dir + 'textfiles/'))
###Output
_____no_output_____ |
Game development information.ipynb | ###Markdown
This is a bit off topic but still crucial for the ultimate production of project gardener.
###Code
# This game has a number of features I quite like. Simplistic ui and environment. A lot of upgrades and bonuses.
# Game: The Tower - Idle Tower Defense
# Developer: Tech Tree Games
# Website: https://www.techtreegames.com/
# Dev active: 2018
# Owner: Anthony Tirone
# Linked in profile: https://www.linkedin.com/in/anthony-tirone-7b143043/
# Possible reddit profile: https://www.reddit.com/user/Fuddsworth/
###Output
_____no_output_____ |
Week2/week-2-multiple-regression-assignment-2.ipynb | ###Markdown
Regression Week 2: Multiple Regression (gradient descent) In the first notebook we explored multiple regression using graphlab create. Now we will use graphlab along with numpy to solve for the regression weights with gradient descent.In this notebook we will cover estimating multiple regression weights via gradient descent. You will:* Add a constant column of 1's to a graphlab SFrame to account for the intercept* Convert an SFrame into a Numpy array* Write a predict_output() function using Numpy* Write a numpy function to compute the derivative of the regression weights with respect to a single feature* Write gradient descent function to compute the regression weights given an initial weight vector, step size and tolerance.* Use the gradient descent function to estimate regression weights for multiple features Fire up graphlab create Make sure you have the latest version of graphlab (>= 1.7)
###Code
import graphlab
###Output
_____no_output_____
###Markdown
Load in house sales data. The dataset is from house sales in King County, the region where the city of Seattle, WA is located.
###Code
sales = graphlab.SFrame('kc_house_data.gl/')
###Output
This non-commercial license of GraphLab Create for academic use is assigned to [email protected] and will expire on September 10, 2017.
###Markdown
If we want to do any "feature engineering" like creating new features or adjusting existing ones we should do this directly using the SFrames as seen in the other Week 2 notebook. For this notebook, however, we will work with the existing features. Convert to Numpy Array Although SFrames offer a number of benefits to users (especially when using Big Data and built-in graphlab functions) in order to understand the details of the implementation of algorithms it's important to work with a library that allows for direct (and optimized) matrix operations. Numpy is a Python solution to work with matrices (or any multi-dimensional "array").Recall that the predicted value given the weights and the features is just the dot product between the feature and weight vector. Similarly, if we put all of the features row-by-row in a matrix then the predicted value for *all* the observations can be computed by right multiplying the "feature matrix" by the "weight vector". First we need to take the SFrame of our data and convert it into a 2D numpy array (also called a matrix). To do this we use graphlab's built in .to_dataframe() which converts the SFrame into a Pandas (another python library) dataframe. We can then use Panda's .as_matrix() to convert the dataframe into a numpy matrix.
###Code
import numpy as np # note this allows us to refer to numpy as np instead
###Output
_____no_output_____
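###Markdown
As a tiny made-up illustration of that matrix-vector view (toy numbers, not the assignment data): each row of the feature matrix dotted with the weight vector gives one prediction.
###Code
# two observations, each with a constant and one feature; weights are [intercept, slope]
toy_features = np.array([[1., 2.], [1., 3.]])
toy_weights = np.array([10., 0.5])
np.dot(toy_features, toy_weights)  # -> array([11. , 11.5])
###Output
_____no_output_____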
###Markdown
Now we will write a function that will accept an SFrame, a list of feature names (e.g. ['sqft_living', 'bedrooms']) and an target feature e.g. ('price') and will return two things:* A numpy matrix whose columns are the desired features plus a constant column (this is how we create an 'intercept')* A numpy array containing the values of the outputWith this in mind, complete the following function (where there's an empty line you should write a line of code that does what the comment above indicates)**Please note you will need GraphLab Create version at least 1.7.1 in order for .to_numpy() to work!**
###Code
def get_numpy_data(data_sframe, features, output):
data_sframe['constant'] = 1 # this is how you add a constant column to an SFrame
# add the column 'constant' to the front of the features list so that we can extract it along with the others:
features = ['constant'] + features # this is how you combine two lists
# select the columns of data_SFrame given by the features list into the SFrame features_sframe (now including constant):
features_sframe = data_sframe[features]
# the following line will convert the features_SFrame into a numpy matrix:
feature_matrix = features_sframe.to_numpy()
# assign the column of data_sframe associated with the output to the SArray output_sarray
output_sarray = data_sframe[output]
# the following will convert the SArray into a numpy array by first converting it to a list
output_array = output_sarray.to_numpy()
return(feature_matrix, output_array)
###Output
_____no_output_____
###Markdown
For testing let's use the 'sqft_living' feature and a constant as our features and price as our output:
###Code
(example_features, example_output) = get_numpy_data(sales, ['sqft_living'], 'price') # the [] around 'sqft_living' makes it a list
print example_features[0,:] # this accesses the first row of the data the ':' indicates 'all columns'
print example_output[0] # and the corresponding output
###Output
[ 1.00000000e+00 1.18000000e+03]
221900.0
###Markdown
Predicting output given regression weights Suppose we had the weights [1.0, 1.0] and the features [1.0, 1180.0] and we wanted to compute the predicted output 1.0\*1.0 + 1.0\*1180.0 = 1181.0; this is the dot product between these two arrays. If they're numpy arrays we can use np.dot() to compute this:
###Code
my_weights = np.array([1., 1.]) # the example weights
my_features = example_features[0,] # we'll use the first data point
predicted_value = np.dot(my_features, my_weights)
print predicted_value
###Output
1181.0
###Markdown
np.dot() also works when dealing with a matrix and a vector. Recall that the predictions from all the observations are just the RIGHT (as in weights on the right) dot product between the features *matrix* and the weights *vector*. With this in mind finish the following predict_output function to compute the predictions for an entire matrix of features given the matrix and the weights:
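In matrix notation this is simply $\hat{y} = Hw$, where $H$ is the feature matrix (whose first column is the constant) and $w$ is the weight vector.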
###Code
def predict_output(feature_matrix, weights):
# assume feature_matrix is a numpy matrix containing the features as columns and weights is a corresponding numpy array
# create the predictions vector by using np.dot()
predictions = np.dot(feature_matrix, weights)
return(predictions)
###Output
_____no_output_____
###Markdown
If you want to test your code run the following cell:
###Code
test_predictions = predict_output(example_features, my_weights)
print test_predictions[0] # should be 1181.0
print test_predictions[1] # should be 2571.0
###Output
1181.0
2571.0
###Markdown
Computing the Derivative We are now going to move to computing the derivative of the regression cost function. Recall that the cost function is the sum over the data points of the squared difference between an observed output and a predicted output. Since the derivative of a sum is the sum of the derivatives, we can compute the derivative for a single data point and then sum over data points. We can write the squared difference between the observed output and predicted output for a single point as follows: (w[0]\*[CONSTANT] + w[1]\*[feature_1] + ... + w[i] \*[feature_i] + ... + w[k]\*[feature_k] - output)^2 Where we have k features and a constant. So the derivative with respect to weight w[i] by the chain rule is: 2\*(w[0]\*[CONSTANT] + w[1]\*[feature_1] + ... + w[i] \*[feature_i] + ... + w[k]\*[feature_k] - output)\*[feature_i] The term inside the parentheses is just the error (difference between prediction and output). So we can re-write this as: 2\*error\*[feature_i] That is, the derivative for the weight for feature i is the sum (over data points) of 2 times the product of the error and the feature itself. In the case of the constant this is just twice the sum of the errors! Recall that twice the sum of the product of two vectors is just twice the dot product of the two vectors. Therefore the derivative for the weight for feature_i is just two times the dot product between the values of feature_i and the current errors. With this in mind complete the following derivative function which computes the derivative of the weight given the value of the feature (over all data points) and the errors (over all data points).
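Written compactly, the partial derivative for weight $i$ is $\frac{\partial RSS}{\partial w_i} = 2\sum_j (\hat{y}_j - y_j)\, h_i(x_j)$, i.e. twice the dot product of the error vector with the values of feature $i$, which is exactly what the function below should compute.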
###Code
def feature_derivative(errors, feature):
# Assume that errors and feature are both numpy arrays of the same length (number of data points)
# compute twice the dot product of these vectors as 'derivative' and return the value
derivative = 2 * np.dot(errors, feature)
return(derivative)
###Output
_____no_output_____
###Markdown
To test your feature derivative run the following:
###Code
(example_features, example_output) = get_numpy_data(sales, ['sqft_living'], 'price')
my_weights = np.array([0., 0.]) # this makes all the predictions 0
test_predictions = predict_output(example_features, my_weights)
# just like SFrames 2 numpy arrays can be elementwise subtracted with '-':
errors = test_predictions - example_output # prediction errors in this case is just the -example_output
feature = example_features[:,0] # let's compute the derivative with respect to 'constant', the ":" indicates "all rows"
derivative = feature_derivative(errors, feature)
print derivative
print -np.sum(example_output)*2 # should be the same as derivative
###Output
-23345850022.0
-23345850022.0
###Markdown
Gradient Descent Now we will write a function that performs a gradient descent. The basic premise is simple. Given a starting point we update the current weights by moving in the negative gradient direction. Recall that the gradient is the direction of *increase* and therefore the negative gradient is the direction of *decrease* and we're trying to *minimize* a cost function. The amount by which we move in the negative gradient *direction* is called the 'step size'. We stop when we are 'sufficiently close' to the optimum. We define this by requiring the magnitude (length) of the gradient vector to be smaller than a fixed 'tolerance'. With this in mind, complete the following gradient descent function below using your derivative function above. For each step in the gradient descent we update the weight for each feature before computing our stopping criteria.
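In symbols, each step performs $w_i \leftarrow w_i - \eta\, \frac{\partial RSS}{\partial w_i}$ for every weight (with step size $\eta$), and we stop once the gradient magnitude $\sqrt{\sum_i (\partial RSS / \partial w_i)^2}$ drops below the tolerance.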
###Code
from math import sqrt # recall that the magnitude/length of a vector [g[0], g[1], g[2]] is sqrt(g[0]^2 + g[1]^2 + g[2]^2)
def regression_gradient_descent(feature_matrix, output, initial_weights, step_size, tolerance):
converged = False
weights = np.array(initial_weights) # make sure it's a numpy array
while not converged:
# compute the predictions based on feature_matrix and weights using your predict_output() function
pred_output = predict_output(feature_matrix, weights)
# compute the errors as predictions - output
errors = pred_output - output
gradient_sum_squares = 0 # initialize the gradient sum of squares
# while we haven't reached the tolerance yet, update each feature's weight
for i in range(len(weights)): # loop over each weight
# Recall that feature_matrix[:, i] is the feature column associated with weights[i]
# compute the derivative for weight[i]:
derivative = feature_derivative(errors, feature_matrix[:, i])
# add the squared value of the derivative to the gradient sum of squares (for assessing convergence)
gradient_sum_squares += derivative ** 2
# subtract the step size times the derivative from the current weight
weights[i] -= derivative * step_size
# compute the square-root of the gradient sum of squares to get the gradient magnitude:
gradient_magnitude = sqrt(gradient_sum_squares)
if gradient_magnitude < tolerance:
converged = True
return(weights)
###Output
_____no_output_____
###Markdown
A few things to note before we run the gradient descent. Since the gradient is a sum over all the data points and involves a product of an error and a feature the gradient itself will be very large since the features are large (squarefeet) and the output is large (prices). So while you might expect "tolerance" to be small, small is only relative to the size of the features. For similar reasons the step size will be much smaller than you might expect but this is because the gradient has such large values. Running the Gradient Descent as Simple Regression First let's split the data into training and test data.
###Code
train_data,test_data = sales.random_split(.8,seed=0)
###Output
_____no_output_____
###Markdown
Although the gradient descent is designed for multiple regression, since the constant is now a feature we can use the gradient descent function to estimate the parameters in the simple regression on squarefeet. The following cell sets up the feature_matrix, output, initial weights and step size for the first model:
###Code
# let's test out the gradient descent
simple_features = ['sqft_living']
my_output = 'price'
(simple_feature_matrix, output) = get_numpy_data(train_data, simple_features, my_output)
initial_weights = np.array([-47000., 1.])
step_size = 7e-12
tolerance = 2.5e7
###Output
_____no_output_____
###Markdown
Next run your gradient descent with the above parameters.
###Code
simple_weights = regression_gradient_descent(simple_feature_matrix, output, initial_weights, step_size, tolerance)
print simple_weights
###Output
[-46999.88716555 281.91211912]
###Markdown
How do your weights compare to those achieved in week 1 (don't expect them to be exactly the same)? **Quiz Question: What is the value of the weight for sqft_living -- the second element of ‘simple_weights’ (rounded to 1 decimal place)?** Use your newly estimated weights and your predict_output() function to compute the predictions on all the TEST data (you will need to create a numpy array of the test feature_matrix and test output first):
###Code
(test_simple_feature_matrix, test_output) = get_numpy_data(test_data, simple_features, my_output)
###Output
_____no_output_____
###Markdown
Now compute your predictions using test_simple_feature_matrix and your weights from above.
###Code
simple_predictions = predict_output(test_simple_feature_matrix, simple_weights)
print simple_predictions[0]
print test_output[0]
###Output
356134.443171
310000.0
###Markdown
**Quiz Question: What is the predicted price for the 1st house in the TEST data set for model 1 (round to nearest dollar)?** Now that you have the predictions on test data, compute the RSS on the test data set. Save this value for comparison later. Recall that RSS is the sum of the squared errors (difference between prediction and output).
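That is, $RSS = \sum_j (\hat{y}_j - y_j)^2$.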
###Code
simpleRSS = (np.dot((simple_predictions - test_output), (simple_predictions - test_output))).sum()
print simpleRSS
###Output
2.75400047593e+14
###Markdown
Running a multiple regression Now we will use more than one actual feature. Use the following code to produce the weights for a second model with the following parameters:
###Code
model_features = ['sqft_living', 'sqft_living15'] # sqft_living15 is the average squarefeet for the nearest 15 neighbors.
my_output = 'price'
(feature_matrix, output) = get_numpy_data(train_data, model_features, my_output)
initial_weights = np.array([-100000., 1., 1.])
step_size = 4e-12
tolerance = 1e9
###Output
_____no_output_____
###Markdown
Use the above parameters to estimate the model weights. Record these values for your quiz.
###Code
two_weights = regression_gradient_descent(feature_matrix, output, initial_weights, step_size, tolerance)
print two_weights
###Output
[ -9.99999688e+04 2.45072603e+02 6.52795277e+01]
###Markdown
Use your newly estimated weights and the predict_output function to compute the predictions on the TEST data. Don't forget to create a numpy array for these features from the test set first!
###Code
(test_two_feature_matrix, test_two_output) = get_numpy_data(test_data, model_features, my_output)
two_predictions = predict_output(test_two_feature_matrix, two_weights)
print two_predictions[0]
###Output
366651.412037
###Markdown
**Quiz Question: What is the predicted price for the 1st house in the TEST data set for model 2 (round to nearest dollar)?** What is the actual price for the 1st house in the test data set?
###Code
print test_two_output[0]
###Output
310000.0
###Markdown
**Quiz Question: Which estimate was closer to the true price for the 1st house on the TEST data set, model 1 or model 2?** Now use your predictions and the output to compute the RSS for model 2 on TEST data.
###Code
twoRSS = (np.dot((two_predictions - test_two_output), (two_predictions - test_two_output))).sum()
print twoRSS
###Output
2.70263446465e+14
|
Day 3 - Using descriptors for matching.ipynb | ###Markdown
Strategy: identify keypoints and descriptor vectors in both Nemo and the world, match the keypoints between Nemo and the world, then visualize those matches.
###Code
detector = cv2.ORB_create(nfeatures=30) #Default = 500
kp_nemo, des_nemo = detector.detectAndCompute(nemo, mask=None)
kp_world, des_world = detector.detectAndCompute(world, mask=None)
###Output
_____no_output_____
###Markdown
Once we have keypoints and descriptors, we need to match.
###Code
matcher = cv2.BFMatcher_create(cv2.NORM_HAMMING2)
matches = matcher.match(des_nemo,des_world)
out = cv2.drawMatches(
nemo, kp_nemo,
world, kp_world,
matches, None #out image
)
plt.figure(figsize=(10,10))
plt.imshow(out[:,:,::-1])
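# A possible refinement (illustrative sketch, not used above): keep only the
# strongest matches by descriptor distance before drawing, e.g.
#   best = sorted(matches, key=lambda m: m.distance)[:10]
#   out = cv2.drawMatches(nemo, kp_nemo, world, kp_world, best, None)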
###Output
_____no_output_____ |
examples/ipynb/histogram.ipynb | ###Markdown
Draw two histograms, one of which is log scaled
###Code
H = histograms(figsize=(10,5))
H.histogram(x, num_bins=10, title='Basic Histogram', facecolor='blue', α=0.5, path=None, subplot=121)
H.histogram(x, num_bins=10, title='Y Log Scaled Histogram', facecolor='blue', α=0.5, path=None, subplot=122, ylogScale=True)
H.show()
###Output
_____no_output_____ |
3_Vector_interpolation_with_wind_and_air_pollution_dataset/3D_vector_Interpolation_TEST.ipynb | ###Markdown
1. Import Packages
###Code
import matplotlib.pyplot as plt
import numpy as np
from scipy import interpolate
###Output
_____no_output_____
###Markdown
2. Data Visualization
###Code
x = [0, 0, 1, 1, 2, 2, 0, 1, 2]
y = [1, 2, 1, 2, 1, 2, 1.5, 1.5, 1.5]
u = [50, -100, 100, 100, 25, 100, 0, 0, 75]
v = [100, 100, 100, 100, 100, 100, 100, 100, 100]
plt.figure(1)
plt.quiver(x, y, u, v)
plt.show()
###Output
_____no_output_____
###Markdown
3. Simple interpolation with SciPy
###Code
xx = np.linspace(0, 2, 10)
yy = np.linspace(1, 2, 10)
xx, yy = np.meshgrid(xx, yy)
plt.figure(2)
points = np.transpose(np.vstack((x, y)))
u_interp = interpolate.griddata(points, u, (xx, yy), method = 'cubic')
v_interp = interpolate.griddata(points, v, (xx, yy), method = 'cubic')
plt.quiver(xx, yy, u_interp, v_interp)
plt.show()
v_interp
###Output
_____no_output_____ |
modern-python/6 cluster analysis for voting blocks.ipynb | ###Markdown
Load votes
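The loop below relies on a few names defined earlier in this project (Senator, vote_value, accumulated_record, NUM_SENATORS, plus csv/glob and collections helpers); a minimal sketch of those definitions, with an assumed encoding of the raw vote strings, might look like this.
###Code
import csv
import glob
from collections import Counter, defaultdict, namedtuple
from typing import DefaultDict, Dict, List, Tuple

Senator = namedtuple('Senator', ['name', 'party', 'state'])
VoteValue = int
VoteHistory = Tuple[VoteValue, ...]

# assumed encoding of the raw vote strings
vote_value = {'Yea': 1, 'Nay': -1, 'Not Voting': 0}  # type: Dict[str, VoteValue]

accumulated_record = defaultdict(list)  # type: DefaultDict[Senator, List[VoteValue]]
NUM_SENATORS = 100
###Output
_____no_output_____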
###Code
for filename in glob.glob('congress_data/*.csv'):
with open(filename, encoding='utf-8') as f:
reader = csv.reader(f)
vote_topic = next(reader)
headers = next(reader)
for person, state, distric, vote, name, party in reader:
senator = Senator(name, party, state)
accumulated_record[senator].append(vote_value[vote])
###Output
_____no_output_____
###Markdown
Transform the record into a plain dict that maps to tuple of votes
###Code
record = {senator: tuple(votes) for senator, votes in accumulated_record.items()} # type: Dict[Senator, VoteHistory]
###Output
_____no_output_____
###Markdown
Use k-means to locate the cluster centroids, assign each senator to the nearest cluster
###Code
from kmeans import k_means, assign_data
centroids = k_means(record.values(), k=3)
clustered_votes = assign_data(centroids, record.values())
###Output
_____no_output_____
###Markdown
Build a reverse mapping from a vote history to a list of senators who voted that way
###Code
votes_to_senators = defaultdict(list) # type: DefaultDict[VoteHistory, List[Senator]]
for senator, votehistory in record.items():
votes_to_senators[votehistory].append(senator)
assert sum([len(cluster) for cluster in votes_to_senators.values()]) == NUM_SENATORS
###Output
_____no_output_____
###Markdown
Display the clusters and the members (senators) of each cluster
###Code
for i, votes_in_cluster in enumerate(clustered_votes.values(), start=1):
print(f'----- Voting Cluster #{i} -----')
party_totals = Counter()
for votes in set(votes_in_cluster):
for senator in votes_to_senators[votes]:
print(senator)
party_totals[senator.party] += 1
print(party_totals)
###Output
----- Voting Cluster #1 -----
Senator(name='Sen. Cory Gardner [R]', party='Republican', state='CO')
Senator(name='Sen. Timothy Kaine [D]', party='Democrat', state='VA')
Senator(name='Sen. Robert “Bob” Casey Jr. [D]', party='Democrat', state='PA')
Senator(name='Sen. Thomas Carper [D]', party='Democrat', state='DE')
Senator(name='Sen. Alan “Al” Franken [D]', party='Democrat', state='MN')
Senator(name='Sen. Mark Warner [D]', party='Democrat', state='VA')
Senator(name='Sen. Daniel Coats [R]', party='Republican', state='IN')
Senator(name='Sen. Mark Kirk [R]', party='Republican', state='IL')
Senator(name='Sen. Orrin Hatch [R]', party='Republican', state='UT')
Senator(name='Sen. Richard Burr [R]', party='Republican', state='NC')
Senator(name='Sen. John “Johnny” Isakson [R]', party='Republican', state='GA')
Senator(name='Sen. Richard Blumenthal [D]', party='Democrat', state='CT')
Senator(name='Sen. Angus King [I]', party='Independent', state='ME')
Senator(name='Sen. Bob Corker [R]', party='Republican', state='TN')
Senator(name='Sen. James Risch [R]', party='Republican', state='ID')
Senator(name='Sen. Lamar Alexander [R]', party='Republican', state='TN')
Senator(name='Sen. Michael Enzi [R]', party='Republican', state='WY')
Senator(name='Sen. James Lankford [R]', party='Republican', state='OK')
Senator(name='Sen. Ron Johnson [R]', party='Republican', state='WI')
Senator(name='Sen. Patrick “Pat” Toomey [R]', party='Republican', state='PA')
Senator(name='Sen. Michael Crapo [R]', party='Republican', state='ID')
Senator(name='Sen. Claire McCaskill [D]', party='Democrat', state='MO')
Senator(name='Sen. John McCain [R]', party='Republican', state='AZ')
Senator(name='Sen. Chris Coons [D]', party='Democrat', state='DE')
Senator(name='Sen. Dianne Feinstein [D]', party='Democrat', state='CA')
Senator(name='Sen. Bill Nelson [D]', party='Democrat', state='FL')
Senator(name='Sen. Amy Klobuchar [D]', party='Democrat', state='MN')
Senator(name='Sen. Jeanne Shaheen [D]', party='Democrat', state='NH')
Senator(name='Sen. Michael Bennet [D]', party='Democrat', state='CO')
Senator(name='Sen. Kelly Ayotte [R]', party='Republican', state='NH')
Senator(name='Sen. Gary Peters [D]', party='Democrat', state='MI')
Senator(name='Sen. Marco Rubio [R]', party='Republican', state='FL')
Senator(name='Sen. Tom Udall [D]', party='Democrat', state='NM')
Senator(name='Sen. Martin Heinrich [D]', party='Democrat', state='NM')
Senator(name='Sen. Dan Sullivan [R]', party='Republican', state='AK')
Senator(name='Sen. John Cornyn [R]', party='Republican', state='TX')
Counter({'Republican': 19, 'Democrat': 16, 'Independent': 1})
----- Voting Cluster #2 -----
Senator(name='Sen. Jeff Flake [R]', party='Republican', state='AZ')
Senator(name='Sen. Maria Cantwell [D]', party='Democrat', state='WA')
Senator(name='Sen. Patty Murray [D]', party='Democrat', state='WA')
Senator(name='Sen. Richard Durbin [D]', party='Democrat', state='IL')
Senator(name='Sen. Barbara Boxer [D]', party='Democrat', state='CA')
Senator(name='Sen. Dean Heller [R]', party='Republican', state='NV')
Senator(name='Sen. Edward “Ed” Markey [D]', party='Democrat', state='MA')
Senator(name='Sen. Brian Schatz [D]', party='Democrat', state='HI')
Senator(name='Sen. Cory Booker [D]', party='Democrat', state='NJ')
Senator(name='Sen. Charles “Chuck” Schumer [D]', party='Democrat', state='NY')
Senator(name='Sen. Mazie Hirono [D]', party='Democrat', state='HI')
Senator(name='Sen. Rand Paul [R]', party='Republican', state='KY')
Senator(name='Sen. John “Jack” Reed [D]', party='Democrat', state='RI')
Senator(name='Sen. Elizabeth Warren [D]', party='Democrat', state='MA')
Senator(name='Sen. Bernard “Bernie” Sanders [I]', party='Independent', state='VT')
Senator(name='Sen. Benjamin Sasse [R]', party='Republican', state='NE')
Senator(name='Sen. Jefferson “Jeff” Sessions [R]', party='Republican', state='AL')
Senator(name='Sen. Kirsten Gillibrand [D]', party='Democrat', state='NY')
Senator(name='Sen. Ted Cruz [R]', party='Republican', state='TX')
Senator(name='Sen. Mike Lee [R]', party='Republican', state='UT')
Senator(name='Sen. Harry Reid [D]', party='Democrat', state='NV')
Senator(name='Sen. Jeff Merkley [D]', party='Democrat', state='OR')
Senator(name='Sen. Patrick Leahy [D]', party='Democrat', state='VT')
Senator(name='Sen. Sheldon Whitehouse [D]', party='Democrat', state='RI')
Senator(name='Sen. Robert “Bob” Menéndez [D]', party='Democrat', state='NJ')
Senator(name='Sen. Ron Wyden [D]', party='Democrat', state='OR')
Counter({'Democrat': 18, 'Republican': 7, 'Independent': 1})
----- Voting Cluster #3 -----
Senator(name='Sen. David Vitter [R]', party='Republican', state='LA')
Senator(name='Sen. Tim Scott [R]', party='Republican', state='SC')
Senator(name='Sen. Charles “Chuck” Grassley [R]', party='Republican', state='IA')
Senator(name='Sen. Steve Daines [R]', party='Republican', state='MT')
Senator(name='Sen. Joni Ernst [R]', party='Republican', state='IA')
Senator(name='Sen. Thom Tillis [R]', party='Republican', state='NC')
Senator(name='Sen. Heidi Heitkamp [D]', party='Democrat', state='ND')
Senator(name='Sen. Lisa Murkowski [R]', party='Republican', state='AK')
Senator(name='Sen. Tammy Baldwin [D]', party='Democrat', state='WI')
Senator(name='Sen. Lindsey Graham [R]', party='Republican', state='SC')
Senator(name='Sen. Tom Cotton [R]', party='Republican', state='AR')
Senator(name='Sen. Jon Tester [D]', party='Democrat', state='MT')
Senator(name='Sen. John Boozman [R]', party='Republican', state='AR')
Senator(name='Sen. Deb Fischer [R]', party='Republican', state='NE')
Senator(name='Sen. Susan Collins [R]', party='Republican', state='ME')
Senator(name='Sen. Jerry Moran [R]', party='Republican', state='KS')
Senator(name='Sen. Roger Wicker [R]', party='Republican', state='MS')
Senator(name='Sen. Debbie Stabenow [D]', party='Democrat', state='MI')
Senator(name='Sen. Bill Cassidy [R]', party='Republican', state='LA')
Senator(name='Sen. James “Jim” Inhofe [R]', party='Republican', state='OK')
Senator(name='Sen. Mike Rounds [R]', party='Republican', state='SD')
Senator(name='Sen. Joe Manchin III [D]', party='Democrat', state='WV')
Senator(name='Sen. Sherrod Brown [D]', party='Democrat', state='OH')
Senator(name='Sen. Barbara Mikulski [D]', party='Democrat', state='MD')
Senator(name='Sen. Benjamin Cardin [D]', party='Democrat', state='MD')
Senator(name='Sen. Christopher Murphy [D]', party='Democrat', state='CT')
Senator(name='Sen. David Perdue [R]', party='Republican', state='GA')
Senator(name='Sen. Mitch McConnell [R]', party='Republican', state='KY')
Senator(name='Sen. Roy Blunt [R]', party='Republican', state='MO')
Senator(name='Sen. John Thune [R]', party='Republican', state='SD')
Senator(name='Sen. Joe Donnelly [D]', party='Democrat', state='IN')
Senator(name='Sen. John Barrasso [R]', party='Republican', state='WY')
Senator(name='Sen. John Hoeven [R]', party='Republican', state='ND')
Senator(name='Sen. Richard Shelby [R]', party='Republican', state='AL')
Senator(name='Sen. Thad Cochran [R]', party='Republican', state='MS')
Senator(name='Sen. Pat Roberts [R]', party='Republican', state='KS')
Senator(name='Sen. Shelley Capito [R]', party='Republican', state='WV')
Senator(name='Sen. Robert “Rob” Portman [R]', party='Republican', state='OH')
Counter({'Republican': 28, 'Democrat': 10})
|
jwolf-AI/tensorflow2_tutorials_chinese-master/103-example_overfitting_and_underfitting.ipynb | ###Markdown
TensorFlow 2.0 Tutorial - Overfitting and Underfitting
###Code
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)
NUM_WORDS = 10000
(train_data, train_labels), (test_data, test_labels) = keras.datasets.imdb.load_data(num_words=NUM_WORDS)
def multi_hot_sequences(sequences, dimension):
results = np.zeros((len(sequences), dimension))
for i, word_indices in enumerate(sequences):
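        # mark each word index that appears in this review with 1.0 (multi-hot encoding)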
results[i, word_indices] = 1.0
return results
train_data = multi_hot_sequences(train_data, dimension=NUM_WORDS)
test_data = multi_hot_sequences(test_data, dimension=NUM_WORDS)
plt.plot(train_data[0])
###Output
_____no_output_____
###Markdown
The simplest way to prevent overfitting is to reduce the size of the model, i.e. the number of learnable parameters. Deep learning models tend to be good at fitting the training data, but the real challenge is generalization, not fitting. On the other hand, if the network has limited memorization capacity, it will not be able to learn the mapping easily; to minimize its loss it has to learn compressed representations with more predictive power. At the same time, if you make the model too small, it will struggle to fit the training data. There is a balance between "too much capacity" and "not enough capacity". To find an appropriate model size, it is best to start with relatively few layers and parameters, then increase the size of the layers or add new layers until you see diminishing returns on the validation loss. We will create a simple model using Dense layers as a baseline on the movie review classification network, then create smaller and larger versions and compare them. 1. Create a baseline model
###Code
import tensorflow.keras.layers as layers
baseline_model = keras.Sequential(
[
layers.Dense(16, activation='relu', input_shape=(NUM_WORDS,)),
layers.Dense(16, activation='relu'),
layers.Dense(1, activation='sigmoid')
]
)
baseline_model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy', 'binary_crossentropy'])
baseline_model.summary()
baseline_history = baseline_model.fit(train_data, train_labels,
epochs=20, batch_size=512,
validation_data=(test_data, test_labels),
verbose=2)
###Output
Train on 25000 samples, validate on 25000 samples
Epoch 1/20
25000/25000 - 3s - loss: 0.4808 - accuracy: 0.8096 - binary_crossentropy: 0.4808 - val_loss: 0.3333 - val_accuracy: 0.8762 - val_binary_crossentropy: 0.3333
Epoch 2/20
25000/25000 - 2s - loss: 0.2450 - accuracy: 0.9126 - binary_crossentropy: 0.2450 - val_loss: 0.2831 - val_accuracy: 0.8882 - val_binary_crossentropy: 0.2831
Epoch 3/20
25000/25000 - 2s - loss: 0.1806 - accuracy: 0.9374 - binary_crossentropy: 0.1806 - val_loss: 0.2921 - val_accuracy: 0.8832 - val_binary_crossentropy: 0.2921
Epoch 4/20
25000/25000 - 2s - loss: 0.1450 - accuracy: 0.9511 - binary_crossentropy: 0.1450 - val_loss: 0.3135 - val_accuracy: 0.8788 - val_binary_crossentropy: 0.3135
Epoch 5/20
25000/25000 - 2s - loss: 0.1187 - accuracy: 0.9614 - binary_crossentropy: 0.1187 - val_loss: 0.3406 - val_accuracy: 0.8752 - val_binary_crossentropy: 0.3406
Epoch 6/20
25000/25000 - 2s - loss: 0.1000 - accuracy: 0.9676 - binary_crossentropy: 0.1000 - val_loss: 0.3765 - val_accuracy: 0.8692 - val_binary_crossentropy: 0.3765
Epoch 7/20
25000/25000 - 2s - loss: 0.0827 - accuracy: 0.9750 - binary_crossentropy: 0.0827 - val_loss: 0.4124 - val_accuracy: 0.8659 - val_binary_crossentropy: 0.4124
Epoch 8/20
25000/25000 - 2s - loss: 0.0685 - accuracy: 0.9815 - binary_crossentropy: 0.0685 - val_loss: 0.4567 - val_accuracy: 0.8610 - val_binary_crossentropy: 0.4567
Epoch 9/20
25000/25000 - 2s - loss: 0.0581 - accuracy: 0.9857 - binary_crossentropy: 0.0581 - val_loss: 0.4987 - val_accuracy: 0.8597 - val_binary_crossentropy: 0.4987
Epoch 10/20
25000/25000 - 2s - loss: 0.0481 - accuracy: 0.9889 - binary_crossentropy: 0.0481 - val_loss: 0.5402 - val_accuracy: 0.8569 - val_binary_crossentropy: 0.5402
Epoch 11/20
25000/25000 - 2s - loss: 0.0392 - accuracy: 0.9923 - binary_crossentropy: 0.0392 - val_loss: 0.5883 - val_accuracy: 0.8540 - val_binary_crossentropy: 0.5883
Epoch 12/20
25000/25000 - 2s - loss: 0.0310 - accuracy: 0.9946 - binary_crossentropy: 0.0310 - val_loss: 0.6316 - val_accuracy: 0.8534 - val_binary_crossentropy: 0.6316
Epoch 13/20
25000/25000 - 2s - loss: 0.0242 - accuracy: 0.9964 - binary_crossentropy: 0.0242 - val_loss: 0.6779 - val_accuracy: 0.8515 - val_binary_crossentropy: 0.6779
Epoch 14/20
25000/25000 - 2s - loss: 0.0185 - accuracy: 0.9978 - binary_crossentropy: 0.0185 - val_loss: 0.7149 - val_accuracy: 0.8510 - val_binary_crossentropy: 0.7149
Epoch 15/20
25000/25000 - 2s - loss: 0.0143 - accuracy: 0.9989 - binary_crossentropy: 0.0143 - val_loss: 0.7571 - val_accuracy: 0.8496 - val_binary_crossentropy: 0.7571
Epoch 16/20
25000/25000 - 2s - loss: 0.0111 - accuracy: 0.9994 - binary_crossentropy: 0.0111 - val_loss: 0.7954 - val_accuracy: 0.8492 - val_binary_crossentropy: 0.7954
Epoch 17/20
25000/25000 - 2s - loss: 0.0087 - accuracy: 0.9997 - binary_crossentropy: 0.0087 - val_loss: 0.8301 - val_accuracy: 0.8497 - val_binary_crossentropy: 0.8301
Epoch 18/20
25000/25000 - 3s - loss: 0.0068 - accuracy: 0.9998 - binary_crossentropy: 0.0068 - val_loss: 0.8629 - val_accuracy: 0.8490 - val_binary_crossentropy: 0.8629
Epoch 19/20
25000/25000 - 3s - loss: 0.0055 - accuracy: 0.9999 - binary_crossentropy: 0.0055 - val_loss: 0.8937 - val_accuracy: 0.8492 - val_binary_crossentropy: 0.8937
Epoch 20/20
25000/25000 - 3s - loss: 0.0044 - accuracy: 0.9999 - binary_crossentropy: 0.0044 - val_loss: 0.9217 - val_accuracy: 0.8488 - val_binary_crossentropy: 0.9217
###Markdown
2. Create a smaller model
###Code
small_model = keras.Sequential(
[
layers.Dense(4, activation='relu', input_shape=(NUM_WORDS,)),
layers.Dense(4, activation='relu'),
layers.Dense(1, activation='sigmoid')
]
)
small_model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy', 'binary_crossentropy'])
small_model.summary()
small_history = small_model.fit(train_data, train_labels,
epochs=20, batch_size=512,
validation_data=(test_data, test_labels),
verbose=2)
###Output
Train on 25000 samples, validate on 25000 samples
Epoch 1/20
25000/25000 - 3s - loss: 0.6170 - accuracy: 0.6609 - binary_crossentropy: 0.6170 - val_loss: 0.5217 - val_accuracy: 0.8034 - val_binary_crossentropy: 0.5217
Epoch 2/20
25000/25000 - 2s - loss: 0.4356 - accuracy: 0.8661 - binary_crossentropy: 0.4356 - val_loss: 0.3979 - val_accuracy: 0.8781 - val_binary_crossentropy: 0.3979
Epoch 3/20
25000/25000 - 2s - loss: 0.3002 - accuracy: 0.9146 - binary_crossentropy: 0.3002 - val_loss: 0.3160 - val_accuracy: 0.8866 - val_binary_crossentropy: 0.3160
Epoch 4/20
25000/25000 - 2s - loss: 0.2255 - accuracy: 0.9322 - binary_crossentropy: 0.2255 - val_loss: 0.2930 - val_accuracy: 0.8880 - val_binary_crossentropy: 0.2930
Epoch 5/20
25000/25000 - 2s - loss: 0.1884 - accuracy: 0.9416 - binary_crossentropy: 0.1884 - val_loss: 0.2901 - val_accuracy: 0.8858 - val_binary_crossentropy: 0.2901
Epoch 6/20
25000/25000 - 2s - loss: 0.1632 - accuracy: 0.9507 - binary_crossentropy: 0.1632 - val_loss: 0.2918 - val_accuracy: 0.8848 - val_binary_crossentropy: 0.2918
Epoch 7/20
25000/25000 - 2s - loss: 0.1449 - accuracy: 0.9560 - binary_crossentropy: 0.1449 - val_loss: 0.2991 - val_accuracy: 0.8817 - val_binary_crossentropy: 0.2991
Epoch 8/20
25000/25000 - 2s - loss: 0.1295 - accuracy: 0.9618 - binary_crossentropy: 0.1295 - val_loss: 0.3087 - val_accuracy: 0.8796 - val_binary_crossentropy: 0.3087
Epoch 9/20
25000/25000 - 2s - loss: 0.1167 - accuracy: 0.9669 - binary_crossentropy: 0.1167 - val_loss: 0.3210 - val_accuracy: 0.8772 - val_binary_crossentropy: 0.3210
Epoch 10/20
25000/25000 - 2s - loss: 0.1059 - accuracy: 0.9709 - binary_crossentropy: 0.1059 - val_loss: 0.3325 - val_accuracy: 0.8748 - val_binary_crossentropy: 0.3325
Epoch 11/20
25000/25000 - 2s - loss: 0.0961 - accuracy: 0.9742 - binary_crossentropy: 0.0961 - val_loss: 0.3468 - val_accuracy: 0.8727 - val_binary_crossentropy: 0.3468
Epoch 12/20
25000/25000 - 2s - loss: 0.0877 - accuracy: 0.9780 - binary_crossentropy: 0.0877 - val_loss: 0.3602 - val_accuracy: 0.8715 - val_binary_crossentropy: 0.3602
Epoch 13/20
25000/25000 - 2s - loss: 0.0800 - accuracy: 0.9807 - binary_crossentropy: 0.0800 - val_loss: 0.3759 - val_accuracy: 0.8693 - val_binary_crossentropy: 0.3759
Epoch 14/20
25000/25000 - 2s - loss: 0.0727 - accuracy: 0.9837 - binary_crossentropy: 0.0727 - val_loss: 0.3923 - val_accuracy: 0.8690 - val_binary_crossentropy: 0.3923
Epoch 15/20
25000/25000 - 2s - loss: 0.0667 - accuracy: 0.9858 - binary_crossentropy: 0.0667 - val_loss: 0.4089 - val_accuracy: 0.8672 - val_binary_crossentropy: 0.4089
Epoch 16/20
25000/25000 - 2s - loss: 0.0610 - accuracy: 0.9876 - binary_crossentropy: 0.0610 - val_loss: 0.4255 - val_accuracy: 0.8646 - val_binary_crossentropy: 0.4255
Epoch 17/20
25000/25000 - 2s - loss: 0.0555 - accuracy: 0.9899 - binary_crossentropy: 0.0555 - val_loss: 0.4422 - val_accuracy: 0.8651 - val_binary_crossentropy: 0.4422
Epoch 18/20
25000/25000 - 2s - loss: 0.0509 - accuracy: 0.9912 - binary_crossentropy: 0.0509 - val_loss: 0.4614 - val_accuracy: 0.8626 - val_binary_crossentropy: 0.4614
Epoch 19/20
25000/25000 - 2s - loss: 0.0466 - accuracy: 0.9925 - binary_crossentropy: 0.0466 - val_loss: 0.4780 - val_accuracy: 0.8622 - val_binary_crossentropy: 0.4780
Epoch 20/20
25000/25000 - 2s - loss: 0.0426 - accuracy: 0.9936 - binary_crossentropy: 0.0426 - val_loss: 0.4976 - val_accuracy: 0.8608 - val_binary_crossentropy: 0.4976
###Markdown
 3. Create a bigger model
###Code
big_model = keras.Sequential(
[
layers.Dense(512, activation='relu', input_shape=(NUM_WORDS,)),
layers.Dense(512, activation='relu'),
layers.Dense(1, activation='sigmoid')
]
)
big_model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy', 'binary_crossentropy'])
big_model.summary()
big_history = big_model.fit(train_data, train_labels,
epochs=20, batch_size=512,
validation_data=(test_data, test_labels),
verbose=2)
def plot_history(histories, key='binary_crossentropy'):
plt.figure(figsize=(16,10))
for name, history in histories:
val = plt.plot(history.epoch, history.history['val_'+key],
'--', label=name.title()+' Val')
plt.plot(history.epoch, history.history[key], color=val[0].get_color(),
label=name.title()+' Train')
plt.xlabel('Epochs')
plt.ylabel(key.replace('_',' ').title())
plt.legend()
plt.xlim([0,max(history.epoch)])
plot_history([('baseline', baseline_history),
('small', small_history),
('big', big_history)])
###Output
_____no_output_____
###Markdown
 Note that the larger network starts overfitting almost immediately, after only a single epoch, and its overfitting is much more severe. The more capacity the network has, the more quickly it can model the training data (resulting in a low training loss), but the more susceptible it is to overfitting (resulting in a large gap between the training and validation loss). 4. Add L2 regularization
###Code
l2_model = keras.Sequential(
[
layers.Dense(16, kernel_regularizer=keras.regularizers.l2(0.001),
activation='relu', input_shape=(NUM_WORDS,)),
layers.Dense(16, kernel_regularizer=keras.regularizers.l2(0.001),
activation='relu'),
layers.Dense(1, activation='sigmoid')
]
)
l2_model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy', 'binary_crossentropy'])
l2_model.summary()
l2_history = l2_model.fit(train_data, train_labels,
epochs=20, batch_size=512,
validation_data=(test_data, test_labels),
verbose=2)
plot_history([('baseline', baseline_history),
('l2', l2_history)])
###Output
_____no_output_____
###Markdown
 5. Add dropout
###Code
dpt_model = keras.Sequential(
[
layers.Dense(16, activation='relu', input_shape=(NUM_WORDS,)),
layers.Dropout(0.5),
layers.Dense(16, activation='relu'),
layers.Dropout(0.5),
layers.Dense(1, activation='sigmoid')
]
)
dpt_model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy', 'binary_crossentropy'])
dpt_model.summary()
dpt_history = dpt_model.fit(train_data, train_labels,
epochs=20, batch_size=512,
validation_data=(test_data, test_labels),
verbose=2)
plot_history([('baseline', baseline_history),
('dropout', dpt_history)])
###Output
_____no_output_____ |
Workshop Week 6a.ipynb | ###Markdown
Linear Regression - Data NormalisationThis notebook presents a problem that requires some normalisation of data before a linear regression model can be applied. The data we will use is the Sea Ice data from Chapter 6 of the text (also referenced in [the accompanying notebooks](https://github.com/MQCOMP257/introduction-datascience-python-book/blob/master/ch06_Regression_Analysis.ipynb)). Our goal is to observe the relationship between `year` and `extent` of the Sea Ice and to build a linear regression model to predict the extent for a given year.
###Code
import seaborn as sns
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn import metrics
import matplotlib.pylab as plt
%matplotlib inline
# Load the data and show the info and contents:
ice = pd.read_csv('files/SeaIce.txt', delim_whitespace = True)
ice.head()
###Output
_____no_output_____
###Markdown
Summarize the Dataset- Dimensions of the dataset- Peek at the data itself- Statistical summary of all attributes.
###Code
# your code here for data shape
ice.shape
# your code here for statistical summary
ice.describe()
# Visualize the data with a scatter plot (x is year, y as extent)
sns.lmplot(x='year', y='extent', data=ice)
###Output
_____no_output_____
###Markdown
 Clean your data Note what is wrong with the data and what needs to be cleaned before proceeding. Exclude the outlier data and repeat the plot to check that the outlier data is now excluded.
###Code
# Remove the outlier data and repeat the plot to confirm data is clean
# insert code here
ice = ice[ice['extent'] >= 0]
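# A possible check (sketch): re-plot after the filtering above to confirm that the
# outliers (the rows with negative extent values) are gone
sns.lmplot(x='year', y='extent', data=ice)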
###Output
_____no_output_____
###Markdown
 Normalize the DataThe plot above should reveal that we need to normalize the data (it has a sinusoidal shape) and to do this we need to compute the mean for each month and subtract the monthly mean from each record. This will remove the effect of seasons on the `extent` variable and reveal the longer term trend in the data.You can use the Pandas [groupby](http://pandas.pydata.org/pandas-docs/stable/groupby.html) method to group rows in a data frame according to some value. This returns a __group__ object that can be used to operate on the groups. The [notebook for Chapter 6](https://github.com/MQCOMP257/introduction-datascience-python-book/blob/master/ch06_Regression_Analysis.ipynb) shows how to use this to normalise the data.(Advanced Hint: it is possible to avoid using a for loop to normalise this data - look at the [groupby.transform method](http://pandas.pydata.org/pandas-docs/stable/groupby.html#transformation))
###Code
# Compute the mean extent for each month and subtract from each row of the data frame
# re-plot the data
sns.lmplot(x='mo', y='extent', data=ice)
grouped = ice.groupby('mo')
# Mean extent per calendar month (useful for checking the normalisation)
month_means = grouped.extent.mean()
print(month_means)
# Subtract each month's mean from its records; work on a copy to avoid a
# SettingWithCopyWarning, and use groupby.transform instead of an explicit loop
ice_2 = ice.copy()
ice_2['extent'] = ice_2['extent'] - grouped['extent'].transform('mean')
sns.lmplot(x='mo', y='extent', data=ice_2)
###Output
_____no_output_____
###Markdown
Now you can plot `year` vs `extent` to look at the relationship we are trying to model. What are your initial thoughts on the relationship? Is a linear model going to work?
###Code
# Plot Year vs Extent
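# One possible solution (sketch), using the month-normalised data frame ice_2 created above
sns.lmplot(x='year', y='extent', data=ice_2)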
###Output
_____no_output_____ |
notebooks/community/migration/UJ6 AutoML for natural language with Vertex AI Text Classification.ipynb | ###Markdown
Vertex SDK: AutoML natural language text classification model InstallationInstall the latest (preview) version of Vertex SDK.
###Code
! pip3 install -U google-cloud-aiplatform --user
###Output
_____no_output_____
###Markdown
Install the Google *cloud-storage* library as well.
###Code
! pip3 install google-cloud-storage
###Output
_____no_output_____
###Markdown
Restart the KernelOnce you've installed the Vertex SDK and Google *cloud-storage*, you need to restart the notebook kernel so it can find the packages.
###Code
import os
if not os.getenv("AUTORUN"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
###Output
_____no_output_____
###Markdown
Before you begin GPU run-time*Make sure you're running this notebook in a GPU runtime if you have that option. In Colab, select* **Runtime > Change Runtime Type > GPU** Set up your GCP project**The following steps are required, regardless of your notebook environment.**1. [Select or create a GCP project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project)3. [Enable the Vertex APIs and Compute Engine APIs.](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component)4. [Google Cloud SDK](https://cloud.google.com/sdk) is already installed in Google Cloud Notebooks.5. Enter your project ID in the cell below. Then run the cell to make sure theCloud SDK uses the right project for all the commands in this notebook.**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands.
###Code
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
###Output
_____no_output_____
###Markdown
RegionYou can also change the `REGION` variable, which is used for operationsthroughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend when possible, to choose the region closest to you. - Americas: `us-central1`- Europe: `europe-west4`- Asia Pacific: `asia-east1`You cannot use a Multi-Regional Storage bucket for training with Vertex. Not all regions provide support for all Vertex services. For the latest support per region, see [Region support for Vertex AI services](https://cloud.google.com/vertex-ai/docs/general/locations)
###Code
REGION = "us-central1" # @param {type: "string"}
###Output
_____no_output_____
###Markdown
TimestampIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append onto the name of resources which will be created in this tutorial.
###Code
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
###Output
_____no_output_____
###Markdown
Authenticate your GCP account
**If you are using Google Cloud Notebooks**, your environment is already
authenticated. Skip this step.
 *Note: If you are on a Vertex notebook and run the cell, the cell knows to skip executing the authentication steps.*
###Code
import os
import sys
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your Google Cloud account. This provides access
# to your Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# If on Vertex, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this tutorial in a notebook locally, replace the string
# below with the path to your service account key and run this cell to
# authenticate your Google Cloud account.
else:
%env GOOGLE_APPLICATION_CREDENTIALS your_path_to_credentials.json
# Log in to your account on Google Cloud
! gcloud auth login
###Output
_____no_output_____
###Markdown
Create a Cloud Storage bucket**The following steps are required, regardless of your notebook environment.**This tutorial is designed to use training data that is in a public Cloud Storage bucket and a local Cloud Storage bucket for your batch predictions. You may alternatively use your own training data that you have stored in a local Cloud Storage bucket.Set the name of your Cloud Storage bucket below. It must be unique across all Cloud Storage buckets.
###Code
BUCKET_NAME = "[your-bucket-name]" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "[your-bucket-name]":
BUCKET_NAME = PROJECT_ID + "aip-" + TIMESTAMP
###Output
_____no_output_____
###Markdown
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
###Code
! gsutil mb -l $REGION gs://$BUCKET_NAME
###Output
_____no_output_____
###Markdown
Finally, validate access to your Cloud Storage bucket by examining its contents:
###Code
! gsutil ls -al gs://$BUCKET_NAME
###Output
_____no_output_____
###Markdown
Set up variablesNext, set up some variables used throughout the tutorial. Import libraries and define constants Import Vertex SDKImport the Vertex SDK into our Python environment.
###Code
import base64
import json
import os
import sys
import time
from google.cloud.aiplatform import gapic as aip
from google.protobuf import json_format
from google.protobuf.json_format import MessageToJson, ParseDict
from google.protobuf.struct_pb2 import Struct, Value
###Output
_____no_output_____
###Markdown
 Vertex AI constantsSet up the following constants for Vertex AI:- `API_ENDPOINT`: The Vertex AI API service endpoint for dataset, model, job, pipeline and endpoint services.- `PARENT`: The Vertex AI location root path for dataset, model and endpoint resources.
###Code
# API Endpoint
API_ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION)
# Vertex AI location root path for your dataset, model and endpoint resources
PARENT = "projects/" + PROJECT_ID + "/locations/" + REGION
###Output
_____no_output_____
###Markdown
 AutoML constantsNext, set up constants unique to AutoML Text Classification datasets and training:- Dataset Schemas: Tells the managed dataset service which type of dataset it is.- Data Labeling (Annotations) Schemas: Tells the managed dataset service how the data is labeled (annotated).- Dataset Training Schemas: Tells the Vertex AI Pipelines service the task (e.g., classification) to train the model for.
###Code
# Text Dataset type
TEXT_SCHEMA = "google-cloud-aiplatform/schema/dataset/metadata/text_1.0.0.yaml"
# Text Labeling type
IMPORT_SCHEMA_TEXT_CLASSIFICATION = "gs://google-cloud-aiplatform/schema/dataset/ioformat/text_classification_single_label_io_format_1.0.0.yaml"
# Text Training task
TRAINING_TEXT_CLASSIFICATION_SCHEMA = "gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_text_classification_1.0.0.yaml"
###Output
_____no_output_____
###Markdown
Clients Vertex AIThe Vertex SDK works as a client/server model. On your side (the Python script) you will create a client that sends requests and receives responses from the server (Vertex).You will use several clients in this tutorial, so set them all up upfront.- Dataset Service for managed datasets.- Model Service for managed models.- Pipeline Service for training.- Endpoint Service for deployment.- Prediction Service for serving. *Note*: Prediction has a different service endpoint.
###Code
# client options same for all services
client_options = {"api_endpoint": API_ENDPOINT}
def create_dataset_client():
client = aip.DatasetServiceClient(client_options=client_options)
return client
def create_model_client():
client = aip.ModelServiceClient(client_options=client_options)
return client
def create_pipeline_client():
client = aip.PipelineServiceClient(client_options=client_options)
return client
def create_endpoint_client():
client = aip.EndpointServiceClient(client_options=client_options)
return client
def create_prediction_client():
client = aip.PredictionServiceClient(client_options=client_options)
return client
def create_job_client():
client = aip.JobServiceClient(client_options=client_options)
return client
clients = {}
clients["dataset"] = create_dataset_client()
clients["model"] = create_model_client()
clients["pipeline"] = create_pipeline_client()
clients["endpoint"] = create_endpoint_client()
clients["prediction"] = create_prediction_client()
clients["job"] = create_job_client()
for client in clients.items():
print(client)
IMPORT_FILE = "gs://cloud-ml-data/NL-classification/happiness.csv"
! gsutil cat $IMPORT_FILE | head -n 10
###Output
_____no_output_____
###Markdown
*Example output*:```I went on a successful date with someone I felt sympathy and connection with.,affectionI was happy when my son got 90% marks in his examination,affectionI went to the gym this morning and did yoga.,exerciseWe had a serious talk with some friends of ours who have been flaky lately. They understood and we had a good evening hanging out.,bondingI went with grandchildren to butterfly display at Crohn Conservatory,affectionI meditated last night.,leisure"I made a new recipe for peasant bread, and it came out spectacular!",achievementI got gift from my elder brother which was really surprising me,affectionYESTERDAY MY MOMS BIRTHDAY SO I ENJOYED,enjoy_the_momentWatching cupcake wars with my three teen children,affection``` Create a dataset [projects.locations.datasets.create](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.datasets/create) Request
###Code
DATA_SCHEMA = TEXT_SCHEMA
dataset = {
"display_name": "happiness_" + TIMESTAMP,
"metadata_schema_uri": "gs://" + DATA_SCHEMA,
}
print(
MessageToJson(
aip.CreateDatasetRequest(parent=PARENT, dataset=dataset).__dict__["_pb"]
)
)
###Output
_____no_output_____
###Markdown
*Example output*:```{ "parent": "projects/migration-ucaip-training/locations/us-central1", "dataset": { "displayName": "happiness_20210226015238", "metadataSchemaUri": "gs://google-cloud-aiplatform/schema/dataset/metadata/text_1.0.0.yaml" }}``` Call
###Code
request = clients["dataset"].create_dataset(parent=PARENT, dataset=dataset)
###Output
_____no_output_____
###Markdown
Response
###Code
result = request.result()
print(MessageToJson(result.__dict__["_pb"]))
###Output
_____no_output_____
###Markdown
*Example output*:```{ "name": "projects/116273516712/locations/us-central1/datasets/574578388396670976", "displayName": "happiness_20210226015238", "metadataSchemaUri": "gs://google-cloud-aiplatform/schema/dataset/metadata/text_1.0.0.yaml", "labels": { "aiplatform.googleapis.com/dataset_metadata_schema": "TEXT" }, "metadata": { "dataItemSchemaUri": "gs://google-cloud-aiplatform/schema/dataset/dataitem/text_1.0.0.yaml" }}```
###Code
# The full unique ID for the dataset
dataset_id = result.name
# The short numeric ID for the dataset
dataset_short_id = dataset_id.split("/")[-1]
print(dataset_id)
###Output
_____no_output_____
###Markdown
[projects.locations.datasets.import](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.datasets/import) Request
###Code
LABEL_SCHEMA = IMPORT_SCHEMA_TEXT_CLASSIFICATION
import_config = {
"gcs_source": {"uris": [IMPORT_FILE]},
"import_schema_uri": LABEL_SCHEMA,
}
print(
MessageToJson(
aip.ImportDataRequest(
name=dataset_short_id, import_configs=[import_config]
).__dict__["_pb"]
)
)
###Output
_____no_output_____
###Markdown
*Example output*:```{ "name": "574578388396670976", "importConfigs": [ { "gcsSource": { "uris": [ "gs://cloud-ml-data/NL-classification/happiness.csv" ] }, "importSchemaUri": "gs://google-cloud-aiplatform/schema/dataset/ioformat/text_classification_single_label_io_format_1.0.0.yaml" } ]}``` Call
###Code
request = clients["dataset"].import_data(
name=dataset_id, import_configs=[import_config]
)
###Output
_____no_output_____
###Markdown
Response
###Code
result = request.result()
print(MessageToJson(result.__dict__["_pb"]))
###Output
_____no_output_____
###Markdown
*Example output*:```{}``` Train a model [projects.locations.trainingPipelines.create](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.trainingPipelines/create) Request
###Code
TRAINING_SCHEMA = TRAINING_TEXT_CLASSIFICATION_SCHEMA
task = json_format.ParseDict(
{
"multi_label": False,
},
Value(),
)
training_pipeline = {
"display_name": "happiness_" + TIMESTAMP,
"input_data_config": {"dataset_id": dataset_short_id},
"model_to_upload": {"display_name": "happiness_" + TIMESTAMP},
"training_task_definition": TRAINING_SCHEMA,
"training_task_inputs": task,
}
print(
MessageToJson(
aip.CreateTrainingPipelineRequest(
parent=PARENT, training_pipeline=training_pipeline
).__dict__["_pb"]
)
)
###Output
_____no_output_____
###Markdown
*Example output*:```{ "parent": "projects/migration-ucaip-training/locations/us-central1", "trainingPipeline": { "displayName": "happiness_20210226015238", "inputDataConfig": { "datasetId": "574578388396670976" }, "trainingTaskDefinition": "gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_text_classification_1.0.0.yaml", "trainingTaskInputs": { "multi_label": false }, "modelToUpload": { "displayName": "happiness_20210226015238" } }}``` Call
###Code
request = clients["pipeline"].create_training_pipeline(
parent=PARENT, training_pipeline=training_pipeline
)
###Output
_____no_output_____
###Markdown
Response
###Code
print(MessageToJson(request.__dict__["_pb"]))
###Output
_____no_output_____
###Markdown
*Example output*:```{ "name": "projects/116273516712/locations/us-central1/trainingPipelines/2903115317607661568", "displayName": "happiness_20210226015238", "inputDataConfig": { "datasetId": "574578388396670976" }, "trainingTaskDefinition": "gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_text_classification_1.0.0.yaml", "trainingTaskInputs": {}, "modelToUpload": { "displayName": "happiness_20210226015238" }, "state": "PIPELINE_STATE_PENDING", "createTime": "2021-02-26T02:23:54.166560Z", "updateTime": "2021-02-26T02:23:54.166560Z"}```
###Code
# The full unique ID for the training pipeline
training_pipeline_id = request.name
# The short numeric ID for the training pipeline
training_pipeline_short_id = training_pipeline_id.split("/")[-1]
print(training_pipeline_id)
###Output
_____no_output_____
###Markdown
[projects.locations.trainingPipelines.get](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.trainingPipelines/get) Call
###Code
request = clients["pipeline"].get_training_pipeline(name=training_pipeline_id)
###Output
_____no_output_____
###Markdown
Response
###Code
print(MessageToJson(request.__dict__["_pb"]))
###Output
_____no_output_____
###Markdown
*Example output*:```{ "name": "projects/116273516712/locations/us-central1/trainingPipelines/2903115317607661568", "displayName": "happiness_20210226015238", "inputDataConfig": { "datasetId": "574578388396670976" }, "trainingTaskDefinition": "gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_text_classification_1.0.0.yaml", "trainingTaskInputs": {}, "modelToUpload": { "name": "projects/116273516712/locations/us-central1/models/2369051733671280640", "displayName": "happiness_20210226015238" }, "state": "PIPELINE_STATE_SUCCEEDED", "createTime": "2021-02-26T02:23:54.166560Z", "startTime": "2021-02-26T02:23:54.396088Z", "endTime": "2021-02-26T06:08:06.548524Z", "updateTime": "2021-02-26T06:08:06.548524Z"}```
###Code
while True:
response = clients["pipeline"].get_training_pipeline(name=training_pipeline_id)
if response.state != aip.PipelineState.PIPELINE_STATE_SUCCEEDED:
print("Training job has not completed:", response.state)
model_to_deploy_name = None
if response.state == aip.PipelineState.PIPELINE_STATE_FAILED:
break
else:
model_id = response.model_to_upload.name
print("Training Time:", response.end_time - response.start_time)
break
time.sleep(20)
print(model_id)
###Output
_____no_output_____
###Markdown
Evaluate the model [projects.locations.models.evaluations.list](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.models.evaluations/list) Call
###Code
request = clients["model"].list_model_evaluations(parent=model_id)
###Output
_____no_output_____
###Markdown
Response
###Code
model_evaluations = [json.loads(MessageToJson(mel.__dict__["_pb"])) for mel in request]
print(json.dumps(model_evaluations, indent=2))
# The evaluation slice
evaluation_slice = request.model_evaluations[0].name
###Output
_____no_output_____
###Markdown
*Example output*:```[ { "name": "projects/116273516712/locations/us-central1/models/2369051733671280640/evaluations/1541152463304785920", "metricsSchemaUri": "gs://google-cloud-aiplatform/schema/modelevaluation/classification_metrics_1.0.0.yaml", "metrics": { "confusionMatrix": { "annotationSpecs": [ { "displayName": "exercise", "id": "952213353537732608" }, { "id": "1528674105841156096", "displayName": "achievement" }, { "id": "3258056362751426560", "displayName": "leisure" }, { "id": "3834517115054850048", "displayName": "bonding" }, { "id": "5563899371965120512", "displayName": "enjoy_the_moment" }, { "id": "6140360124268544000", "displayName": "nature" }, { "id": "8446203133482237952", "displayName": "affection" } ], "rows": [ [ 19.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 ], [ 0.0, 342.0, 5.0, 2.0, 13.0, 2.0, 13.0 ], [ 2.0, 10.0, 42.0, 1.0, 12.0, 0.0, 2.0 ], [ 0.0, 4.0, 0.0, 121.0, 1.0, 0.0, 4.0 ], [ 2.0, 29.0, 3.0, 2.0, 98.0, 0.0, 6.0 ], [ 0.0, 3.0, 0.0, 1.0, 0.0, 21.0, 1.0 ], [ 0.0, 7.0, 0.0, 1.0, 6.0, 0.0, 409.0 ] ] }, "confidenceMetrics": [ { "f1Score": 0.25, "recall": 1.0, "f1ScoreAt1": 0.88776374, "precisionAt1": 0.88776374, "precision": 0.14285715, "recallAt1": 0.88776374 }, { "confidenceThreshold": 0.05, "recall": 0.9721519, "f1Score": 0.8101266, "recallAt1": 0.88776374, "f1ScoreAt1": 0.88776374, "precisionAt1": 0.88776374, "precision": 0.69439423 }, REMOVED FOR BREVITY { "f1Score": 0.0033698399, "recall": 0.0016877637, "confidenceThreshold": 1.0, "recallAt1": 0.0016877637, "f1ScoreAt1": 0.0033698399, "precisionAt1": 1.0, "precision": 1.0 } ], "auPrc": 0.95903283, "logLoss": 0.08260541 }, "createTime": "2021-02-26T06:07:48.967028Z", "sliceDimensions": [ "annotationSpec" ] }]``` [projects.locations.models.evaluations.get](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.models.evaluations/get) Call
###Code
request = clients["model"].get_model_evaluation(name=evaluation_slice)
###Output
_____no_output_____
###Markdown
Response
###Code
print(MessageToJson(request.__dict__["_pb"]))
###Output
_____no_output_____
###Markdown
*Example output*:```{ "name": "projects/116273516712/locations/us-central1/models/2369051733671280640/evaluations/1541152463304785920", "metricsSchemaUri": "gs://google-cloud-aiplatform/schema/modelevaluation/classification_metrics_1.0.0.yaml", "metrics": { "confusionMatrix": { "annotationSpecs": [ { "displayName": "exercise", "id": "952213353537732608" }, { "displayName": "achievement", "id": "1528674105841156096" }, { "id": "3258056362751426560", "displayName": "leisure" }, { "id": "3834517115054850048", "displayName": "bonding" }, { "displayName": "enjoy_the_moment", "id": "5563899371965120512" }, { "displayName": "nature", "id": "6140360124268544000" }, { "id": "8446203133482237952", "displayName": "affection" } ], "rows": [ [ 19.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 ], [ 0.0, 342.0, 5.0, 2.0, 13.0, 2.0, 13.0 ], [ 2.0, 10.0, 42.0, 1.0, 12.0, 0.0, 2.0 ], [ 0.0, 4.0, 0.0, 121.0, 1.0, 0.0, 4.0 ], [ 2.0, 29.0, 3.0, 2.0, 98.0, 0.0, 6.0 ], [ 0.0, 3.0, 0.0, 1.0, 0.0, 21.0, 1.0 ], [ 0.0, 7.0, 0.0, 1.0, 6.0, 0.0, 409.0 ] ] }, "logLoss": 0.08260541, "confidenceMetrics": [ { "precision": 0.14285715, "precisionAt1": 0.88776374, "recall": 1.0, "f1ScoreAt1": 0.88776374, "recallAt1": 0.88776374, "f1Score": 0.25 }, { "f1Score": 0.8101266, "recall": 0.9721519, "precision": 0.69439423, "confidenceThreshold": 0.05, "recallAt1": 0.88776374, "precisionAt1": 0.88776374, "f1ScoreAt1": 0.88776374 }, REMOVED FOR BREVITY { "confidenceThreshold": 1.0, "f1Score": 0.0033698399, "f1ScoreAt1": 0.0033698399, "precisionAt1": 1.0, "precision": 1.0, "recall": 0.0016877637, "recallAt1": 0.0016877637 } ], "auPrc": 0.95903283 }, "createTime": "2021-02-26T06:07:48.967028Z", "sliceDimensions": [ "annotationSpec" ]}``` Make batch predictions Prepare files for batch prediction
###Code
test_item = ! gsutil cat $IMPORT_FILE | head -n1
test_item, test_label = str(test_item[0]).split(",")
print(test_item, test_label)
###Output
_____no_output_____
###Markdown
 *Example output*:```I went on a successful date with someone I felt sympathy and connection with. affection``` Make the batch input fileLet's now make a batch input file, which you store in your local Cloud Storage bucket. The batch input file can be either CSV or JSONL. You will use JSONL in this tutorial. For a JSONL file, you make one dictionary entry per line for each text file. The dictionary contains the key/value pairs:- `content`: The Cloud Storage path to the text file.- `mimeType`: The content type. In our example, it is a `text/plain` file.
###Code
import json
import tensorflow as tf
test_item_uri = "gs://" + BUCKET_NAME + "/test.txt"
with tf.io.gfile.GFile(test_item_uri, "w") as f:
f.write(test_item + "\n")
gcs_input_uri = "gs://" + BUCKET_NAME + "/test.jsonl"
with tf.io.gfile.GFile(gcs_input_uri, "w") as f:
data = {"content": test_item_uri, "mime_type": "text/plain"}
f.write(json.dumps(data) + "\n")
! gsutil cat $gcs_input_uri
! gsutil cat $test_item_uri
###Output
_____no_output_____
###Markdown
*Example output*:```{"content": "gs://migration-ucaip-trainingaip-20210226015238/test.txt", "mime_type": "text/plain"}I went on a successful date with someone I felt sympathy and connection with.``` [projects.locations.batchPredictionJobs.create](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.batchPredictionJobs/create) Request
###Code
batch_prediction_job = {
"display_name": "happiness_" + TIMESTAMP,
"model": model_id,
"input_config": {
"instances_format": "jsonl",
"gcs_source": {"uris": [gcs_input_uri]},
},
"output_config": {
"predictions_format": "jsonl",
"gcs_destination": {
"output_uri_prefix": "gs://" + f"{BUCKET_NAME}/batch_output/"
},
},
"dedicated_resources": {
"machine_spec": {
"machine_type": "n1-standard-2",
"accelerator_count": 0,
},
"starting_replica_count": 1,
"max_replica_count": 1,
},
}
print(
MessageToJson(
aip.CreateBatchPredictionJobRequest(
parent=PARENT, batch_prediction_job=batch_prediction_job
).__dict__["_pb"]
)
)
###Output
_____no_output_____
###Markdown
*Example output*:```{ "parent": "projects/migration-ucaip-training/locations/us-central1", "batchPredictionJob": { "displayName": "happiness_20210226015238", "model": "projects/116273516712/locations/us-central1/models/2369051733671280640", "inputConfig": { "instancesFormat": "jsonl", "gcsSource": { "uris": [ "gs://migration-ucaip-trainingaip-20210226015238/test.jsonl" ] } }, "outputConfig": { "predictionsFormat": "jsonl", "gcsDestination": { "outputUriPrefix": "gs://migration-ucaip-trainingaip-20210226015238/batch_output/" } }, "dedicatedResources": { "machineSpec": { "machineType": "n1-standard-2" }, "startingReplicaCount": 1, "maxReplicaCount": 1 } }}``` Call
###Code
request = clients["job"].create_batch_prediction_job(
parent=PARENT, batch_prediction_job=batch_prediction_job
)
###Output
_____no_output_____
###Markdown
Response
###Code
print(MessageToJson(request.__dict__["_pb"]))
###Output
_____no_output_____
###Markdown
*Example output*:```{ "name": "projects/116273516712/locations/us-central1/batchPredictionJobs/4770983263059574784", "displayName": "happiness_20210226015238", "model": "projects/116273516712/locations/us-central1/models/2369051733671280640", "inputConfig": { "instancesFormat": "jsonl", "gcsSource": { "uris": [ "gs://migration-ucaip-trainingaip-20210226015238/test.jsonl" ] } }, "outputConfig": { "predictionsFormat": "jsonl", "gcsDestination": { "outputUriPrefix": "gs://migration-ucaip-trainingaip-20210226015238/batch_output/" } }, "state": "JOB_STATE_PENDING", "completionStats": { "incompleteCount": "-1" }, "createTime": "2021-02-26T09:37:44.471843Z", "updateTime": "2021-02-26T09:37:44.471843Z"}```
###Code
# The fully qualified ID for the batch job
batch_job_id = request.name
# The short numeric ID for the batch job
batch_job_short_id = batch_job_id.split("/")[-1]
print(batch_job_id)
###Output
_____no_output_____
###Markdown
[projects.locations.batchPredictionJobs.get](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.batchPredictionJobs/get) Call
###Code
request = clients["job"].get_batch_prediction_job(name=batch_job_id)
###Output
_____no_output_____
###Markdown
Response
###Code
print(MessageToJson(request.__dict__["_pb"]))
###Output
_____no_output_____
###Markdown
*Example output*:```{ "name": "projects/116273516712/locations/us-central1/batchPredictionJobs/4770983263059574784", "displayName": "happiness_20210226015238", "model": "projects/116273516712/locations/us-central1/models/2369051733671280640", "inputConfig": { "instancesFormat": "jsonl", "gcsSource": { "uris": [ "gs://migration-ucaip-trainingaip-20210226015238/test.jsonl" ] } }, "outputConfig": { "predictionsFormat": "jsonl", "gcsDestination": { "outputUriPrefix": "gs://migration-ucaip-trainingaip-20210226015238/batch_output/" } }, "state": "JOB_STATE_PENDING", "completionStats": { "incompleteCount": "-1" }, "createTime": "2021-02-26T09:37:44.471843Z", "updateTime": "2021-02-26T09:37:44.471843Z"}```
###Code
def get_latest_predictions(gcs_out_dir):
""" Get the latest prediction subfolder using the timestamp in the subfolder name"""
folders = !gsutil ls $gcs_out_dir
latest = ""
for folder in folders:
subfolder = folder.split("/")[-2]
if subfolder.startswith("prediction-"):
if subfolder > latest:
latest = folder[:-1]
return latest
while True:
response = clients["job"].get_batch_prediction_job(name=batch_job_id)
if response.state != aip.JobState.JOB_STATE_SUCCEEDED:
print("The job has not completed:", response.state)
if response.state == aip.JobState.JOB_STATE_FAILED:
break
else:
folder = get_latest_predictions(
response.output_config.gcs_destination.output_uri_prefix
)
! gsutil ls $folder/prediction*.jsonl
! gsutil cat $folder/prediction*.jsonl
break
time.sleep(60)
###Output
_____no_output_____
###Markdown
*Example output*:```gs://migration-ucaip-trainingaip-20210226015238/batch_output/prediction-happiness_20210226015238-2021-02-26T09:37:44.261133Z/predictions_00001.jsonl{"instance":{"content":"gs://migration-ucaip-trainingaip-20210226015238/test.txt","mimeType":"text/plain"},"prediction":{"ids":["8446203133482237952","3834517115054850048","1528674105841156096","5563899371965120512","952213353537732608","3258056362751426560","6140360124268544000"],"displayNames":["affection","bonding","achievement","enjoy_the_moment","exercise","leisure","nature"],"confidences":[0.9183423,0.045685068,0.024327256,0.0057157497,0.0040851077,0.0012627868,5.8173126E-4]}}``` Make online predictions [projects.locations.endpoints.create](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.endpoints/create) Request
###Code
endpoint = {"display_name": "happiness_" + TIMESTAMP}
print(
MessageToJson(
aip.CreateEndpointRequest(parent=PARENT, endpoint=endpoint).__dict__["_pb"]
)
)
###Output
_____no_output_____
###Markdown
*Example output*:```{ "parent": "projects/migration-ucaip-training/locations/us-central1", "endpoint": { "displayName": "happiness_20210226015238" }}``` Call
###Code
request = clients["endpoint"].create_endpoint(parent=PARENT, endpoint=endpoint)
###Output
_____no_output_____
###Markdown
Response
###Code
result = request.result()
print(MessageToJson(result.__dict__["_pb"]))
###Output
_____no_output_____
###Markdown
*Example output*:```{ "name": "projects/116273516712/locations/us-central1/endpoints/7367713068517687296"}```
###Code
# The fully qualified ID for the endpoint
endpoint_id = result.name
# The short numeric ID for the endpoint
endpoint_short_id = endpoint_id.split("/")[-1]
print(endpoint_id)
###Output
_____no_output_____
###Markdown
[projects.locations.endpoints.deployModel](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.endpoints/deployModel) Request
###Code
deployed_model = {
"model": model_id,
"display_name": "happiness_" + TIMESTAMP,
"automatic_resources": {"min_replica_count": 1, "max_replica_count": 1},
}
traffic_split = {"0": 100}
print(
MessageToJson(
aip.DeployModelRequest(
endpoint=endpoint_id,
deployed_model=deployed_model,
traffic_split=traffic_split,
).__dict__["_pb"]
)
)
###Output
_____no_output_____
###Markdown
*Example output*:```{ "endpoint": "projects/116273516712/locations/us-central1/endpoints/7367713068517687296", "deployedModel": { "model": "projects/116273516712/locations/us-central1/models/2369051733671280640", "displayName": "happiness_20210226015238", "automaticResources": { "minReplicaCount": 1, "maxReplicaCount": 1 } }, "trafficSplit": { "0": 100 }}``` Call
###Code
request = clients["endpoint"].deploy_model(
endpoint=endpoint_id, deployed_model=deployed_model, traffic_split=traffic_split
)
###Output
_____no_output_____
###Markdown
Response
###Code
result = request.result()
print(MessageToJson(result.__dict__["_pb"]))
###Output
_____no_output_____
###Markdown
*Example output*:```{ "deployedModel": { "id": "418518105996656640" }}```
###Code
# The unique ID for the deployed model
deployed_model_id = result.deployed_model.id
print(deployed_model_id)
###Output
_____no_output_____
###Markdown
[projects.locations.endpoints.predict](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.endpoints/predict) Request
###Code
test_item = ! gsutil cat $IMPORT_FILE | head -n1
test_item, test_label = str(test_item[0]).split(",")
instances_list = [{"content": test_item}]
instances = [json_format.ParseDict(s, Value()) for s in instances_list]
request = aip.PredictRequest(
endpoint=endpoint_id,
)
request.instances.append(instances)
print(MessageToJson(request.__dict__["_pb"]))
###Output
_____no_output_____
###Markdown
*Example output*:```{ "endpoint": "projects/116273516712/locations/us-central1/endpoints/7367713068517687296", "instances": [ [ { "content": "I went on a successful date with someone I felt sympathy and connection with." } ] ]}``` Call
###Code
request = clients["prediction"].predict(endpoint=endpoint_id, instances=instances)
###Output
_____no_output_____
###Markdown
Response
###Code
print(MessageToJson(request.__dict__["_pb"]))
###Output
_____no_output_____
###Markdown
*Example output*:```{ "predictions": [ { "confidences": [ 0.8867673277854919, 0.024743923917412758, 0.0034913308918476105, 0.07936617732048035, 0.0013463868526741862, 0.0002393187169218436, 0.0040455833077430725 ], "displayNames": [ "affection", "achievement", "enjoy_the_moment", "bonding", "leisure", "nature", "exercise" ], "ids": [ "8446203133482237952", "1528674105841156096", "5563899371965120512", "3834517115054850048", "3258056362751426560", "6140360124268544000", "952213353537732608" ] } ], "deployedModelId": "418518105996656640"}``` [projects.locations.endpoints.undeployModel](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.endpoints/undeployModel) Call
###Code
request = clients["endpoint"].undeploy_model(
endpoint=endpoint_id, deployed_model_id=deployed_model_id, traffic_split={}
)
###Output
_____no_output_____
###Markdown
Response
###Code
result = request.result()
print(MessageToJson(result.__dict__["_pb"]))
###Output
_____no_output_____
###Markdown
*Example output*:```{}``` Cleaning up
To clean up all GCP resources used in this project, you can [delete the GCP
project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.
Otherwise, you can delete the individual resources you created in this tutorial.
###Code
delete_dataset = True
delete_model = True
delete_endpoint = True
delete_pipeline = True
delete_batchjob = True
delete_bucket = True
# Delete the dataset using the Vertex AI fully qualified identifier for the dataset
try:
if delete_dataset:
clients["dataset"].delete_dataset(name=dataset_id)
except Exception as e:
print(e)
# Delete the model using the Vertex AI fully qualified identifier for the model
try:
if delete_model:
clients["model"].delete_model(name=model_id)
except Exception as e:
print(e)
# Delete the endpoint using the Vertex AI fully qualified identifier for the endpoint
try:
if delete_endpoint:
clients["endpoint"].delete_endpoint(name=endpoint_id)
except Exception as e:
print(e)
# Delete the training pipeline using the Vertex AI fully qualified identifier for the training pipeline
try:
if delete_pipeline:
clients["pipeline"].delete_training_pipeline(name=training_pipeline_id)
except Exception as e:
print(e)
# Delete the batch job using the Vertex AI fully qualified identifier for the batch job
try:
if delete_batchjob:
clients["job"].delete_batch_prediction_job(name=batch_job_id)
except Exception as e:
print(e)
if delete_bucket and "BUCKET_NAME" in globals():
! gsutil rm -r gs://$BUCKET_NAME
###Output
_____no_output_____ |
tour2_constant_bond/2_1_1_PO_observation.ipynb | ###Markdown
 **2.1 Pull-out of elastic fiber from rigid matrix** [](https://moodle.rwth-aachen.de/mod/page/view.php?id=551807) The simplest possible pull-out model An analytical solution of the pull-out problem is obtained as follows: 1. Integrate the differential equilibrium equation relating the shear flow with the change of the normal force $A_\mathrm{f} \mathrm{d}\sigma_\mathrm{f}$ in the fiber on an infinitesimal element $\mathrm{d}x$\begin{align}\dfrac{\mathrm{d}\sigma_\mathrm{f}}{\mathrm{d}x} = \dfrac{p\bar{\tau}}{A_\mathrm{f}}\end{align}2. Substitute the result into the elastic constitutive law of the reinforcement\begin{align}\varepsilon_\mathrm{f} = \dfrac{\sigma_\mathrm{f}}{E_\mathrm{f}}\end{align}3. Substitute the result into the kinematic relation stating that the pull-out displacement is equal to the integral of the fiber strain along the debonded length\begin{align}u_\mathrm{f} = \int_{a}^0 \varepsilon_\mathrm{f} \, \mathrm{d}x\end{align}4. Identify the integration constants by applying the boundary conditions: equilibrium at the loaded end, and compatibility and smoothness at the end of the debonded zone $x = a$. These steps deliver the pull-out curve as a square root function\begin{align}P = \sqrt{p \bar{\tau} E_\mathrm{f} A_\mathrm{f} w}\end{align} Graphical summary of the model derivation  Model application: Let us utilize the derived model to simulate the test results of the RILEM pull-out test | Symbol | Unit | Description ||:- |:- |:- || $E_\mathrm{f}$ | MPa | Young's modulus of reinforcement || $\bar{\tau}$ | MPa | Bond stress || $A_\mathrm{f}$ | mm$^2$ | Cross-sectional area of reinforcement || $p$ | mm | Perimeter of contact between concrete and reinforcement | **Observation** - The measured displacements at the loaded and unloaded ends are different - Their difference increases with increasing bond length $L_\mathrm{b}$ - The pull-out curve has the shape of a square root function **Question** - Can the model derived above describe the debonding process correctly? Look inside the specimen using the model The parameters of the above experiment are specified as follows
###Code
ds = 16
A_f = (ds/2)**2 * 3.14 # mm^2 - reinforcement area
L_b = 5 * ds # mm - bond length
E_f = 210000 # MPa - reinforcement stiffness
p_b = 3.14 * ds # mm - bond perimeter
w_max = 0.12 # mm - maximum displacement
###Output
_____no_output_____
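###Markdown
 Before building the interactive model below, the closed-form relation derived above, $P = \sqrt{p \bar{\tau} E_\mathrm{f} A_\mathrm{f} w}$, can be evaluated directly with these parameters. The sketch below is only illustrative: the bond stress value `tau` is an assumption, since no bond stress is specified in this notebook.
###Code
# Minimal sketch of the analytical pull-out curve; tau is an ASSUMED bond stress value
import numpy as np
import matplotlib.pyplot as plt
tau = 8.0                                # MPa - assumed constant bond stress (illustrative only)
w = np.linspace(0, w_max, 100)           # mm  - pull-out displacement range
P = np.sqrt(p_b * tau * E_f * A_f * w)   # N   - pull-out force from the formula above
plt.plot(w, P)
plt.xlabel('w [mm]')
plt.ylabel('P [N]');
###Output
_____no_output_____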
###Markdown
**Construct the model:** To study the model behavior import the class `PO_ELF_RLM`, construct it with the defined parameters and run the `interact` method
###Code
%matplotlib widget
from pull_out import PO_ELF_RLM
po = PO_ELF_RLM(E_f=E_f, L_b=L_b, p=p_b, A_f=A_f, w_max=w_max)
###Output
_____no_output_____
###Markdown
 **Remark:** Note that the length $L_\mathrm{b}$ is not the end of the bond zone. It only measures the slip at the position $x = L_\mathrm{b}$ from the loaded end. However, the debonding process can continue beyond this length.
###Code
po.interact()
###Output
_____no_output_____ |
10A/Lista10A.ipynb | ###Markdown
 1) Determine whether the data are qualitative or quantitative. Explain your reasoning.- Heights of hot-air balloons -> Quantitative, because we can say how many meters the balloons reach on average.- Load capacities of pickup trucks -> Quantitative, we can calculate the amount of cargo the truck supports.- Eye colors of models -> Qualitative. We are observing the models' eye colors, so we are assigning a quality.- Student identity numbers -> Quantitative, we are observing how many student identities exist.- Responses in an opinion survey -> Quantitative. How many survey responses were collected. 2) A)- Sex (SEXO): Qualitative, because we are categorizing whether it is male or female, and it is qualitative nominal because the categories cannot be ordered quantitatively (**Qualitative nominal**).- Favorite subject (Predileta): Qualitative, because we are categorizing names, in this case the school subjects; they are nominal because names cannot be ordered quantitatively (**Qualitative nominal**).- Grade (Nota): **Quantitative discrete**, since the values can be enumerated and ordered.
###Code
#B)
# Frequency table
DFDOIS = pd.DataFrame({"SEXO": ['MASCULINO', 'FEMININO'],
"Frequência absoluta":[21, 21],
"Frequência relativa":[0.50,0.50]})
DFDOIS
DFTRES= pd.DataFrame({"Predileta":['Português','Matemática', 'História','Geografia','Ciências'],"frq abs Masculino":[3,6,4,5,3],"frq abs Feminino":[7,8,3,3,0]})
DFTRES
DFDOIS = pd.DataFrame({"NOTA": ['Português','Matemática', 'História','Geografia','Ciências'],
"Frequência absoluta":[21, 21],
"Frequência relativa":[0.50,0.50]})
DFDOIS
###Output
_____no_output_____
###Markdown
 **QUESTION 3**- PAP: Quantitative discrete, here we have the finite values 1 or 0, so they can be enumerated.- GI: Qualitative ordinal, we can order by level of schooling, but we cannot quantify the difference between household heads.- RES: Quantitative discrete, since the set is finite. We can say exactly how many members live in house A or house B.- RENDA: Quantitative continuous, since the values are expressed as real numbers, which can be summed. **QUESTION 3**
###Code
totalPAP = 40
frqNAO = 16/40
frqSIM = 24/40
df2 = pd.DataFrame({"PAP": ['1/SIM', '0/NÃO'],
"Frequência absoluta":[24, 16],
"Frequência relativa":[frqSIM,frqNAO]})
df2
gi1 = 6/40
gi2 = 11/40
gi3 = 23/40
df3 = pd.DataFrame({"GI": ['1 = nenhum grau oficialmente completo', ' 2 = primeiro grau completo',' 3 = segundo grau completo '],
"Frequência absoluta":[6, 11,23],
"Frequência relativa":[gi1,gi2,gi3]})
df3
n1=1/40
n2=3/40
n3=6/40
n4=13/40
n5=11/40
n6=4/40
n8=2/40
df4 = pd.DataFrame({"Número de pessoas redesentes na casa/RES":['1Pessoas','2Pessoas','3Pessoas','4Pessoas','5Pessoas','6Pessoas','8Pessoas'],"Frequência absoluta":[1,3,6,13,11,4,2],"Frequência relativa":[n1,n2,n3,n4,n5,n6,n8]})
df4
###Output
_____no_output_____ |
perceptron/non-separable.ipynb | ###Markdown
Linearly non-separable classesThe main limitation of perceptrons is that they only work with linearly separable classes.This is a variation of a previous exercise, with a slightly different dataset.
###Code
import numpy as np
import sklearn.linear_model
import matplotlib.pyplot as plt
from packages.plot import plot_decision_boundary, plot_data
%matplotlib inline
###Output
_____no_output_____
###Markdown
 Load in the dataAgain, there are two classes of dots (red and black), and each dot is defined by two features as before. So the structure of the dataset is exactly the same, consisting of a matrix `x` with as many rows as dots, and two columns, and the vector `y` with as many elements as dots. The value of `y[i]` is 0 for red dots and 1 for black dots. In fact, the values in `x` are exactly the same as in the previous exercise, but you must define the values in `y` so that the data points become linearly non-separable.
###Code
x = np.array([[2,2],[1,3],[2,3],[5,3],[7,3],[2,4],[3,4],\
[6,4],[1,5],[2,5],[5,5],[4,6],[6,6],[5,7]])
y = np.array([0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1])
###Output
_____no_output_____
###Markdown
 Plot the dataLet's represent the data graphically. Here you can see that the classes can't be separated by a single straight line.
###Code
plot_data(x, y)
plt.axis([0,8,0,8]);
###Output
_____no_output_____
###Markdown
Build the modelCreate a [perceptron object](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Perceptron.html).
###Code
# One pass over the data per fit() call; warm_start keeps the learned weights between calls
net = sklearn.linear_model.Perceptron(max_iter=1, warm_start=True)
###Output
_____no_output_____
###Markdown
TrainRepeat the following cell (`Ctrl+Enter`) until the model converges (or you are tired).
###Code
net.fit(x,y)
print("Coefficient 0: %6.3f" % net.coef_[0,0])
print("Coefficient 1: %6.3f" % net.coef_[0,1])
print(" Bias: %6.3f" % net.intercept_)
plot_data(x, y)
plt.axis([0,8,0,8]);
plot_decision_boundary(net)
print(' Target: %s' % np.array_str(y))
print('Prediction: %s' % np.array_str(net.predict(x)))
###Output
_____no_output_____ |
Spectral_Methods_Project_Random_SVD.ipynb | ###Markdown
 Spectral Methods in Data Processing (Fall 2019) - Final Project Random SVD and Random ID Results Reconstruction - Elad Eatah. A complete description of this project is available in [this repo](https://github.com/RedCrow9564/SpectralMethodsProject-RandomSVD.git). Getting Started: First, run the cells under "Infrastructure" one by one. Second, run the first two cells under "Main Components". Running Unit-Tests: You can then run the cells under "UnitTests" one by one; the output of the last cell gives the unit-test results. Performing an experiment: This is done by running the "main" cell. The number of the experiment to perform can be set in the form; it can be any integer between 1 and 5. Its results are then saved to a local directory on the remote machine, so make sure this directory exists on the remote machine.
###Code
#@title Dependencies installations
!pip install pandas "nptyping<2" sacred  # nptyping is pinned below 2.0, where the Array type imported later was removed
from IPython.display import clear_output
import numpy as np
import warnings
import psutil
from multiprocessing import Pool
import pandas as pd
import os
import pyximport
import cython
from sacred import Experiment
from time import perf_counter
import cpuinfo
#warnings.filterwarnings("error") # Uncomment to see warnings as errors.
%load_ext Cython
# Defining the "sacred" experiment object.
ex = Experiment(name="Initializing project", interactive=True)
cpu_info = cpuinfo.get_cpu_info()
cpu_count: int = cpu_info['count']
clear_output()
print(cpu_info['brand'])
print(f'Total CPUs: {cpu_count}')
print("Installation is done!")
#@title Common Utils
# -*- coding: utf-8 -*-
"""
utils.py - The common utilities functions and objects
=====================================================
This module contains all frequently-used methods and objects which can be shared among the entire project.
For example, data types name used for type-hinting, a basic enum class :class:`BaseEnum`, methods for measuring
run-time of a given function.
"""
from typing import List, Dict, Callable, Union, Iterator
from nptyping import Array
import inspect
# Defining the "sacred" experiment object.
ex = Experiment(name="Initializing project", interactive=True)
pyximport.install(setup_args={"include_dirs": np.get_include()}, reload_support=True)
# Naming data types for type hinting.
Number = Union[int, float]
Scalar = Union[Number, Array[float, 1, 1], Array[int, 1, 1]]
RowVector = Union[List[Scalar], Array[float, 1, ...], Array[int, 1, ...], Scalar]
ColumnVector = Union[List[Scalar], Array[float, ..., 1], Array[int, ..., 1], Scalar]
Vector = Union[RowVector, ColumnVector]
Matrix = Union[List[Vector], Array[float], Array[int], Vector, Scalar]
class _MetaEnum(type):
"""
A private meta-class which given any :class:`BaseEnum` object to be an iterable.
This can be used for iterating all possible values of this enum. Should not be used explicitly.
"""
def __iter__(self) -> Iterator:
"""
This method gives any BaseEnum the ability of iterating over all the enum's values.
Returns:
An iterator for the collection of all the enum's values.
"""
# noinspection PyUnresolvedReferences
return self.enum_iter()
def __contains__(self, item) -> bool:
"""
This method give any BaseEnum the ability to test if a given item is a possible value for this enum class.
Returns:
A flag which indicates if 'item' is a possible value for this enum class.
"""
# noinspection PyUnresolvedReferences
return self.enum_contains(item)
class BaseEnum(metaclass=_MetaEnum):
"""
A basic interface for all enum classes. Should be sub-classed in eny enum, i.e ``class ExperimentType(BaseEnum)``
"""
@classmethod
def enum_iter(cls) -> Iterator:
"""
This method gives any BaseEnum the ability of iterating over all the enum's values.
Returns:
An iterator for the collection of all the enum's values.
"""
return iter(cls.get_all_values())
@classmethod
def enum_contains(cls, item) -> bool:
"""
This method give any BaseEnum the ability to test if a given item is a possible value for this enum class.
Returns:
A flag which indicates if 'item' is a possible value for this enum class.
"""
return item in cls.get_all_values()
@classmethod
def get_all_values(cls) -> List:
"""
A method which fetches all possible values of an enum. Used for iterating over an enum.
Returns:
A list of all possible enum's values.
"""
all_attributes: List = inspect.getmembers(cls, lambda a: not inspect.ismethod(a))
all_attributes = [value for name, value in all_attributes if not (name.startswith('__') or name.endswith('__'))]
return all_attributes
def create_factory(possibilities_dict: Dict[str, Callable], are_methods: bool = False) -> Callable:
"""
A generic method for creating factories for the entire project.
Args:
possibilities_dict(Dict[str, Callable]): The dictionary which maps object types (as strings!) and returns the
relevant class constructors.
are_methods(bool): A flag, true if the factory output are methods, rather than objects. Defaults to False
Returns:
The factory function for the given classes/methods mapping.
"""
def factory_func(requested_object_type: str): # Inner function!
if requested_object_type not in possibilities_dict:
raise ValueError("Object type {0} is NOT supported".format(requested_object_type))
else:
if are_methods:
return possibilities_dict[requested_object_type]
else:
return possibilities_dict[requested_object_type]()
return factory_func
def measure_time(method: Callable) -> Callable:
"""
A method which receives a method and returns the same method, while including run-time measure
output for the given method, in seconds.
Args:
method(Callable): A method whose run-time we are interested in measuring.
Returns:
A function which does exactly the same, with an additional run-time output value in seconds.
"""
def timed(*args, **kw):
ts = perf_counter()
result = method(*args, **kw)
te = perf_counter()
        duration_in_sec: float = te - ts  # perf_counter() measures elapsed time in seconds
        if isinstance(result, tuple):
            return result + (duration_in_sec,)
        else:
            return result, duration_in_sec
timed.__name__ = method.__name__ + " with time measure"
return timed
def is_empty(collection: List) -> bool:
return len(collection) == 0
class DataLog:
"""
A class for log-management objects. See the following example for creating it: ``DataLog(["Column 1", "Column 2"])``
"""
def __init__(self, log_fields: List):
"""
This methods initializes an empty log.
Args:
log_fields(List) - A list of column names for this log.
"""
self._data: Dict = dict()
self._log_fields: List = log_fields
for log_field in log_fields:
self._data[log_field] = list()
def append(self, data_type: str, value: Scalar) -> None:
"""
This methods appends given data to the given column inside the log.
Example of usage:``log.append(DataFields.DataSize, 20)``
Args:
data_type(LogFields): The column name in which the input data in inserted to.
value(Scalar): The value to insert to the log.
"""
self._data[data_type].append(value)
def append_dict(self, data_dict: Dict) -> None:
"""
This methods takes the data from the input dictionary and inserts it to this log.
Args:
data_dict(Dict): The dictionary from which new data is taken and inserted to the log.
"""
for log_field, data_value in data_dict.items():
self.append(log_field, data_value)
def save_log(self, log_file_name: str, results_folder_path: str) -> None:
"""
This method saves the log to a file, with the input name, in the input folder path.
Args:
log_file_name(str): The name for this log file.
results_folder_path(str): The path in which this log will be saved.
"""
df = pd.DataFrame(self._data, columns=self._log_fields)
df.to_csv(os.path.join(results_folder_path, log_file_name + ".csv"), sep=",", float_format="%.2E", index=False)
ex.info["Experiment Log"] = self._data
print("Utils loaded successfully!")
#@title Enums
# -*- coding: utf-8 -*-
"""
enums.py - All enums section
============================
This module contains all possible enums of this project. Most of them are used by the configuration section in
:mod:`main`. An example for using enum: ``ExperimentType.ExampleNo1``
"""
from typing import Iterator, List
class LogFields(BaseEnum):
"""
The enum class of fields within experiments logs. Possible values:
* ``LogFields.DataSize``
* ``LogFields.ApproximationRank``
* ``LogFields.Increment``
* ``LogFields.NextSingularValue``
* ``LogFields.RandomSVDDuration``
* ``LogFields.RandomIDDuration``
* ``LogFields.RandomSVDAccuracy``
* ``LogFields.RandomIDAccuracy``
"""
DataSize: str = "Data size"
ApproximationRank: str = "k"
Increment: str = "increment"
NextSingularValue: str = "K+1 singular value"
RandomSVDDuration: str = "Random SVD Duration in seconds"
RandomIDDuration: str = "Random ID Duration in seconds"
RandomSVDAccuracy: str = "Random SVD Accuracy"
RandomIDAccuracy: str = "Random ID Accuracy"
class ExperimentType(BaseEnum):
"""
The enum class of experiment types. Possible values:
* ``ExperimentType.ExampleNo1``
* ``ExperimentType.ExampleNo2``
* ``ExperimentType.ExampleNo3``
* ``ExperimentType.ExampleNo4``
* ``ExperimentType.ExampleNo5``
"""
ExampleNo1: str = "Example No. 1"
ExampleNo2: str = "Example No. 2"
ExampleNo3: str = "Example No. 3"
ExampleNo4: str = "Example No. 4"
ExampleNo5: str = "Example No. 5"
print("Enums loaded successfully!")
###Output
Enums loaded successfully!
###Markdown
Main Components
###Code
#@title Data Loading for Experiments
# -*- coding: utf-8 -*-
"""
data_loader.py - The data management module
===========================================
This module handles creating the data for each experiment, as given in the configuration, and arranging it
for our estimation purposes. See the example below for fetching the data for Example no. 1.
Example:
get_data(ExperimentType.ExampleNo1) - Creating the data for Example no. 1 of the paper.
"""
from scipy.linalg import qr
def _random_orthonormal_cols(data_size: int, columns: int) -> Matrix:
return np.ascontiguousarray(qr(np.random.randn(data_size, columns), mode="economic", overwrite_a=True,
check_finite=False)[0])
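# Added sanity sketch (illustrative only): the helper above returns a matrix with orthonormal
# columns, so U.T @ U should be (numerically) the identity.
_U_demo = _random_orthonormal_cols(50, 5)
print("orthonormal columns:", np.allclose(_U_demo.T @ _U_demo, np.eye(5)))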
def _get_first_3_examples_data(data_size: int, singular_values: RowVector) -> Matrix:
"""
A method which creates a random matrix of size data_size x data_size with given singular values.
Args:
data_size(int): The input data size n.
singular_values(RowVector): The singular values to be set for the matrix to create.
Returns:
        A random data_size x data_size Matrix with the given singular values.
"""
rank: int = len(singular_values)
U: Matrix = _random_orthonormal_cols(data_size, rank)
V: Matrix = _random_orthonormal_cols(data_size, rank)
return MatInSVDForm(U, singular_values, V)
def _get_example_4_data(data_size: int, singular_values: RowVector) -> Matrix:
"""
A method which creates a data_size x data_size matrix whose singular values are the input values.
Args:
data_size(int): The input data size n.
singular_values(RowVector): The singular values to be set for the matrix to create.
Returns:
A data_size x data_size Matrix with the given singular values.
"""
U: Matrix = np.ascontiguousarray(
np.stack([
np.ones(data_size),
np.tile([1, -1], data_size // 2),
np.tile([1, 1, -1, -1], data_size // 4),
np.tile([1, 1, 1, 1, -1, -1, -1, -1], data_size // 8)]).T) / np.sqrt(data_size)
V: Matrix = np.ascontiguousarray(
np.stack([
np.concatenate([np.ones(data_size - 1), [0]]) / np.sqrt(data_size - 1),
np.concatenate([np.zeros(data_size - 1), [1]]),
np.concatenate([np.tile([1, -1], (data_size - 2) // 2) / np.sqrt(data_size - 2), [0, 0]]),
np.concatenate([[1, 0, -1], np.zeros(data_size - 3)]) / np.sqrt(2)]).T)
return MatInSVDForm(U, np.array(singular_values), V)
def _get_example_5_data(data_size: int, singular_values: RowVector) -> Matrix:
"""
    A method which creates a data_size x data_size matrix with singular value 1 and the given input
    singular value repeated for the rest of the spectrum.
    Args:
        data_size(int): The input data size n.
        singular_values(RowVector): A vector whose first entry is the singular value to repeat.
    Returns:
        A data_size x data_size Matrix with singular value 1 and the repeated input singular value.
"""
return ExperimentNo5Form((data_size, data_size), singular_values[0])
# A private dictionary used to create the method "get_data"
_data_type_to_function: Dict[str, Callable] = {
ExperimentType.ExampleNo1: _get_first_3_examples_data,
ExperimentType.ExampleNo2: _get_first_3_examples_data,
ExperimentType.ExampleNo3: _get_first_3_examples_data,
ExperimentType.ExampleNo4: _get_example_4_data,
ExperimentType.ExampleNo5: _get_example_5_data
}
# The public method which fetches the data loading methods.
get_data: Callable = create_factory(_data_type_to_function, are_methods=True)
print("Data loading Methods loaded successfully!")
#@title Randomized Decomposition Algorithms
%%cython
# cython: language_level=3, boundscheck=False, wraparound=False
# cython: initializedcheck=False, cdivision=True, nonecheck=False
import numpy as np
cimport numpy as np
cimport cython
from libc.math cimport sqrt
from numpy.linalg import svd
from scipy.linalg.interpolative import interp_decomp
"""
randomized_decompositions.pyx -
================================
dffdgdg
"""
cdef inline double[:, ::1] mat_scalar_product(const double[:, ::1] mat, const double scalar):
cdef double[:, ::1] result = np.empty_like(mat)
cdef Py_ssize_t i, j
for i in range(result.shape[0]):
for j in range(result.shape[1]):
result[i, j] = scalar * mat[i, j]
return result
cdef inline void vector_scalar_div(double[::1] vec, const double scalar):
cdef Py_ssize_t i, j
for j in range(vec.shape[0]):
vec[j] /= scalar
cdef inline double[:, :] multiply_by_diagonal_mat(const double[:, :] mat, const double[::1] vec):
cdef ssize_t i,j
cdef double[:, ::1] result = mat.copy()
for i in range(mat.shape[0]):
for j in range(mat.shape[1]):
result[i, j] = mat[i, j] * vec[i]
return result
cdef class ExperimentNo5Form:
cdef double sigma
cdef readonly Py_ssize_t shape[2]
cdef readonly np.dtype dtype
cdef double[:, ::1] common_vector
def __init__(self, const (Py_ssize_t, Py_ssize_t) mat_shape, const double sigma):
self.shape[0] = mat_shape[0]
self.shape[1] = mat_shape[1]
self.sigma = sigma
self.dtype = np.dtype(np.double)
cdef inline double[:, ::1] dot(self, const double[:, ::1] other_vector):
cdef double[::1] column_sums = np.sum(other_vector, axis=0)
cdef double[:, ::1] result = mat_scalar_product(other_vector, self.sigma)
for j in range(other_vector.shape[1]):
result[0, j] += column_sums[j] / sqrt(<double>self.shape[1])
return result
cdef inline double[:, ::1] transpose_dot(self, const double[:, ::1] other_vector):
cdef double[:, ::1] result = mat_scalar_product(other_vector, self.sigma)
cdef double[::1] first_row = other_vector[0, :].copy()
vector_scalar_div(first_row, sqrt(<double>self.shape[1]))
return np.tile(first_row, (self.shape[0], 1)) + result
cdef inline double[:, ::1] left_dot(self, const double[:, ::1] other_vector):
cdef double[:, ::1] result = mat_scalar_product(other_vector, self.sigma)
cdef double[::1] first_col = other_vector[:, 0].copy()
vector_scalar_div(first_col, sqrt(<double>self.shape[1]))
return np.tile(np.reshape(first_col, (-1, 1)), (1, self.shape[1])) + result
cdef inline double[:, ::1] slice_columns(self, const int[::1] idx):
cdef double[:, ::1] result = np.zeros((self.shape[0], len(idx)), dtype=np.double)
cdef Py_ssize_t i
for i in range(len(idx)):
result[0, i] += 1 / sqrt(<double>self.shape[1])
result[idx[i], i] += self.sigma
return result
def as_numpy_arr(self):
cdef double[:, ::1] result = self.sigma * np.eye(self.shape[0], self.shape[1])
cdef Py_ssize_t j
for j in range(self.shape[1]):
result[0, j] += 1 / sqrt(<double>self.shape[1])
return result.base
def matmat(self, other):
return self.dot(other)
def matvec(self, other):
return self.dot(other[:, None])
def rmatvec(self, other):
return self.transpose_dot(other[:, None])
cdef class MatInSVDForm:
cdef readonly const double[:, ::1] U, V
cdef readonly const double[::1] sigma
cdef readonly (Py_ssize_t, Py_ssize_t) shape
cdef readonly np.dtype dtype
def __init__(self, const double[:, ::1] mat_U, const double[::1] sigmas, const double[:, ::1] mat_V):
self.U = mat_U
self.sigma = sigmas
self.V = mat_V
self.shape = (mat_U.shape[0], mat_V.shape[0])
self.dtype = np.dtype(np.double)
cdef inline double[:, ::1] dot(self, const double[:, ::1] other_vector):
return np.dot(self.U, multiply_by_diagonal_mat(np.dot(self.V.T, other_vector), self.sigma))
cdef inline double[:, ::1] transpose_dot(self, const double[:, ::1] other_vector):
return np.dot(self.V, multiply_by_diagonal_mat(np.dot(self.U.T, other_vector), self.sigma))
cdef inline double[:, ::1] left_dot(self, const double[:, ::1] other_vector):
return np.dot(np.dot(other_vector, self.U), multiply_by_diagonal_mat(self.V.T, self.sigma))
cdef inline double[:, ::1] slice_columns(self, const int[::1] idx):
return np.dot(self.U, multiply_by_diagonal_mat(self.V.base[idx, :].T, self.sigma))
def as_numpy_arr(self):
return np.array(np.dot(self.U, np.multiply(self.V, self.sigma).T))
def matmat(self, other):
return self.dot(other)
def matvec(self, other):
return self.dot(other[:, None])
def rmatvec(self, other):
return self.transpose_dot(other[:, None])
cdef class MatInIDForm:
cdef readonly const double[:, ::1] B
cdef readonly const double[::1, :] P
cdef readonly (Py_ssize_t, Py_ssize_t) shape
cdef readonly np.dtype dtype
def __init__(self, const double[:, ::1] mat_B, const double[::1, :] mat_P):
self.B = mat_B
self.P = mat_P
self.shape = (mat_B.shape[0], mat_P.shape[1])
self.dtype = np.dtype(np.double)
cdef inline double[:, ::1] dot(self, const double[:, ::1] other_vector):
return np.dot(self.B, np.dot(self.P, other_vector))
cdef inline double[:, ::1] transpose_dot(self, const double[:, ::1] other_vector):
return np.dot(self.P.T, np.dot(self.B.T, other_vector))
cdef inline double[:, ::1] left_dot(self, const double[:, ::1] other_vector):
return np.dot(np.dot(other_vector, self.B), self.P)
cdef inline double[:, ::1] slice_columns(self, const int[::1] idx):
return np.dot(self.B, self.P.base[idx, :].T)
def as_numpy_arr(self):
return np.array(np.dot(self.B, self.P))
def matmat(self, other):
return self.dot(other)
def matvec(self, other):
return self.dot(other[:, None])
def rmatvec(self, other):
return self.transpose_dot(other[:, None])
ctypedef fused GeneralMat:
MatInSVDForm
MatInIDForm
ExperimentNo5Form
def random_svd(GeneralMat A, const int k, const int increment):
"""
    Args:
        A(Matrix): The matrix to approximate.
        k(int): Approximation rank.
        increment(int): The number of extra sampled columns in the approximation (beyond the first ``k`` columns).
    Returns:
        Matrices :math:`U, V` and :math:`\Sigma` for which the rank-``k`` SVD approximation is :math:`U \Sigma V^{T}`.
    """
cdef Py_ssize_t m = A.shape[0]
cdef const double[::1] sigma
cdef const double[:, ::1] Q, U, H
Q = svd(A.transpose_dot(np.random.randn(m, k + increment)), full_matrices=False, compute_uv=True)[0]
Q = np.ascontiguousarray(Q[:, :k])
U, sigma, H = svd(A.dot(Q), full_matrices=False, compute_uv=True)
return MatInSVDForm(U, sigma, np.dot(Q, H.T))
def random_id(GeneralMat A, const int k, const int increment):
cdef Py_ssize_t m = A.shape[0]
cdef const int[::1] idx
cdef const double[::1, :] P, proj
cdef const double[:, ::1] B
idx, proj = interp_decomp(A.left_dot(np.random.randn(k + increment, m)).base, k, rand=False)[:2]
P = np.hstack([np.eye(k), proj])[:, np.argsort(idx)]
B = A.slice_columns(idx[:k])
return MatInIDForm(B, P)
print("Randommized Algorithms loaded successfully!")
#@title Main
experiment_number_from_one_to_five = 5 #@param {type:"integer"}
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
main.py - The main module of the project
========================================
This module contains the config for the experiment in the "config" function.
Running this module invokes the :func:`main` function, which then performs the experiment and saves its results
to the configured results folder. Example for running an experiment: ``python main.py``
"""
from scipy.linalg.interpolative import estimate_spectral_norm_diff, seed
def choose_singular_values(experiment_type: ExperimentType) -> RowVector:
"""
This function sets the needed singular values, according to the given experiment_type
Args:
experiment_type(ExperimentType): The performed experiment. For example, ``ExperimentType.ExampleNo1``.
Returns:
A RowVector of the required singular values.
"""
if experiment_type == ExperimentType.ExampleNo1:
return np.concatenate([np.flip(np.geomspace(0.2e-15, 1, num=10)), 0.2e-15 * np.ones(10)])
elif experiment_type == ExperimentType.ExampleNo2:
return np.concatenate([np.flip(np.geomspace(1e-8, 1, num=10)), 1e-8 * np.ones(10)])
elif experiment_type == ExperimentType.ExampleNo3:
return np.concatenate([np.flip(np.geomspace(1e-9, 1, num=30)), 1e-9 * np.ones(30)])
elif experiment_type == ExperimentType.ExampleNo4:
return [1, 1, 1e-8, 1e-8]
elif experiment_type == ExperimentType.ExampleNo5:
return [1e-7]
return [-1]
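# Added illustration: for Example No. 2 the spectrum decays geometrically from 1 down to 1e-8
# and is then padded with ten copies of 1e-8, giving 20 singular values in total.
print("Example No. 2 spectrum length:", len(choose_singular_values(ExperimentType.ExampleNo2)))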
def choose_increments(experiment_type: ExperimentType) -> List:
"""
This function sets the needed increments, according to the given experiment_type
Args:
experiment_type(ExperimentType): The performed experiment. For example, ``ExperimentType.ExampleNo1``.
Returns:
A list of the required increments.
"""
if experiment_type in [ExperimentType.ExampleNo1, ExperimentType.ExampleNo2,
ExperimentType.ExampleNo4, ExperimentType.ExampleNo5]:
return [0]
elif experiment_type == ExperimentType.ExampleNo3:
return [0, 1] + np.round(np.geomspace(2, 16, 4)).astype(int).tolist()
return [-1]
def choose_approximation_ranks(experiment_type: ExperimentType) -> List:
"""
This function sets the needed approximation ranks, according to the given experiment_type
Args:
experiment_type(ExperimentType): The performed experiment. For example, ``ExperimentType.ExampleNo1``.
Returns:
A list of the required approximation ranks.
"""
if experiment_type in [ExperimentType.ExampleNo1, ExperimentType.ExampleNo2, ExperimentType.ExampleNo5]:
return [10]
elif experiment_type == ExperimentType.ExampleNo3:
return [30]
elif experiment_type == ExperimentType.ExampleNo4:
return [2]
return [-1]
def choose_data_sizes(experiment_type: ExperimentType) -> List:
"""
This function sets the needed data sizes, according to the given experiment_type
Args:
experiment_type(ExperimentType): The performed experiment. For example, ``ExperimentType.ExampleNo1``.
Returns:
A list of the required data sizes.
"""
if experiment_type in [ExperimentType.ExampleNo1, ExperimentType.ExampleNo2, ExperimentType.ExampleNo5]:
return np.geomspace(1e+2, 1e+6, 5, dtype=int).tolist()
elif experiment_type == ExperimentType.ExampleNo3:
return [int(1e+5)]
elif experiment_type == ExperimentType.ExampleNo4:
return (4 * np.geomspace(1e+2, 1e+6, 5, dtype=int)).tolist()
return [-1]
@ex.config
def config():
""" Config section
    This function contains all possible configurations for all experiments. Full details on each configuration value
    can be found in :mod:`enums.py`.
"""
experiment_type: str = f'Example No. {experiment_number_from_one_to_five}'
singular_values: RowVector = choose_singular_values(experiment_type)
used_data_factory: Callable = get_data(experiment_type)
data_sizes: List = choose_data_sizes(experiment_type)
approximation_ranks: List = choose_approximation_ranks(experiment_type)
increments: List = choose_increments(experiment_type)
results_path: str = r'Results/'
power_method_iterations: int = 100
@ex.main
def main(data_sizes: List, approximation_ranks: List, increments: List,
singular_values: RowVector, used_data_factory: Callable,
results_path: str, experiment_type: str, power_method_iterations: int,
_seed) -> DataLog:
""" The main function of this project
    This function performs the desired experiment according to the given configuration.
The function runs the random_svd and random_id for every combination of data_size, approximation rank and increment
given in the config and saves all the results to a csv file in the results folder (given in the configuration).
"""
seed(_seed)
results_log = DataLog(LogFields) # Initializing an empty results log.
random_svd_with_run_time: Callable = measure_time(random_svd)
random_id_with_run_time: Callable = measure_time(random_id)
for data_size in data_sizes:
data_matrix: Matrix = used_data_factory(data_size, singular_values)
for approximation_rank in approximation_ranks:
next_singular_value: Scalar = singular_values[approximation_rank + 1] if \
approximation_rank < len(singular_values) else singular_values[-1]
for increment in increments:
# Executing all the tested methods.
random_svd_approximation, svd_duration = random_svd_with_run_time(data_matrix, approximation_rank, increment)
random_svd_accuracy: Scalar = estimate_spectral_norm_diff(data_matrix, random_svd_approximation,
power_method_iterations)
random_id_approximation, id_duration = random_id_with_run_time(data_matrix, approximation_rank, increment)
random_id_accuracy: Scalar = estimate_spectral_norm_diff(data_matrix, random_id_approximation,
power_method_iterations)
# Appending all the experiment results to the log.
results_log.append(LogFields.DataSize, data_size)
results_log.append(LogFields.ApproximationRank, approximation_rank)
results_log.append(LogFields.Increment, increment + approximation_rank)
results_log.append(LogFields.NextSingularValue, next_singular_value)
results_log.append(LogFields.RandomSVDAccuracy, random_svd_accuracy)
results_log.append(LogFields.RandomIDAccuracy, random_id_accuracy)
results_log.append(LogFields.RandomSVDDuration, svd_duration)
results_log.append(LogFields.RandomIDDuration, id_duration)
results_log.save_log(experiment_type + " results", results_folder_path=results_path)
print(f'{experiment_type} was performed and results were saved!')
return results_log
pd.DataFrame(ex.run().result._data)
###Output
WARNING - Initializing project - No observers have been added to this run
INFO - Initializing project - Running command 'main'
INFO - Initializing project - Started
INFO - Initializing project - Result: <__main__.DataLog object at 0x7f6b0ec57630>
INFO - Initializing project - Completed after 0:00:23
###Markdown
Unit-Tests
###Code
#@title Test data creation methods
# -*- coding: utf-8 -*-
"""
test_data_creation.py - tests for data creation methods
=======================================================
This module contains the tests for the data creation in all the examples.
"""
import unittest
from scipy.linalg import svdvals
class TestDataCreation(unittest.TestCase):
"""
A class which contains tests for the validity of the created data in all the examples
"""
def test_example_no_1_data(self):
"""
Test data creation for Example no. 1
This test validates the data created is ``data_size x data_size``, has rank 20
        and possesses the expected singular values.
"""
experiment_type: str = ExperimentType.ExampleNo1
data_size: int = 70
singular_values: RowVector = choose_singular_values(experiment_type)
rank: int = len(singular_values)
data: Matrix = get_data(experiment_type)(data_size, singular_values).as_numpy_arr()
calculated_singular_values: RowVector = svdvals(data, check_finite=False)[:rank]
self.assertTrue(np.allclose(data.shape, (data_size, data_size))) # Validate data shape.
self.assertEqual(np.linalg.matrix_rank(data, tol=1.7e-16), rank) # Validate data rank.
self.assertTrue(np.allclose(singular_values, calculated_singular_values)) # Validate singular values.
def test_example_no_2_data(self):
"""
Test data creation for Example no. 2
This test validates the data created is ``data_size x data_size``, has rank 20
        and possesses the expected singular values.
"""
experiment_type: str = ExperimentType.ExampleNo2
data_size: int = 70
singular_values: RowVector = choose_singular_values(experiment_type)
rank: int = len(singular_values)
data: Matrix = get_data(experiment_type)(data_size, singular_values).as_numpy_arr()
calculated_singular_values: RowVector = svdvals(data, check_finite=False)[:rank]
self.assertTrue(np.allclose(data.shape, (data_size, data_size))) # Validate data shape.
self.assertEqual(np.linalg.matrix_rank(data, tol=singular_values[rank - 1] / 2), rank) # Validate data rank.
self.assertTrue(np.allclose(singular_values, calculated_singular_values)) # Validate singular values.
def test_example_no_3_data(self):
"""
Test data creation for Example no. 3
This test validates the data created is ``data_size x data_size``, has rank 60
        and possesses the expected singular values.
"""
experiment_type: str = ExperimentType.ExampleNo3
data_size: int = 70
singular_values: RowVector = choose_singular_values(experiment_type)
rank: int = len(singular_values)
data: Matrix = get_data(experiment_type)(data_size, singular_values).as_numpy_arr()
calculated_singular_values: RowVector = svdvals(data, check_finite=False)[:rank]
self.assertTrue(np.allclose(data.shape, (data_size, data_size))) # Validate data shape.
self.assertEqual(np.linalg.matrix_rank(data, tol=singular_values[rank - 1] / 2), rank) # Validate data rank.
self.assertTrue(np.allclose(singular_values, calculated_singular_values)) # Validate singular values.
def test_example_no_4_data(self):
"""
Test data creation for Example no. 4
        This test validates the data created is ``data_size x data_size`` and possesses the expected singular values.
"""
experiment_type: str = ExperimentType.ExampleNo4
data_size: int = 80
singular_values: RowVector = choose_singular_values(experiment_type)
known_singular_values_num: int = len(singular_values)
data: Matrix = get_data(experiment_type)(data_size, singular_values).as_numpy_arr()
calculated_singular_values: RowVector = svdvals(data, check_finite=False)[:known_singular_values_num]
self.assertTrue(np.allclose(data.shape, (data_size, data_size))) # Validate data shape.
self.assertTrue(np.allclose(singular_values, calculated_singular_values)) # Validate singular values.
def test_example_no_5_data(self):
"""
Test data creation for Example no. 5
        This test validates the data created is ``data_size x data_size`` and possesses the configured singular
        value as the second largest singular value with multiplicity of at least ``data_size - 2``.
"""
experiment_type: str = ExperimentType.ExampleNo5
data_size: int = 70
singular_values: RowVector = choose_singular_values(experiment_type)
known_singular_values_num: int = data_size - 2
data: Matrix = get_data(experiment_type)(data_size, singular_values).as_numpy_arr()
calculated_singular_values: RowVector = svdvals(data, check_finite=False)[1:known_singular_values_num + 1]
known_singular_values: RowVector = singular_values[0] * np.ones(known_singular_values_num)
self.assertTrue(np.allclose(data.shape, (data_size, data_size))) # Validate data shape.
self.assertTrue(np.allclose(known_singular_values, calculated_singular_values)) # Validate singular values.
print("Loaded Unit-Tests for data creation successfully!")
#@title Test Randomized Algorithms
# -*- coding: utf-8 -*-
"""
test_random_id.py - tests for Randomized Interpolative Decomposition
====================================================================
This module contains the tests for the implementation of randomized ID algorithm.
"""
from numpy.random import randn as _randn
pyximport.install(setup_args={"include_dirs": np.get_include()}, reload_support=True)
class TestRandomID(unittest.TestCase):
"""
A class which contains tests for the validity of the random_id algorithm implementation.
"""
def setUp(self):
"""
This method sets the variables for the following tests.
"""
self._m = 100
self._n = 30
self._k = 5
self._increment = 20
self._A = get_data(ExperimentType.ExampleNo2)(self._m, np.arange(2 * self._k).astype(float))
self._approximation = random_id(self._A, self._k, self._increment)
self._B = self._approximation.B
self._P = self._approximation.P.base
self._A = self._A.as_numpy_arr()
self._n = self._A.shape[1]
self._approximation = self._approximation.as_numpy_arr()
def test_matrices_shapes(self):
"""
        This method tests the shapes of the matrices :math:`B` and :math:`P` in the decomposition.
"""
        self.assertEqual(self._B.shape, (self._m, self._k))
        self.assertEqual(self._P.shape, (self._k, self._n))
def test_interpolative_decomposition(self):
"""
        This method tests whether the decomposition satisfies the properties of an interpolative decomposition.
"""
        self.assertTrue(np.all(self._P <= 2)) # Validate entries of P are between -2 and 2.
self.assertTrue(np.all(self._P >= -2))
# Validate P's norm is bound by the theoretical bound
self.assertLessEqual(np.linalg.norm(self._P), np.sqrt(self._k * (self._n - self._k) + 1))
self.assertGreaterEqual(svdvals(self._P)[-1], 1) # Validate the least singular value of P is at least 1.
for unit_vector in np.eye(self._k): # Validate P has kxk identity matrix as a sub-matrix.
self.assertIn(unit_vector, self._P.T)
for col in self._B.T: # Validate every column of B is also a column of A.
self.assertIn(col, self._A.T)
def test_approximation_estimate(self):
"""
        This method tests whether the random ID satisfies the theoretical bound. There is a probability
        of less than :math:`10^{-17}` that this bound won't be satisfied...
"""
real_sigmas = np.linalg.svd(self._A, full_matrices=False, compute_uv=False)
estimate_error = np.linalg.norm(self._A - self._approximation)
expected_bound = 10 * np.sqrt(self._n * (self._k + self._increment) * self._m * self._k)
expected_bound *= real_sigmas[self._k]
self.assertLessEqual(estimate_error, expected_bound)
print("Loaded Unit-Tests for Random ID successfully!")
#@title Test Randomized SVD
# -*- coding: utf-8 -*-
"""
test_random_svd.py - tests for Randomized Singular-Value Decomposition
======================================================================
This module contains the tests for the implementation of randomized SVD algorithm.
"""
pyximport.install(setup_args={"include_dirs": np.get_include()}, reload_support=True)
class TestRandomSVD(unittest.TestCase):
"""
A class which contains tests for the validity of the random_svd algorithm implementation.
"""
def setUp(self):
"""
This method sets the variables for the following tests.
"""
self._m = 100
self._n = 30
self._k = 5
self._increment = 20
self._A = get_data(ExperimentType.ExampleNo2)(self._m, np.arange(2 * self._k).astype(float))
self._approximation = random_svd(self._A, self._k, self._increment)
self._U = self._approximation.U
self._sigma = self._approximation.sigma
self._VT = self._approximation.V.T
self._approximation = self._approximation.as_numpy_arr()
        self._A = self._A.as_numpy_arr()
        self._n = self._A.shape[1]
def test_matrices_shapes(self):
"""
        This method tests the shapes of the matrices :math:`U` and :math:`V` in the decomposition.
"""
        self.assertEqual(self._U.shape, (self._m, self._k))
        self.assertEqual(self._VT.shape, (self._k, self._n))
def test_matrices_svd_decomposition(self):
"""
        This method tests whether the output decomposition satisfies the properties of an SVD decomposition.
"""
self.assertTrue(np.allclose(np.dot(self._U.T, self._U), np.eye(self._k)))
self.assertTrue(np.allclose(np.dot(self._VT, self._VT.T), np.eye(self._k)))
self.assertTrue(np.all(self._sigma.base > 0))
def test_decomposition_rank(self):
"""
        This method tests whether the number of positive singular values is equal to the approximation rank.
"""
self.assertEqual(len(self._sigma), self._k)
def test_approximation_estimate(self):
"""
        This method tests whether the random SVD satisfies the theoretical bound. There is a probability
        of less than :math:`10^{-17}` that this bound won't be satisfied...
"""
real_sigmas = np.linalg.svd(self._A, full_matrices=False, compute_uv=False)
estimate_error = np.linalg.norm(self._A - self._approximation)
expected_bound = 10 * np.sqrt(self._n * (self._k + self._increment))
expected_bound *= real_sigmas[self._k]
self.assertLessEqual(estimate_error, expected_bound)
print("Loaded Unit-Tests for Random SVD successfully!")
#@title Running all these Unit-Tests
unittest.main(argv=[''], verbosity=2, exit=False)
###Output
_____no_output_____ |
3. Numpy, Pandas, Mathplotlib/NumPy/accessing-deleting-and-inserting-elements-into-ndarrays.ipynb | ###Markdown
Example 1. Access individual elements of 1-D array
###Code
import numpy as np
# We create a rank 1 ndarray that contains integers from 1 to 5
x = np.array([1, 2, 3, 4, 5])
# We print x
print()
print('x = ', x)
print()
# Let's access some elements with positive indices
print('This is First Element in x:', x[0])
print('This is Second Element in x:', x[1])
print('This is Fifth (Last) Element in x:', x[4])
print()
# Let's access the same elements with negative indices
print('This is First Element in x:', x[-5])
print('This is Second Element in x:', x[-4])
print('This is Fifth (Last) Element in x:', x[-1])
###Output
_____no_output_____
###Markdown
Example 2. Modify an element of 1-D array
###Code
# We create a rank 1 ndarray that contains integers from 1 to 5
x = np.array([1, 2, 3, 4, 5])
# We print the original x
print()
print('Original:\n x = ', x)
print()
# We change the fourth element in x from 4 to 20
x[3] = 20
# We print x after it was modified
print('Modified:\n x = ', x)
###Output
_____no_output_____
###Markdown
Example 3. Access individual elements of 2-D array
###Code
# We create a 3 x 3 rank 2 ndarray that contains integers from 1 to 9
X = np.array([[1,2,3],[4,5,6],[7,8,9]])
# We print X
print()
print('X = \n', X)
print()
# Let's access some elements in X
print('This is (0,0) Element in X:', X[0,0])
print('This is (0,1) Element in X:', X[0,1])
print('This is (2,2) Element in X:', X[2,2])
###Output
_____no_output_____
###Markdown
Example 4. Modify an element of 2-D array
###Code
# We create a 3 x 3 rank 2 ndarray that contains integers from 1 to 9
X = np.array([[1,2,3],[4,5,6],[7,8,9]])
# We print the original x
print()
print('Original:\n X = \n', X)
print()
# We change the (0,0) element in X from 1 to 20
X[0,0] = 20
# We print X after it was modified
print('Modified:\n X = \n', X)
###Output
_____no_output_____
###Markdown
Example 5. Delete elements
###Code
# We create a rank 1 ndarray
x = np.array([1, 2, 3, 4, 5])
# We create a rank 2 ndarray
Y = np.array([[1,2,3],[4,5,6],[7,8,9]])
# We print x
print()
print('Original x = ', x)
# We delete the first and last element of x
x = np.delete(x, [0,4])
# We print x with the first and last element deleted
print()
print('Modified x = ', x)
# We print Y
print()
print('Original Y = \n', Y)
# We delete the first row of y
w = np.delete(Y, 0, axis=0)
# We delete the first and last column of y
v = np.delete(Y, [0,2], axis=1)
# We print w
print()
print('w = \n', w)
# We print v
print()
print('v = \n', v)
###Output
_____no_output_____
###Markdown
Example 6. Append elements
###Code
# We create a rank 1 ndarray
x = np.array([1, 2, 3, 4, 5])
# We create a rank 2 ndarray
Y = np.array([[1,2,3],[4,5,6]])
# We print x
print()
print('Original x = ', x)
# We append the integer 6 to x
x = np.append(x, 6)
# We print x
print()
print('x = ', x)
# We append the integer 7 and 8 to x
x = np.append(x, [7,8])
# We print x
print()
print('x = ', x)
# We print Y
print()
print('Original Y = \n', Y)
# We append a new row containing 7,8,9 to y
v = np.append(Y, [[7,8,9]], axis=0)
# We append a new column containing 9 and 10 to y
q = np.append(Y,[[9],[10]], axis=1)
# We print v
print()
print('v = \n', v)
# We print q
print()
print('q = \n', q)
###Output
_____no_output_____
###Markdown
Example 7. Insert elements
###Code
# We create a rank 1 ndarray
x = np.array([1, 2, 5, 6, 7])
# We create a rank 2 ndarray
Y = np.array([[1,2,3],[7,8,9]])
# We print x
print()
print('Original x = ', x)
# We insert the integer 3 and 4 between 2 and 5 in x.
x = np.insert(x,2,[3,4])
# We print x with the inserted elements
print()
print('x = ', x)
# We print Y
print()
print('Original Y = \n', Y)
# We insert a row between the first and last row of y
w = np.insert(Y,1,[4,5,6],axis=0)
# We insert a column full of 5s between the first and second column of y
v = np.insert(Y,1,5, axis=1)
# We print w
print()
print('w = \n', w)
# We print v
print()
print('v = \n', v)
###Output
_____no_output_____
###Markdown
Example 8. Stack arrays
###Code
# We create a rank 1 ndarray
x = np.array([1,2])
# We create a rank 2 ndarray
Y = np.array([[3,4],[5,6]])
# We print x
print()
print('x = ', x)
# We print Y
print()
print('Y = \n', Y)
# We stack x on top of Y
z = np.vstack((x,Y))
# We stack x on the right of Y. We need to reshape x in order to stack it on the right of Y.
w = np.hstack((Y,x.reshape(2,1)))
# We print z
print()
print('z = \n', z)
# We print w
print()
print('w = \n', w)
###Output
_____no_output_____ |
jupyter_notebooks/rebound_inner_solar_system_plus_eccentric.ipynb | ###Markdown
Rebound Inner Solar System plus Eccentric Planet Simulation
###Code
import rebound
import numpy as np
from datetime import date,datetime
from IPython.display import Image
# get the file path of this Jupyter notebook
import ipynb_path # pip install ipynb-path
import os
path = os.path.dirname(ipynb_path.get())
# display notebook path
path
# paths of simulation directories relative to notebook path
sims_dir = '/sims'
sim_dir = '/sims/sim-01'
# create simulation directories if they do not exist
if not os.path.exists(path + sims_dir):
os.mkdir(path + sims_dir)
if not os.path.exists(path + sim_dir):
os.mkdir(path + sim_dir)
# display simulation directory path
path + sim_dir
# setup SIM or use an existing setup
# set to True if you want to create an updated simulation setup snapshot
force_new_snapshot = False
if os.path.exists(path + sim_dir + '/' + 'snapshot.bin') and not force_new_snapshot:
    sim = rebound.Simulation(path + sim_dir + '/' + 'snapshot.bin')
    print('existing simulation snapshot loaded')
else:
# Create simulation
sim = rebound.Simulation()
# Set initial value of G
sim.G = 6.6743e-11 # m^3 / kg s^2 for time (s), mass (kg), distance (m)
# Set simulation units. G will be automatically adjusted to match these units
sim.units = ['AU','yr','Msun']
# Add the Sun from JPL Horizons. This will take a few moments.
sim.add('Sun')
# Add Mercury from JPL Horizons
sim.add('Mercury')
# Add Venus from JPL Horizons
sim.add('Venus')
# Add Earth from JPL Horizons
sim.add('Earth')
# Add Mars from JPL Horizons
sim.add('Mars')
# Add our Eccentric orbit planet
sim.add(a=1.5, e=0.4)
# Save snapshot of simulation
sim.save(path + sim_dir + '/' + 'snapshot.bin')
print('new simulation created and saved')
# Current Simulation Time. Should be 0.0 years
sim.t
# Get the current time. This will be the simulation start time.
from astropy.time import Time
nt = Time.now()
nt.format = 'decimalyear'
time_now = nt.value
time_now
import imageio
import os
filenames = []
from IPython.display import display, clear_output
import matplotlib.pyplot as plt
# Move to the centre of mass of the system
sim.move_to_com()
# Simulation
for i in range(60):
# Timestep
sim.integrate(sim.t+0.03)
# Plot orbits. Use the 'fancy' option with stars and a glowing star.
fig, ax1, ax2, ax3 = rebound.OrbitPlot(sim, figsize=(15,13), slices=0.5, unitlabel='[AU]', color = ['lightsteelblue', 'orange', 'dodgerblue', 'chocolate', 'limegreen'], lw=2, fancy=True, xlim=[-2.6,2.6], ylim=[-2.6,2.6])
# Calculate current time in years and convert to 'YYYY, DD, Abreviated Month' format
# See here for options: https://docs.python.org/3.7/library/datetime.html#strftime-strptime-behavior
current_time = Time(str(time_now + round(sim.t,2)), format='decimalyear')
current_time_dt = datetime.strptime(current_time.to_value('iso', subfmt='date'), "%Y-%m-%d")
# Display current time
ax2.text(-2.1, 0.6, current_time_dt.strftime('%Y, %d %b'), color='white', fontsize=13)
display(fig)
# Create file name and append it to a list
filename = f'{i}.png'
filenames.append(path + sim_dir + '/' + filename)
# Save frame to directory
plt.savefig(path + sim_dir + '/' + filename, dpi=96)
plt.close()
clear_output(wait=True)
# Build GIF
with imageio.get_writer(path + sim_dir + '/' + 'rebound-inner-solar-system.gif', mode='I') as writer:
for filename in filenames:
image = imageio.imread(filename)
writer.append_data(image)
# Remove temporary files containing animation frame images
for filename in set(filenames):
os.remove(filename)
# Display completed GIF Animation
# with open(path + sim_dir + '/' + 'rebound-inner-solar-system.gif','rb') as file:
# display(Image(file.read()))
# Omitted: This adds GIF to notebook making the file much larger.
###Output
_____no_output_____ |
6_DQN_LunarLander.ipynb | ###Markdown
Deep Q-Network with Lunar Lander
This notebook shows an implementation of a DQN on the LunarLander environment.
Details on the environment can be found [here](https://gym.openai.com/envs/LunarLander-v2/).
Note: The following code is heavily inspired by [this](https://www.katnoria.com/nb_dqn_lunar/) blog post.
1. Setup
We first need to install some dependencies for using the environment:
###Code
!pip3 install box2d-py
import random
import sys
from time import time
from collections import deque, defaultdict, namedtuple
import numpy as np
import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
env = gym.make('LunarLander-v2')
env.seed(0)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
device
###Output
_____no_output_____
###Markdown
2. Define the neural network, the replay buffer and the agent
First, we define the neural network that predicts the Q-values for all actions, given a state as input.
This is a fully-connected neural net with two hidden layers using ReLU activations.
The last layer does not have any activation and outputs a Q-value for every action.
###Code
class QNetwork(nn.Module):
def __init__(self, state_size, action_size, seed):
super(QNetwork, self).__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, 32)
self.fc2 = nn.Linear(32, 64)
self.fc3 = nn.Linear(64, action_size)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
return self.fc3(x)
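# Added sanity sketch (not part of the original notebook): a batch of 4 random states of
# LunarLander's observation size (8) should map to 4 rows of Q-values, one value per action (4).
_demo_net = QNetwork(state_size=8, action_size=4, seed=0)
print(_demo_net(torch.randn(4, 8)).shape) # expected: torch.Size([4, 4])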
###Output
_____no_output_____
###Markdown
Next, we define a replay buffer that saves previous transitions (so-called `experiences`) and provides a `sample` function to randomly extract a batch of experiences from the buffer.
Note that experiences are internally saved as `numpy`-arrays. They are converted back to PyTorch tensors before being returned by the `sample`-method.
###Code
class ReplayBuffer:
def __init__(self, buffer_size, batch_size, seed):
self.batch_size = batch_size
self.seed = random.seed(seed)
self.memory = deque(maxlen=buffer_size) # maximum size of buffer
self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
def add(self, state, action, reward, next_state, done):
experience = self.experience(state, action, reward, next_state, done)
self.memory.append(experience)
def sample(self):
experiences = random.sample(self.memory, self.batch_size)
# Convert to PyTorch tensors
states = np.vstack([experience.state for experience in experiences if experience is not None])
states_tensor = torch.from_numpy(states).float().to(device)
actions = np.vstack([experience.action for experience in experiences if experience is not None])
actions_tensor = torch.from_numpy(actions).long().to(device)
rewards = np.vstack([experience.reward for experience in experiences if experience is not None])
rewards_tensor = torch.from_numpy(rewards).float().to(device)
next_states = np.vstack([experience.next_state for experience in experiences if experience is not None])
next_states_tensor = torch.from_numpy(next_states).float().to(device)
# Convert done flag from boolean to int
dones = np.vstack([experience.done for experience in experiences if experience is not None]).astype(np.uint8)
dones_tensor = torch.from_numpy(dones).float().to(device)
return (states_tensor, actions_tensor, rewards_tensor, next_states_tensor, dones_tensor)
def __len__(self):
return len(self.memory)
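# Added usage sketch (illustrative only): fill a tiny buffer with dummy transitions and draw one batch.
_demo_buffer = ReplayBuffer(buffer_size=10, batch_size=2, seed=0)
for _i in range(3):
    _demo_buffer.add(np.zeros(8), 0, 0.0, np.ones(8), False)
_states, _actions, _rewards, _next_states, _dones = _demo_buffer.sample()
print(_states.shape, _actions.shape, _dones.shape) # expected: torch.Size([2, 8]) torch.Size([2, 1]) torch.Size([2, 1])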
BUFFER_SIZE = int(1e5) # Replay memory size
BATCH_SIZE = 64 # Number of experiences to sample from memory
GAMMA = 0.99 # Discount factor
TAU = 1e-3 # Soft update parameter for updating fixed q network
LR = 1e-4 # Q Network learning rate
UPDATE_EVERY = 4 # How often to update Q network
class DQNAgent:
def __init__(self, state_size, action_size, seed):
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(seed)
# Initialize Q and Fixed Q networks
self.q_network = QNetwork(state_size, action_size, seed).to(device)
self.fixed_network = QNetwork(state_size, action_size, seed).to(device)
        self.optimizer = optim.Adam(self.q_network.parameters(), lr=LR)
        # Initialise memory
self.memory = ReplayBuffer(BUFFER_SIZE, BATCH_SIZE, seed)
self.timestep = 0
def step(self, state, action, reward, next_state, done):
self.memory.add(state, action, reward, next_state, done)
self.timestep += 1
# trigger training
if self.timestep % UPDATE_EVERY == 0:
if len(self.memory) > BATCH_SIZE: # only when buffer is filled
sampled_experiences = self.memory.sample()
self.learn(sampled_experiences)
def learn(self, experiences):
states, actions, rewards, next_states, dones = experiences
action_values = self.fixed_network(next_states).detach()
max_action_values = action_values.max(1)[0].unsqueeze(1)
# If "done" just use reward, else update Q_target with discounted action values
Q_target = rewards + (GAMMA * max_action_values * (1 - dones))
Q_expected = self.q_network(states).gather(1, actions)
# Calculate loss and update weights
loss = F.mse_loss(Q_expected, Q_target)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# Update fixed weights
self.update_fixed_network(self.q_network, self.fixed_network)
def update_fixed_network(self, q_network, fixed_network):
for source_parameters, target_parameters in zip(q_network.parameters(), fixed_network.parameters()):
target_parameters.data.copy_(TAU * source_parameters.data + (1.0 - TAU) * target_parameters.data)
def act(self, state, eps=0.0):
rnd = random.random()
if rnd < eps:
return np.random.randint(self.action_size)
else:
state = torch.from_numpy(state).float().unsqueeze(0).to(device)
action_values = self.q_network(state)
action = np.argmax(action_values.cpu().data.numpy())
return action
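# Added smoke test (illustrative, not part of the original notebook): with eps=1.0 the agent acts
# uniformly at random, with eps=0.0 it acts greedily w.r.t. its (still untrained) Q-network.
_demo_agent = DQNAgent(state_size=8, action_size=4, seed=0)
print("greedy action for a random state:", _demo_agent.act(np.random.randn(8), eps=0.0))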
###Output
_____no_output_____
###Markdown
3. Execute episodes and train the model
We first define some parameters which guide the training process:
###Code
MAX_EPISODES = 2000 # Max number of episodes to play
MAX_STEPS = 1000 # Max steps allowed in a single episode/play
# Epsilon schedule
EPS_START = 1.0 # Default/starting value of eps
EPS_DECAY = 0.999 # Epsilon decay rate
EPS_MIN = 0.01 # Minimum epsilon
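# Added illustration: with this schedule, epsilon after n episodes is max(EPS_START * EPS_DECAY ** n, EPS_MIN),
# e.g. roughly 0.37 after 1000 episodes.
print('epsilon after 1000 episodes: {:.3f}'.format(max(EPS_START * EPS_DECAY ** 1000, EPS_MIN)))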
###Output
_____no_output_____
###Markdown
Then we start executing episodes and observe the mean score per episode.
The environment is considered solved if this score is above 200.
###Code
# Get state and action sizes
state_size = env.observation_space.shape[0]
action_size = env.action_space.n
print('State size: {}, action size: {}'.format(state_size, action_size))
dqn_agent = DQNAgent(state_size, action_size, seed=0)
start = time()
# Maintain a list of last 100 scores
scores_window = deque(maxlen=100)
eps = EPS_START
for episode in range(1, MAX_EPISODES + 1 ):
state = env.reset()
score = 0
for t in range(MAX_STEPS):
action = dqn_agent.act(state, eps)
next_state, reward, done, info = env.step(action)
dqn_agent.step(state, action, reward, next_state, done)
state = next_state
score += reward
if done:
break
eps = max(eps * EPS_DECAY, EPS_MIN)
scores_window.append(score)
if episode % 99 == 0:
mean_score = np.mean(scores_window)
print('Progress {}/{}, average score:{:.2f}'.format(episode, MAX_EPISODES, mean_score))
mean_score = np.mean(scores_window)
if mean_score >= 200:
print('\rEnvironment solved in {} episodes, average score: {:.2f}'.format(episode, mean_score))
sys.stdout.flush()
break
end = time()
print('Took {} seconds'.format(end - start))
###Output
State size: 8, action size: 4
Progress 99/2000, average score:-224.36
Progress 198/2000, average score:-64.88
Progress 297/2000, average score:-40.87
Progress 396/2000, average score:66.95
Progress 495/2000, average score:135.91
Progress 594/2000, average score:161.32
Progress 693/2000, average score:184.81
Progress 792/2000, average score:159.74
Progress 891/2000, average score:188.38
Progress 990/2000, average score:176.10
###Markdown
4. Play episode and record it
Use the trained model to play and record one episode. The recorded video will be stored in the `video` subfolder on disk.
###Code
import time
FPS = 25
record_folder="video"
env = gym.make('LunarLander-v2')
env = gym.wrappers.Monitor(env, record_folder, force=True)
state = env.reset()
total_reward = 0.0
while True:
start_ts = time.time()
env.render()
state = torch.from_numpy(state).float().unsqueeze(0).to(device)
action_values = dqn_agent.q_network(state)
action = np.argmax(action_values.cpu().data.numpy())
state, reward, done, _ = env.step(action)
total_reward += reward
if done:
break
delta = 1/FPS - (time.time() - start_ts)
if delta > 0:
time.sleep(delta)
print("Total reward: %.2f" % total_reward)
env.close()
###Output
_____no_output_____ |
Big-Data-Clusters/CU8/Public/content/repair/tsg050-timeout-expired-waiting-for-volumes.ipynb | ###Markdown
TSG050 - Cluster create hangs with "timeout expired waiting for volumes to attach or mount for pod"
===================================================================================================
Description
-----------
The controller gets stuck during the `bdc create` process.
> Events:
> Type Reason Age From Message
> ---- ------ ---- ---- -------
> Warning FailedScheduling 12m (x7 over 12m) default-scheduler pod has unbound immediate PersistentVolumeClaims (repeated 3 times)
> Normal Scheduled 12m default-scheduler Successfully assigned bdc/mssql-monitor-influxdb-0 to aks-nodepool1-32258814-0
> Warning FailedMount 1m (x5 over 10m) kubelet, aks-nodepool1-32258814-0 Unable to mount volumes for pod "mssql-monitor-influxdb-0\_bdc(888fb098-4857-11e9-92d1-0e4531614717)": timeout expired waiting for volumes to attach or mount for pod "bdc"/"mssql-controller-0". list of unmounted volumes=\[storage\]. list of unattached volumes=\[storage default-token-pj765\]
NOTE: This Warning often appears during a normal deployment, but it should clear up within a couple of minutes.
Steps
-----
Common functions
Define helper functions used in this notebook.
###Code
# Define `run` function for transient fault handling, suggestions on error, and scrolling updates on Windows
import sys
import os
import re
import json
import platform
import shlex
import shutil
import datetime
from subprocess import Popen, PIPE
from IPython.display import Markdown
retry_hints = {} # Output in stderr known to be transient, therefore automatically retry
error_hints = {} # Output in stderr where a known SOP/TSG exists which will be HINTed for further help
install_hint = {} # The SOP to help install the executable if it cannot be found
first_run = True
rules = None
debug_logging = False
def run(cmd, return_output=False, no_output=False, retry_count=0, base64_decode=False, return_as_json=False):
"""Run shell command, stream stdout, print stderr and optionally return output
NOTES:
1. Commands that need this kind of ' quoting on Windows e.g.:
kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='data-pool')].metadata.name}
Need to actually pass in as '"':
kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='"'data-pool'"')].metadata.name}
The ' quote approach, although correct when pasting into Windows cmd, will hang at the line:
`iter(p.stdout.readline, b'')`
The shlex.split call does the right thing for each platform, just use the '"' pattern for a '
"""
MAX_RETRIES = 5
output = ""
retry = False
global first_run
global rules
if first_run:
first_run = False
rules = load_rules()
# When running `azdata sql query` on Windows, replace any \n in """ strings, with " ", otherwise we see:
#
# ('HY090', '[HY090] [Microsoft][ODBC Driver Manager] Invalid string or buffer length (0) (SQLExecDirectW)')
#
if platform.system() == "Windows" and cmd.startswith("azdata sql query"):
cmd = cmd.replace("\n", " ")
# shlex.split is required on bash and for Windows paths with spaces
#
cmd_actual = shlex.split(cmd)
# Store this (i.e. kubectl, python etc.) to support binary context aware error_hints and retries
#
user_provided_exe_name = cmd_actual[0].lower()
# When running python, use the python in the ADS sandbox ({sys.executable})
#
if cmd.startswith("python "):
cmd_actual[0] = cmd_actual[0].replace("python", sys.executable)
# On Mac, when ADS is not launched from terminal, LC_ALL may not be set, which causes pip installs to fail
# with:
#
# UnicodeDecodeError: 'ascii' codec can't decode byte 0xc5 in position 4969: ordinal not in range(128)
#
# Setting it to a default value of "en_US.UTF-8" enables pip install to complete
#
if platform.system() == "Darwin" and "LC_ALL" not in os.environ:
os.environ["LC_ALL"] = "en_US.UTF-8"
# When running `kubectl`, if AZDATA_OPENSHIFT is set, use `oc`
#
if cmd.startswith("kubectl ") and "AZDATA_OPENSHIFT" in os.environ:
cmd_actual[0] = cmd_actual[0].replace("kubectl", "oc")
# To aid supportability, determine which binary file will actually be executed on the machine
#
which_binary = None
# Special case for CURL on Windows. The version of CURL in Windows System32 does not work to
# get JWT tokens, it returns "(56) Failure when receiving data from the peer". If another instance
# of CURL exists on the machine use that one. (Unfortunately the curl.exe in System32 is almost
# always the first curl.exe in the path, and it can't be uninstalled from System32, so here we
# look for the 2nd installation of CURL in the path)
if platform.system() == "Windows" and cmd.startswith("curl "):
path = os.getenv('PATH')
for p in path.split(os.path.pathsep):
p = os.path.join(p, "curl.exe")
if os.path.exists(p) and os.access(p, os.X_OK):
if p.lower().find("system32") == -1:
cmd_actual[0] = p
which_binary = p
break
# Find the path based location (shutil.which) of the executable that will be run (and display it to aid supportability), this
# seems to be required for .msi installs of azdata.cmd/az.cmd. (otherwise Popen returns FileNotFound)
#
# NOTE: Bash needs cmd to be the list of the space separated values hence shlex.split.
#
if which_binary == None:
which_binary = shutil.which(cmd_actual[0])
# Display an install HINT, so the user can click on a SOP to install the missing binary
#
if which_binary == None:
print(f"The path used to search for '{cmd_actual[0]}' was:")
print(sys.path)
if user_provided_exe_name in install_hint and install_hint[user_provided_exe_name] is not None:
display(Markdown(f'HINT: Use [{install_hint[user_provided_exe_name][0]}]({install_hint[user_provided_exe_name][1]}) to resolve this issue.'))
raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)")
else:
cmd_actual[0] = which_binary
start_time = datetime.datetime.now().replace(microsecond=0)
print(f"START: {cmd} @ {start_time} ({datetime.datetime.utcnow().replace(microsecond=0)} UTC)")
print(f" using: {which_binary} ({platform.system()} {platform.release()} on {platform.machine()})")
print(f" cwd: {os.getcwd()}")
# Command-line tools such as CURL and AZDATA HDFS commands output
# scrolling progress bars, which causes Jupyter to hang forever, to
# workaround this, use no_output=True
#
# Work around a infinite hang when a notebook generates a non-zero return code, break out, and do not wait
#
wait = True
try:
if no_output:
p = Popen(cmd_actual)
else:
p = Popen(cmd_actual, stdout=PIPE, stderr=PIPE, bufsize=1)
with p.stdout:
for line in iter(p.stdout.readline, b''):
line = line.decode()
if return_output:
output = output + line
else:
if cmd.startswith("azdata notebook run"): # Hyperlink the .ipynb file
regex = re.compile(' "(.*)"\: "(.*)"')
match = regex.match(line)
if match:
if match.group(1).find("HTML") != -1:
display(Markdown(f' - "{match.group(1)}": "{match.group(2)}"'))
else:
display(Markdown(f' - "{match.group(1)}": "[{match.group(2)}]({match.group(2)})"'))
wait = False
break # otherwise infinite hang, have not worked out why yet.
else:
print(line, end='')
if rules is not None:
apply_expert_rules(line)
if wait:
p.wait()
except FileNotFoundError as e:
if install_hint is not None:
display(Markdown(f'HINT: Use {install_hint} to resolve this issue.'))
raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") from e
exit_code_workaround = 0 # WORKAROUND: azdata hangs on exception from notebook on p.wait()
if not no_output:
for line in iter(p.stderr.readline, b''):
try:
line_decoded = line.decode()
except UnicodeDecodeError:
# NOTE: Sometimes we get characters back that cannot be decoded(), e.g.
#
# \xa0
#
# For example see this in the response from `az group create`:
#
# ERROR: Get Token request returned http error: 400 and server
# response: {"error":"invalid_grant",# "error_description":"AADSTS700082:
# The refresh token has expired due to inactivity.\xa0The token was
# issued on 2018-10-25T23:35:11.9832872Z
#
# which generates the exception:
#
# UnicodeDecodeError: 'utf-8' codec can't decode byte 0xa0 in position 179: invalid start byte
#
print("WARNING: Unable to decode stderr line, printing raw bytes:")
print(line)
line_decoded = ""
pass
else:
# azdata emits a single empty line to stderr when doing an hdfs cp, don't
# print this empty "ERR:" as it confuses.
#
if line_decoded == "":
continue
print(f"STDERR: {line_decoded}", end='')
if line_decoded.startswith("An exception has occurred") or line_decoded.startswith("ERROR: An error occurred while executing the following cell"):
exit_code_workaround = 1
# inject HINTs to next TSG/SOP based on output in stderr
#
if user_provided_exe_name in error_hints:
for error_hint in error_hints[user_provided_exe_name]:
if line_decoded.find(error_hint[0]) != -1:
display(Markdown(f'HINT: Use [{error_hint[1]}]({error_hint[2]}) to resolve this issue.'))
# apply expert rules (to run follow-on notebooks), based on output
#
if rules is not None:
apply_expert_rules(line_decoded)
# Verify if a transient error, if so automatically retry (recursive)
#
if user_provided_exe_name in retry_hints:
for retry_hint in retry_hints[user_provided_exe_name]:
if line_decoded.find(retry_hint) != -1:
if retry_count < MAX_RETRIES:
print(f"RETRY: {retry_count} (due to: {retry_hint})")
retry_count = retry_count + 1
output = run(cmd, return_output=return_output, retry_count=retry_count)
if return_output:
if base64_decode:
import base64
return base64.b64decode(output).decode('utf-8')
else:
return output
elapsed = datetime.datetime.now().replace(microsecond=0) - start_time
# WORKAROUND: We avoid infinite hang above in the `azdata notebook run` failure case, by inferring success (from stdout output), so
# don't wait here, if success known above
#
if wait:
if p.returncode != 0:
raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(p.returncode)}.\n')
else:
if exit_code_workaround !=0 :
raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(exit_code_workaround)}.\n')
print(f'\nSUCCESS: {elapsed}s elapsed.\n')
if return_output:
if base64_decode:
import base64
return base64.b64decode(output).decode('utf-8')
else:
return output
def load_json(filename):
"""Load a json file from disk and return the contents"""
with open(filename, encoding="utf8") as json_file:
return json.load(json_file)
def load_rules():
"""Load any 'expert rules' from the metadata of this notebook (.ipynb) that should be applied to the stderr of the running executable"""
# Load this notebook as json to get access to the expert rules in the notebook metadata.
#
try:
j = load_json("tsg050-timeout-expired-waiting-for-volumes.ipynb")
except:
pass # If the user has renamed the book, we can't load ourself. NOTE: Is there a way in Jupyter, to know your own filename?
else:
if "metadata" in j and \
"azdata" in j["metadata"] and \
"expert" in j["metadata"]["azdata"] and \
"expanded_rules" in j["metadata"]["azdata"]["expert"]:
rules = j["metadata"]["azdata"]["expert"]["expanded_rules"]
rules.sort() # Sort rules, so they run in priority order (the [0] element). Lowest value first.
# print (f"EXPERT: There are {len(rules)} rules to evaluate.")
return rules
def apply_expert_rules(line):
"""Determine if the stderr line passed in, matches the regular expressions for any of the 'expert rules', if so
inject a 'HINT' to the follow-on SOP/TSG to run"""
global rules
for rule in rules:
notebook = rule[1]
cell_type = rule[2]
output_type = rule[3] # i.e. stream or error
output_type_name = rule[4] # i.e. ename or name
output_type_value = rule[5] # i.e. SystemExit or stdout
details_name = rule[6] # i.e. evalue or text
expression = rule[7].replace("\\*", "*") # Something escaped *, and put a \ in front of it!
if debug_logging:
print(f"EXPERT: If rule '{expression}' satisfied', run '{notebook}'.")
if re.match(expression, line, re.DOTALL):
if debug_logging:
print("EXPERT: MATCH: name = value: '{0}' = '{1}' matched expression '{2}', therefore HINT '{4}'".format(output_type_name, output_type_value, expression, notebook))
match_found = True
display(Markdown(f'HINT: Use [{notebook}]({notebook}) to resolve this issue.'))
print('Common functions defined successfully.')
# Hints for binary (transient fault) retry, (known) error and install guide
#
retry_hints = {'kubectl': ['A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond']}
error_hints = {'kubectl': [['no such host', 'TSG010 - Get configuration contexts', '../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb'], ['No connection could be made because the target machine actively refused it', 'TSG056 - Kubectl fails with No connection could be made because the target machine actively refused it', '../repair/tsg056-kubectl-no-connection-could-be-made.ipynb']]}
install_hint = {'kubectl': ['SOP036 - Install kubectl command line interface', '../install/sop036-install-kubectl.ipynb']}
###Output
_____no_output_____
###Markdown
Instantiate Kubernetes client
###Code
# Instantiate the Python Kubernetes client into 'api' variable
import os
from IPython.display import Markdown
try:
from kubernetes import client, config
from kubernetes.stream import stream
if "KUBERNETES_SERVICE_PORT" in os.environ and "KUBERNETES_SERVICE_HOST" in os.environ:
config.load_incluster_config()
else:
try:
config.load_kube_config()
except:
display(Markdown(f'HINT: Use [TSG118 - Configure Kubernetes config](../repair/tsg118-configure-kube-config.ipynb) to resolve this issue.'))
raise
api = client.CoreV1Api()
print('Kubernetes client instantiated')
except ImportError:
display(Markdown(f'HINT: Use [SOP059 - Install Kubernetes Python module](../install/sop059-install-kubernetes-module.ipynb) to resolve this issue.'))
raise
###Output
_____no_output_____
###Markdown
Get the namespace for the big data cluster

Get the namespace of the Big Data Cluster from the Kubernetes API.

**NOTE:** If there is more than one Big Data Cluster in the target Kubernetes cluster, then either:

- set \[0\] to the correct value for the big data cluster.
- set the environment variable AZDATA\_NAMESPACE before starting Azure Data Studio.
###Code
# Place Kubernetes namespace name for BDC into 'namespace' variable
if "AZDATA_NAMESPACE" in os.environ:
namespace = os.environ["AZDATA_NAMESPACE"]
else:
try:
namespace = api.list_namespace(label_selector='MSSQL_CLUSTER').items[0].metadata.name
except IndexError:
from IPython.display import Markdown
display(Markdown(f'HINT: Use [TSG081 - Get namespaces (Kubernetes)](../monitor-k8s/tsg081-get-kubernetes-namespaces.ipynb) to resolve this issue.'))
display(Markdown(f'HINT: Use [TSG010 - Get configuration contexts](../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb) to resolve this issue.'))
display(Markdown(f'HINT: Use [SOP011 - Set kubernetes configuration context](../common/sop011-set-kubernetes-context.ipynb) to resolve this issue.'))
raise
print('The kubernetes namespace for your big data cluster is: ' + namespace)
###Output
_____no_output_____
###Markdown
Get the name of the controller pod
###Code
label_selector = 'app=controller'
name=api.list_namespaced_pod(namespace, label_selector=label_selector).items[0].metadata.name
print ("Controller pod name: " + name)
###Output
_____no_output_____
###Markdown
Set the text to look for in pod events

Set the text to look for in pod events that demonstrates this TSG is applicable to the current cluster state.
###Code
kind="Pod"
precondition_text="timeout expired waiting for volumes to attach or mount for pod"
###Output
_____no_output_____
###Markdown
Get events for a Kubernetes resource

Get the events for a Kubernetes namespaced resource:
###Code
V1EventList=api.list_namespaced_event(namespace)
for event in V1EventList.items:
if (event.involved_object.kind==kind and event.involved_object.name==name):
print(event.message)
###Output
_____no_output_____
###Markdown
PRECONDITION CHECK
###Code
precondition=False
for event in V1EventList.items:
if (event.involved_object.kind==kind and event.involved_object.name==name):
if event.message.find(precondition_text) != -1:
precondition=True
if not precondition:
raise Exception("PRECONDITION NON-MATCH: 'tsg050-timeout-expired-waiting-for-volumes' is not a match for an active problem")
print("PRECONDITION MATCH: 'tsg050-timeout-expired-waiting-for-volumes' is a match for an active problem in this cluster")
###Output
_____no_output_____
###Markdown
Resolution
----------

Delete the pod that is stuck trying to mount a PV (Persistent Volume); the higher-level Kubernetes resource (statefulset, replicaset etc.) will re-create the Pod.
###Code
run(f'kubectl delete pod/{name} -n {namespace}')
###Output
_____no_output_____
###Markdown
Get the name of the new controller pod

Get the name of the new controller pod, and view the events to ensure the issue has cleaned up.
###Code
name=api.list_namespaced_pod(namespace, label_selector=label_selector).items[0].metadata.name
print("New controller pod name: " + name)
###Output
_____no_output_____
###Markdown
Get events for a Kubernetes resource

Get the events for a Kubernetes namespaced resource:
###Code
V1EventList=api.list_namespaced_event(namespace)
for event in V1EventList.items:
if (event.involved_object.kind==kind and event.involved_object.name==name):
print(event.message)
###Output
_____no_output_____
###Markdown
Validate that the new controller pod gets into a ‘Running’ state
###Code
run(f'kubectl get pod/{name} -n {namespace}')
print('Notebook execution complete.')
###Output
_____no_output_____ |
Exercises/ANN/ANN.ipynb | ###Markdown
Artificial Neural Network

This notebook was created by Camille-Amaury JUGE in order to better understand ANN principles and how they work (it follows the exercises proposed by Hadelin de Ponteves on Udemy: https://www.udemy.com/course/le-deep-learning-de-a-a-z/).

Imports
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# scikit
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.model_selection import train_test_split, cross_val_score, RandomizedSearchCV
from sklearn.metrics import confusion_matrix, accuracy_score
# keras
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.wrappers.scikit_learn import KerasClassifier
###Output
Using TensorFlow backend.
###Markdown
Dataset

The dataset deals with banking customers' accounts, with several pieces of information on each individual. The major problem to explore is that some of the customers are leaving the bank for reasons that we don't know. Our aim is then to find the profile(s) that match the leaving customers and find a solution to keep them.
###Code
df = pd.read_csv("Churn_Modelling.csv")
df.head()
###Output
_____no_output_____
###Markdown
First, we see that we only have categorical or continuous data. This will be easy to deal with using sklearn encoders. Next, we want to check the different distributions of the dataset.
###Code
df.isna().sum()
df.describe().transpose().round()
###Output
_____no_output_____
###Markdown
Here we can clearly see that:

* The percentage of people who left is lower than 25% (which is quite comforting).
* The distribution of the credit score is balanced (mean ~ median).
* The distribution of the Balance leans lower, meaning that there are more outliers with little money (mean < median).
* The distribution of the estimated Salary seems quite balanced too.
* The overall age is also balanced between younger and older people (who may earn more), with a median around 37 years old.
###Code
pd.DataFrame(df["Geography"].value_counts().transpose())
###Output
_____no_output_____
###Markdown
Here, we can see that the bank has collected twice as much data in France as in Germany and Spain. The network could therefore perform better on French customers.
###Code
pd.DataFrame(df["Gender"].value_counts().transpose())
###Output
_____no_output_____
###Markdown
The gender distribution is quite balanced; there are just fewer women.

Pre-Processing

We will now convert the categorical variables for the network, and also deal with useless variables such as id, name, ...
###Code
df_train = df.loc[:,df.columns[3:]]
df_train.head()
def label_encoder_converting(x):
label_encoder = LabelEncoder()
return label_encoder.fit_transform(x)
df_train["Gender"] = label_encoder_converting(df_train["Gender"])
df_train.head()
def one_hot_encoder_converting(x):
one_hot_encoder = OneHotEncoder(handle_unknown='ignore')
return one_hot_encoder.fit_transform(x).toarray()
geo_df = pd.DataFrame(one_hot_encoder_converting(df_train[["Geography"]]))
geo_df.head()
###Output
_____no_output_____
###Markdown
Since the country has only three possible values, two binary columns are enough to encode it, so one of the three one-hot columns is redundant and we delete it.
###Code
geo_df = geo_df.drop(labels=[0], axis=1)
df_train = df_train.drop(labels=["Geography"], axis=1)
df_train = pd.concat([df_train, geo_df], axis=1)
df_train.head()
y = df_train["Exited"]
X = df_train.drop(labels=["Exited"], axis=1)
###Output
_____no_output_____
###Markdown
We will now scale all values to help the network.
###Code
std_scaler = StandardScaler()
X = pd.DataFrame(std_scaler.fit_transform(X))
X.head()
X_train, X_test, y_train, y_test = train_test_split(X, y)
###Output
_____no_output_____
###Markdown
Model creation
###Code
def create_labels(labels, length):
x_labels = []
for i in range(length):
x_labels.append("Predicted {}".format(labels[i]))
y_labels = []
for i in range(length):
y_labels.append("Is {}".format(labels[i]))
return (x_labels, y_labels)
def heatmap_numbers(m, labels, title):
lenght = len(labels)
# Creating plot base config
fig, ax = plt.subplots(figsize=(16,10))
im = ax.imshow(m, cmap="copper")
# Creating labels
(x_axis_labels, y_axis_labels) = create_labels(labels, lenght)
# Positionning Labels on axis
ax.set_xticks(np.arange(lenght))
ax.set_xticklabels(x_axis_labels)
ax.set_yticks(np.arange(lenght))
ax.set_yticklabels(y_axis_labels)
ax.grid(False)
# Rotate labels on x axis
plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")
# Put values in each case
for i in range(lenght):
for j in range(lenght):
text = ax.text(j, i, m[i, j], ha="center", va="center", color="red", fontsize="large")
ax.set_title(title)
fig.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Simple Hidden Layer
###Code
# model creation
clf_model_1 = Sequential()
# Input layer and first Hidden Layer
clf_model_1.add(Dense(units=6, activation="relu",
kernel_initializer="uniform",
input_dim=X_train.shape[1]))
# OutputLayer
clf_model_1.add(Dense(units=1, activation="sigmoid",
kernel_initializer="uniform"))
clf_model_1.summary()
clf_model_1.compile(optimizer="adam", loss="binary_crossentropy",
metrics=["accuracy"])
clf_model_1.fit(X_train, y_train, validation_split=0.1, epochs=100)
seuil = 0.5
y_pred = (clf_model_1.predict(X_test) > seuil)
cf_matrix = confusion_matrix(y_test, y_pred)
heatmap_numbers(cf_matrix, ["Staying", "Leaving"], "Simple Hidden Layer NN, confusion matrix")
print("Accuracy on test set : {}".format(accuracy_score(y_test, y_pred)))
###Output
Accuracy on test set : 0.8372
###Markdown
Two Hidden Layer
###Code
clf_model_2 = Sequential()
clf_model_2.add(Dense(units=6, activation="relu",
kernel_initializer="uniform",
input_dim=X_train.shape[1]))
clf_model_2.add(Dense(units=3, activation="relu",
kernel_initializer="uniform"))
clf_model_2.add(Dense(units=1, activation="sigmoid",
kernel_initializer="uniform"))
clf_model_2.summary()
clf_model_2.compile(optimizer="adam", loss="binary_crossentropy",
metrics=["accuracy"])
clf_model_2.fit(X_train.values, y_train, validation_split=0.1, epochs=100)
seuil = 0.5
y_pred = (clf_model_2.predict(X_test) > seuil)
cf_matrix = confusion_matrix(y_test, y_pred)
heatmap_numbers(cf_matrix, ["Staying", "Leaving"], "Multi Hidden Layer NN, confusion matrix")
print("Accuracy on test set : {}".format(accuracy_score(y_test, y_pred)))
###Output
Accuracy on test set : 0.8328
###Markdown
Two Hidden Layer Increased
###Code
clf_model_3 = Sequential()
clf_model_3.add(Dense(units=10, activation="relu",
kernel_initializer="uniform",
input_dim=X_train.shape[1]))
clf_model_3.add(Dense(units=6, activation="relu",
kernel_initializer="uniform"))
clf_model_3.add(Dense(units=1, activation="sigmoid",
kernel_initializer="uniform"))
clf_model_3.summary()
clf_model_3.compile(optimizer="adam", loss="binary_crossentropy",
metrics=["accuracy"])
clf_model_3.fit(X_train.values, y_train, validation_split=0.1, epochs=100)
seuil = 0.5
y_pred = (clf_model_3.predict(X_test) > seuil)
cf_matrix = confusion_matrix(y_test, y_pred)
heatmap_numbers(cf_matrix, ["Staying", "Leaving"], "Multi Hidden Improved Layer NN, confusion matrix")
print("Accuracy on test set : {}".format(accuracy_score(y_test, y_pred)))
###Output
Accuracy on test set : 0.8556
###Markdown
Cross Validation

Now that we have done some basic neural network tests, we are going to try a more advanced method: cross validation (K-Fold). We seek a low variance (regularity in training) and a low bias (good accuracy).
###Code
def build_classifier():
clf_model = Sequential()
clf_model.add(Dense(units=10, activation="relu",
kernel_initializer="uniform",
input_dim=X_train.shape[1]))
clf_model.add(Dense(units=6, activation="relu",
kernel_initializer="uniform"))
clf_model.add(Dense(units=1, activation="sigmoid",
kernel_initializer="uniform"))
clf_model.summary()
clf_model.compile(optimizer="adam", loss="binary_crossentropy",
metrics=["accuracy"])
return clf_model
clf_1 = KerasClassifier(build_fn=build_classifier)
k_fold_scores = cross_val_score(clf_1, X=X_train, y=y_train, cv=10,
verbose=4, n_jobs=-1)
k_fold_scores.mean().round(3)
k_fold_scores.std().round(3)
###Output
_____no_output_____
###Markdown
Dropout

We will explore here a way to counterbalance overfitting called dropout.
###Code
clf_model = Sequential()
clf_model.add(Dense(units=10, activation="relu",
kernel_initializer="uniform",
input_dim=X_train.shape[1]))
clf_model.add(Dropout(rate=0.1))
clf_model.add(Dense(units=6, activation="relu",
kernel_initializer="uniform"))
clf_model.add(Dropout(rate=0.1))
clf_model.add(Dense(units=1, activation="sigmoid",
kernel_initializer="uniform"))
clf_model.summary()
###Output
Model: "sequential_4"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dense_9 (Dense) (None, 10) 120
_________________________________________________________________
dropout_1 (Dropout) (None, 10) 0
_________________________________________________________________
dense_10 (Dense) (None, 6) 66
_________________________________________________________________
dropout_2 (Dropout) (None, 6) 0
_________________________________________________________________
dense_11 (Dense) (None, 1) 7
=================================================================
Total params: 193
Trainable params: 193
Non-trainable params: 0
_________________________________________________________________
###Markdown
Improve by tuning hyperparameters

Use grid search or random search to optimize the hyperparameters.
###Code
def build_classifier(input_dim, layers_nb, function, kernel_init, optimizer):
clf_model = Sequential()
layers_nb = layers_nb.split("-")
for i in range(len(layers_nb)):
if i == 0:
clf_model.add(Dense(units=int(layers_nb[i]), activation="relu",
kernel_initializer=kernel_init,
input_dim=input_dim))
elif i == len(layers_nb) - 1:
clf_model.add(Dense(units=int(layers_nb[i]), activation=function,
kernel_initializer=kernel_init))
else:
clf_model.add(Dense(units=int(layers_nb[i]), activation="relu",
kernel_initializer=kernel_init))
clf_model.compile(optimizer=optimizer, loss="binary_crossentropy",
metrics=["accuracy"])
return clf_model
layer_nb = []
for i in range(3,11):
# single-layer
layer_nb.append("{}-1".format(i))
# multi-layer
for j in range(2,i+1):
layer_nb.append("{}-{}-1".format(i,j))
params = {
"epochs": range(20,101,20),
"optimizer":["adam", "rmsprop"],
"function":["sigmoid"],
"kernel_init":["uniform"],
"input_dim":[X_train.shape[1]],
"layers_nb":layer_nb
}
clf_1 = KerasClassifier(build_fn=build_classifier)
rs_cv = RandomizedSearchCV(estimator=clf_1, param_distributions=params,
scoring="accuracy", cv=10, verbose=1, n_jobs=-1)
rs_cv = rs_cv.fit(X_train, y_train)
rs_cv.best_params_
rs_cv.best_score_
###Output
_____no_output_____ |
Workshop2/notebooks/02 - Cross-validation and Grid Search.ipynb | ###Markdown
Cross-validation
###Code
import matplotlib.pyplot as plt
import numpy as np
import sklearn
sklearn.set_config(print_changed_only=True)
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
digits = load_digits()
X_train, X_test, y_train, y_test = train_test_split(
digits.data, digits.target)
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
cross_val_score(KNeighborsClassifier(),
X_train, y_train, cv=5)
from sklearn.model_selection import KFold, RepeatedStratifiedKFold
cross_val_score(KNeighborsClassifier(),
X_train, y_train, cv=KFold(n_splits=10, shuffle=True, random_state=42))
cross_val_score(KNeighborsClassifier(),
X_train, y_train,
cv=RepeatedStratifiedKFold(n_splits=10, n_repeats=10, random_state=42))
###Output
_____no_output_____
###Markdown
Grid Searches
=================

Grid-Search with built-in cross validation
###Code
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
###Output
_____no_output_____
###Markdown
Define parameter grid:
###Code
import numpy as np
rs = RepeatedStratifiedKFold(n_splits=5, n_repeats=2)
param_grid = {'C': 10. ** np.arange(-3, 3),
'gamma' : 10. ** np.arange(-5, 0)}
np.set_printoptions(suppress=True)
print(param_grid)
grid_search = GridSearchCV(SVC(), param_grid, verbose=3,cv=rs)
###Output
_____no_output_____
###Markdown
A GridSearchCV object behaves just like a normal classifier.
###Code
grid_search.fit(X_train, y_train)
grid_search.predict(X_test)
grid_search.score(X_test, y_test)
grid_search.best_params_
grid_search.best_score_
grid_search.best_estimator_
# We extract just the scores
scores = grid_search.cv_results_['mean_test_score']
scores = np.array(scores).reshape(6, 5)
plt.matshow(scores)
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(5), param_grid['gamma'])
plt.yticks(np.arange(6), param_grid['C']);
###Output
_____no_output_____
###Markdown
Exercises

Use GridSearchCV to adjust n_neighbors of KNeighborsClassifier.
###Code
from sklearn.neighbors import KNeighborsClassifier
param_grid = {'n_neighbors': [1, 3, 5, 7, 10]}
grid = GridSearchCV(KNeighborsClassifier(), param_grid=param_grid,
return_train_score=True)
grid.fit(X_train, y_train)
print("best parameters: %s" % grid.best_params_)
print("Training set accuracy: %s" % grid.score(X_train, y_train))
print("Test set accuracy: %s" % grid.score(X_test, y_test))
results = grid.cv_results_
plt.plot(param_grid['n_neighbors'], results['mean_train_score'], label="train")
plt.plot(param_grid['n_neighbors'], results['mean_test_score'], label="test")
plt.legend()
###Output
best parameters: {'n_neighbors': 1}
Training set accuracy: 1.0
Test set accuracy: 0.9822222222222222
|
My_GAN_Learning_1D_Gaussian.ipynb | ###Markdown
###Code
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from scipy.stats import norm
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Function to generate samples from Uniform [-1, 1] as input for the generator
###Code
def sample_noise(M):
z = np.float32(np.linspace(-1.0, 1.0, M) + np.random.random(M) * 0.01)
return z
sample_noise(10)
###Output
_____no_output_____
###Markdown
PLOT METRICS
###Code
def plot_fig(generate, discriminate):
xs = np.linspace(-5, 5, 1000)
plt.plot(xs, norm.pdf(xs, loc=mu, scale=sigma), label='p_data')
r = 100
xs = np.float32(np.linspace(-3, 3, r))
xs_tensor = Variable(torch.from_numpy(xs.reshape(r, 1)))
ds_tensor = discriminate(xs_tensor)
ds = ds_tensor.data.numpy()
plt.plot(xs, ds, label='decision boundary')
n=1000
zs = sample_noise(n)
plt.hist(zs, bins=20, density=True, label='noise')
zs_tensor = Variable(torch.from_numpy(np.float32(zs.reshape(n, 1))))
gs_tensor = generate(zs_tensor)
gs = gs_tensor.data.numpy()
plt.hist(gs, bins=20, density=True, label='generated')
plt.plot(xs, norm.pdf(xs, loc=np.mean(gs), scale=np.std(gs)), label='generated_dist')
plt.legend()
plt.xlim(-3,3)
plt.ylim(0,5)
plt.show()
###Output
_____no_output_____
###Markdown
GENERATOR
###Code
class Generator(nn.Module):
def __init__(self):
super(Generator, self).__init__()
self.l1 = nn.Linear(1, 10)
self.l1_relu = nn.ReLU()
self.l2 = nn.Linear(10, 10)
self.l2_relu = nn.ReLU()
self.l3 = nn.Linear(10, 1)
def forward(self, input):
output = self.l1(input)
output = self.l1_relu(output)
output = self.l2(output)
output = self.l2_relu(output)
output = self.l3(output)
return output
###Output
_____no_output_____
###Markdown
DISCRIMINATOR
###Code
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
self.l1 = nn.Linear(1, 10)
self.l1_tanh = nn.Tanh()
self.l2 = nn.Linear(10, 10)
self.l2_tanh = nn.Tanh()
self.l3 = nn.Linear(10, 1)
self.l3_sigmoid = nn.Sigmoid()
def forward(self, input):
output = self.l1_tanh(self.l1(input))
output = self.l2_tanh(self.l2(output))
output = self.l3_sigmoid(self.l3(output))
return output
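# Non-saturating generator loss: minimize -log(D(G(z))), pushing the discriminator's output on fakes towards 1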
def generator_criterion(d_output_g):
return -0.5 * torch.mean(torch.log(d_output_g))
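# Standard discriminator loss: minimize -(log(D(x)) + log(1 - D(G(z)))), averaged over the batch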
def discriminator_criterion(d_output_true, d_output_g):
return -0.5 * torch.mean(torch.log(d_output_true) + torch.log(1-d_output_g))
mu = 2
sigma = 0.2
M = 200
discriminate = Discriminator()
generate = Generator()
plot_fig(generate, discriminate)
epochs = 500
histd, histg = np.zeros(epochs), np.zeros(epochs)
k = 20
###Output
_____no_output_____
###Markdown
TRAIN
###Code
discriminate_optimizer = torch.optim.SGD(discriminate.parameters(), lr=0.1, momentum=0.6)
generate_optimizer = torch.optim.SGD(generate.parameters(), lr=0.01, momentum=0.6)
for i in range(epochs):
for j in range(k):
discriminate.zero_grad()
x = np.float32(np.random.normal(mu, sigma, M))
z = sample_noise(M)
z_tensor = Variable(torch.from_numpy(np.float32(z.reshape(M, 1))))
x_tensor = Variable(torch.from_numpy(np.float32(x.reshape(M, 1))))
g_out = generate(z_tensor)
d_out_true = discriminate(x_tensor)
d_out_g = discriminate(g_out)
loss = discriminator_criterion(d_out_true, d_out_g)
loss.backward()
discriminate_optimizer.step()
histd[i] = loss.data.numpy()
generate.zero_grad()
z = sample_noise(M)
z_tensor = Variable(torch.from_numpy(np.float32(z.reshape(M, 1))))
g_out = generate(z_tensor)
d_out_g = discriminate(g_out)
loss = generator_criterion(d_out_g)
loss.backward()
generate_optimizer.step()
histg[i] = loss.data.numpy()
if i % 10 == 0:
for param_group in generate_optimizer.param_groups:
param_group['lr'] *= 0.999
for param_group in discriminate_optimizer.param_groups:
param_group['lr'] *= 0.999
if i % 50 == 0:
plt.clf()
plot_fig(generate, discriminate)
plt.draw()
#LOSS CONVERGE
plt.plot(range(epochs), histd, label='Discriminator')
plt.plot(range(epochs), histg, label='Generator')
plt.legend()
plt.show()
plot_fig(generate, discriminate)
plt.show()
###Output
_____no_output_____ |
CNN_Speech_Keyword_Recognition.ipynb | ###Markdown
For convenience, we can increase the display width of the notebook to make better use of the widescreen format.
###Code
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:90% !important; }</style>"))
###Output
_____no_output_____
###Markdown
Next, we will import all the libraries that we need.
###Code
import numpy as np
import tensorflow as tf
from tensorflow import keras
from pathlib import Path
from scipy.io import wavfile
import python_speech_features
from tqdm.notebook import tqdm
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from livelossplot import PlotLossesKeras
import sounddevice as sd
%matplotlib inline
import matplotlib.pyplot as plt
from datetime import datetime
from scipy.signal import butter, sosfilt
from timeit import default_timer as timer
from IPython.display import clear_output
###Output
_____no_output_____
###Markdown
The Google Speech Command Dataset which we'll be using contains 30 different words, 20 core words and 10 auxiliary words. In this project we'll be using only the 20 core words.We can define ourselves a dictionary that maps each different word to a number and a list that does the inverse mapping. This is necessary because we need numerical class labels for our Neural Network.
###Code
word2index = {
# core words
"yes": 0,
"no": 1,
"up": 2,
"down": 3,
"left": 4,
"right": 5,
"on": 6,
"off": 7,
"stop": 8,
"go": 9,
"zero": 10,
"one": 11,
"two": 12,
"three": 13,
"four": 14,
"five": 15,
"six": 16,
"seven": 17,
"eight": 18,
"nine": 19,
}
index2word = [word for word in word2index]
###Output
_____no_output_____
###Markdown
Next, we will go through the dataset and save all the paths to the data samples in a list. You can download the Google Speech Commands dataset [here](http://download.tensorflow.org/data/speech_commands_v0.01.tar.gz) (1.4 GB!) and decompress it (e.g. with 7zip). Define the path where you stored the decompressed dataset as *speech_commands_dataset_basepath*. The dataset doesn't contain an equal number of samples for each word, but it contains >2000 valid samples for each of the 20 core words. Each sample is supposed to be exactly 1 s long, which at a sampling rate of 16 kHz and 16-bit quantization should result in files of 32044 bytes. Somehow some samples in the dataset are not exactly that size; we skip those. Additionally we'll use a nice [tqdm](https://tqdm.github.io/) progress bar to make it more fancy. In the end, we should have gathered a total of 40000 samples.
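As a quick sanity check of that file size (a side note added here, not part of the original text): 1 s of audio at 16 kHz with 16-bit (2-byte) samples gives 16000 * 2 = 32000 bytes of raw data, and the standard 44-byte PCM WAV header brings the total to 32044 bytes:

print(16000 * 2 + 44)  # 32044, assuming the standard 44-byte PCM WAV header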
###Code
num_classes = len(word2index)
num_samples_per_class = 2000
speech_commands_dataset_basepath = Path(r"C:\Users\tunin\Desktop\Marcel_Python\speech_command_dataset")
print("loading dataset...")
samples = []
classes = []
with tqdm(total=num_samples_per_class*20) as pbar:
for word_class in word2index:
folder = speech_commands_dataset_basepath / word_class # sub-folder for each word
count = 0
for file in folder.iterdir(): # iterate over all files in the folder
# somehow, there are samples which aren't exactly 1 s long in the dataset. ignore those
if file.stat().st_size == 32044:
samples.append(file) # store path of sample file
classes.append(word2index[word_class]) # append word class index to list
count +=1
pbar.update()
if count >= num_samples_per_class:
break
classes = np.array(classes, dtype=np.int)
###Output
loading dataset...
###Markdown
Next we'll define two functions to compute some [features](https://en.wikipedia.org/wiki/Feature_(machine_learning)) of the audio samples and another function which loads the sample wav-file and then computes the features. Before computing features, it is a good idea to normalize our input data. Depending on your recording device and other parameters, the amplitudes of the recorded audio signals may vary drastically and might even have an offset. Thus we can subtract the mean value to remove the offset and divide by the absolute maximum value of the signal, so that it's new range lies between -1.0 and +1.0.In this case, we'll use the so called "Mel-Frequency Cepstrum Coefficients", which are very commonly used for speech recognition tasks. The MFCCs are computed as follows:* Apply a simple pre-emphasis filter to the signal, to emphasize higher frequencies (optional): $y_t \leftarrow y_t - \alpha \cdot y_{t-1} \hspace{1mm} , \hspace{2mm} t=1...T$ * Extract snippets from the audio signal. A good choice for the length of the snippets is 25ms. The stride between snippets is 10ms, so the snippets will overlap. To "cut" the snippets from the audio signal, a window like the Hamming window is appropriate to mitigate the leakage effect when performing the Fourier Transform in the next step.* Calculate the FFT of the signal and then the power spectrum, i.e. the squared magnitude of the spectrum for each snippet.* Apply a Mel filterbank to the power spectrum of each snippet. The [Mel scale](https://en.wikipedia.org/wiki/Mel_scale) is a scale, that takes into account the fact, that in human auditory perception, the perceivable pitch (i.e. frequency) changes decrease with higher frequencies. This means e.g. that we can distinguish much better between a 200Hz and 300Hz tone than between a 10200 Hz and a 10300 Hz tone even though the absolute difference is the same. The filterbank consists of $N=40$ triangular filters evenly spaced in the Mel scale (and nonlinearly spaced in frequency scale). These filters are multiplied with the power spectrum, which gives us the sum of "energies" in each filter. Additionally, we take the log() of these energies.* The log-energies of adjacent filters usually correlate strongly. Therefore, a [Discrete Cosine Transform](https://en.wikipedia.org/wiki/Discrete_cosine_transform) is applied to the log filterbank energies of each snippet. The resulting values are called *Cepstral coefficients*. The zeroth coefficient represents the average log-energy in each snippet, it may or may not be discarded (here we'll keep it as a feature). Usually, only a subset, e.g. the first 8-12 Cepstral coefficients are used (here we'll use 20), the rest are discarded For more details about MFCC, a good source is: *pp. 85-72 in K.S. Rao and Manjunath K.E., "Speech Recognition Using Articulatory and Excitation Source Features", 2017, Springer* (pp. 85-92 available for preview at [https://link.springer.com/content/pdf/bbm%3A978-3-319-49220-9%2F1.pdf])Thankfully we don't have to implement the MFCC computation ourselves, we'll use the library [python_speech_features](https://python-speech-features.readthedocs.io/en/latest/).
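As a small illustration of the Mel scale mentioned above (this helper is not part of the original notebook; the 100 Hz to 8 kHz range simply mirrors the lowfreq/highfreq settings used in the code below), a common form of the Hz-to-mel conversion is $m = 2595 \cdot \log_{10}(1 + f/700)$:

# Illustration only: why the Mel filterbank is nonlinearly spaced in frequency
import numpy as np

def hz_to_mel(f_hz):
    return 2595.0 * np.log10(1.0 + f_hz / 700.0)

def mel_to_hz(m):
    return 700.0 * (10.0 ** (m / 2595.0) - 1.0)

# 40 triangular filters need 42 edge frequencies, evenly spaced on the Mel scale
edges_hz = mel_to_hz(np.linspace(hz_to_mel(100.0), hz_to_mel(8000.0), 42))
print(np.diff(edges_hz)[:3])   # narrow filters at low frequencies
print(np.diff(edges_hz)[-3:])  # much wider filters at high frequencies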
###Code
# compute MFCC features from audio signal
def audio2feature(audio):
audio = audio.astype(np.float)
# normalize data
audio -= audio.mean()
audio /= np.max((audio.max(), -audio.min()))
# compute MFCC coefficients
features = python_speech_features.mfcc(audio, samplerate=16000, winlen=0.025, winstep=0.01, numcep=20, nfilt=40, nfft=512, lowfreq=100, highfreq=None, preemph=0.97, ceplifter=22, appendEnergy=True, winfunc=np.hamming)
return features
# load .wav-file, add some noise and compute MFCC features
def wav2feature(filepath):
samplerate, data = wavfile.read(filepath)
data = data.astype(np.float)
# normalize data
data -= data.mean()
data /= np.max((data.max(), -data.min()))
# add gaussian noise
data += np.random.normal(loc=0.0, scale=0.025, size=data.shape)
# compute MFCC coefficients
features = python_speech_features.mfcc(data, samplerate=16000, winlen=0.025, winstep=0.01, numcep=20, nfilt=40, nfft=512, lowfreq=100, highfreq=None, preemph=0.97, ceplifter=22, appendEnergy=True, winfunc=np.hamming)
return features
###Output
_____no_output_____
###Markdown
If we compute the features for one audio sample, we see that the feature shape is (99, 20). The first index is that of the 10ms long snippet of the 1s long audio signal, so we have 1s/10ms-1=99 snippets. The second dimension is the number of MFC coefficients, in this case we have 20.Now we can load all audio samples and pre-compute the MFCC features for each sample. Note that this will take quite a long time!
###Code
feature_shape = wav2feature(samples[0]).shape
features = np.empty((num_classes*num_samples_per_class, )+(feature_shape), dtype=np.float)
print("features.shape", features.shape)
print("pre-computing features from audio files...")
with tqdm(total=num_samples_per_class*num_classes) as pbar:
for k, sample in enumerate(samples):
features[k] = wav2feature(sample)
pbar.update()
###Output
features.shape (40000, 99, 20)
pre-computing features from audio files...
###Markdown
Now we can save the pre-computed training dataset containing the features of the training samples and their class labels. This way, we won't have to re-compute the features next time.
###Code
# save computed features and classes to hard drive
np.save("mfcc_plus_energy_features_40000x99x20", features)
np.save("classes", np.array(classes, dtype=np.int))
###Output
_____no_output_____
###Markdown
We can load the pre-computed features and class labels as follows:
###Code
# load pre-computed training features dataset and training class labels
features = np.load("mfcc_plus_energy_features_40000x99x20.npy")
classes = np.load("classes.npy")
###Output
_____no_output_____
###Markdown
Now the next thing to do is divide our dataset into a training dataset and a validation dataset. The training dataset is used for training our Neural Network, i.e. the Neural Network will learn to correctly predict a sample's class label based on it's features.One problem that can occur in Machine Learning is so called *Overfitting*. Our basic goal is to train our Neural Network so that it does not only classify the training samples correctly, but also new samples, which it has never "seen" before. This is called *Generalization*. But with complex networks it can happen that instead of really learning to classify samples based on their features, the network simply "learns by heart" to which class each training sample belongs. This is called Overfitting. In this case, the network will perform great on the training data, but poorly on new previously unseen data.One method to mitigate, is the use of a separate validation dataset. So we split the whole dataset, and use a small subset (e.g. here one third of the data) for validation, the rest is our training set. Now during training, only the training dataset will be used to calculate the weigths of the Neural Network (which is the "learning" part). After each epoch (i.e. once all training samples have been considered once), we will tell the network to try and predict the class labels of all samples in our validation dataset and based on that, calculate the accuracy on the validation set.So during training, after each training epoch, we can look at the accuracy of the network on the training set and on the validation set. At the beginning of the training, both accuracies will typically improve. At one point we might see that the validation accuracy plateaus or evendecreases, while the training accuracy still improves. This indicates that the network is starting to overfit, thus it is a good time to stop the training.Another method to mitigate overfitting is the use of so called [Dropout-Layers](https://en.wikipedia.org/wiki/Dilution_(neural_networks)) which randomly set a subset of the weigths of a layer to zero. In this project, we won't use them.
###Code
train_data, validation_data, train_classes, validation_classes = train_test_split(features, classes,
test_size=0.30, random_state=42, shuffle=True)
###Output
_____no_output_____
###Markdown
The next step is to define our Neural Network's architecture. The network can be described by a sequence of layers. For this task we will implement a [Convolutional Neural Network (CNN)](https://en.wikipedia.org/wiki/Convolutional_neural_network). The two main characteristics of CNNs are convolutional layers and pooling layers.Convolutional layers convolve a filter vector (1D) or matrix (2D) with the input data. The main advantage of convolutional layers (and thus of CNNs) is, that they can achieve a high degree of shift-/translation-invariance. Picture the following example: we have two 2s long recordings of the same spoken word, but in one recording the word is right at the beginning of the recording, and in the other one at the end. Now a conventional Neural Network might have a hard time learning to recognize the words, because it expects certain features at certain position in time. Another example migth be an image classifier that recognizes objects well if they're all in the center of the image and in the same orientation, but fails if the objects are in a corner of the image or rotated. So we want the network to be invariant to translations and rotations of the features, i.e. recognize features regardless of their position in time (e.g. in case of audio) or space (e.g. in case of an image). A convolutional layer needs 3 parameters:* filter size $F$: width (1D) or height and width (2D) of filter vector/matrix. Determines number of weigths of the layer.* stride $S$: determines the step size with which we move the filter across the signal or image* padding $P$: pad the input data with zeros The size of the convolutional layer's output is: $W_{out}=\frac{W_{in}-F-2P}{S}+1$ In Keras, the default stride is one and the default padding is zero.A pooling layer is somewhat similar in that it also convolves a "filter"-vector/matrix across the input with a certain stride and possibly padding. But instead of multiplying the input values with the filter values, the pooling layer computes either the average or maximum value of the values. Max-pooling layers are commonly used, average pooling rarely. So e.g. a 3x3 max-pooling layer will slide a 3x3-filter over the input and deliver the maximum of the 3*3=9 values as output. In contrary to the convolutional layer, a pooling layer introduces no additional weights. The output size of a pooling layer can be calculated with the same formula as for the convolutional layer. In Keras, the default stride is equal to the filter size and the default padding is zero. In this case the formula simplifies to: $W_{out}=\frac{W_{in}-F-2P}{S}+1=\frac{W_{in}-F}{F}+1=\frac{W_{in}}{F}$ A max-pooling layer achieves a down-sampling of the feature vector/matrix and also a translation/shift-invariance of the features. It is common to use a pooling layer after a convolution layer. We'll be creating our CNN model using keras' [Sequential](https://keras.io/guides/sequential_model/) model. At first, we add an input layer whose input size matches the dimensions of our MFCC features. In this case wee have 20 MFC coefficients and 99 timeframes, thus the feature matrix for an audio sample is of size (99, 20). In Keras, the input shapes are by default as follows:* (batch, axis, channel): one-dimensional data with $\geq$1 channels. This most commonly represents a timeseries, i.e. a number of features that change over time. In our case, the (99, 20) feature matrix is interpreted as a time series (i.e. axis represents time(-frame)) with 20 channels (the MFC coefficients. 
We can perform a 1D-convolution on such data.* (batch, axis0, axis1, channel): two-dimensional data with $\geq$1 channels. This is most often a color image, where axis0 and axis1 are the horizontal and vertical position of an image pixel and each pixel has 3 channels for the red, green and blue color values. We can perform a 2D-convolution on such data.* (batch, axis0, axis1, ..., axisN-1, channel): n-dimensional data with $\geq$1 channels.Now you may wonder about the batch dimension. This is another dimension that specifies the number of samples, because during training we often load batches of more than one sample in one iteration to average the gradients during optimization. In Keras, during model specification the batch dimension is ignored, so we won't have to specify it explicitly. But as you can see, our *features* variable, which contains all training samples has the shape (40000, 99, 20), so its first axis is the batch dimension. This way when we'll later pass the training data to the *fit()*-function, it can fetch a batch, i.e. a subset of the dataset for each training iteration.Next, we add a 1-D convolutional layer ([Conv1D](https://keras.io/api/layers/convolution_layers/convolution1d/)). This layer performs a one dimensional convolution along the (in our case) time (or more precisely timeframe) axis. The first argument is the number of filters to apply. Most often we use many filters, thus performing the convolution multiple times with different filter kernels. This way, the number of channels of the convolutional layer's output is the number of filters used. The second argument is the kernel size, this is the size of our convolution filter. At last, we specify an activation function used, in this case the ReLU-function is used. After the first convolutional layer, we add a max pooling layer with a size of 3 that reduces the data size along the time axis. Note that in case the division is fractional, the resulting size will be the floor value.Such a combination of convolutional layer and pooling layer is very common in CNNs. The main idea is to repeatedly stack convolution and pooling layers, so that the dimension (in time or space) of the input data is subsequently reduced, while the feature space dimensionality (i.e. number of channels) increases. Next, we add two more stacks of convolutional and max pooling layer. For the last pooling layer, we use a global pooling layer, which behaves just like the normal pooling layer, but with a filter that spans the whole axis size. After the global max pooling operation, our data is one-dimensional, with the time axis completely removed and only the feature dimension remaining.In the next step, we add a couple of fully connected (Keras calles them "dense") layers, just like in a regular Multi Layer Perceptron (MLP). Each layer reduces the feature dimensionality, so that the last layer has an output dimension equal to the number of different classes (in our case words). Using the Softmax activation function on the last dense layer, we can interpret the networks output as an a posteriori probability distribution of the sample belonging to a certain class, given the audio sample's input features.
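Before defining the model, we can sanity-check the expected output length of each layer with the formula above (a small side calculation added here, not part of the original notebook; it assumes the kernel and pool sizes of the model defined next, with stride 1 and no padding for the convolutions):

# Quick check of W_out = (W_in - F - 2P)/S + 1 for the layers used below
def conv_out(w_in, f, s=1, p=0):
    return (w_in - f - 2 * p) // s + 1

def pool_out(w_in, f):
    return w_in // f  # Keras default: stride = pool size, no padding

w = 99                 # number of time frames
w = conv_out(w, 8)     # Conv1D, kernel 8  -> 92
w = pool_out(w, 3)     # MaxPooling1D, 3   -> 30
w = conv_out(w, 8)     # Conv1D, kernel 8  -> 23
w = pool_out(w, 3)     # MaxPooling1D, 3   -> 7
w = conv_out(w, 5)     # Conv1D, kernel 5  -> 3
print(w)               # matches the shapes in the model summary below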
###Code
keras.backend.clear_session() # clear previous model (if cell is executed more than once)
### CNN MODEL DEFINITION ###
model = keras.models.Sequential()
model.add(keras.layers.Input(shape=(99, 20)))
model.add(keras.layers.Conv1D(64, kernel_size=8, activation="relu"))
model.add(keras.layers.MaxPooling1D(pool_size=3))
model.add(keras.layers.Conv1D(128, kernel_size=8, activation="relu"))
model.add(keras.layers.MaxPooling1D(pool_size=3))
model.add(keras.layers.Conv1D(256, kernel_size=5, activation="relu"))
model.add(keras.layers.GlobalMaxPooling1D())
model.add(keras.layers.Dense(128, activation="relu"))
model.add(keras.layers.Dense(64, activation="relu"))
model.add(keras.layers.Dense(num_classes, activation='softmax'))
# print model architecture
model.summary()
###Output
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv1d (Conv1D) (None, 92, 64) 10304
_________________________________________________________________
max_pooling1d (MaxPooling1D) (None, 30, 64) 0
_________________________________________________________________
conv1d_1 (Conv1D) (None, 23, 128) 65664
_________________________________________________________________
max_pooling1d_1 (MaxPooling1 (None, 7, 128) 0
_________________________________________________________________
conv1d_2 (Conv1D) (None, 3, 256) 164096
_________________________________________________________________
global_max_pooling1d (Global (None, 256) 0
_________________________________________________________________
dense (Dense) (None, 128) 32896
_________________________________________________________________
dense_1 (Dense) (None, 64) 8256
_________________________________________________________________
dense_2 (Dense) (None, 20) 1300
=================================================================
Total params: 282,516
Trainable params: 282,516
Non-trainable params: 0
_________________________________________________________________
###Markdown
Now that our CNN model is defined, we can configure it for training. Therefore we choose an optimization algorithm, e.g. Stochastic Gradient Descent (SGD) or ADAM. Additionally, we need to specify a loss function for training. The loss function determines, how the performance of the network is evaluated. In this case, we have a multi-class classification problem, where the class labels are represented as integer values. In this case, the sparse categorical cross-entropy loss can be used. If our class labels were encoded using a one-hot encoding scheme, we would use the normal (non-sparse) variant. As a metric we specify the accuracy so that after every epoch, the accuracy of the network is computed.
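For reference (a side note added here, not from the original notebook): if the class labels were one-hot encoded instead of integer-encoded, the non-sparse categorical cross-entropy would be used instead, for example:

# Each integer label becomes a vector of length num_classes
onehot_example = keras.utils.to_categorical([0, 3, 19], num_classes=num_classes)
print(onehot_example.shape)  # (3, 20)
# model.compile(optimizer=sgd, loss=keras.losses.CategoricalCrossentropy(), metrics=["accuracy"])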
###Code
sgd = keras.optimizers.SGD()
loss_fn = keras.losses.SparseCategoricalCrossentropy() # use Sparse because classes are represented as integers not as one-hot encoding
model.compile(optimizer=sgd, loss=loss_fn, metrics=["accuracy"])
###Output
_____no_output_____
###Markdown
Before starting the training, it can be useful to define an early stopping criterion in order to avoid overfitting as explained previously. We define a callback function which checks if the accuracy on the validation set has increased in the last 5 epochs and stops training if this is not the case. After stopping, the model is reverted to the state (i.e. the weigths) which had achieved the best result. We'll also use the [livelossplot](https://github.com/stared/livelossplot) library, which provides functions to plot a live graph of the accuracy and loss metrics during training. We pass the plot function as a callback too.Finally, we can start training using *model.fit()*. We specify the training and validation dataset, the max number of epochs to train, the callbacks and the batch size. In this case, a batch size of 32 is used, so that in every iteration, a batch of 32 samples is used to compute the gradient. Especially when using SGD, the batch size influences the training. In each iteration, the average of the gradients for the batch is computed and used to update the weights. A smaller batch leads to a "noisy" gradient which can be good to explore the weigth space further (and not get stuck very early in a local minimum), but affects convergence towards the end of training negatively. A larger batch leads to less noisy gradients, so that larger steps can be made (i.e. a higher learning-rate) which lead to faster training. Additionally, larger batches tend to reduce computation overhead. A batch size of one would be "pure" stochastic gradient descent, while a batch equal to the whole training set would be considered standard (i.e. non-stochastic) gradient descent. With a batch size in between (often called "mini batch"), a good compromise can be found. Sidenote: It seems that matplotlib's *notebook* mode (which is for use in Jupyter Notebooks) doesn't work well with the live plotting, so we use *inline* mode.
###Code
early_stopping = tf.keras.callbacks.EarlyStopping(monitor="val_accuracy", patience=5, restore_best_weights=True)
plt.close()
history = model.fit(train_data,
train_classes,
batch_size=32,
epochs=100,
validation_data=(validation_data, validation_classes),
callbacks=[PlotLossesKeras(), early_stopping])
###Output
_____no_output_____
###Markdown
As we can see, during the training, the losses decrease and the accuracy increases.After training, we can save our model for later use if we want to.
###Code
# save model
model.save(datetime.now().strftime("%d_%m_%Y__%H_%M")+".h5")
# load model
model = keras.models.load_model("05_08_2020__19_23.h5")
###Output
_____no_output_____
###Markdown
Another useful tool for evaluating a classifier's performance is a so-called confusion matrix. To compute the confusion matrix, we use our network to predict the class labels of all samples in the validation set. The confusion matrix plots the probability with which a sample of a certain class is classified as belonging to a certain class. Thus, the values on the matrix's diagonal represent the correct classifications and those outside the diagonal the incorrect classifications. Note that the matrix is not necessarily symmetric, since samples of class A being misclassified as B is not the same as samples of B being misclassified as A. The interesting thing is that the confusion matrix allows us to see if a certain pair of class labels are often falsely classified, i.e. confused with each other. If two classes were often confused (e.g. because two words sound very similar) we would find a high value outside the diagonal. For example, if we look closely at the matrix below, we can see a slightly larger value (darker color) at "go"-"no". This means that these two words are more often confused with each other, which is plausible since they sound very similar. The ideal result would be a value of $\frac1N$ ($N$=number of classes) on the diagonals (assuming classes are equally represented in the dataset) and zeros everywhere outside the diagonal.
###Code
# plot confusion matrix
y = np.argmax(model.predict(validation_data), axis=1)
cm = confusion_matrix(validation_classes, y, normalize="all")
%matplotlib inline
plt.close()
plt.figure(figsize = (8,8))
plt.imshow(cm, cmap=plt.cm.Blues)
plt.xlabel("Predicted labels")
plt.ylabel("True labels")
plt.xticks(np.arange(0, 20, 1), index2word, rotation=90)
plt.yticks(np.arange(0, 20, 1), index2word)
plt.tick_params(labelsize=12)
plt.title('Confusion matrix ')
plt.colorbar()
plt.show()
###Output
_____no_output_____
###Markdown
Ok, now we can try the keyword recognizer ourselves! To easily record and play audio, we'll use the library [sounddevice](https://python-sounddevice.readthedocs.io/en/0.4.0/index.html). One thing to consider is, that we have created our CNN model so that it accepts an input feature vector that corresponds to an audio snippet of exactly 1s length at 16kHz sampling rate, i.e. 16000 samples. So we could record for exactly 1s, but this is not very practical, as you would have to say the word just at the right time after starting the recording so that it lies within the 1s time window. A more elegant solution is to record for a longer duration, e.g. 3s and then extract a 1s long snippet which we can then feed to our CNN. For this simple case we'll assume that the user says only one word during the recording, so we extract the 1s long snippet of the recording which contains the maximum signal energy. This sounds complicated, but can be quite easily computed using a [convolution](https://en.wikipedia.org/wiki/Convolution). First, we compute the power signal by element-wise squaring the audio signal. Then we create a 1s (i.e. 16000 points) long rectangle window and convolve the power signal with the window. We use ["valid" mode](https://numpy.org/doc/stable/reference/generated/numpy.convolve.html) which means that only points where the signals overlap completely are computed (i.e. no zero-padding). This way, by computing the time at which the convolution is maximal, we get the starting time of the rectangle window which leads to maximal signal energy in the extracted snippet. We can then extract a 1s long snippet from the recording.After defining a function to extract the 1s snippet, we configure the samplerate and device for recording. You can find out the number of the devices via *sd.query_devices()*. After recording for 3s and extracting the 1s snippet we can play it back. Then we compute the MFCC features and add a "fake" batch dimension to our sample before feeding it into our CNN mmodel for prediction. This is needed because the model expects batches of $\geq1$ samples as input, so since we have only one sample, we append a dimension to get a batch of one single sample. Additionally, we'll time the computation and model prediction to see how fast it is. We can normalize the CNN model's output to get a probability distribution (not strictly mathematical, but we can interpret it that way). Then we get the 3 candidates with highest probability and print the result. We'll also plot the raw audio signal and visulize the MFC coefficients.
###Code
def extract_loudest_section(audio, length):
audio = audio[:, 0].astype(np.float) # to avoid integer overflow when squaring
audio_pw = audio**2 # power
window = np.ones((length, ))
conv = np.convolve(audio_pw, window, mode="valid")
begin_index = conv.argmax()
return audio[begin_index:begin_index+length]
sd.default.samplerate = 16000
sd.default.channels = 1, 2 # mono record, stereo playback
recording = sd.rec(int(3*sd.default.samplerate), channels=1, samplerate=sd.default.samplerate, dtype=np.float, blocking=True)
recording = extract_loudest_section(recording, int(1*sd.default.samplerate)) # extract 1s snippet with highest energy (only necessary if recording is >3s long)
sd.play(recording, blocking=True)
t1 = timer()
recorded_feature = audio2feature(recording)
t2 = timer()
recorded_feature = np.expand_dims(recorded_feature, 0) # add "fake" batch dimension 1
prediction = model.predict(recorded_feature).reshape((20, ))
t3 = timer()
# normalize prediction output to get "probabilities"
prediction /= prediction.sum()
# print the 3 candidates with highest probability
prediction_sorted_indices = prediction.argsort()
print("candidates:\n-----------------------------")
for k in range(3):
i = int(prediction_sorted_indices[-1-k])
print("%d.)\t%s\t:\t%2.1f%%" % (k+1, index2word[i], prediction[i]*100))
print("-----------------------------")
print("feature computation time: %2.1f ms" % ((t2-t1)*1e3))
print("CNN model prediction time: %2.1f ms" % ((t3-t2)*1e3))
print("total time: %2.1f ms" % ((t3-t1)*1e3))
plt.close()
plt.figure(1, figsize=(10, 7))
plt.subplot(211)
plt.plot(recording)
plt.subplot(212)
plt.imshow(recorded_feature.reshape(99, 20).T, aspect="auto")
plt.show()
###Output
candidates:
-----------------------------
1.) yes : 99.8%
2.) no : 0.1%
3.) left : 0.0%
-----------------------------
feature computation time: 5.3 ms
CNN model prediction time: 92.4 ms
total time: 97.7 ms
###Markdown
As we see in this case, the results look really good. If the probability for the best candidate is very high and those of the second-best and third-best candidates are pretty low, the prediction seems quite trustworthy. Additionally, we can see that the feauture computation and CNN model prediction are quite fast. The total execution time is around 100ms, which means that our method is quite able to work in "real-time". So now let's adapt and extend this little demo to work in real-time. For this, we'll use a buffer that contains 5 succeeding snippets of 3200 samples, i.e. 200ms each. We implement this audio buffer as a ringbuffer, which means that every time a new 200ms long snippet has been recorded, the oldest snippet in the buffer is discarded, the buffer is moved one step back and the newest snippet is put at the last position. This way, our buffer is updated every 200ms and always contains the last 1s of recorded audio. Since our prediction takes approximately 100ms and we have 200ms between each update, we have enough time for computation and achieve a latency of <200ms (so I think it can be considered "real time" in this context). To implement the buffer in python, we can make use of numpy's [roll()](https://numpy.org/doc/stable/reference/generated/numpy.roll.html) function. We roll our buffer with a step of -1 along the first axis, which means that all 5 snippets are shifted to the left and the first snippet rolls over to the last position. Then we replace the snippet at the last position (which is the oldest snippet we whish to discard) with the newest snippet. We define a callback function with an appropriate signature for the sounddevice Stream API (see [here](https://python-sounddevice.readthedocs.io/en/0.4.0/api/streams.htmlsounddevice.Stream)) that updates the audio buffer and makes a new prediction each time a new snippet is recorded. We use a simple threshold of 70% probability to check if a word has been recognized. When a word is recognized, it will also appear in the buffer after the next couple of updates, so it will be recognized more than once in a row. To avoid this, we can implement a timeout that ignores a recognized word, if the same word has already been recognized shortly before.
###Code
audio_buffer = np.zeros((5, 3200))
last_recognized_word = None
last_recognition_time = 0
recognition_timeout = 1.0
def audio_stream_callback(indata, frames, time, status):
global audio_buffer
global model
global index2word
global last_recognized_word
global last_recognition_time
audio_buffer = np.roll(audio_buffer, shift=-1, axis=0)
audio_buffer[-1, :] = np.squeeze(indata)
t1 = timer()
recorded_feature = audio2feature(audio_buffer.flatten())
recorded_feature = np.expand_dims(recorded_feature, 0) # add "fake" batch dimension 1
t2 = timer()
prediction = model.predict(recorded_feature).reshape((20, ))
# normalize prediction output to get "probabilities"
prediction /= prediction.sum()
#print(prediction)
best_candidate_index = prediction.argmax()
best_candidate_probability = prediction[best_candidate_index]
t3 = timer()
if(best_candidate_probability > 0.7): # treshold
word = index2word[best_candidate_index]
if( (timer()-last_recognition_time)>recognition_timeout or word!=last_recognized_word ):
last_recognition_time = timer()
last_recognized_word = word
clear_output(wait=True) # clear ouput as soon as new output is available to replace it
print("%s\t:\t%2.1f%%" % (word, best_candidate_probability*100))
print("-----------------------------")
###Output
_____no_output_____
###Markdown
Now we can finally start the real-time demo of our CNN keyword recognizer. Therefore we start an input stream which calls our callback function each time a new block of 3200 samples has been recorded. We'll let the recognizer run for one minute so we have plenty of time to try it out.
###Code
# REALTIME KEYWORD RECOGNITION DEMO (60s long)
with sd.InputStream(samplerate=16000, blocksize=3200, device=None, channels=1, dtype="float32", callback=audio_stream_callback):
sd.sleep(60*1000)
###Output
stop : 99.0%
-----------------------------
|
quiz/m7/m7l6/calculate_shap.ipynb | ###Markdown
Calculate Shapley values Shapley values as used in coalition game theory were introduced by William Shapley in 1953. [Scott Lundberg](http://scottlundberg.com/) applied Shapley values for calculating feature importance in [2017](http://papers.nips.cc/paper/7062-a-unified-approach-to-interpreting-model-predictions.pdf). If you want to read the paper, I recommend reading: Abstract, 1 Introduction, 2 Additive Feature Attribution Methods, (skip 2.1, 2.2, 2.3), and 2.4 Classic Shapley Value Estimation.Lundberg calls this feature importance method "SHAP", which stands for SHapley Additive exPlanations.Here’s the formula for calculating Shapley values:$ \phi_{i} = \sum_{S \subseteq M \setminus i} \frac{|S|! (|M| - |S| -1 )!}{|M|!} [f(S \cup i) - f(S)]$A key part of this is the difference between the model’s prediction with the feature $i$, and the model’s prediction without feature $i$. $S$ refers to a subset of features that doesn’t include the feature for which we're calculating $\phi_i$. $S \cup i$ is the subset that includes features in $S$ plus feature $i$. $S \subseteq M \setminus i$ in the $\Sigma$ symbol is saying, all sets $S$ that are subsets of the full set of features $M$, excluding feature $i$. Options for your learning journey* If you’re okay with just using this formula, you can skip ahead to the coding section below. * If you would like an explanation for what this formula is doing, please continue reading here. Optional (explanation of this formula)The part of the formula with the factorials calculates the number of ways to generate the collection of features, where order matters.$\frac{|S|! (|M| - |S| -1 )!}{|M|!}$ Adding features to a CoalitionThe following concepts come from coalition game theory, so when we say "coalition", think of it as a team, where members of the team are added, one after another, in a particular order.Let’s imagine that we’re creating a coalition of features, by adding one feature at a time to the coalition, and including all $|M|$ features. Let’s say we have 3 features total. Here are all the possible ways that we can create this “coalition” of features. $x_0,x_1,x_2$ $x_0,x_2,x_1$ $x_1,x_0,x_2$ $x_1,x_2,x_0$ $x_2,x_0,x_1$ $x_2,x_1,x_0$Notice that for $|M| = 3$ features, there are $3! = 3 \times 2 \times 1 = 6$ possible ways to create the coalition. marginal contribution of a featureFor each of the 6 ways to create a coalition, let's see how to calculate the marginal contribution of feature $x_2$.Model’s prediction when it includes features 0,1,2, minus the model’s prediction when it includes only features 0 and 1. $x_0,x_1,x_2$: $f(x_0,x_1,x_2) - f(x_0,x_1)$ Model’s prediction when it includes features 0 and 2, minus the prediction when using only feature 0. Notice that feature 1 is added after feature 2, so it’s not included in the model. $x_0,x_2,x_1$: $f(x_0,x_2) - f(x_0)$Model's prediction including all three features, minus when the model is only given features 1 and 0. $x_1,x_0,x_2$: $f(x_1,x_0,x_2) - f(x_1,x_0)$Model's prediction when given features 1 and 2, minus when the model is only given feature 1. $x_1,x_2,x_0$: $f(x_1,x_2) - f(x_1)$Model’s prediction if it only uses feature 2, minus the model’s prediction if it has no features. When there are no features, the model’s prediction would be the average of the labels in the training data. $x_2,x_0,x_1$: $f(x_2) - f( )$Model's prediction (same as the previous one) $x_2,x_1,x_0$: $f(x_2) - f( )$Notice that some of these marginal contribution calculations look the same. 
For example, the first and third sequences give the same result: $f(x_0,x_1,x_2) - f(x_0,x_1)$ equals $f(x_1,x_0,x_2) - f(x_1,x_0)$. The same holds for the fifth and sixth. So we can use factorials to count the number of permutations that result in the same marginal contribution.

 Break into 2 parts
To get to the formula we saw above, we can break each sequence into two sections: the features added before feature $i$, and the features added after feature $i$. We'll call the set of features added before feature $i$ the set $S$, and the set of features added after feature $i$ the set $Q$.

Given the six sequences, and with feature $i$ being $x_2$ in this example, here are the sets $S$ and $Q$ for each sequence:

$x_0,x_1,x_2$: $S$ = {0,1}, $Q$ = {}
$x_0,x_2,x_1$: $S$ = {0}, $Q$ = {1}
$x_1,x_0,x_2$: $S$ = {1,0}, $Q$ = {}
$x_1,x_2,x_0$: $S$ = {1}, $Q$ = {0}
$x_2,x_0,x_1$: $S$ = {}, $Q$ = {0,1}
$x_2,x_1,x_0$: $S$ = {}, $Q$ = {1,0}

The first and third sequences have the same set $S$ = {0,1} and the same set $Q$ = {}. Another way to see that there are two such sequences is to take $|S|! \times |Q|! = 2! \times 0! = 2$. Similarly, the fifth and sixth sequences share $S$ = {} and $Q$ = {0,1}, and again $|S|! \times |Q|! = 0! \times 2! = 2$.

 And now, the original formula
To use the notation of the original formula, note that $|Q| = |M| - |S| - 1$. Recall that there are $|M|! = 3! = 3 \times 2 \times 1 = 6$ total sequences. We divide $|S|! \times (|M| - |S| - 1)!$ by $|M|!$ to get the proportion assigned to each marginal contribution. This is the weight applied to each marginal contribution, and the weights sum to 1.

That's how we get the term $\frac{|S|! (|M| - |S| -1 )!}{|M|!} [f(S \cup i) - f(S)]$ for each set $S \subseteq M \setminus i$. Summing the weighted marginal contributions over all sets $S$ gives the importance of feature $i$.

You'll get to practice this in code!
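
A small sketch of this counting argument in code (it uses only the Python standard library and is not part of the original exercise): it enumerates all $3!$ orderings of three features, tallies how often each subset $S$ precedes feature $x_2$, and checks the tallies against the factorial weight formula above.
###Code
from itertools import permutations
from math import factorial
features = [0, 1, 2]  # the full feature set M
i = 2                 # group the orderings by which features precede x_2
counts = {}
for order in permutations(features):
    S = frozenset(order[:order.index(i)])  # features added before feature i
    counts[S] = counts.get(S, 0) + 1
M_size = len(features)
for S, c in sorted(counts.items(), key=lambda kv: len(kv[0])):
    weight_from_counts = c / factorial(M_size)
    weight_from_formula = factorial(len(S)) * factorial(M_size - len(S) - 1) / factorial(M_size)
    print(sorted(S), weight_from_counts, weight_from_formula)
###Output
_____no_output_____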
###Code
import sys
!{sys.executable} -m pip install numpy==1.14.5
!{sys.executable} -m pip install scikit-learn==0.19.1
!{sys.executable} -m pip install graphviz==0.9
!{sys.executable} -m pip install shap==0.25.2
import sklearn
import shap
import numpy as np
import graphviz
from math import factorial
###Output
_____no_output_____
###Markdown
 Generate input data and fit a tree model
We'll create data where features 0 and 1 form the "AND" operator, and feature 2 does not contribute to the prediction (because it's always zero).
###Code
# AND case (features 0 and 1)
N = 100
M = 3
X = np.zeros((N,M))
X.shape
y = np.zeros(N)
X[:1 * N//4, 1] = 1
X[:N//2, 0] = 1
X[N//2:3 * N//4, 1] = 1
y[:1 * N//4] = 1
# fit model
model = sklearn.tree.DecisionTreeRegressor(random_state=0)
model.fit(X, y)
# draw model
dot_data = sklearn.tree.export_graphviz(model, out_file=None, filled=True, rounded=True, special_characters=True)
graph = graphviz.Source(dot_data)
graph
###Output
_____no_output_____
###Markdown
 Calculate Shap values
We'll try to calculate the local feature importance of feature 0. We have 3 features, $x_0, x_1, x_2$. For feature $x_0$, determine what the model predicts with and without $x_0$.

The subsets $S$ that exclude feature $x_0$ are:
{}
{$x_1$}
{$x_2$}
{$x_1,x_2$}

We want to see what the model predicts with feature $x_0$ compared to the model without feature $x_0$:
$f(x_0) - f( )$
$f(x_0,x_1) - f(x_1)$
$f(x_0,x_2) - f(x_2)$
$f(x_0,x_1,x_2) - f(x_1,x_2)$

 Sample data point
We'll calculate the local feature importance of a sample data point where feature $x_0 = 1$, feature $x_1 = 1$, and feature $x_2 = 1$.
###Code
sample_values = np.array([1,1,1])
print(f"sample values to calculate local feature importance on: {sample_values}")
###Output
_____no_output_____
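###Markdown
As a small aside (not part of the original exercise), the four subsets $S$ listed above can be enumerated programmatically with the standard library:
###Code
from itertools import combinations
other_features = [1, 2]  # all features except feature 0
subsets_S = [list(S)
             for size in range(len(other_features) + 1)
             for S in combinations(other_features, size)]
print(subsets_S)  # expected: [[], [1], [2], [1, 2]]
###Output
_____no_output_____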
###Markdown
 Helper function
To make things easier, we'll use a helper function that takes the entire feature set M, and also a list of the features (columns) that we want, and puts them together into a 2D array.
###Code
def get_subset(X, feature_l):
"""
Given a 2D array containing all feature columns,
and a list of integers representing which columns we want,
Return a 2D array with just the subset of features desired
"""
cols_l = []
for f in feature_l:
cols_l.append(X[:,f].reshape(-1,1))
return np.concatenate(cols_l, axis=1)
# try it out
tmp = get_subset(X,[0,2])
tmp[0:10]
###Output
_____no_output_____
###Markdown
 Helper function to calculate the permutation weight
This helper function calculates $\frac{|S|! (|M| - |S| - 1)!}{|M|!}$
###Code
from math import factorial
def calc_weight(size_S, num_features):
return factorial(size_S) * factorial(num_features - size_S - 1) / factorial(num_features)
###Output
_____no_output_____
###Markdown
Try it out when size of S is 2 and there are 3 features total. The answer should be equal to $\frac{2! \times (3-2-1)!}{3!} = \frac{2 \times 1}{6} = \frac{1}{3}$
###Code
calc_weight(size_S=2,num_features=3)
###Output
_____no_output_____
###Markdown
 Case A
Calculate the prediction of a model that uses features 0 and 1.
Calculate the prediction of a model that uses feature 1.
Calculate the difference (the marginal contribution of feature 0):
$f(x_0,x_1) - f(x_1)$

 Calculate $f(x_0,x_1)$
###Code
# S_union_i
S_union_i = get_subset(X,[0,1])
# fit model
f_S_union_i = sklearn.tree.DecisionTreeRegressor()
f_S_union_i.fit(S_union_i, y)
###Output
_____no_output_____
###Markdown
Remember, for the sample input for which we'll calculate feature importance, we chose values of 1 for all features.
###Code
# This will throw an error
try:
f_S_union_i.predict(np.array([1,1]))
except Exception as e:
print(e)
###Output
_____no_output_____
###Markdown
The error message says:
> Reshape your data either using array.reshape(-1, 1) if your data has a single feature or array.reshape(1, -1) if it contains a single sample.

So we'll reshape the data so that it represents a sample (a row), which means it has 1 row and 1 or more columns.
###Code
# feature 0 and feature 1 are both 1 in the sample input
sample_input = np.array([1,1]).reshape(1,-1)
sample_input
###Output
_____no_output_____
###Markdown
The prediction of the model when it has features 0 and 1 is:
###Code
pred_S_union_i = f_S_union_i.predict(sample_input)
pred_S_union_i
###Output
_____no_output_____
###Markdown
When feature 0 and feature 1 are both 1, the prediction of the model is 1.

 Calculate $f(x_1)$
###Code
# S
S = get_subset(X,[1])
f_S = sklearn.tree.DecisionTreeRegressor()
f_S.fit(S, y)
###Output
_____no_output_____
###Markdown
The sample input for feature 1 is 1.
###Code
sample_input = np.array([1]).reshape(1,-1)
###Output
_____no_output_____
###Markdown
The model's prediction when it is only training on feature 1 is:
###Code
pred_S = f_S.predict(sample_input)
pred_S
###Output
_____no_output_____
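###Markdown
As a quick sanity check (a sketch, not part of the original notebook), we can compute the same number directly from the training data: the mean label among rows where feature 1 equals 1.
###Code
# mean of y restricted to rows where feature 1 is 1; should be 0.5
y[X[:, 1] == 1].mean()
###Output
_____no_output_____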
###Markdown
When feature 1 is 1, the prediction of this model is 0.5. If you look at the data in X, this makes sense: when feature 1 is 1, half of the time the label in y is 0, and half of the time it is 1. So on average, the prediction is 0.5.

 Calculate difference
###Code
diff_A = pred_S_union_i - pred_S
diff_A
###Output
_____no_output_____
###Markdown
 Calculate the weight
Calculate the weight assigned to the marginal contribution. In this case, this marginal contribution occurs in 1 out of the 6 possible permutations of the 3 features, so its weight is 1/6.
###Code
size_S = S.shape[1] # should be 1
weight_A = calc_weight(size_S, M)
weight_A # should be 1/6
###Output
_____no_output_____
###Markdown
 Quiz: Case B
Calculate the prediction of a model that uses features 0 and 2.
Calculate the prediction of a model that uses feature 2.
Calculate the difference:
$f(x_0,x_2) - f(x_2)$

 Calculate $f(x_0,x_2)$
###Code
# TODO
S_union_i = # ...
f_S_union_i = # ...
#f_S_union_i.fit(?, ?)
sample_input = # ...
pred_S_union_i = # ...
pred_S_union_i
###Output
_____no_output_____
###Markdown
Since we're using features 0 and 2, and feature 2 doesn't help with predicting the output, the model really just depends on feature 0. When feature 0 is 1, half of the labels are 0 and half are 1, so the average prediction is 0.5.

 Calculate $f(x_2)$
###Code
# TODO
S = # ...
f_S = # ...
# f_S.fit(?, ?)
sample_input = # ...
pred_S = # ...
pred_S
###Output
_____no_output_____
###Markdown
Since feature 2 doesn't help with predicting the labels in y, and feature 2 is 0 for all 100 training observations, then the prediction of the model is the average of all 100 training labels. 1/4 of the labels are 1, and the rest are 0. So that prediction is 0.25 Calculate the difference in predictions
###Code
# TODO
diff_B = # ...
diff_B
###Output
_____no_output_____
###Markdown
Calculate the weight
###Code
# TODO
size_S = #... # is 1
weight_B = # ...
weight_B # should be 1/6
###Output
_____no_output_____
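###Markdown
One possible way to fill in Case B (a sketch mirroring Case A above; it is not the only valid answer):
###Code
# f(x0, x2): model trained on features 0 and 2
S_union_i = get_subset(X, [0, 2])
f_S_union_i = sklearn.tree.DecisionTreeRegressor()
f_S_union_i.fit(S_union_i, y)
pred_S_union_i = f_S_union_i.predict(np.array([1, 1]).reshape(1, -1))  # expected 0.5
# f(x2): model trained on feature 2 only
S = get_subset(X, [2])
f_S = sklearn.tree.DecisionTreeRegressor()
f_S.fit(S, y)
pred_S = f_S.predict(np.array([1]).reshape(1, -1))  # expected 0.25
# marginal contribution of feature 0 and its permutation weight
diff_B = pred_S_union_i - pred_S  # expected 0.25
size_S = S.shape[1]               # 1
weight_B = calc_weight(size_S, M) # 1/6
print(diff_B, weight_B)
###Output
_____no_output_____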
###Markdown
 Quiz: Case C
Calculate the prediction of a model that uses features 0, 1, and 2.
Calculate the prediction of a model that uses features 1 and 2.
Calculate the difference:
$f(x_0,x_1,x_2) - f(x_1,x_2)$

 Calculate $f(x_0,x_1,x_2)$
###Code
# TODO
S_union_i = # ...
f_S_union_i = # ...
# f_S_union_i.fit(?, ?)
sample_input = # ...
pred_S_union_i = # ...
pred_S_union_i
###Output
_____no_output_____
###Markdown
When we use all three features, the model is able to predict that if feature 0 and feature 1 are both 1, then the label is 1.

 Calculate $f(x_1,x_2)$
###Code
# TODO
S = # ...
f_S = # ...
#f_S.fit(?, ?)
sample_input = # ...
pred_S = # ...
pred_S
###Output
_____no_output_____
###Markdown
When the model is trained on features 1 and 2, its training data tells it that when feature 1 is 1, the label is 0 half of the time and 1 the other half. So the average prediction of the model is 0.5.

 Calculate difference in predictions
###Code
# TODO
diff_C = # ...
diff_C
###Output
_____no_output_____
###Markdown
Calculate weights
###Code
# TODO
size_S = # ...
weight_C = # ... # should be 2 / 6 = 1/3
weight_C
###Output
_____no_output_____
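###Markdown
One possible way to fill in Case C (a sketch mirroring Case A above):
###Code
# f(x0, x1, x2): model trained on all three features
S_union_i = get_subset(X, [0, 1, 2])
f_S_union_i = sklearn.tree.DecisionTreeRegressor()
f_S_union_i.fit(S_union_i, y)
pred_S_union_i = f_S_union_i.predict(np.array([1, 1, 1]).reshape(1, -1))  # expected 1.0
# f(x1, x2): model trained on features 1 and 2
S = get_subset(X, [1, 2])
f_S = sklearn.tree.DecisionTreeRegressor()
f_S.fit(S, y)
pred_S = f_S.predict(np.array([1, 1]).reshape(1, -1))  # expected 0.5
diff_C = pred_S_union_i - pred_S  # expected 0.5
size_S = S.shape[1]               # 2
weight_C = calc_weight(size_S, M) # 2/6 = 1/3
print(diff_C, weight_C)
###Output
_____no_output_____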
###Markdown
 Quiz: Case D: remember to include the empty set!
The empty set is also a set. We'll compare how the model does when it has no features with how it does when it gets feature 0 as input.
Calculate the prediction of a model that uses feature 0.
Calculate the prediction of a model that uses no features.
Calculate the difference:
$f(x_0) - f()$

 Calculate $f(x_0)$
###Code
# TODO
S_union_i = # ...
f_S_union_i = # ...
#f_S_union_i.fit(?, ?)
sample_input = # ...
pred_S_union_i = # ...
pred_S_union_i
###Output
_____no_output_____
###Markdown
With just feature 0 as input, the model predicts 0.5 Calculate $f()$**hint**: you don't have to fit a model, since there are no features to input into the model.
###Code
# TODO
# with no input features, the model will predict the average of the labels, which is 0.25
pred_S = # ...
pred_S
###Output
_____no_output_____
###Markdown
With no input features, the model's best guess is the average of the labels, which is 0.25 Calculate difference in predictions
###Code
# TODO
diff_D = # ...
diff_D
###Output
_____no_output_____
###Markdown
 Calculate weight
We expect this to be: 0! * (3-0-1)! / 3! = 2/6 = 1/3
###Code
# TODO
size_S = # ...
weight_D = # ... # weight is 1/3
weight_D
###Output
_____no_output_____
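###Markdown
One possible way to fill in Case D (a sketch; note that no model needs to be fit for the empty set):
###Code
# f(x0): model trained on feature 0 only
S_union_i = get_subset(X, [0])
f_S_union_i = sklearn.tree.DecisionTreeRegressor()
f_S_union_i.fit(S_union_i, y)
pred_S_union_i = f_S_union_i.predict(np.array([1]).reshape(1, -1))  # expected 0.5
# f(): with no features, the best constant prediction is the mean of the labels
pred_S = y.mean()                 # expected 0.25
diff_D = pred_S_union_i - pred_S  # expected 0.25
size_S = 0                        # the empty set
weight_D = calc_weight(size_S, M) # 2/6 = 1/3
print(diff_D, weight_D)
###Output
_____no_output_____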
###Markdown
 Calculate Shapley value
For a single sample observation, where feature 0 is 1, feature 1 is 1, and feature 2 is 1, calculate the Shapley value of feature 0 as the weighted sum of the differences in predictions:

$\phi_{i} = \sum_{S \subseteq M \setminus i} weight_S \times (f(S \cup i) - f(S))$
###Code
# TODO
shap_0 = # ...
shap_0
###Output
_____no_output_____
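###Markdown
One possible completion (a sketch; it assumes the diff and weight variables from cases A through D have been computed, for example as in the sketches above). For this dataset the weighted sum should work out to 0.375.
###Code
shap_0 = (weight_A * diff_A
          + weight_B * diff_B
          + weight_C * diff_C
          + weight_D * diff_D)
shap_0
###Output
_____no_output_____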
###Markdown
 Verify with the shap library
The [shap](https://github.com/slundberg/shap) library is written by Scott Lundberg, the creator of Shapley Additive exPlanations (SHAP).
###Code
sample_values = np.array([1,1,1])
shap_values = shap.TreeExplainer(model).shap_values(sample_values)
print(f"Shapley value for feature 0 that we calculated: {shap_0}")
print(f"Shapley value for feature 0 is {shap_values[0]}")
print(f"Shapley value for feature 1 is {shap_values[1]}")
print(f"Shapley value for feature 2 is {shap_values[2]}")
###Output
_____no_output_____ |
3D_landslide_detection_workflow.ipynb | ###Markdown
 3D landslide detection and volume estimation workflow

 Getting started
This code takes 2 point clouds (pre-event and post-event) and 1 point cloud defining the core points as input. The outputs are:
* 1 point cloud with the 3D-M3C2 fields
* 1 point cloud of the significant changes
* 1 point cloud of landslide sources with the corresponding id and additional information
* 1 point cloud of landslide deposits with the corresponding id and additional information
* 1 .csv file with the landslide source information
* 1 .csv file with the landslide deposit information

To run the workflow:
1. Define your paths, the filenames of the LiDAR data, and the parameters in the "parameters.py" file (a purely hypothetical illustration of such a file is sketched after the first processing cell below).
2. Execute this Jupyter Notebook.

Note that the present version of the code does not include the iterative procedure that progressively increases the depth of the cylinder (pmax) used by M3C2. A new version will be released when this option becomes available in CloudCompare.

 Geomorphic change detection
###Code
%%time
import scripts.Landslide_detection as LD
import scripts.functions as fc
from scripts.parameters import *
import importlib
importlib.reload(fc)
importlib.reload(LD)
LD.Geomorphic_change_detection()
###Output
Wall time: 13min 58s
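###Markdown
The cell below is purely illustrative and is not part of the workflow: it sketches the kind of variables one might define in "parameters.py" (paths, filenames, processing settings). The actual variable names and values are those defined in the project's own scripts/parameters.py and will differ.
###Code
# Hypothetical example only; the real definitions live in scripts/parameters.py.
path_data = 'D:/project/data/'           # folder containing the LiDAR point clouds (placeholder)
cloud_pre_event = 'PC_pre_event.laz'     # pre-event point cloud filename (placeholder)
cloud_post_event = 'PC_post_event.laz'   # post-event point cloud filename (placeholder)
core_points_file = 'core_points.laz'     # core points used by 3D-M3C2 (placeholder)
###Output
_____no_output_____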
###Markdown
Here the significant changes located in the river are manually filtered out.

 Landslide segmentation and volume estimation
###Code
import importlib
import scripts.Landslide_detection as LD
import scripts.functions as fc
importlib.reload(LD)
importlib.reload(fc)
import scripts.cloudcompare as cc
importlib.reload(cc)
from scripts.parameters import *
%%time
LD.Landslide_segmentation()
###Output
Wall time: 29min 56s
###Markdown
Results: Landslide properties
###Code
path = 'D:/Beyond_2D_inventories_synoptic_3D_landslide_volume_calculation_from_repeat_LiDAR_data/data_to_publish/Code/Landslide_detection/res/'
filenames = {'Sources':'Landslide_source_infos.csv','Deposits':'Landslide_deposits_infos.csv'}
importlib.reload(fc)
fc.read_landslide_prop(path,filenames)
###Output
_____no_output_____ |
exercises/CNNExercises.ipynb | ###Markdown
 Exercise 1
You've been hired by a shipping company to overhaul the way they route mail, parcels and packages. They want to build an image recognition system capable of recognizing the digits in the zipcode on a package, so that it can be automatically routed to the correct location. You are tasked with building the digit recognition system. Luckily, you can rely on the MNIST dataset for the initial training of your model!

Build a deep convolutional neural network with at least two convolutional and two pooling layers before the fully connected layer:
- Start from the network we have just built
- Insert a Conv2D layer after the first MaxPool2D, give it 64 filters
- Insert a MaxPool2D after that one
- Insert an Activation layer
- Retrain the model
- Does performance improve?
- How many parameters does this new model have? More or less than the previous model? Why? (a small parameter-counting helper is sketched right after this list)
- How long did this second model take to train? Longer or shorter than the previous model? Why?
- Did it perform better or worse than the previous model?
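
As a quick aid for the parameter-count question above, the sketch below (not part of the original exercise) computes the number of trainable parameters of a Conv2D layer from first principles: filters x (kernel height x kernel width x input channels) weights, plus one bias per filter.
###Code
def conv2d_param_count(n_filters, kernel_h, kernel_w, in_channels):
    """Trainable parameters of a Conv2D layer: weights plus one bias per filter."""
    return n_filters * (kernel_h * kernel_w * in_channels) + n_filters

print(conv2d_param_count(32, 3, 3, 1))   # 3x3 conv, 32 filters, 1 input channel -> 320
print(conv2d_param_count(64, 3, 3, 32))  # 3x3 conv, 64 filters, 32 input channels -> 18496
###Output
_____no_output_____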
###Code
# Imports needed by this exercise (Keras 2-style API, matching the rest of the notebook)
import matplotlib.pyplot as plt
from keras.datasets import mnist
from keras.utils import to_categorical
from keras.models import Sequential
from keras.layers import Conv2D, MaxPool2D, Flatten, Dense
from keras import backend as K

(X_train, y_train), (X_test, y_test) = mnist.load_data('/tmp/mnist.npz')
X_train.shape
X_test.shape
plt.imshow(X_train[0], cmap='gray')
X_train = X_train.astype('float32') / 255.0
X_test = X_test.astype('float32') / 255.0
X_train = X_train.reshape(-1, 28, 28, 1)
X_test = X_test.reshape(-1, 28, 28, 1)
y_train_cat = to_categorical(y_train, 10)
y_test_cat = to_categorical(y_test, 10)
K.clear_session()
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
model.summary()
model.fit(X_train, y_train_cat, batch_size=128,
epochs=2, verbose=1, validation_split=0.3)
model.evaluate(X_test, y_test_cat)
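# The exercise also asks how many parameters the deeper model has; Keras reports
# the total directly (the per-layer breakdown appears in model.summary() above).
model.count_params()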
###Output
_____no_output_____
###Markdown
 Exercise 2
Pleased with your performance on the digit recognition task, your boss decides to challenge you with a harder task. Their online branch allows people to upload images to a website that generates and prints a postcard that is shipped to its destination. Your boss would like to know what images people are uploading on the site in order to provide targeted advertising on the same page, so he asks you to build an image recognition system capable of recognizing a few objects. Luckily for you, there's a ready-made dataset with a collection of labeled images. This is the CIFAR-10 dataset, a very famous dataset that contains images for 10 different categories:
airplane
automobile
bird
cat
deer
dog
frog
horse
ship
truck
In this exercise we will reach the limit of what you can achieve on your laptop and get ready for the next session on cloud GPUs.
Here's what you have to do:
load the cifar10 dataset using keras.datasets.cifar10.load_data()
display a few images, see how hard/easy it is for you to recognize an object with such low resolution
check the shape of X_train, does it need reshape?
check the scale of X_train, does it need rescaling?
check the shape of y_train, does it need reshape?
build a model with the following architecture, and choose the parameters and activation functions for each of the layers:
conv2d
conv2d
maxpool
conv2d
conv2d
maxpool
flatten
dense
output
compile the model and check the number of parameters
attempt to train the model with the optimizer of your choice. How fast does training proceed?
If training is too slow (as expected) stop the execution and move to the next session!
###Code
from keras.datasets import cifar10
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
X_train.shape
plt.imshow(X_train[1])
X_train = X_train.astype('float32') / 255.0
X_test = X_test.astype('float32') / 255.0
y_train.shape
y_train_cat = to_categorical(y_train, 10)
y_test_cat = to_categorical(y_test, 10)
y_train_cat.shape
model = Sequential()
model.add(Conv2D(32, (3, 3),
padding='same',
input_shape=(32, 32, 3),
activation='relu'))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
model.summary()
model.fit(X_train, y_train_cat,
batch_size=32,
epochs=2,
validation_data=(X_test, y_test_cat),
shuffle=True)
###Output
Train on 50000 samples, validate on 10000 samples
Epoch 1/2
50000/50000 [==============================] - 160s 3ms/step - loss: 1.3978 - acc: 0.5013 - val_loss: 1.0818 - val_acc: 0.6284
Epoch 2/2
50000/50000 [==============================] - 164s 3ms/step - loss: 0.9177 - acc: 0.6818 - val_loss: 1.1016 - val_acc: 0.6423
|