path (stringlengths 7-265) | concatenated_notebook (stringlengths 46-17M)
data-retriever.ipynb
###Markdown Imports ###Code import copy import glob import importlib import math import os import random import shutil import sys from os.path import * import seaborn as sns import keras import mahotas.features as mah import matplotlib.pyplot as plt import numpy as np import pandas as pd import SimpleITK as sitk import skimage.feature as skf from skimage.morphology import ball import cnn_builder as cbuild import config import lipiodol_methods as lm import lipiodol_vis as lvis import lipiodol_analysis as lan import niftiutils.helper_fxns as hf import niftiutils.liver_metrics as lmet import niftiutils.masks as masks import niftiutils.registration as reg import niftiutils.transforms as tr import niftiutils.visualization as vis from config import Config import skimage.morphology %matplotlib inline importlib.reload(config) C = config.Config() img_dir = "D:\\Lipiodol\\Images all" seg_dir = "D:\\Lipiodol\\Images extracted and segmentations" target_dir = "D:\\Lipiodol\\Data" lesions = [basename(fn) for fn in glob.glob(join(target_dir,"*"))] liplvls = [0, 87.3, 158.8, 238.7] lesion_id = "PK-04" print(join(target_dir, lesion_id, "CT24h")) import pydicom ###Output _____no_output_____ ###Markdown Data Handling ###Code df = pd.read_excel("D:\Lipiodol\pk-study_spreadsheet.xlsx", sheetname="pat_info") df = df.dropna(subset=["Subject ID", "CT24 Accession #"]) for fn in glob.glob(join(target_dir, "*", "masks", "enh_bl*")): #os.rename(fn, join(dirname(fn), "mrbl"+basename(fn)[len("mribl"):])) os.rename(fn, join(dirname(fn), "lipiodol_low"+splitext(basename(fn))[1])) for fn in glob.glob(join(pk_target_dir, "*")): accnum = df.loc[df["Subject ID"]==basename(fn),"CT24 Accession #"].values[0] if not exists(join(pk_src_dir, accnum)): print(basename(fn), accnum) else: shutil.copytree(join(pk_src_dir, accnum), join(fn, "CT24h")) print(join(fn, "CT24h")) ###Output _____no_output_____ ###Markdown Corrupt data ###Code big_path = r"C:\Users\Clinton\Desktop\MRI Abdomen W WO Contrast" header = hf.dcm_load_header(big_path) dcm_path = [r"C:\Users\Clinton\Desktop\New folder", r"C:\Users\Clinton\Desktop\New folder2", r"C:\Users\Clinton\Desktop\New folder3", r"C:\Users\Clinton\Desktop\New folder4"] for i in range(4): os.makedirs(dcm_path[i]) acq_t = [124326.42, 124404.46, 124446.79, 124633.32] for ix,fn in enumerate(glob.glob(join(big_path, "*"))): h = hf.dcm_load_header(fn) for i in range(4): if abs(float(h[0].AcquisitionTime) - acq_t[i]) < .02: shutil.copy(fn, join(dcm_path[i], basename(fn))) ###Output _____no_output_____ ###Markdown for fn in glob.glob(join(dcm_path, "*")): if int(header[int(fn[-7:-4])-1].TemporalPositionIdentifier) == 2: shutil.move(fn, join(dcm_path2, basename(fn))) elif int(header[int(fn[-7:-4])-1].TemporalPositionIdentifier) == 3: shutil.move(fn, join(dcm_path3, basename(fn))) elif int(header[int(fn[-7:-4])-1].TemporalPositionIdentifier) == 4: shutil.move(fn, join(dcm_path4, basename(fn))) ###Code dim = 1 base = float(header[0].SliceLocation) for x,h in enumerate(header[1:]): dz = float(h.SliceLocation) - base print(dz) base = float(h.SliceLocation) fn = ["t1-pre.nii", "t1-art.nii", "t1-pv.nii", "t1-equ.nii"] for i in range(2,3): a,dims = hf.dcm_load(dcm_path[i], True, True) save_path = join(dirname(dcm_path[i]), "07", fn[i]) hf.save_nii(a, save_path, dims) dcm_path = join(target_dir, lesion_id, "CT24h") fns = glob.glob(join(dcm_path, "*")) z1 = float(hf.dcm_load_header(fns[0])[0].ImagePositionPatient[-1]) z2 = float(hf.dcm_load_header(fns[1])[0].ImagePositionPatient[-1]) dz = z1-z2 for ix,fn in 
enumerate(fns[1:]): h = hf.dcm_load_header(fn) z2 = float(h[0].ImagePositionPatient[-1]) if abs(z1-z2 - dz) > 1e-3: print(fn, z1-z2) break z1 = z2 ###Output D:\Lipiodol\Data\34\CT24h\IMG-0001-00010.dcm 6.0 ###Markdown MRI loading ###Code target_dir = "D:\\Lipiodol\\Data" importlib.reload(lm) lesion_id = "BM-07" P = lm.get_paths_dict(lesion_id, target_dir) modality = "MRI-30d" dcm_paths = [x for x in glob.glob(join(target_dir, lesion_id, modality,"*")) if x.find('.')==-1 and '70s' in x] for dcm_path in dcm_paths: if not exists(splitext(dcm_path)[0] + ".nii.gz"): try: save_path = splitext(dcm_path)[0] + ".nii.gz" img, dims = hf.dcm_load(dcm_path, True, True) hf.save_nii(img, save_path, dims) except: print(dcm_path) paths = [x for x in glob.glob(join(target_dir, lesion_id, modality,"*.nii.gz")) if '70s' in x or 'Ph2-Ax' in x] if modality == "MRI-30d": save_path = P['mr30']['ven'] elif modality == "MRI-BL": save_path = P['mrbl']['ven'] if len(paths)==1: os.rename(paths[0], save_path) paths = [x for x in glob.glob(join(target_dir, lesion_id, modality,"*.nii.gz")) \ if ('pre' in x.lower() and 'post' not in x.lower()) or basename(x).startswith('Ax LAVA-XV')] if modality == "MRI-30d": save_path = P['mr30']['pre'] elif modality == "MRI-BL": save_path = P['mrbl']['pre'] if len(paths)==1: os.rename(paths[0], save_path) importlib.reload(lm) modality="MRI-30d" for fn in glob.glob(join(target_dir,"*")): lesion_id = basename(fn) P = lm.get_paths_dict(lesion_id, target_dir) paths = [x for x in glob.glob(join(target_dir, lesion_id, modality,"*.nii.gz")) \ if '3min' in x or '3 min' in x or basename(x).startswith('Ph3-Ax')] if modality == "MRI-30d": save_path = P['mr30']['equ'] if modality == "MRI-BL": save_path = P['mrbl']['equ'] if len(paths)==1: os.rename(paths[0], save_path) glob.glob(join(target_dir,"*")).index('D:\\Lipiodol\\Data\\PK-17A') importlib.reload(lm) for fn in glob.glob(join(target_dir,"*")): lesion_id = basename(fn) P = lm.get_paths_dict(lesion_id, target_dir) #os.rename(join(target_dir, lesion_id, "MRI-30d", "mri30d_pre.nii.gz"), P['mr30']['pre']) #os.rename(join(target_dir, lesion_id, "MRI-30d", "mri30d_art.nii.gz"), P['mr30']['art']) mod='mrbl' if not exists(P[mod]['sub']): art, D = hf.nii_load(P[mod]['art']) sub = art - hf.nii_load(P[mod]['pre'])[0] hf.save_nii(sub, P[mod]['sub'], D) ###Output _____no_output_____ ###Markdown Make liver mask ###Code model = keras.models.load_model(join(config.Config().model_dir, "mri_liver.hdf5")) lm.seg_liver_mri_from_path(P['mr30']['art'], P['mr30']['liver'], model, P['mr30']['tumor']) model = keras.models.load_model(join(config.Config().model_dir, "ct_liver.hdf5")) #importlib.reload(lm) lm.seg_liver_ct(P['ct24'], P['ct24']['liver'], model, P['ct24']['tumor']) ###Output _____no_output_____ ###Markdown Segment Lipiodol ###Code importlib.reload(masks) for fn in glob.glob(r"Z:\Sophie\24hCT ROIs Threshold\ROI_*.off"): masks.off2ids(fn, num_tumors=1, R=2.) 
importlib.reload(masks) Masks = [] for fn in glob.glob(r"Z:\Sophie\24hCT ROIs Threshold\ROI_*.ids")[16:]: lesion_id = fn[-9:-4] if lesion_id == "BM-04": lesion_id = "BM-04B" elif lesion_id not in lesions: lesion_id += "A" P = lm.get_paths_dict(lesion_id, target_dir) Masks.append(masks.get_mask(fn, img_path=P['ct24']['img'], overlaid=True)) meds = [np.percentile(M[M != 0], 99) for M in Masks] np.mean(meds) plt.figure(figsize=(6,2), dpi=100) plt.hist(I[I > 0].flatten(), 40); plt.xlabel('Intensity (Hounsfield units)') plt.ylabel('Number of Voxels') sns.despine(top=True, right=True) np.median(list(cutoffs.values())) cutoffs.pop('PK-22') plt.figure(figsize=(8, 1), dpi=300) plt.plot(np.linspace(40,90,100), [0]*100, 'k-', lw=.5) plt.plot(list(cutoffs.values()), np.zeros(len(cutoffs)), '.', markersize=20, alpha=.9) plt.plot([82], [0], 'r.', markersize=20, alpha=.9) sns.despine(top=True, right=True, left=True, bottom=True) plt.yticks([]); plt.axes().spines['bottom'].set_position('center') ###Output _____no_output_____ ###Markdown importlib.reload(lm)blct_dir = r"D:\Lipiodol\CTBL Data"cutoffs = {}for mask_fn in glob.glob(join(blct_dir, "*.off")): path = splitext(mask_fn)[0] lesion_id = basename(path) I,D = hf.nii_load(path+".nii.gz") M = masks.get_mask(path, D, I.shape)[0] I = I*M/M.max() cutoffs[lesion_id]=np.percentile(I[I != 0], 99) ct_img, ct_dims = hf.dcm_load(join(target_dir, lesion_id, "CTBL"), True, True)hf.save_nii(ct_img, ctbl_path, ct_dims)lm.seg_liver_ct(ctbl_path, ctbl_liver_mask_path, model)masks.restrict_mask_to_largest(ctbl_tumor_mask_path, img_path=ctbl_path)A=masks.draw_mask(ctbl_liver_mask_path, ctbl_path, [0,200])A = skimage.morphology.grey.erosion(A) np.percentile(A[A>0], 99.5) masks.draw_mask(ct24_tumor_mask_path, ct24_path, [0,300]); ###Code importlib.reload(hf) ct_img, D = hf.dcm_load_special(join(target_dir, lesion_id, "CT24h")) hf.save_nii(ct_img, P['ct24']['img'], D) importlib.reload(lm) for lesion_id in lesions: P = lm.get_paths_dict(lesion_id, target_dir) lm.seg_lipiodol(P, liplvls[1:]) ###Output _____no_output_____ ###Markdown Split masks of multiple tumors ###Code importlib.reload(lm) lm.check_multi_tumors(lesion_id, target_dir) lm.restrict_masks(lesion_id, target_dir) #Identify multi-tumor lesions importlib.reload(lm) for fn in glob.glob(join(target_dir,"*")): lesion_id = basename(fn) lm.check_multi_tumors(lesion_id, target_dir) lm.restrict_masks(lesion_id, target_dir) ###Output _____no_output_____ ###Markdown importlib.reload(masks)for fn in glob.glob(join(target_dir,"*","masks", "ZZbackup*")): os.rename(fn, join(dirname(fn), basename(fn)[8:])) img,_ = hf.nii_load(mribl_art_path)mask,_ = masks.get_mask(mribl_tumor_mask_path, img_path=mribl_art_path) hf.draw_slices(hf.crop_nonzero(mask*img)[0]) qEASL ###Code df = pd.read_excel("D:\\Lipiodol\\qEASL.xlsx", index_col=0) target_dir = "D:\\Lipiodol\\Data" for fn in glob.glob(join(target_dir,"*")): lesion_id = basename(fn) patient_id = lesion_id[:5] P = lm.get_paths_dict(lesion_id, target_dir) mod='mrbl' if not exists(P[mod]['nec']+".ids") and not exists(P[mod]['enh']+".ids"): threshold = df.loc[patient_id, "BL Threshold"] lmet.seg_tumor_from_threshold(P[mod]['art'], P[mod]['pre'], threshold, P[mod]['tumor'], P[mod]['enh'], P[mod]['nec']) threshold = df.loc[patient_id, "30d Threshold"] mod='mr30' lmet.seg_tumor_from_threshold(P[mod]['art'], P[mod]['pre'], threshold, P[mod]['tumor'], P[mod]['enh'], P[mod]['nec']) df = pd.read_excel("D:\\Lipiodol\\qEASL.xlsx", index_col=0) for fn in lesions[13:17]: lesion_id = basename(fn) P 
= lm.get_paths_dict(lesion_id, target_dir) mod='mrbl' lmet.seg_tumor_from_threshold(P[mod]['art'], P[mod]['pre'], df.loc[lesion_id, "BL Threshold"], P[mod]['tumor'], P[mod]['enh'], P[mod]['nec']) mod='mr30' lmet.seg_tumor_from_threshold(P[mod]['art'], P[mod]['pre'], df.loc[lesion_id, "30d Threshold"], P[mod]['tumor'], P[mod]['enh'], P[mod]['nec']) #df = pd.read_excel("D:\\Lipiodol\\qEASL.xlsx", index_col=0) #target_dir = "D:\\Lipiodol\\Data" lesion_id = "BM-30A".upper() importlib.reload(lm) P = lm.get_paths_dict(lesion_id, target_dir) mod = 'mrbl' mask,D = masks.get_mask(P[mod]['tumor'], img_path=P[mod]['art']) mask.sum()/mask.max()*np.product(D) / 1000 mod = 'mr30' mask,D = masks.get_mask(P[mod]['tumor'], img_path=P[mod]['art']) mask.sum()/mask.max()*np.product(D) / 1000 mod = 'ct24' mask,D = masks.get_mask(P[mod]['tumor'], img_path=P[mod]['img']) mask.sum()/mask.max()*np.product(D) / 1000 for fn in glob.glob(P['ct24Tx']['mrbl']['enh']+"*"): os.remove(fn) df = pd.read_excel("D:\\Lipiodol\\qEASL.xlsx", index_col=0) importlib.reload(masks) threshold = df.loc[lesion_id, "BL Threshold"] mod='mrbl' lmet.seg_tumor_from_threshold(P[mod]['art'], P[mod]['pre'], threshold, P[mod]['tumor'], P[mod]['enh'], P[mod]['nec']) if exists(P[mod]['enh']+".off"): masks.draw_mask(P[mod]['enh'], P[mod]['art'], limit_mask_path=P[mod]['tumor']); mod='mrbl' if exists(P[mod]['nec']+".off"): masks.draw_mask(P[mod]['nec'], P[mod]['art'], limit_mask_path=P[mod]['tumor']); df = pd.read_excel("D:\\Lipiodol\\qEASL.xlsx", index_col=0) importlib.reload(masks) threshold = df.loc[lesion_id, "30d Threshold"] mod='mr30' lmet.seg_tumor_from_threshold(P[mod]['art'], P[mod]['pre'], threshold, P[mod]['tumor'], P[mod]['enh'], P[mod]['nec']) if exists(P[mod]['enh']+".off"): masks.draw_mask(P[mod]['enh'], P[mod]['art'], limit_mask_path=P[mod]['tumor']); mod='mr30' if exists(P[mod]['nec']+".off"): masks.draw_mask(P[mod]['nec'], P[mod]['art'], limit_mask_path=P[mod]['tumor']); importlib.reload(lm) P = lm.get_paths_dict(lesion_id, target_dir) M = masks.get_mask(P['ct24Tx']['crop']['tumor'])[0] tumor_len = int(np.sum(M/M.max())**(1/3)) R = (tumor_len / 13)**.5 #R=1. 
lm.reg_to_ct24(lesion_id, target_dir, D=[R,R,R*2.5], padding=0.2) importlib.reload(lvis) save_dir = r"C:\Users\Clinton\Box\FOR CLINTON BOX FOLDER\Lesion Gallery\Registered Imgs" lvis.draw_reg_seq(lesion_id, target_dir, save_dir) lan.get_qEASL(lesion_id, target_dir) importlib.reload(hf) mod='mrbl' SUB=masks.crop_img_to_mask_vicinity(P[mod]['sub'], P[mod]['tumor'], .3) #PRE=masks.crop_img_to_mask_vicinity(P[mod]['pre'], P[mod]['tumor'], .1) hf.draw_slices(SUB) CT=masks.crop_img_to_mask_vicinity(P['ct24']['img'], P['ct24']['tumor'], .5) CT=tr.apply_window(CT) hf.draw_slices(CT) # plot subtraction mod='mr30' SUB=masks.crop_img_to_mask_vicinity(P[mod]['sub'], P[mod]['tumor'], .5) hf.draw_slices(SUB) importlib.reload(masks) I=masks.draw_mask(P['ct24']['tumor'], P['ct24']['img'], [0,300]) #qEASLy importlib.reload(lmet) mod='mrbl' threshold1=lmet.segment_tumor_from_paths(P[mod]['art'], P[mod]['pre'], P[mod]['liver'], P[mod]['tumor'], P[mod]['enh'], P[mod]['nec']) mod='mr30' threshold2=lmet.segment_tumor_from_paths(P[mod]['art'], P[mod]['pre'], P[mod]['liver'], P[mod]['tumor'], P[mod]['enh'], P[mod]['nec']) ###Output _____no_output_____ ###Markdown Non-rigid reg ###Code tumor_lens = {} for fn in glob.glob(join(target_dir,"*")): lesion_id = basename(fn) P = lm.get_paths_dict(lesion_id, target_dir) M = masks.get_mask(P['ct24Tx']['crop']['tumor'])[0] tumor_lens[lesion_id] = int(np.sum(M/M.max())**(1/3)) intercept = np.percentile(list(tumor_lens.values()), 20) lesions.index("BM-06") intercept importlib.reload(lm) #lesion_id = "BM-12" for fn in lesions[57:]: lesion_id = basename(fn) print(lesion_id) P = lm.get_paths_dict(lesion_id, target_dir) M = masks.get_mask(P['ct24Tx']['crop']['tumor'])[0] tumor_len = int(np.sum(M/M.max())**(1/3)) R = (tumor_len / intercept)**.5 lm.reg_to_ct24(lesion_id, target_dir, D=[R,R,R*2.5]) master_df = pd.read_excel(r"D:\Lipiodol\MASTER SOPHIE.xlsx", "Lesions analyzed", index_col="Lesion_ID") importlib.reload(lm) for lesion_id in lesions: P = lm.get_paths_dict(lesion_id, target_dir) if master_df.loc[lesion_id, "0=well delineated, 1=infiltrative"] == 0 and not exists(P['ball']['ct24']['img']): print(lesion_id) lm.spherize(lesion_id, target_dir) importlib.reload(lm) for fn in glob.glob(join(target_dir,"*")): lesion_id = basename(fn) print(lesion_id) lm.reg_to_modality(lesion_id, target_dir) importlib.reload(lm) lesion_id = "BM-07" P = lm.get_paths_dict(lesion_id, target_dir) ###Output _____no_output_____ ###Markdown Rename folders semi-automatically ###Code def sort_by_series_num(arr): return sorted(arr, key=lambda x: int(x[x.rfind("_")+1:x.find('.')])) for fn in glob.glob(join(target_dir,"*"))[42:]: lesion_id = basename(fn) P = lm.get_paths_dict(lesion_id, target_dir) DCE = [x for x in os.listdir(join(fn, "MRI-BL")) if ("vibe" in x or "dynamic" in x) and "post" in x and x.endswith('.gz')] break for fn in glob.glob(join(target_dir,"*"))[42:]: lesion_id = basename(fn) P = lm.get_paths_dict(lesion_id, target_dir) mod="MRI-30d" DCE = [x for x in os.listdir(join(fn, mod)) if ("vibe" in x or "post" in x) and x.endswith('.gz')] if len(DCE) > 0 and not exists(join(fn, mod, "mr30_equ.nii.gz")): DCE = sort_by_series_num([x for x in DCE if "min" not in x]) print(fn, DCE[-1], DCE[0], sep="\n") #sort_by_series_num(PRE)[-1], correct = input() if correct == "0": #os.rename(join(accnum, PRE[0]), join(accnum, "T1_BL")) os.rename(join(fn, mod, DCE[-1]), join(fn, mod, "mr30_equ.nii.gz")) elif correct == "q": break else: continue ###Output _____no_output_____ ###Markdown Misc visualization 
###Code importlib.reload(lvis) save_dir = r"C:\Users\Clinton\Box\FOR CLINTON BOX FOLDER\Lesion Gallery\Registered Imgs" lesion_id = "BM-34" lvis.draw_reg_seq(lesion_id, target_dir, save_dir) lesions.index('PK-24') P = lm.get_paths_dict(lesion_id, target_dir) importlib.reload(lvis) #save_dir = r"D:\Lipiodol\Results\enh-masks" save_dir = r"C:\Users\Clinton\Box\FOR CLINTON BOX FOLDER\Lesion Gallery\qEASL output" #lesion_id = "PK-01E" for fn in lesions[13:17]: lesion_id = "BM-34"#basename(fn)#"PK-01B" lvis.draw_mrseq_with_mask(lesion_id, target_dir, save_dir, "mrbl") lvis.draw_mrseq_with_mask(lesion_id, target_dir, save_dir, "mr30") importlib.reload(lvis) #save_dir = r"D:\Lipiodol\Results\reg-seq" save_dir = r"C:\Users\Clinton\Box\FOR CLINTON BOX FOLDER\Lesion Gallery\Registered Imgs" if not exists(save_dir): os.makedirs(save_dir) for fn in lesions[13:17]: lesion_id = "BM-02"#"PK-01B" print(lesion_id) #r"D:\Lipiodol\Results\enh-masks" lvis.draw_reg_seq(lesion_id, target_dir, save_dir) importlib.reload(lvis) for fn in glob.glob(join(target_dir,"*")): lesion_id = basename(fn) try: lvis.draw_sub_and_depo(lesion_id, target_dir, "D:\\Lipiodol\\Results\\subs-ct", include_FU=True) except: print(lesion_id) continue ###Output _____no_output_____ ###Markdown Extra Compose with rotation ###Code mribl_liver_mask_path = join(target_dir, lesion_id, "liver bl final.ids") ct24_liver_mask_path = glob.glob(join(target_dir, lesion_id, "wholeliver_24hCT*.ids"))[0] mribl_ct24_xform, ct24_tumor, mribl_tumor, mribl_ct24_crops = full_reg_masks(ct24_tumor_mask_path, ct24_path, mribl_tumor_mask_path, mribl_art_path) # BL MRI to 24h CT vec_field = lm.reg_masks(ct_tumor_mask_path, blmri_tumor_mask_path) mribl_enh_mask = reg.apply_field(vec_field) ct24_lip = lm.seg_lipiodol() reg.reg_sitk(ct24_lip, vec_field) mri_dcm_paths = [x for x in glob.glob(join(target_dir, '*', 'MRI-BL', '*20s*')) if '.nii' not in x] mri_dcm_paths path_dict = {} lesion_id = '04' for path in glob.glob(join(target_dir, lesion_id, "MRI-BL","*","*")): header = hf.dcm_load_header(path) if len(header) > 0: series_descript = header[0][('0008', '103e')].value path_dict[path] = series_descript series_descript = series_descript.replace("/","-") os.rename(path, join(target_dir,lesion_id, "MRI-BL", series_descript)) for path in glob.glob(join(target_dir, "*", "masks","*final*")): if not exists(path.replace(" final", "")): os.rename(path, path.replace(" final", "")) ###Output _____no_output_____ ###Markdown Colored Dicoms ###Code L = ["BM-07"] for lesion_id in L: # P = lm.get_paths_dict(lesion_id, target_dir) img_paths = [P['mrbl'][ph] for ph in ["art", "ven", "equ"]] save_path = join(r"D:\Multiphase-color", lesion_id, "BL") hf.save_tricolor_dcm(img_paths, save_path) ###Output _____no_output_____
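###Markdown The corrupt-data check above walks consecutive DICOM headers and prints the first slice where the z-spacing jumps, but it relies on the `niftiutils` helper wrappers. Below is a minimal standalone sketch of the same idea using `pydicom` directly; the `*.dcm` filename pattern, the 1e-3 tolerance and the commented example path are illustrative assumptions rather than values taken from this notebook. ###Code
import glob
from os.path import join

import pydicom

def check_slice_spacing(dcm_dir, tol=1e-3):
    """Flag a DICOM series whose axial slice spacing is not uniform."""
    # Read only the headers and sort the slices by the z component of ImagePositionPatient.
    headers = [pydicom.dcmread(fn, stop_before_pixels=True)
               for fn in glob.glob(join(dcm_dir, "*.dcm"))]
    headers.sort(key=lambda ds: float(ds.ImagePositionPatient[-1]))

    z = [float(ds.ImagePositionPatient[-1]) for ds in headers]
    gaps = [b - a for a, b in zip(z[:-1], z[1:])]

    uniform = True
    for i, gap in enumerate(gaps):
        # Compare every gap with the first one and report where the spacing changes.
        if abs(gap - gaps[0]) > tol:
            print("Spacing changes at slice %d: %.3f vs %.3f" % (i + 1, gap, gaps[0]))
            uniform = False
    return uniform

# Hypothetical call; the notebook reads its series from join(target_dir, lesion_id, "CT24h").
# check_slice_spacing(r"D:\Lipiodol\Data\PK-04\CT24h")
###Output _____no_output_____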
notebooks/Combining Datasets.ipynb
###Markdown Compiling Final DatasetWe've made our table from the two datasets with most of the information. Excluded were species that were not in the taxa information we gathered. Of course there is a lot of missing fields, so we will explore what to keep and what to throw out in this notebook ###Code import os import pickle import numpy as np import pandas as pd base_dir = os.environ['HOME'] + '/python/Mushroom_Classifier/' DF_MO = pickle.load(open(base_dir + 'MO_tables/finished_df_MO.p','rb')) DF_DSA = pickle.load(open(base_dir + 'DSA_info/finished_df_DSA.p', 'rb')) DF = pd.concat([DF_MO, DF_DSA], axis=0) #creating truly unique identifier for each image DF['Unique_ID'] = \ DF['Data_Source'].apply(lambda x: x + '_') \ + DF['id'].apply(lambda x: str(x)) print(DF.shape) display(DF) pickle.dump(DF, open(base_dir + 'combined_dataset_df.p', 'wb')) ###Output (1022772, 16) ###Markdown OK. Let's get some stats. I want the following: 1) How many entries have most taxa fields 2) How many images per class 3) How many entries do we lose with vote_cache > 1.5 ###Code filters = [] fields_to_include = \ ['Phylum', 'Class', 'Order', 'Family', 'Genus', 'Species'] for fields in fields_to_include: filters.append(DF[fields].apply(lambda x: x != '')) filter_complete = pd.concat(filters, axis=1) observations_with_number_of_fields = {} for i in range(7): fields_complete_filter = (filter_complete.sum(axis=1) == i) observations_with_number_of_fields[i] = DF[fields_complete_filter].shape[0] for k,v in observations_with_number_of_fields.items(): print('Number of entries (%i) with %i taxonomy labels' %(v,k)) #how many entries have both genus and species? filters = [] for fields in ['Genus','Species']: filters.append(DF[fields].apply(lambda x: x != '')) filter_complete = pd.concat(filters, axis=1) contains_genus_and_species = sum(filter_complete.sum(axis=1) == 2) contains_only_genus = print('Number of observations that contain genus and species: %i/%i'\ %(contains_genus_and_species, DF.shape[0])) yes_g = np.reshape(filter_complete.values[:,0]==1, (filter_complete.shape[0],1)) no_g = np.reshape(filter_complete.values[:,0]==0, (filter_complete.shape[0],1)) yes_s = np.reshape(filter_complete.values[:,1]==1, (filter_complete.shape[0],1)) no_s = np.reshape(filter_complete.values[:,1]==0, (filter_complete.shape[0],1)) genus_and_no_species = sum(np.sum(np.concatenate([yes_g, no_s], axis=1), axis=1) == 2) no_genus_and_species = sum(np.sum(np.concatenate([no_g, yes_s], axis=1), axis=1) == 2) genus_and_species = sum(np.sum(np.concatenate([yes_g, yes_s], axis=1), axis=1) == 2) neither_genus_or_species = sum(np.sum(np.concatenate([no_g, no_s], axis=1), axis=1) == 2) print('Genus and no species %i' %genus_and_no_species) print('No Genus but species %i' %no_genus_and_species) print('Genus and Species :) %i' %genus_and_species) print('Neither: %i' %neither_genus_or_species) adds_up = neither_genus_or_species + genus_and_species \ + no_genus_and_species + genus_and_no_species print('Does it add up? %i/%i' %(adds_up, DF.shape[0])) ###Output Genus and no species 290559 No Genus but species 0 Genus and Species :) 704783 Neither: 27430 Does it add up? 1022772/1022772 ###Markdown OK, there are no observations that only have species and no genus, so at least it follows some hierarchical logic. I'm going to add those species as 'unknown'. That would be a respectable prediction, if we can get the genus but not the species, and keep 290k images. 
###Code DF.loc[DF['Species'] == '', 'Species'] = 'unknown' display(DF) #replace vote_cache which is empty with a 1 for now DF.loc[DF.vote_cache.apply(lambda x: np.isnan(x)), 'vote_cache'] = 1 DF['GS_Dir'] = DF['Genus'] + '_' + DF['Species'] ###Output _____no_output_____ ###Markdown FilteringWe are making the following rules for images that we want to keep. But I also want statistics of how many pictures we're losing: 1) Must have both Genus and Species -> We are losing 27,430 pictures 2) There must be at least 100 pictures per class -> We are losing 204,719 pictures 3) Vote_cache must be greater than 0 -> We are losing 6000 pictures ###Code taxa_fields = ['Phylum','Class','Order','Family','Genus','Species'] #Must have both Genus and Species DF = DF[DF.Genus != ''] #Vote_cache must be greater than 0 DF = DF[DF.vote_cache > 0] images_per_class = {i: sub_df.shape[0] for i, sub_df in DF.groupby('GS_Dir')} images_per_class = sorted(images_per_class.items(), key=lambda x: x[1], reverse=True) species_more_than_100 = [w for w in images_per_class if w[1]>=100] no_observations = sum([w[1] for w in species_more_than_100]) no_classes = len(species_more_than_100) observations_lost = DF.shape[0] - no_observations print('Observations: %i across %i classes' %(no_observations, no_classes)) print('We lost %i observations' %observations_lost) filtered_df = [] #let's actually make a new dataframe for i, sub_df in DF.groupby('GS_Dir'): if sub_df.shape[0] >= 100: filtered_df.append(sub_df) DF = pd.concat(filtered_df, axis=0) display(DF) pickle.dump(DF, open(base_dir + 'combined_dataset_df.p', 'wb')) #adjusting for Unique_IDs for observations with multiple images DF['Unique_ID'] = DF['Unique_ID'] + ['_' + str(w) if w>0 else '' for w in DF.index] pickle.dump(DF, open(base_dir + 'combined_dataset_df.p', 'wb')) ###Output _____no_output_____
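###Markdown The three inclusion rules stated above (genus present, positive `vote_cache`, and at least 100 images per `GS_Dir` class) are applied across several cells; the sketch below folds them into one reusable helper. The function name, the `min_per_class` default and the tiny toy frame are illustrative assumptions, while the column names are the ones already used in this notebook. ###Code
import pandas as pd

def filter_observations(df, min_per_class=100):
    """Apply the three inclusion rules used to build the final dataset."""
    # Rule 1: genus must be present (species may be 'unknown', but not empty).
    df = df[df["Genus"] != ""]
    # Rule 2: the community vote score must be positive.
    df = df[df["vote_cache"] > 0]
    # Rule 3: keep only classes with at least `min_per_class` images.
    counts = df["GS_Dir"].value_counts()
    keep = counts[counts >= min_per_class].index
    return df[df["GS_Dir"].isin(keep)]

# Tiny made-up frame just to show the call signature.
toy = pd.DataFrame({
    "Genus": ["Amanita", "Amanita", ""],
    "Species": ["muscaria", "muscaria", "unknown"],
    "vote_cache": [2.0, 1.5, 3.0],
    "GS_Dir": ["Amanita_muscaria", "Amanita_muscaria", "_unknown"],
})
print(filter_observations(toy, min_per_class=2).shape)
###Output _____no_output_____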
notebooks/API Request Evaluation.ipynb
###Markdown API Data SchemaGeoff Pidcock | 20190613 ScopeUse the Python Requests library to extract information about event listings, and determine an appropriate schema. TODO- Build out the query string for Sydney events- Double check Eventbrite schema against example schema. ###Code # See environment.yml for setup instructions import sys import os import pandas as pd import json import requests # Get API keys from the following places: ## Eventbrite: https://www.eventbrite.com.au/account-settings/apps ## Meetup: https://secure.meetup.com/meetup_api/key/ # This has been stored in a creds python file above the parent directory print(os.getcwd()) os.chdir('..//..//') print(os.getcwd()) from creds import meetup_api_key, eventbrite_api_key_public ## Alternative - specify keys manually here: # meetup_api_key = 'YOUR KEY' # https://secure.meetup.com/meetup_api/key/ # eventbrite_api_key = 'YOUR KEY' # https://www.eventbrite.com/support/articles/en_US/How_To/how-to-locate-your-eventbrite-api-user-key?lg=en_US ###Output _____no_output_____ ###Markdown Pull Data from Meetup API[Docs](https://www.meetup.com/meetup_api/docs/) ###Code # https://www.meetup.com/meetup_api/docs/find/upcoming_events/ # Todo - look into topic categories for smarter filtering - https://www.meetup.com/meetup_api/docs/find/topic_categories/ payload = dict( text = 'Data', lat = '-33.87', lon = '151.21', radius = '100', page = '100', order = 'time', fields = 'featured_photo,key_photo,key_photo,meta_category,group_category', key = meetup_api_key ) attempt = requests.get("https://api.meetup.com/find/upcoming_events",params=payload) data = attempt.json() len(data['events']) data['city'] i = 0 for event in data['events']: print("index: {} | name: {}".format(i,event['name'])) i += 1 data['events'][52] # 48, 52 ###Output _____no_output_____ ###Markdown Meetup Findings- Not sure if list is comprehensive - might make sense to assemble a list of data related groups, and then iterate through each - Will need to process datetimes from epoc to calendar before writing to database ###Code # DB Schema - Postgres 9.3+ sql = """ CREATE TABLE IF NOT EXISTS raw_events( event_id SERIAL PRIMARY KEY, source_platform_id CHAR VARYING(20) source_platform CHAR VARYING(50), event_city CHAR VARYING(50), event_date_local DATE, event_time_local TIME, event_name TEXT, event_organiser TEXT, event_location_name TEXT, event_location_address TEXT, event_lat NUMERIC(20,16), event_lon NUMERIC(20,16), registration_link TEXT, description TEXT, event_or_group_photo Text, event_category CHAR VARYING(100), event_category CHAR VARYING(100) ) """ # Response Schema based on Meetup [ { 'event_id': '1', 'source_platform_id': '261218488', 'source_platform': 'Meetup', 'event_city': 'Sydney', 'event_date': '2019-06-14', 'event_time': '07:30', 'event_name': 'GA & Data Science Breakfast Meetup presents: The Rise of Automation', 'event_organiser': 'Data Science Breakfast Meetup', 'event_location_name': 'GA Sydney (Main Campus)', 'event_location_address': 'The Podium Building, 1 Market St Sydney AU', 'event_lat': '-33.869998931884766', 'event_lon': '151.20460510253906', 'registration_link': 'https://www.meetup.com/The-Sydney-Data-Science-Breakfast-Meetup-Group/events/261218488/', 'description': '<p>Note: this is a partnered event with General Assembly. 
Please make sure to register using the General Assembly event page, linked here: <a href="https://ga.co/2PPoZWd" class="linkified">https://ga.co/2PPoZWd</a></p> <p>****<br/>Abstract:<br/>There has been a lot of attention in the media surrounding the rise of automation. As advanced technologies such as robotic process automation, machine learning, and artificial intelligence have matured, companies have found practical applications to these new technologies.</p> <p>As part of this evolution, Intelligent Automation has become a topic of interest for business leaders across industries looking to combine cognitive capabilities with robotic process technologies to create a "living" system that can go beyond mundane and repeatable tasks. This is extremely advantageous to any organization that can implement these systems seamlessly but there is still a delta between the ideation of AI integration. and the ability to put these plans into effect. This delta can instill fear and anxiety around using AI as well as the added question of the ethical implications of AI such as Facial Recognition and targeted advertising.</p> <p>Join General Assembly and the Data Science Breakfast Meetup as we present a panel of experts at the forefront of AI incorporation for an engaging conversation that will touch on.</p> <p>*****<br/>Agenda:<br/>07:30 - arrival and networking<br/>07:50 - panel kick off<br/>08:30 - panel Q&amp;A<br/>09:00 - more networking (and anyone who needs to head off can leave)<br/>09:30 - close</p> <p>*****<br/>Speaker BIO\'s:</p> <p>Anthony Tockar (Moderator):<br/>Anthony Tockar is director and cofounder at Verge Labs, a new type of AI company focused on the applied side of machine learning. A jack-of-all-trades, he has worked on problems across insurance, technology, telecommunications, loyalty, sports betting and even neuroscience. He qualified as an actuary, then moved into data science, completing an MS in Analytics at the prestigious Northwestern University.</p> <p>After hitting the headlines with his posts on data privacy at Neustar, he returned to Sydney to practice as a data scientist and cofounded the Minerva Collective, a not-for-profit focused on using data for social good, as well as multiple meetup groups. His key missions are to extend the reach and impact of data science to help people, and to assist Australian businesses to become more data driven.</p> <p>Sam Zheng (Panelist):<br/>Sam is Co-founder/CEO of Curious Thing - a voice-based AI interviewer for talent acquisition startup. Sam is a tech entrepreneur, self-taught engineer, and qualified actuary. Before Curious Thing, Sam was Co-founder/CTO of Hyper Anna, an AI for business analytics startup.</p> <p>Dima Galat (Panelist):<br/>Dima learned to program in Assembly on an i486, back when disk sizes were measured in megabytes. He always saw programming as a tool for facilitating communication between disparate data sources and end users.</p> <p>After his first encounter with data mining a decade ago, he became obsessed with applied machine learning, which supercharges this communication process. He has a background in computer vision productisation, data engineering, and a variety of analytics projects for clients ranging from financial institutions to United Nations.</p> <p>Usman Shahbaz (panelist):<br/>Usman is an experienced leader with more than 14 years of rich experience in applying product, network, risk-assurance and consumer analytics to drive actionable business outcomes. 
His core specialties include Advanced Analytics, Machine &amp; Deep Learning, Statistical Modelling and Optimisation. Usman is currently enrolled for a PhD in Machine Learning. He also holds an MBA and a Bachelor’s degree in Electrical Engineering.</p> <p>Passiona Cottee (panelist):<br/>Data Scientist, Commonwealth Bank, Co-lecturer at UTS</p> ', 'event_or_group_photo': 'https://secure.meetupstatic.com/photos/event/c/6/6/5/highres_481070789.jpeg', 'event_format': 'panel', 'event_category': 'automation' } ] ###Output _____no_output_____ ###Markdown Pull Data from EventbriteInvolves setting up an app and getting the app's api public key.*References:*- [EventBrite API Docs](https://www.eventbrite.com/platform/api) ###Code url = "https://www.eventbriteapi.com/v3/events/search/" querystring = {"token":eventbrite_api_key_public} payload = "" headers = { 'cache-control': "no-cache", } response = requests.request("GET", url, data=payload, headers=headers, params=querystring) response.status_code data = response.json() print(len(data['events'])) # Pagination will likely be needed print(data['events'][0]) # might make sense to pretty print this, and sort out schema ###Output {'name': {'text': 'Big Tigger Hosts Concert Weekend Kickoff at Suite Lounge - RSVP NOW', 'html': 'Big Tigger Hosts Concert Weekend Kickoff at Suite Lounge - RSVP NOW'}, 'description': {'text': '\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\xa0\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nSUITE LIFE FRIDAYS\r\nHOSTED BY\r\nBIG TIGGER\r\nFREE TIL 12AM W/ RSVP\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\xa0\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n©2019 Furious | Peachtree Road, Atlanta Ga\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n', 'html': '<TABLE ID="container" CLASS="container logoless" STYLE="color: #000000; font-family: Times; table-layout: fixed; border-collapse: collapse; border-spacing: 0px; width: 640px;">\r\n<TBODY>\r\n<TR>\r\n<TD ID="" STYLE="padding-top: 20px;">\r\n<TABLE ID="outer_wrapper" STYLE="table-layout: fixed; border-collapse: collapse; border-spacing: 0px; border-radius: 4px; background-color: #000000; width: 590px;">\r\n<TBODY>\r\n<TR>\r\n<TD ID="" STYLE="padding: 0px;">\r\n<DIV CLASS="borderless" STYLE="border-radius: 4px;">\r\n<DIV ID="header_inner" STYLE="border-style: none; width: 590px;"><IMG STYLE="border-style: none; display: block; border-radius: 3px 3px 0px 0px; width: 590px;" ALT="FuriousLogo copy" SRC="https://cascade.madmimi.com/promotion_images/1197/8294/original/FuriousLogo_copy.png?1473957906" WIDTH="590"></DIV>\r\n<TABLE ID="" CLASS="full spacer spacer-top" STYLE="border-style: none; clear: both; table-layout: fixed; border-collapse: collapse; border-spacing: 0px; width: 590px; min-width: 100%;">\r\n<TBODY>\r\n<TR>\r\n<TD ID="" STYLE="margin: 0px auto; height: 30px; line-height: 30px; padding: 0px 30px;">\xa0</TD>\r\n</TR>\r\n</TBODY>\r\n</TABLE>\r\n<TABLE ID="" CLASS="full text table-1 middle" STYLE="border-style: none; width: 590px; clear: both; table-layout: fixed; border-collapse: collapse; border-spacing: 0px; min-width: 100%;">\r\n<TBODY>\r\n<TR>\r\n<TD ID="" STYLE="padding: 0px 30px 20px;">\r\n<DIV CLASS="module text" DIR="ltr">\r\n<P STYLE="font-family: sans-serif; font-size: 12px; line-height: 1.5em; vertical-align: baseline; color: #3a352a; text-align: center; unicode-bidi: embed; margin: 0px 0px 1.3em; padding: 0px;"><SPAN 
STYLE="font-variant-numeric: normal; font-variant-east-asian: normal; font-weight: bold; font-stretch: normal; font-size: 40px; line-height: normal; font-family: Arial; color: gold;">SUITE LIFE FRIDAYS</SPAN></P>\r\n<P STYLE="font-family: sans-serif; font-size: 12px; line-height: 1.5em; vertical-align: baseline; color: #3a352a; text-align: center; unicode-bidi: embed; margin: 0px 0px 1.3em; padding: 0px;"><SPAN STYLE="font-variant-numeric: normal; font-variant-east-asian: normal; font-weight: bold; font-stretch: normal; font-size: 40px; line-height: normal; font-family: Arial; color: white;">HOSTED BY</SPAN></P>\r\n<P STYLE="font-family: sans-serif; font-size: 12px; line-height: 1.5em; vertical-align: baseline; color: #3a352a; text-align: center; unicode-bidi: embed; margin: 0px 0px 1.3em; padding: 0px;"><SPAN STYLE="font-variant-numeric: normal; font-variant-east-asian: normal; font-weight: bold; font-stretch: normal; font-size: 70px; line-height: normal; font-family: Arial; color: gold;">BIG TIGGER</SPAN></P>\r\n<P STYLE="font-family: sans-serif; font-size: 12px; line-height: 1.5em; vertical-align: baseline; color: #3a352a; text-align: center; unicode-bidi: embed; margin: 0px 0px 1.3em; padding: 0px;"><SPAN STYLE="font-variant-numeric: normal; font-variant-east-asian: normal; font-weight: bold; font-stretch: normal; font-size: 40px; line-height: normal; font-family: Arial; color: white;">FREE TIL 12AM W/ RSVP</SPAN></P>\r\n</DIV>\r\n</TD>\r\n</TR>\r\n</TBODY>\r\n</TABLE>\r\n<TABLE ID="" CLASS="full image table-2 middle" STYLE="border-style: none; width: 590px; clear: both; table-layout: fixed; border-collapse: collapse; border-spacing: 0px; min-width: 100%;">\r\n<TBODY>\r\n<TR>\r\n<TD ID="" STYLE="padding: 0px 30px;">\r\n<DIV CLASS="module large-image-container image" DIR="ltr"><BR></DIV>\r\n</TD>\r\n</TR>\r\n</TBODY>\r\n</TABLE>\r\n<TABLE ID="" CLASS="full divider table-3 middle" STYLE="border-style: none; width: 590px; clear: both; table-layout: fixed; border-collapse: collapse; border-spacing: 0px; min-width: 100%;">\r\n<TBODY>\r\n<TR>\r\n<TD ID="" STYLE="padding: 0px 30px 20px;">\r\n<DIV CLASS="module divider" DIR="ltr">\r\n<DIV CLASS="divider-container module-3"><IMG STYLE="width: 530px; height: auto !important;" ALT="***" SRC="https://d1lggihq2bt4jo.cloudfront.net/assets/responsive_divider-003cda7043b1bbd93c29436541bdc9f7503eb3bbb2fb9b9323bc9b29c83a9fe6.png" WIDTH="530"></DIV>\r\n</DIV>\r\n</TD>\r\n</TR>\r\n</TBODY>\r\n</TABLE>\r\n<TABLE ID="" CLASS="full image table-4 last" STYLE="border-style: none; width: 590px; clear: both; table-layout: fixed; border-collapse: collapse; border-spacing: 0px; min-width: 100%;">\r\n<TBODY>\r\n<TR>\r\n<TD ID="" STYLE="padding: 0px 30px;">\r\n<DIV CLASS="module large-image-container image" DIR="ltr">\r\n<TABLE CLASS="module-4" STYLE="width: 530px; float: none; margin-left: auto; margin-right: auto; padding: 0px; table-layout: fixed; border-collapse: collapse; border-spacing: 0px;">\r\n<TBODY>\r\n<TR>\r\n<TD CLASS="single" STYLE="padding: 0px 0px 18px; width: 530px;">\r\n<DIV CLASS="image-container" STYLE="text-align: center;"><IMG STYLE="border-style: none; width: 530px; padding: 0px; float: none; height: auto;" ALT="img341673761" SRC="https://cascade.madmimi.com/promotion_images/8096/2086/original/img341673761.jpg?1550610925" WIDTH="530"></DIV>\r\n</TD>\r\n</TR>\r\n</TBODY>\r\n</TABLE>\r\n</DIV>\r\n</TD>\r\n</TR>\r\n</TBODY>\r\n</TABLE>\r\n<TABLE ID="" CLASS="full spacer spacer-bottom" STYLE="border-style: none; width: 590px; clear: both; 
table-layout: fixed; border-collapse: collapse; border-spacing: 0px; min-width: 100%;">\r\n<TBODY>\r\n<TR>\r\n<TD ID="" STYLE="margin: 0px auto; padding: 0px 10px; height: 10px; line-height: 10px;">\xa0</TD>\r\n</TR>\r\n</TBODY>\r\n</TABLE>\r\n<TABLE ID="footer_wrapper" CLASS="full" STYLE="border-style: none; width: 590px; clear: both; table-layout: fixed; border-collapse: collapse; border-spacing: 0px; border-radius: 0px 0px 3px 3px; min-width: 100%;" BORDER="0">\r\n<TBODY>\r\n<TR>\r\n<TD CLASS="footer" STYLE="color: #999999; font-family: sans-serif; font-size: 11px; padding: 15px 30px; width: 530px; border-radius: 0px 0px 3px 3px; text-align: center; background-color: #ffd700;">\r\n<P STYLE="font-family: sans-serif; font-size: 11px; unicode-bidi: embed; margin: 10px 0px;">©2019 Furious | Peachtree Road, Atlanta Ga</P>\r\n<P><BR></P>\r\n</TD>\r\n</TR>\r\n</TBODY>\r\n</TABLE>\r\n</DIV>\r\n</TD>\r\n</TR>\r\n</TBODY>\r\n</TABLE>\r\n</TD>\r\n</TR>\r\n</TBODY>\r\n</TABLE>'}, 'id': '53667810867', 'url': 'https://www.eventbrite.com/e/big-tigger-hosts-concert-weekend-kickoff-at-suite-lounge-rsvp-now-tickets-53667810867?aff=ebapi', 'vanity_url': 'https://richthekidatopera.eventbrite.com', 'start': {'timezone': 'America/New_York', 'local': '2019-06-14T22:00:00', 'utc': '2019-06-15T02:00:00Z'}, 'end': {'timezone': 'America/New_York', 'local': '2019-06-15T03:00:00', 'utc': '2019-06-15T07:00:00Z'}, 'organization_id': '25399112885', 'created': '2018-12-13T01:07:44Z', 'changed': '2019-06-13T00:59:09Z', 'published': '2018-12-13T01:07:47Z', 'capacity': None, 'capacity_is_custom': None, 'status': 'live', 'currency': 'USD', 'listed': True, 'shareable': True, 'online_event': False, 'tx_time_limit': 480, 'hide_start_date': False, 'hide_end_date': False, 'locale': 'en_US', 'is_locked': False, 'privacy_setting': 'unlocked', 'is_series': False, 'is_series_parent': False, 'inventory_type': 'limited', 'is_reserved_seating': False, 'show_pick_a_seat': False, 'show_seatmap_thumbnail': False, 'show_colors_in_seatmap_thumbnail': False, 'source': 'create_2.0', 'is_free': False, 'version': '3.0.0', 'summary': '\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\xa0\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nSUITE LIFE FRIDAYS\r\nHOSTED BY\r\nBIG TIGGER\r\nFREE TIL 12AM W/ RSVP\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r', 'logo_id': '62417653', 'organizer_id': '1964406257', 'venue_id': '33064986', 'category_id': '103', 'subcategory_id': '3008', 'format_id': '11', 'resource_uri': 'https://www.eventbriteapi.com/v3/events/53667810867/', 'is_externally_ticketed': False, 'logo': {'crop_mask': {'top_left': {'x': 62, 'y': 0}, 'width': 872, 'height': 436}, 'original': {'url': 'https://img.evbuc.com/https%3A%2F%2Fcdn.evbuc.com%2Fimages%2F62417653%2F25399112885%2F1%2Foriginal.20190516-184347?auto=compress&s=c0d04175ab0b388420360b4e041c04f5', 'width': 960, 'height': 960}, 'id': '62417653', 'url': 'https://img.evbuc.com/https%3A%2F%2Fcdn.evbuc.com%2Fimages%2F62417653%2F25399112885%2F1%2Foriginal.20190516-184347?h=200&w=450&auto=compress&rect=62%2C0%2C872%2C436&s=7197f8ac82d70285ee7455826c5ba566', 'aspect_ratio': '2', 'edge_color': '#1e164c', 'edge_color_set': True}}
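###Markdown The Meetup findings above note that event times come back as epoch values and must be converted to calendar dates and times before they can populate `event_date_local` and `event_time_local` in the proposed schema. A minimal conversion sketch follows; the assumption that both the timestamp and the UTC offset arrive in milliseconds, and the example values, are mine rather than something confirmed from the API response. ###Code
from datetime import datetime, timedelta, timezone

def epoch_ms_to_local(time_ms, utc_offset_ms=0):
    """Convert a millisecond epoch timestamp to (date, time) strings in local venue time."""
    # Build an aware UTC datetime, then shift it by the venue's UTC offset.
    utc_dt = datetime.fromtimestamp(time_ms / 1000, tz=timezone.utc)
    local_dt = utc_dt + timedelta(milliseconds=utc_offset_ms)
    return local_dt.strftime("%Y-%m-%d"), local_dt.strftime("%H:%M")

# Made-up values: Sydney is UTC+10h, i.e. an offset of 36000000 ms.
print(epoch_ms_to_local(1560494700000, 36000000))
###Output _____no_output_____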
Obsolete Py27/Module3/.ipynb_checkpoints/Module3 - Lab2-checkpoint.ipynb
###Markdown DAT210x - Programming with Python for DS Module3 - Lab2 ###Code import pandas as pd import matplotlib.pyplot as plt import matplotlib # Look pretty... # matplotlib.style.use('ggplot') plt.style.use('ggplot') ###Output _____no_output_____ ###Markdown Load up the wheat seeds dataset into a dataframe. We've stored a copy in the Datasets directory. ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown Create a 2d scatter plot that graphs the `area` and `perimeter` features: ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown Create a 2d scatter plot that graphs the `groove` and `asymmetry` features: ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown Create a 2d scatter plot that graphs the `compactness` and `width` features: ###Code # .. your code here .. ###Output _____no_output_____ ###Markdown BONUSAfter completing the above, go ahead and run your program Check out the results, and see what happens when you add in the optional display parameter marker with values of either `'^'`, `'.'`, or `'o'`: ###Code # .. your code here .. # Display the graphs: plt.show() ###Output _____no_output_____
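###Markdown Every task in this lab checkpoint is left as a `# .. your code here ..` placeholder; one possible completion of the first scatter-plot task is sketched below. The `Datasets/wheat.data` path, the `index_col=0` argument and the `area`/`perimeter` column names are assumptions based on the lab description, so adjust them to the local copy of the dataset. ###Code
import pandas as pd
import matplotlib.pyplot as plt

# Assumed location and layout of the wheat seeds dataset described in the lab.
df = pd.read_csv('Datasets/wheat.data', index_col=0)

# 2D scatter of two seed measurements; try marker='^', '.' or 'o' for the bonus task.
df.plot.scatter(x='area', y='perimeter', marker='o')
plt.show()
###Output _____no_output_____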
numpy/Lecture[07]-changingShapeAndSplitting.ipynb
###Markdown Splitting ###Code x = np.arange(10) x np.split(x, 2) np.split(x, 3) np.split(x, [6,9]) a = np.array((["Rajasthan", "Assam", "Goa", "Mumbai", "Delhi", "Tamil Nadu"], ["Jaipur", "Himachal Pradesh", "Panaji", "Mahrastra", "Delhi", "Bengluru"])) a np.hsplit(a, 3) p_1, p_2 = np.vsplit(a, 2) print(p_1) print(p_2) b1, b2, b3, b4, b5, b6 = a.T ###Output _____no_output_____
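###Markdown One caveat worth adding to the splitting examples above: when `np.split` is given an integer count, the axis length must divide evenly, so splitting the ten-element `x` into three parts raises a `ValueError`, while `np.array_split` relaxes that rule and absorbs the remainder. A short demonstration: ###Code
import numpy as np

x = np.arange(10)

# np.split demands equal-sized pieces when given an integer count.
try:
    np.split(x, 3)
except ValueError as err:
    print("np.split failed:", err)

# np.array_split tolerates the remainder and returns pieces of size 4, 3 and 3.
print(np.array_split(x, 3))
###Output _____no_output_____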
MA477 - Theory and Applications of Data Science/Homework/Instructor Solutions/Homework 3/Homework 3 - Matplotlib & Seaborn_Instructor Solution.ipynb
###Markdown ====================================================== MA477 - Theory and Applications of Data Science Homework 3: Matplotlib & Seaborn Dr. Valmir Bucaj United States Military Academy, West Point, AY20-2======================================================= Weight: 50pts Cadet Name:Date: $\dots \dots$ MY DOCUMENTATION IDENTIFIES ALL SOURCES USED AND ASSISTANCE RECEIVED IN THIS ASSIGNMENT$\dots \dots$ I DID NOT USE ANY SOURCES OR ASSISTANCE REQUIRING DOCUMENATION IN COMPLETING THIS ASSIGNMENT Signature/Initials: Complete the following tasks: Import the following libaraires: `matplotlib.pyplot, seaborn, pandas, numpy` ###Code import matplotlib.pyplot as plt import seaborn as sns import pandas as pd import numpy as np ###Output _____no_output_____ ###Markdown Recreate the following plot as closely as you can10pts ###Code #Enter your code here #Doon't run this cell unless you have recreated it, as the plot below will dissapear fig=plt.figure(figsize=(8,6)) x=np.linspace(-4,4,50) axes1=fig.add_axes([0.1,0.1,0.9,0.9]) axes2=fig.add_axes([0.43,0.2,0.25,0.4]) axes1.plot(x,3*np.exp(-0.25*x**2),'r-.',label='$3e^{-0.25x^2}$',lw=3) axes1.plot(x,2.8*np.exp(-0.15*(x-0.1)**2), marker='o',markeredgecolor='black',markerfacecolor='y', markersize=10,mew=2,label='$2.8e^{-0.15(x-0.1)^2}$') axes1.set_title('Many Plots',fontsize=22) axes1.set_xlabel("That took a while",fontsize=18) #Setting the Legend for the larger figure axes1.legend(loc='upper right') #Smaller Plot text={'Cool':(-1.7,2),'Plot':(-0.3,0),'Bro':(1.1,-2)} color=['red','blue','green'] #Setting the min and max values for the x and y axes axes2.set_xlim(-2,2) axes2.set_ylim(-3,3) #Plotting the words contained in text for item,coord,color in zip(text.keys(),text.values(),color): axes2.text(coord[0],coord[1],item,color=color,fontdict={'size':16}) #Setting title for the small plot axes2.set_title('Small Plot') plt.show() ###Output _____no_output_____ ###Markdown For the rest of the exercises we will be using the `Airbnb` dataset contained in this folder. 
Read in the dataset and save it as `abnb` ###Code #Enter code here abnb=pd.read_excel('Airbnb.xlsx') ###Output _____no_output_____ ###Markdown Check out the head of the data: ###Code #Enter code here #Don't run this cell unless you are happy with your answer above abnb.head() ###Output _____no_output_____ ###Markdown Recreate the following `jointplot` 5pts ###Code #Enter code here #Don't run this cell unless you are happy with your answer sns.jointplot(x='number_of_reviews',y='price',data=abnb,kind='kde') ###Output _____no_output_____ ###Markdown Recreate the following `boxplots` 5pts ###Code #Enter code here #Don't run this cell unless you are happy with your answer plt.figure(figsize=(12,6)) sns.boxplot(x='neighbourhood_group',y='price',data=abnb) plt.xlabel("Neighbourhood Group",fontsize=14) plt.ylabel("Price",fontsize=14) ###Output _____no_output_____ ###Markdown 10pts ###Code #Don't run this cell unless you are happy with your answer plt.figure(figsize=(12,8)) sns.boxplot(x='neighbourhood_group',y='number_of_reviews',data=abnb,hue='room_type') plt.xlabel("Neighbourhood Group",fontsize=14) plt.ylabel("Number of Reviews",fontsize=14) ###Output _____no_output_____ ###Markdown Recreate the following `violinplot` comparing the distribution of ONLY `Entire home/apt` and `Private room` for all five `neighbourhood groups`10pts ###Code #Ener Code Here abnb1=abnb[(abnb['room_type']=='Private room')|(abnb['room_type']=='Entire home/apt')] abnb1.head() #Don't run this cell unless you are happy with your answer plt.figure(figsize=(12,8)) sns.violinplot(x='neighbourhood_group',y='price',data=abnb1, hue='room_type',split=True) plt.xlabel("Neighbourhood Group",fontsize=16) plt.ylabel("Price",fontsize=16) ###Output _____no_output_____ ###Markdown Challenging!!!Time Series: Recreate the following plot10pts(Hint: Convert the column `last_review` to `DateTime` format and reset it as the index of the dataframe) ###Code abnb['last_review']=pd.to_datetime(abnb['last_review']) type(abnb['last_review'][1]) abnb.head() abnb.set_index('last_review',inplace=True) #Enter answer here abnb.head() #Don't erase this cell unless you are happy with your answer plt.figure(figsize=(14,6)) plt.plot(abnb['2016-10':'2019-02']['number_of_reviews'].sort_index(), label='Number of Reviews') plt.plot(abnb['2016-10':'2019-02']['price'].sort_index(),label='Price') plt.title("Fluctuations in Number of Reviews and Price over time",fontsize=18) plt.xlabel("last_review",fontsize=14) plt.legend() plt.show() ###Output _____no_output_____
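###Markdown As a possible follow-on to the time-series plot above: the raw per-review values are noisy, and resampling the datetime index gives a cleaner monthly trend. The sketch below assumes `abnb` still has `last_review` as its index, as set a few cells earlier, and simply averages `price` and `number_of_reviews` by calendar month. ###Code
import matplotlib.pyplot as plt

# Keep only rows with a valid review date, then average by calendar month.
reviewed = abnb[abnb.index.notnull()]
monthly = reviewed[['price', 'number_of_reviews']].resample('M').mean()

plt.figure(figsize=(14, 6))
plt.plot(monthly.index, monthly['price'], label='Mean price')
plt.plot(monthly.index, monthly['number_of_reviews'], label='Mean number of reviews')
plt.title('Monthly averages of price and number of reviews')
plt.xlabel('last_review')
plt.legend()
plt.show()
###Output _____no_output_____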
08_passagens_aereas_apf.ipynb
###Markdown Análise de Gastos com Passagens Aéreas (SCDP) Informações* Origem dos dados```Portal da Transparência (http://transparencia.gov.br/download-de-dados/viagens)```* Dicionário de dados```http://transparencia.gov.br/pagina-interna/603364-dicion%C3%A1rio-de-dados-viagens-a-Servi%C3%A7o-Pagamentos```* 4 arquivos: * 2018_Pagamento.csv * 2018_Passagem.csv * 2018_Trecho.csv * 2018_Viagem.csv Perguntas a serem respondidas?* Qual o trecho mais executado?* Qual o valor médio para o trecho mais executado?* Qual a mediana para o trecho mais executado?* Identificar possíveis outliers nos valores do trecho mais executado ###Code import pandas as pd import numpy as np import matplotlib import matplotlib.pyplot as plt import matplotlib.ticker as ticker import seaborn as sns pd.set_option('display.max_columns', 500) pd.set_option('display.max_colwidth', 1000) pd.set_option('display.float_format', lambda x: '{:.2f}'.format(x)) plt.rcParams['figure.dpi'] = 90 ###Output _____no_output_____ ###Markdown Leitura dos Dados--- ###Code """ Leia o arquivo 'dados/c04_passagens/2018_Passagem.csv.zip' """ df_passagens = pd.read_csv('dados/c04_passagens/2018_Passagem.csv.zip', encoding='latin1', sep=';') ###Output _____no_output_____ ###Markdown Identificação de valores nulos ou faltantes--- ###Code df_passagens.info() df_passagens.head() """ Vamos verificar algumas estatísticas básicas sobre colunas numéricas. """ df_passagens.describe() """ Vamos verificar algumas estatísticas básicas sobre as demais colunas. """ df_passagens.describe(include='object') """ Tratamento em todas as colunas para colocar o valor None quando o valor for 'Não informado' ou NaN """ for c in df_passagens.columns: df_passagens[c] = df_passagens[c].apply(lambda x: x if x != 'Sem Informação' and pd.notnull(x) else None) df_passagens.info() ###Output <class 'pandas.core.frame.DataFrame'> RangeIndex: 310816 entries, 0 to 310815 Data columns (total 16 columns): Identificador do processo de viagem 310816 non-null int64 Meio de transporte 310816 non-null object País - Origem ida 310816 non-null object UF - Origem ida 303560 non-null object Cidade - Origem ida 310816 non-null object País - Destino ida 310816 non-null object UF - Destino ida 301096 non-null object Cidade - Destino ida 310816 non-null object País - Origem volta 23485 non-null object UF - Origem volta 20492 non-null object Cidade - Origem volta 23485 non-null object Pais - Destino volta 23485 non-null object UF - Destino volta 22903 non-null object Cidade - Destino volta 23485 non-null object Valor da passagem 310816 non-null object Taxa de serviço 310816 non-null object dtypes: int64(1), object(15) memory usage: 37.9+ MB ###Markdown Conversão das colunas para os tipos de dados corretos e padronização campos--- ###Code """ Verificamos valores nulos e tipos de dados com o método info() do dataframe. """ df_passagens.info() """ A conversão pode ser feita utilizando o método apply da série que deseja converter. """ def converter_numero(valor): try: return float(valor.replace('.', '').replace(',', '.')) except: return np.nan df_passagens_1 = df_passagens.copy() df_passagens_1['Valor da passagem'] = ___ df_passagens_1['Taxa de serviço'] = ___ df_passagens_1[['Valor da passagem', 'Taxa de serviço']].head() df_passagens_1.info() """ Padronização das strings é interessante para que a ausência ou a presença de acentos não interfira em agrupamentos. 
""" from libs.texto import TratamentoTexto def tratar_texto(valor): v = valor # somente realiza o tratamento se o texto não for nulo if v: # remove acentuação v = TratamentoTexto.remover_acentuacao(v) # converte para letras maiúsculas v = v.upper() return v colunas = ['Meio de transporte', 'País - Origem ida', 'UF - Origem ida', 'Cidade - Origem ida', 'País - Destino ida', 'UF - Destino ida', 'Cidade - Destino ida', 'País - Origem volta', 'UF - Origem volta', 'Cidade - Origem volta', 'Pais - Destino volta', 'UF - Destino volta', 'Cidade - Destino volta'] # padronize as colunas da lista acima no dataframe df_passagens_1 ___ df_passagens_1.head() ###Output _____no_output_____ ###Markdown Você já está pensando em responder as questões?Não é uma boa idéia na maioria dos casos. ###Code colunas_gb = ['País - Origem ida', 'UF - Origem ida', 'Cidade - Origem ida', 'País - Destino ida', 'UF - Destino ida', 'Cidade - Destino ida'] colunas_selecao = ['País - Origem ida', 'UF - Origem ida', 'Cidade - Origem ida', 'País - Destino ida', 'UF - Destino ida', 'Cidade - Destino ida', 'Valor da passagem'] df_passagens_1[colunas_selecao].groupby(colunas_gb, as_index=False).agg(['count', 'mean', 'median']).sort_values(('Valor da passagem', 'count'), ascending=False).head(10) ###Output _____no_output_____ ###Markdown Entendimento e organização dos dadosVamos olhar as variáveis de interesse separadamente. Identificador do processo de viagem ###Code plt.rcParams['figure.figsize'] = (16.5,6) df_passagens_1['Identificador do processo de viagem'].value_counts().value_counts().plot.bar() plt.xlabel('Quantidade de Trechos por ID de Viagem') plt.ylabel('Quantidade de Registros') plt.yscale('log') plt.grid(True) plt.show() ''' Exemplo de uma viagem com 3 trechos ''' df_passagens_1[ df_passagens_1['Identificador do processo de viagem'] == 14756123 ] ''' Exemplo de uma viagem com 5 trechos ''' df_passagens_1[ df_passagens_1['Identificador do processo de viagem'] == 14825056 ] ''' Exemplo de uma viagem com 14 trechos ''' df_passagens_1[ df_passagens_1['Identificador do processo de viagem'] == 15190575 ] df_passagens_1.describe(include='all') """ Vamos criar uma coluna que identifica se o trecho faz parte de uma viagem internacional ou não """ # vamos marcar trechos que são de origem ou destino internacional df_passagens_1['Trecho Internacional'] = \ (df_passagens_1['País - Origem ida'] != 'BRASIL') | (df_passagens_1['País - Destino ida'] != 'BRASIL') \ | ( df_passagens_1['País - Origem volta'].notnull() & ((df_passagens_1['País - Origem volta'] != 'BRASIL') | (df_passagens_1['Pais - Destino volta'] != 'BRASIL'))) \ df_passagens_1[df_passagens_1['Trecho Internacional']].head() # quais viagens possuem ao menos um trecho internacional df_viagens_internacionais = \ df_passagens_1[['Identificador do processo de viagem','Trecho Internacional']]\ .groupby('Identificador do processo de viagem', as_index=False)\ .max() df_viagens_internacionais.head() # quais processos de viagem possuem trechos internacionais df_viagens_internacionais = df_viagens_internacionais.rename(columns={'Trecho Internacional': 'Viagem Internacional'}) df_viagens_internacionais.head() df_passagens_2 = pd.merge(df_passagens_1, df_viagens_internacionais, how='left', left_on='Identificador do processo de viagem', right_on='Identificador do processo de viagem') df_passagens_2.head() df_passagens_2[df_passagens_2['Identificador do processo de viagem'] == 14342418] df_passagens_g = df_passagens_2[df_passagens_2['Valor da passagem'] > 0.001] 
plt.rcParams['figure.figsize'] = 16,10 ax = sns.boxplot(y=df_passagens_g['Valor da passagem'], x=df_passagens_g['Trecho Internacional'].apply(lambda x: 'Internacional' if x else 'Nacional')) ax.set_yscale('log') ax.plot(x=[-1000, 1000], y=[2000,2000], color='red', linewidth=2, markersize=12) plt.grid(True) plt.show() ###Output _____no_output_____ ###Markdown Observações* Número de trechos interfere no valor das passagens?* Trechos domésticos e viagens internacionais possuem valor superior aos dos trechos domésticos em viagens nacionais?* Trechos com valores muito baixos. Como isso é possível? Meio de transporte--- ###Code df_passagens_1['Meio de transporte'].value_counts().to_frame() plt.rcParams["figure.figsize"] = 16, 4 ax = df_passagens_1['Meio de transporte'].value_counts().plot.barh() ax.set_xscale('log') plt.grid(True) plt.show() plt.rcParams['figure.figsize'] = 16,7 plt.rcParams['figure.dpi'] = 90 df_passagens_g = df_passagens_2[df_passagens_2['Valor da passagem']>0] sns.boxplot(df_passagens_g['Meio de transporte'], df_passagens_g['Valor da passagem'], order=df_passagens_g['Meio de transporte'].drop_duplicates().sort_values()) plt.yscale('log') plt.ylim = [0, 150000] plt.grid(True) plt.show() ###Output _____no_output_____ ###Markdown Observações* Trechos aéreos acompanhados de outras modalidades possuem valor diferenciado?* Foco nos trechos aéreos Taxa de Serviço--- ###Code """ Muitos registros zerados. """ df_passagens_1['Taxa de serviço'].value_counts().head() df_passagens_1['Taxa de serviço'].describe().to_frame() df_passagens_g = df_passagens_2[df_passagens_2['Taxa de serviço']>0] plt.rcParams['figure.figsize'] = 16,8 sns.boxplot(x='Meio de transporte', y='Taxa de serviço', data=df_passagens_g, order=df_passagens_g['Meio de transporte'].drop_duplicates().sort_values()) plt.yscale('log') plt.grid(True) plt.show() ###Output _____no_output_____ ###Markdown Observações* Campo com poucas observações relevantes.* Talvez seja interessante investigar os outliers com valores altos.* Para o propósito levantado inicialmente, este campo não é necessário. Origem, Destino, Ida e Volta ###Code """ Ocorrência da mesma variável em mais de uma coluna no dataset. """ df_passagens_2.head() """ Vamos organizar. """ df_passagens_3 = df_passagens_2.copy() df_passagens_3[df_passagens_3['País - Origem volta'].notnull()].shape """ Vamos quebrar as linhas que possuem os trachos de ida e volta. 
""" df_passagens_3['Tipo Trecho'] = 'IDA' df_passagens_3['Tipo Compra'] = 'SEPARADA' voltas = [] for idx, df in df_passagens_3[df_passagens_3['País - Origem volta'].notnull()].iterrows(): valor_passagem = df_passagens_3.at[idx, 'Valor da passagem'] / 2 df_passagens_3.at[idx, 'Valor da passagem'] = valor_passagem df_passagens_3.at[idx, 'Tipo Compra'] = 'CONJUNTA' for ic in range(8,14): df_passagens_3.iat[idx, ic] = None df['Tipo Trecho'] = 'VOLTA' df['Tipo Compra'] = 'CONJUNTA' df['Valor da passagem'] = valor_passagem df['País - Origem ida'] = df['País - Origem volta'] df['UF - Origem ida'] = df['UF - Origem volta'] df['Cidade - Origem ida'] = df['Cidade - Origem volta'] df['País - Destino ida'] = df['Pais - Destino volta'] df['UF - Destino ida'] = df['UF - Destino volta'] df['Cidade - Destino ida'] = df['Cidade - Destino volta'] df['País - Origem volta'] = None df['UF - Origem volta'] = None df['Cidade - Origem volta'] = None df['Pais - Destino volta'] = None df['UF - Destino volta'] = None df['Cidade - Destino volta'] = None voltas.append(df) df_voltas = pd.concat(voltas, ignore_index=False, axis=1).T df_voltas.head() df_passagens_4 = pd.concat([df_passagens_3, df_voltas]) df_passagens_4.head() df_passagens_4 = df_passagens_4.sort_values(['Identificador do processo de viagem', 'Tipo Trecho']).reset_index(drop=True) df_passagens_4.loc[10:11] df_passagens_5 = df_passagens_4.drop(['País - Origem volta', 'UF - Origem volta', 'Cidade - Origem volta', 'Pais - Destino volta', 'UF - Destino volta', 'Cidade - Destino volta'], axis=1) df_passagens_5.head() """ Vamos utilizar apenas a sigla do estado. """ df_ufs = pd.read_csv('./dados/lista_ufs.csv', sep=';', encoding='latin1') df_ufs['UF'] = df_ufs['UF'].apply(tratar_texto) df_ufs.head() df_passagens_6 = pd.merge(df_passagens_5, df_ufs, left_on='UF - Origem ida', right_on='UF', how='left') df_passagens_6.head() df_passagens_6 = pd.merge(df_passagens_6, df_ufs, left_on='UF - Destino ida', right_on='UF', how='left') df_passagens_6.head() """ Vamos concatenar Cidade e UF em uma única coluna """ df_passagens_7 = df_passagens_6.copy() df_passagens_7['Origem'] = df_passagens_7['Cidade - Origem ida'] + '-' + df_passagens_7['SG_UF_x'].apply(lambda x: x if pd.notnull(x) else '') df_passagens_7['Destino'] = df_passagens_7['Cidade - Destino ida'] + '-' + df_passagens_7['SG_UF_y'].apply(lambda x: x if pd.notnull(x) else '') df_passagens_7.head() """ Selecionamos apenas as colunas desejadas e renomeamos elas """ df_passagens_7 = df_passagens_7[['Identificador do processo de viagem', 'Origem', 'Destino', 'Valor da passagem', 'Taxa de serviço', 'Trecho Internacional', 'Viagem Internacional', 'Tipo Trecho', 'Tipo Compra', 'Meio de transporte']] df_passagens_7.columns = ['ID_VIAGEM', 'ORIGEM', 'DESTINO', 'VALOR', 'TAXA', 'TRECHO_INT', 'VIAGEM_INT', 'TIPO_TRECHO', 'TIPO_COMPRA', 'MEIO_TRANSPORTE'] df_passagens_7['VIAGEM_INT'] = df_passagens_7['VIAGEM_INT'].apply(lambda x: 1 if x else 0) df_passagens_7['TRECHO_INT'] = df_passagens_7['TRECHO_INT'].apply(lambda x: 1 if x else 0) df_passagens_7.head() df_passagens_7.info() """ Fazemos novamente um tratamento de tipos de dados para as colunas """ df_passagens_7['VALOR'] = df_passagens_7['VALOR'].astype(np.float64) df_passagens_7['TAXA'] = df_passagens_7['TAXA'].astype(np.float64) df_passagens_7['ORIGEM'] = df_passagens_7['ORIGEM'].astype('category') df_passagens_7['DESTINO'] = df_passagens_7['DESTINO'].astype('category') df_passagens_7['TIPO_TRECHO'] = df_passagens_7['TIPO_TRECHO'].astype('category') 
df_passagens_7['TIPO_COMPRA'] = df_passagens_7['TIPO_COMPRA'].astype('category') df_passagens_7.head() """ É possível ver que não existem mais registros com informações nulas e os tipos de dados estão adequados para cada situação. """ df_passagens_7.info() """ Vamos eliminar todas as viagens internacionais. Vamos também manter apenas o meio de transporte AEREO """ df_passagens_8 = df_passagens_7[df_passagens_7['VIAGEM_INT'] == False] df_passagens_8 = df_passagens_8[df_passagens_8['MEIO_TRANSPORTE'] == 'AEREO'].reset_index(drop=True) df_passagens_8.info() df_passagens_8[['ORIGEM','DESTINO']]\ .groupby(['ORIGEM','DESTINO'])\ .size().reset_index(name='TOTAL').sort_values('TOTAL', ascending=False).head(10) """ Vamos calcular a quantidade de ocorrências, média, mediana e desvio padrão por trecho """ filtro_a = (df_passagens_7['ORIGEM'] == 'RIO DE JANEIRO-RJ') & (df_passagens_7['DESTINO'] == 'BRASILIA-DF') filtro_b = (df_passagens_7['ORIGEM'] == 'BRASILIA-DF') & (df_passagens_7['DESTINO'] == 'RIO DE JANEIRO-RJ') df_passagens_g = df_passagens_7[ (filtro_a | filtro_b) ].copy() df_passagens_g['ORIGEM'] = df_passagens_g['ORIGEM'].astype(str) ax = sns.boxplot(x='VALOR', y='ORIGEM', data=df_passagens_g, hue='TIPO_COMPRA') ax.xaxis.set_minor_locator(matplotlib.ticker.MultipleLocator(100)) plt.grid(True) plt.show() df_passagens_g.describe() """ Desenhe um histograma com os valores encontrados para o trecho mais frequente. (Considere ida ou volta) """ """ Identifique os outliers utilizando as marcações do boxplot. IQR = q3 - q1 limite inferior = q1 - 1.5*IQR limite superior = q3 + 1.5*IQR """ """ Utilize o dataframe df_viagem para incluir as informações de Órgão Solicitante e Motivo da Viagem """ df_viagem = pd.read_csv('dados/c04_passagens/2018_Viagem.csv.zip', sep=';', encoding='iso-8859-1', error_bad_lines=False, quotechar="\"") df_viagem.head() """ Identifique os órgãos com mais ocorrências entre os outliers. """ ###Output _____no_output_____
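###Markdown The outlier rule quoted in the exercise above (IQR = q3 - q1, with limits at q1 - 1.5*IQR and q3 + 1.5*IQR) translates directly into pandas. The sketch below is an added illustration, not part of the original notebook; it assumes the df_passagens_8 dataframe (domestic, air-only legs) built in the earlier cells. ###Code
# Minimal sketch of the boxplot/IQR outlier rule described above.
q1 = df_passagens_8['VALOR'].quantile(0.25)
q3 = df_passagens_8['VALOR'].quantile(0.75)
iqr = q3 - q1
lower, upper = q1 - 1.5 * iqr, q3 + 1.5 * iqr

# legs whose fare falls outside the boxplot whiskers
outliers = df_passagens_8[(df_passagens_8['VALOR'] < lower) | (df_passagens_8['VALOR'] > upper)]
print(len(outliers), 'legs flagged as outliers')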
colab/Lyft_resnet18_TPU.ipynb
###Markdown ###Code from google.colab import drive drive.mount('/content/drive') import os os.makedirs('/content/lyft-motion-prediction-autonomous-vehicles/', exist_ok=True) os.chdir('/content/lyft-motion-prediction-autonomous-vehicles/') %cd '/content/lyft-motion-prediction-autonomous-vehicles/' !pwd os.makedirs('/content/lyft-motion-prediction-autonomous-vehicles/scenes/', exist_ok=True) os.chdir('/content/lyft-motion-prediction-autonomous-vehicles/scenes/') !wget --header="Host: storage.googleapis.com" --header="User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36" --header="Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9" --header="Accept-Language: en-US,en;q=0.9" --header="Referer: https://www.kaggle.com/" --header="Cookie: _ga=GA1.3.1723755215.1594569279" --header="Connection: keep-alive" "https://storage.googleapis.com/kaggle-competitions-data/kaggle-v2/19990/1472735/compressed/scenes.zip?GoogleAccessId=web-data@kaggle-161607.iam.gserviceaccount.com&Expires=1600509762&Signature=bobq0fsme7KAMVwgKrFMy4adMBhMspT9MkV%2Be4PrZG%2BXR78pdZfN91f9bLu0xNUzneK9UBF2aXhFqhuKdV5oxs%2BHmhwoW9qs6OmJ%2F5%2FYBUSbHD%2B%2BMh4wI6VIKKWatO9fWv0fOs3R0x3RMPhfz0mmTbY0QCY9dOiPP0bCRRM1cBtMiXVIg9bx%2FTHWfaLdl9MNC2w3KIJaomHpTPx2RSuhTZtP%2FHFyw6ap%2BSWHeQdZjlxEMnmndQAXqd7IQXY8wrnY61i82p5uGsedVwkV77GYkRDYdwK%2FDQf%2FTRGP3ufCMYBQrUB9F6E8SFO8frdAk1muOEW%2F%2FDpT1U%2FXNIAH%2FB60Qg%3D%3D&response-content-disposition=attachment%3B+filename%3Dscenes.zip" -c -O 'scenes.zip' !unzip ./scenes.zip %cd '/content/lyft-motion-prediction-autonomous-vehicles/scenes/' !pwd !ls %rm -r validate.zarr/ %rm -r sample.zarr/ %rm scenes.zip %cd .. !pwd !ls os.makedirs('/content/lyft-motion-prediction-autonomous-vehicles/aerial_map/', exist_ok=True) os.chdir('/content/lyft-motion-prediction-autonomous-vehicles/aerial_map/') !wget --header="Host: storage.googleapis.com" --header="User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36" --header="Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9" --header="Accept-Language: en-US,en;q=0.9" --header="Referer: https://www.kaggle.com/" --header="Cookie: _ga=GA1.3.1723755215.1594569279" --header="Connection: keep-alive" "https://storage.googleapis.com/kaggle-competitions-data/kaggle-v2/19990/1472735/compressed/aerial_map.zip?GoogleAccessId=web-data@kaggle-161607.iam.gserviceaccount.com&Expires=1600509800&Signature=bWgVzkHmcNngUK3KJvk%2B9FZXCMNuqnP%2Bg8M%2FlxPbEr7CGL5Rp2amdUTkVkp60KZvIfg5mM1L2XlXn1zyCeJYcP1Dav9Qcy1XwG7oOGGHN0Y1ZI8hKJY4sTjogcvMn%2BFYKNAUB4VUZ8mYc1FJ8jXLvN2MLhjfLFOBBhBi4AF3PzxT9%2BDyGLTRxgixoKMu6jnWXcuviSH5vwRKGC6xUhbHmI60rR%2ByIiaOEJUGTwFYXBo%2FqaxyCHn%2B0WIIrBk4TXovH8bi1AIQvpOyB%2Fltso8PdS4yRcNDy37VdPZdktwjOGxhgAGsOTNvZwLrzotwda82t77lmcjfC2MZRJJjRKP5Ng%3D%3D&response-content-disposition=attachment%3B+filename%3Daerial_map.zip" -c -O 'aerial_map.zip' !unzip ./aerial_map.zip %rm -r aerial_map.zip %cd .. 
!pwd !ls os.makedirs('/content/lyft-motion-prediction-autonomous-vehicles/semantic_map/', exist_ok=True) os.chdir('/content/lyft-motion-prediction-autonomous-vehicles/semantic_map/') !wget --header="Host: storage.googleapis.com" --header="User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36" --header="Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9" --header="Accept-Language: en-US,en;q=0.9" --header="Referer: https://www.kaggle.com/" --header="Cookie: _ga=GA1.3.1723755215.1594569279" --header="Connection: keep-alive" "https://storage.googleapis.com/kaggle-competitions-data/kaggle-v2/19990/1472735/upload/semantic_map.zip?GoogleAccessId=web-data@kaggle-161607.iam.gserviceaccount.com&Expires=1600509832&Signature=C0hWGkYs3Q1J4BM2ykth4xr0ka7RC1LJ0UCN5us8f2WCvZ%2B0RUzDtxqiP10KqGIvQr93Rv9riFtMmwjfXZZggi1Agwk3233NugIYGEeN3tohGUTaejnBJ%2BrmQdEimR%2Bg7XfLWMdQebrp6yNnpr82Tpj0IBgcXIKQynKa9yS0LF4xtVlsKKvPQmbDPRIQ%2FtAyfEegyIsnRclhSvNcAP5Xr1Si9Luh9hXSrE9RxDCznNNhl0IMn%2BN5GXYpJwzbBBX1T2tJuYhVwOwSp4J9ab5if0gwWAeGZ5219hzoLzFG5kO5xc%2BWFOhXJ%2FxZgmfBrq4laPwS5LNuVJxyjwEkE0Xu7A%3D%3D&response-content-disposition=attachment%3B+filename%3Dsemantic_map.zip" -c -O 'semantic_map.zip' !unzip ./semantic_map.zip %rm -r semantic_map.zip %cd .. !pwd !ls ## this script transports l5kit and dependencies !pip -q install pymap3d==2.1.0 !pip -q install protobuf==3.12.2 !pip -q install transforms3d !pip -q install zarr !pip -q install ptable !pip -q install --no-dependencies l5kit !pip install -q cloud-tpu-client==0.10 https://storage.googleapis.com/tpu-pytorch/wheels/torch_xla-1.6-cp36-cp36m-linux_x86_64.whl ###Output Building wheel for pymap3d (setup.py) ... [?25l[?25hdone  |████████████████████████████████| 1.3MB 3.3MB/s  |████████████████████████████████| 71kB 2.3MB/s [?25h Building wheel for transforms3d (setup.py) ... [?25l[?25hdone  |████████████████████████████████| 3.3MB 3.5MB/s  |████████████████████████████████| 5.8MB 30.5MB/s [?25h Building wheel for zarr (setup.py) ... [?25l[?25hdone Building wheel for asciitree (setup.py) ... [?25l[?25hdone Building wheel for ptable (setup.py) ... 
[?25l[?25hdone  |████████████████████████████████| 81kB 2.5MB/s  |████████████████████████████████| 133.2MB 63kB/s  |████████████████████████████████| 61kB 2.8MB/s [?25h ###Markdown Import Packages ###Code # import packages from google.colab import files import numpy as np import torch import gc, os, time from torch import nn, optim from torch.utils.data import DataLoader, RandomSampler from torchvision.models.resnet import resnet18, resnet34, resnet50 from torchvision.models.densenet import densenet121 from tqdm import tqdm from typing import Dict from l5kit.data import LocalDataManager, ChunkedDataset from l5kit.dataset import AgentDataset, EgoDataset from l5kit.rasterization import build_rasterizer import torch_xla import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met import torch_xla.distributed.parallel_loader as pl import torch_xla.distributed.xla_multiprocessing as xmp import torch_xla.utils.utils as xu # PyTorch/XLA GPU Setup (only if GPU runtime) assert os.environ['COLAB_TPU_ADDR'] ###Output _____no_output_____ ###Markdown TPU Check ###Code # check if TPU env working properly TPU_Path = 'grpc://'+os.environ['COLAB_TPU_ADDR'] print('TPU Address:', TPU_Path) # set env variable for data os.environ["L5KIT_DATA_FOLDER"] = '/content/lyft-motion-prediction-autonomous-vehicles/' os.environ['XLA_USE_BF16'] = "1" os.environ['XLA_TENSOR_ALLOCATOR_MAXSIZE'] = '100000000' dm = LocalDataManager(None) cfg = { 'format_version': 4, 'model_params': { 'model_architecture': 'resnet18', 'history_num_frames': 10, 'history_step_size': 1, 'history_delta_time': 0.1, 'future_num_frames': 50, 'future_step_size': 1, 'future_delta_time': 0.1 }, 'raster_params': { 'raster_size': [450, 450], 'pixel_size': [0.5, 0.5], 'ego_center': [0.25, 0.5], 'map_type': 'py_semantic', 'satellite_map_key': 'aerial_map/aerial_map.png', 'semantic_map_key': 'semantic_map/semantic_map.pb', 'dataset_meta_key': 'meta.json', 'filter_agents_threshold': 0.5 }, 'train_data_loader': { 'key': 'scenes/train.zarr', 'batch_size': 16, 'shuffle': True }, 'train_params': { 'epochs': 10, 'verbose': 50, } } # get config print(cfg) def loader(cfg): # training configurations train_cfg = cfg["train_data_loader"] # Rasterizer rasterizer = build_rasterizer(cfg, dm) # Train dataset/dataloader train_zarr = ChunkedDataset(dm.require(train_cfg["key"])).open() train_dataset = AgentDataset(cfg, train_zarr, rasterizer) return train_dataset class LyftModel(nn.Module): def __init__(self, cfg: Dict): super().__init__() self.backbone = resnet18(pretrained=True, progress=True) num_history_channels = (cfg["model_params"]["history_num_frames"] + 1) * 2 num_in_channels = 3 + num_history_channels self.backbone.conv1 = nn.Conv2d( num_in_channels, self.backbone.conv1.out_channels, kernel_size=self.backbone.conv1.kernel_size, stride=self.backbone.conv1.stride, padding=self.backbone.conv1.padding, bias=False, ) backbone_out_features = 512 # X, Y coords for the future positions (output shape: Bx50x2) num_targets = 2 * cfg["model_params"]["future_num_frames"] self.head = nn.Sequential( # nn.Dropout(0.2), nn.Linear(in_features=backbone_out_features, out_features=4096), ) self.logit = nn.Linear(4096, out_features=num_targets) def forward(self, x): x = self.backbone.conv1(x) x = self.backbone.bn1(x) x = self.backbone.relu(x) x = self.backbone.maxpool(x) x = self.backbone.layer1(x) x = self.backbone.layer2(x) x = self.backbone.layer3(x) x = self.backbone.layer4(x) x = self.backbone.avgpool(x) x = torch.flatten(x, 1) x = self.head(x) x = self.logit(x) 
return x mx = LyftModel(cfg) def loss_fn(outputs, targets): # pass in outputs and targets, return loss function return nn.MSELoss(reduction="none")(outputs, targets) def train_loop(cfg, dataloader, model, optimizer, device): # putting model in training mode model.train() losses_train = [] for num, data in enumerate(dataloader): inputs = data["image"].to(device) target_availabilities = data["target_availabilities"].unsqueeze(-1).to(device) targets = data["target_positions"].to(device) outputs = model(inputs).reshape(targets.shape) loss = loss_fn(outputs, targets) loss = loss * target_availabilities loss = loss.mean() losses_train.append(loss.item()) if num % cfg['train_params']['verbose'] == 0: xm.master_print(f'Train Steps: {num}, loss: {loss.item()}, loss(avg): {np.mean(losses_train[-100:])}, Time: {time.asctime()}') optimizer.zero_grad() loss.backward() xm.optimizer_step(optimizer) # Only instantiate model weights once in memory. # WRAPPED_MODEL = xmp.MpModelWrapper(LyftModel(cfg)) def _run(cfg): # random seed for each of the 8 process torch.manual_seed(42) train_dataset = loader(cfg) train_sampler = torch.utils.data.distributed.DistributedSampler( train_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal(), shuffle=True) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=cfg["train_data_loader"]["batch_size"], num_workers=0, drop_last=True) device = xm.xla_device() model = mx.to(device) xm.master_print('done loading model') # model parameters to optimize param_optimizer = list(model.named_parameters()) no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] # apply to weight decay optimizer_grouped_parameters = [ {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.001}, {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}] xm.master_print('training on train dataset') # Scale learning rate to num cores learning_rate = 1e-3 * xm.xrt_world_size() optimizer = optim.AdamW(optimizer_grouped_parameters, lr=learning_rate) # criterion = nn.MSELoss(reduction="none") # calculate the total number of training steps num_train_steps = int(len(train_dataset) / cfg['train_data_loader']['batch_size'] / xm.xrt_world_size() * cfg['train_params']['epochs']) xm.master_print(f'num_training_steps = {num_train_steps}, world_size={xm.xrt_world_size()}') for epoch in range(cfg["train_params"]["epochs"]): gc.collect() para_loader = pl.ParallelLoader(train_dataloader, [device]) xm.master_print('parallel loader created...') gc.collect() xm.master_print('Training Model...') train_loop(cfg, para_loader.per_device_loader(device), model, optimizer, device) xm.master_print("Finished training epoch {}".format(epoch)) del para_loader gc.collect() xm.save({'model_state_dict': model.state_dict()}, f'resnet18_450x450.pth') # Start training processes def _mp_fn(rank, flags): torch.set_default_tensor_type('torch.FloatTensor') a = _run(cfg) FLAGS={} start_time = time.time() xmp.spawn(_mp_fn, args=(FLAGS,), nprocs=1, start_method='fork') print('Time taken for training: ',time.time()-start_time) xm.save({'model_state_dict': model.state_dict()}, f'/content/drive/My Drive/Kaggle/Lyft L5 Motion Prediction/resnet18_450x450_TPU.pth') ###Output _____no_output_____
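###Markdown The checkpoint written by xm.save above stores only the model weights. Below is a minimal sketch, not part of the original notebook, of reloading it for CPU inference; the file name and the dummy input are assumptions, and the input shape follows the config above (3 + (10 + 1) * 2 = 25 raster channels at 450 x 450). ###Code
# Sketch: reload the saved state dict and run one forward pass on CPU.
import torch

model = LyftModel(cfg)
checkpoint = torch.load('resnet18_450x450.pth', map_location='cpu')  # assumed file name
model.load_state_dict(checkpoint['model_state_dict'])
model.eval()

with torch.no_grad():
    dummy = torch.zeros(1, 25, 450, 450)    # dummy input shaped like one rasterized agent
    preds = model(dummy).reshape(1, 50, 2)  # 50 future (x, y) positions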
Model backlog/Train/280-tweet-train-5fold-roberta-onecycle-clean-jac.ipynb
###Markdown Dependencies ###Code import json, warnings, shutil from scripts_step_lr_schedulers import * from tweet_utility_scripts import * from tweet_utility_preprocess_roberta_scripts import * from transformers import TFRobertaModel, RobertaConfig from tokenizers import ByteLevelBPETokenizer from tensorflow.keras.models import Model from tensorflow.keras import optimizers, metrics, losses, layers from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, LearningRateScheduler SEED = 0 seed_everything(SEED) warnings.filterwarnings("ignore") ###Output wandb: WARNING W&B installed but not logged in. Run `wandb login` or set the WANDB_API_KEY env variable. ###Markdown Load data ###Code database_base_path = '/kaggle/input/tweet-dataset-5fold-roberta-64-clean-jac/' k_fold = pd.read_csv(database_base_path + '5-fold.csv') display(k_fold.head()) # Unzip files !tar -xf /kaggle/input/tweet-dataset-5fold-roberta-64-clean-jac/fold_1.tar.gz !tar -xf /kaggle/input/tweet-dataset-5fold-roberta-64-clean-jac/fold_2.tar.gz !tar -xf /kaggle/input/tweet-dataset-5fold-roberta-64-clean-jac/fold_3.tar.gz !tar -xf /kaggle/input/tweet-dataset-5fold-roberta-64-clean-jac/fold_4.tar.gz !tar -xf /kaggle/input/tweet-dataset-5fold-roberta-64-clean-jac/fold_5.tar.gz ###Output _____no_output_____ ###Markdown Model parameters ###Code vocab_path = database_base_path + 'vocab.json' merges_path = database_base_path + 'merges.txt' base_path = '/kaggle/input/qa-transformers/roberta/' config = { "MAX_LEN": 64, "BATCH_SIZE": 32, "EPOCHS": 2, "LEARNING_RATE": 1e-4, "ES_PATIENCE": 2, "N_FOLDS": 5, "question_size": 4, "base_model_path": base_path + 'roberta-base-tf_model.h5', "config_path": base_path + 'roberta-base-config.json' } with open('config.json', 'w') as json_file: json.dump(json.loads(json.dumps(config)), json_file) ###Output _____no_output_____ ###Markdown Tokenizer ###Code tokenizer = ByteLevelBPETokenizer(vocab_file=vocab_path, merges_file=merges_path, lowercase=True, add_prefix_space=True) tokenizer.save('./') # pre-process k_fold['jaccard'] = k_fold.apply(lambda x: jaccard(x['text'], x['selected_text']), axis=1) k_fold['text_tokenCnt'] = k_fold['text'].apply(lambda x : len(tokenizer.encode(x).ids)) k_fold['selected_text_tokenCnt'] = k_fold['selected_text'].apply(lambda x : len(tokenizer.encode(x).ids)) ###Output _____no_output_____ ###Markdown Learning rate schedule ###Code lr_min = 1e-6 lr_start = 0 lr_max = config['LEARNING_RATE'] train_size = len(k_fold[k_fold['fold_1'] == 'train']) step_size = train_size // config['BATCH_SIZE'] total_steps = config['EPOCHS'] * step_size rng = [i for i in range(0, total_steps, config['BATCH_SIZE'])] y = [one_cycle_schedule(tf.cast(x, tf.float32), total_steps=total_steps, lr_start=lr_min, lr_max=lr_max) for x in rng] sns.set(style="whitegrid") fig, ax = plt.subplots(figsize=(20, 6)) plt.plot(rng, y) print("Learning rate schedule: {:.3g} to {:.3g} to {:.3g}".format(y[0], max(y), y[-1])) ###Output Learning rate schedule: 1e-06 to 9.8e-05 to 3.27e-06 ###Markdown Model ###Code module_config = RobertaConfig.from_pretrained(config['config_path'], output_hidden_states=False) def model_fn(MAX_LEN): input_ids = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_ids') attention_mask = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='attention_mask') base_model = TFRobertaModel.from_pretrained(config['base_model_path'], config=module_config, name="base_model") last_hidden_state, _ = base_model({'input_ids': input_ids, 'attention_mask': attention_mask}) logits = 
layers.Dense(2, name="qa_outputs", use_bias=False)(last_hidden_state) start_logits, end_logits = tf.split(logits, 2, axis=-1) start_logits = tf.squeeze(start_logits, axis=-1, name='y_start') end_logits = tf.squeeze(end_logits, axis=-1, name='y_end') model = Model(inputs=[input_ids, attention_mask], outputs=[start_logits, end_logits]) return model ###Output _____no_output_____ ###Markdown Train ###Code def get_training_dataset(x_train, y_train, batch_size, buffer_size, seed=0): dataset = tf.data.Dataset.from_tensor_slices(({'input_ids': x_train[0], 'attention_mask': x_train[1]}, (y_train[0], y_train[1]))) dataset = dataset.repeat() dataset = dataset.shuffle(2048, seed=seed) dataset = dataset.batch(batch_size, drop_remainder=True) dataset = dataset.prefetch(buffer_size) return dataset def get_validation_dataset(x_valid, y_valid, batch_size, buffer_size, repeated=False, seed=0): dataset = tf.data.Dataset.from_tensor_slices(({'input_ids': x_valid[0], 'attention_mask': x_valid[1]}, (y_valid[0], y_valid[1]))) if repeated: dataset = dataset.repeat() dataset = dataset.shuffle(2048, seed=seed) dataset = dataset.batch(batch_size, drop_remainder=True) dataset = dataset.cache() dataset = dataset.prefetch(buffer_size) return dataset AUTO = tf.data.experimental.AUTOTUNE history_list = [] for n_fold in range(config['N_FOLDS']): n_fold +=1 print('\nFOLD: %d' % (n_fold)) # Load data base_data_path = 'fold_%d/' % (n_fold) x_train = np.load(base_data_path + 'x_train.npy') y_train = np.load(base_data_path + 'y_train.npy') x_valid = np.load(base_data_path + 'x_valid.npy') y_valid = np.load(base_data_path + 'y_valid.npy') step_size = x_train.shape[1] // config['BATCH_SIZE'] # Train model model_path = 'model_fold_%d.h5' % (n_fold) model = model_fn(config['MAX_LEN']) es = EarlyStopping(monitor='val_loss', mode='min', patience=config['ES_PATIENCE'], restore_best_weights=True, verbose=1) checkpoint = ModelCheckpoint(model_path, monitor='val_loss', mode='min', save_best_only=True, save_weights_only=True) lr = lambda: one_cycle_schedule(tf.cast(optimizer.iterations, tf.float32), total_steps=total_steps, lr_start=lr_min, lr_max=lr_max) optimizer = optimizers.Adam(learning_rate=lr) model.compile(optimizer, loss=[losses.CategoricalCrossentropy(label_smoothing=0.2, from_logits=True), losses.CategoricalCrossentropy(label_smoothing=0.2, from_logits=True)]) history = model.fit(get_training_dataset(x_train, y_train, config['BATCH_SIZE'], AUTO, seed=SEED), validation_data=(get_validation_dataset(x_valid, y_valid, config['BATCH_SIZE'], AUTO, repeated=False, seed=SEED)), epochs=config['EPOCHS'], steps_per_epoch=step_size, callbacks=[checkpoint, es], verbose=2).history history_list.append(history) # Make predictions predict_eval_df(k_fold, model, x_train, x_valid, get_test_dataset, decode, n_fold, tokenizer, config, config['question_size']) ### Delete data dir shutil.rmtree(base_data_path) ###Output FOLD: 1 Epoch 1/2 686/686 - 179s - loss: 4.4252 - tf_op_layer_y_start_loss: 2.1905 - tf_op_layer_y_end_loss: 2.2347 - val_loss: 3.8289 - val_tf_op_layer_y_start_loss: 1.9351 - val_tf_op_layer_y_end_loss: 1.8939 Epoch 2/2 686/686 - 178s - loss: 3.7714 - tf_op_layer_y_start_loss: 1.9025 - tf_op_layer_y_end_loss: 1.8689 - val_loss: 3.7523 - val_tf_op_layer_y_start_loss: 1.9089 - val_tf_op_layer_y_end_loss: 1.8434 FOLD: 2 Epoch 1/2 686/686 - 179s - loss: 4.4480 - tf_op_layer_y_start_1_loss: 2.1996 - tf_op_layer_y_end_1_loss: 2.2484 - val_loss: 3.9117 - val_tf_op_layer_y_start_1_loss: 1.9693 - val_tf_op_layer_y_end_1_loss: 1.9424 Epoch 
2/2 686/686 - 177s - loss: 3.7756 - tf_op_layer_y_start_1_loss: 1.9087 - tf_op_layer_y_end_1_loss: 1.8669 - val_loss: 3.7808 - val_tf_op_layer_y_start_1_loss: 1.9083 - val_tf_op_layer_y_end_1_loss: 1.8725 FOLD: 3 Epoch 1/2 686/686 - 178s - loss: 4.4800 - tf_op_layer_y_start_2_loss: 2.2024 - tf_op_layer_y_end_2_loss: 2.2777 - val_loss: 3.9342 - val_tf_op_layer_y_start_2_loss: 2.0019 - val_tf_op_layer_y_end_2_loss: 1.9322 Epoch 2/2 686/686 - 177s - loss: 3.8038 - tf_op_layer_y_start_2_loss: 1.9215 - tf_op_layer_y_end_2_loss: 1.8823 - val_loss: 3.7305 - val_tf_op_layer_y_start_2_loss: 1.8841 - val_tf_op_layer_y_end_2_loss: 1.8464 FOLD: 4 Epoch 1/2 686/686 - 178s - loss: 4.4547 - tf_op_layer_y_start_3_loss: 2.2093 - tf_op_layer_y_end_3_loss: 2.2454 - val_loss: 3.9517 - val_tf_op_layer_y_start_3_loss: 2.0303 - val_tf_op_layer_y_end_3_loss: 1.9214 Epoch 2/2 686/686 - 177s - loss: 3.7717 - tf_op_layer_y_start_3_loss: 1.9084 - tf_op_layer_y_end_3_loss: 1.8633 - val_loss: 3.7536 - val_tf_op_layer_y_start_3_loss: 1.9051 - val_tf_op_layer_y_end_3_loss: 1.8484 FOLD: 5 Epoch 1/2 686/686 - 179s - loss: 4.3848 - tf_op_layer_y_start_4_loss: 2.1818 - tf_op_layer_y_end_4_loss: 2.2030 - val_loss: 3.8660 - val_tf_op_layer_y_start_4_loss: 1.9551 - val_tf_op_layer_y_end_4_loss: 1.9109 Epoch 2/2 686/686 - 177s - loss: 3.7625 - tf_op_layer_y_start_4_loss: 1.9017 - tf_op_layer_y_end_4_loss: 1.8608 - val_loss: 3.7787 - val_tf_op_layer_y_start_4_loss: 1.9126 - val_tf_op_layer_y_end_4_loss: 1.8660 ###Markdown Model loss graph ###Code for n_fold in range(config['N_FOLDS']): print('Fold: %d' % (n_fold+1)) plot_metrics(history_list[n_fold]) ###Output Fold: 1 ###Markdown Model evaluation ###Code display(evaluate_model_kfold(k_fold, config['N_FOLDS']).style.applymap(color_map)) ###Output _____no_output_____ ###Markdown Visualize predictions ###Code k_fold['jaccard_mean'] = 0 for n in range(config['N_FOLDS']): k_fold['jaccard_mean'] += k_fold[f'jaccard_fold_{n+1}'] / config['N_FOLDS'] display(k_fold[['text', 'selected_text', 'sentiment', 'text_tokenCnt', 'selected_text_tokenCnt', 'jaccard', 'jaccard_mean'] + [c for c in k_fold.columns if (c.startswith('prediction_fold'))]].head(15)) ###Output _____no_output_____
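###Markdown Evaluation in this notebook relies on the jaccard helper imported from the utility scripts. The sketch below shows the standard word-level Jaccard score used for this competition, which is what that helper is assumed to compute; it is included only to make the metric explicit. ###Code
# Assumed implementation of the word-level Jaccard score used for evaluation.
def jaccard(str1, str2):
    a = set(str1.lower().split())
    b = set(str2.lower().split())
    c = a.intersection(b)
    return float(len(c)) / (len(a) + len(b) - len(c))

print(jaccard('the cat sat here', 'the cat'))  # -> 0.5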
BDSN_DataFrame_AnalyticsVidhya.ipynb
###Markdown ![CC-BY-SA](https://licensebuttons.net/l/by-sa/3.0/88x31.png)![alt text](http://1.bp.blogspot.com/-nqAGzznZQNo/UwS8rxjfXeI/AAAAAAAABTA/nunmRLowpps/s1600/PraxisLogo.gif)[Data Science Program](http://praxis.ac.in/Programs/business-analytics/)[Prithwis Mukerjee](http://www.yantrajaal.com) Data Frame Analytics Vidhya https://www.analyticsvidhya.com/blog/2016/10/spark-dataframe-and-operations/ Alternate https://www.analyticsvidhya.com/blog/2016/09/comprehensive-introduction-to-apache-spark-rdds-dataframes-using-pyspark/ Data Bricks https://docs.databricks.com/spark/latest/data-sources/read-csv.html Initialise ###Code !apt-get update > /dev/null !apt-get install openjdk-8-jdk-headless -qq > /dev/null #!wget -q http://apache.osuosl.org/spark/spark-2.4.0/spark-2.4.0-bin-hadoop2.7.tgz #wget -q http://apache.osuosl.org/spark/spark-2.4.4/spark-2.4.4-bin-hadoop2.7.tgz #!wget -q http://apache.osuosl.org/spark/spark-2.4.5/spark-2.4.5-bin-hadoop2.7.tgz #!wget -q http://apache.osuosl.org/spark/spark-3.0.1/spark-3.0.1-bin-hadoop3.2.tgz !wget -q http://apache.osuosl.org/spark/spark-3.1.2/spark-3.1.2-bin-hadoop3.2.tgz #!tar xf spark-2.4.0-bin-hadoop2.7.tgz #!tar xf spark-2.4.4-bin-hadoop2.7.tgz #!tar xf spark-2.4.5-bin-hadoop2.7.tgz #!tar xf spark-3.0.1-bin-hadoop3.2.tgz !tar xf spark-3.1.2-bin-hadoop3.2.tgz #!pip install -q findspark !pip install -q pyspark import os os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64" #os.environ["SPARK_HOME"] = "/content/spark-2.4.0-bin-hadoop2.7" #os.environ["SPARK_HOME"] = "/content/spark-2.4.4-bin-hadoop2.7" #os.environ["SPARK_HOME"] = "/content/spark-2.4.5-bin-hadoop2.7" #os.environ["SPARK_HOME"] = "/content/spark-3.0.1-bin-hadoop3.2" os.environ["SPARK_HOME"] = "/content/spark-3.1.2-bin-hadoop3.2" #import findspark #findspark.init() from pyspark.sql import SparkSession spark = SparkSession.builder.master("local[*]").getOrCreate() sc = spark.sparkContext sc #from pyspark.sql import SQLContext #sqlContext = SQLContext(sc) ###Output _____no_output_____ ###Markdown Data Load ###Code #https://drive.google.com/file/d/1o-WzYvxfIXxvghazTLu3iuNqFE9IxGbw/view?usp=sharing !wget -O BDSDF_train.zip 'https://drive.google.com/uc?export=download&id=1o-WzYvxfIXxvghazTLu3iuNqFE9IxGbw' #https://drive.google.com/file/d/1UF5HGE3NGtQ9goeSvhsTnMMTUZiv7fhE/view?usp=sharing !wget -O BDSDF_test.zip 'https://drive.google.com/uc?export=download&id=1UF5HGE3NGtQ9goeSvhsTnMMTUZiv7fhE' !ls -al #!rm *.zip #!rm *.csv #from google.colab import files #datafile = files.upload() !unzip BDSDF_test.zip !unzip BDSDF_train.zip !ls -al #test = sqlContext.read.format("csv").option("header", "true").option("inferSchema", "true").load("./test.csv") test = spark.read.format("csv").option("header", "true").option("inferSchema", "true").load("./test.csv") #train = sqlContext.read.format("csv").option("header", "true").option("inferSchema", "true").load("./train.csv") train = spark.read.format("csv").option("header", "true").option("inferSchema", "true").load("./train.csv") import pandas as pd pTest = pd.read_csv("./test.csv") pTest.head(3) #train.printSchema() test.printSchema() ###Output root |-- User_ID: integer (nullable = true) |-- Product_ID: string (nullable = true) |-- Gender: string (nullable = true) |-- Age: string (nullable = true) |-- Occupation: integer (nullable = true) |-- City_Category: string (nullable = true) |-- Stay_In_Current_City_Years: string (nullable = true) |-- Marital_Status: integer (nullable = true) |-- Product_Category_1: integer (nullable = true) |-- 
Product_Category_2: integer (nullable = true) |-- Product_Category_3: integer (nullable = true) ###Markdown Dataframe Manipulations ###Code test.head(3) train.show() train.count(),test.count() len(train.columns), train.columns len(test.columns), test.columns train.describe().show() train.describe('Product_ID').show() train.select('User_ID','Age').show(5) train.select('Product_ID').distinct().count() test.select('Product_ID').distinct().count() train.select('Product_ID').distinct().count(),test.select('Product_ID').distinct().count() diff_cat_in_train_test=test.select('Product_ID').subtract(train.select('Product_ID')) diff_cat_in_train_test.distinct().count()# For distict count train.crosstab('Age', 'Gender').show() #train.crosstab('Age', 'Gender').describe().show() train.select('Age','Gender').dropDuplicates().show() train.dropna().count() #train.dropna().show(2) train.fillna(-1).count() #train.fillna('x').show(3) train.fillna(-1).show(2) train.filter(train.Purchase > 15000).count() train.groupby('Age').agg({'Purchase': 'mean'}).show() train.groupby('Age').agg({'Purchase': 'max'}).show() train.groupby('Age').count().show() t1 = train.sample(False, 0.2, 42) t2 = train.sample(False, 0.2, 43) t1.count(),t2.count() t1 = train.sample(False, 0.2, 1142) t2 = train.sample(False, 0.2, 2243) t1.count(),t2.count() #train.select('User_ID').rdd.map(lambda x:(x,1)).take(5) train.select('Product_ID').rdd.map(lambda x:(x,1)).take(5) train.orderBy(train.Purchase.desc()).show(10) train.select('User_ID').rdd.map(lambda x:(x,1)).take(5) ###Output _____no_output_____ ###Markdown SQL Query with Dataframe ###Code #train.registerAsTable('train_table') #train.registerTempTable("train_table") train.createOrReplaceTempView("train_table") #SQLContext is deprecated # --- not to be used #sqlContext.sql('select Product_ID from train_table').show(5) #sqlContext.sql('select Product_ID, Product_Category_1 from train_table').show(5) spark.sql('select Product_ID, Product_Category_1 from train_table').show(5) #sqlContext.sql('select Age, max(Purchase) from train_table group by Age').show() spark.sql('select Age, max(Purchase) from train_table group by Age').show() ###Output +-----+-------------+ | Age|max(Purchase)| +-----+-------------+ |18-25| 23958| |26-35| 23961| | 0-17| 23955| |46-50| 23960| |51-55| 23960| |36-45| 23960| | 55+| 23960| +-----+-------------+
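###Markdown The same aggregation can also be expressed through the DataFrame API instead of SQL. The short example below is an added illustration (not in the original notebook) and is equivalent to the group-by query above. ###Code
from pyspark.sql import functions as F

# DataFrame-API equivalent of: select Age, max(Purchase) from train_table group by Age
train.groupBy('Age').agg(F.max('Purchase').alias('max(Purchase)')).show()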
module1-rnn-and-lstm/Mikio_Harman_431_RNN_and_LSTM_Assignment.ipynb
###Markdown *Data Science Unit 4 Sprint 3 Assignment 1* Recurrent Neural Networks and Long Short Term Memory (LSTM)![Monkey at a typewriter](https://upload.wikimedia.org/wikipedia/commons/thumb/3/3c/Chimpanzee_seated_at_typewriter.jpg/603px-Chimpanzee_seated_at_typewriter.jpg)It is said that [infinite monkeys typing for an infinite amount of time](https://en.wikipedia.org/wiki/Infinite_monkey_theorem) will eventually type, among other things, the complete works of Wiliam Shakespeare. Let's see if we can get there a bit faster, with the power of Recurrent Neural Networks and LSTM.This text file contains the complete works of Shakespeare: https://www.gutenberg.org/files/100/100-0.txtUse it as training data for an RNN - you can keep it simple and train character level, and that is suggested as an initial approach.Then, use that trained RNN to generate Shakespearean-ish text. Your goal - a function that can take, as an argument, the size of text (e.g. number of characters or lines) to generate, and returns generated text of that size.Note - Shakespeare wrote an awful lot. It's OK, especially initially, to sample/use smaller data and parameters, so you can have a tighter feedback loop when you're trying to get things running. Then, once you've got a proof of concept - start pushing it more! ###Code from tensorflow.keras.callbacks import LambdaCallback from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, LSTM from tensorflow.keras.optimizers import RMSprop import numpy as np import random import sys import os shake = os.listdir('./ws') # Read in Data data = [] for file in shake: if file[-3:] == 'rtf': with open(f'./ws/{file}', 'r') as f: data.append(f.read()) data[0] # Encode Data as Chars """ 1. Create One Giant String of Articles 2. Get an unique list of chars 3. Create lookup dictionary `char_int` and `int_char` """ giant_string = data[0] chars = list(set(data[0])) char_indices = {c:i for i,c in enumerate(chars)} indices_char = {i:c for i,c in enumerate(chars)} char_int = char_indices int_char = indices_char # Create the Sequence Data maxlen = 40 step = 5 encoded = [char_int[c] for c in giant_string] sequences = [] #40 Characters next_chars = [] # 1 Character for i in range(0, len(encoded) - maxlen, step): sequences.append(encoded[i: i + maxlen]) next_chars.append(encoded[i + maxlen]) print('sequences: ', len(sequences)) # Specify x & y x = np.zeros((len(sequences), maxlen, len(chars)), dtype=np.bool) y = np.zeros((len(sequences), len(chars)), dtype=np.bool) for i, sequence in enumerate(sequences): for t, char in enumerate(sequence): x[i,t,char] = 1 y[i, next_chars[i]] = 1 x.shape x # build the model: a single LSTM model = Sequential() model.add(LSTM(128,input_shape=(maxlen, len(chars)))) model.add(Dense(len(chars), activation='softmax')) optimizer = RMSprop(learning_rate=0.01) model.compile(loss='categorical_crossentropy', optimizer=optimizer) def sample(preds, temperature=1.0): # helper function to sample an index from a probability array preds = np.asarray(preds).astype('float64') preds = np.log(preds) / temperature exp_preds = np.exp(preds) preds = exp_preds / np.sum(exp_preds) probas = np.random.multinomial(1, preds, 1) return np.argmax(probas) text = data[0] def on_epoch_end(epoch, _): # Function invoked at end of each epoch. Prints generated text. 
print() print('----- Generating text after Epoch: %d' % epoch) start_index = random.randint(0, len(text) - maxlen - 1) for diversity in [0.2, 0.5, 1.0, 1.2]: print('----- diversity:', diversity) generated = '' sentence = text[start_index: start_index + maxlen] generated += sentence print('----- Generating with seed: "' + sentence + '"') sys.stdout.write(generated) for i in range(400): x_pred = np.zeros((1, maxlen, len(chars))) for t, char in enumerate(sentence): x_pred[0, t, char_indices[char]] = 1. preds = model.predict(x_pred, verbose=0)[0] next_index = sample(preds, diversity) next_char = indices_char[next_index] sentence = sentence[1:] + next_char sys.stdout.write(next_char) sys.stdout.flush() print() print_callback = LambdaCallback(on_epoch_end=on_epoch_end) model.fit(x, y, batch_size=1024, epochs=1, callbacks=[print_callback]) x[[0][0][0]:[2][2][2]] ###Output _____no_output_____
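###Markdown The assignment asks for a function that takes the amount of text to generate and returns generated text of that size. A minimal sketch is given below; it is not part of the original notebook and reuses model, text, chars, char_indices, indices_char, maxlen and sample() defined in the cells above. ###Code
def generate_text(length, diversity=0.5):
    # seed with a random window from the training text
    start_index = random.randint(0, len(text) - maxlen - 1)
    sentence = text[start_index: start_index + maxlen]
    generated = ''
    for _ in range(length):
        # one-hot encode the current window of maxlen characters
        x_pred = np.zeros((1, maxlen, len(chars)))
        for t, char in enumerate(sentence):
            x_pred[0, t, char_indices[char]] = 1.
        preds = model.predict(x_pred, verbose=0)[0]
        next_char = indices_char[sample(preds, diversity)]
        generated += next_char
        sentence = sentence[1:] + next_char
    return generated

print(generate_text(200))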
MLH_arboldecision.ipynb
###Markdown **Machine Learning for Healthcare**--- **Introduction to Classification and Prediction Models*** Author: José Carlos Machicao* License: [GestioDinámica](http://www.gestiodinamica.com) 2021* Updated: 2021_10_11 Required external libraries ###Code import pandas as pd import numpy as np import matplotlib.pyplot as plt import os ###Output _____no_output_____ ###Markdown Loading the database ###Code ruta = 'drive/My Drive/2020 Cursos/2020 AI Salud/Curso ML Salud/' os.listdir(ruta) data = pd.read_excel(ruta + 'ai_salud_ML_002.xlsx') data = data.drop(['id'], axis=1) data.head() ###Output _____no_output_____ ###Markdown Graphical intuition about the prediction ###Code fig, axs = plt.subplots(1, 2, figsize=(10, 4), facecolor='lightblue') #clasificador = 'sexo' clasificador = 'patologia' for j, item in enumerate(data[clasificador].unique()): #print(j, item) datax = data[data[clasificador] == item] #print(datax) axs[j].hexbin(datax.edad, datax.aseo, gridsize=6) axs[j].set_xlabel('Age') axs[j].set_ylabel('Hygiene Level') axs[j].set_title(item, fontsize=18) plt.show() ###Output _____no_output_____ ###Markdown Decision Tree Model ###Code #@title Modeling-specific libraries from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier data.patologia.unique() data100 = pd.get_dummies(data, drop_first=True) data100.head() modelo = DecisionTreeClassifier(max_depth=2, random_state=0) X = data100.drop(['patologia_Positiva'], axis=1) y = data100.patologia_Positiva modelo.fit(X, y) y_pred = modelo.predict(X) y_pred data['tipo_pred'] = y_pred data['tipo_orig'] = data100.patologia_Positiva data.head() pd.pivot_table(data[['tipo_pred', 'tipo_orig', 'patologia']], index=['tipo_pred'], columns=['tipo_orig'], aggfunc='count') from sklearn.tree import plot_tree plt.figure(figsize=(18,10)) plot_tree(modelo, rounded=True, fontsize=12, label='all', filled=True, max_depth=2, feature_names=X.columns, class_names=['Neg', 'Pos'], proportion=True) plt.title('Decision Tree Generated from the Data', fontsize=20) plt.show() X.head() ###Output _____no_output_____ ###Markdown Example* Say a statement from a 40-year-old adult male comes in at 5 a.m., recorded by an officer on the street.* A 65-year-old woman also gives a statement in person at a police station around 4 p.m. ###Code X_ts = np.array([ [11, 2, 1, 1, 0], [11, 2, 1, 1, 1] ]) X_ts y_ts = modelo.predict(X_ts) y_ts ###Output _____no_output_____
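###Markdown The two hypothetical cases above are passed to the model as a raw NumPy array, so the column order has to be remembered by hand. The small sketch below is an addition, not part of the original notebook; it builds the same rows as a DataFrame with X's columns and also reads class probabilities. ###Code
import pandas as pd

# same two cases as X_ts above, but with explicit column names
X_ts_df = pd.DataFrame([[11, 2, 1, 1, 0],
                        [11, 2, 1, 1, 1]], columns=X.columns)
print(modelo.predict(X_ts_df))        # hard class labels (1 = Positiva)
print(modelo.predict_proba(X_ts_df))  # per-class probabilities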
Regression/Random Forest/RandomForestRegressor.ipynb
###Markdown Random Forest Regressor This Code template is for regression analysis using a simple RandomForestRegressor based on the Ensemble Learning technique. It is a meta estimator that fits multiple decision trees and uses averaging to improve the predictive accuracy and control over-fitting. Required Packages ###Code import warnings import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as se from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error warnings.filterwarnings('ignore') ###Output _____no_output_____ ###Markdown InitializationFilepath of CSV file ###Code #filepath file_path= "" ###Output _____no_output_____ ###Markdown List of features which are required for model training . ###Code #x_values features=[] ###Output _____no_output_____ ###Markdown Target variable for prediction. ###Code #y_value target='' ###Output _____no_output_____ ###Markdown Data FetchingPandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools.We will use panda's library to read the CSV file using its storage path.And we use the head function to display the initial row or entry. ###Code df=pd.read_csv(file_path) df.head() ###Output _____no_output_____ ###Markdown Feature SelectionsIt is the process of reducing the number of input variables when developing a predictive model. Used to reduce the number of input variables to both reduce the computational cost of modelling and, in some cases, to improve the performance of the model.We will assign all the required input features to X and target/outcome to Y. ###Code X = df[features] Y = df[target] ###Output _____no_output_____ ###Markdown Data PreprocessingSince the majority of the machine learning models in the Sklearn library doesn't handle string category data and Null value, we have to explicitly remove or replace null values. The below snippet have functions, which removes the null value if any exists. And convert the string classes data in the datasets by encoding them to integer classes. ###Code def NullClearner(df): if(isinstance(df, pd.Series) and (df.dtype in ["float64","int64"])): df.fillna(df.mean(),inplace=True) return df elif(isinstance(df, pd.Series)): df.fillna(df.mode()[0],inplace=True) return df else:return df def EncodeX(df): return pd.get_dummies(df) ###Output _____no_output_____ ###Markdown Calling preprocessing functions on the feature and target set. ###Code x=X.columns.to_list() for i in x: X[i]=NullClearner(X[i]) X=EncodeX(X) Y=NullClearner(Y) X.head() ###Output _____no_output_____ ###Markdown Correlation MapIn order to check the correlation between the features, we will plot a correlation matrix. It is effective in summarizing a large amount of data where the goal is to see patterns. ###Code f,ax = plt.subplots(figsize=(18, 18)) matrix = np.triu(X.corr()) se.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix) plt.show() ###Output _____no_output_____ ###Markdown Data SplittingThe train-test split is a procedure for evaluating the performance of an algorithm. The procedure involves taking a dataset and dividing it into two subsets. The first subset is utilized to fit/train the model. The second subset is used for prediction. The main motive is to estimate the performance of the model on new data. 
###Code X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size = 0.2, random_state = 123) ###Output _____no_output_____ ###Markdown ModelA random forest is a meta estimator that fits a number of classifying decision trees on various sub-samples of the dataset and uses averaging to improve the predictive accuracy and control over-fitting. The sub-sample size is controlled with the max_samples parameter if bootstrap=True (default), otherwise the whole dataset is used to build each tree. Model Tuning Parameters 1. n_estimators : int, default=100> The number of trees in the forest. 2. criterion : {“mae”, “mse”}, default=”mse”> The function to measure the quality of a split. Supported criteria are “mse” for the mean squared error, which is equal to variance reduction as feature selection criterion, and “mae” for the mean absolute error. 3. max_depth : int, default=None> The maximum depth of the tree. 4. max_features : {“auto”, “sqrt”, “log2”}, int or float, default=”auto”> The number of features to consider when looking for the best split: 5. bootstrap : bool, default=True> Whether bootstrap samples are used when building trees. If False, the whole dataset is used to build each tree. 6. oob_score : bool, default=False> Whether to use out-of-bag samples to estimate the generalization accuracy. 7. n_jobs : int, default=None> The number of jobs to run in parallel. fit, predict, decision_path and apply are all parallelized over the trees. None means 1 unless in a joblib.parallel_backend context. -1 means using all processors. See Glossary for more details. 8. random_state : int, RandomState instance or None, default=None> Controls both the randomness of the bootstrapping of the samples used when building trees (if bootstrap=True) and the sampling of the features to consider when looking for the best split at each node (if max_features ). 9. verbose : int, default=0> Controls the verbosity when fitting and predicting. ###Code model = RandomForestRegressor(n_jobs = -1,random_state = 123) model.fit(X_train, y_train) ###Output _____no_output_____ ###Markdown Model AccuracyWe will use the trained model to make a prediction on the test set.Then use the predicted value for measuring the accuracy of our model.> **score**: The **score** function returns the coefficient of determination R2 of the prediction. ###Code print("Accuracy score {:.2f} %\n".format(model.score(X_test,y_test)*100)) ###Output Accuracy score 96.14 % ###Markdown > **r2_score**: The **r2_score** function computes the percentage variablility explained by our model, either the fraction or the count of correct predictions. > **mae**: The **mean abosolute error** function calculates the amount of total error(absolute average distance between the real data and the predicted data) by our model. > **mse**: The **mean squared error** function squares the error(penalizes the model for large errors) by our model. ###Code y_pred=model.predict(X_test) print("R2 Score: {:.2f} %".format(r2_score(y_test,y_pred)*100)) print("Mean Absolute Error {:.2f}".format(mean_absolute_error(y_test,y_pred))) print("Mean Squared Error {:.2f}".format(mean_squared_error(y_test,y_pred))) ###Output R2 Score: 96.14 % Mean Absolute Error 2.34 Mean Squared Error 11.18 ###Markdown Feature ImportancesThe Feature importance refers to techniques that assign a score to features based on how useful they are for making the prediction. 
###Code plt.figure(figsize=(8,6)) n_features = len(X.columns) plt.barh(range(n_features), model.feature_importances_, align='center') plt.yticks(np.arange(n_features), X.columns) plt.xlabel("Feature importance") plt.ylabel("Feature") plt.ylim(-1, n_features) ###Output _____no_output_____ ###Markdown Prediction PlotFirst, we make use of a plot to plot the actual observations, with x_train on the x-axis and y_train on the y-axis.For the regression line, we will use x_train on the x-axis and then the predictions of the x_train observations on the y-axis. ###Code plt.figure(figsize=(14,10)) plt.plot(range(20),y_test[0:20], color = "green") plt.plot(range(20),model.predict(X_test[0:20]), color = "red") plt.legend(["Actual","prediction"]) plt.title("Predicted vs True Value") plt.xlabel("Record number") plt.ylabel(target) plt.show() ###Output _____no_output_____ ###Markdown Random Forest Regressor This Code template is for regression analysis using a simple RandomForestRegressor based on the Ensemble Learning technique. It is a meta estimator that fits multiple decision trees and uses averaging to improve the predictive accuracy and control over-fitting. Required Packages ###Code import warnings import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as se from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error warnings.filterwarnings('ignore') ###Output _____no_output_____ ###Markdown InitializationFilepath of CSV file ###Code #filepath file_path= "" ###Output _____no_output_____ ###Markdown List of features which are required for model training . ###Code #x_values features=[] ###Output _____no_output_____ ###Markdown Target variable for prediction. ###Code #y_value target='' ###Output _____no_output_____ ###Markdown Data FetchingPandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools.We will use panda's library to read the CSV file using its storage path.And we use the head function to display the initial row or entry. ###Code df=pd.read_csv(file_path) df.head() ###Output _____no_output_____ ###Markdown Feature SelectionsIt is the process of reducing the number of input variables when developing a predictive model. Used to reduce the number of input variables to both reduce the computational cost of modelling and, in some cases, to improve the performance of the model.We will assign all the required input features to X and target/outcome to Y. ###Code X = df[features] Y = df[target] ###Output _____no_output_____ ###Markdown Data PreprocessingSince the majority of the machine learning models in the Sklearn library doesn't handle string category data and Null value, we have to explicitly remove or replace null values. The below snippet have functions, which removes the null value if any exists. And convert the string classes data in the datasets by encoding them to integer classes. ###Code def NullClearner(df): if(isinstance(df, pd.Series) and (df.dtype in ["float64","int64"])): df.fillna(df.mean(),inplace=True) return df elif(isinstance(df, pd.Series)): df.fillna(df.mode()[0],inplace=True) return df else:return df def EncodeX(df): return pd.get_dummies(df) ###Output _____no_output_____ ###Markdown Calling preprocessing functions on the feature and target set. 
###Code x=X.columns.to_list() for i in x: X[i]=NullClearner(X[i]) X=EncodeX(X) Y=NullClearner(Y) X.head() ###Output _____no_output_____ ###Markdown Correlation MapIn order to check the correlation between the features, we will plot a correlation matrix. It is effective in summarizing a large amount of data where the goal is to see patterns. ###Code f,ax = plt.subplots(figsize=(18, 18)) matrix = np.triu(X.corr()) se.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix) plt.show() ###Output _____no_output_____ ###Markdown Data SplittingThe train-test split is a procedure for evaluating the performance of an algorithm. The procedure involves taking a dataset and dividing it into two subsets. The first subset is utilized to fit/train the model. The second subset is used for prediction. The main motive is to estimate the performance of the model on new data. ###Code X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size = 0.2, random_state = 123) ###Output _____no_output_____ ###Markdown ModelA random forest is a meta estimator that fits a number of classifying decision trees on various sub-samples of the dataset and uses averaging to improve the predictive accuracy and control over-fitting. The sub-sample size is controlled with the max_samples parameter if bootstrap=True (default), otherwise the whole dataset is used to build each tree. Model Tuning Parameters 1. n_estimators : int, default=100> The number of trees in the forest. 2. criterion : {“mae”, “mse”}, default=”mse”> The function to measure the quality of a split. Supported criteria are “mse” for the mean squared error, which is equal to variance reduction as feature selection criterion, and “mae” for the mean absolute error. 3. max_depth : int, default=None> The maximum depth of the tree. 4. max_features : {“auto”, “sqrt”, “log2”}, int or float, default=”auto”> The number of features to consider when looking for the best split: 5. bootstrap : bool, default=True> Whether bootstrap samples are used when building trees. If False, the whole dataset is used to build each tree. 6. oob_score : bool, default=False> Whether to use out-of-bag samples to estimate the generalization accuracy. 7. n_jobs : int, default=None> The number of jobs to run in parallel. fit, predict, decision_path and apply are all parallelized over the trees. None means 1 unless in a joblib.parallel_backend context. -1 means using all processors. See Glossary for more details. 8. random_state : int, RandomState instance or None, default=None> Controls both the randomness of the bootstrapping of the samples used when building trees (if bootstrap=True) and the sampling of the features to consider when looking for the best split at each node (if max_features ). 9. verbose : int, default=0> Controls the verbosity when fitting and predicting. ###Code model = RandomForestRegressor(n_jobs = -1,random_state = 123) model.fit(X_train, y_train) ###Output _____no_output_____ ###Markdown Model AccuracyWe will use the trained model to make a prediction on the test set.Then use the predicted value for measuring the accuracy of our model.> **score**: The **score** function returns the coefficient of determination R2 of the prediction. ###Code print("Accuracy score {:.2f} %\n".format(model.score(X_test,y_test)*100)) ###Output Accuracy score 96.14 % ###Markdown > **r2_score**: The **r2_score** function computes the percentage variablility explained by our model, either the fraction or the count of correct predictions. 
> **mae**: The **mean abosolute error** function calculates the amount of total error(absolute average distance between the real data and the predicted data) by our model. > **mse**: The **mean squared error** function squares the error(penalizes the model for large errors) by our model. ###Code y_pred=model.predict(X_test) print("R2 Score: {:.2f} %".format(r2_score(y_test,y_pred)*100)) print("Mean Absolute Error {:.2f}".format(mean_absolute_error(y_test,y_pred))) print("Mean Squared Error {:.2f}".format(mean_squared_error(y_test,y_pred))) ###Output R2 Score: 96.14 % Mean Absolute Error 2.34 Mean Squared Error 11.18 ###Markdown Feature ImportancesThe Feature importance refers to techniques that assign a score to features based on how useful they are for making the prediction. ###Code plt.figure(figsize=(8,6)) n_features = len(X.columns) plt.barh(range(n_features), model.feature_importances_, align='center') plt.yticks(np.arange(n_features), X.columns) plt.xlabel("Feature importance") plt.ylabel("Feature") plt.ylim(-1, n_features) ###Output _____no_output_____ ###Markdown Prediction PlotFirst, we make use of a plot to plot the actual observations, with x_train on the x-axis and y_train on the y-axis.For the regression line, we will use x_train on the x-axis and then the predictions of the x_train observations on the y-axis. ###Code plt.figure(figsize=(14,10)) plt.plot(range(20),y_test[0:20], color = "green") plt.plot(range(20),model.predict(X_test[0:20]), color = "red") plt.legend(["Actual","prediction"]) plt.title("Predicted vs True Value") plt.xlabel("Record number") plt.ylabel(target) plt.show() ###Output _____no_output_____ ###Markdown Random Forest Regressor Required Packages ###Code import warnings import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as se from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error warnings.filterwarnings('ignore') ###Output _____no_output_____ ###Markdown InitializationFilepath of CSV file ###Code #filepath file_path= "" ###Output _____no_output_____ ###Markdown List of features which are required for model training . ###Code #x_values features=[] ###Output _____no_output_____ ###Markdown Target variable for prediction. ###Code #y_value target='' ###Output _____no_output_____ ###Markdown Data FetchingPandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools.We will use panda's library to read the CSV file using its storage path.And we use the head function to display the initial row or entry. ###Code df=pd.read_csv(file_path) df.head() ###Output _____no_output_____ ###Markdown Feature SelectionsIt is the process of reducing the number of input variables when developing a predictive model. Used to reduce the number of input variables to both reduce the computational cost of modelling and, in some cases, to improve the performance of the model.We will assign all the required input features to X and target/outcome to Y. ###Code X = df[features] Y = df[target] ###Output _____no_output_____ ###Markdown Data PreprocessingSince the majority of the machine learning models in the Sklearn library doesn't handle string category data and Null value, we have to explicitly remove or replace null values. The below snippet have functions, which removes the null value if any exists. 
And convert the string classes data in the datasets by encoding them to integer classes. ###Code def NullClearner(df): if(isinstance(df, pd.Series) and (df.dtype in ["float64","int64"])): df.fillna(df.mean(),inplace=True) return df elif(isinstance(df, pd.Series)): df.fillna(df.mode()[0],inplace=True) return df else:return df def EncodeX(df): return pd.get_dummies(df) ###Output _____no_output_____ ###Markdown Calling preprocessing functions on the feature and target set. ###Code x=X.columns.to_list() for i in x: X[i]=NullClearner(X[i]) X=EncodeX(X) Y=NullClearner(Y) X.head() ###Output _____no_output_____ ###Markdown Correlation MapIn order to check the correlation between the features, we will plot a correlation matrix. It is effective in summarizing a large amount of data where the goal is to see patterns. ###Code f,ax = plt.subplots(figsize=(18, 18)) matrix = np.triu(X.corr()) se.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix) plt.show() ###Output _____no_output_____ ###Markdown Data SplittingThe train-test split is a procedure for evaluating the performance of an algorithm. The procedure involves taking a dataset and dividing it into two subsets. The first subset is utilized to fit/train the model. The second subset is used for prediction. The main motive is to estimate the performance of the model on new data. ###Code X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size = 0.2, random_state = 123) ###Output _____no_output_____ ###Markdown ModelA random forest is a meta estimator that fits a number of classifying decision trees on various sub-samples of the dataset and uses averaging to improve the predictive accuracy and control over-fitting. The sub-sample size is controlled with the max_samples parameter if bootstrap=True (default), otherwise the whole dataset is used to build each tree. Model Tuning Parameters 1. n_estimators : int, default=100> The number of trees in the forest. 2. criterion : {“mae”, “mse”}, default=”mse”> The function to measure the quality of a split. Supported criteria are “mse” for the mean squared error, which is equal to variance reduction as feature selection criterion, and “mae” for the mean absolute error. 3. max_depth : int, default=None> The maximum depth of the tree. 4. max_features : {“auto”, “sqrt”, “log2”}, int or float, default=”auto”> The number of features to consider when looking for the best split: 5. bootstrap : bool, default=True> Whether bootstrap samples are used when building trees. If False, the whole dataset is used to build each tree. 6. oob_score : bool, default=False> Whether to use out-of-bag samples to estimate the generalization accuracy. 7. n_jobs : int, default=None> The number of jobs to run in parallel. fit, predict, decision_path and apply are all parallelized over the trees. None means 1 unless in a joblib.parallel_backend context. -1 means using all processors. See Glossary for more details. 8. random_state : int, RandomState instance or None, default=None> Controls both the randomness of the bootstrapping of the samples used when building trees (if bootstrap=True) and the sampling of the features to consider when looking for the best split at each node (if max_features ). 9. verbose : int, default=0> Controls the verbosity when fitting and predicting. 
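###Markdown Before fitting with the default settings in the next cell, the hypothetical cell here sketches how a few of the tuning parameters listed above could be set explicitly. The particular values are illustrative assumptions only, not recommendations; in practice they would be chosen against a validation set or with a grid search. ###Code
# Illustrative only: these parameter values are assumptions, not tuned choices.
from sklearn.ensemble import RandomForestRegressor

rf_tuned = RandomForestRegressor(
    n_estimators=200,     # more trees than the default 100
    criterion='mse',      # split quality measured by mean squared error
    max_depth=None,       # let each tree grow until its leaves are pure
    max_features='auto',  # number of features considered at each split
    bootstrap=True,       # sample with replacement when building trees
    oob_score=True,       # report an out-of-bag estimate of generalization
    n_jobs=-1,
    random_state=123)
# rf_tuned.fit(X_train, y_train)  # would be fit exactly like the default model below
###Output _____no_output_____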
###Code model = RandomForestRegressor(n_jobs = -1,random_state = 123) model.fit(X_train, y_train) ###Output _____no_output_____ ###Markdown Model AccuracyWe will use the trained model to make a prediction on the test set. Then use the predicted value for measuring the accuracy of our model.> **score**: The **score** function returns the coefficient of determination R2 of the prediction. ###Code print("Accuracy score {:.2f} %\n".format(model.score(X_test,y_test)*100)) ###Output Accuracy score 96.14 % ###Markdown > **r2_score**: The **r2_score** function computes the percentage variability explained by our model, i.e., the proportion of the variance in the target that the model explains. > **mae**: The **mean absolute error** function calculates the amount of total error (the average absolute distance between the real data and the predicted data) by our model. > **mse**: The **mean squared error** function squares the errors (which penalizes the model for large errors) by our model. ###Code y_pred=model.predict(X_test) print("R2 Score: {:.2f} %".format(r2_score(y_test,y_pred)*100)) print("Mean Absolute Error {:.2f}".format(mean_absolute_error(y_test,y_pred))) print("Mean Squared Error {:.2f}".format(mean_squared_error(y_test,y_pred))) ###Output R2 Score: 96.14 % Mean Absolute Error 2.34 Mean Squared Error 11.18 ###Markdown Feature ImportancesFeature importance refers to techniques that assign a score to features based on how useful they are for making the prediction. ###Code plt.figure(figsize=(8,6)) n_features = len(X.columns) plt.barh(range(n_features), model.feature_importances_, align='center') plt.yticks(np.arange(n_features), X.columns) plt.xlabel("Feature importance") plt.ylabel("Feature") plt.ylim(-1, n_features) ###Output _____no_output_____ ###Markdown Prediction PlotFirst, we plot the first 20 actual test-set values of the target against their record number. We then overlay the model's predictions for the same test records, so the predicted and true values can be compared directly. ###Code plt.figure(figsize=(14,10)) plt.plot(range(20),y_test[0:20], color = "green") plt.plot(range(20),model.predict(X_test[0:20]), color = "red") plt.legend(["Actual","prediction"]) plt.title("Predicted vs True Value") plt.xlabel("Record number") plt.ylabel(target) plt.show() ###Output _____no_output_____
site/public/courses/DS-2.1/Notebooks/remote_simple_svm.ipynb
###Markdown Learning Objectives By the end of this class, you should be able to...- Introduce a classifier, that seperates different classes we have in our dataset, with a maximum margin known as a Support Vector Machine (SVM)- Modify the SVM for datasets that are not linearly separable- Adjust the SVM for an unbalanced dataset Support Vector Machine (SVM)- SVM is a supervised machine learning model for classification tasks. It's really good at doing both classification and regression simultaneously. - If you have a lot of data that needs to be classified, an SVM can help you achieve that- For two dimensions (when we have two features), assume the target has two classes. The SVM will obtain the best line that seperates the data into two groups Question: which one of the above lines is the best choice for separating the data? Why? What are Support Vectors in SVM?**Support vectors** are the datapoints that lie closest to the decision boundary (best line, seen in red below) Brain HealthWe have medical data on brain health through the following [Brain Health Dataset](./Datasets/SVM_Dataset1.csv)- Two features (X1 and X2 columns) are given that are releted to brain chemistry: - Serotonin - Dopamine - The target (y column) shows brain health by indicating having (-1) or not having (1) depression. - 1 means subject does not have depression - -1 means subject does have depression ###Code import pandas as pd df=pd.read_csv('SVM_Dataset1.csv', index_col=0) print(df) ###Output _____no_output_____ ###Markdown Activity: Lets build our X_train and Y_train arrays**Complete this activity groups of 3:****Hint:** We'll need to use numpy here, as well as the [zip function](https://www.geeksforgeeks.org/create-pandas-dataframe-from-lists-using-zip/) ###Code import numpy as np X1=df['X1'] X2=df['X2'] X_train=np.array(list(zip(X1,X2))) print(X_train) y_train=df['y'].values print(y_train) ###Output [ 1 1 1 1 -1 -1 -1] ###Markdown Activity: Assign color and label to each target class**Complete this activity groups of 3:****Hint:** To do this, build two lists whose color/label values match with the values of the `y_train` array at the corresponding indices. ###Code color_ls = [] # if y == 1, then the value at the same index in color_ls will be 'b' # otherwise, it will be 'r' for k in y_train: if k == 1: color_ls.append('b') else: color_ls.append('r') print(color_ls) label = [] # if y == 1, then the value at the same index in label will be 'H' # otherwise, it will be 'NH' for k in y_train: if k == 1: label.append('H') else: label.append('NH') print(label) ###Output _____no_output_____ ###Markdown Activity: Create a scatter plot of data with labels**Complete this activity groups of 3:****Hints:**- There's some functions in [matplotlib.pyplot](https://matplotlib.org/3.2.1/api/_as_gen/matplotlib.pyplot.html) that will help us here.- How can the [enumerate function](https://www.geeksforgeeks.org/enumerate-in-python/) help us here? ###Code import matplotlib.pyplot as plt # k is our index, (i,j) are our Serotonin and Dopamine pairings. 
# For example, here are the values on the first iteration of the loop: # k == 0, (i, j) == (2.947814, 6.626878) for k, (i,j) in enumerate(X_train): # add the datapoint to a scatter plot, # using the k'th color in colors_ls plt.scatter(i, j, c = color_ls[k]) # 0.02 to the right and above the point, # add the k'th text label in label plt.text(i+0.02, j+0.02, label[k]) ###Output _____no_output_____ ###Markdown How to obtain the best line using sklearn**Code-along with Milad**sklearn has an [SVM module](https://scikit-learn.org/stable/modules/svm.htmlsvm) you can import that will help you find the best fit line. Look into how the `SVC` and `fit` functions can help you ###Code from sklearn import svm # svm_classifier = svm.SVC(kernel='poly',C=1, degree=2) # We want to use a linear classification svm_classifier = svm.SVC(kernel='linear', C=10) # find the best fit line svm_classifier.fit(X_train, y_train) # Helper function to plot the best fit line, # as well as marking the closest data points to the line, # through dotted lines (margins) def plot_decision_boundary(clf, X, y): x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1 x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max),np.arange(x2_min, x2_max)) Z = clf.decision_function(np.array([xx1.ravel(), xx2.ravel()]).T).reshape(xx1.shape) plt.contour(xx1, xx2, Z, colors='b', levels=[-1, 0, 1], alpha=0.4, linestyles=['--', '-', '--']) plt.xlim(xx1.min(), xx1.max()) plt.ylim(xx2.min(), xx2.max()) # pass in the trained model and data points, # and plot the best fit line + margins plot_decision_boundary(svm_classifier, X_train, y_train) # plot data points + color/labels for k, (i,j) in enumerate(X_train): plt.scatter(i, j, c = color_ls[k]) plt.text(i+0.02, j+0.02, label[k]) # Number of Support Vectors for each class: svm_classifier.n_support_ # What are those Support Vectors: svm_classifier.support_vectors_ # Obtain the slope (weight) and intercept for the best fit line equation: # a + b weight=svm_classifier.coef_ # c intercept=svm_classifier.intercept_ print(weight) print(intercept) ###Output [[2.90336685 1.20121959]] [-14.73321143] ###Markdown From the above weight and intecept results, we can say the best line is:$ax_1+bx_2+c =0$, where $a = 2.90336685$, $b = 1.20121959$ and $c = -14.73321143$ Activity: Check that the points (3, 5) and (2, 7.5) are very close to the best line:- Use `a`, `b`, and `c` that we just calculated ###Code print(weight[0][0]*3+weight[0][1]*5+intercept[0]) print(weight[0][0]*2+weight[0][1]*7.5+intercept[0]) ###Output -0.017012931258699737 0.08266919494858627 ###Markdown Activity: Am I healthy?- I went to a medical lab and they measured my Serotonin and Dopamine which was 3 and 6, respectively.- Can we use the SVM classifier to see if I am deppressed? ###Code svm_classifier.predict([[3,6]]) ###Output _____no_output_____ ###Markdown Good, I am not depressed :) SVM Training For Non-Linearly Separable DataBased on the [SVM_Dataset2 dataset](./Datasets/SVM_Dataset2.csv), it is possible that we can not find a _line_ that separates the two classes. Solution: Use Polynomial as the Kernel What if even polynomial can not separate the two groups? Solution: Use Radial Basis Function (RBF) as the Kernel Kernels in SVM and their parameters:Type of kernels:1. Linear1. Polynomial1. RBF (Gaussian)There are two parameters for each of these:- Gamma (for RBF only). This is the "spread" of the decision region (kernel)- C (for all of them). 
This is the threshold for misclassifying data.Read [this article on SVM parameters](https://chrisalbon.com/machine_learning/support_vector_machines/svc_parameters_using_rbf_kernel/) for more information! (Optional) What if the classes are unbalanced?Let's go through this example together to learn about assigning **class weights** for SVMs:http://scikit-learn.org/stable/auto_examples/svm/plot_separating_hyperplane_unbalanced.html Activity: Obtain which line (black or red) from the previous example will have the lowest error**Complete this activity in groups of 3****Hints:**Follow these steps:1. Train two SVM models: one with class weights, and one without1. Pass the dataset into your SVM models1. Compare the model predictions with known classes1. Report which one has the lowest mistakes (error rate)**Use the following code to get started (includes the dataset to use:)** ###Code import numpy as np import matplotlib.pyplot as plt from sklearn import svm from sklearn.datasets import make_blobs # we create two clusters of random points n_samples_1 = 1000 n_samples_2 = 100 centers = [[0.0, 0.0], [2.0, 2.0]] clusters_std = [1.5, 0.5] X, y = make_blobs(n_samples=[n_samples_1, n_samples_2], centers=centers, cluster_std=clusters_std, random_state=0, shuffle=False) ###Output _____no_output_____ ###Markdown Resource: All the code we used for SVM_Dataset1: ###Code import numpy as np import pandas as pd from sklearn import svm import matplotlib.pyplot as plt df=pd.read_csv('SVM_Dataset1.csv', index_col=0) X1=df['X1'] X2=df['X2'] X_train=np.array(list(zip(X1,X2))) y_train=df['y'].values svm_classifier = svm.SVC(kernel='linear',C=10) # svm_classifier = svm.SVC(kernel='rbf', gamma=0.1, C=10) svm_classifier.fit(X_train, y_train) color_ls = [] for k in y_train: if k == 1: color_ls.append('b') else: color_ls.append('r') color_ls label = [] for k in y_train: if k == 1: label.append('H') else: label.append('NH') label def plot_decision_boundary(clf, X, y): x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1 x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max),np.arange(x2_min, x2_max)) Z = clf.decision_function(np.array([xx1.ravel(), xx2.ravel()]).T).reshape(xx1.shape) plt.contourf(xx1, xx2, Z, colors='k', levels=[-1, 0, 1], alpha=0.4, linestyles=['--', '-', '--']) plt.xlim(xx1.min(), xx1.max()) plt.ylim(xx2.min(), xx2.max()) plot_decision_boundary(svm_classifier, X_train, y_train) for k, (i,j) in enumerate(X_train): plt.scatter(i, j, c = color_ls[k]) plt.text(i+0.02, j+0.02, label[k]) svm_classifier.support_vectors_ ###Output _____no_output_____ ###Markdown Resource: Code for SVM_Dataset 2: ###Code # Apply different SVM kernels, (linear, polynomial or RBF) to obtain the best classifier for SVM_Dataset2.csv import numpy as np import pandas as pd from sklearn import svm import matplotlib.pyplot as plt df=pd.read_csv('SVM_Dataset2.csv') X1=df['x1'] X2=df['x2'] X_train=np.array(list(zip(X1,X2))) y_train=df['y'].values # svm_classifier = svm.SVC(kernel='linear', C=10) #svm_classifier = svm.SVC(kernel='poly', C=10) svm_classifier = svm.SVC(kernel='poly',C=1, degree=2) # svm_classifier = svm.SVC(kernel='rbf', gamma=0.1, C=100) svm_classifier.fit(X_train, y_train) color_ls = [] for k in y_train: if k == 1: color_ls.append('b') else: color_ls.append('r') color_ls label = [] for k in y_train: if k == 1: label.append('H') else: label.append('NH') label def plot_decision_boundary(clf, X, y): x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1 x2_min, x2_max = X[:, 
1].min() - 1, X[:, 1].max() + 1 xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max),np.arange(x2_min, x2_max)) Z = clf.decision_function(np.array([xx1.ravel(), xx2.ravel()]).T).reshape(xx1.shape) plt.contourf(xx1, xx2, Z, colors='k', levels=[-1, 0, 1], alpha=0.4, linestyles=['--', '-', '--']) plt.xlim(xx1.min(), xx1.max()) plt.ylim(xx2.min(), xx2.max()) plot_decision_boundary(svm_classifier, X_train, y_train) for k, (i,j) in enumerate(X_train): plt.scatter(i, j, c = color_ls[k]) plt.text(i+0.02, j+0.02, label[k]) svm_classifier.support_vectors_ ###Output _____no_output_____
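###Markdown Returning to the unbalanced-classes activity above, here is one possible solution sketch. Hedged assumptions: the `class_weight={1: 10}` value and the colour convention (black for the unweighted model, red for the class-weighted one) follow the linked scikit-learn example, and other weightings are equally valid. Note that on unbalanced data the raw error count alone can be misleading, because a model can score well simply by ignoring the rare class. ###Code
import numpy as np
from sklearn import svm
from sklearn.datasets import make_blobs

# Recreate the two unbalanced clusters from the activity starter code
n_samples_1, n_samples_2 = 1000, 100
centers = [[0.0, 0.0], [2.0, 2.0]]
clusters_std = [1.5, 0.5]
X, y = make_blobs(n_samples=[n_samples_1, n_samples_2], centers=centers,
                  cluster_std=clusters_std, random_state=0, shuffle=False)

# Model without class weights (the "black" line) and with class weights (the "red" line)
clf_plain = svm.SVC(kernel='linear', C=1.0).fit(X, y)
clf_weighted = svm.SVC(kernel='linear', C=1.0, class_weight={1: 10}).fit(X, y)

# Compare predictions with the known classes and report the error counts
print("Errors without class weights:", np.sum(clf_plain.predict(X) != y))
print("Errors with class_weight={1: 10}:", np.sum(clf_weighted.predict(X) != y))
###Output _____no_output_____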
LDA(Topic Modelling).ipynb
###Markdown Topic Modelling using LDA ###Code import re import pandas as pd pd.set_option('display.max_colwidth', -1) import numpy as np from pprint import pprint import gensim import gensim.corpora as corpora from gensim.utils import simple_preprocess from gensim.models import CoherenceModel import spacy import pyLDAvis import pyLDAvis.gensim import matplotlib.pyplot as plt import warnings warnings.filterwarnings("ignore", category=DeprecationWarning) from nltk.corpus import stopwords from nltk.stem import WordNetLemmatizer, SnowballStemmer import nltk nltk.download("stopwords") stop_words = stopwords.words("english") stop_words += ['from', 'subject', 're', 'edu', 'use'] ###Output _____no_output_____ ###Markdown Import the 20-Newsgroups dataset ###Code # Import Dataset df = pd.read_json('https://raw.githubusercontent.com/selva86/datasets/master/newsgroups.json') print(df['target_names'].unique()) df.head() ###Output ['rec.autos' 'comp.sys.mac.hardware' 'comp.graphics' 'sci.space' 'talk.politics.guns' 'sci.med' 'comp.sys.ibm.pc.hardware' 'comp.os.ms-windows.misc' 'rec.motorcycles' 'talk.religion.misc' 'misc.forsale' 'alt.atheism' 'sci.electronics' 'comp.windows.x' 'rec.sport.hockey' 'rec.sport.baseball' 'soc.religion.christian' 'talk.politics.mideast' 'talk.politics.misc' 'sci.crypt'] ###Markdown Data Cleaning- Remove noise- Tokenization and removing punctuation using gensim.utils.simple_process- remove stopwords and words with length <= 2- lemmatization ###Code data = df['content'].values.tolist() # Remove Emails data = [re.sub('\S*@\S*\s?', '', sent) for sent in data] # Remove new line characters data = [re.sub('\s+', ' ', sent) for sent in data] # Remove distracting single quotes data = [re.sub("\'", "", sent) for sent in data] pprint(data[:1]) #use gensim simple_process to tokenize words and remove punctuations def tokenize(list_sentence): tokenized = [] for sentence in list_sentence: tokenized.append(simple_preprocess(sentence, deacc=True)) return tokenized tokenized_data = tokenize(data) tokenized_data[0][:10] def remove_stopwords(tokenized_data): token_no_stop = [] for doc in tokenized_data: token_no_stop.append([word for word in doc if word not in stop_words and len(word)>2]) return token_no_stop def lemmatization(tokens): for pos in ['v', 'n', 'a']: for idx, doc in enumerate(tokens): tokens[idx] = [WordNetLemmatizer().lemmatize(word, pos=pos) for word in doc] return tokens clean_data = lemmatization(remove_stopwords(tokenized_data)) print(clean_data[0]) ###Output ['wheres', 'thing', 'car', 'nntp', 'post', 'host', 'rac', 'wam', 'umd', 'organization', 'university', 'maryland', 'college', 'park', 'line', 'wonder', 'anyone', 'could', 'enlighten', 'car', 'saw', 'day', 'door', 'sport', 'car', 'look', 'late', 'early', 'call', 'bricklin', 'door', 'really', 'small', 'addition', 'front', 'bumper', 'separate', 'rest', 'body', 'know', 'anyone', 'tellme', 'model', 'name', 'engine', 'spec', 'year', 'production', 'car', 'make', 'history', 'whatever', 'info', 'funky', 'look', 'car', 'please', 'mail', 'thank', 'bring', 'neighborhood', 'lerxst'] ###Markdown Create dictionary and Bag of Words- dictionary: mapping of integers to words- Bag of Words: the term frequency of each word in documents ###Code id2word = corpora.Dictionary(clean_data) id2word.filter_extremes(no_below=15, no_above=0.1) #Bag of Words corpus = [id2word.doc2bow(data) for data in clean_data] corpus[0][:10] #human readable version (term frequency) doc1 = corpus[0] term_freq = [(id2word[k], v) for k, v in doc1] term_freq[:10] 
###Output _____no_output_____ ###Markdown Building the topic model ###Code lda_model = gensim.models.ldamodel.LdaModel(corpus=corpus, id2word=id2word, num_topics=20, random_state=100, chunksize=100, passes=10, per_word_topics=True) pprint(lda_model.print_topics(-1)) coherence_model_lda = CoherenceModel(model=lda_model, texts=clean_data, dictionary=id2word, coherence='c_v') coherence_lda = coherence_model_lda.get_coherence() print('\nCoherence Score: ', coherence_lda) pyLDAvis.enable_notebook() vis = pyLDAvis.gensim.prepare(lda_model, corpus, id2word) print(vis) document_topic = pd.DataFrame() topic_distribution = [doc[0] for doc in lda_model[corpus]] for i, topics in enumerate(topic_distribution): topics = sorted(topics, key = lambda x: x[1], reverse=True) topic_num, topic_pct = topics[0][0], topics[0][1] topic_keywords = '.'.join([word for word, p in lda_model.show_topic(topic_num)]) document_topic = document_topic.append(pd.Series([int(topic_num), round(topic_pct, 3), topic_keywords]), ignore_index=True) contents = df['target_names'] document_topic = pd.concat([document_topic, contents], axis=1) document_topic.columns = ['topic_num', 'topic_pct', 'topic_keywords', 'target_names'] ###Output _____no_output_____ ###Markdown Compare the generated topics (topic_keywords) to the target topics ###Code document_topic.head(10) ###Output _____no_output_____
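###Markdown As a quick usage sketch (an addition, not part of the original analysis), the trained `lda_model` and the `id2word` dictionary built above can also score a completely new piece of text. The sentence below is invented purely for illustration; it is pushed through the same cleaning helpers defined earlier and `get_document_topics` then returns its inferred topic mixture. ###Code
# Hypothetical new document, reusing the tokenize/remove_stopwords/lemmatization helpers
new_doc = "NASA plans another shuttle launch to put a satellite into orbit next year"
new_tokens = lemmatization(remove_stopwords(tokenize([new_doc])))[0]
new_bow = id2word.doc2bow(new_tokens)

# Top topics for this document, most probable first
for topic_num, prob in sorted(lda_model.get_document_topics(new_bow),
                              key=lambda x: x[1], reverse=True)[:3]:
    keywords = ", ".join(word for word, _ in lda_model.show_topic(topic_num, topn=5))
    print(topic_num, round(prob, 3), keywords)
###Output _____no_output_____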
pytorch/chapter_recurrent-modern/lstm.ipynb
###Markdown Long Short-Term Memory (LSTM):label:`sec_lstm`For a long time, latent-variable models have struggled with preserving information over long time spans while handling short-term inputs. One of the earliest approaches to address this is the long short-term memory (LSTM) :cite:`Hochreiter.Schmidhuber.1997`. It shares many of the properties of the gated recurrent unit ( :numref:`sec_gru`). Interestingly, the LSTM has a slightly more complex design than the gated recurrent unit, yet it predates it by almost 20 years. Gated Memory CellArguably, the design of the LSTM is inspired by the logic gates of a computer. The LSTM introduces a *memory cell*, or *cell* for short. Some literature considers the memory cell a special type of hidden state: it has the same shape as the hidden state, and it is designed to record additional information. To control the memory cell we need a number of gates. One gate is used to read entries out of the cell; we call it the *output gate*. Another gate decides when to read data into the cell; we call it the *input gate*. We also need a mechanism to reset the content of the cell, governed by a *forget gate*. The motivation for this design is the same as for the gated recurrent unit: being able to decide, via a dedicated mechanism, when to remember and when to ignore inputs to the hidden state. Let us see how this works in practice. Input Gate, Forget Gate, and Output GateJust as in the gated recurrent unit, the input of the current time step and the hidden state of the previous time step are fed into the gates of the LSTM, as shown in :numref:`lstm_0`. They are processed by three fully connected layers with sigmoid activation functions to compute the values of the input gate, the forget gate, and the output gate. As a result, the values of all three gates lie in the range $(0, 1)$.![Input gate, forget gate, and output gate in an LSTM](../img/lstm-0.svg):label:`lstm_0`Let us refine the mathematical formulation. Suppose there are $h$ hidden units, the batch size is $n$, and the number of inputs is $d$. The input is therefore $\mathbf{X}_t \in \mathbb{R}^{n \times d}$ and the hidden state of the previous time step is $\mathbf{H}_{t-1} \in \mathbb{R}^{n \times h}$. Correspondingly, the gates at time step $t$ are defined as follows: the input gate is $\mathbf{I}_t \in \mathbb{R}^{n \times h}$, the forget gate is $\mathbf{F}_t \in \mathbb{R}^{n \times h}$, and the output gate is $\mathbf{O}_t \in \mathbb{R}^{n \times h}$. They are calculated as follows:$$\begin{aligned}\mathbf{I}_t &= \sigma(\mathbf{X}_t \mathbf{W}_{xi} + \mathbf{H}_{t-1} \mathbf{W}_{hi} + \mathbf{b}_i),\\\mathbf{F}_t &= \sigma(\mathbf{X}_t \mathbf{W}_{xf} + \mathbf{H}_{t-1} \mathbf{W}_{hf} + \mathbf{b}_f),\\\mathbf{O}_t &= \sigma(\mathbf{X}_t \mathbf{W}_{xo} + \mathbf{H}_{t-1} \mathbf{W}_{ho} + \mathbf{b}_o),\end{aligned}$$where $\mathbf{W}_{xi}, \mathbf{W}_{xf}, \mathbf{W}_{xo} \in \mathbb{R}^{d \times h}$ and $\mathbf{W}_{hi}, \mathbf{W}_{hf}, \mathbf{W}_{ho} \in \mathbb{R}^{h \times h}$ are weight parameters and $\mathbf{b}_i, \mathbf{b}_f, \mathbf{b}_o \in \mathbb{R}^{1 \times h}$ are bias parameters. Candidate Memory CellSince the operation of the various gates has not yet been specified, we first introduce the *candidate memory cell* $\tilde{\mathbf{C}}_t \in \mathbb{R}^{n \times h}$. Its computation is similar to that of the three gates described above, but it uses the $\tanh$ function as the activation function, so its values lie in the range $(-1, 1)$. This leads to the following equation at time step $t$:$$\tilde{\mathbf{C}}_t = \text{tanh}(\mathbf{X}_t \mathbf{W}_{xc} + \mathbf{H}_{t-1} \mathbf{W}_{hc} + \mathbf{b}_c),$$where $\mathbf{W}_{xc} \in \mathbb{R}^{d \times h}$ and $\mathbf{W}_{hc} \in \mathbb{R}^{h \times h}$ are weight parameters and $\mathbf{b}_c \in \mathbb{R}^{1 \times h}$ is a bias parameter. The candidate memory cell is shown in :numref:`lstm_1`.![Candidate memory cell in an LSTM](../img/lstm-1.svg):label:`lstm_1` Memory CellIn the gated recurrent unit there is a single mechanism to govern input and forgetting (or skipping). Similarly, in the LSTM two gates serve this purpose: the input gate $\mathbf{I}_t$ governs how much new data from $\tilde{\mathbf{C}}_t$ is taken into account, while the forget gate $\mathbf{F}_t$ governs how much of the content of the past memory cell $\mathbf{C}_{t-1} \in \mathbb{R}^{n \times h}$ is retained. Using elementwise multiplication, we arrive at:$$\mathbf{C}_t = \mathbf{F}_t \odot \mathbf{C}_{t-1} + \mathbf{I}_t \odot \tilde{\mathbf{C}}_t.$$If the forget gate is always $1$ and the input gate is always $0$, the past memory cell $\mathbf{C}_{t-1}$ is preserved over time and passed on to the current time step. This design was introduced to alleviate the vanishing-gradient problem and to better capture long-range dependencies within sequences. We thus arrive at the flow diagram for computing the memory cell in :numref:`lstm_2`.![Computing the memory cell in an LSTM](../img/lstm-2.svg):label:`lstm_2` Hidden StateFinally, we need to define how to compute the hidden state $\mathbf{H}_t \in \mathbb{R}^{n \times h}$, and this is where the output gate comes into play. In the LSTM it is simply a gated version of the $\tanh$ of the memory cell, which ensures that the values of $\mathbf{H}_t$ always lie in the interval $(-1, 1)$:$$\mathbf{H}_t = \mathbf{O}_t \odot \tanh(\mathbf{C}_t).$$Whenever the output gate is close to $1$, we effectively pass all memory information on to the prediction part, whereas for an output gate close to $0$ we keep all the information inside the memory cell and do not update the hidden state. :numref:`lstm_3` gives a graphical illustration of the data flow.![Computing the hidden state in an LSTM](../img/lstm-3.svg):label:`lstm_3` Implementation from ScratchNow let us implement an LSTM from scratch. As in the experiments of :numref:`sec_rnn_scratch`, we first load the time machine dataset. ###Code import torch from torch import nn from d2l import torch as d2l batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) ###Output _____no_output_____ ###Markdown [**Initializing Model Parameters**]Next, we need to define and initialize the model parameters. As before, the hyperparameter `num_hiddens` defines the number of hidden units. We initialize the weights from a Gaussian distribution with standard deviation $0.01$ and set the biases to $0$. ###Code def get_lstm_params(vocab_size, num_hiddens, device): num_inputs = num_outputs = vocab_size def normal(shape): return torch.randn(size=shape, device=device)*0.01 def three(): return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), torch.zeros(num_hiddens, device=device)) W_xi, W_hi, b_i = three() # Input gate parameters W_xf, W_hf, b_f = three() # Forget gate parameters W_xo, W_ho, b_o = three() # Output gate parameters W_xc, W_hc, b_c = three() # Candidate memory cell parameters # Output layer parameters W_hq = normal((num_hiddens, num_outputs)) b_q = torch.zeros(num_outputs, device=device) # Attach gradients params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q] for param in params: param.requires_grad_(True) return params ###Output _____no_output_____ ###Markdown Defining the ModelIn the [**initialization function**], the hidden state of the LSTM needs to return an *additional* memory cell with value 0 and shape (batch size, number of hidden units). Hence we get the following state initialization. ###Code def init_lstm_state(batch_size, num_hiddens, device): return (torch.zeros((batch_size, num_hiddens), device=device), torch.zeros((batch_size, num_hiddens), device=device)) ###Output _____no_output_____ ###Markdown [**The actual model**] is defined just as we discussed above: provide three gates and an additional memory cell. Note that only the hidden state is passed to the output layer; the memory cell $\mathbf{C}_t$ does not directly participate in the output computation. ###Code def lstm(inputs, state, params): [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q] = params (H, C) = state outputs = [] for X in inputs: I = torch.sigmoid((X @ W_xi) + (H @ W_hi) + b_i) F = torch.sigmoid((X @ W_xf) + (H @ W_hf) + b_f) O = torch.sigmoid((X @ W_xo) + (H @ W_ho) + b_o) C_tilda = torch.tanh((X @ W_xc) + (H @ W_hc) + b_c) C = F * C + I * C_tilda H = O * torch.tanh(C) Y = (H @ W_hq) + b_q outputs.append(Y) return torch.cat(outputs, dim=0), (H, C) ###Output _____no_output_____ ###Markdown [**Training**] and PredictionLet us train an LSTM by instantiating the `RNNModelScratch` class introduced in :numref:`sec_rnn_scratch`, just as we did in :numref:`sec_gru`. ###Code vocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu() num_epochs, lr = 500, 1 model = d2l.RNNModelScratch(len(vocab), num_hiddens, device, get_lstm_params, init_lstm_state, lstm) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device) ###Output perplexity 1.1, 19827.3 tokens/sec on cuda:0 time traveller for so it will be convenient to speak of himwas e ###Markdown [**Concise Implementation**]Using the high-level API, we can directly instantiate an `LSTM` model. The high-level API encapsulates all the configuration details introduced above. This code runs much faster, since it uses compiled operators rather than Python for many of the details spelled out earlier. ###Code num_inputs = vocab_size lstm_layer = nn.LSTM(num_inputs, num_hiddens) model = d2l.RNNModel(lstm_layer, len(vocab)) model = model.to(device) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device) ###Output perplexity 1.0, 297699.3 tokens/sec on cuda:0 time traveller with a slight accession ofcheerfulness really thi traveller with a slight accession ofcheerfulness really thi
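###Markdown As a small sanity check (an addition, not part of the original chapter), the cell below pushes a dummy batch through the high-level `nn.LSTM` layer created above to make the shape bookkeeping concrete: the layer returns the hidden states for every time step plus the final $(\mathbf{H}, \mathbf{C})$ pair. ###Code
# Dummy input of shape (num_steps, batch_size, input_size), on the same device as lstm_layer
X_dummy = torch.rand(35, 4, num_inputs, device=device)
state0 = (torch.zeros(1, 4, num_hiddens, device=device),  # h_0: (num_layers, batch, hiddens)
          torch.zeros(1, 4, num_hiddens, device=device))  # c_0: same shape
Y, (H_n, C_n) = lstm_layer(X_dummy, state0)
print(Y.shape)               # hidden state for every step: (35, 4, num_hiddens)
print(H_n.shape, C_n.shape)  # final hidden state and memory cell: (1, 4, num_hiddens) each
###Output _____no_output_____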
Source/Chapter02/LinearModels/Regression/sgdRegressor.ipynb
###Markdown SGD stands for Stochastic Gradient Descent: the gradient of the loss is estimated each sample at a time and the model is updated along the way with a decreasing strength schedule (aka learning rate).The regularizer is a penalty added to the loss function that shrinks model parameters towards the zero vector using either the squared euclidean norm L2 or the absolute norm L1 or a combination of both (Elastic Net). If the parameter update crosses the 0.0 value because of the regularizer, the update is truncated to 0.0 to allow for learning sparse models and achieve online feature selection.This implementation works with data represented as dense numpy arrays of floating point values for the features. ###Code import matplotlib.pyplot as plt import mglearn from sklearn.linear_model import SGDRegressor from sklearn.model_selection import train_test_split %matplotlib inline X, y = mglearn.datasets.load_extended_boston() X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) sgdRegressor = SGDRegressor().fit(X_train, y_train) print("Training set score: {:.2f}".format(sgdRegressor.score(X_train, y_train))) print("Test set score: {:.2f}".format(sgdRegressor.score(X_test, y_test))) ###Output Training set score: 0.58 Test set score: 0.42 ###Markdown This is a really bad bad behavior ###Code # Testing with a different alpha sgdRegressor10 = SGDRegressor(alpha=10).fit(X_train, y_train) print("Training set score: {:.2f}".format(sgdRegressor10.score(X_train, y_train))) print("Test set score_ {:.2f}".format(sgdRegressor10.score(X_test, y_test))) ###Output Training set score: 0.07 Test set score_ 0.06 ###Markdown It's even worse ###Code # Testing with a different alpha sgdRegressor01 = SGDRegressor(alpha=0.001).fit(X_train, y_train) print("Training set score: {:.2f}".format(sgdRegressor01.score(X_train, y_train))) print("Test set score_ {:.2f}".format(sgdRegressor01.score(X_test, y_test))) plt.plot(sgdRegressor.coef_, 's', label="SGDRegressor alpha=0.000001") plt.plot(sgdRegressor10.coef_, '^', label="SGDRegressor alpha=10") plt.plot(sgdRegressor01.coef_, 'v', label="SGDRegressor alpha=.1") plt.xlabel("Coefficient index") plt.ylabel("Coefficient magnitude") plt.ylim(-25, 25) plt.legend() ###Output _____no_output_____ ###Markdown Trying to tune-in additional parameters to get the expected value, or at least a better aproximation ###Code sgdRegressorTuneIn1 = SGDRegressor(loss="epsilon_insensitive", penalty="l2", n_iter=1000).fit(X_train, y_train) print("Train score= {}".format(sgdRegressorTuneIn1.score(X_train, y_train))) print("Test score= {}".format(sgdRegressorTuneIn1.score(X_test, y_test))) ###Output /usr/lib64/python3.6/site-packages/sklearn/linear_model/stochastic_gradient.py:152: DeprecationWarning: n_iter parameter is deprecated in 0.19 and will be removed in 0.21. Use max_iter and tol instead. DeprecationWarning) ###Markdown It starts behaving better... ###Code sgdRegressorTuneIn1 = SGDRegressor(loss="epsilon_insensitive", penalty="l2", n_iter=100000) sgdRegressorTuneIn1.fit(X_train, y_train) print("Train score= {}".format(sgdRegressorTuneIn1.score(X_train, y_train))) print("Test score= {}".format(sgdRegressorTuneIn1.score(X_test, y_test))) ###Output /usr/lib64/python3.6/site-packages/sklearn/linear_model/stochastic_gradient.py:152: DeprecationWarning: n_iter parameter is deprecated in 0.19 and will be removed in 0.21. Use max_iter and tol instead. 
DeprecationWarning) ###Markdown Increasing the number of iterations is helping, but let's try to also tune alpha ###Code sgdRegressorTuneIn1 = SGDRegressor(loss="squared_loss", penalty="l2", alpha=0.000001, n_iter=100000) sgdRegressorTuneIn1.fit(X_train, y_train) print("Train score= {}".format(sgdRegressorTuneIn1.score(X_train, y_train))) print("Test score= {}".format(sgdRegressorTuneIn1.score(X_test, y_test))) sgdRegressorTuneIn1 = SGDRegressor(loss="squared_epsilon_insensitive", penalty="l2", alpha=0.000001, n_iter=100000) sgdRegressorTuneIn1.fit(X_train, y_train) print("Train score= {}".format(sgdRegressorTuneIn1.score(X_train, y_train))) print("Test score= {}".format(sgdRegressorTuneIn1.score(X_test, y_test))) sgdRegressorTuneIn1 = SGDRegressor(loss="huber", penalty="l2", alpha=0.000001, n_iter=100000) sgdRegressorTuneIn1.fit(X_train, y_train) print("Train score= {}".format(sgdRegressorTuneIn1.score(X_train, y_train))) print("Test score= {}".format(sgdRegressorTuneIn1.score(X_test, y_test))) sgdRegressorTuneIn1 = SGDRegressor(loss="epsilon_insensitive", penalty="l2", alpha=0.000001, n_iter=100000) sgdRegressorTuneIn1.fit(X_train, y_train) print("Train score= {}".format(sgdRegressorTuneIn1.score(X_train, y_train))) print("Test score= {}".format(sgdRegressorTuneIn1.score(X_test, y_test))) # After seeing the previous results, squared_loss is the one with the best performance in both, # training and test, so we try to tune it further sgdRegressorTuneIn1 = SGDRegressor(loss="squared_loss", penalty="l2", alpha=0.00001, n_iter=100000) sgdRegressorTuneIn1.fit(X_train, y_train) print("Train score= {}".format(sgdRegressorTuneIn1.score(X_train, y_train))) print("Test score= {}".format(sgdRegressorTuneIn1.score(X_test, y_test))) ###Output /usr/lib64/python3.6/site-packages/sklearn/linear_model/stochastic_gradient.py:152: DeprecationWarning: n_iter parameter is deprecated in 0.19 and will be removed in 0.21. Use max_iter and tol instead. DeprecationWarning)
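###Markdown The deprecation warnings above suggest switching from `n_iter` to `max_iter`/`tol`. The hedged sketch below does that and also standardizes the features first, since the scikit-learn documentation recommends scaling inputs for SGD-based estimators; whether this actually beats the runs above on this particular dataset is not claimed here. ###Code
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import SGDRegressor

# Same loss/penalty/alpha as the best run above, but with the non-deprecated API and scaling
sgd_scaled = make_pipeline(
    StandardScaler(),
    SGDRegressor(loss="squared_loss", penalty="l2", alpha=0.00001,
                 max_iter=100000, tol=1e-5, random_state=0))
sgd_scaled.fit(X_train, y_train)
print("Train score= {}".format(sgd_scaled.score(X_train, y_train)))
print("Test score= {}".format(sgd_scaled.score(X_test, y_test)))
###Output _____no_output_____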
ranzcr-ensemble.ipynb
###Markdown RANZCR_Ensemble![](https://storage.googleapis.com/kaggle-competitions/kaggle/23870/logos/header.png?t=2020-12-01-04-28-05)Serious complications can occur as a result of malpositioned lines and tubes in patients. Doctors and nurses frequently use checklists for placement of lifesaving equipment to ensure they follow protocol in managing patients. Yet, these steps can be time consuming and are still prone to human error, especially in stressful situations when hospitals are at capacity.Hospital patients can have catheters and lines inserted during the course of their admission and serious complications can arise if they are positioned incorrectly. Nasogastric tube malpositioning into the airways has been reported in up to 3% of cases, with up to 40% of these cases demonstrating complications [1-3]. Airway tube malposition in adult patients intubated outside the operating room is seen in up to 25% of cases [4,5]. The likelihood of complication is directly related to both the experience level and specialty of the proceduralist. Early recognition of malpositioned tubes is the key to preventing risky complications (even death), even more so now that millions of COVID-19 patients are in need of these tubes and lines.The gold standard for the confirmation of line and tube positions are chest radiographs. However, a physician or radiologist must manually check these chest x-rays to verify that the lines and tubes are in the optimal position. Not only does this leave room for human error, but delays are also common as radiologists can be busy reporting other scans. Deep learning algorithms may be able to automatically detect malpositioned catheters and lines. Once alerted, clinicians can reposition or remove them to avoid life-threatening complications. 
This ensemble includes models from 3 parts of the project:- Xception part(`ranzcr-xception-tpu-baseline.ipynb`, `ranzcr-xception-tpu-prediction.ipynb`)- EfficientNetB7 part(`effnetb7-tpu.ipynb`, `effnetb7-tpu-prediction.ipynb`)- EfficientNetB4_CV part(`ranzcr-efb4-cv-tr.ipynb`, `ranzcr-efb4-cv-pr.ipynb`) ###Code !pip install /kaggle/input/kerasapplications -q !pip install /kaggle/input/efficientnet-keras-source-code/ -q --no-deps import efficientnet.tfkeras as efn import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.model_selection import train_test_split import tensorflow as tf from tensorflow.keras import models, layers from tensorflow.keras.preprocessing import image from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau from tensorflow.keras.applications import Xception, EfficientNetB7, InceptionResNetV2 from tensorflow.keras.optimizers import Adam import tensorflow_addons as tfa # ignoring warnings import warnings warnings.simplefilter("ignore") import os, cv2 from PIL import Image # TPU or GPU detection # Detect hardware, return appropriate distribution strategy # try: # tpu = tf.distribute.cluster_resolver.TPUClusterResolver() # print(f'Running on TPU {tpu.master()}') # except ValueError: # tpu = None # if tpu: # tf.config.experimental_connect_to_cluster(tpu) # tf.tpu.experimental.initialize_tpu_system(tpu) # strategy = tf.distribute.experimental.TPUStrategy(tpu) # else: # strategy = tf.distribute.get_strategy() # AUTO = tf.data.experimental.AUTOTUNE # REPLICAS = strategy.num_replicas_in_sync # print(f'REPLICAS: {REPLICAS}') WORK_DIR = '../input/ranzcr-clip-catheter-line-classification' os.listdir(WORK_DIR) # Data train = pd.read_csv(os.path.join(WORK_DIR, "train.csv")) train_images = WORK_DIR + "/train/" + train['StudyInstanceUID'] + '.jpg' ss = pd.read_csv(os.path.join(WORK_DIR, 'sample_submission.csv')) test_images = WORK_DIR + "/test/" + ss['StudyInstanceUID'] + '.jpg' label_cols = ss.columns[1:] labels = train[label_cols].values train_annot = pd.read_csv(os.path.join(WORK_DIR, "train_annotations.csv")) print('Labels:\n', '*'*20, '\n', label_cols.values) print('*'*50) train.head() # Main parameters BATCH_SIZE = 8 * 1 STEPS_PER_EPOCH = len(train) * (1 / 7 * 6) / BATCH_SIZE VALIDATION_STEPS = len(train) * (1 / 7 * 1) / BATCH_SIZE EPOCHS = 30 TARGET_SIZE_1 = 550 TARGET_SIZE_2 = 750 ###Output _____no_output_____ ###Markdown EffNetB7 ###Code def build_decoder(with_labels = True, target_size = (TARGET_SIZE_2, TARGET_SIZE_2), ext = 'jpg'): def decode(path): file_bytes = tf.io.read_file(path) if ext == 'png': img = tf.image.decode_png(file_bytes, channels = 3) elif ext in ['jpg', 'jpeg']: img = tf.image.decode_jpeg(file_bytes, channels = 3) else: raise ValueError("Image extension not supported") img = tf.cast(img, tf.float32) / 255.0 img = tf.image.resize(img, target_size) return img def decode_with_labels(path, label): return decode(path), label return decode_with_labels if with_labels else decode def build_augmenter(with_labels = True): def augment(img): img = tf.image.random_flip_left_right(img) img = tf.image.random_flip_up_down(img) p_rotate = tf.random.uniform([], 0, 1.0, dtype = tf.float32) p_pixel_1 = tf.random.uniform([], 0, 1.0, dtype = tf.float32) p_pixel_2 = tf.random.uniform([], 0, 1.0, dtype = tf.float32) p_pixel_3 = tf.random.uniform([], 0, 1.0, dtype = tf.float32) # p_crop = tf.random.uniform([], 0, 1.0, dtype = tf.float32) if p_rotate > .75: img = tf.image.rot90(img, k = 3) # rotate 
270º elif p_rotate > .5: img = tf.image.rot90(img, k = 2) # rotate 180º elif p_rotate > .25: img = tf.image.rot90(img, k = 1) # rotate 90º if p_pixel_1 >= .6: img = tf.image.random_saturation(img, lower = 0.75, upper = 1.25) if p_pixel_2 >= .6: img = tf.image.random_contrast(img, lower = 0.75, upper = 1.25) if p_pixel_3 >= .4: img = tf.image.random_brightness(img, max_delta = 0.1) # if p_crop > .7: # if p_crop > .9: # img = tf.image.central_crop(img, central_fraction=.75) # elif p_crop > .8: # img = tf.image.central_crop(img, central_fraction=.85) # else: # img = tf.image.central_crop(img, central_fraction=.95) # elif p_crop > .4: # crop_size = tf.random.uniform([], int(TARGET_SIZE * .85), TARGET_SIZE, dtype = tf.int32) # img = tf.image.random_crop(img, size = [crop_size, crop_size, 3]) # img = tf.image.resize(img, size = [TARGET_SIZE, TARGET_SIZE]) return img def augment_with_labels(img, label): return augment(img), label return augment_with_labels if with_labels else augment def build_dataset(paths, labels = None, bsize = 32, cache = True, decode_fn = None, augment_fn = None, augment = True, repeat = True, shuffle = 1024, cache_dir = ""): if cache_dir != "" and cache is True: os.makedirs(cache_dir, exist_ok=True) if decode_fn is None: decode_fn = build_decoder(labels is not None) if augment_fn is None: augment_fn = build_augmenter(labels is not None) AUTO = tf.data.experimental.AUTOTUNE slices = paths if labels is None else (paths, labels) dset = tf.data.Dataset.from_tensor_slices(slices) dset = dset.map(decode_fn, num_parallel_calls = AUTO) dset = dset.cache(cache_dir) if cache else dset dset = dset.map(augment_fn, num_parallel_calls = AUTO) if augment else dset dset = dset.repeat() if repeat else dset dset = dset.shuffle(shuffle) if shuffle else dset dset = dset.batch(bsize).prefetch(AUTO) return dset test_df_B7 = build_dataset( test_images, bsize = BATCH_SIZE, repeat = False, shuffle = False, augment = False, cache = False) test_df_B7 ###Output _____no_output_____ ###Markdown Xception ###Code def build_decoder(with_labels = True, target_size = (TARGET_SIZE_2, TARGET_SIZE_2), ext = 'jpg'): def decode(path): file_bytes = tf.io.read_file(path) if ext == 'png': img = tf.image.decode_png(file_bytes, channels = 3) elif ext in ['jpg', 'jpeg']: img = tf.image.decode_jpeg(file_bytes, channels = 3) else: raise ValueError("Image extension not supported") img = tf.cast(img, tf.float32) / 255.0 img = tf.image.resize(img, target_size) return img def decode_with_labels(path, label): return decode(path), label return decode_with_labels if with_labels else decode def build_augmenter(with_labels = True): def augment(img): img = tf.image.random_flip_left_right(img) img = tf.image.random_flip_up_down(img) img = tf.image.adjust_brightness(img, 0.1) # rotate = tf.random.uniform([], 0, 1.0, dtype = tf.float32) # if rotate > .75: # img = tf.image.rot90(img, k = 3) # elif rotate > .5: # img = tf.image.rot90(img, k = 2) # elif rotate > .25: # img = tf.image.rot90(img, k = 1) # saturation = tf.random.uniform([], 0, 1.0, dtype = tf.float32) # if saturation >= .5: # img = tf.image.random_saturation(img, lower = 0.9, upper = 1.1) # contrast = tf.random.uniform([], 0, 1.0, dtype = tf.float32) # if contrast >= .5: # img = tf.image.random_contrast(img, lower = 0.9, upper = 1.1) # brightness = tf.random.uniform([], 0, 1.0, dtype = tf.float32) # if brightness >= .5: # img = tf.image.random_brightness(img, max_delta = 0.1) return img def augment_with_labels(img, label): return augment(img), label return 
augment_with_labels if with_labels else augment def build_dataset(paths, labels = None, bsize = 32, cache = True, decode_fn = None, augment_fn = None, augment = True, repeat = True, shuffle = 1024, cache_dir = ""): if cache_dir != "" and cache is True: os.makedirs(cache_dir, exist_ok=True) if decode_fn is None: decode_fn = build_decoder(labels is not None) if augment_fn is None: augment_fn = build_augmenter(labels is not None) AUTO = tf.data.experimental.AUTOTUNE slices = paths if labels is None else (paths, labels) dset = tf.data.Dataset.from_tensor_slices(slices) dset = dset.map(decode_fn, num_parallel_calls = AUTO) dset = dset.cache(cache_dir) if cache else dset dset = dset.map(augment_fn, num_parallel_calls = AUTO) if augment else dset dset = dset.repeat() if repeat else dset dset = dset.shuffle(shuffle) if shuffle else dset dset = dset.batch(bsize).prefetch(AUTO) return dset test_df_Xcep = build_dataset( test_images, bsize = BATCH_SIZE, repeat = False, shuffle = False, augment = False, cache = False) test_df_Xcep ###Output _____no_output_____ ###Markdown EffNetB4_CV ###Code def build_decoder(with_labels = True, target_size = (TARGET_SIZE_1, TARGET_SIZE_1), ext = 'jpg'): def decode(path): file_bytes = tf.io.read_file(path) if ext == 'png': img = tf.image.decode_png(file_bytes, channels = 3) elif ext in ['jpg', 'jpeg']: img = tf.image.decode_jpeg(file_bytes, channels = 3) else: raise ValueError("Image extension not supported") img = tf.cast(img, tf.float32) / 255.0 img = tf.image.resize(img, target_size) return img def decode_with_labels(path, label): return decode(path), label return decode_with_labels if with_labels else decode def build_augmenter(with_labels = True): def augment(img): img = tf.image.random_flip_left_right(img) img = tf.image.random_flip_up_down(img) p_rotate = tf.random.uniform([], 0, 1.0, dtype = tf.float32) p_pixel_1 = tf.random.uniform([], 0, 1.0, dtype = tf.float32) p_pixel_2 = tf.random.uniform([], 0, 1.0, dtype = tf.float32) p_pixel_3 = tf.random.uniform([], 0, 1.0, dtype = tf.float32) # p_crop = tf.random.uniform([], 0, 1.0, dtype = tf.float32) if p_rotate > .75: img = tf.image.rot90(img, k = 3) # rotate 270º elif p_rotate > .5: img = tf.image.rot90(img, k = 2) # rotate 180º elif p_rotate > .25: img = tf.image.rot90(img, k = 1) # rotate 90º if p_pixel_1 >= .6: img = tf.image.random_saturation(img, lower = 0.75, upper = 1.25) if p_pixel_2 >= .6: img = tf.image.random_contrast(img, lower = 0.75, upper = 1.25) if p_pixel_3 >= .4: img = tf.image.random_brightness(img, max_delta = 0.1) # if p_crop > .7: # if p_crop > .9: # img = tf.image.central_crop(img, central_fraction=.75) # elif p_crop > .8: # img = tf.image.central_crop(img, central_fraction=.85) # else: # img = tf.image.central_crop(img, central_fraction=.95) # elif p_crop > .4: # crop_size = tf.random.uniform([], int(TARGET_SIZE * .85), TARGET_SIZE, dtype = tf.int32) # img = tf.image.random_crop(img, size = [crop_size, crop_size, 3]) # img = tf.image.resize(img, size = [TARGET_SIZE, TARGET_SIZE]) return img def augment_with_labels(img, label): return augment(img), label return augment_with_labels if with_labels else augment def build_dataset(paths, labels = None, bsize = 32, cache = True, decode_fn = None, augment_fn = None, augment = True, repeat = True, shuffle = 1024, cache_dir = ""): if cache_dir != "" and cache is True: os.makedirs(cache_dir, exist_ok=True) if decode_fn is None: decode_fn = build_decoder(labels is not None) if augment_fn is None: augment_fn = build_augmenter(labels is not None) AUTO = 
tf.data.experimental.AUTOTUNE slices = paths if labels is None else (paths, labels) dset = tf.data.Dataset.from_tensor_slices(slices) dset = dset.map(decode_fn, num_parallel_calls = AUTO) dset = dset.cache(cache_dir) if cache else dset dset = dset.map(augment_fn, num_parallel_calls = AUTO) if augment else dset dset = dset.repeat() if repeat else dset dset = dset.shuffle(shuffle) if shuffle else dset dset = dset.batch(bsize).prefetch(AUTO) return dset test_df_B4 = build_dataset( test_images, bsize = BATCH_SIZE, repeat = False, shuffle = False, augment = False, cache = False) test_df_B4 ###Output _____no_output_____ ###Markdown EffNetB7_architecture ###Code # def create_model(): # conv_base = efn.EfficientNetB7(include_top = False, weights = 'imagenet', # input_shape = (TARGET_SIZE, TARGET_SIZE, 3)) # model = conv_base.output # model = layers.GlobalAveragePooling2D()(model) # model = layers.Dropout(0.5)(model) # model = layers.Dense(11, activation = "sigmoid")(model) # model = models.Model(conv_base.input, model) # model.compile(optimizer = Adam(lr = 0.00025), # loss = [tfa.losses.SigmoidFocalCrossEntropy(alpha = 0.5, gamma = 2)], # metrics = [tf.keras.metrics.AUC(multi_label = True)]) # return model ###Output _____no_output_____ ###Markdown Xception_architecture ###Code # def create_model(): # conv_base = Xception(include_top = False, weights = 'imagenet', # input_shape = (TARGET_SIZE, TARGET_SIZE, 3)) # model = conv_base.output # model = layers.GlobalAveragePooling2D()(model) # model = layers.Dropout(0.3)(model) # model = layers.Dense(11, activation = "sigmoid")(model) # model = models.Model(conv_base.input, model) # model.compile(optimizer = Adam(lr = 0.001), # loss = tfa.losses.SigmoidFocalCrossEntropy(alpha = 0.5, gamma = 2), # metrics = [tf.keras.metrics.AUC(multi_label = True)]) # return model ###Output _____no_output_____ ###Markdown EffNetB4_architecture ###Code # def create_model(): # conv_base = efn.EfficientNetB4(include_top = False, weights = None, # input_shape = (TARGET_SIZE, TARGET_SIZE, 3)) # model = conv_base.output # model = layers.GlobalAveragePooling2D()(model) # model = layers.Dropout(0.5)(model) # model = layers.Dense(11, activation = "sigmoid")(model) # model = models.Model(conv_base.input, model) # model.compile(optimizer = Adam(lr = 0.0002), # loss = tfa.losses.SigmoidFocalCrossEntropy(alpha = 0.5, gamma = 2), # metrics = [tf.keras.metrics.AUC(multi_label = True)]) # return model ###Output _____no_output_____ ###Markdown Prediction ###Code # B7 model_B7 = models.load_model('../input/effnetb7-tpu/Efnet_750_B7_TPU.h5') ss[label_cols] += model_B7.predict(test_df_B7, verbose = 1) * 0.5 # Xcep model_Xcep = models.load_model('../input/ranzcr-xception-tpu-baseline/Xception_750_TPU.h5') ss[label_cols] += model_Xcep.predict(test_df_Xcep, verbose = 1) * 0.3 # B4_CV model_0 = models.load_model('../input/ranzcr-efb4-cv-tr/EfB4_550_0.h5') model_1 = models.load_model('../input/ranzcr-efb4-cv-tr/EfB4_550_1.h5') model_2 = models.load_model('../input/ranzcr-efb4-cv-tr/EfB4_550_2.h5') model_3 = models.load_model('../input/ranzcr-efb4-cv-tr/EfB4_550_3.h5') model_4 = models.load_model('../input/ranzcr-efb4-cv-tr/EfB4_550_4.h5') model_5 = models.load_model('../input/ranzcr-efb4-cv-tr/EfB4_550_5.h5') model_6 = models.load_model('../input/ranzcr-efb4-cv-tr/EfB4_550_6.h5') models_b4 = [model_0, model_1, model_2, model_3, model_4, model_5, model_6] FOLDS = 7 i = 1 for model in models_b4: print('Predicting model #{}'.format(i)) i += 1 ss[label_cols] += model.predict(test_df_B4, verbose = 1) 
/ FOLDS * 0.2 ###Output Predicting model #1 448/448 [==============================] - 241s 538ms/step Predicting model #2 448/448 [==============================] - 243s 543ms/step Predicting model #3 448/448 [==============================] - 243s 542ms/step Predicting model #4 448/448 [==============================] - 244s 544ms/step Predicting model #5 448/448 [==============================] - 243s 543ms/step Predicting model #6 448/448 [==============================] - 245s 548ms/step Predicting model #7 448/448 [==============================] - 245s 547ms/step ###Markdown FINAL SUBMISSION (Private Score - 0.96117) ###Code ss.to_csv('submission.csv', index = False) ss ###Output _____no_output_____
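###Markdown A quick arithmetic check of the blend implied by the prediction cells above: EfficientNetB7 contributes with weight 0.5, Xception with 0.3, and each of the 7 EfficientNetB4 folds with (1/FOLDS) * 0.2, so the fold-averaged B4 part carries 0.2 overall and the three model families form a convex combination. ###Code
# Sanity check of the ensemble weights used above (illustrative only)
FOLDS = 7
w_b7, w_xcep, w_b4_total = 0.5, 0.3, 0.2
per_fold_b4 = (1 / FOLDS) * w_b4_total   # weight applied to each B4 fold prediction
total = w_b7 + w_xcep + FOLDS * per_fold_b4
print(per_fold_b4)   # roughly 0.0286 per fold
print(total)         # approximately 1.0 (up to floating point rounding)
###Output _____no_output_____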
src/Yolo_Thread/examples.ipynb
###Markdown This notebook contains software developed by Ultralytics LLC, and **is freely available for redistribution under the GPL-3.0 license**. For more information please visit https://github.com/ultralytics/yolov3 and https://www.ultralytics.com. ###Code import time import glob import torch import os from IPython.display import Image, clear_output print('PyTorch %s %s' % (torch.__version__, torch.cuda.get_device_properties(0) if torch.cuda.is_available() else 'CPU')) ###Output PyTorch 1.1.0 _CudaDeviceProperties(name='Tesla K80', major=3, minor=7, total_memory=11441MB, multi_processor_count=13) ###Markdown Clone repository and download COCO 2014 dataset (20GB): ###Code !git clone https://github.com/ultralytics/yolov3 # clone !bash yolov3/data/get_coco_dataset_gdrive.sh # copy COCO2014 dataset (19GB) %cd yolov3 ###Output Cloning into 'yolov3'... remote: Enumerating objects: 61, done. remote: Counting objects: 100% (61/61), done. remote: Compressing objects: 100% (44/44), done. remote: Total 4781 (delta 35), reused 37 (delta 17), pack-reused 4720 Receiving objects: 100% (4781/4781), 4.74 MiB | 6.95 MiB/s, done. Resolving deltas: 100% (3254/3254), done. % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 388 0 388 0 0 2455 0 --:--:-- --:--:-- --:--:-- 2440 100 18.8G 0 18.8G 0 0 189M 0 --:--:-- 0:01:42 --:--:-- 174M /content/yolov3 ###Markdown Run `detect.py` to perform inference on images in `data/samples` folder: ###Code !python3 detect.py # Image(filename='output/zidane.jpg', width=600) ###Output Namespace(cfg='cfg/yolov3-spp.cfg', conf_thres=0.3, data='data/coco.data', device='', fourcc='mp4v', half=False, img_size=416, nms_thres=0.5, output='output', source='data/samples', view_img=False, weights='weights/yolov3-spp.weights') Using CPU p*** cfg/yolov3-spp.cfg image 1/11 data/samples/bird1.jpeg: 256x416 1 birds, Done. (0.503s) image 2/11 data/samples/bird2.jpeg: 288x416 8 birds, Done. (0.510s) image 3/11 data/samples/bird3.jpeg: 288x416 3 birds, Done. (0.694s) image 4/11 data/samples/bird4.jpeg: 320x416 1 birds, Done. (0.673s) image 5/11 data/samples/bird5.jpeg: 288x416 10 birds, Done. (0.608s) image 6/11 data/samples/bird6.jpeg: 416x416 Done. (0.717s) image 7/11 data/samples/bus.jpg: 416x320 4 persons, 1 buss, 1 handbags, Done. (0.592s) image 8/11 data/samples/mybird1.jpg: 416x320 1 birds, Done. (0.601s) image 9/11 data/samples/mybird2.jpg: 416x320 Done. (0.684s) image 10/11 data/samples/myvird3.jpg: 288x416 Done. (0.502s) image 11/11 data/samples/zidane.jpg: 256x416 2 persons, 1 ties, Done. (0.467s) Results saved to /home/labmin/Desktop/YOLOv3-complete-pruning-master/output Done. 
(6.897s) ###Markdown Run `train.py` to train YOLOv3-SPP starting from a darknet53 backbone: ###Code !python3 train.py --data data/coco_64img.data --img-size 320 --epochs 3 --nosave ###Output _____no_output_____ ###Markdown Run `test.py` to evaluate the performance of a trained darknet or PyTorch model: ###Code !python3 test.py --data data/coco.data --save-json --img-size 416 # 0.565 mAP ###Output Namespace(batch_size=16, cfg='cfg/yolov3-spp.cfg', conf_thres=0.001, data='data/coco.data', img_size=416, iou_thres=0.5, nms_thres=0.5, save_json=True, weights='weights/yolov3-spp.weights') Using CUDA device0 _CudaDeviceProperties(name='Tesla K80', total_memory=11441MB) Downloading https://pjreddie.com/media/files/yolov3-spp.weights % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 240M 100 240M 0 0 17.9M 0 0:00:13 0:00:13 --:--:-- 20.3M Class Images Targets P R mAP F1: 100% 313/313 [11:14<00:00, 3.02s/it] all 5e+03 3.58e+04 0.107 0.749 0.557 0.182 person 5e+03 1.09e+04 0.138 0.846 0.723 0.238 bicycle 5e+03 316 0.0663 0.696 0.474 0.121 car 5e+03 1.67e+03 0.0682 0.781 0.586 0.125 motorcycle 5e+03 391 0.149 0.785 0.657 0.25 airplane 5e+03 131 0.17 0.931 0.853 0.287 bus 5e+03 261 0.177 0.824 0.778 0.291 train 5e+03 212 0.18 0.892 0.832 0.3 truck 5e+03 352 0.106 0.656 0.497 0.183 boat 5e+03 475 0.0851 0.724 0.483 0.152 traffic light 5e+03 516 0.0448 0.723 0.485 0.0844 fire hydrant 5e+03 83 0.183 0.904 0.861 0.304 stop sign 5e+03 84 0.0838 0.881 0.791 0.153 parking meter 5e+03 59 0.066 0.627 0.508 0.119 bench 5e+03 473 0.0329 0.609 0.338 0.0625 bird 5e+03 469 0.0836 0.623 0.47 0.147 cat 5e+03 195 0.275 0.821 0.735 0.412 dog 5e+03 223 0.219 0.834 0.771 0.347 horse 5e+03 305 0.149 0.872 0.806 0.254 sheep 5e+03 321 0.199 0.822 0.693 0.321 cow 5e+03 384 0.155 0.753 0.65 0.258 elephant 5e+03 284 0.219 0.933 0.897 0.354 bear 5e+03 53 0.414 0.868 0.837 0.561 zebra 5e+03 277 0.205 0.884 0.831 0.333 giraffe 5e+03 170 0.202 0.929 0.882 0.331 backpack 5e+03 384 0.0457 0.63 0.333 0.0853 umbrella 5e+03 392 0.0874 0.819 0.596 0.158 handbag 5e+03 483 0.0244 0.592 0.214 0.0468 tie 5e+03 297 0.0611 0.727 0.492 0.113 suitcase 5e+03 310 0.13 0.803 0.56 0.223 frisbee 5e+03 109 0.134 0.862 0.778 0.232 skis 5e+03 282 0.0624 0.695 0.406 0.114 snowboard 5e+03 92 0.0958 0.717 0.504 0.169 sports ball 5e+03 236 0.0715 0.716 0.622 0.13 kite 5e+03 399 0.142 0.744 0.533 0.238 baseball bat 5e+03 125 0.0807 0.712 0.576 0.145 baseball glove 5e+03 139 0.0606 0.655 0.482 0.111 skateboard 5e+03 218 0.0926 0.794 0.684 0.166 surfboard 5e+03 266 0.0806 0.789 0.606 0.146 tennis racket 5e+03 183 0.106 0.836 0.734 0.188 bottle 5e+03 966 0.0653 0.712 0.441 0.12 wine glass 5e+03 366 0.0912 0.667 0.49 0.161 cup 5e+03 897 0.0707 0.708 0.486 0.128 fork 5e+03 234 0.0521 0.594 0.404 0.0958 knife 5e+03 291 0.0375 0.526 0.266 0.0701 spoon 5e+03 253 0.0309 0.553 0.22 0.0585 bowl 5e+03 620 0.0754 0.763 0.492 0.137 banana 5e+03 371 0.0922 0.69 0.368 0.163 apple 5e+03 158 0.0492 0.639 0.227 0.0914 sandwich 5e+03 160 0.104 0.662 0.454 0.179 orange 5e+03 189 0.052 0.598 0.265 0.0958 broccoli 5e+03 332 0.0898 0.774 0.373 0.161 carrot 5e+03 346 0.0534 0.659 0.272 0.0989 hot dog 5e+03 164 0.121 0.604 0.484 0.201 pizza 5e+03 224 0.109 0.804 0.637 0.192 donut 5e+03 237 0.149 0.755 0.594 0.249 cake 5e+03 241 0.0964 0.643 0.495 0.168 chair 5e+03 1.62e+03 0.0597 0.712 0.424 0.11 couch 5e+03 236 0.125 0.767 0.567 0.214 potted plant 5e+03 431 0.0531 0.791 0.473 0.0996 bed 5e+03 195 0.185 0.826 0.725 0.302 dining 
table 5e+03 634 0.062 0.801 0.502 0.115 toilet 5e+03 179 0.209 0.95 0.835 0.342 tv 5e+03 257 0.115 0.922 0.773 0.204 laptop 5e+03 237 0.172 0.814 0.714 0.284 mouse 5e+03 95 0.0716 0.853 0.696 0.132 remote 5e+03 241 0.058 0.772 0.506 0.108 keyboard 5e+03 117 0.0813 0.897 0.7 0.149 cell phone 5e+03 291 0.0381 0.646 0.396 0.072 microwave 5e+03 88 0.155 0.841 0.727 0.262 oven 5e+03 142 0.073 0.824 0.556 0.134 toaster 5e+03 11 0.121 0.636 0.212 0.203 sink 5e+03 211 0.0581 0.848 0.579 0.109 refrigerator 5e+03 107 0.0827 0.897 0.755 0.151 book 5e+03 1.08e+03 0.0519 0.564 0.166 0.0951 clock 5e+03 292 0.083 0.818 0.731 0.151 vase 5e+03 353 0.0817 0.745 0.522 0.147 scissors 5e+03 56 0.0494 0.625 0.427 0.0915 teddy bear 5e+03 245 0.14 0.816 0.635 0.24 hair drier 5e+03 11 0.0714 0.273 0.106 0.113 toothbrush 5e+03 77 0.043 0.61 0.305 0.0803 loading annotations into memory... Done (t=5.40s) creating index... index created! Loading and preparing results... DONE (t=2.65s) creating index... index created! Running per image evaluation... Evaluate annotation type *bbox* DONE (t=58.87s). Accumulating evaluation results... DONE (t=7.76s). Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.337 Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.568 Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.350 Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.152 Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.359 Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.496 Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.279 Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.432 Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.460 Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.257 Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.494 Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.623 ###Markdown Reproduce tutorial training runs and plot training results: ###Code !python3 train.py --data data/coco_16img.data --batch-size 16 --accumulate 1 --nosave && mv results.txt results_coco_16img.txt # CUSTOM TRAINING EXAMPLE !python3 train.py --data data/coco_64img.data --batch-size 16 --accumulate 1 --nosave && mv results.txt results_coco_64img.txt !python3 -c "from utils import utils; utils.plot_results()" # plot training results Image(filename='results.png', width=800) ###Output _____no_output_____ ###Markdown Extras below--- ###Code !git pull %cd yolov3 %ls # Unit Tests !python3 detect.py # detect 2 persons, 1 tie !python3 test.py --data data/coco_32img.data # test mAP = 0.8 !python3 train.py --data data/coco_32img.data --epochs 3 --nosave # train 3 epochs # Evolve Hyperparameters !python3 train.py --data data/coco.data --img-size 320 --epochs 1 --evolve ###Output _____no_output_____
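###Markdown After `detect.py` runs, the annotated images are written to the `output` folder (see the "Results saved to" line in the detection log above). The small hedged snippet below simply displays a few of them inline, reusing the `glob` and `Image` imports from the top of the notebook; the exact filenames and extensions depend on the input images. ###Code
from IPython.display import display, Image
import glob

# Show the first few annotated detections saved by detect.py
for fn in sorted(glob.glob('output/*.jp*g'))[:3]:
    display(Image(filename=fn, width=600))
###Output _____no_output_____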
python/module-2-sample-deviation.ipynb
###Markdown Module 2 Sample Mean and Sample Standard Deviation ###Code import numpy as np from math import sqrt ###Output _____no_output_____ ###Markdown Pb 6.1 Setup ###Code X = [500, 2000, 2500, 5000, 10000] n = [96, 107, 130, 89, 78] n_groups = len(n) n_people = sum(n) print("n_groups=", n_groups, " n_people=", n_people) ###Output n_groups= 5 n_people= 500 ###Markdown Sample Mean ###Code x_bar = sum([Xi*ni for Xi, ni in zip(X, n)])/n_people print("x_bar=", x_bar) ###Output x_bar= 3624.0 ###Markdown Equation 6.3 from Montgomery probably looks tempting, right? It turns out you can't use this directly on the table of values given. Why? Because group data is given, whereas Equation 6.3 sums over individuals. Sample Standard Deviation (Long Way) A simple approach is to convert the group data to data of individuals. This is not the best approach, but it illustrates use of Eq 6.3. ###Code X[0] [X[0]]*5 print([X[0]]*n[0]) print([[xsub]*nsub for xsub, nsub in zip(X, n)]) ###Output [[500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500], [2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000], [2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500], [5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000], [10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 
10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000]] ###Markdown This gives us a (ragged) list of lists; however, a flattened version will be easier to deal with. From a quick Google search "flatten ragged list of lists", we find an example of how to do this and some sources:https://stackabuse.com/python-how-to-flatten-list-of-lists/https://stackoverflow.com/questions/9057379/correct-and-efficient-way-to-flatten-array-in-numpy-in-pythonhttps://stackoverflow.com/questions/2158395/flatten-an-irregular-list-of-lists ###Code def flatten(regular_list): return [item for sublist in regular_list for item in sublist] print(flatten(individuals)) ###Output [500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 2500, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 
10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000] ###Markdown All together, this would look like: ###Code def expand_groups(X, n): persons = flatten([[xsub]*nsub for xsub, nsub in zip(X, n)]) return persons x = expand_groups(X, n) print("length=",len(x)) print("mean=",np.mean(x)) ###Output length= 500 mean= 3624.0 ###Markdown Now that we have a flat list of all 500 people and their corresponding deposits, we can plug this directly into Eq 6.3 from the book: ![image.png](attachment:image.png) ###Code std = sqrt(sum([(xi-x_bar)**2 for xi in x])/(n_people-1)) print("sample standard deviation=", std) ###Output sample standard deviation= 3076.451382065451 ###Markdown Sample Standard Deviation (Better Way) Analytical Formula If you keep reading in Section 6.1 of the textbook, however, it turns there is an easier way to calculate this (Eq 6.4). ![image.png](attachment:image.png) Now that the $x_i$ no longer appear directly next to $\bar{x}$, and because for a single group with m people, we can write: $\sum_{i=1}^{m} x^{2}=m x^{2}$ we can rewrite the following in terms of "groups": $\sum_{i=1}^{n_{people}} x_{i}^{2} = \sum_{i=1}^{n_{groups}} n_i X_{i}^{2}$ and $\sum_{i=1}^{n_{people}} x_{i} = \sum_{i=1}^{n_{groups}} n_i X_{i}$ Substituting this back in, we get: $\frac{\sum_{i=1}^{n_{people}} x_{i}^{2}-\frac{\left(\sum_{i=1}^{n_{people}} x_{i}\right)^{2}}{n_{people}}}{n_{people}-1} = \frac{\sum_{i=1}^{n_{groups}} n_i X_{i}^{2}-\frac{\left(\sum_{i=1}^{n_{groups}} (X_{i}n_i)\right)^{2}}{n_{people}}}{n_{people}-1}$ Coded Version And finally, to convert the RHS of this equation to code: ###Code def frequency_sample_std_dev(X, n): """Sample standard deviation for X and n, where X[i] is the quantity each person in group i has, and n[i] is the number of people in group i.""" n_groups = len(n) n_people = sum(n) lhs_numerator = sum([ni*Xi**2 for Xi, ni in zip(X, n)]) rhs_numerator = sum([Xi*ni for Xi, ni in zip(X,n)])**2/n_people denominator = n_people-1 var = (lhs_numerator - rhs_numerator) / denominator std = sqrt(var) return std std = frequency_sample_std_dev(X, n) print(std) ###Output 3076.451382065451 ###Markdown Example with More Data If we had an entirely different set of values or even a different problem with the same format, we can compute this simply as: ###Code X2 = [500, 2000, 2500, 5000, 10000, 12000, 16000, 20000, 22500] n2 = [96, 107, 130, 89, 78, 48, 158, 62, 95] std2 = frequency_sample_std_dev(X2, n2) print("sample standard deviation=",std2) ###Output sample standard deviation= 7755.100282255846 ###Markdown Other Code Sources This technique is simple enough that it's likely someone has already written some code and made it publicly available. Using a few different search queries: "sample standard deviation python", "sample standard deviation with repeats python", and finally settling on the following webpage: [Weighted standard deviation in NumPy](https://stackoverflow.com/questions/2413522/weighted-standard-deviation-in-numpy). 
Having the right keywords is very important (a search for weighted standard deviation would quickly pull up what we're looking for), and we also need to make sure that the code we're using is appropriate for the use-case. Here are a few examples from the linked StackOverflow post: Population Average and Standard Deviation ###Code def weighted_avg_and_std(values, weights): """ Return the weighted average and standard deviation. values, weights -- Numpy ndarrays with the same shape. """ average = np.average(values, weights=weights) # Fast and numerically precise: variance = np.average((values-average)**2, weights=weights) return (average, sqrt(variance)) print(weighted_avg_and_std(X, n)) ###Output (3624.0, 3073.3733909175435) ###Markdown Sample Average and Standard Deviation Note that this gave a "population" standard deviation rather than a "sample" standard deviation. This can be corrected as follows: ###Code def weighted_sample_avg_std(values, weights): """ Return the weighted average and weighted sample standard deviation. values, weights -- Numpy ndarrays with the same shape. Assumes that weights contains only integers (e.g. how many samples in each group). See also https://en.wikipedia.org/wiki/Weighted_arithmetic_mean#Frequency_weights """ average = np.average(values, weights=weights) variance = np.average((values-average)**2, weights=weights) variance = variance*sum(weights)/(sum(weights)-1) return (average, sqrt(variance)) print(weighted_sample_avg_std(X, n)) ###Output (3624.0, 3076.451382065451)
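A quick way to sanity-check `frequency_sample_std_dev` is to expand the grouped data with `np.repeat` (which does in one call what `expand_groups` does above) and pass `ddof=1` to `np.std` so that the sample rather than the population statistic is returned. This is only a cross-check sketch; it assumes the `X` and `n` lists defined at the top of the notebook.

```python
import numpy as np

X = [500, 2000, 2500, 5000, 10000]
n = [96, 107, 130, 89, 78]

# np.repeat expands the grouped data into one value per person,
# exactly like expand_groups() above.
x_all = np.repeat(X, n)

print(x_all.mean())       # 3624.0
print(x_all.std(ddof=1))  # 3076.45..., the sample standard deviation
```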
B_Submissions_Kopuru_competition/2021-05-19_submit/Batch_RandomForest/workerbee05_HEX_years.ipynb
###Markdown Random Forest Model (months) with previous clustering In preparation for the May 19th submission to the Kopuru challenge ###Code # Base packages ----------------------------------- import pandas as pd import numpy as np from matplotlib import pyplot as plt # Linear Regression ------------------------------- from statsmodels.formula.api import ols # SKLearn ----------------------------------------- from sklearn.model_selection import train_test_split # Random Forest Regressort------------------------ from sklearn.ensemble import RandomForestRegressor ###Output _____no_output_____ ###Markdown Loading and massaging data ###Code # Importing datasets from GitHub as Pandas Dataframes queen_train = pd.read_csv("../Feeder_years/WBds03_QUEENtrain_years.csv") #2018+2019 test df queen_predict = pd.read_csv("../Feeder_years/WBds03_QUEENpredict_years.csv") #2018+2019 test df queen_cluster = pd.read_csv("../../../Other_open_data/cluster.csv") queen_train = queen_train.loc[queen_train.year_x == 2019,:].copy(True) # Adding cluster labels queen_train = pd.merge(queen_train, queen_cluster, how = 'left', left_on = 'municip_code', right_on = 'CODIGO MUNICIPIO') queen_predict = pd.merge(queen_predict, queen_cluster, how = 'left', left_on = 'municip_code', right_on = 'CODIGO MUNICIPIO') queen_train.columns queen_train.drop(columns=['year_x','year_offset','species','municip_name','municip_code','station_code','CODIGO MUNICIPIO'], inplace=True) ###Output _____no_output_____ ###Markdown Model ###Code # Instantiate the models with 100 decision trees rf_0 = RandomForestRegressor(n_estimators = 1000, random_state = 42) rf_1 = RandomForestRegressor(n_estimators = 1000, random_state = 42) rf_2 = RandomForestRegressor(n_estimators = 1000, random_state = 42) rf_3 = RandomForestRegressor(n_estimators = 1000, random_state = 42) # Train the model by cluster train_x_0 = queen_train.loc[queen_train.Cluster == 0 ,:].drop('waspbust_id', axis = 1) train_y_0 = queen_train.loc[queen_train.Cluster == 0 ,:].loc[:, 'waspbust_id'] train_x_1 = queen_train.loc[queen_train.Cluster == 1 ,:].drop('waspbust_id', axis = 1) train_y_1 = queen_train.loc[queen_train.Cluster == 1 ,:].loc[:, 'waspbust_id'] train_x_2 = queen_train.loc[queen_train.Cluster == 2 ,:].drop('waspbust_id', axis = 1) train_y_2 = queen_train.loc[queen_train.Cluster == 2 ,:].loc[:, 'waspbust_id'] train_x_3 = queen_train.loc[queen_train.Cluster == 3 ,:].drop('waspbust_id', axis = 1) train_y_3 = queen_train.loc[queen_train.Cluster == 3 ,:].loc[:, 'waspbust_id'] rf_0.fit(train_x_0, train_y_0) rf_1.fit(train_x_1, train_y_1) rf_2.fit(train_x_2, train_y_2) rf_3.fit(train_x_3, train_y_3) ###Output _____no_output_____ ###Markdown Feature importance ###Code # Get numerical feature importances feature_list_0 = list(train_x_0.columns) # List of tuples with variable and importance importances_0 = list(rf_0.feature_importances_) feature_importances_0 = [(feature, round(importance, 4)) for feature, importance in zip(feature_list_0, importances_0)] # Sort the feature importances by most important first feature_importances_0 = sorted(feature_importances_0, key = lambda x: x[1], reverse = True) # Print out the feature and importances [print('Variable: {:20} Importance: {}'.format(*pair)) for pair in feature_importances_0]; # Get numerical feature importances feature_list_1 = list(train_x_1.columns) # List of tuples with variable and importance importances_1 = list(rf_1.feature_importances_) feature_importances_1 = [(feature, round(importance, 4)) for feature, importance in 
zip(feature_list_1, importances_1)] # Sort the feature importances by most important first feature_importances_1 = sorted(feature_importances_1, key = lambda x: x[1], reverse = True) # Print out the feature and importances [print('Variable: {:20} Importance: {}'.format(*pair)) for pair in feature_importances_1]; # Get numerical feature importances feature_list_2 = list(train_x_2.columns) # List of tuples with variable and importance importances_2 = list(rf_2.feature_importances_) feature_importances_2 = [(feature, round(importance, 4)) for feature, importance in zip(feature_list_2, importances_2)] # Sort the feature importances by most important first feature_importances_2 = sorted(feature_importances_2, key = lambda x: x[1], reverse = True) # Print out the feature and importances [print('Variable: {:20} Importance: {}'.format(*pair)) for pair in feature_importances_2]; # Get numerical feature importances feature_list_3 = list(train_x_3.columns) # List of tuples with variable and importance importances_3 = list(rf_3.feature_importances_) feature_importances_3 = [(feature, round(importance, 4)) for feature, importance in zip(feature_list_3, importances_3)] # Sort the feature importances by most important first feature_importances_3 = sorted(feature_importances_3, key = lambda x: x[1], reverse = True) # Print out the feature and importances [print('Variable: {:20} Importance: {}'.format(*pair)) for pair in feature_importances_3]; ###Output Variable: food_apple Importance: 0.1493 Variable: lev_min Importance: 0.1408 Variable: colonies_amount Importance: 0.1265 Variable: wind_max_avg Importance: 0.077 Variable: lev_max Importance: 0.0588 Variable: wind_max Importance: 0.0587 Variable: temp_max_avg Importance: 0.0525 Variable: hum Importance: 0.0375 Variable: rain_1mm Importance: 0.0317 Variable: rain_cum Importance: 0.0316 Variable: rain_max_day Importance: 0.0273 Variable: food_kiwi Importance: 0.0247 Variable: rain Importance: 0.0241 Variable: lev_mid Importance: 0.0228 Variable: population Importance: 0.021 Variable: sun Importance: 0.0201 Variable: temp_max_abs Importance: 0.018 Variable: food_fruit Importance: 0.0171 Variable: rain_max_10 Importance: 0.0146 Variable: wind_avg Importance: 0.0112 Variable: temp_min_abs Importance: 0.011 Variable: temp_avg Importance: 0.009 Variable: freez Importance: 0.0079 Variable: food_txakoli Importance: 0.0045 Variable: food_blueberry Importance: 0.0024 Variable: food_pear Importance: 0.0 Variable: food_raspberry Importance: 0.0 Variable: Cluster Importance: 0.0 ###Markdown New model with relevant variables ###Code # Train new models by cluster train2_x_0 = queen_train.loc[queen_train.Cluster == 0 ,['food_fruit', 'food_txakoli', 'population', 'lev_max']] train2_x_1 = queen_train.loc[queen_train.Cluster == 1 ,['colonies_amount','rain_cum','food_fruit']] train2_x_2 = queen_train.loc[queen_train.Cluster == 2 ,['population', 'rain_max_day', 'lev_min', 'hum']] train2_x_3 = queen_train.loc[queen_train.Cluster == 3 ,['food_apple', 'lev_min', 'colonies_amount']] rf_0.fit(train2_x_0, train_y_0) rf_1.fit(train2_x_1, train_y_1) rf_2.fit(train2_x_2, train_y_2) rf_3.fit(train2_x_3, train_y_3) ###Output _____no_output_____ ###Markdown Predictions ###Code queen_predict_0 = queen_predict.loc[queen_predict.Cluster == 0, :].copy(True) queen_predict_1 = queen_predict.loc[queen_predict.Cluster == 1 , :].copy(True) queen_predict_2 = queen_predict.loc[queen_predict.Cluster == 2 ,:].copy(True) queen_predict_3 = queen_predict.loc[queen_predict.Cluster == 3 ,:].copy(True) 
predictions_0 = rf_0.predict(queen_predict_0.loc[:,['food_fruit', 'food_txakoli', 'population', 'lev_max']]) predictions_1 = rf_1.predict(queen_predict_1.loc[:,['colonies_amount','rain_cum','food_fruit']]) predictions_2 = rf_2.predict(queen_predict_2.loc[:,['population', 'rain_max_day', 'lev_min', 'hum']]) predictions_3 = rf_3.predict(queen_predict_3.loc[:,['food_apple', 'lev_min', 'colonies_amount']]) queen_predict_0['nests_2020'] = predictions_0 queen_predict_1['nests_2020'] = predictions_1 queen_predict_2['nests_2020'] = predictions_2 queen_predict_3['nests_2020'] = predictions_3 HEX_0 = queen_predict_0.loc[:,['municip_code','municip_name','nests_2020']].groupby(by=['municip_code','municip_name'], as_index=False).sum().round().dropna() HEX_1 = queen_predict_1.loc[:,['municip_code','municip_name','nests_2020']].groupby(by=['municip_code','municip_name'], as_index=False).sum().round().dropna() HEX_2 = queen_predict_2.loc[:,['municip_code','municip_name','nests_2020']].groupby(by=['municip_code','municip_name'], as_index=False).sum().round().dropna() HEX_3 = queen_predict_3.loc[:,['municip_code','municip_name','nests_2020']].groupby(by=['municip_code','municip_name'], as_index=False).sum().round().dropna() aux = pd.DataFrame({"municip_code":[48020, 48022, 48071, 48088,48074,48051], "municip_name":['Bilbao','Karrantza Harana/Valle de Carranza','Muskiz', 'Ubide','Urduña/Orduña','Lanestosa'], "nests_2020":[0, 1, 0, 0, 1, 1]}) HEX = HEX_0.append(HEX_1, ignore_index = True).append(HEX_2, ignore_index = True).append(HEX_3, ignore_index = True).append(aux, ignore_index = True) HEX.columns = ['CODIGO MUNICIPIO','NOMBRE MUNICIPIO','NIDOS 2020'] HEX.to_csv('WaspBusters_20210519_RandomForestyears_v2.csv', index=False) ###Output _____no_output_____
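The four per-cluster models above are trained and applied with nearly identical copy-pasted blocks. A more maintainable pattern is to keep the per-cluster feature lists and fitted models in dictionaries and loop over the cluster labels. The sketch below is an untested refactoring idea that reuses `queen_train`, `queen_predict` and the feature choices from the cells above; it is not part of the submitted pipeline.

```python
import pandas as pd
from sklearn.ensemble import RandomForestRegressor

# Per-cluster feature selections taken from the importance analysis above.
cluster_features = {
    0: ['food_fruit', 'food_txakoli', 'population', 'lev_max'],
    1: ['colonies_amount', 'rain_cum', 'food_fruit'],
    2: ['population', 'rain_max_day', 'lev_min', 'hum'],
    3: ['food_apple', 'lev_min', 'colonies_amount'],
}

models, predictions = {}, []
for cluster, cols in cluster_features.items():
    train = queen_train[queen_train.Cluster == cluster]
    model = RandomForestRegressor(n_estimators=1000, random_state=42)
    model.fit(train[cols], train['waspbust_id'])
    models[cluster] = model

    pred = queen_predict[queen_predict.Cluster == cluster].copy()
    pred['nests_2020'] = model.predict(pred[cols])
    predictions.append(pred)

hex_all = pd.concat(predictions)
```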
Skin Lesion Detection.ipynb
###Markdown Recognize Skin Lesions using Transfer Learning ###Code from __future__ import absolute_import, division, print_function, unicode_literals import tensorflow as tf import os import numpy as np import pandas as pd import matplotlib.pyplot as plt import shutil tf.__version__ ###Output _____no_output_____ ###Markdown Setup Input Pipeline Set up and prepare data for training. The data has to be in a folder containing a folder for each class with its respective name.We use `ImageDataGenerator` to rescale the images, as the model needs a downscaled version of the pictures.Create the train generator and specify where the train dataset directory, image size, batch size.Create the validation generator with similar approach as the train generator with the flow_from_directory() method. ###Code from PIL import Image, ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True IMAGE_SIZE = 224 # 224 for MobileNetV2 BATCH_SIZE = 64 base_dir = "C:/Users/po/Projekte/Medical App/Data/KAGGLE/train" datagen = tf.keras.preprocessing.image.ImageDataGenerator( rescale=1./255, rotation_range=90, horizontal_flip=True, vertical_flip=True, width_shift_range=0.1, height_shift_range=0.1, zoom_range=0.05, validation_split=0.2) train_generator = datagen.flow_from_directory( base_dir, target_size=(IMAGE_SIZE, IMAGE_SIZE), batch_size=BATCH_SIZE, subset='training') val_generator = datagen.flow_from_directory( base_dir, target_size=(IMAGE_SIZE, IMAGE_SIZE), batch_size=BATCH_SIZE, subset='validation') # Print data shapes for image_batch, label_batch in train_generator: break image_batch.shape, label_batch.shape # Save labels in text file print(train_generator.class_indices) labels = '\n'.join(sorted(train_generator.class_indices.keys())) with open('C:/Users/po/Projekte/Medical App/labels.txt', 'w') as f: f.write(labels) ###Output _____no_output_____ ###Markdown Create the base model from the pre-trained convnetsCreate the base model from the **MobileNet V2** model developed at Google, and pre-trained on the ImageNet dataset, a large dataset of 1.4M images and 1000 classes of web images.First, pick which intermediate layer of MobileNet V2 will be used for feature extraction. A common practice is to use the output of the very last layer before the flatten operation, the so-called "bottleneck layer". The reasoning here is that the following fully-connected layers will be too specialized to the task the network was trained on, and thus the features learned by these layers won't be very useful for a new task. The bottleneck features, however, retain much generality.Let's instantiate an MobileNet V2 model pre-loaded with weights trained on ImageNet. By specifying the `include_top=False` argument, we load a network that doesn't include the classification layers at the top, which is ideal for feature extraction. ###Code IMG_SHAPE = (IMAGE_SIZE, IMAGE_SIZE, 3) # Create the base model from the pre-trained model Inception V3 base_model = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE, include_top=False, weights='imagenet') ###Output _____no_output_____ ###Markdown Feature extractionYou will freeze the convolutional base created from the previous step and use that as a feature extractor, add a classifier on top of it and train the top-level classifier. 
###Code base_model.trainable = False ###Output _____no_output_____ ###Markdown Add a classification head ###Code model = tf.keras.Sequential([ base_model, tf.keras.layers.Conv2D(32, 3, activation='relu'), tf.keras.layers.Dropout(0.2), tf.keras.layers.GlobalAveragePooling2D(), tf.keras.layers.Dense(2, activation = 'softmax') ]) for layer in base_model.layers: layer.trainable = True ###Output _____no_output_____ ###Markdown Compile the modelYou must compile the model before training it. Since there are two classes, use a binary cross-entropy loss. ###Code model.compile(optimizer=tf.keras.optimizers.Adam(), loss='categorical_crossentropy', metrics=['accuracy']) model.summary() print('Number of trainable variables = {}'.format(len(model.trainable_variables))) ###Output _____no_output_____ ###Markdown Train the model ###Code epochs = 5 for i in range(1): history = model.fit(train_generator, epochs=epochs, validation_data=val_generator) tf.keras.models.save_model(model, "models/kaggle_model_" + str(i) + ".h5") ###Output _____no_output_____ ###Markdown Learning curvesLet's take a look at the learning curves of the training and validation accuracy/loss when using the MobileNet V2 base model as a fixed feature extractor. ###Code acc = history.history['acc'] val_acc = history.history['val_acc'] loss = history.history['loss'] val_loss = history.history['val_loss'] plt.figure(figsize=(8, 8)) plt.subplot(2, 1, 1) plt.plot(acc, label='Training Accuracy') plt.plot(val_acc, label='Validation Accuracy') plt.legend(loc='lower right') plt.ylabel('Accuracy') plt.ylim([min(plt.ylim()),1]) plt.title('Training and Validation Accuracy') plt.subplot(2, 1, 2) plt.plot(loss, label='Training Loss') plt.plot(val_loss, label='Validation Loss') plt.legend(loc='upper right') plt.ylabel('Cross Entropy') plt.ylim([0,1.0]) plt.title('Training and Validation Loss') plt.xlabel('epoch') plt.show() ###Output _____no_output_____ ###Markdown Fine tuningIn our feature extraction experiment, you were only training a few layers on top of an MobileNet V2 base model. The weights of the pre-trained network were **not** updated during training.One way to increase performance even further is to train (or "fine-tune") the weights of the top layers of the pre-trained model alongside the training of the classifier you added. The training process will force the weights to be tuned from generic features maps to features associated specifically to our dataset. Un-freeze the top layers of the model All you need to do is unfreeze the `base_model` and set the bottom layers be un-trainable. Then, recompile the model (necessary for these changes to take effect), and resume training. ###Code base_model.trainable = True # Let's take a look to see how many layers are in the base model print("Number of layers in the base model: ", len(base_model.layers)) # Fine tune from this layer onwards fine_tune_at = 100 # Freeze all the layers before the `fine_tune_at` layer for layer in base_model.layers[:fine_tune_at]: layer.trainable = False ###Output _____no_output_____ ###Markdown Compile the modelCompile the model using a much lower training rate. 
###Code model.compile(loss='categorical_crossentropy', optimizer = tf.keras.optimizers.Adam(1e-5), metrics=['accuracy']) model.summary() print('Number of trainable variables = {}'.format(len(model.trainable_variables))) ###Output _____no_output_____ ###Markdown Continue Train the model ###Code epochs = 5 for i in range(10): history = model.fit(train_generator, epochs=epochs, validation_data=val_generator) tf.keras.models.save_model(model, "models/kaggle2_model_" + str(i) + ".h5") model.save("test1.h5") #model.save_model("test2.h5") tf.keras.models.save_model(model, "test3.h5") model.save_weights("test4.h5") ###Output _____no_output_____ ###Markdown Convert to TFLite Saved the model using `tf.saved_model.save` and then convert the saved model to a tf lite compatible format. ###Code # Save tf.keras model in HDF5 format. keras_file = "kaggle_model_4.h5" tf.keras.models.save_model(model, keras_file) # Convert to TensorFlow Lite model. converter = tf.lite.TFLiteConverter.from_keras_model_file(keras_file) tflite_model = converter.convert() open("converted_model.tflite", "wb").write(tflite_model) ###Output _____no_output_____ ###Markdown Let's take a look at the learning curves of the training and validation accuracy/loss, when fine tuning the last few layers of the MobileNet V2 base model and training the classifier on top of it. The validation loss is much higher than the training loss, so you may get some overfitting.You may also get some overfitting as the new training set is relatively small and similar to the original MobileNet V2 datasets. ###Code acc = history_fine.history['acc'] val_acc = history_fine.history['val_acc'] loss = history_fine.history['loss'] val_loss = history_fine.history['val_loss'] plt.figure(figsize=(8, 8)) plt.subplot(2, 1, 1) plt.plot(acc, label='Training Accuracy') plt.plot(val_acc, label='Validation Accuracy') plt.legend(loc='lower right') plt.ylabel('Accuracy') plt.ylim([min(plt.ylim()),1]) plt.title('Training and Validation Accuracy') plt.subplot(2, 1, 2) plt.plot(loss, label='Training Loss') plt.plot(val_loss, label='Validation Loss') plt.legend(loc='upper right') plt.ylabel('Cross Entropy') plt.ylim([0,1.0]) plt.title('Training and Validation Loss') plt.xlabel('epoch') plt.show() ###Output _____no_output_____ ###Markdown Summary:* **Using a pre-trained model for feature extraction**: When working with a small dataset, it is common to take advantage of features learned by a model trained on a larger dataset in the same domain. This is done by instantiating the pre-trained model and adding a fully-connected classifier on top. The pre-trained model is "frozen" and only the weights of the classifier get updated during training.In this case, the convolutional base extracted all the features associated with each image and you just trained a classifier that determines the image class given that set of extracted features.* **Fine-tuning a pre-trained model**: To further improve performance, one might want to repurpose the top-level layers of the pre-trained models to the new dataset via fine-tuning.In this case, you tuned your weights such that your model learned high-level features specific to the dataset. This technique is usually recommended when the training dataset is large and very similar to the orginial dataset that the pre-trained model was trained on. 
###Code from keras.preprocessing import image from keras.applications import imagenet_utils, mobilenet directory = "C:/Users/po/Projekte/Medical App/Data/Kaggle/train/malignant/" max_ = 0 fn = None for filename in os.listdir(directory): img = image.load_img(os.path.join(directory, filename), target_size=(224, 224)) img_array = image.img_to_array(img) img_array = np.expand_dims(img_array, axis=0) pImg = mobilenet.preprocess_input(img_array) prediction = model.predict(pImg) if prediction[0][1] > max_: max_ = prediction[0][1] fn = filename print(max_) print(fn) ###Output _____no_output_____
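Once `converted_model.tflite` has been written, it can be smoke-tested on the desktop with the TensorFlow Lite interpreter before being shipped to a device. The sketch below assumes a single 224x224 RGB input scaled to [0, 1], matching the `ImageDataGenerator(rescale=1./255)` preprocessing used for training; it is a minimal check, not production inference code.

```python
import numpy as np
import tensorflow as tf

# Load the converted model and allocate its tensors.
interpreter = tf.lite.Interpreter(model_path="converted_model.tflite")
interpreter.allocate_tensors()

input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# Dummy input with the training shape (1, 224, 224, 3), values in [0, 1].
dummy = np.random.rand(1, 224, 224, 3).astype(np.float32)
interpreter.set_tensor(input_details[0]['index'], dummy)
interpreter.invoke()

probs = interpreter.get_tensor(output_details[0]['index'])
print(probs)  # softmax scores over the two classes
```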
extensions.ipynb
###Markdown Extensions and compiled code High-performance python typically means using extension modules (read: other languages) Why extend? * Execution speed, obviously * Vectorizing the algorithm is difficult, time-consuming, and inefficient * Vectorization may be memory-limitiing (unless you use `memmap`, but that's another ball of wax) Why should I not extend? * It's not python, so you are typically dealing with the compile of code and linking to shared libraries. Yuck. Installing and redistributing the code just got hard. * It's not python. Python is sooo much easier. What choices to I have? * Direct calls to compiled libraries: `ctypes` * Just-in-time compilation: `numba`, `PyPy`, and the now defunct `psyco` * Building python bindings: hand-binding, `SWIG`, `f2py`, and `cython` Using just-in-time compilation: `numba` * `jit` ###Code %%file numba_mandel.py """ Compute and plot the Mandelbrot set using matplotlib. """ import numpy as np import matplotlib.pylab as mpl from numba import jit @jit def mandel(x, y, max_iters): """ Given the real and imaginary parts of a complex number, determine if it is a candidate for membership in the Mandelbrot set given a fixed number of iterations. """ c = complex(x,y) z = 0j for i in range(max_iters): z = z*z + c if z.real * z.real + z.imag * z.imag >= 4: return 255 * i // max_iters return 255 @jit(nopython=True) def create_fractal(min_x, max_x, min_y, max_y, image, iters): height = image.shape[0] width = image.shape[1] pixel_size_x = (max_x - min_x) / width pixel_size_y = (max_y - min_y) / height for x in range(width): real = min_x + x * pixel_size_x for y in range(height): imag = min_y + y * pixel_size_y color = mandel(real, imag, iters) image[y, x] = color return image image = np.zeros((700, 1400), dtype=np.uint8) import time start = time.time() create_fractal(-2.0, 1.0, -1.0, 1.0, image, 20) print "took %s" % (time.time() - start) # mpl.imshow(image) # mpl.gray() # mpl.show() !python2.7 numba_mandel.py %%file bubblesort.py """ demonstrate compiled extensions using bubblesort """ import numpy as np from numba import jit def bubblesort(items): length = len(items) swapped = 1 for i in range(0, length): if swapped: swapped = 0 for ele in range(0, length-i-1): if items[ele] > items[ele + 1]: temp = items[ele + 1] items[ele + 1] = items[ele] items[ele] = temp swapped = 1 return items jitbubblesort = jit(bubblesort) import time for N in (100,100,1000,1000,10000): randoms = np.random.randint(0,1000,(N)).tolist() print "For N=%s" % N start = time.time() x = bubblesort(randoms) print "%s: python" % (time.time() - start) assert np.all(sorted(randoms) == x) start = time.time() x = jitbubblesort(randoms) print "%s: numba" % (time.time() - start) assert np.all(sorted(randoms) == x) print '' # EOF !python2.7 bubblesort.py ###Output For N=100 0.00119304656982: python 0.191457986832: numba For N=100 0.00129985809326: python 8.01086425781e-05: numba For N=1000 0.120245933533: python 0.000295877456665: numba For N=1000 0.120244026184: python 0.000308990478516: numba For N=10000 11.8938641548: python 0.00219893455505: numba ###Markdown * `jit` with type annotations See: http://numba.pydata.org/numba-doc/dev/user/examples.html * `autojit` ###Code import numpy as np from numba import autojit import time def fib(N): x = np.zeros(N, dtype=np.float64) for i in range(N): if i == 0: x[i] = 0 elif i == 1: x[i] = 1 else: x[i] = x[i - 1] + x[i - 2] return x jitfib = autojit(fib) for N in (100,1000,10000): print "For N=%s" % N start = time.time() x = fib(N) print "%s: python" 
% (time.time() - start) start = time.time() x = jitfib(N) print "%s: numba" % (time.time() - start) print '' ###Output For N=100 7.39097595215e-05: python 0.119683027267: numba For N=1000 0.000465154647827: python 1.4066696167e-05: numba For N=10000 0.00473999977112: python 4.91142272949e-05: numba ###Markdown Using python bindings: `cython` * Write python code, compile it as C -- from the commandline with `cython` ###Code %%file pi.pyx def pied( int num ) : return num * 3.14159265359 def vec_pied( int r ) : retList = [] cdef unsigned int i for i in range(r): retList.append( 3.14159 * i ) return retList %%file setup.py from distutils.core import setup from distutils.extension import Extension from Cython.Build import cythonize import numpy as np extensions=[ Extension("pi", ["pi.pyx"], include_dirs=[np.get_include()]), ] setup( ext_modules=cythonize(extensions) ) !python2.7 setup.py build_ext --inplace import pi print pi.pied(1) print pi.vec_pied(4) ###Output 3.14159265359 [0.0, 3.14159, 6.28318, 9.424769999999999] ###Markdown * Write python code, compile it as C -- interactively in `IPython` ###Code %load_ext Cython %%cython def cy_bubblesort(items): length = len(items) swapped = 1 for i in range(0, length): if swapped: swapped = 0 for ele in range(0, length-i-1): if items[ele] > items[ele + 1]: temp = items[ele + 1] items[ele + 1] = items[ele] items[ele] = temp swapped = 1 return items import time for N in (100,100,1000,1000,10000): randoms = np.random.randint(0,1000,(N)).tolist() print "For N=%s" % N start = time.time() x = cy_bubblesort(randoms) print "%s: Cython" % (time.time() - start) assert np.all(sorted(randoms) == x) print '' ###Output For N=100 0.000560998916626: Cython For N=100 0.000540971755981: Cython For N=1000 0.0564270019531: Cython For N=1000 0.0560281276703: Cython For N=10000 5.55099987984: Cython ###Markdown * Python code with type annotations ###Code def py_fib(n): a, b = 0, 1 for i in range(n): b, a = a + b, b return a %timeit py_fib(10000) %%cython def cy_fib(n): a, b = 0, 1 for i in range(n): b, a = a + b, b return a %timeit cy_fib(10000) %%file fib.pyx def cy_fib(int n): cdef int a = 0 cdef int b = 1 cdef int tmp for i in range(n): tmp = b b = a + b a = tmp return a %%file setup2.py from distutils.core import setup from Cython.Build import cythonize setup( name = "cython fib", ext_modules = cythonize('fib.pyx'), # accepts a glob pattern ) !python2.7 setup2.py build_ext --inplace import fib %timeit fib.cy_fib(10000) ###Output 100000 loops, best of 3: 5.78 µs per loop ###Markdown * Aside: line profiling with `cython -a` ###Code !cython-2.7 -a fib.pyx ###Output _____no_output_____ ###Markdown See: [result](fib.html) * Working with contiguous memory: `cython` with `numpy` ###Code %%cython import numpy as np from numpy cimport float_t def numcy_fib(int N): cdef int i cdef float_t[::1] x = np.zeros(N, dtype=float) for i in range(N): if i == 0: x[i] = 0 elif i == 1: x[i] = 1 else: x[i] = x[i - 1] + x[i - 2] return x %timeit numcy_fib(10000) ###Output The slowest run took 4.08 times longer than the fastest. This could mean that an intermediate result is being cached 10000 loops, best of 3: 31.4 µs per loop
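The overview at the top of this notebook lists `ctypes` alongside Numba and Cython, but no example of it appears above. For completeness, here is a minimal, hedged sketch of calling a function from an already-compiled shared library (the C math library's `sqrt`) directly from Python. Library lookup is platform-dependent: `find_library("m")` can return None on Windows, so treat this as illustrative only.

```python
import ctypes
from ctypes.util import find_library

# Locate and load the C math library (libm on Linux/macOS).
libm = ctypes.CDLL(find_library("m"))

# ctypes has to be told the C signature: double sqrt(double).
libm.sqrt.argtypes = [ctypes.c_double]
libm.sqrt.restype = ctypes.c_double

print(libm.sqrt(2.0))  # 1.4142135623730951
```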
Potential-Energy-For-Internal-Waves.ipynb
###Markdown Potential Energy Methodology A couple different ways of estimating potential energy seem to come up in the literature so this notebook is dedicated to testing and comparing a couple of the methods. The chosen method will be loaded into the final work. ###Code # Load Data and relevant modules - this is common to both wayss %matplotlib inline import numpy as np import scipy.signal as sig import matplotlib.pyplot as plt import data_load import gsw import oceans as oc import pandas as pd import internal_waves_calculations as iwc import warnings import cmocean # Probably Shouldn't do this but they annoy me warnings.simplefilter("ignore") pd.options.display.max_rows = 3000 pd.options.display.max_columns = 22 # load data and cut off bottom (its all nans) ladcp, ctd = data_load.load_data() strain = np.genfromtxt('strain.csv', delimiter=',') wl_max = 900 wl_min = 400 ctd_bin_size = 1024 ladcp_bin_size = 1024 nfft = 1024 U, V, p_ladcp = oc.loadLADCP(ladcp) S, T, p_ctd, lat, lon = oc.loadCTD(ctd) rho = gsw.density.rho(S, T, p_ctd) maxDepth = 4000 idx_ladcp = p_ladcp[:, -1] <= maxDepth idx_ctd = p_ctd[:, -1] <= maxDepth strain = strain[idx_ctd, :] S = S[idx_ctd,:] T = T[idx_ctd,:] rho = rho[idx_ctd,:] p_ctd = p_ctd[idx_ctd, :] U = U[idx_ladcp, :] V = V[idx_ladcp, :] p_ladcp = p_ladcp[idx_ladcp, :] # Bin CTD data ctd_bins = oc.binData(S, p_ctd[:, 0], ctd_bin_size) # Bin Ladcp Data ladcp_bins = oc.binData(U, p_ladcp[:, 0], ladcp_bin_size) # Depth and lat/long grids (For plots) depths = np.vstack([np.nanmean(p_ctd[binIn]) for binIn in ctd_bins]) dist = gsw.distance(lon, lat) dist = np.cumsum(dist)/1000 dist = np.append(0,dist) ###Output /Users/mdevana/anaconda3/lib/python3.6/site-packages/cmocean/tools.py:76: MatplotlibDeprecationWarning: The is_string_like function was deprecated in version 2.1. if not mpl.cbook.is_string_like(rgbin[0]): ###Markdown **WARNING : The adiabatic leveling is a super slow function so try to avoid running a bunch of times for no reason** ###Code # Calculate potential energy density spectrum using adiabatic leveling # --- This is the part that needs tweaking I think # Adiabatic leveling following Bray and Fofonoff 1981 - # the actual code is a python version of Alex Forryan's Matlab code # Order = order of polynomial fit to use order = 1 # Pressure window - See Bray and Fofonoff 1981 for details pressure_range = 400 # axis = the depth increases on axis = 0 # Use Adiabtic Leveling from Oceans Library N2_ref, N2, strain, p_mid, rho_bar = oc.adiabatic_level(S, T, p_ctd, lat, pressure_range=pressure_range, order=order, axis=axis, ) ###Output _____no_output_____ ###Markdown Isopyncal displacements from density surfaces**V1 - Waterman et al. 2013-using density surfaces**This uses the equation:$$ \eta = \frac{\rho - \rho_{ref}}{\frac{d\rho_{ref}}{dz}} $$where rho reference is neutral density. I am going to try it with neutral density and with the adiabatically leveled density surfaces and see how different it is and why. $\frac{d\rho}{dz}$ is computed by differencing the reference density surfaces over a 400 meter vertical window. According to *Waterman et al. 2013* this window should not make a big difference. for the reference density the mean of all the adiabatically leveled profiles is taken as the stratification varies signficantly across the profile. 
###Code rho_ref = np.nanmean(rho_bar, axis=1) # Stick a nan to the top to make it the same size as the rho array rho_ref = np.hstack((0, rho_ref)) # set differnece window to 400 meters win = 400 # since all the data is on a normalized pressure grid use a single vertical vector to make it easier to handle z = -1*gsw.z_from_p(p_ctd[:,0], lat[:,0]) dz = np.nanmean(np.diff(z)) step = int(np.floor(.5*win/dz)) eta = np.full_like(rho, np.nan) for i in range(rho.shape[0]): # If in the TOP half of the profile the window needs to be adjusted if i - step < 0: lower = 0 upper = int(2*step) # If in the BOTTOM half of the profile the window needs to be adjusted elif i + step > (rho.shape[0] - 1): lower = int(rho.shape[0] - 2*step) upper = -1 else: upper = i + step lower = i - step drefdz = (rho_ref[upper] - rho_ref[lower])/win eta[i,:] = (rho[i,:] - rho_ref[i])/drefdz ###Output _____no_output_____ ###Markdown Get Spectrum to see how it workedThe kinetic energy calculations seem straight forward so they are just loaded in using the internal waves KE calculation function rather than show the whole code. Using these the wave components are calculated to see effects of different ways of potential energy calculations. **See main lee_wave.ipynb notebook for full details** ###Code # Calculate KE spectrums (m2/s2) z_ladcp = -1*gsw.z_from_p(p_ladcp, lat) KE, KE_grid, KE_psd, Uprime, Vprime, ke_peaks = iwc.KE_UV(U, V, z_ladcp, ladcp_bins, wl_min, wl_max, lc=wl_min-50, nfft=1024, detrend='constant') # Calculate PE spectrum using eta from above (m2/s2) z_ctd = -1*gsw.z_from_p(p_ctd, lat) PE, PE_grid, eta_psd, N2mean, pe_peaks = iwc.PE(N2, z, eta, wl_min, wl_max, ctd_bins, nfft=1024, detrend=False) # Plot spectra to see what happened m_plot = np.array([(1)/wl_max, (1)/wl_max, (1)/wl_min, (1)/wl_min]) plt.figure(figsize=[12,6]) plt.loglog(KE_grid, KE_psd.T, linewidth=.6, c='b', alpha=.05) plt.loglog(KE_grid, np.nanmean(KE_psd, axis=0).T, lw=2, c='b') ylims = plt.gca().get_ylim() ylim1 = np.array([ylims[0], ylims[1]]) plt.plot(m_plot[2:], ylim1, lw=1, c='k', alpha=.5, linestyle='dotted') plt.plot(m_plot[:2], ylim1, lw=1, c='k', alpha=.5, linestyle='dotted') plt.ylim(ylims) plt.ylabel('Kinetic Energy Density') plt.xlabel('Vertical Wavenumber') plt.gca().grid(True, which="both", color='k', linestyle='dotted', linewidth=.2) plt.loglog(PE_grid, .5*np.nanmean(N2)*eta_psd.T, lw=.6, c='r', alpha=.05) plt.loglog(PE_grid, .5*np.nanmean(N2)*np.nanmean(eta_psd, axis=0).T, lw=2, c='r') plt.plot(m_plot[2:], ylim1, lw=1, c='k', alpha=.5, linestyle='dotted') plt.plot(m_plot[:2], ylim1, lw=1, c='k', alpha=.5, linestyle='dotted') plt.ylim(ylims) # plt.gca().grid(True, which="both", color='k', linestyle='dotted', linewidth=.2) plt.ylabel('Potential Energy Density (m2/s2)') plt.xlabel('Vertical Wavenumber') # plt.xlim(.0005, .01) ###Output _____no_output_____ ###Markdown Get Wave Components (Frequency, horizontal wavenumber) ###Code Etotal = 1027*(KE + PE) # Multiply by density to get Joules # wave components f = np.nanmean(gsw.f(lat)) # version 2 omega calculation - where did this come from? omega = f*np.sqrt(((KE+PE)/(KE-PE))) # Waterman et al. 2012 (Ithink) m = (2*np.pi)/800 kh = m*np.sqrt(((f**2 - omega**2)/(omega**2 - N2mean))) # Waterman et al. 2012 kh2 = (m/np.sqrt(N2mean))*(np.sqrt(omega**2 - f**2)) # Where (meyer i think?) lambdaH = 1e-3*(2*np.pi)/kh lambdaH2 = 1e-3*(2*np.pi)/kh2 # version 2 omega calculation Rw = KE/PE # Unsure what to do with this just yet. 
table = oc.display(lambdaH, index=depths.flatten()) table.style.set_caption('Horizontal Wavelength V1') ###Output _____no_output_____ ###Markdown Attempt with neutral densities and polyfit ###Code # Neutral Densities rho_neutral = np.genfromtxt('neutral_densities.csv', delimiter=',') rho_n = rho_neutral[idx_ctd,:] # Poly fit to neutral density to get reference profiles ref = [] for cast in rho_n.T: fitrev = oc.vert_polyFit2(cast, p_ctd[:, 0], 100, deg=2) ref.append(fitrev) ref = np.vstack(ref).T eta = oc.isopycnal_displacements(rho_n, ref, p_ctd, lat) ref = np.nanmean(ref, axis=1) # recalculate spectrum z_ctd = -1*gsw.z_from_p(p_ctd, lat) PE, PE_grid, eta_psd, N2mean, pe_peaks = iwc.PE(N2, z, eta, wl_min, wl_max, ctd_bins, nfft=1024, detrend=False) # Plot spectra to see what happened m_plot = np.array([(1)/wl_max, (1)/wl_max, (1)/wl_min, (1)/wl_min]) plt.figure(figsize=[12,6]) plt.loglog(KE_grid, KE_psd.T, linewidth=.6, c='b', alpha=.05) plt.loglog(KE_grid, np.nanmean(KE_psd, axis=0).T, lw=2, c='b') ylims = plt.gca().get_ylim() ylim1 = np.array([ylims[0], ylims[1]]) plt.plot(m_plot[2:], ylim1, lw=1, c='k', alpha=.5, linestyle='dotted') plt.plot(m_plot[:2], ylim1, lw=1, c='k', alpha=.5, linestyle='dotted') plt.ylim(ylims) plt.ylabel('Kinetic Energy Density') plt.xlabel('Vertical Wavenumber') plt.gca().grid(True, which="both", color='k', linestyle='dotted', linewidth=.2) plt.loglog(PE_grid, .5*np.nanmean(N2)*eta_psd.T, lw=.6, c='r', alpha=.05) plt.loglog(PE_grid, .5*np.nanmean(N2)*np.nanmean(eta_psd, axis=0).T, lw=2, c='r') plt.plot(m_plot[2:], ylim1, lw=1, c='k', alpha=.5, linestyle='dotted') plt.plot(m_plot[:2], ylim1, lw=1, c='k', alpha=.5, linestyle='dotted') plt.ylim(ylims) # plt.gca().grid(True, which="both", color='k', linestyle='dotted', linewidth=.2) plt.ylabel('Potential Energy Density (m2/s2)') plt.xlabel('Vertical Wavenumber') # plt.xlim(.0005, .01) Etotal = 1027*(KE + PE) # Multiply by density to get Joules # wave components f = np.nanmean(gsw.f(lat)) # version 2 omega calculation - where did this come from? omega = f*np.sqrt(((KE+PE)/(KE-PE))) # Waterman et al. 2012 (Ithink) m = (2*np.pi)/800 kh = m*np.sqrt(((f**2 - omega**2)/(omega**2 - N2mean))) # Waterman et al. 2012 kh2 = (m/np.sqrt(N2mean))*(np.sqrt(omega**2 - f**2)) # Where (meyer i think?) lambdaH = 1e-3*(2*np.pi)/kh lambdaH2 = 1e-3*(2*np.pi)/kh2 # version 2 omega calculation Rw = KE/PE # Unsure what to do with this just yet. table = oc.display(Etotal, index=depths.flatten()) table.style.set_caption('Horizontal Wavelength V1') table = oc.display(lambdaH, index=depths.flatten()) table.style.set_caption('Horizontal Wavelength V1') plt.rcParams.update({'font.size':12}) ref = np.nanmean(ref, axis=1) ref.shape ###Output _____no_output_____
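The stated goal of this notebook is to compare the two displacement estimates (the 400 m density-surface method and the neutral-density polynomial fit), but the second calculation overwrites `eta` in place. Below is a small, hypothetical sketch of how the comparison could be made: `eta_v1` and `eta_v2` are assumed to be copies saved after each method (neither exists in the notebook as written), and the metrics are just an RMS difference and a correlation over finite samples.

```python
import numpy as np

# eta_v1: displacements from the adiabatically levelled reference (method 1)
# eta_v2: displacements from the neutral-density polynomial fit (method 2)
# Both are assumed to share the (depth, station) shape of rho.

diff = eta_v1 - eta_v2
rms = np.sqrt(np.nanmean(diff**2))
print("RMS difference between methods: {:.2f} m".format(rms))

mask = np.isfinite(eta_v1) & np.isfinite(eta_v2)
r = np.corrcoef(eta_v1[mask], eta_v2[mask])[0, 1]
print("Correlation between methods: {:.3f}".format(r))
```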
notebooks/Dogs vs Cats using VGG16.ipynb
###Markdown Dogs vs Cats using VGG16___ **Author** : Aman Hussain **Email** : [email protected] **Description** : Classifying images of dogs and cats by finetuning the VGG16 model Import Libraries Scientific Computing Stack ###Code import numpy as np import matplotlib.pyplot as plt %matplotlib inline ###Output _____no_output_____ ###Markdown Custom Packages ###Code import os, json from helper import utils from helper.utils import plots from helper import vgg16 from helper.vgg16 import Vgg16 ###Output Using cuDNN version 5103 on context None Mapped name None to device cuda0: Tesla K80 (0000:00:04.0) Using Theano backend. ###Markdown Declaring paths & global parameters The path to the dataset is defined here. It will point to the sample folder which contains lesser number of images for quick and iterative training on the local machine. For the final training, on the cloud we must change the path to the one commented out below. ###Code # path = '../data/dogscats/sample/' path = '../data/dogscats/' ###Output _____no_output_____ ###Markdown The default batchsize for training and validation purposes ###Code batchsize = 64 ###Output _____no_output_____ ###Markdown Data Exploration Instantiating the VGG16 class which implements the required utility methods ###Code vgg = Vgg16() ###Output _____no_output_____ ###Markdown Getting the training and validation batches ###Code batches = vgg.get_batches(path+'train', batch_size=batchsize) val_batches = vgg.get_batches(path+'valid', batch_size=batchsize) ###Output Found 20000 images belonging to 2 classes. Found 5000 images belonging to 2 classes. ###Markdown Visualizing the images, only if we are exploring the samples ###Code imgs, labels = next(batches) val_imgs, val_labels = next(val_batches) labels = ['dog' if i[0]==0 else 'cat' for i in labels] val_labels = ['dog' if i[0]==0 else 'cat' for i in val_labels] plots(val_imgs[:5], figsize=(20,10), titles=val_labels) ###Output _____no_output_____ ###Markdown Finetuning ###Code vgg.finetune(batches) %%time vgg.fit(batches, val_batches, nb_epoch=3) ###Output Epoch 1/3 20000/20000 [==============================] - 464s - loss: 0.1246 - acc: 0.9666 - val_loss: 0.0561 - val_acc: 0.9828 Epoch 2/3 20000/20000 [==============================] - 463s - loss: 0.0957 - acc: 0.9775 - val_loss: 0.0614 - val_acc: 0.9826 Epoch 3/3 20000/20000 [==============================] - 463s - loss: 0.0958 - acc: 0.9768 - val_loss: 0.0538 - val_acc: 0.9852 CPU times: user 31min 11s, sys: 5min 7s, total: 36min 19s Wall time: 23min 39s ###Markdown Model Testing Due to the quirkiness of the ImageDataGenerator.flow_from_directory() used by vgg.get_batches(), we have to make a sub directory under test directory by the name 'subdir_for_keras_ImageDataGenerator'. ###Code batch_size = len(os.listdir(path+'test'+'/subdir_for_keras_ImageDataGenerator')) ###Output _____no_output_____ ###Markdown Keras ImageDataGenerator does not return the filenames and loads them in the same order as os.listdir() returns. Here, we extract the filenames which will serve as the indexes. 
###Code img_index = os.listdir(path+'test'+'/subdir_for_keras_ImageDataGenerator') img_index = [os.path.splitext(file)[0] for file in img_index] ###Output _____no_output_____ ###Markdown With the class_mode set to None, it will return only the batch of images without labels ###Code testbatch = vgg.get_batches(path+'test', shuffle=False, batch_size=batch_size, class_mode=None) test_imgs = next(testbatch) ###Output _____no_output_____ ###Markdown Manually verifying the predicitons ###Code plots(test_imgs[:5]) probab, prediction, prediction_labels = vgg.predict(test_imgs[:5], details = True) print(prediction_labels, probab, prediction) img_index[:5] ###Output _____no_output_____ ###Markdown We confirm the predictions manually. Here, we make the predictions using our trained model ###Code %%time probab, prediction, prediction_labels = vgg.predict(test_imgs, details = True) ###Output CPU times: user 3min 4s, sys: 49 s, total: 3min 53s Wall time: 3min 53s ###Markdown Results Preparing to save the predictions as submissions to the Kaggle competetion ###Code np.save(path+'submissions/index', img_index) np.save(path+'submissions/probab', probab) np.save(path+'submissions/prediction', prediction) np.save(path+'submissions/prediction_labels', prediction_labels) ###Output _____no_output_____ ###Markdown Save the trained model ###Code vgg.model.save("../models/vgg_dogsVScats.h5") for predicted, index in enumerate(prediction): # When a cat is predicted, get the complimentary value if predicted == 0: probab[index] = 1 - probab[index] img_index.insert(0, 'id') labels_pred = [str(label) for label in prediction] labels_pred.insert(0, 'label') labels_prob = [str(label) for label in probab] labels_prob.insert(0, 'label') submission_array_pred = np.vstack((img_index, labels_pred)).T.astype('str') submission_array_prob = np.vstack((img_index, labels_prob)).T.astype('str') ###Output _____no_output_____ ###Markdown Saving the array as a CSV ###Code np.savetxt(path+'submissions/submission_pred.csv', submission_array_pred, delimiter=",", fmt='%1s') np.savetxt(path+'submissions/submission_prob.csv', submission_array_prob, delimiter=",", fmt='%1s') ###Output _____no_output_____ ###Markdown ___ Improving the Score by using Probabilities Loading the values predicted by the model ###Code img_index = np.load(path+'submissions/index.npy') probab = np.load(path+'submissions/probab.npy') prediction = np.load(path+'submissions/prediction.npy') ###Output _____no_output_____ ###Markdown Visualising the distribution of probabilities ###Code plt.hist(probab) ###Output _____no_output_____ ###Markdown Since the kaggle competetion evaluates results based on log loss, it heavily penalises values which are 1 or 0. So, we manually modify the 1 and 0 to read 0.95 and 0.05. ###Code np.unique(prediction) prediction = prediction.astype(float) prediction[prediction == 1] = 0.95 prediction[prediction == 0] = 0.05 np.unique(prediction) ###Output _____no_output_____ ###Markdown Now, we will prepare the list for submission ###Code img_index = img_index.tolist() img_index.insert(0, 'id') labels_pred = [str(label) for label in prediction] labels_pred.insert(0, 'label') submission_array_pred = np.vstack((img_index, labels_pred)).T.astype('str') ###Output _____no_output_____ ###Markdown Saving the new submission file ###Code np.savetxt(path+'submissions/submission_pred_mod.csv', submission_array_pred, delimiter=",", fmt='%1s') ###Output _____no_output_____
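The hard 1 -> 0.95 and 0 -> 0.05 replacement above works, but the same idea can be applied directly to the predicted probabilities with `np.clip`, which keeps whatever confidence the model actually produced while still avoiding the unbounded log-loss penalty at exactly 0 or 1. A minimal sketch, assuming `probab` already holds P(dog) for each test image as constructed above:

```python
import numpy as np

# Clip the dog-probabilities away from the 0/1 endpoints.
clipped = np.clip(probab, 0.05, 0.95)

# Why clipping matters for the log-loss metric: a confident wrong answer
# near p = 1.0 costs far more than one at p = 0.95.
print(-np.log(1 - 0.95))    # ~3.0 penalty for a wrong, clipped prediction
print(-np.log(1 - 0.9999))  # ~9.2 penalty for a wrong, unclipped prediction
```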
Kaggle/KaggleExercise_Pipelines.ipynb
###Markdown **[Intermediate Machine Learning Micro-Course Home Page](https://www.kaggle.com/learn/intermediate-machine-learning)**--- In this exercise, you will use **pipelines** to improve the efficiency of your machine learning code. SetupThe questions below will give you feedback on your work. Run the following cell to set up the feedback system. ###Code # Set up code checking from learntools.core import binder binder.bind(globals()) from learntools.ml_intermediate.ex4 import * print("Setup Complete") ###Output Setup Complete ###Markdown You will work with data from the [Housing Prices Competition for Kaggle Learn Users](https://www.kaggle.com/c/home-data-for-ml-course). ![Ames Housing dataset image](https://i.imgur.com/lTJVG4e.png)Run the next code cell without changes to load the training and validation sets in `X_train`, `X_valid`, `y_train`, and `y_valid`. The test set is loaded in `X_test`. ###Code import pandas as pd from sklearn.model_selection import train_test_split # Read the data X_full = pd.read_csv('../input/train.csv', index_col='Id') X_test_full = pd.read_csv('../input/test.csv', index_col='Id') # Remove rows with missing target, separate target from predictors X_full.dropna(axis=0, subset=['SalePrice'], inplace=True) y = X_full.SalePrice X_full.drop(['SalePrice'], axis=1, inplace=True) # Break off validation set from training data X_train_full, X_valid_full, y_train, y_valid = train_test_split(X_full, y, train_size=0.8, test_size=0.2, random_state=0) # "Cardinality" means the number of unique values in a column # Select categorical columns with relatively low cardinality (convenient but arbitrary) categorical_cols = [cname for cname in X_train_full.columns if X_train_full[cname].nunique() < 10 and X_train_full[cname].dtype == "object"] # Select numerical columns numerical_cols = [cname for cname in X_train_full.columns if X_train_full[cname].dtype in ['int64', 'float64']] # Keep selected columns only my_cols = categorical_cols + numerical_cols X_train = X_train_full[my_cols].copy() X_valid = X_valid_full[my_cols].copy() X_test = X_test_full[my_cols].copy() X_train.head() ###Output _____no_output_____ ###Markdown The next code cell uses code from the tutorial to preprocess the data and train a model. Run this code without changes. 
###Code from sklearn.compose import ColumnTransformer from sklearn.pipeline import Pipeline from sklearn.impute import SimpleImputer from sklearn.preprocessing import OneHotEncoder from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import mean_absolute_error # Preprocessing for numerical data numerical_transformer = SimpleImputer(strategy='constant') # Preprocessing for categorical data categorical_transformer = Pipeline(steps=[ ('imputer', SimpleImputer(strategy='most_frequent')), ('onehot', OneHotEncoder(handle_unknown='ignore')) ]) # Bundle preprocessing for numerical and categorical data preprocessor = ColumnTransformer( transformers=[ ('num', numerical_transformer, numerical_cols), ('cat', categorical_transformer, categorical_cols) ]) # Define model model = RandomForestRegressor(n_estimators=100, random_state=0) # Bundle preprocessing and modeling code in a pipeline clf = Pipeline(steps=[('preprocessor', preprocessor), ('model', model) ]) # Preprocessing of training data, fit model clf.fit(X_train, y_train) # Preprocessing of validation data, get predictions preds = clf.predict(X_valid) print('MAE:', mean_absolute_error(y_valid, preds)) ###Output MAE: 17861.780102739725 ###Markdown The code yields a value around 17862 for the mean absolute error (MAE). In the next step, you will amend the code to do better. Step 1: Improve the performance Part ANow, it's your turn! In the code cell below, define your own preprocessing steps and random forest model. Fill in values for the following variables:- `numerical_transformer`- `categorical_transformer`- `model`To pass this part of the exercise, you need only define valid preprocessing steps and a random forest model. ###Code # Preprocessing for numerical data numerical_transformer = SimpleImputer() # Your code here # Preprocessing for categorical data categorical_transformer = Pipeline( steps = [ ('imp', SimpleImputer(strategy = 'most_frequent')), ('ohe', OneHotEncoder(handle_unknown="ignore")) ]) # Bundle preprocessing for numerical and categorical data preprocessor = ColumnTransformer( transformers=[ ('num', numerical_transformer, numerical_cols), ('cat', categorical_transformer, categorical_cols) ]) # Define model model = RandomForestRegressor(n_estimators = 150, random_state = 163) # Your code here # Check your answer step_1.a.check() # #%%RM_IF(PROD)% # # Preprocessing for numerical data # numerical_transformer = SimpleImputer(strategy='constant') # # Preprocessing for categorical data # categorical_transformer = Pipeline(steps=[ # ('imputer', SimpleImputer(strategy='constant')), # ('onehot', OneHotEncoder(handle_unknown='ignore')) # ]) # # Bundle preprocessing for numerical and categorical data # preprocessor = ColumnTransformer( # transformers=[ # ('num', numerical_transformer, numerical_cols), # ('cat', categorical_transformer, categorical_cols) # ]) # # Define model # model = RandomForestRegressor(n_estimators=100, random_state=0) step_1.a.assert_check_passed() # Lines below will give you a hint or solution code #step_1.a.hint() #step_1.a.solution() ###Output _____no_output_____ ###Markdown Part BRun the code cell below without changes.To pass this step, you need to have defined a pipeline in **Part A** that achieves lower MAE than the code above. You're encouraged to take your time here and try out many different approaches, to see how low you can get the MAE! 
(_If your code does not pass, please amend the preprocessing steps and model in Part A._) ###Code # Bundle preprocessing and modeling code in a pipeline my_pipeline = Pipeline(steps=[('preprocessor', preprocessor), ('model', model) ]) # Preprocessing of training data, fit model my_pipeline.fit(X_train, y_train) # Preprocessing of validation data, get predictions preds = my_pipeline.predict(X_valid) # Evaluate the model score = mean_absolute_error(y_valid, preds) print('MAE:', score) # Check your answer step_1.b.check() #%%RM_IF(PROD) step_1.b.assert_check_passed() # Line below will give you a hint #step_1.b.hint() ###Output _____no_output_____ ###Markdown Step 2: Generate test predictionsNow, you'll use your trained model to generate predictions with the test data. ###Code # Preprocessing of test data, fit model preds_test = my_pipeline.predict(X_test) # Your code here # Check your answer step_2.check() # #%%RM_IF(PROD)% # preds_test = my_pipeline.predict(X_test) step_2.assert_check_passed() # Lines below will give you a hint or solution code #step_2.hint() #step_2.solution() ###Output _____no_output_____ ###Markdown Run the next code cell without changes to save your results to a CSV file that can be submitted directly to the competition. ###Code # Save test predictions to file output = pd.DataFrame({'Id': X_test.index, 'SalePrice': preds_test}) output.to_csv('submission.csv', index=False) ###Output _____no_output_____
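###Markdown One way to compare the many preprocessing/model combinations suggested in Step 1 is to score each candidate pipeline with cross-validation instead of the single train/validation split. The helper below is a sketch and not part of the graded exercise; `pipeline_cv_mae` is a hypothetical name, and the commented usage assumes the `my_pipeline`, `X_train` and `y_train` objects defined above.

```python
from sklearn.model_selection import cross_val_score

def pipeline_cv_mae(pipeline, X, y, folds=5):
    """Mean cross-validated MAE of a preprocessing + model pipeline."""
    # cross_val_score returns negative MAE, so flip the sign
    scores = -1 * cross_val_score(pipeline, X, y, cv=folds,
                                  scoring='neg_mean_absolute_error')
    return scores.mean()

# Example (hypothetical usage with the objects defined in this notebook):
# print('CV MAE:', pipeline_cv_mae(my_pipeline, X_train, y_train))
```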
001-Jupyter/001-Tutorials/002-IPython-Cookbook/chapter03_notebook/05_custom_notebook.ipynb
###Markdown 3.5. Configuring the Jupyter Notebook ###Code %ls ~/.jupyter/jupyter_notebook_config.py %cat ~/.jupyter/jupyter_notebook_config.py ###Output _____no_output_____ ###Markdown ```c.ContentsManager.untitled_notebook = 'MyNotebook'``` ###Code %ls ~/.jupyter/nbconfig/ %cat ~/.jupyter/nbconfig/notebook.json %%javascript var cell = Jupyter.notebook.get_selected_cell(); var config = cell.config; var patch = { CodeCell:{ cm_config: {autoCloseBrackets: false} } } config.update(patch) %cat ~/.jupyter/nbconfig/notebook.json from notebook.services.config import ConfigManager c = ConfigManager() c.get('notebook').get('CodeCell') c.update('notebook', {"CodeCell": {"cm_config": {"autoCloseBrackets": True}}}) %cat ~/.jupyter/nbconfig/notebook.json ###Output _____no_output_____
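###Markdown The same `ConfigManager` mechanism shown above works for any frontend option stored in `nbconfig/notebook.json`. As a further sketch (assuming the classic notebook frontend, where code cells expose a CodeMirror `lineNumbers` option), default line numbering could be persisted the same way `autoCloseBrackets` was updated:

```python
from notebook.services.config import ConfigManager

cm = ConfigManager()
# Persist another CodeMirror option in ~/.jupyter/nbconfig/notebook.json
cm.update('notebook', {"CodeCell": {"cm_config": {"lineNumbers": True}}})
print(cm.get('notebook').get('CodeCell'))
```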
Lection3.ipynb
###Markdown Generator functions Declaration ###Code def F(a, b, c):
    aa = 0
    while True:
        if aa == c:
            return
        yield a
        a = b
        b = a + b
        aa += 1
###Output _____no_output_____ ###Markdown Usage ###Code if __name__ == '__main__':
    fib = F(0, 1, 10)
    for i in range(12):
        fib_num = fib.__next__()
        print("{}: {}".format(i, fib_num))

    fib = F(0, 1, 10)
    for i in fib:
        print(i)
###Output 0
1
2
4
8
16
32
64
128
256
###Markdown Coroutines Declaration ###Code import string
from random import choice, randint


def to_upper():
    while True:
        st = (yield)
        print("ping: You said: {}".format(st))
        print("pong: We said: {}".format(st.upper()))
###Output _____no_output_____ ###Markdown Usage ###Code if __name__ == '__main__':
    up = to_upper()
    up.__next__()

    for i in range(15):
        min_char = 8
        max_char = 12
        allchar = string.ascii_letters \
            + string.punctuation \
            + string.digits
        password = "".join(
            choice(allchar)
            for x in range(randint(min_char, max_char))
        )
        up.send(password)
###Output ping: You said: DJNSVa9Uf
pong: We said: DJNSVA9UF
ping: You said: U*0N3-7jK:W
pong: We said: U*0N3-7JK:W
ping: You said: M$b]Jl5b%6C)
pong: We said: M$B]JL5B%6C)
ping: You said: +MFo>e0E<."9
pong: We said: +MFO>E0E<."9
ping: You said: K,e?I<S^;2!9
pong: We said: K,E?I<S^;2!9
ping: You said: !(;QW2SU\
pong: We said: !(;QW2SU\
ping: You said: ]HvM`&)+-`
pong: We said: ]HVM`&)+-`
ping: You said: M2f*KXQYo,=%
pong: We said: M2F*KXQYO,=%
ping: You said: &s;>E"GRB`
pong: We said: &S;>E"GRB`
ping: You said: ZabtTUA/`MjT
pong: We said: ZABTTUA/`MJT
ping: You said: Xw2Nns"hOM
pong: We said: XW2NNS"HOM
ping: You said: !TY"J<!,~5
pong: We said: !TY"J<!,~5
ping: You said: %zv87G3KcqEB
pong: We said: %ZV87G3KCQEB
ping: You said: Y>-#,58E]
pong: We said: Y>-#,58E]
ping: You said: y2P^>Ot&&
pong: We said: Y2P^>OT&&
###Markdown Closures Declaration ###Code def countdown(num: int):
    def next():
        nonlocal num
        r = num
        num -= 1
        return r
    return next
###Output _____no_output_____ ###Markdown Usage ###Code if __name__ == '__main__':
    nadia = countdown(10)
    print(dir(nadia))
    while True:
        v = nadia()
        if v < -150:
            break
        print(v)
###Output _____no_output_____
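###Markdown A pattern that often accompanies the coroutine example above is a priming decorator, so callers do not have to remember the initial `__next__()` call before `send()`. The sketch below is added for illustration and is not part of the original lecture; `coroutine` and `echo_upper` are hypothetical names.

```python
from functools import wraps

def coroutine(func):
    """Decorator that advances a coroutine to its first (yield) automatically."""
    @wraps(func)
    def primed(*args, **kwargs):
        gen = func(*args, **kwargs)
        next(gen)  # prime it so send() can be used immediately
        return gen
    return primed

@coroutine
def echo_upper():
    while True:
        st = (yield)
        print("pong: We said: {}".format(st.upper()))

up = echo_upper()   # no manual up.__next__() needed
up.send("hello")    # pong: We said: HELLO
```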
notebooks/pre_paper_draft/group_meeting_plots.ipynb
###Markdown Creation of the trajectory lists and putting them in dictionnariesFormat of the dictionnaries : trajectories[region][rev/non_rev/syn/non_syn] ###Code regions = ["env", "pol", "gag"] trajectories = {} for region in regions: # Create the dictionary with the different regions tmp_trajectories = create_all_patient_trajectories(region) tmp_trajectories = [traj for traj in tmp_trajectories if traj.t[-1] != 0] trajectories[region] = tmp_trajectories # Split into sub dictionnaries (rev, non_rev and all) rev = [traj for traj in trajectories[region] if traj.reversion == True] non_rev = [traj for traj in trajectories[region] if traj.reversion == False] syn = [traj for traj in trajectories[region] if traj.synonymous == True] non_syn = [traj for traj in trajectories[region] if traj.synonymous == False] trajectories[region] = {"rev": rev, "non_rev": non_rev, "syn": syn, "non_syn": non_syn, "all": trajectories[region]} ###Output _____no_output_____ ###Markdown Computation of the mean activity in time ###Code def get_mean_in_time(trajectories, nb_bins=15, freq_range=[0.4, 0.6]): """ Computes the mean frequency in time of a set of trajectories from the point they are seen in the freq_range window. Returns the middle of the time bins and the computed frequency mean. """ # Create bins and select trajectories going through the freq_range time_bins = np.linspace(-950, 2000, nb_bins) trajectories = [traj for traj in trajectories if np.sum(np.logical_and( traj.frequencies >= freq_range[0], traj.frequencies < freq_range[1]), dtype=bool)] # Offset trajectories to set t=0 at the point they are seen in the freq_range and adds all the frequencies / times # to arrays for later computation of mean t_traj = np.array([]) f_traj = np.array([]) for traj in trajectories: idx = np.where(np.logical_and(traj.frequencies >= freq_range[0], traj.frequencies < freq_range[1]))[0][0] traj.t = traj.t - traj.t[idx] t_traj = np.concatenate((t_traj, traj.t)) f_traj = np.concatenate((f_traj, traj.frequencies)) # Binning of all the data in the time bins filtered_fixed = [traj for traj in trajectories if traj.fixation == "fixed"] filtered_lost = [traj for traj in trajectories if traj.fixation == "lost"] freqs, fixed, lost = [], [], [] for ii in range(len(time_bins) - 1): freqs = freqs + [f_traj[np.logical_and(t_traj >= time_bins[ii], t_traj < time_bins[ii + 1])]] fixed = fixed + [len([traj for traj in filtered_fixed if traj.t[-1] < time_bins[ii]])] lost = lost + [len([traj for traj in filtered_lost if traj.t[-1] < time_bins[ii]])] # Computation of the mean in each bin, active trajectories contribute their current frequency, # fixed contribute 1 and lost contribute 0 mean = [] for ii in range(len(freqs)): mean = mean + [np.sum(freqs[ii]) + fixed[ii]] mean[-1] /= (len(freqs[ii]) + fixed[ii] + lost[ii]) nb_active = [len(freq) for freq in freqs] nb_dead = [fixed[ii] + lost[ii] for ii in range(len(fixed))] return 0.5 * (time_bins[1:] + time_bins[:-1]), mean, nb_active, nb_dead means = {} freq_ranges = [[0.2, 0.4], [0.4, 0.6], [0.6, 0.8]] times = [] for freq_range in freq_ranges: means[str(freq_range)] = {} for region in regions: means[str(freq_range)][region] = {} for key in trajectories[region].keys(): times, means[str(freq_range)][region][key], _, _ = get_mean_in_time(trajectories[region][key], freq_range=freq_range) ###Output _____no_output_____ ###Markdown Mean frequency in time plot ###Code import matplotlib.gridspec as gridspec colors = ["r", "b", "g"] fontsize=16 fontsize2 = 20 fig, axs = plt.subplots(ncols=3, nrows=2, 
figsize=(16,10), sharex=True, sharey=True) # fig.tight_layout() for idx_row, split_type in enumerate([["rev", "non_rev"], ["syn", "non_syn"]]): for idx_col, region in enumerate(regions): for idx_colors,freq_range in enumerate(freq_ranges): axs[idx_row, idx_col].plot(times, means[str(freq_range)][region][split_type[0]], f'{colors[idx_colors]}-') axs[idx_row, idx_col].plot(times, means[str(freq_range)][region][split_type[1]], f'{colors[idx_colors]}--') axs[idx_row, idx_col].grid() if idx_row == 1: axs[idx_row,idx_col].set_xlabel("Time [days]", fontsize=fontsize) if idx_col == 0: axs[idx_row,idx_col].set_ylabel("Frequency", fontsize=fontsize) axs[idx_row,idx_col].text(-0.3, 0.45, split_type[0], transform=axs[idx_row,idx_col].transAxes, fontsize=fontsize2) if idx_row == 0: axs[idx_row,idx_col].text(0.45, 1.1, region, transform=axs[idx_row,idx_col].transAxes, fontsize=fontsize2) fig.subplots_adjust(wspace=0.05, hspace=0.05) plt.savefig("Mean_frequency_in_time", format="png") ###Output _____no_output_____ ###Markdown Activity plots Creation of the trajectory lists and putting them in dictionnariesFormat of the dictionnaries : trajectories[region][rev/non_rev/syn/non_syn] ###Code regions = ["env", "pol", "gag"] trajectories = {} for region in regions: # Create the dictionary with the different regions tmp_trajectories = create_all_patient_trajectories(region) tmp_trajectories = [traj for traj in tmp_trajectories if traj.t[-1] != 0] trajectories[region] = tmp_trajectories # Split into sub dictionnaries (rev, non_rev and all) rev = [traj for traj in trajectories[region] if traj.reversion == True] non_rev = [traj for traj in trajectories[region] if traj.reversion == False] syn = [traj for traj in trajectories[region] if traj.synonymous == True] non_syn = [traj for traj in trajectories[region] if traj.synonymous == False] trajectories[region] = {"rev": rev, "non_rev": non_rev, "syn": syn, "non_syn": non_syn, "all": trajectories[region]} normalize = True activities = {} for region in regions: tmp_dict = {} for traj_type in ["rev", "non_rev", "syn", "non_syn"]: tmp_dict[traj_type] = get_average_activity(trajectories[region][traj_type], normalize) activities[region] = tmp_dict time_bins = activities["env"]["rev"]["time_bins"] ###Output _____no_output_____ ###Markdown Plot by regions ###Code # Red is fixed, blue is lost, green is active colors = ["r", "b", "g"] fontsize=16 fontsize2 = 20 fig, axs = plt.subplots(ncols=3, nrows=2, figsize=(16,10), sharex=True, sharey=True) for idx_row, split_type in enumerate([["rev", "non_rev"], ["syn", "non_syn"]]): for idx_col, region in enumerate(regions): for idx_colors, activity in enumerate(["fixed", "lost", "active"]): axs[idx_row, idx_col].plot(time_bins, activities[region][split_type[0]][activity], f'{colors[idx_colors]}-') axs[idx_row, idx_col].plot(time_bins, activities[region][split_type[1]][activity], f'{colors[idx_colors]}--') axs[idx_row, idx_col].grid() if idx_row == 1: axs[idx_row,idx_col].set_xlabel("Time [days]", fontsize=fontsize) if idx_col == 0: axs[idx_row,idx_col].set_ylabel("Frequency", fontsize=fontsize) axs[idx_row,idx_col].text(-0.3, 0.45, split_type[0], transform=axs[idx_row,idx_col].transAxes, fontsize=fontsize2) if idx_row == 0: axs[idx_row,idx_col].text(0.45, 1.1, region, transform=axs[idx_row,idx_col].transAxes, fontsize=fontsize2) fig.subplots_adjust(wspace=0.05, hspace=0.05) plt.savefig("Activity_regions", format="png") ###Output _____no_output_____ ###Markdown Plot by activity type ###Code # Red is env, blue is pol, green is gag colors 
= ["r", "b", "g"] fontsize=16 fontsize2 = 20 fig, axs = plt.subplots(ncols=3, nrows=2, figsize=(16,10), sharex=True, sharey=True) for idx_row, split_type in enumerate([["rev", "non_rev"], ["syn", "non_syn"]]): for idx_col, region in enumerate(regions): for idx_colors, activity in enumerate(["fixed", "lost", "active"]): axs[idx_row, idx_colors].plot(time_bins, activities[region][split_type[0]][activity], f'{colors[idx_col]}-') axs[idx_row, idx_colors].plot(time_bins, activities[region][split_type[1]][activity], f'{colors[idx_col]}--') axs[idx_row, idx_colors].grid() if idx_row == 1: axs[idx_row,idx_colors].set_xlabel("Time [days]", fontsize=fontsize) if idx_colors == 0: axs[idx_row,idx_colors].set_ylabel("Frequency", fontsize=fontsize) axs[idx_row,idx_colors].text(-0.3, 0.45, split_type[0], transform=axs[idx_row,idx_colors].transAxes, fontsize=fontsize2) if idx_row == 0: axs[idx_row,idx_colors].text(0.45, 1.1, activity, transform=axs[idx_row,idx_colors].transAxes, fontsize=fontsize2) fig.subplots_adjust(wspace=0.05, hspace=0.05) plt.savefig("Activity_type", format="png") ###Output _____no_output_____ ###Markdown Proba_fix plots ###Code from proba_fix import get_proba_fix regions = ["env", "pol", "gag"] trajectories = {} for region in regions: # Create the dictionary with the different regions tmp_trajectories = create_all_patient_trajectories(region) # tmp_trajectories = [traj for traj in tmp_trajectories if traj.t[-1] != 0] trajectories[region] = tmp_trajectories # Split into sub dictionnaries (rev, non_rev and all) rev = [traj for traj in trajectories[region] if traj.reversion == True] non_rev = [traj for traj in trajectories[region] if traj.reversion == False] syn = [traj for traj in trajectories[region] if traj.synonymous == True] non_syn = [traj for traj in trajectories[region] if traj.synonymous == False] trajectories[region] = {"rev": rev, "non_rev": non_rev, "syn": syn, "non_syn": non_syn, "all": trajectories[region]} pfix = {} for region in regions: pfix[region] = {} for key in trajectories[region].keys(): tmp_freq_bin, tmp_proba, tmp_err = get_proba_fix(trajectories[region][key]) pfix[region][key] = {"freq_bin": tmp_freq_bin, "proba": tmp_proba, "error": tmp_err} colors = ["r", "b", "g"] fontsize=16 fontsize2 = 20 fig, axs = plt.subplots(ncols=3, nrows=2, figsize=(16,10), sharex=True, sharey=True) for idx_row, split_type in enumerate([["rev", "non_rev"], ["syn", "non_syn"]]): for idx_col, region in enumerate(regions): axs[idx_row, idx_col].errorbar(pfix[region][split_type[0]]["freq_bin"], pfix[region][split_type[0]]["proba"], pfix[region][split_type[0]]["error"]) axs[idx_row, idx_col].errorbar(pfix[region][split_type[1]]["freq_bin"], pfix[region][split_type[1]]["proba"], pfix[region][split_type[1]]["error"]) axs[idx_row, idx_col].plot([0,1], [0,1], "k--") plt.xlim([-0.1, 1.1]) plt.ylim([-0.1, 1.1]) axs[idx_row, idx_col].grid() if idx_row == 1: axs[idx_row,idx_col].set_xlabel("Frequency bin", fontsize=fontsize) if idx_col == 0: axs[idx_row,idx_col].set_ylabel("P_fix", fontsize=fontsize) axs[idx_row,idx_col].text(-0.3, 0.45, split_type[0], transform=axs[idx_row,idx_col].transAxes, fontsize=fontsize2) if idx_row == 0: axs[idx_row,idx_col].text(0.45, 1.1, region, transform=axs[idx_row,idx_col].transAxes, fontsize=fontsize2) fig.subplots_adjust(wspace=0.05, hspace=0.05) plt.savefig("P_fix", format="png") ###Output _____no_output_____ ###Markdown Trajectory characterisation ###Code regions = ["env", "pol", "gag"] fast_sweeps = [157, 10, 7] colors = ["r", "b", "g"] freq_min = 0.05 
fontsize=16 fontsize2 = 20 plt.figure(figsize=(16,10)) plt.title("Trajectory length distribution", fontsize=fontsize2) for idx,region in enumerate(regions): trajectories = create_all_patient_trajectories(region) trajectories_2 = [traj for traj in trajectories if (np.sum(traj.frequencies > freq_min, dtype=bool) or traj.fixation=="fixed")] hist, bins = np.histogram([len(traj.t) for traj in trajectories], range=[0,12], bins=12) hist2, bins = np.histogram([len(traj.t) for traj in trajectories_2], range=[0,12], bins=12) hist[0] = fast_sweeps[idx] hist2[0] = fast_sweeps[idx] plt.plot(bins[:-1], hist, ".-", color=colors[idx]) plt.plot(bins[:-1], hist2, ".--", color=colors[idx]) plt.grid(which='both') plt.yscale('log') plt.xlabel("Number of points", fontsize=fontsize) plt.ylabel("# of trajectories", fontsize=fontsize) plt.savefig("Traj_length_distribution", format="png") plt.show() ###Output _____no_output_____
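###Markdown The binning in `get_mean_in_time` above reports only the per-bin mean. A possible extension, sketched here under the assumption that pooled `t_traj`/`f_traj` arrays are built the same way as inside that function (and ignoring the separate fixed/lost bookkeeping), is a bootstrap confidence interval per time bin; `binned_mean_with_ci` is a hypothetical helper name.

```python
import numpy as np

def binned_mean_with_ci(t, f, time_bins, n_boot=1000, seed=0):
    """Per-bin mean frequency with a 95% bootstrap confidence interval."""
    rng = np.random.default_rng(seed)
    centers = 0.5 * (time_bins[1:] + time_bins[:-1])
    means, lows, highs = [], [], []
    for lo, hi in zip(time_bins[:-1], time_bins[1:]):
        vals = f[(t >= lo) & (t < hi)]
        if len(vals) == 0:
            means.append(np.nan)
            lows.append(np.nan)
            highs.append(np.nan)
            continue
        # resample the frequencies in this bin with replacement
        boot = rng.choice(vals, size=(n_boot, len(vals))).mean(axis=1)
        means.append(vals.mean())
        lows.append(np.percentile(boot, 2.5))
        highs.append(np.percentile(boot, 97.5))
    return centers, np.array(means), np.array(lows), np.array(highs)
```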
notebooks/Entity/Product/Preprocessing/Entity_Product_Histogram.ipynb
###Markdown This notebook was used to visualize information about cluster distributions and to join information to final category domains to save them for later use ###Code import os import pandas as pd import progressbar import json data_path = '../src/data' mapping_corpus_path = data_path + r'/product/lspc2020_to_tablecorpus' mapping_corpus_path_2 = data_path + r'/product/lspc2020_to_tablecorpus/Cleaned' table_corpus_path = data_path + r'/product/product_top100/cleaned' table_corpus_path_with_id = data_path + r'/product/product_top100/cleaned/with_id' table_corpus_path2 = data_path + r'/product/product_minimum3/cleaned/with_id' mapping_corpus_path_all = data_path + r'/product/lspcV2020' ###Output _____no_output_____ ###Markdown Get information from preprocessed cluster amounts to derive histograms ###Code # get dictionary data_path_2 = '../src/data/product/lspc2020_to_tablecorpus/Cleaned/allocation_amount_only_set_dict.json' with open(data_path_2) as f: data_2=json.load(f) df_set=pd.DataFrame.from_dict(data_2, orient='index') df_set=df_set.reset_index().rename(columns={0:"Amount",'index':"cluster_id"}) # We discard all clusters with less than 2 entries, cause we cannot match anything there, so 1,6 million clusters remain df_set=df_set[df_set['Amount']>1] df_set.set_index('cluster_id').describe().T.round() df_set.plot(x='cluster_id',y='Amount',kind='hist') # filter on clusters with more entries to get information, here are still 20k clusters left df_15=df_set[df_set['Amount']>15] df_15 df_15.set_index('cluster_id').describe().T.round() df_15.plot(x='cluster_id',y='Amount',kind='hist') # filter on clusters with more entries to get information, here are still 20k clusters left df_150=df_set[df_set['Amount']>150] df_150 df_150.set_index('cluster_id').describe().T.round() df_150.plot(x='cluster_id',y='Amount',kind='hist') df_15_500=df_15[df_15['Amount']<500] df_15_500.plot(x='cluster_id',y='Amount',kind='hist') ###Output _____no_output_____ ###Markdown This code part was used in an earlier stage to join data of electronics and clothes with complete information provided in the corpus ###Code # get dictionaries electronics_path = '../src/data/product/product_electronics_v2/electronics_dict.json' clothes_path = '../src/data/product/product_clothes_v2/clothes_dict.json' mapping_corpus_path_2 = '../src/data/product/lspc2020_to_tablecorpus/Cleaned' with open(electronics_path) as f: electronics_data=json.load(f) with open(clothes_path) as f: clothes_data=json.load(f) #clean the dictionaries by getting rid of the first key cleaned_dictionary_electronics={} for value in electronics_data.values(): cleaned_dictionary_electronics.update(value) #clean the dictionaries by getting rid of the first key cleaned_dictionary_clothes={} for value in clothes_data.values(): cleaned_dictionary_clothes.update(value) #put the dictionaries into dataframes df_electronics=pd.DataFrame.from_dict(cleaned_dictionary_electronics, orient='index') df_clothes=pd.DataFrame.from_dict(cleaned_dictionary_clothes, orient='index') # use the cleaned tables to append into a series and the get a dataframe from that with the remaining cluster ids count = 0 data=[] with progressbar.ProgressBar(max_value=len(zip_files_mapping)) as bar: for zip_file in zip_files_mapping: print('/{}'.format(zip_file)) df = pd.read_json(mapping_corpus_path_2 + '/{}'.format(zip_file), compression='gzip', lines=True) data.append(df) count += 1 bar.update(count) df_large= pd.concat(data, ignore_index=True) #df.large.to_json(os.path.join(mapping_corpus_path_2, 
'concatentation'), compression='gzip', orient='records', lines=True) zip_files_mapping = [file for file in os.listdir(mapping_corpus_path_all) if file.endswith('.json.gz')] # use the cleaned tables to append into a series and the get a dataframe from that with the remaining cluster ids count = 0 data=[] with progressbar.ProgressBar(max_value=len(zip_files_mapping)) as bar: for zip_file in zip_files_mapping: print('/{}'.format(zip_file)) df = pd.read_json(mapping_corpus_path_all + '/{}'.format(zip_file), compression='gzip', lines=True) data.append(df) count += 1 bar.update(count) df_large_all= pd.concat(data, ignore_index=True) #match product information to cleaned clusters df_large_matched = df_large.merge(df_large_all[['cluster_id','url','name','description','brand']], left_on=['cluster_id','url'], right_on = ['cluster_id','url'], how='left') df_large_matched.to_json(os.path.join(mapping_corpus_path_2, 'df_large_matched'), compression='gzip', orient='records', lines=True) #fill up missing values in both product category data frames to be able to split the tuples up df_electronics_filtered=df_electronics.applymap(lambda x: [0,0] if x is None else x) df_clothes_filtered=df_clothes.applymap(lambda x: [0,0] if x is None else x) # clean up the tables #split up tuples in in each column for each brand into two different columns table_id and row_id and concatente these rows df_electronics_cleaned=pd.DataFrame(columns=['table_id', 'row_id']) count = 0 with progressbar.ProgressBar(max_value=len(df_electronics_filtered.columns)) as bar: for i in range(len(df_electronics_filtered.columns)): df_electronics_cleaned = df_electronics_cleaned.append(pd.DataFrame(df_electronics_filtered[i].tolist(),columns=['table_id', 'row_id'], index=df_electronics_filtered.index)) count += 1 bar.update(count) #clean up the tables #split up tuples in in each column for each brand into two different columns table_id and row_id and concatente these rows df_clothes_cleaned=pd.DataFrame(columns=['table_id', 'row_id']) count = 0 with progressbar.ProgressBar(max_value=len(df_clothes_filtered.columns)) as bar: for i in range(len(df_clothes_filtered.columns)): df_clothes_cleaned = df_clothes_cleaned.append(pd.DataFrame(df_clothes_filtered[i].tolist(),columns=['table_id', 'row_id'], index=df_clothes_filtered.index)) count += 1 bar.update(count) #rename the columns to be able to join them into the cluster_id table df_electronics_cleaned=df_electronics_cleaned.reset_index().rename(columns={'index':"brand"}) df_electronics_cleaned #rename the columns to be able to join them into the cluster_id table df_clothes_cleaned=df_clothes_cleaned.reset_index().rename(columns={'index':"brand"}) df_clothes_cleaned #join the tables to the cluster tables by using left joins #filled up zero values will be discarded by the join condition df_joined_electronics = df_large.merge(df_electronics_cleaned, left_on=['table_id','row_id'], right_on = ['table_id','row_id'], how='left') df_joined_electronics = pd.read_json(os.path.join(mapping_corpus_path_2, 'joined_electronics'), compression='gzip', orient='records', lines=True) #join the tables to the cluster tables by using left joins #filled up zero values will be discarded by the join condition df_joined_clothes = df_large.merge(df_clothes_cleaned, left_on=['table_id','row_id'], right_on = ['table_id','row_id'], how='left') df_joined_clothes = pd.read_json(os.path.join(mapping_corpus_path_2, 'joined_clothes'), compression='gzip', orient='records', lines=True) df_joined_clothes ###Output 
_____no_output_____ ###Markdown Cluster statistics for product category electronics ###Code df_joined_electronics = pd.read_json(os.path.join(mapping_corpus_path_2, 'joined_electronics_v2'), compression='gzip', orient='records', lines=True) df_grouped_electronics = df_joined_electronics.groupby('cluster_id').count() # only look at clusters that have at least one brand associated df_set_electronics = df_grouped_electronics[df_grouped_electronics['brand_y']>0].reset_index()[['cluster_id','table_id']].rename(columns={'table_id':'Amount'}) # We discard all clusters with less than 2 entries, cause we cannot match anything there, so 1,6 million clusters remain df_set_electronics=df_set_electronics[df_set_electronics['Amount']>1] df_set_electronics df_set_electronics.set_index('cluster_id').describe().T.round() df_set_electronics.plot(x='cluster_id',y='Amount',kind='hist') # filter on clusters with more entries to get information, here are still 20k clusters left df_10_electronics=df_set_electronics[df_set_electronics['Amount']>10] df_10_electronics df_10_electronics.set_index('cluster_id').describe().T.round() df_10_electronics.plot(x='cluster_id',y='Amount',kind='hist') df_15_electronics=df_set_electronics[df_set_electronics['Amount']>15] df_15_electronics df_15_electronics.set_index('cluster_id').describe().T.round() df_15_electronics.plot(x='cluster_id',y='Amount',kind='hist') df_25_electronics=df_set_electronics[df_set_electronics['Amount']>25] df_25_electronics df_25_electronics.set_index('cluster_id').describe().T.round() df_25_electronics.plot(x='cluster_id',y='Amount',kind='hist') #merge brand name to cluster amount df_cluster_brand = df_15_electronics[df_15_electronics['Amount']<400].merge(df_joined_electronics.dropna()[['cluster_id','brand_y']].drop_duplicates('cluster_id', keep='last'), left_on=['cluster_id'], right_on = ['cluster_id'], how='left') df_cluster_brand #get the top clusters per brand df_top_clusters = df_cluster_brand.sort_values(['Amount'], ascending=False).drop_duplicates(subset=["brand_y"], keep="first") df_top_clusters ###Output _____no_output_____ ###Markdown Cluster statistics for product category clothes ###Code df_joined_clothes = pd.read_json(os.path.join(mapping_corpus_path_2, 'joined_clothes_v2'), compression='gzip', orient='records', lines=True) df_grouped_clothes = df_joined_clothes.groupby('cluster_id').count() # only look at clusters that have at least one brand associated df_set_clothes = df_grouped_clothes[df_grouped_clothes['brand_y']>0].reset_index()[['cluster_id','table_id']].rename(columns={'table_id':'Amount'}) # We discard all clusters with less than 2 entries, cause we cannot match anything there, so 1,6 million clusters remain df_set_clothes=df_set_clothes[df_set_clothes['Amount']>1] df_set_clothes df_set_clothes.set_index('cluster_id').describe().T.round() df_set_clothes.plot(x='cluster_id',y='Amount',kind='hist') df_10_clothes=df_set_clothes[df_set_clothes['Amount']>10] df_10_clothes df_10_clothes.set_index('cluster_id').describe().T.round() df_15_clothes=df_set_clothes[df_set_clothes['Amount']>15] df_15_clothes df_15_clothes.set_index('cluster_id').describe().T.round() df_15_clothes.plot(x='cluster_id',y='Amount',kind='hist') df_joined_clothes[df_joined_clothes['cluster_id']==78499693] #merge brand name to cluster amount df_cluster_brand_clothes = df_15_clothes[df_15_clothes['Amount']<400].merge(df_joined_clothes.dropna()[['cluster_id','brand_y']].drop_duplicates('cluster_id', keep='last'), left_on=['cluster_id'], right_on = ['cluster_id'], 
how='left') df_cluster_brand_clothes #get the top clusters per brand df_top_clusters_clothes = df_cluster_brand_clothes.sort_values(['Amount'], ascending=False).drop_duplicates(subset=["brand_y"], keep="first") df_top_clusters_clothes df_joined_clothes[(df_joined_clothes['cluster_id']==22374915)] ###Output _____no_output_____ ###Markdown Get information about the cluster distribution per table to get a first glance at which tables for which clusters have an overlap to get good training dat Have at first a look at electronic products¶ ###Code df_grouped_electronics_tables = df_joined_electronics.groupby('table_id').count() # only look at clusters that have at least one brand associated df_set_electronics_tables = df_grouped_electronics_tables[df_grouped_electronics_tables['brand']>0].reset_index()[['cluster_id','table_id']].rename(columns={'cluster_id':'Amount'}) df_set_electronics_tables.set_index('table_id').describe().T.round() df_75_electronics_tables=df_set_electronics_tables[df_set_electronics_tables['Amount']>75] df_75_electronics_tables df_75_electronics_tables.set_index('table_id').describe().T.round() df_75_electronics_tables.plot(x='table_id',y='Amount',kind='hist') df_150_electronics_tables=df_set_electronics_tables[df_set_electronics_tables['Amount']>150] df_150_electronics_tables df_150_electronics_tables.set_index('table_id').describe().T.round() df_150_electronics_tables.plot(x='table_id',y='Amount',kind='hist') df_joined_clothes df_grouped_clothes_tables = df_joined_clothes.groupby('table_id').count() # only look at clusters that have at least one brand associated df_set_clothes_tables = df_grouped_clothes_tables[df_grouped_clothes_tables['brand']>0].reset_index()[['cluster_id','table_id']].rename(columns={'cluster_id':'Amount'}) df_set_clothes_tables.set_index('table_id').describe().T.round() df_75_clothes_tables=df_set_clothes_tables[df_set_clothes_tables['Amount']>75] df_75_clothes_tables df_75_clothes_tables.set_index('table_id').describe().T.round() df_75_clothes_tables.plot(x='table_id',y='Amount',kind='hist') df_150_clothes_tables=df_set_clothes_tables[df_set_clothes_tables['Amount']>150] df_150_clothes_tables df_150_clothes_tables.set_index('table_id').describe().T.round() df_150_clothes_tables.plot(x='table_id',y='Amount',kind='hist') df_large = pd.read_json(os.path.join(mapping_corpus_path_2, 'df_large_matched.json'), compression='gzip', orient='records', lines=True) ###Output _____no_output_____ ###Markdown Use the defined approach for cleaning and then joining all information to every single set of different categories to save them for later use ###Code # get dictionaries electronics_path = '../src/data/product/product_electronics_v3/electronics_dict.json' clothes_path = '../src/data/product/product_clothes_v3/clothes_dict.json' bikes_path = '../src/data/product/product_bikes/bikes_dict.json' cars_path = '../src/data/product/product_cars/cars_dict.json' drugstore_path = '../src/data/product/product_drugstore/drugstore_dict.json' technology_path = '../src/data/product/product_technology/technology_dict.json' tools_path = '../src/data/product/product_tools/tools_dict.json' mapping_corpus_path_2 = '../src/data/product/lspc2020_to_tablecorpus/Cleaned' with open(electronics_path) as f: electronics_data=json.load(f) with open(clothes_path) as f: clothes_data=json.load(f) with open(bikes_path) as f: bikes_data=json.load(f) with open(cars_path) as f: cars_data=json.load(f) with open(drugstore_path) as f: drugstore_data=json.load(f) with open(technology_path) as f: 
technology_data=json.load(f) with open(tools_path) as f: tools_data=json.load(f) #clean the dictionaries by getting rid of the first key cleaned_dictionary_electronics={} for value in electronics_data.values(): cleaned_dictionary_electronics.update(value) #clean the dictionaries by getting rid of the first key cleaned_dictionary_clothes={} for value in clothes_data.values(): cleaned_dictionary_clothes.update(value) #clean the dictionaries by getting rid of the first key cleaned_dictionary_bikes={} for value in bikes_data.values(): cleaned_dictionary_bikes.update(value) #clean the dictionaries by getting rid of the first key cleaned_dictionary_cars={} for value in cars_data.values(): cleaned_dictionary_cars.update(value) #clean the dictionaries by getting rid of the first key cleaned_dictionary_drugstore={} for value in drugstore_data.values(): cleaned_dictionary_drugstore.update(value) #clean the dictionaries by getting rid of the first key cleaned_dictionary_technology={} for value in technology_data.values(): cleaned_dictionary_technology.update(value) #clean the dictionaries by getting rid of the first key cleaned_dictionary_tools={} for value in tools_data.values(): cleaned_dictionary_tools.update(value) #put the dictionaries into dataframes df_electronics=pd.DataFrame.from_dict(cleaned_dictionary_electronics, orient='index') df_clothes=pd.DataFrame.from_dict(cleaned_dictionary_clothes, orient='index') df_bikes=pd.DataFrame.from_dict(cleaned_dictionary_bikes, orient='index') df_cars=pd.DataFrame.from_dict(cleaned_dictionary_cars, orient='index') df_drugstore=pd.DataFrame.from_dict(cleaned_dictionary_drugstore, orient='index') df_technology=pd.DataFrame.from_dict(cleaned_dictionary_technology, orient='index') df_tools=pd.DataFrame.from_dict(cleaned_dictionary_tools, orient='index') #fill up missing values in both product category data frames to be able to split the tuples up df_electronics_filtered=df_electronics.applymap(lambda x: [0,0] if x is None else x) df_clothes_filtered=df_clothes.applymap(lambda x: [0,0] if x is None else x) #fill up missing values in both product category data frames to be able to split the tuples up df_bikes_filtered=df_bikes.applymap(lambda x: [0,0] if x is None else x) df_cars_filtered=df_cars.applymap(lambda x: [0,0] if x is None else x) #fill up missing values in both product category data frames to be able to split the tuples up df_drugstore_filtered=df_drugstore.applymap(lambda x: [0,0] if x is None else x) df_technology_filtered=df_technology.applymap(lambda x: [0,0] if x is None else x) #fill up missing values in both product category data frames to be able to split the tuples up df_tools_filtered=df_tools.applymap(lambda x: [0,0] if x is None else x) # clean up the tables #split up tuples in in each column for each brand into two different columns table_id and row_id and concatente these rows df_electronics_cleaned=pd.DataFrame(columns=['table_id', 'row_id']) count = 0 with progressbar.ProgressBar(max_value=len(df_electronics_filtered.columns)) as bar: for i in range(len(df_electronics_filtered.columns)): df_electronics_cleaned = df_electronics_cleaned.append(pd.DataFrame(df_electronics_filtered[i].tolist(),columns=['table_id', 'row_id'], index=df_electronics_filtered.index)) count += 1 bar.update(count) #clean up the tables #split up tuples in in each column for each brand into two different columns table_id and row_id and concatente these rows df_clothes_cleaned=pd.DataFrame(columns=['table_id', 'row_id']) count = 0 with 
progressbar.ProgressBar(max_value=len(df_clothes_filtered.columns)) as bar: for i in range(len(df_clothes_filtered.columns)): df_clothes_cleaned = df_clothes_cleaned.append(pd.DataFrame(df_clothes_filtered[i].tolist(),columns=['table_id', 'row_id'], index=df_clothes_filtered.index)) count += 1 bar.update(count) # clean up the tables #split up tuples in in each column for each brand into two different columns table_id and row_id and concatente these rows df_bikes_cleaned=pd.DataFrame(columns=['table_id', 'row_id']) count = 0 with progressbar.ProgressBar(max_value=len(df_bikes_filtered.columns)) as bar: for i in range(len(df_bikes_filtered.columns)): df_bikes_cleaned = df_bikes_cleaned.append(pd.DataFrame(df_bikes_filtered[i].tolist(),columns=['table_id', 'row_id'], index=df_bikes_filtered.index)) count += 1 bar.update(count) # clean up the tables #split up tuples in in each column for each brand into two different columns table_id and row_id and concatente these rows df_cars_cleaned=pd.DataFrame(columns=['table_id', 'row_id']) count = 0 with progressbar.ProgressBar(max_value=len(df_cars_filtered.columns)) as bar: for i in range(len(df_cars_filtered.columns)): df_cars_cleaned = df_cars_cleaned.append(pd.DataFrame(df_cars_filtered[i].tolist(),columns=['table_id', 'row_id'], index=df_cars_filtered.index)) count += 1 bar.update(count) # clean up the tables #split up tuples in in each column for each brand into two different columns table_id and row_id and concatente these rows df_drugstore_cleaned=pd.DataFrame(columns=['table_id', 'row_id']) count = 0 with progressbar.ProgressBar(max_value=len(df_drugstore_filtered.columns)) as bar: for i in range(len(df_drugstore_filtered.columns)): df_drugstore_cleaned = df_drugstore_cleaned.append(pd.DataFrame(df_drugstore_filtered[i].tolist(),columns=['table_id', 'row_id'], index=df_drugstore_filtered.index)) count += 1 bar.update(count) # clean up the tables #split up tuples in in each column for each brand into two different columns table_id and row_id and concatente these rows df_technology_cleaned=pd.DataFrame(columns=['table_id', 'row_id']) count = 0 with progressbar.ProgressBar(max_value=len(df_technology_filtered.columns)) as bar: for i in range(len(df_technology_filtered.columns)): df_technology_cleaned = df_technology_cleaned.append(pd.DataFrame(df_technology_filtered[i].tolist(),columns=['table_id', 'row_id'], index=df_technology_filtered.index)) count += 1 bar.update(count) # clean up the tables #split up tuples in in each column for each brand into two different columns table_id and row_id and concatente these rows df_tools_cleaned=pd.DataFrame(columns=['table_id', 'row_id']) count = 0 with progressbar.ProgressBar(max_value=len(df_tools_filtered.columns)) as bar: for i in range(len(df_tools_filtered.columns)): df_tools_cleaned = df_tools_cleaned.append(pd.DataFrame(df_tools_filtered[i].tolist(),columns=['table_id', 'row_id'], index=df_tools_filtered.index)) count += 1 bar.update(count) #rename the columns to be able to join them into the cluster_id table df_electronics_cleaned=df_electronics_cleaned.reset_index().rename(columns={'index':"brand"}) df_clothes_cleaned=df_clothes_cleaned.reset_index().rename(columns={'index':"brand"}) df_bikes_cleaned=df_bikes_cleaned.reset_index().rename(columns={'index':"brand"}) df_cars_cleaned=df_cars_cleaned.reset_index().rename(columns={'index':"brand"}) df_drugstore_cleaned=df_drugstore_cleaned.reset_index().rename(columns={'index':"brand"}) 
df_technology_cleaned=df_technology_cleaned.reset_index().rename(columns={'index':"brand"}) df_tools_cleaned=df_tools_cleaned.reset_index().rename(columns={'index':"brand"}) df_joined_electronics= df_large.merge(df_electronics_cleaned, left_on=['table_id','row_id'], right_on = ['table_id','row_id'], how='left') df_joined_clothes = df_large.merge(df_clothes_cleaned, left_on=['table_id','row_id'], right_on = ['table_id','row_id'], how='left') df_joined_bikes = df_large.merge(df_bikes_cleaned, left_on=['table_id','row_id'], right_on = ['table_id','row_id'], how='left') df_joined_cars = df_large.merge(df_cars_cleaned, left_on=['table_id','row_id'], right_on = ['table_id','row_id'], how='left') df_joined_drugstore = df_large.merge(df_drugstore_cleaned, left_on=['table_id','row_id'], right_on = ['table_id','row_id'], how='left') df_joined_technology = df_large.merge(df_technology_cleaned, left_on=['table_id','row_id'], right_on = ['table_id','row_id'], how='left') df_joined_tools = df_large.merge(df_tools_cleaned, left_on=['table_id','row_id'], right_on = ['table_id','row_id'], how='left') df_joined_electronics.to_json(mapping_corpus_path_2 + '/joined_electronics_v3.json', compression='gzip', orient='records', lines=True) df_joined_clothes.to_json(mapping_corpus_path_2 + '/joined_clothes_v3.json', compression='gzip', orient='records', lines=True) df_joined_bikes.to_json(mapping_corpus_path_2 + '/joined_bikes.json', compression='gzip', orient='records', lines=True) df_joined_cars.to_json(mapping_corpus_path_2 + '/joined_cars.json', compression='gzip', orient='records', lines=True) df_joined_drugstore.to_json(mapping_corpus_path_2 + '/joined_drugstore.json', compression='gzip', orient='records', lines=True) df_joined_technology.to_json(mapping_corpus_path_2 + '/joined_technology.json', compression='gzip', orient='records', lines=True) df_joined_tools.to_json(mapping_corpus_path_2 + '/joined_tools.json', compression='gzip', orient='records', lines=True) ###Output _____no_output_____
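###Markdown The seven per-category blocks above repeat the same flatten / pad / split / rename steps before the joins. A sketch of how they could be collapsed into a single helper, assuming each raw dictionary has the same nested layout as `electronics_data`; `explode_category_dict` is a hypothetical name.

```python
import pandas as pd

def explode_category_dict(raw_dict):
    """Flatten one category dictionary into a (brand, table_id, row_id) frame."""
    flat = {}
    for value in raw_dict.values():   # drop the outer key
        flat.update(value)
    df = pd.DataFrame.from_dict(flat, orient='index')
    df = df.applymap(lambda x: [0, 0] if x is None else x)   # pad missing entries
    parts = [pd.DataFrame(df[col].tolist(),
                          columns=['table_id', 'row_id'],
                          index=df.index)
             for col in df.columns]
    return (pd.concat(parts)
              .reset_index()
              .rename(columns={'index': 'brand'}))

# Hypothetical usage replacing one of the repeated blocks:
# df_bikes_cleaned = explode_category_dict(bikes_data)
```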
analysis/figure_nbs/FIGURE_metastable_state_comparison.ipynb
###Markdown Comparison to Xtal structure ###Code protein = '1fme' data_dir = Path('../data/msms/1fme/metastable_states/') out_dir = Path(f"{protein}") xtal_pdb = Path('/Users/robertarbon/Documents/Research/fast_folders/analysis/compare_structures').joinpath(f'{protein}.pdb') models = [53, 60, 52, 47, 81, 86] title_by_ix = {53: "1: Best (Dihedrals)", 60: '2: Best (Contacts)', 52: "3: Best (logit(contacts))", 47: '4: Worst (dihedrals)', 81: '5: Best gap (dihedrals)', 86: '7: Best gap (logit(contacts))' } model_titles = [title_by_ix[i] for i in models] state_ix = 1 mod_ix = 53 all_dfs = [] for mod_ix in models: for state_ix in [0, 1]: traj = md.load(str(data_dir.joinpath(f'hp_{mod_ix}', f'state_{state_ix}.xtc')), top=str(xtal_pdb)) xtal = md.load(str(xtal_pdb))[0] traj = traj.superpose(xtal) rmsd = md.rmsd(traj,xtal)*10 df = pd.DataFrame(dict(rmsd=rmsd)) if mod_ix ==86: df['state'] = ['A', 'B'][state_ix] else: df['state'] = ['B', 'A'][state_ix] df['model'] = title_by_ix[mod_ix] all_dfs.append(df) df = pd.concat(all_dfs) with sns.plotting_context('talk'): g = sns.displot(df,x='rmsd', hue='model', col='state', kind='kde', col_order=['A', 'B']) plt.savefig('/Users/robertarbon/Documents/Talks/2022-03-10-Mey-group/xtal_comparison.pdf', bbox_inches='tight') ###Output _____no_output_____ ###Markdown Comparison of populations ###Code root_dir = Path("/Users/robertarbon/Documents/Research/msm_sensitivity_analysis/data/msms") traj_dir = Path("/Users/robertarbon/Data/DESRES") title_by_ix = {53: "1: Best (Dihedrals)", 60: '2: Best (Contacts)', 52: "3: Best (logit(contacts))", 47: '4: Worst overall', 81: '5: Best gap (dihedrals)', 86: '7: Best gap (logit(contacts))' } lag = 41 protein = '1fme' system_name='BBA' n_procs = 2 out_dir = Path('/Users/robertarbon/Documents/Talks/2022-03-10-Mey-group/xtal_comparison.pdf', bbox_inches='tight') top_path = list(traj_dir.rglob(f"*{protein.upper()}*/**/*.pdb"))[0] assert top_path all_dfs = [] for hp_ix in models: dtraj_paths = list(root_dir.joinpath(protein, 'dtrajs', f"hp_{hp_ix}").glob(f'*{protein.upper()}*.npy')) dtrajs = [np.load(str(x)) for x in dtraj_paths] mod = pyemma.msm.estimate_markov_model(dtrajs, lag=lag) hmm = mod.coarse_grain(n_procs) sds = hmm.stationary_distribution sds = np.sort(sds) if mod_ix ==86: df = pd.DataFrame({'$\pi$': sds, 'state': ['A', 'B']}) else: df = pd.DataFrame({'$\pi$': sds, 'state': ['B', 'A']}) df['model'] = title_by_ix[hp_ix] all_dfs.append(df) df_sds = pd.concat(all_dfs) df_sds with sns.plotting_context('talk'): g = sns.barplot(data=df_sds, x='$\pi$', y='model', hue='state', orient='horizontal') plt.savefig('/Users/robertarbon/Documents/Talks/2022-03-10-Mey-group/pop_comparison.pdf', bbox_inches='tight') ###Output _____no_output_____
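###Markdown Beyond the RMSD distributions plotted above, a compact summary is the fraction of frames in each metastable state that fall within some cutoff of the crystal structure. The sketch below assumes the per-state RMSD arrays (in Angstrom) already computed in this notebook; the 2.5 Å cutoff and the function name are illustrative choices, not values taken from the analysis.

```python
import numpy as np

def native_fraction(rmsd_by_state, cutoff=2.5):
    """Fraction of frames within `cutoff` Angstrom of the crystal, per state."""
    return {state: float(np.mean(np.asarray(r) < cutoff))
            for state, r in rmsd_by_state.items()}

# Hypothetical usage with the dataframe built above:
# native_fraction({s: df.loc[df.state == s, 'rmsd'].values for s in ['A', 'B']})
```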
deeplearning.ai - TensorFlow in Practice Specialization/deeplearning.ai - Convolutional Neural Networks in TensorFlow/module2- Augmentation/Exercise_2_Cats_vs_Dogs_using_augmentation_Question-FINAL.ipynb
###Markdown salimt ###Code # ATTENTION: Please do not alter any of the provided code in the exercise. Only add your own code where indicated # ATTENTION: Please do not add or remove any cells in the exercise. The grader will check specific cells based on the cell position. # ATTENTION: Please use the provided epoch values when training. # In this exercise you will train a CNN on the FULL Cats-v-dogs dataset # This will require you doing a lot of data preprocessing because # the dataset isn't split into training and validation for you # This code block has all the required inputs import os import zipfile import random import shutil import tensorflow as tf from tensorflow.keras.optimizers import RMSprop from tensorflow.keras.preprocessing.image import ImageDataGenerator from shutil import copyfile from os import getcwd # This code block unzips the full Cats-v-Dogs dataset to /tmp # which will create a tmp/PetImages directory containing subdirectories # called 'Cat' and 'Dog' (that's how the original researchers structured it) path_cats_and_dogs = f"{getcwd()}/../tmp2/cats-and-dogs.zip" shutil.rmtree('/tmp') local_zip = path_cats_and_dogs zip_ref = zipfile.ZipFile(local_zip, 'r') zip_ref.extractall('/tmp') zip_ref.close() print(len(os.listdir('/tmp/PetImages/Cat/'))) print(len(os.listdir('/tmp/PetImages/Dog/'))) # Expected Output: # 1500 # 1500 # Use os.mkdir to create your directories # You will need a directory for cats-v-dogs, and subdirectories for training # and testing. These in turn will need subdirectories for 'cats' and 'dogs' try: os.makedirs("/tmp/cats-v-dogs/training/cats/") os.makedirs("/tmp/cats-v-dogs/training/dogs/") os.makedirs("/tmp/cats-v-dogs/testing/cats/") os.makedirs("/tmp/cats-v-dogs/testing/dogs/") except OSError: pass # Write a python function called split_data which takes # a SOURCE directory containing the files # a TRAINING directory that a portion of the files will be copied to # a TESTING directory that a portion of the files will be copie to # a SPLIT SIZE to determine the portion # The files should also be randomized, so that the training set is a random # X% of the files, and the test set is the remaining files # SO, for example, if SOURCE is PetImages/Cat, and SPLIT SIZE is .9 # Then 90% of the images in PetImages/Cat will be copied to the TRAINING dir # and 10% of the images will be copied to the TESTING dir # Also -- All images should be checked, and if they have a zero file length, # they will not be copied over # # os.listdir(DIRECTORY) gives you a listing of the contents of that directory # os.path.getsize(PATH) gives you the size of the file # copyfile(source, destination) copies a file from source to destination # random.sample(list, len(list)) shuffles a list def split_data(SOURCE, TRAINING, TESTING, SPLIT_SIZE): # YOUR CODE STARTS HERE path, dirs, files = next(os.walk(SOURCE)) file_count = len(files) train_filenames = random.sample(os.listdir(SOURCE), int(file_count*SPLIT_SIZE)) for fname in train_filenames: srcpath = os.path.join(SOURCE, fname) shutil.move(srcpath, TRAINING) for f in os.listdir(SOURCE): shutil.move(SOURCE+f, TESTING) # YOUR CODE ENDS HERE CAT_SOURCE_DIR = "/tmp/PetImages/Cat/" TRAINING_CATS_DIR = "/tmp/cats-v-dogs/training/cats/" TESTING_CATS_DIR = "/tmp/cats-v-dogs/testing/cats/" DOG_SOURCE_DIR = "/tmp/PetImages/Dog/" TRAINING_DOGS_DIR = "/tmp/cats-v-dogs/training/dogs/" TESTING_DOGS_DIR = "/tmp/cats-v-dogs/testing/dogs/" split_size = .9 split_data(CAT_SOURCE_DIR, TRAINING_CATS_DIR, TESTING_CATS_DIR, split_size) 
split_data(DOG_SOURCE_DIR, TRAINING_DOGS_DIR, TESTING_DOGS_DIR, split_size) print(len(os.listdir('/tmp/cats-v-dogs/training/cats/'))) print(len(os.listdir('/tmp/cats-v-dogs/training/dogs/'))) print(len(os.listdir('/tmp/cats-v-dogs/testing/cats/'))) print(len(os.listdir('/tmp/cats-v-dogs/testing/dogs/'))) # Expected output: # 1350 # 1350 # 150 # 150 # DEFINE A KERAS MODEL TO CLASSIFY CATS V DOGS # USE AT LEAST 3 CONVOLUTION LAYERS model = tf.keras.models.Sequential([ tf.keras.layers.Conv2D(32, (3,3), activation='relu', input_shape=(150, 150, 3)), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Conv2D(64, (3,3), activation='relu'), tf.keras.layers.MaxPooling2D(2,2), tf.keras.layers.Conv2D(128, (3,3), activation='relu'), tf.keras.layers.MaxPooling2D(2,2), tf.keras.layers.Conv2D(128, (3,3), activation='relu'), tf.keras.layers.MaxPooling2D(2,2), tf.keras.layers.Flatten(), tf.keras.layers.Dense(512, activation='relu'), tf.keras.layers.Dense(1, activation='sigmoid') ]) model.compile(optimizer=RMSprop(lr=0.001), loss='binary_crossentropy', metrics=['acc']) ###Output _____no_output_____ ###Markdown NOTE:In the cell below you **MUST** use a batch size of 10 (`batch_size=10`) for the `train_generator` and the `validation_generator`. Using a batch size greater than 10 will exceed memory limits on the Coursera platform. ###Code TRAINING_DIR = "/tmp/cats-v-dogs/training/" train_datagen = ImageDataGenerator(rescale = 1.0/255., rotation_range=40, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, zoom_range=0.2, horizontal_flip=True, fill_mode='nearest') # NOTE: YOU MUST USE A BATCH SIZE OF 10 (batch_size=10) FOR THE # TRAIN GENERATOR. train_generator = train_datagen.flow_from_directory(TRAINING_DIR, batch_size=10, class_mode='binary', target_size=(150, 150)) VALIDATION_DIR = "/tmp/cats-v-dogs/testing/" validation_datagen = ImageDataGenerator(rescale = 1.0/255., rotation_range=40, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, zoom_range=0.2, horizontal_flip=True, fill_mode='nearest') # NOTE: YOU MUST USE A BACTH SIZE OF 10 (batch_size=10) FOR THE # VALIDATION GENERATOR. validation_generator = validation_datagen.flow_from_directory(VALIDATION_DIR, batch_size=10, class_mode = 'binary', target_size = (150, 150)) # Expected Output: # Found 2700 images belonging to 2 classes. # Found 300 images belonging to 2 classes. 
history = model.fit_generator(train_generator, epochs=2, verbose=1, validation_data=validation_generator) # PLOT LOSS AND ACCURACY %matplotlib inline import matplotlib.image as mpimg import matplotlib.pyplot as plt #----------------------------------------------------------- # Retrieve a list of list results on training and test data # sets for each training epoch #----------------------------------------------------------- acc=history.history['acc'] val_acc=history.history['val_acc'] loss=history.history['loss'] val_loss=history.history['val_loss'] epochs=range(len(acc)) # Get number of epochs #------------------------------------------------ # Plot training and validation accuracy per epoch #------------------------------------------------ plt.plot(epochs, acc, 'r', "Training Accuracy") plt.plot(epochs, val_acc, 'b', "Validation Accuracy") plt.title('Training and validation accuracy') plt.figure() #------------------------------------------------ # Plot training and validation loss per epoch #------------------------------------------------ plt.plot(epochs, loss, 'r', "Training Loss") plt.plot(epochs, val_loss, 'b', "Validation Loss") plt.title('Training and validation loss') # Desired output. Charts with training and validation metrics. No crash :) ###Output _____no_output_____ ###Markdown Submission Instructions ###Code # Now click the 'Submit Assignment' button above. ###Output _____no_output_____ ###Markdown When you're done or would like to take a break, please run the two cells below to save your work and close the Notebook. This will free up resources for your fellow learners. ###Code %%javascript <!-- Save the notebook --> IPython.notebook.save_checkpoint(); %%javascript IPython.notebook.session.delete(); window.onbeforeunload = null setTimeout(function() { window.close(); }, 1000); ###Output _____no_output_____
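###Markdown One refinement worth noting (not required by the grader): the exercise applies the same augmentation transforms to the validation generator, so `val_acc` is measured on randomly rotated/shifted images. A sketch of a rescale-only validation generator, reusing `VALIDATION_DIR` defined above:

```python
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Rescale only; no rotation/shift/flip, so validation images stay unmodified
plain_validation_datagen = ImageDataGenerator(rescale=1.0 / 255.)
plain_validation_generator = plain_validation_datagen.flow_from_directory(
    VALIDATION_DIR,
    batch_size=10,
    class_mode='binary',
    target_size=(150, 150))
```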
lectures/5_linear_regression/part1.ipynb
###Markdown Introduction to linear regression Many problems can be posed as "finding a relation" between factors/variablesThis can be interpreted as predicting and/or explaining a variable given othersSome examples:- Predicting sales given money spent in advertising- Predicting chance to rain in Valdivia given temperature, pressure and humidity- Predicting gasoline consumption of a car given acceleration, weight and number of cylinders- Predicting chance to get lung cancer given number of smoked cigarettes per day, age and genderWe could ask- Are these variable related?- How strong and/or significant is the relationship?- What is the nature of the relationship?Answering these helps us **understand the underlying processes behind the data** Defining regression**Regression** refers to a family of statistical methods to find **relationships** between **variables**In general the relation is modeled as a function $g(\cdot)$ that maps two types of variables- The input variable $X$ is called **independent variable** or feature- The output variable $Y$ is called **dependent variable**, response or targetThe mapping or function $g$ is called **predictor** or **regressor**$$g: X \to Y$$The objective is to learn $g$ such that we can predict $Y$ given $X$, *i.e.* $\mathbb{E}[Y|X]$ - **Regression** can be defined from an statistical perspective as a special case of model fitting (parameter estimation)- In many books **Regression** is defined from a pure-optimization perspective (deterministic)- **Regression** is considered part of the *supervised learning* paradigm. The difference between **Regression** and *classification* is the nature of the dependent variable (continuous vs categorical) Parametric vs non-parametric regressionRegression methods can be broadly classified as either parametric or non-parametric In parametric regression - We know the model of the regressor- The model has a finite number of parameters- The parameters of the model are all we need to do predictions - Simpler but with bigger assumptions (inductive bias)In nonparametric regression- There is no functional form for the regressor- It can have an infinite number of parameters (and a finite number of hyperparameters)- The regressor is defined from the training data- More flexible but requires more data to fit it- Examples: Splines, Support vector regression, Gaussian processesIn this lesson we will focus on parametric regression Parametric linear models for regressionLet - $X$ be a continuous D-dimensional variable (feature) and $Y$ be a continuous unidimensional variable (target) - $\{x_i, y_i\}$ with $i=1,\ldots,N$ be a set of $N$ *iid* observations of $X$ and $Y$- $g_\theta$ be a model with a M-dimensional parameter $\theta$ Then we can define parametric regression as finding a value of $\theta$ such that $$y_i \approx g_\theta(x_i),\quad i=1,\ldots, N$$The simplest parametric model is the **linear model**. 
A linear model gives rise to **linear regression**:::{important}The linear model is linear on $\theta$ but not necessarily on $X$:::For example a model with unidimensional input$$g_\theta \left(x_i \right) = \theta_0 + \theta_1 x_i + \theta_2 x_i^2,$$is a linear model and$$g_\theta(x_i) = \theta_0 + \theta_1 \log(x_i),$$is also a linear model but$$g_\theta(x_i) = \theta_0 + \log(x_i + \theta_1),$$is not a linear model The simplest linear model: The lineIf we consider a one-dimensional variable $x_i \in \mathbb{R}, i=1,\ldots,N$, then the simplest linear model is$$g_\theta(x_i) = \theta_0 + \theta_1 x_i$$which has $M=2$ parameters. This corresponds to a line in $\mathbb{R}^2$ and we recognize- $\theta_0$ as the intercept- $\theta_1$ as the slopeIf we consider a two-dimensional variable $x_i = (x_{i1}, x_{i2}) \in \mathbb{R}^2, i=1,\ldots,N$ then we obtain$$g_\theta(x_i) = \theta_0 + \theta_1 x_{i1} + \theta_2 x_{i2}$$which has $M=3$ parameters. This corresponds to a plane in $\mathbb{R}^3$The most general form assumes a D-dimensional variable $x_i = (x_{i1}, x_{i2}, \ldots, x_{iD}), i=1,\ldots,N$ $$g_\theta(x_i) = \theta_0 + \sum_{j=1}^D \theta_j x_{ij}$$which has $M=D+1$ parameters, which corresponds to an hyperplane in $\mathbb{R}^M$ Fitting the simplest linear model: MathematicsAssuming that we have $\{x_i, y_i\}_{i=1,\ldots,N}$ *iid* observations from unidimensional variables X and Y> How do we find $\theta_0$ and $\theta_1$ such that $y_i \approx \theta_0 + \theta_1 x_i, \forall i$?Let's start by writing the squared residual (error) as $$E_i^2 = (y_i - \theta_0 - \theta_1 x_i)^2,$$We can fit (train) the model with$$\min_{\theta} L = \sum_{i=1}^N E_i^2 = \sum_{i=1}^N (y_i - \theta_0 - \theta_1 x_i)^2,$$where $L$, the sum of squares errors, is a our loss/cost function:::{note}Later we will see that this cost function arises when a gaussian likelihood for $Y$ is assumed::: Setting the derivative of this expression with respect to the parameters we obtain$$\hat \theta_0 = \bar y - \hat \theta_1 \bar x,$$where $\bar x = \frac{1}{N} \sum_{i=1}^N x_i$, $\bar y = \frac{1}{N} \sum_{i=1}^N y_i$ and$$\hat \theta_1 = \frac{N\sum_i x_i y_i - (\sum_i x_i)(\sum_i y_i)}{N \sum_i x_i^2 - (\sum_i x_i)^2},$$ :::{dropdown} ProofWith$$\begin{align}\frac{dL}{d\theta_0} &= -2 \sum_{i=1}^N (y_i - \theta_0 - \theta_1 x_i) \nonumber \\&= -2 \sum_{i=1}^N y_i + 2 N\theta_0 + 2 \theta_1 \sum_{i=1}^N x_i = 0 \nonumber\end{align}$$and $$\begin{align}\frac{dL}{d\theta_1} &= -2 \sum_{i=1}^N (y_i - \theta_0 - \theta_1 x_i) x_i \nonumber \\&= -2 \sum_{i=1}^N y_i x_i + 2 \theta_0 \sum_{i=1}^N x_i + 2 \theta_1 \sum_{i=1}^N x_i^2 = 0 \nonumber\end{align}$$a system of two equations and two unknowns is obtained$$\begin{pmatrix} N & \sum_i x_i \\ \sum_i x_i & \sum_i x_i^2\\\end{pmatrix} \begin{pmatrix} \theta_0 \\ \theta_1 \end{pmatrix} = \begin{pmatrix} \sum_i y_i \\ \sum_i x_i y_i \end{pmatrix} $$whose solution is$$\begin{pmatrix} \hat \theta_0 \\ \hat \theta_1 \end{pmatrix} = \frac{1}{N\sum_i x_i^2 - \left(\sum_i x_i\right)^2}\begin{pmatrix} \sum_i x_i^2 & -\sum_i x_i \\ -\sum_i x_i & N\\\end{pmatrix} \begin{pmatrix} \sum_i y_i \\ \sum_i x_i y_i \end{pmatrix} $$where we assume that the determinant of the matrix is not zero::: Fitting the simplest linear model: PythonWe can fit a line in Python using the `scipy.stats` library```pythonscipy.stats.linregress(x, N vector or Mx2 matrix (if y is None) y=None, N vector )```This function returns an object, its main attributes are- `slope`: Equivalent to $\hat \theta_1$- 
`intercept`: Equivalent to $\hat \theta_0$- `rvalue`: The correlation coefficient (more on this later)- `pvalue`: A p-value for the null hypothesis that $\theta_1 =0$ (more on this later)Let's create synthetic data to test this function ###Code np.random.seed(12345) theta, sigma = [0.5 , -1], 0.5 x = np.random.rand(25)*5 def model(x, theta): return theta[0] + theta[1]*x y = model(x, theta) + sigma*np.random.randn(len(x)) ###Output _____no_output_____ ###Markdown We fit the data using ###Code res = scipy.stats.linregress(x, y) hat_theta = np.array([res.intercept, res.slope]) print(f"hat theta0: {hat_theta[0]:0.5f}, hat theta1: {hat_theta[1]:0.5f}") ###Output _____no_output_____ ###Markdown Predicting with the model and inspecting the results We can use the fitted model to interpolate/extrapolate on new values $\hat x$The following plot shows the fitted model on the training samples ###Code hat_x = np.linspace(-1, 6, num=50) hat_y = model(hat_x, hat_theta) p_fitted = hv.Curve((hat_x, hat_y), label='Fitted model') p_data = hv.Scatter((x, y), label='Training data').opts(color='k', size=5) hv.Overlay([p_data, p_fitted]) ###Output _____no_output_____ ###Markdown The fitted model (blue) follows the data closely. To visually assess the quality of the fit we can also plot the residuals, i.e. the distance between each sample of the training set and the fitted line. We can also inspect the histogram of the residuals ###Code residuals = y - model(x, hat_theta) bins, edges = np.histogram(residuals, density=True) p_residuals = hv.Scatter((y, residuals), 'Target variable', 'Residuals').opts(color='k', size=5, width=350) p_zero = hv.HLine(0).opts(color='k', line_dash='dashed', alpha=0.5) p_hist = hv.Histogram((edges, bins), kdims='Residuals', vdims='Density').opts(width=350) hv.Layout([p_residuals * p_zero, p_hist]).cols(2) ###Output _____no_output_____ ###Markdown Look for residuals that- concentrate around zero - are not correlated (white noise like)Correlation in the residuals is a sign that the choice of the model (line) was not adequate Coefficient of determinationWe can measure how strong is the linear relation between $y$ and $\hat y = \hat \theta_0 + \hat \theta_1 x$ using the **coefficient of determination** or $r^2$This is defined as$$r^2 = 1 - \frac{\sum_i (y_i - \hat y_i)^2}{\sum_i (y_i - \bar y_i)^2} \in [0, 1]$$*i.e.* one minus the sum of residuals divided by the variance of $y$. The $r$ statistic is also known as Pearson's correlation coefficient. Interpreting $r^2$:- If $r^2 = 1$, the data points are fitted perfectly by the model. The regressor accounts for all of the variation in y- If $r^2 = 0$, the regression line is horizontal. The regressor accounts for none of the variation in y:::{warning}If the relation is strong but non-linear it will not be detected by $r^2$:::Note that $r$ is available in the object returned by `scipy.stats.linregress`For example in this case: ###Code print(res.rvalue**2) ###Output _____no_output_____
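###Markdown To connect the closed-form expressions for $\hat\theta_0$, $\hat\theta_1$ and $r^2$ derived above with the `scipy.stats.linregress` call used in this lecture, here is a minimal sketch that evaluates them directly with NumPy; the function name is illustrative.

```python
import numpy as np

def fit_line_closed_form(x, y):
    """Closed-form least-squares fit of y ~ theta0 + theta1 * x, plus r^2."""
    x, y = np.asarray(x), np.asarray(y)
    n = len(x)
    theta1 = (n * np.sum(x * y) - np.sum(x) * np.sum(y)) / \
             (n * np.sum(x**2) - np.sum(x)**2)
    theta0 = np.mean(y) - theta1 * np.mean(x)
    y_hat = theta0 + theta1 * x
    r2 = 1.0 - np.sum((y - y_hat)**2) / np.sum((y - np.mean(y))**2)
    return theta0, theta1, r2

# Cross-check against the fit above (x, y are the synthetic data):
# theta0, theta1, r2 = fit_line_closed_form(x, y)
# assert np.allclose([theta0, theta1], [res.intercept, res.slope])
```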
nanoscint/maintext/Figure4-5/analysis/FIB5_interpretation_final.ipynb
###Markdown Figure 4 analysis (20 microns sample) ###Code folder_name = "../res/FIB_expt3/A1_20/final" # dose A, small area, 20 um thick expt_enhanc = 9.06 data0 = read_single_file(glob.glob(folder_name + "/*.npy")[0]) # Takes any file for reference wl_vec_ref = data0.item().get("wl_vec") pol_vec = {"s", "p"} theta_vec = data0.item().get("theta_vec") theta_mat = data0.item().get("theta_mat") phi_mat = data0.item().get("phi_mat") rad_etch = data0.item().get("rad_etch") # Reference simulation (unpatterned) width = 0.080 sig = width / 2.355 wl0 = 0.550 epsi = 1.0 * np.exp(-np.divide(np.square(wl_vec_ref-wl0),2.*sig**2.)) data_matsym_ref, data_matavg_ref, R_matsym_ref, R_matavg_ref, T_matsym_ref, T_matavg_ref, wl_vec, loss, _, _ = process_data_file(glob.glob(folder_name + "/*.npy")[0], extension_flag=False) file_list = [] for file in glob.glob(folder_name + "/*.npy"): file_name = re.split(folder_name, file)[1] file_list.append(file_name) if re.findall("radius0.0", file_name): data_matsym, data_matavg, R_matsym, R_matavg, T_matsym, T_matavg, wl_vec, loss, _, _= process_data_file(file_name) scintref = np.trapz(data_matavg[:,0]*epsi, x = wl_vec*1e3) scint_yield_enhanc_list = [] loss_list = [] L_list = [] # Calculates enhancement from all simulations in folder for file in file_list: data_matsym, data_matavg, R_matsym, R_matavg, T_matsym, T_matavg, wl_vec, loss, _, _= process_data_file(file) scint_yield = np.trapz(data_matavg[:,0]*epsi, x = wl_vec*1e3) scint_yield_enhanc_list.append(scint_yield/scintref) print("Scintillation yield {0} = {1} (enhancement = {2})".format(file, scint_yield, scint_yield/scintref)) L_float = re.findall("depth_(\d+).(\d)", file)[0] L_list.append(float(L_float[0])+float(L_float[1])/10.) loss_list.append(loss) scint_yield_enhanc_list = np.array(scint_yield_enhanc_list) L_list = np.array(L_list) ind_best_fit = np.where(np.abs(scint_yield_enhanc_list-expt_enhanc)==np.min(np.abs(scint_yield_enhanc_list-expt_enhanc))) print("Etch depth fitting expt = {0}nm".format(L_list[ind_best_fit][0])) print("Corresponding enhanc. = x{0}".format(scint_yield_enhanc_list[0])) # Plots enhancement experiment vs. 
simulation plt.figure() plt.plot(L_list, scint_yield_enhanc_list, 'o', label = "simulation") plt.plot(plt.xlim(), [expt_enhanc, expt_enhanc],'--', label = "experiment") plt.ylim([1.5, np.max(scint_yield_enhanc_list)*1.2]) plt.xlabel("Height (nm)") plt.ylabel("Enhancement") plt.legend() ## For 20 microns experiment # Spectral enhancement plot from scipy.ndimage import gaussian_filter1d plt.figure(figsize = (3,6)) file = "/rcwa_YAG_20.0um_design_L_430.0nm_etch_depth_60.0nm_radius215.0nm_nG_51_loss_1e-06.npy" data_matsym, data_matavg, R_matsym, R_matavg, T_matsym, T_matavg, wl_vec, loss, _, _ = process_data_file(file) epsi = 1.0 * np.exp(-np.divide(np.square(wl_vec-wl0),2.*sig**2.)) wl_range = np.arange(0,len(wl_vec)) pstd_enhancement = data_matavg[wl_range,0]*epsi normalization = np.max(gaussian_filter1d(pstd_enhancement,100)) pstd_enhancement = pstd_enhancement/normalization file = "/rcwa_YAG_20.0um_design_L_430.0nm_etch_depth_50.0nm_radius0.0nm_nG_51_loss_1e-06.npy" data_matsym, data_matavg, R_matsym, R_matavg, T_matsym, T_matavg, wl_vec, loss, _, _= process_data_file(file) off_pattern = data_matavg[wl_range,0]*epsi/normalization file = "/rcwa_YAG_20.0um_design_L_430.0nm_etch_depth_50.0nm_radius215.0nm_nG_51_loss_1e-06.npy" data_matsym, data_matavg, R_matsym, R_matavg, T_matsym, T_matavg, wl_vec, loss, _, _ = process_data_file(file) mean_enhancement = data_matavg[wl_range,0]*epsi/normalization file = "/rcwa_YAG_20.0um_design_L_430.0nm_etch_depth_40.0nm_radius215.0nm_nG_51_loss_1e-06.npy" data_matsym, data_matavg, R_matsym, R_matavg, T_matsym, T_matavg, wl_vec, loss, _, _ = process_data_file(file) mstd_enhancement = data_matavg[wl_range,0]*epsi/normalization # Uncomment line with raw data if needed plt.semilogy(wl_vec[wl_range]*1e3, gaussian_filter1d(off_pattern, 1), label = 'no etch') # Unpatterned plt.semilogy(wl_vec[wl_range]*1e3, gaussian_filter1d(mean_enhancement,100), label = 'etch, avg. (50 nm)', color = 'r') # Patterned, 50 nm etch depth # plt.semilogy(wl_vec[wl_range]*1e3, mean_enhancement, 'r', label = 'etch (raw)', alpha = 0.5) plt.semilogy(wl_vec[wl_range]*1e3, gaussian_filter1d(mstd_enhancement,100), '--', label = 'etch, avg. (40 nm)', color = 'r') # Patterned, 40 nm etch depth # plt.semilogy(wl_vec[wl_range]*1e3, mstd_enhancement, 'r--', label = 'etch (raw)', alpha = 0.5) plt.semilogy(wl_vec[wl_range]*1e3, gaussian_filter1d(pstd_enhancement,100), '-.', label = 'etch, avg. 
(60 nm)', color = 'r') # Patterned, 60 nm etch depth # plt.semilogy(wl_vec[wl_range]*1e3, pstd_enhancement, 'r-.', label = 'etch (raw)', alpha = 0.5) plt.fill_between(wl_vec[wl_range]*1e3, np.maximum(gaussian_filter1d(pstd_enhancement,100), gaussian_filter1d(mean_enhancement,100)), gaussian_filter1d(mstd_enhancement,100), color = 'r', alpha = 0.4) plt.ylabel('Scintillation signal (a.u.)') plt.xlabel('Wavelength (nm)') plt.legend() ###Output _____no_output_____ ###Markdown 50 microns sample (Used for Figure 5 measurement -- Enhancement plot shown in the SI)Many variables are shared with first part of notebook, so be careful when re-running cells ###Code folder_name = "../res/FIB_expt3/A2_50/final" # dose A, large area, 50 um thick (July sample) expt_enhanc = 2.31 data0 = read_single_file(glob.glob(folder_name + "/*.npy")[0]) # Takes any file for reference wl_vec_ref = data0.item().get("wl_vec") pol_vec = {"s", "p"} theta_vec = data0.item().get("theta_vec") theta_mat = data0.item().get("theta_mat") phi_mat = data0.item().get("phi_mat") rad_etch = data0.item().get("rad_etch") data_matsym_ref, data_matavg_ref, R_matsym_ref, R_matavg_ref, T_matsym_ref, T_matavg_ref, wl_vec, loss, _, _ = process_data_file(glob.glob(folder_name + "/*.npy")[0], extension_flag=False) file_list = [] for file in glob.glob(folder_name + "/*.npy"): file_name = re.split(folder_name, file)[1] file_list.append(file_name) if re.findall("radius0.0", file_name): data_matsym, data_matavg, R_matsym, R_matavg, T_matsym, T_matavg, wl_vec, loss, _, _= process_data_file(file_name) scintref = np.trapz(data_matavg[:,0]*epsi, x = wl_vec*1e3) scint_yield_enhanc_list = [] loss_list = [] L_list = [] for file in file_list: data_matsym, data_matavg, R_matsym, R_matavg, T_matsym, T_matavg, wl_vec, loss, _, _= process_data_file(file) scint_yield = np.trapz(data_matavg[:,0]*epsi, x = wl_vec*1e3) scint_yield_enhanc_list.append(scint_yield/scintref) print("Scintillation yield {0} = {1} (enhancement = {2})".format(file, scint_yield, scint_yield/scintref)) L_float = re.findall("depth_(\d+).(\d)", file)[0] L_list.append(float(L_float[0])+float(L_float[1])/10.) loss_list.append(loss) scint_yield_enhanc_list = np.array(scint_yield_enhanc_list) L_list = np.array(L_list) ind_best_fit = np.where(np.abs(scint_yield_enhanc_list-expt_enhanc)==np.min(np.abs(scint_yield_enhanc_list-expt_enhanc))) # Plots enhancement experiment vs. 
simulation plt.figure() plt.plot(L_list, scint_yield_enhanc_list, 'o', label = "simulation") plt.plot(plt.xlim(), [expt_enhanc, expt_enhanc],'--', label = "experiment") plt.ylim([1.5, np.max(scint_yield_enhanc_list)*1.2]) plt.xlabel("Height (nm)") plt.ylabel("Enhancement") plt.legend() ## For 50 microns experiment # Spectral enhancement plot from scipy.ndimage import gaussian_filter1d plt.figure(figsize = (3,6)) file = "/rcwa_YAG_50.0um_design_L_430.0nm_etch_depth_44.0nm_radius215.0nm_nG_51_loss_1e-06.npy" data_matsym, data_matavg, R_matsym, R_matavg, T_matsym, T_matavg, wl_vec, loss, _, _ = process_data_file(file) epsi = 1.0 * np.exp(-np.divide(np.square(wl_vec-wl0),2.*sig**2.)) wl_range = np.arange(0,len(wl_vec)) pstd_enhancement = data_matavg[wl_range,0]*epsi normalization = np.max(gaussian_filter1d(pstd_enhancement,100)) pstd_enhancement = pstd_enhancement/normalization file = "/rcwa_YAG_50.0um_design_L_430.0nm_etch_depth_34.0nm_radius0.0nm_nG_51_loss_1e-06.npy" data_matsym, data_matavg, R_matsym, R_matavg, T_matsym, T_matavg, wl_vec, loss, _, _= process_data_file(file) off_pattern = data_matavg[wl_range,0]*epsi/normalization file = "/rcwa_YAG_50.0um_design_L_430.0nm_etch_depth_34.0nm_radius215.0nm_nG_51_loss_1e-06.npy" data_matsym, data_matavg, R_matsym, R_matavg, T_matsym, T_matavg, wl_vec, loss, _, _ = process_data_file(file) mean_enhancement = data_matavg[wl_range,0]*epsi/normalization file = "/rcwa_YAG_50.0um_design_L_430.0nm_etch_depth_24.0nm_radius215.0nm_nG_51_loss_1e-06.npy" data_matsym, data_matavg, R_matsym, R_matavg, T_matsym, T_matavg, wl_vec, loss, _, _ = process_data_file(file) mstd_enhancement = data_matavg[wl_range,0]*epsi/normalization # Uncomment line with raw data if needed plt.semilogy(wl_vec[wl_range]*1e3, gaussian_filter1d(off_pattern, 1), label = 'no etch') # unpatterned plt.semilogy(wl_vec[wl_range]*1e3, gaussian_filter1d(mean_enhancement,100), label = 'etch, avg. (34 nm)', color = 'r') # Patterned, 34 nm etch depth # plt.semilogy(wl_vec[wl_range]*1e3, mean_enhancement, 'r', label = 'etch (raw, 34 nm)', alpha = 0.5) plt.semilogy(wl_vec[wl_range]*1e3, gaussian_filter1d(mstd_enhancement,100), '--', label = 'etch, avg. (24 nm)', color = 'r') # Patterned, 24 nm etch depth # plt.semilogy(wl_vec[wl_range]*1e3, mstd_enhancement, 'r--', label = 'etch (raw, 24 nm)', alpha = 0.5) plt.semilogy(wl_vec[wl_range]*1e3, gaussian_filter1d(pstd_enhancement,100), '-.', label = 'etch, avg. (44 nm)', color = 'r') # Patterned, 44 nm etch depth # plt.semilogy(wl_vec[wl_range]*1e3, pstd_enhancement, 'r-.', label = 'etch (raw, 44 nm)', alpha = 0.5) plt.fill_between(wl_vec[wl_range]*1e3, np.maximum(gaussian_filter1d(pstd_enhancement,100), gaussian_filter1d(mean_enhancement,100)), gaussian_filter1d(mstd_enhancement,100), color = 'r', alpha = 0.4) plt.ylabel('Scintillation signal (a.u.)') plt.xlabel('Wavelength (nm)') # plt.legend() ###Output _____no_output_____
notebooks/papers/targeted-extraction/false discovery for YHE010.ipynb
###Markdown TFD/E prepare the TFD/E results ###Code # load the results EXPERIMENT_DIR = '/media/data-4t-a/results-P3856_YHE010/2022-03-28-17-55-06' RESULTS_DB_NAME = '{}/summarised-results/results.sqlite'.format(EXPERIMENT_DIR) db_conn = sqlite3.connect(RESULTS_DB_NAME) tfde_results_df = pd.read_sql_query("select * from sequences", db_conn) db_conn.close() # convert the identifications from JSON to Python objects tfde_results_df['identifications_d'] = tfde_results_df.apply(lambda row: json.loads(row.identifications), axis=1) def classify_protein(protein): result = 'UNKNOWN' if 'HUMAN' in protein.upper(): result = 'Human' elif 'YEAST' in protein.upper(): result = 'Yeast' elif 'ECOLI' in protein.upper(): result = 'E. coli' return result # separate some key metrics into separate columns tfde_results_df['id_perc_q_value'] = tfde_results_df.apply(lambda row: row.identifications_d['perc_q_value'], axis=1) tfde_results_df['id_count_all_runs'] = tfde_results_df.apply(lambda row: len(row.identifications_d['run_names']), axis=1) tfde_results_df['id_number_of_proteins'] = tfde_results_df.apply(lambda row: row.identifications_d['number_of_proteins'], axis=1) tfde_results_df['id_protein'] = tfde_results_df.apply(lambda row: row.identifications_d['proteins'][0], axis=1) tfde_results_df['id_species'] = tfde_results_df.apply(lambda row: classify_protein(row.id_protein), axis=1) tfde_results_df.id_species.unique() # remove the results that couldn't be extracted or were not classified as a target tfde_results_df = tfde_results_df[tfde_results_df.extractions.notnull()].copy() # convert from JSON to Python objects tfde_results_df['extractions_l'] = tfde_results_df.apply(lambda row: json.loads(row.extractions), axis=1) # separate some key metrics into separate columns tfde_results_df['ext_count_all_runs'] = tfde_results_df.apply(lambda row: len(row.extractions_l), axis=1) tfde_results_df.sample(n=3) ###Output _____no_output_____ ###Markdown false identification rate ###Code number_of_unique_peptides = len(tfde_results_df) number_of_unique_peptides number_of_unique_nonhuman_peptides = len(tfde_results_df[(tfde_results_df.id_species != "Human")]) number_of_unique_nonhuman_peptides FDR_detection = number_of_unique_nonhuman_peptides/number_of_unique_peptides*100 FDR_detection ###Output _____no_output_____ ###Markdown false transfer rate ###Code tfde_results_df['id_run_names'] = tfde_results_df.apply(lambda row: row.identifications_d['run_names'], axis=1) tfde_results_df['ext_run_names'] = tfde_results_df.apply(lambda row: [e['run_name'] for e in row.extractions_l], axis=1) tfde_results_df['transfer_count'] = tfde_results_df.apply(lambda row: len(set(row.ext_run_names)-set(row.id_run_names)), axis=1) tfde_results_df['possible_transfer_count'] = tfde_results_df.apply(lambda row: number_of_runs-len(set(row.id_run_names)), axis=1) total_transfer_count_tfde = tfde_results_df.transfer_count.sum() total_transfer_count_tfde false_transfer_count_tfde = tfde_results_df[(tfde_results_df.id_species != "Human")].transfer_count.sum() false_transfer_count_tfde # false transfer rate as a proportion of all transfers false_transfer_rate_tfde = false_transfer_count_tfde / total_transfer_count_tfde * 100.0 false_transfer_rate_tfde tfde_nonhuman_results_df = tfde_results_df[(tfde_results_df.id_species != "Human")] # false transfer rate as a proportion of all possible false transfers tfde_nonhuman_results_df.transfer_count.sum() / tfde_nonhuman_results_df.possible_transfer_count.sum() * 100.0 f, ax1 = plt.subplots() f.set_figheight(8) 
f.set_figwidth(8) plt.margins(0.06) counts = np.bincount(tfde_results_df.id_count_all_runs) ax1.bar(range(number_of_runs+1), counts, width=0.8, align='center') plt.xlabel('number of files in which a modified sequence-charge was identified') plt.ylabel('frequency') ax1.set(xticks=range(1,number_of_runs+1), xlim=[0, number_of_runs+1]) # plt.ylim((0,18000)) plt.show() f, ax1 = plt.subplots() f.set_figheight(8) f.set_figwidth(8) plt.margins(0.06) counts = np.bincount(tfde_nonhuman_results_df.id_count_all_runs) ax1.bar(range(number_of_runs+1), counts, width=0.8, align='center') plt.xlabel('number of files in which a non-human modified sequence-charge was identified') plt.ylabel('frequency') ax1.set(xticks=range(1,number_of_runs+1), xlim=[0, number_of_runs+1]) # plt.ylim((0,18000)) plt.show() f, ax1 = plt.subplots() f.set_figheight(8) f.set_figwidth(8) plt.margins(0.06) counts = np.bincount(tfde_nonhuman_results_df.ext_count_all_runs) ax1.bar(range(number_of_runs+1), counts, width=0.8, align='center') plt.xlabel('number of files in which a non-human modified sequence-charge was extracted') plt.ylabel('frequency') ax1.set(xticks=range(1,number_of_runs+1), xlim=[0, number_of_runs+1]) # plt.ylim((0,18000)) plt.show() ###Output _____no_output_____ ###Markdown MaxQuant prepare the MaxQuant results (MaxQuant was executed with MBR on) ###Code MQ_RESULTS_DIR = '{}'.format(expanduser('~')) mq_results_df = pd.read_csv('{}/MQ-analysis-of-P3856/combined-P3856_YHE010/txt/evidence.txt'.format(MQ_RESULTS_DIR), sep='\\t', engine='python') # remove decoys, which are indicated by a '+' in the Reverse column mq_results_df = mq_results_df[pd.isna(mq_results_df.Reverse)] # remove identifications with no intensity mq_results_df = mq_results_df[(mq_results_df.Intensity > 0)] # remove potential contaminants mq_results_df = mq_results_df[pd.isna(mq_results_df['Potential contaminant'])] # remove identifications with mass error more than +/- 5 ppm mq_results_df = mq_results_df[np.abs(mq_results_df['Mass error [ppm]']) <= 5.0] # add RT as seconds mq_results_df['retention_time_secs'] = mq_results_df['Retention time']*60.0 mq_results_df.Type.unique() # definition of uniqueness in MaxQuant output with MBR on unique_peptide_key = ['Sequence','Modifications','Charge'] mq_results_df.sample(n=5) mq_results_df[mq_results_df.Type=='TIMS-MULTI-MSMS'].sample(n=5) mq_results_df['id_species'] = mq_results_df.apply(lambda row: classify_protein(row.Proteins), axis=1) # count the number of runs each unique peptide was identified and extracted mq_counts_l = [] for group_name,group_df in mq_results_df.groupby(unique_peptide_key, as_index=False): identifications_l = group_df[group_df.Type=='TIMS-MULTI-MSMS']['Raw file'].unique() extractions_l = group_df[group_df.Type=='TIMS-MULTI-MATCH']['Raw file'].unique() transfer_count = len(set(extractions_l) - set(identifications_l)) possible_transfer_count = number_of_runs - len(set(identifications_l)) species = classify_protein(' '.join(group_df.Proteins.tolist())) mq_counts_l.append({'peptide_key':group_name, 'species':species, 'transfer_count':transfer_count, 'possible_transfer_count':possible_transfer_count, 'id_count_all_runs':len(identifications_l), 'id_runs':identifications_l, 'ext_count_all_runs':len(extractions_l), 'ext_runs':extractions_l}) mq_counts_df = pd.DataFrame(mq_counts_l) mq_counts_df.sample(n=5) # remove all failed extractions as we did for TFD/E mq_counts_df = mq_counts_df[(mq_counts_df.ext_count_all_runs>0)] ###Output _____no_output_____ ###Markdown false identification rate 
###Code number_of_unique_peptides_mq = len(mq_counts_df) number_of_unique_peptides_mq number_of_unique_nonhuman_peptides_mq = len(mq_counts_df[(mq_counts_df.species != "Human")]) number_of_unique_nonhuman_peptides_mq FDR_detection_mq = number_of_unique_nonhuman_peptides_mq/number_of_unique_peptides_mq*100 FDR_detection_mq ###Output _____no_output_____ ###Markdown false transfer rate ###Code total_transfer_count_mq = mq_counts_df.transfer_count.sum() total_transfer_count_mq false_transfer_count_mq = mq_counts_df[(mq_counts_df.species != "Human")].transfer_count.sum() false_transfer_count_mq # false transfer rate as a proportion of all transfers false_transfer_rate = false_transfer_count_mq / total_transfer_count_mq * 100.0 false_transfer_rate mq_nonhuman_counts_df = mq_counts_df[(mq_counts_df.species != "Human")] # false transfer rate as a proportion of all possible false transfers mq_nonhuman_counts_df.transfer_count.sum() / mq_nonhuman_counts_df.possible_transfer_count.sum() * 100.0 f, ax1 = plt.subplots() f.set_figheight(8) f.set_figwidth(8) plt.margins(0.06) counts = np.bincount(mq_nonhuman_counts_df.id_count_all_runs) ax1.bar(range(number_of_runs+1), counts, width=0.8, align='center') plt.xlabel('number of files in which a non-human modified sequence-charge was identified') plt.ylabel('frequency') ax1.set(xticks=range(1,number_of_runs+1), xlim=[0, number_of_runs+1]) # plt.ylim((0,18000)) plt.show() f, ax1 = plt.subplots() f.set_figheight(8) f.set_figwidth(8) plt.margins(0.06) counts = np.bincount(mq_nonhuman_counts_df.ext_count_all_runs) ax1.bar(range(number_of_runs+1), counts, width=0.8, align='center') plt.xlabel('number of files in which a non-human modified sequence-charge was extracted') plt.ylabel('frequency') ax1.set(xticks=range(0,number_of_runs+1), xlim=[-1, number_of_runs+1]) # plt.ylim((0,18000)) plt.show() ###Output _____no_output_____ ###Markdown Fragger ###Code FRAGGER_RESULTS_DIR = '{}'.format(expanduser('~')) fragger_results_df = pd.read_csv('{}/MSFragger-analysis-P3856-YHE010/MSstats.csv'.format(FRAGGER_RESULTS_DIR), sep=',') fragger_results_df = fragger_results_df[fragger_results_df.Intensity.notnull()] fragger_results_df.sample(n=5) fragger_results_df['id_species'] = fragger_results_df.apply(lambda row: classify_protein(row.ProteinName), axis=1) unique_peptide_key_fragger = ['PeptideSequence', 'PrecursorCharge'] # count the number of runs each unique peptide was identified and extracted fragger_counts_l = [] for group_name,group_df in fragger_results_df.groupby(unique_peptide_key_fragger, as_index=False): extractions_l = group_df['Run'].unique() species = classify_protein(' '.join(group_df.ProteinName.tolist())) fragger_counts_l.append({'peptide_key':group_name, 'species':species, 'ext_count_all_runs':len(extractions_l)}) fragger_counts_df = pd.DataFrame(fragger_counts_l) fragger_counts_df.sample(n=5) # remove all failed extractions as we did for TFD/E fragger_counts_df = fragger_counts_df[(fragger_counts_df.ext_count_all_runs>0)] ###Output _____no_output_____ ###Markdown false identification rate ###Code number_of_unique_peptides_fragger = len(fragger_counts_df) number_of_unique_peptides_fragger number_of_unique_nonhuman_peptides_fragger = len(fragger_counts_df[(fragger_counts_df.species != "Human")]) number_of_unique_nonhuman_peptides_fragger FDR_detection_fragger = number_of_unique_nonhuman_peptides_fragger/number_of_unique_peptides_fragger*100 FDR_detection_fragger ###Output _____no_output_____
notebooks/table_reduction.ipynb
###Markdown The table reduction morphismIn this notebook we assume familiarity with both, the Barratt-Eccles $\mathcal E$ and surjection $\mathcal X$ operads.**Contents**1. [Definition](definition)2. [References](references) Definition The table reduction morphisms $TR : \mathcal E \to \mathcal X$ from the Barratt-Eccles to the surjection operad is a surjective weak equivalence of operads introduced in [BF]. For a basis Barratt-Eccles element $(\sigma_0, \dots, \sigma_n) \in \mathcal E(r)_n$ we have that\begin{equation*}TR(\sigma_0, \dots, \sigma_n) = \sum_{a} s_{a}\end{equation*}is a sum of surjections\begin{equation*}s_{a} : \{1, \dots, n+r \} \to \{1, \dots, r\}\end{equation*}parametrized by all tuples of positive integers $a = (a_0, \dots, a_n)$ with $a_0 + \cdots + a_n = n + r$. For one such tuple $a$ we now describe the surjection $s_a$. Define recursively\begin{equation*}A_{-1} = 0 \qquad A_i = A_{i-1} + a_{i.}\end{equation*}For $k \in \{1, \dots, n+r\}$ we identify $i \in \{1, \dots, n\}$ such that $A_{i-1} < k \leq A_{i}$ and define $s_a(k)$ to be the $(k - A_{i-1})$-th element in $(\sigma_i(1), \dots, \sigma_i(r))$ not in\begin{equation*}\big\{ s_a(j) \ | \ j < k \text{ and } j \neq A_0, \dots , A_{i-1} \big\}.\end{equation*}As proven in [BF] this operad map preserves the $E_n$-filtration.The class `BarrattEcclesElement` is equipped with the method `table_reduction` modeling this morphism. The result `SurjectionElement` has the same `torsion` attribute with the Berger-Fresse sign convention. ###Code from comch import BarrattEcclesElement b = BarrattEcclesElement({((1,2,3),(1,3,2)):1}) print(f'If b = {b} then TR(b) = {b.table_reduction()}') b = BarrattEcclesElement({((1,2,3,4), (1,4,3,2)): 1, ((1,2,4,3), (3,4,2,1)): 2}) dtr_b = b.table_reduction().boundary() trd_b = b.boundary().table_reduction() print(f'Is it a chain map: {dtr_b == trd_b}') from comch import SymmetricRingElement sym = SymmetricRingElement({(1,3,2,4): 1, (2,1,3,4): -1}) sym_tr_b = sym * (b.table_reduction()) tr_sym_b = (sym * b).table_reduction() print(f'Is it equivariant: {sym_tr_b == tr_sym_b}') a = BarrattEcclesElement({((1,2,3), (1,3,2)): 1, ((2,1,3), (1,2,3)): 2}) tr_ab = a.compose(b, 2).table_reduction() tra_trb = a.table_reduction().compose(b.table_reduction(), 2) print(f'Is it composition preserving: {tr_ab == tra_trb}') ###Output _____no_output_____
Case 3 - medical text categorization.ipynb
###Markdown Case 3 - Medical text categorizationTom ThielMetropolias UAS12.3.2018 1. ObjectivesObjective of this assignment is build a document classifier for 20 000 cardiovascular disease abstracts. There are total of 23 categories which the abstracts may belong.To build a Natural Language Classifier in Keras you need to first preprocess the data by tokenizing the words. For that we have to create a vocabulary. 2. Load data and find the labels ###Code #Download the training data and the labels import os case3_dir = '../ohsumed-first-20000-docs/' train_dir = os.path.join(case3_dir, 'training') labels = [] texts = [] # to see how many abstracts there are in each directory label_list = [label for label in os.listdir(train_dir)] for label_type in label_list: dir_name = os.path.join(train_dir, label_type) print(label_type, len(os.listdir(dir_name))) for fname in os.listdir(dir_name): f = open(os.path.join(dir_name, fname)) texts.append(f.read()) f.close() labels.append(label_list.index(label_type)) ###Output C01 423 C02 158 C03 65 C04 1163 C05 283 C06 588 C07 100 C08 473 C09 125 C10 621 C11 162 C12 491 C13 281 C14 1249 C15 215 C16 200 C17 295 C18 388 C19 191 C20 525 C21 546 C22 92 C23 1799 ###Markdown 3. Shape and preprocess data ###Code #Preprocess integer labels into one-hot tensor labels from keras.utils.np_utils import to_categorical import numpy as np labels = np.asarray(labels) labels = to_categorical(labels) print('Shape of labels tensor:', labels.shape) ###Output Shape of labels tensor: (10433, 23) ###Markdown Setting the vocabulary of the documents to 20 000 will cut off text any longer and for shorter abstracts the zeros will be filled to rest of the vector. ###Code #Preprocess word data into integer tensors #Each sample is zero-padded to length "maxlen" #Only "max_words" most frequent words are taken into account from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences maxlen = 300 # common length of all the samples max_words = 20000 # number of different word indices tokenizer = Tokenizer(num_words = max_words) tokenizer.fit_on_texts(texts) sequences = tokenizer.texts_to_sequences(texts) word_index = tokenizer.word_index print('Found %s unique tokens' % len(word_index)) # add padding, so that every letter gets vectorized data = pad_sequences(sequences, maxlen = maxlen) print('Shape of data tensor:', data.shape) ###Output Found 30857 unique tokens Shape of data tensor: (10433, 300) ###Markdown Training data must be suffled for validation split. Keras validation split picks out samples from the bottom of the data and in this case they would all be from category nro 23 which has 1799 samples. https://github.com/keras-team/keras/issues/597Let's test how the labels are suffled by printing abstract nro 7 ###Code samples = np.arange(data.shape[0]) print((texts[samples[6]])) #Shuffle the training data np.random.shuffle(samples) data = data[samples] labels = labels[samples] #print abstract nro 7 again and see if it's the same print(texts[samples[6]]) ###Output Death at cardiac catheterization: coronary artery embolization of calcium debris from Ionescu-Shiley bioprosthesis. The case described is a death due to embolization of calcium debris from a bioprosthesis, dislodged at cardiac catheterization. As more bioprosthetic valves are implanted, and more of them fail long-term with calcification, such complications of the invasive study of these valves may be expected. ###Markdown The suffling was successful. 5. 
ModelingTime to use embedding to get rid of one-hot vectors and use more sophisticated way for presenting the words in matrixes. There will be only one representation for each word during and after training.Texts to matrix -> default way in keras. It loses the order of words so it is not suitable for machine translation. Neural network based models like vector inputs. We, therefore, need to convert the integers into vectors. Embedding turns positive integers (indexes) into dense vectors of fixed size. eg. [[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]] Embedding layer can only be used as the first layer in a model. It can also be used alone to learn a word embedding that can be saved and used in another model later.Embedding layer must specify 3 arguments:input_dimoutput_diminput_lengthUsing LSTM (or any other type of RNN with time sequence) you don't need flattening layers before fully connected dense layer as the output shape is 100x1.The key to LSTMs is the cell state, a line running through the states (sequences). It can store values from state to state. ###Code #Build and compile the network model from keras.models import Sequential from keras.layers import Dense, Embedding, LSTM model = Sequential() #create 300x32 matrix model.add(Embedding(max_words, 32, input_shape=(maxlen,))) model.add(LSTM(100)) #matrix size 100x1 model.add(Dense(23, activation = 'softmax')) model.compile(optimizer = 'rmsprop', loss = 'categorical_crossentropy', metrics = ['accuracy']) model.summary() ###Output _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= embedding_4 (Embedding) (None, 300, 32) 640000 _________________________________________________________________ lstm_4 (LSTM) (None, 100) 53200 _________________________________________________________________ dense_4 (Dense) (None, 23) 2323 ================================================================= Total params: 695,523 Trainable params: 695,523 Non-trainable params: 0 _________________________________________________________________ ###Markdown 5. Training ###Code # Fit the model to training data import time t1 = time.time() # takes 5 % and uses it as validation data history = model.fit(data, labels, epochs = 12, batch_size = 25, validation_split = 0.05) t2 = time.time() print('Elapsed time: {:.2f} seconds'.format((t2-t1))) # Plot the results for model 1 import matplotlib.pyplot as plt acc = history.history['acc'] val_acc = history.history['val_acc'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(1, len(acc) + 1) plt.figure(figsize=(15, 3)) plt.plot(epochs, acc, 'bo', label='Training acc') plt.plot(epochs, val_acc, 'r', label='Validation acc') plt.title('Training and validation accuracy') plt.grid() plt.xlabel('Epoch') plt.legend() plt.figure(figsize=(15, 3)) plt.plot(epochs, loss, 'bo', label='Training loss') plt.plot(epochs, val_loss, 'r', label='Validation loss') plt.title('Training and validation loss') plt.grid() plt.xlabel('Epoch') plt.legend() plt.show() ###Output _____no_output_____ ###Markdown Acc > 0.5Val_acc < 0.1 The model is definately overfitting. 
Let's try with other hyperparameters: batch_size 20 -> 1010 epocs is enoughup the validations split 0.05 -> 0.2 ###Code t1 = time.time() # takes 20% and uses it as validation data history = model.fit(data, labels, epochs = 10, batch_size = 10, validation_split = 0.2) t2 = time.time() print('Elapsed time: {:.2f} seconds'.format((t2-t1))) # Plot the results for model 2 import matplotlib.pyplot as plt acc = history.history['acc'] val_acc = history.history['val_acc'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(1, len(acc) + 1) plt.figure(figsize=(15, 3)) plt.plot(epochs, acc, 'bo', label='Training acc') plt.plot(epochs, val_acc, 'r', label='Validation acc') plt.title('Training and validation accuracy') plt.grid() plt.xlabel('Epoch') plt.legend() plt.figure(figsize=(15, 3)) plt.plot(epochs, loss, 'bo', label='Training loss') plt.plot(epochs, val_loss, 'r', label='Validation loss') plt.title('Training and validation loss') plt.grid() plt.xlabel('Epoch') plt.legend() plt.show() ###Output _____no_output_____ ###Markdown With lower batch size the training took 40 % longer (1700 vs 1200sec).This time the validation accuracy stayed around 0.2. The model is still overfit, but I think is because of the size of training data and lack of stop word removal in preprocessing. 6. Evaluating with test data ###Code #Download and preprocess test data test_dir = os.path.join(case3_dir, 'test') test_labels = [] test_texts = [] for label_type in label_list: dir_name = os.path.join(test_dir, label_type) for fname in os.listdir(dir_name): f = open(os.path.join(dir_name, fname)) test_texts.append(f.read()) f.close() test_labels.append(label_list.index(label_type)) test_sequences = tokenizer.texts_to_sequences(test_texts) x_test = pad_sequences(test_sequences, maxlen=maxlen) test_labels = np.asarray(test_labels) y_test = to_categorical(test_labels) # Evaluate model with test data # http://scikit-learn.org/stable/modules/model_evaluation.html model.evaluate(x_test, y_test) ###Output 12733/12733 [==============================] - 31s 2ms/step ###Markdown 7. ResultsModel 2 achieved 8 % accuracy with test set. That is almost as bad as randomly guessing (1/23 = 0.0434) the classes. 8. Testing the Pre-Trained GloVe Embedding As a final attempt I downloaded the glove6b pretrained model. I wanted to see if it could be used on this particular problem. At first I ran into decoding issue, which could be fixed with forcing the char-encoding to utf8. ###Code from numpy import asarray from numpy import zeros from keras.layers import Flatten t1 = time.time() vocab_size = len(tokenizer.word_index) + 1 # load the whole embedding into memory embeddings_index = dict() f = open('glove.6B.100d.txt',encoding='utf8') for line in f: values = line.split() word = values[0] coefs = asarray(values[1:], dtype='float32') embeddings_index[word] = coefs f.close() print('Loaded %s word vectors.' 
% len(embeddings_index)) # create a weight matrix for words in training docs embedding_matrix = zeros((vocab_size, 100)) for word, i in tokenizer.word_index.items(): embedding_vector = embeddings_index.get(word) if embedding_vector is not None: embedding_matrix[i] = embedding_vector # define model model = Sequential() e = Embedding(vocab_size, 100, weights=[embedding_matrix], input_length=4, trainable=False) model.add(e) model.add(Flatten()) model.add(Dense(1, activation='sigmoid')) # compile the model with adam optimizer model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc']) # summarize the model print(model.summary()) t2 = time.time() print('Elapsed time: {:.2f} seconds'.format((t2-t1))) ###Output Loaded 400000 word vectors. _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= embedding_7 (Embedding) (None, 4, 100) 3085800 _________________________________________________________________ flatten_2 (Flatten) (None, 400) 0 _________________________________________________________________ dense_6 (Dense) (None, 1) 401 ================================================================= Total params: 3,086,201 Trainable params: 401 Non-trainable params: 3,085,800 _________________________________________________________________ None Elapsed time: 19.71 seconds ###Markdown Got the model loaded after few tweaks. But then got stuck trying to fit the data in for training. ###Code # fit the model model.fit(data, labels, epochs=10, batch_size = 10) # evaluate the model loss, accuracy = model.evaluate(data, labels, verbose=1) print('Accuracy: %f' % (accuracy*100)) ###Output _____no_output_____
1-conociendo-r/conociendo_r.ipynb
###Markdown Conociendo R Este curso esta diseñado para que de manera introductoria quien lo lea entienda R y todo su potencial.R es un lenguaje de programación y ambiente de desarrollo para estadísticas y gráficos.La manera en como funciona es a través del **desarrollo colaborativo**, por lo cual se maneja mucho en "paquetes" o **librerías**.Cada librería es desarrollada por conocedores del tema o personas interesadas en desarrollar herramientas para uso personal y público, que deciden compartir sus avances.A raíz de esto, R suele ser complicado de entender ya que no todo está disponible a simple vista o estandarizado como en otro software (SPSS, STATA, Minitab, etc.).Adicionalmente, R necesita que el usuario sepa de **manejo de datos** o tenga una noción de ello, ya que hace mucho uso (y es una de sus fortalezas) de data frames y listas. Existen diversas maneras de conseguir R, una de ellas es a través del portal de CRAN:https://www.r-project.org/Pero se han hecho esfuerzos por hacer más amigable al usuario la plataforma que ofrece R, por lo cual tambíen se cuenta con RStudio:https://www.rstudio.com/products/rstudio/download/downloadEl cual cuenta con diversas versiónes de las cuales solo una es gratuita.Para este curso, si bien vamos a usar R, se recomienda instalar Anaconda, que es un paquete que incluye diversos software y lenguajes de programación como Python, Jupyter (donde esta hecho este documento), RStudio, entre otros.https://www.anaconda.com/download/Anaconda fue específicamente diseñado para lo que se conoce como **data science** o ciencias de datos, a lo cual le daremos énfasis en este curso. Instalación 1. Bajar AnacondaSe baja como cualquier otro software.2. Instalar Anacondase instala como cualquier otro software siguiendo el tutorial.3. Instalar RStudioAnaconda dentro de su plataforma ofrece la posiblidad de actualizar los paquetes que se usan tanto en R como en Python, así como agregar otras soluciones como RStudio.![Pantalla Inicial de Anaconda Navigator](anaconda-inicio.png)Vienen diferentes Apps de inicio como **Spyder** o **Jupyter**, pero es necesario instalar **RStudio**, lo cual haremos.![Software instalado y no instalado](anaconda-apps.png) R y RStudio RStudio es un *Integrated Development Enviromnet* o IDE. Este tipo de software ayuda a que los desarrolladores tengan más a la mano información sobre lo que hacen al ofrecer diversas herramientas en el mismo entorno.![](rstudio-general2.jpg)La parte principal es Console y donde se desarrollan Scripts. Los scripts son archivos con instrucciones que se corren y son interpretados por la maquina, en este caso a través de R y su lenguaje de programación.Los Scripts nos ayudan a organizar algoritmos, archivos, programas, clases, entre otros.![](rstudio-script2.jpg)La consola es la parte central de R y de cualquier otro lenguaje de programación, se pueden mandar instruccines linea por linea, lo cual lo hace más complicado de seguir a lo cual la solución son los scripts.![](rstudio-console2.jpg)Para darle solución al manejo de scripts, variables, funciones, clases y data frames, existe la ventana de Environment:![](rstudio-environment.png)En History se tiene un registro de todo los comandos que ha corrido console, así de todos los mensajes de error y advertencias:![](rstudio-history2.jpg)RStudio ofrece la posiblidad de trabajar en base a proyectos, los cuales implican que se manejen diferentes carpetas, archivos, scripts, etc. 
Para poder administrarlos de manera más eficiente existe la ventana de Files:![](rstudio-files.png)También dentro de la misma ventana, por default, vienen las pestañas de Plot, Packages, Help y Viewer, los cuales veremos a lo largo del curso. Datos Una de las fortalezas de R es que puede manejar datos de diversas maneras y manipularlos de acuerdo a como los interpreta, así como a organizarlos según su naturaleza. Vectores Los vectores son la unidad más básica de datos, ya sea que sean de $1 x N$ o $N x 1$ o escalar de $1 x 1$, R reconoce los vectores según lo que contengan, ya sea números enteros $N$, números de punto flotante, decimales, caractéres o valores booleanos (TRUE o FALSE). ###Code a <- c(1,2,3,4,5) b <- c("Ana","Daniel","Victor", "Ivan") c <- c(TRUE, FALSE, TRUE) a b c ###Output _____no_output_____ ###Markdown Matrices Las matrices son vectores de tamaño $M x N$, a diferencia del dataframe, todos los valores dentro de una matríz deben de ser de la misma naturaleza (numero, carácter, booleano, etc). ###Code m1 <- matrix(1:20, nrow = 4, ncol = 5) m1 ###Output _____no_output_____ ###Markdown Arrays Los arrays son similares a las matrices, solo que contienen más dimensiones, como los cubos de $N x M x P$ Data Frames Los data frames son las más usadas dentro de R, ya que ayudan a organizar los datos de diversas maneras, con diferentes valores, así como aplicarles operaciones en conjunto. ###Code a <- c(1,2,3,4) b <- c("Ana", "Daniel","Victor","Iván") c <- c("Indicadores Sociodemográficos","Indicadores Económicos","Programación","Investigación Estratégica") df <- data.frame(a,b,c) names(df) <- c("id","Nombre","Puesto") df ###Output _____no_output_____ ###Markdown Listas o Lists Las listas son un conjunto ordenado o no ordenado de *cosas*, sin importar que sean o cual sea su naturaleza. ###Code lista <- c(a, df, m1) lista ###Output ERROR while rich displaying an object: Error in FUN(X[[i]], ...): attempt to use zero-length variable name Traceback: 1. FUN(X[[i]], ...) 2. tryCatch(withCallingHandlers({ . rpr <- mime2repr[[mime]](obj) . if (is.null(rpr)) . return(NULL) . prepare_content(is.raw(rpr), rpr) . }, error = error_handler), error = outer_handler) 3. tryCatchList(expr, classes, parentenv, handlers) 4. tryCatchOne(expr, names, parentenv, handlers[[1L]]) 5. doTryCatch(return(expr), name, parentenv, handler) 6. withCallingHandlers({ . rpr <- mime2repr[[mime]](obj) . if (is.null(rpr)) . return(NULL) . prepare_content(is.raw(rpr), rpr) . }, error = error_handler) 7. mime2repr[[mime]](obj) 8. repr_html.list(obj) 9. repr_list_generic(obj, "html", "\t<li>%s</li>\n", "\t<dt>$%s</dt>\n\t\t<dd>%s</dd>\n", . "<strong>$%s</strong> = %s", "<ol>\n%s</ol>\n", "<dl>\n%s</dl>\n", . numeric_item = "\t<dt>[[%s]]</dt>\n\t\t<dd>%s</dd>\n", escape_fun = html_escape) 10. sapply(nms, as.name, USE.NAMES = FALSE) 11. lapply(X = X, FUN = FUN, ...) 12. FUN(X[[i]], ...) ERROR while rich displaying an object: Error in FUN(X[[i]], ...): attempt to use zero-length variable name Traceback: 1. FUN(X[[i]], ...) 2. tryCatch(withCallingHandlers({ . rpr <- mime2repr[[mime]](obj) . if (is.null(rpr)) . return(NULL) . prepare_content(is.raw(rpr), rpr) . }, error = error_handler), error = outer_handler) 3. tryCatchList(expr, classes, parentenv, handlers) 4. tryCatchOne(expr, names, parentenv, handlers[[1L]]) 5. doTryCatch(return(expr), name, parentenv, handler) 6. withCallingHandlers({ . rpr <- mime2repr[[mime]](obj) . if (is.null(rpr)) . return(NULL) . prepare_content(is.raw(rpr), rpr) . }, error = error_handler) 7. 
mime2repr[[mime]](obj) 8. repr_markdown.list(obj) 9. repr_list_generic(obj, "markdown", "%s. %s\n", "$%s\n: %s\n", . "**$%s** = %s", "%s\n\n", numeric_item = "[[%s]]\n: %s\n", . item_uses_numbers = TRUE, escape_fun = html_escape) 10. sapply(nms, as.name, USE.NAMES = FALSE) 11. lapply(X = X, FUN = FUN, ...) 12. FUN(X[[i]], ...) ERROR while rich displaying an object: Error in FUN(X[[i]], ...): attempt to use zero-length variable name Traceback: 1. FUN(X[[i]], ...) 2. tryCatch(withCallingHandlers({ . rpr <- mime2repr[[mime]](obj) . if (is.null(rpr)) . return(NULL) . prepare_content(is.raw(rpr), rpr) . }, error = error_handler), error = outer_handler) 3. tryCatchList(expr, classes, parentenv, handlers) 4. tryCatchOne(expr, names, parentenv, handlers[[1L]]) 5. doTryCatch(return(expr), name, parentenv, handler) 6. withCallingHandlers({ . rpr <- mime2repr[[mime]](obj) . if (is.null(rpr)) . return(NULL) . prepare_content(is.raw(rpr), rpr) . }, error = error_handler) 7. mime2repr[[mime]](obj) 8. repr_latex.list(obj) 9. repr_list_generic(obj, "latex", "\\item %s\n", "\\item[\\$%s] %s\n", . "\\textbf{\\$%s} = %s", enum_wrap = "\\begin{enumerate}\n%s\\end{enumerate}\n", . named_wrap = "\\begin{description}\n%s\\end{description}\n", . numeric_item = "\\item[{[[%s]]}] %s\n", escape_fun = latex_escape) 10. sapply(nms, as.name, USE.NAMES = FALSE) 11. lapply(X = X, FUN = FUN, ...) 12. FUN(X[[i]], ...) ###Markdown Factores Los factores, más que un tipo de dato, es una forma de organizarlos de tal manera que las variables nominales sean interpretadas por R como números o factores.Por ejemplo, al usar género como carácter, el usuarío tendría que considerarlas como tal. ###Code gen <- c(rep("hombre",20), rep("mujer", 30)) gen gen.factor <- factor(gen) gen.factor summary(gen) summary(gen.factor) ###Output _____no_output_____ ###Markdown Empezando R Una de las mayores ventajas de R es su capacidad para manejar datos a gran escala y su facilidad para crear gráficas a partir de ellos. Pero esto viene a un costo para el usuario, haciendolo complejo de aprender por su manera de hacer las cosas en base a comandos (ya que es un lenguaje de programación basado en C/C++) y al manejo de las diferentes partes en archivos o scripts.Para facilitar esto se han creado herramientas como RStudio.RStudio no hace nada diferente a R, su ventaja esta en que es más visual y ayuda al usuario a saber donde estan las cosas y en que se quedó. Esto lo logra a través del uso de *imagenes* que se van guardando segun lo prefiera el usuario.Otra ayuda que ofrece RStudio es el manejo de directorios de manera más visual.Para saber en que directorio se trabaja se usa el comando $getwd()$ o *get working directory* ###Code getwd() ###Output _____no_output_____ ###Markdown Para establecer otro directorio se usa el comando $setwd()$ ###Code setwd("~/GitHub/r-econometria/1-conociendo-r") getwd() ###Output _____no_output_____ ###Markdown En este caso estamos trabajando en **jupyter** y **RStudio**, **jupyter** es una interface que nos ayuda a realizar documentos listos para su uso web, como ahora, e integra los comandos de **R** y **Python**. Cada archivo de **jupyter** es un cuaderno, pero los comandos funcionan de manera similar en R y RStudio.Para empezar a trabajar haremos un tabla y una gráfica, nos apoyaremos de la documentación que ofrecen los paquetes o librerías.Otra de las ventajas de R y Python es este manejo de librerias creadas por usuarios expertos en el tema. 
Lo cual hace posible que no sea necesario crear otro script o desarrollar algún paquete y se pueden integrar de otras existentes. Dichas librerias son consultadas y llevan un proceso de publicación para estar disponibles en el CRAN.Para hacer el primer ejercicio usaremos un archivo *roboscasa.csv*, el cual guardaremos en un directorio para trabajar.Primero hay que importar el archivo a un dataframe, R ofrece diversas posibilidades para importar datos con el comando *read.xxxx* donde xxxx especifica el tipo de archivo que se busca importar, en este caso es un csv pero tambíen existe la posiblidad de importar datos directo de una BD, clipboard/portapapeles o excel, entre otros.Para facilitar la manipulación de datos y el trabajo entre excel y R, RStudio ofrece ayuda para poder importar datos directamente desde excel con una función dentro del panel de environment.![Import Data](rstudio-importdata.png)Este wizard o ayuda nos sirve para poder especificar el tipo de datos, si usa fechas o si contiene NA o valores nulos.![Import Data Ventana](rstudio-importdata2.png)O tambíen se puede hacer mediante código, a lo cual existen muchas maneras (funciones). ###Code library(readxl) roboscasa <- read_excel("roboscasa.xlsx", col_types = c("date", # Fecha "numeric", # Torreón "numeric", # Gomez "numeric", # Lerdo "numeric")) # Matamoros ###Output _____no_output_____ ###Markdown Una vez cargados los datos, podemos hacer uso de ellos mediante las diversas funciones o librerias.Para installar una librería se usa el comando $install.packages()$, para esta caso usaremos varias como plotly, ggplot2, reshape, etc...RStudio nos ofrece un directorio de paquetes disponibles mediante el CRAN, así como una ventana para ayuda de cada paquete. ###Code install.packages(c("plotly","reshape")) #Es necesario usar estos comandos en RStudio. ###Output also installing the dependency 'crosstalk' ###Markdown Cuando ya se cuentan con las librerias instaladas, a lo cual también nos sirve Anaconda, para hacer uso de ella dentro de un script usamos la funcion $library$ ###Code library(gdata) library(ggplot2) library(plyr) library(reshape2) library(readxl) library(plotly) ###Output gdata: Unable to locate valid perl interpreter gdata: gdata: read.xls() will be unable to read Excel XLS and XLSX files gdata: unless the 'perl=' argument is used to specify the location of a gdata: valid perl intrpreter. gdata: gdata: (To avoid display of this message in the future, please ensure gdata: perl is installed and available on the executable search path.) gdata: Unable to load perl libaries needed by read.xls() gdata: to support 'XLX' (Excel 97-2004) files. gdata: Unable to load perl libaries needed by read.xls() gdata: to support 'XLSX' (Excel 2007+) files. gdata: Run the function 'installXLSXsupport()' gdata: to automatically download and install the perl gdata: libaries needed to support Excel XLS and XLSX formats. 
Attaching package: 'gdata' The following object is masked from 'package:stats': nobs The following object is masked from 'package:utils': object.size The following object is masked from 'package:base': startsWith Attaching package: 'plotly' The following objects are masked from 'package:plyr': arrange, mutate, rename, summarise The following object is masked from 'package:ggplot2': last_plot The following object is masked from 'package:stats': filter The following object is masked from 'package:graphics': layout ###Markdown Una vez que ya tenemos datos y librerias, podemos hacer diversas funciones comochecar el contenido de nuestras variables o dataframes.$head()$ nos permite ver las primeras lineas de las tablas que le especificamos$summary()$ nos da un resumen (media, cuartiles, promedio, cantidad de valores nulos, cantidad de valores) de las diversas variables que integran nuestros datos.$ls()$ nos da una lista de las variables de los datos. ###Code head(roboscasa) summary(roboscasa) ls(roboscasa) ###Output _____no_output_____ ###Markdown Podemos hacer gráficas simples con los comandos de plot() o plot.ts() ###Code plot(roboscasa) ###Output _____no_output_____ ###Markdown o podemos hacer uso de otras librerías como ggplot/ggplot2 o plotly para hacer gráficas más visualesPara el siguiente ejemplo, con los mismos datos, vamos a hacer un poco de cambios a las tablas declarando otra variable $robos$ y aplicandole la función $melt()$.Esta función lo que hace es cambiar la tabla o dataframe que tenemos $roboscasa$ a un formato que se denomina **largo**, el cual ayuda en términos de interpretación de datos a la computadora al ofrecer en forma de lista todos los datos de una tabla. ###Code robos <-melt(roboscasa, id = "Fecha") head(robos) qplot(Fecha, value, data = robos, geom = "line", group = variable) + facet_grid(variable ~., scale = "free_y") + theme_bw() + ggtitle("Robos a casa habitación por ciudad") + xlab("Año") + ylab("Número de robos") ggplotly() ###Output We recommend that you use the dev version of ggplot2 with `ggplotly()` Install it with: `devtools::install_github('hadley/ggplot2')` IOPub data rate exceeded. The notebook server will temporarily stop sending output to the client in order to avoid crashing it. To change this limit, set the config variable `--NotebookApp.iopub_data_rate_limit`. ###Markdown R también contiene librerias y funciones para hacer análisis estadísticos, como primer ejercicio usaremos una regresión lineal con los datos que tenemos.Para hacer una regresión podemos usar diferentes comandos pero la más usual es la funcion $lm()$.Aquí haremos uso de las *listas*, así que asignaremos a la variable **r** los resultados de correr la función $lm()$. ###Code reg <- lm(Torreon ~ Gomez + Lerdo + Matamoros, data = roboscasa) # Para poder ver el contenido de r es necesario llamarlo reg ###Output _____no_output_____ ###Markdown La manera en como trabaja R es en base a la simplicidad, es por eso que al llamar los resultados de la regresión, nos muestra un resumen de ello, el cual son los parámetros especificados y los resultados de los coeficientes estimados.Para poder ver a mayor detalle los resultados de la regrsión, podemos usar la función $summary()$. ###Code summary(reg) ###Output _____no_output_____ ###Markdown Para poder interpretar una regresión es necesario saber que es lo que hace y como se especifica. 
Para este caso queremos saber en qué medida se relacionan los robos a casa habitación de la ciudad de Torreón con los de las otras ciudades de la ZML.Para ello dentro de la funcion lm() necesitamos especificar una variable dependiente, en este caso $Torreon$ y con el operador $~$ asignarle las variables independientes $Gomez$ , $Lerdo$ y $Matamoros$, tambíen le especificamos de dónde vamos a sacar estos datos con $ data = roboscasa $Los resultados que nos da se pueden ver uno por uno con diferentes funciones como residuals() o coefficientes(). ###Code residuals(reg) coefficients(reg) ###Output _____no_output_____ ###Markdown También es posible ver la correlación entre dos variables con cor(), para el caso de nuestras variables, la correlación entre robos a casa habitación es negativa. ###Code cor(roboscasa$Torreon, roboscasa$Gomez) ###Output _____no_output_____ ###Markdown Pero las regresiones y el análisis de los datos es explorar las diferentes combinaciones que pueden existir y las causas detrás. Es por eso que necesitamos correr varias regresiones para analizar los resultados y estimar el modelo que más se adapte a la realidad o concluir que no existe algún tipo de relación entre las variables.Especificamos otro modelo como $r2$ con solo las variables de Torreón y Gomez. ###Code r2 <- lm(Torreon ~ Gomez, data = roboscasa) r2 summary(r2) plot(r2) r3 <- lm(Gomez ~ Torreon, data = roboscasa) r3 summary(r3) r4 <- lm(Torreon ~ Gomez + Lerdo + Matamoros -1, data = roboscasa) r4 summary(r4) ###Output _____no_output_____
docs/source/mnist.ipynb
###Markdown Getting Started with HyperkiteThis short introduction uses Hyperkite to:1. Build a neural network that classifies handwritten images (MNIST).2. Optimize the hyperparameters of the model using pytorchThis tutorial is part of the original [Hyperkite Documentation](https://hyperkite.ai/docs/getting-started/). InstallInstall the Hyperkite package by running the following command in your terminal: pip install hyperkite For installation instructions for other packages, visit the [PyTorch](https://pytorch.org/) website. ###Code import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data.dataloader as dataloader import torch.optim as optim from torch.utils.data import TensorDataset from torch.autograd import Variable from torchvision import transforms from torchvision.datasets import MNIST SEED = 1 cuda = torch.cuda.is_available() if cuda: torch.cuda.manual_seed(SEED) torch.manual_seed(SEED) ###Output _____no_output_____ ###Markdown Loading DataWe obtain a collection of handwritten digits by loading the [Digit Dataset](https://scikit-learn.org/stable/auto_examples/datasets/plot_digits_last_image.html) from sklearn: ###Code train = MNIST('./data', train=True, download=True, transform=transforms.Compose([ transforms.ToTensor(), # ToTensor does min-max normalization. ]), ) valid = MNIST('./data', train=True, download=True, transform=transforms.Compose([ transforms.ToTensor(), # ToTensor does min-max normalization. ]), ) train_valid_split = 55000 train.data = train.data[:train_valid_split] valid.data = valid.data[train_valid_split:] test = MNIST('./data', train=False, download=True, transform=transforms.Compose([ transforms.ToTensor(), # ToTensor does min-max normalization. ]), ) # Create DataLoader dataloader_args = dict(shuffle=True, batch_size=64) train_loader = dataloader.DataLoader(train, **dataloader_args) valid_loader = dataloader.DataLoader(valid, **dataloader_args) test_loader = dataloader.DataLoader(test, **dataloader_args) print(f'Training set ({len(train)} images)') print(f'Validation set ({len(valid)} images)') print(f'Test set ({len(test)} images)') ###Output Training set (55000 images) Validation set (5000 images) Test set (10000 images) ###Markdown Model ###Code import torch.nn.functional as F class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(1, 6, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(6, 16, 5) self.fc1 = nn.Linear(16 * 4 * 4, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) def forward(self, x): x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = x.view(-1, 16 * 4 * 4) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x net = Net() import torch.optim as optim criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9) ###Output _____no_output_____ ###Markdown TrainTraining time:- CPU, about 1 minute and 30 seconds- GPU, about 10 seconds ###Code for epoch in range(2): # Loop over the dataset 2 times running_loss = 0.0 for i, (inputs, labels) in enumerate(train_loader): # Zero the parameter gradients optimizer.zero_grad() # Forward + backward + optimize outputs = net(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() # Print statistics running_loss += loss.item() if i % 20 == 0: print(f'Epoch {epoch+1}, Iter {i+1}, Loss: {running_loss}') running_loss = 0.0 print('Finished Training') plot(losses) ###Output _____no_output_____ ###Markdown 
Evaluate ###Code test_x = Variable(test_loader.dataset.test_data.type_as(torch.FloatTensor())) test_y = Variable(test_loader.dataset.test_labels) if cuda: test_x, test_y = test_x.cuda(), test_y.cuda() model.eval() output = model(test_x) pred = output.data.max(1)[1] d = pred.eq(test_y.data).cpu() accuracy = d.sum() / d.size()[0] print(f'Accuracy without Hyperkite: {accuracy}') print(f'Accuracy with Hyperkite: {accuracy}') ###Output Accuracy without Hyperkite: 0 Accuracy with Hyperkite: 0
analysis/photodraw_2x2_analysis_jefan.ipynb
###Markdown load in features and metadata ###Code K = pd.read_csv(os.path.join(csv_dir, 'photodraw2x2_sketch_data.csv')) T = pd.read_csv(os.path.join(csv_dir, 'photodraw2x2_stroke_data.csv')) S = pd.read_csv(os.path.join(csv_dir, 'photodraw2x2_survey_data.csv')) S = S.reset_index(drop = True) F = np.load(os.path.join(feature_dir, f'FEATURES_FC6_photodraw2x2_sketch.npy')) Fi = np.load(os.path.join(feature_dir, f'photodraw2x2_instance_features.npy')) K = K.sort_values(by='feature_ind') KF = pd.concat([pd.DataFrame(F), K], axis=1) KF.sort_values(by=['goal', 'condition', 'category'], ascending = True, inplace = True) K = K.sort_values(by='feature_ind_instance') KFi = pd.concat([pd.DataFrame(Fi), K], axis=1) KFi.sort_values(by=['goal', 'condition', 'category'], ascending = True, inplace = True) category_means = [] for name, group in KF.groupby(['goal', 'condition', 'category']): if len(category_means)==0: category_means = group[np.arange(4096)].mean(axis=0) else: category_means = np.vstack((category_means, group[np.arange(4096)].mean(axis=0))) category_means_i = [] for name, group in KFi.groupby(['goal', 'condition', 'category']): if len(category_means_i)==0: category_means_i = group[np.arange(1000)].mean(axis=0) else: category_means_i = np.vstack((category_means_i, group[np.arange(1000)].mean(axis=0))) ###Output _____no_output_____ ###Markdown within category/experiment variance! ###Code def high_dim_variance(X): return sum(np.linalg.norm(x_i - x_j)**2 for x_i, x_j in combinations(X, 2)) / (len(X))**2 K = K.sort_values(by='feature_ind') KF = pd.concat([pd.DataFrame(F), K], axis=1) KF.sort_values(by=['goal', 'condition', 'category'], ascending = True, inplace = True) K = K.sort_values(by='feature_ind_instance') KF = pd.concat([pd.DataFrame(Fi, columns = np.arange(4096, 4096 + 2048)), KF], axis=1) df = pd.DataFrame(columns = ['category', 'condition', 'goal', 'fc6_variance', 'inst_variance']) i = 0 for ind, group in KF.groupby(['category', 'condition', 'goal']): indx = list(ind) indx.append(high_dim_variance(np.array(group[np.arange(4096)]))) indx.append(high_dim_variance(np.array(group[np.arange(4096, 4096 + 2048)]))) df.loc[i] = indx i += 1 sns.barplot(data = df, x = 'condition', y = 'fc6_variance', hue = 'goal'); plt.title('variance (fc6)!'); plt.show() sns.barplot(data = df, x = 'condition', y = 'inst_variance', hue = 'goal'); plt.title('variance (instance)!'); plt.show() #df.to_csv(os.path.join(csv_dir, 'photodraw_category_by_experiment_variances.csv')) df = pd.read_csv(os.path.join(csv_dir, 'photodraw2x2_category_by_experiment_variances.csv')) df.groupby(['condition', 'goal'])['fc6_variance'].mean() ###Output _____no_output_____ ###Markdown Get gallery stims for cogsci 2021 ###Code gall_path_2x2 = make_dir_if_not_exists(os.path.abspath('../../photodraw_latex/cogsci2021/photodraw32_gallery_examples')) cat = 'butterfly' fn = lambda obj: obj.loc[np.random.choice(obj.index),:] group = K[(K.category == cat) & (K.condition == 'photo')] lows = sorted(group['inst_typicality'].unique())[:3] highs = sorted(group['inst_typicality'].unique())[-3:] lowURLs = group[group.inst_typicality.isin(lows)].groupby('imageURL', as_index = False).\ apply(fn).sample(3).imageURL.values highURLs = group[group.inst_typicality.isin(highs)].groupby('imageURL', as_index = False).\ apply(fn).sample(3).imageURL.values for i, g in K[K.category == cat].groupby('experiment'): path = make_dir_if_not_exists(os.path.join(gall_path_2x2, g.experiment.values[0])) if all(g.condition == 'text'): images = 
[Image.open(BytesIO(base64.b64decode(imgdata))).resize((224,224)) for \ imgdata in g.pngData.sample(6).values] [im.save(os.path.join(path, f"{g.experiment.values[0]}_{cat}_{i}.png")) for i, im in enumerate(images)] else: atyp = g[g.imageURL.isin(lowURLs)] typ = g[g.imageURL.isin(highURLs)] atyp = atyp.groupby('imageURL', as_index = False).apply(fn).sample(3) typ = typ.groupby('imageURL', as_index = False).apply(fn).sample(3) images_atyp, at = atyp.pngData.values, atyp.imageURL.values images_typ, t = typ.pngData.values, typ.imageURL.values images_atyp = [Image.open(BytesIO(base64.b64decode(imgdata))).resize((224,224)) for imgdata in images_atyp] images_typ = [Image.open(BytesIO(base64.b64decode(imgdata))).resize((224,224)) for imgdata in images_typ] [im.save(os.path.join(path, f"{g.experiment.values[0]}_{cat}_atypical_{i}.png")) \ for i, im in zip(at, images_atyp)] [im.save(os.path.join(path, f"{g.experiment.values[0]}_{cat}_typical_{i}.png")) \ for i, im in zip(t, images_typ)] stims_path_atyp = [os.path.abspath(os.path.join(proj_dir, f'stimuli/photodraw32_stims_copy/{cat}_{url}.png'))\ for url in atyp.imageURL] stims_path_typ = [os.path.abspath(os.path.join(proj_dir, f'stimuli/photodraw32_stims_copy/{cat}_{url}.png'))\ for url in typ.imageURL] [shutil.copyfile(src, os.path.join(path, src.split('\\')[-1])) for src in stims_path_atyp] [shutil.copyfile(src, os.path.join(path, src.split('\\')[-1])) for src in stims_path_typ] ###Output _____no_output_____
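###Markdown
As a quick sanity check of `high_dim_variance` defined above (a sketch on synthetic data, assuming `np` is imported): the pairwise squared-distance formulation should equal the total per-dimension variance of the feature matrix.
###Code
rng = np.random.default_rng(0)                    # synthetic data just for the check
X_check = rng.normal(size=(50, 8))
pairwise_form = high_dim_variance(X_check)        # (1/n^2) * sum of squared pairwise distances
total_variance = X_check.var(axis=0).sum()        # sum of per-dimension (population) variances
print(np.isclose(pairwise_form, total_variance))  # expected: True
###Output _____no_output_____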
HelloWorld/Python_HW.ipynb
###Markdown Python script for printing "Hello World" ###Code print ('Hello World') ###Output Hello World
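###Markdown
The same `print` call also works with an f-string, so the greeting can come from a variable (a small illustrative sketch; `name` is just a placeholder).
###Code
name = 'World'          # placeholder value for illustration
print(f'Hello {name}')  # prints: Hello World
###Output _____no_output_____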
site/zh-cn/io/tutorials/prometheus.ipynb
###Markdown Copyright 2020 The TensorFlow IO Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output _____no_output_____
###Markdown
Loading metrics from a Prometheus server

View on TensorFlow.org | Run in Google Colab | View source on GitHub | Download notebook

Caution: In addition to Python packages, this notebook uses `sudo apt-get install` to install third-party packages.

Overview

This tutorial loads CoreDNS metrics from a [Prometheus](https://prometheus.io) server into a `tf.data.Dataset`, then uses `tf.keras` for training and inference.

[CoreDNS](https://github.com/coredns/coredns) is a DNS server with a focus on service discovery, and it is widely deployed as part of [Kubernetes](https://kubernetes.io) clusters. For that reason it is often closely monitored in DevOps operations. The example in this tutorial shows how such operations could be automated with machine learning.

Setup and usage

Install the required tensorflow-io package, and restart the runtime
###Code
import os
try:
  %tensorflow_version 2.x
except Exception:
  pass

!pip install tensorflow-io

from datetime import datetime
import tensorflow as tf
import tensorflow_io as tfio
###Output _____no_output_____
###Markdown
Install and set up CoreDNS and Prometheus

For demo purposes, a CoreDNS server listens locally on port `9053` to receive DNS queries and exposes port `9153` (the default) for scraping metrics. The following basic Corefile configuration for CoreDNS is available for [download](https://github.com/tensorflow/io/blob/master/docs/tutorials/prometheus/Corefile):

```
.:9053 {
  prometheus
  whoami
}
```

More details about installation can be found in the CoreDNS [documentation](https://coredns.io).
###Code
!curl -s -OL https://github.com/coredns/coredns/releases/download/v1.6.7/coredns_1.6.7_linux_amd64.tgz
!tar -xzf coredns_1.6.7_linux_amd64.tgz

!curl -s -OL https://raw.githubusercontent.com/tensorflow/io/master/docs/tutorials/prometheus/Corefile

!cat Corefile

# Run `./coredns` as a background process.
# IPython doesn't recognize `&` in inline bash cells.
get_ipython().system_raw('./coredns &')
###Output _____no_output_____
###Markdown
The next step is to set up a Prometheus server and use Prometheus to scrape the CoreDNS metrics exposed on port `9153` above. The `prometheus.yml` configuration file is also available for [download](https://github.com/tensorflow/io/blob/master/docs/tutorials/prometheus/prometheus.yml):
###Code
!curl -s -OL https://github.com/prometheus/prometheus/releases/download/v2.15.2/prometheus-2.15.2.linux-amd64.tar.gz
!tar -xzf prometheus-2.15.2.linux-amd64.tar.gz --strip-components=1

!curl -s -OL https://raw.githubusercontent.com/tensorflow/io/master/docs/tutorials/prometheus/prometheus.yml

!cat prometheus.yml

# Run `./prometheus` as a background process.
# IPython doesn't recognize `&` in inline bash cells.
get_ipython().system_raw('./prometheus &')
###Output _____no_output_____
###Markdown
In order to show some activity, the `dig` command can be used to generate a few DNS queries against the CoreDNS server that has been set up:
###Code
!sudo apt-get install -y -qq dnsutils
!dig @127.0.0.1 -p 9053 demo1.example.org
!dig @127.0.0.1 -p 9053 demo2.example.org
###Output
; <<>> DiG 9.11.3-1ubuntu1.11-Ubuntu <<>> @127.0.0.1 -p 9053 demo2.example.org
; (1 server found)
;; global options: +cmd
;; Got answer:
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 53163
;; flags: qr aa rd; QUERY: 1, ANSWER: 0, AUTHORITY: 0, ADDITIONAL: 3
;; WARNING: recursion requested but not available

;; OPT PSEUDOSECTION:
; EDNS: version: 0, flags:; udp: 4096
; COOKIE: f18b2ba23e13446d (echoed)
;; QUESTION SECTION:
;demo2.example.org. IN A

;; ADDITIONAL SECTION:
demo2.example.org. 0 IN A 127.0.0.1
_udp.demo2.example.org. 0 IN SRV 0 0 42194 .
;; Query time: 0 msec
;; SERVER: 127.0.0.1#9053(127.0.0.1)
;; WHEN: Tue Mar 03 22:35:21 UTC 2020
;; MSG SIZE rcvd: 132
###Markdown
The CoreDNS server is now set up, and its metrics are scraped by the Prometheus server and ready to be consumed by TensorFlow.

Create a Dataset for CoreDNS metrics and use it in TensorFlow

A Dataset for CoreDNS metrics, accessible from the Prometheus server, can be created with `tfio.experimental.IODataset.from_prometheus`. At least two arguments are needed: `query` is passed to the Prometheus server to select the metrics, and `length` is the period of time to load into the Dataset.

You can start with `"coredns_dns_request_count_total"` and `"5"` (seconds) to create the Dataset below. Since two DNS queries were sent earlier in the tutorial, the `"coredns_dns_request_count_total"` metric will be `"2.0"` at the end of the time series.
###Code
dataset = tfio.experimental.IODataset.from_prometheus(
    "coredns_dns_request_count_total", 5, endpoint="http://localhost:9090")


print("Dataset Spec:\n{}\n".format(dataset.element_spec))

print("CoreDNS Time Series:")
for (time, value) in dataset:
  # time is milli second, convert to data time:
  time = datetime.fromtimestamp(time // 1000)
  print("{}: {}".format(time, value['coredns']['localhost:9153']['coredns_dns_request_count_total']))
###Output
Dataset Spec:
(TensorSpec(shape=(), dtype=tf.int64, name=None), {'coredns': {'localhost:9153': {'coredns_dns_request_count_total': TensorSpec(shape=(), dtype=tf.float64, name=None)}}})

CoreDNS Time Series:
2020-03-03 22:35:17: 2.0
2020-03-03 22:35:18: 2.0
2020-03-03 22:35:19: 2.0
2020-03-03 22:35:20: 2.0
2020-03-03 22:35:21: 2.0
###Markdown
Looking further into the spec of the Dataset:

```
(
  TensorSpec(shape=(), dtype=tf.int64, name=None),
  {
    'coredns': {
      'localhost:9153': {
        'coredns_dns_request_count_total': TensorSpec(shape=(), dtype=tf.float64, name=None)
      }
    }
  }
)
```

It is clear that the dataset consists of `(time, values)` tuples, where the `values` field is a Python dict expanded into:

```
"job_name": {
  "instance_name": {
    "metric_name": value,
  },
}
```

In the example above, `'coredns'` is the job name, `'localhost:9153'` is the instance name, and `'coredns_dns_request_count_total'` is the metric name. Note that, depending on the Prometheus query used, multiple jobs/instances/metrics could be returned. This is also the reason why a Python dict is used in the structure of the Dataset.

Take another query, `"go_memstats_gc_sys_bytes"`, as an example. Since both CoreDNS and Prometheus are written in Go, the `"go_memstats_gc_sys_bytes"` metric is available for both the `"coredns"` job and the `"prometheus"` job:

Note: This cell may error out the first time you run it. Run it again and it will pass.
###Code
dataset = tfio.experimental.IODataset.from_prometheus(
    "go_memstats_gc_sys_bytes", 5, endpoint="http://localhost:9090")

print("Time Series CoreDNS/Prometheus Comparision:")
for (time, value) in dataset:
  # time is milli second, convert to data time:
  time = datetime.fromtimestamp(time // 1000)
  print("{}: {}/{}".format(
      time,
      value['coredns']['localhost:9153']['go_memstats_gc_sys_bytes'],
      value['prometheus']['localhost:9090']['go_memstats_gc_sys_bytes']))
###Output
Time Series CoreDNS/Prometheus Comparision:
2020-03-03 22:35:17: 2385920.0/2775040.0
2020-03-03 22:35:18: 2385920.0/2775040.0
2020-03-03 22:35:19: 2385920.0/2775040.0
2020-03-03 22:35:20: 2385920.0/2775040.0
2020-03-03 22:35:21: 2385920.0/2775040.0
###Markdown
The created `Dataset` is now ready to be passed directly to `tf.keras` for training or inference.

Use the Dataset for model training

With the metrics Dataset created, it can be passed directly to `tf.keras` for model training or inference.

For demo purposes, this tutorial will just use a very simple LSTM model with 1 feature and 2 steps as input:
###Code
n_steps, n_features = 2, 1
simple_lstm_model = tf.keras.models.Sequential([
    tf.keras.layers.LSTM(8, input_shape=(n_steps, n_features)),
    tf.keras.layers.Dense(1)
])

simple_lstm_model.compile(optimizer='adam', loss='mae')
###Output _____no_output_____
###Markdown
The dataset to be used is the value of CoreDNS's `'go_memstats_sys_bytes'` with 10 samples. However, since a sliding window of `window=n_steps` and `shift=1` is formed, additional samples are needed (for any two consecutive elements, the first is taken as `x` and the next as `y` for training). The total is `10 + n_steps - 1 + 1 = 12` seconds.

The data values will also be scaled to `[0, 1]`.
###Code
n_samples = 10

dataset = tfio.experimental.IODataset.from_prometheus(
    "go_memstats_sys_bytes", n_samples + n_steps - 1 + 1, endpoint="http://localhost:9090")

# take
go_memstats_gc_sys_bytes from coredns job dataset = dataset.map(lambda _, v: v['coredns']['localhost:9153']['go_memstats_sys_bytes']) # find the max value and scale the value to [0, 1] v_max = dataset.reduce(tf.constant(0.0, tf.float64), tf.math.maximum) dataset = dataset.map(lambda v: (v / v_max)) # expand the dimension by 1 to fit n_features=1 dataset = dataset.map(lambda v: tf.expand_dims(v, -1)) # take a sliding window dataset = dataset.window(n_steps, shift=1, drop_remainder=True) dataset = dataset.flat_map(lambda d: d.batch(n_steps)) # the first value is x and the next value is y, only take 10 samples x = dataset.take(n_samples) y = dataset.skip(1).take(n_samples) dataset = tf.data.Dataset.zip((x, y)) # pass the final dataset to model.fit for training simple_lstm_model.fit(dataset.batch(1).repeat(10), epochs=5, steps_per_epoch=10) ###Output Train for 10 steps Epoch 1/5 10/10 [==============================] - 2s 150ms/step - loss: 0.8484 Epoch 2/5 10/10 [==============================] - 0s 10ms/step - loss: 0.7808 Epoch 3/5 10/10 [==============================] - 0s 10ms/step - loss: 0.7102 Epoch 4/5 10/10 [==============================] - 0s 11ms/step - loss: 0.6359 Epoch 5/5 10/10 [==============================] - 0s 11ms/step - loss: 0.5572
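###Markdown
After training, the fitted model can be used for a quick inference pass over the same windows (a minimal sketch, assuming `dataset` and `simple_lstm_model` from the cells above; values are still in the scaled `[0, 1]` range):
###Code
for x_window, y_window in dataset.take(3):
  y_pred = simple_lstm_model.predict(tf.expand_dims(x_window, 0))
  print("predicted: {:.4f}  target (last step of y window): {:.4f}".format(
      float(y_pred[0][0]), float(y_window[-1][0])))
###Output _____no_output_____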
.ipynb_checkpoints/Traffic_Sign_Classifier (2)-checkpoint.ipynb
###Markdown Self-Driving Car Engineer Nanodegree Deep Learning Project: Build a Traffic Sign Recognition ClassifierIn this notebook, a template is provided for you to implement your functionality in stages, which is required to successfully complete this project. If additional code is required that cannot be included in the notebook, be sure that the Python code is successfully imported and included in your submission if necessary. > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the iPython Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to \n", "**File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission. In addition to implementing code, there is a writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-Traffic-Sign-Classifier-Project/blob/master/writeup_template.md) that can be used to guide the writing process. Completing the code template and writeup template will cover all of the [rubric points](https://review.udacity.com/!/rubrics/481/view) for this project.The [rubric](https://review.udacity.com/!/rubrics/481/view) contains "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. The stand out suggestions are optional. If you decide to pursue the "stand out suggestions", you can include the code in this Ipython notebook and also discuss the results in the writeup file.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode. --- Step 0: Load The Data ###Code # Load pickled data import pickle # TODO: Fill this in based on where you saved the training and testing data training_file = './train.p' validation_file='./valid.p' testing_file = './test.p' with open(training_file, mode='rb') as f: train = pickle.load(f) with open(validation_file, mode='rb') as f: valid = pickle.load(f) with open(testing_file, mode='rb') as f: test = pickle.load(f) X_train, y_train = train['features'], train['labels'] X_valid, y_valid = valid['features'], valid['labels'] X_test, y_test = test['features'], test['labels'] ###Output _____no_output_____ ###Markdown --- Step 1: Dataset Summary & ExplorationThe pickled data is a dictionary with 4 key/value pairs:- `'features'` is a 4D array containing raw pixel data of the traffic sign images, (num examples, width, height, channels).- `'labels'` is a 1D array containing the label/class id of the traffic sign. The file `signnames.csv` contains id -> name mappings for each id.- `'sizes'` is a list containing tuples, (width, height) representing the original width and height the image.- `'coords'` is a list containing tuples, (x1, y1, x2, y2) representing coordinates of a bounding box around the sign in the image. **THESE COORDINATES ASSUME THE ORIGINAL IMAGE. THE PICKLED DATA CONTAINS RESIZED VERSIONS (32 by 32) OF THESE IMAGES**Complete the basic data summary below. Use python, numpy and/or pandas methods to calculate the data summary rather than hard coding the results. 
For example, the [pandas shape method](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.shape.html) might be useful for calculating some of the summary results. Provide a Basic Summary of the Data Set Using Python, Numpy and/or Pandas ###Code ### Replace each question mark with the appropriate value. ### Use python, pandas or numpy methods rather than hard coding the results # TODO: Number of training examples n_train = X_train.shape[0] # TODO: Number of validation examples n_validation = X_valid.shape[0] # TODO: Number of testing examples. n_test = X_test.shape[0] # TODO: What's the shape of an traffic sign image? image_shape = X_train.shape[1:3] # TODO: How many unique classes/labels there are in the dataset. n_classes = len(set(y_test)) print("Number of training examples =", n_train) print('Number of validation examples =', n_validation) print("Number of testing examples =", n_test) print("Image data shape =", image_shape) print("Number of classes =", n_classes) ###Output Number of training examples = 34799 Number of validation examples = 4410 Number of testing examples = 12630 Image data shape = (32, 32) Number of classes = 43 ###Markdown Include an exploratory visualization of the dataset Visualize the German Traffic Signs Dataset using the pickled file(s). This is open ended, suggestions include: plotting traffic sign images, plotting the count of each sign, etc. The [Matplotlib](http://matplotlib.org/) [examples](http://matplotlib.org/examples/index.html) and [gallery](http://matplotlib.org/gallery.html) pages are a great resource for doing visualizations in Python.**NOTE:** It's recommended you start with something simple first. If you wish to do more, come back to it after you've completed the rest of the sections. It can be interesting to look at the distribution of classes in the training, validation and test set. Is the distribution the same? Are there more examples of some classes than others? ###Code # import the csv signnames to a dict import csv sign_names = {} with open('signnames.csv', mode='r') as infile: reader = csv.reader(infile) for row in reader: num, content = row sign_names[num] = content ### Data exploration visualization code goes here. ### Feel free to use as many code cells as needed. import matplotlib.pyplot as plt # Visualizations will be shown in the notebook. 
%matplotlib inline choiced_num = 4 label = y_train[choiced_num] image = X_train[choiced_num] plt.imshow(image) plt.title(str(label)+": "+sign_names[str(label)]) # Shuffle the training data from sklearn.utils import shuffle X_train, y_train = shuffle(X_train, y_train) # Get 43 samples image_samples = [] # a list hold the 43 samples for i in range(n_classes): for j in range(n_train): if y_train[j] == i: image_samples.append(X_train[j]) break # Show all 43 signs from matplotlib.gridspec import GridSpec samples_fig = plt.figure(figsize=(16,8)) rows = 5 cols = 10 count = 0 gs = GridSpec(rows,cols) for i in range(rows): for j in range(cols): ax = samples_fig.add_subplot(gs[i,j]) ax.imshow(image_samples[count]) ax.set_xticks([]) ax.set_yticks([]) # ax.set_title(sign_names[str(count)]) count += 1 if count == n_classes: break samples_fig.savefig("sample_fig.jpg") # Show the sign with label (first 16 figure) from matplotlib.gridspec import GridSpec samples_fig = plt.figure(figsize=(16,20)) rows = 5 cols = 4 count = 0 gs = GridSpec(rows,cols) for i in range(rows): for j in range(cols): ax = samples_fig.add_subplot(gs[i,j]) ax.imshow(image_samples[count]) ax.set_xticks([]) ax.set_yticks([]) ax.set_title(str(count)+": "+sign_names[str(count)]) count += 1 if count == n_classes: break samples_fig.savefig('sample_fig_with_label.jpg') # Get the classes distribution in train data import numpy as np classes_num = [] for i in range(n_classes): classes_num.append(np.sum(y_train==i)) import pandas as pd classes_text = [sign_names[str(i)] for i in range(n_classes)] classes_serial = pd.Series(classes_num, index=classes_text) classes_serial.plot(kind='bar',figsize=(16,16)) # Show the sign with label (first 16 figure) from matplotlib.gridspec import GridSpec images_sample = X_train[:12].copy() # get 12 pictures samples_fig = plt.figure(figsize=(16,12)) rows = 3 cols = 4 count = 0 gs = GridSpec(rows,cols) for i in range(rows): for j in range(cols): ax = samples_fig.add_subplot(gs[i,j]) ax.imshow(images_sample[count]) ax.set_xticks([]) ax.set_yticks([]) count += 1 ### Data Augmentation, Transfer import tensorflow as tf images_sample = X_train[:12].copy() # get 12 pictures degree = np.random.randint(-30,30)/180*np.pi # rotate np.pi = 180 degree offset_height, offset_width, target_height, target_width = np.random.randint(0,7),np.random.randint(0,7),32+12,32+12 images = tf.placeholder(tf.uint8, images_sample.shape, name='images_holder') images_pad = tf.image.pad_to_bounding_box(images, offset_height, offset_width, target_height, target_width) offset_height_crop, offset_width_crop = np.random.randint(0,7),np.random.randint(0,7) images_crop = tf.image.crop_to_bounding_box(images_pad, offset_height_crop, offset_width_crop,32,32) with tf.Session() as sess: result = sess.run(images_crop, feed_dict={images: images_sample}) samples_fig = plt.figure(figsize=(16,12)) rows = 3 cols = 4 count = 0 gs = GridSpec(rows,cols) for i in range(rows): for j in range(cols): ax = samples_fig.add_subplot(gs[i,j]) ax.imshow(result[count]) ax.set_xticks([]) ax.set_yticks([]) count += 1 ### Data Augmentation, rotation import tensorflow as tf images_sample = X_train[:12].copy() # get 12 pictures degree = np.random.randint(-30,30)/180*np.pi # rotate np.pi = 180 degree images = tf.placeholder(tf.uint8, images_sample.shape, name='images_holder') images_ro = tf.contrib.image.rotate(images, degree) with tf.Session() as sess: result = sess.run(images_ro, feed_dict={images: images_sample}) samples_fig = plt.figure(figsize=(16,12)) rows = 3 cols = 4 
count = 0 gs = GridSpec(rows,cols) for i in range(rows): for j in range(cols): ax = samples_fig.add_subplot(gs[i,j]) ax.imshow(result[count]) ax.set_xticks([]) ax.set_yticks([]) count += 1 ### Data Augmentation, transfor, rotation import tensorflow as tf images_sample = X_train[:12].copy() # get 12 pictures images = tf.placeholder(tf.uint8, images_sample.shape, name='images_holder') offset_height, offset_width, target_height, target_width = np.random.randint(0,7),np.random.randint(0,7),32+12,32+12 images_pad = tf.image.pad_to_bounding_box(images, offset_height, offset_width, target_height, target_width) offset_height_crop, offset_width_crop = np.random.randint(0,7),np.random.randint(0,7) images_crop = tf.image.crop_to_bounding_box(images_pad, offset_height_crop, offset_width_crop,32,32) degree = np.random.randint(-30,30)/180*np.pi # rotate np.pi = 180 degree images_ro = tf.contrib.image.rotate(images_crop, degree) with tf.Session() as sess: result = sess.run(images_ro, feed_dict={images: images_sample}) samples_fig = plt.figure(figsize=(16,12)) rows = 3 cols = 4 count = 0 gs = GridSpec(rows,cols) for i in range(rows): for j in range(cols): ax = samples_fig.add_subplot(gs[i,j]) ax.imshow(result[count]) ax.set_xticks([]) ax.set_yticks([]) count += 1 ### Data Augmentation, rotation, pad import tensorflow as tf images_sample = X_train[:12].copy() # get 12 pictures images = tf.placeholder(tf.uint8, images_sample.shape, name='images_holder') degree = np.random.randint(-30,30)/180*np.pi # rotate np.pi = 180 degree images_ro = tf.contrib.image.rotate(images, degree) offset_height, offset_width, target_height, target_width = np.random.randint(0,7),np.random.randint(0,7),32+12,32+12 images_pad = tf.image.pad_to_bounding_box(images_ro, offset_height, offset_width, target_height, target_width) offset_height_crop, offset_width_crop = np.random.randint(0,7),np.random.randint(0,7) images_crop = tf.image.crop_to_bounding_box(images_pad, offset_height_crop, offset_width_crop,32,32) with tf.Session() as sess: result = sess.run(images_crop, feed_dict={images: images_sample}) samples_fig = plt.figure(figsize=(16,12)) rows = 3 cols = 4 count = 0 gs = GridSpec(rows,cols) for i in range(rows): for j in range(cols): ax = samples_fig.add_subplot(gs[i,j]) ax.imshow(result[count]) ax.set_xticks([]) ax.set_yticks([]) count += 1 ###Output _____no_output_____ ###Markdown ---- Step 2: Design and Test a Model ArchitectureDesign and implement a deep learning model that learns to recognize traffic signs. Train and test your model on the [German Traffic Sign Dataset](http://benchmark.ini.rub.de/?section=gtsrb&subsection=dataset).The LeNet-5 implementation shown in the [classroom](https://classroom.udacity.com/nanodegrees/nd013/parts/fbf77062-5703-404e-b60c-95b78b2f3f9e/modules/6df7ae49-c61c-4bb2-a23e-6527e69209ec/lessons/601ae704-1035-4287-8b11-e2c2716217ad/concepts/d4aca031-508f-4e0b-b493-e7b706120f81) at the end of the CNN lesson is a solid starting point. You'll have to change the number of classes and possibly the preprocessing, but aside from that it's plug and play! With the LeNet-5 solution from the lecture, you should expect a validation set accuracy of about 0.89. To meet specifications, the validation set accuracy will need to be at least 0.93. It is possible to get an even higher accuracy, but 0.93 is the minimum for a successful project submission. 
There are various aspects to consider when thinking about this problem:- Neural network architecture (is the network over or underfitting?)- Play around preprocessing techniques (normalization, rgb to grayscale, etc)- Number of examples per label (some have more than others).- Generate fake data.Here is an example of a [published baseline model on this problem](http://yann.lecun.com/exdb/publis/pdf/sermanet-ijcnn-11.pdf). It's not required to be familiar with the approach used in the paper but, it's good practice to try to read papers like these. Pre-process the Data Set (normalization, grayscale, etc.) Minimally, the image data should be normalized so that the data has mean zero and equal variance. For image data, `(pixel - 128)/ 128` is a quick way to approximately normalize the data and can be used in this project. Other pre-processing steps are optional. You can try different techniques to see if it improves performance. Use the code cell (or multiple code cells, if necessary) to implement the first step of your project. ###Code ### Preprocess the data here. It is required to normalize the data. Other preprocessing steps could include ### converting to grayscale, etc. ### Feel free to use as many code cells as needed. from helper import * X_train_gray = rgb_gray(X_train) X_valid_gray = rgb_gray(X_valid) X_test_gray = rgb_gray(X_test) # X_train = rgb_y(X_train) # X_valid = rgb_y(X_valid) # X_test = rgb_y(X_test) # X_train = rgb_yuv(X_train) # X_valid = rgb_yuv(X_valid) # X_test = rgb_yuv(X_test) # Shuffle the training data # from sklearn.utils import shuffle # X_train, y_train = shuffle(X_train, y_train) # Normalize the images import numpy as np X_train_f = X_train.astype(np.float32) # change the dtype from uint8 to float32 X_valid_f = X_valid.astype(np.float32) X_test_f = X_test.astype(np.float32) X_train_n = normalize_image(X_train) X_valid_n = normalize_image(X_valid) X_test_n = normalize_image(X_test) # Normalize the images import numpy as np X_train_gray_f = X_train_gray.astype(np.float32) # change the dtype from uint8 to float32 X_valid_gray_f = X_valid_gray.astype(np.float32) X_test_gray_f = X_test_gray.astype(np.float32) X_train_gray_n = normalize_image(X_train_gray) X_valid_gray_n = normalize_image(X_valid_gray) X_test_gray_n = normalize_image(X_test_gray) ###Output _____no_output_____ ###Markdown Model Architecture ###Code ### Define your architecture here. ### Feel free to use as many code cells as needed. import tensorflow as tf x = tf.placeholder(tf.float32, (None, 32, 32, 1), 'X_holder') y = tf.placeholder(tf.int32, (None), 'y_holder') one_hot_y = tf.one_hot(y, 43) from tensorflow.contrib.layers import flatten keep_prob_conv = tf.placeholder(tf.float32, name='keep_prob_conv') keep_prob_fc = tf.placeholder(tf.float32, name='keep_prob_conv') ### Define your architecture here. ### Feel free to use as many code cells as needed. 
import tensorflow as tf x = tf.placeholder(tf.float32, (None, 32, 32, 3), 'X_holder') y = tf.placeholder(tf.int32, (None), 'y_holder') one_hot_y = tf.one_hot(y, 43) # # Transfer offsets = tf.placeholder(tf.int32, 4) # offset_height, offset_width, target_height, target_width = np.random.randint(0,7),np.random.randint(0,7),32+12,32+12 images_pad = tf.image.pad_to_bounding_box(x, offsets[0], offsets[1], 32+12, 32+12) # offset_height_crop, offset_width_crop = np.random.randint(0,7),np.random.randint(0,7) images_tr = tf.image.crop_to_bounding_box(images_pad, offsets[2], offsets[3],32,32) # Rotate degree = tf.placeholder(tf.float32, 1, name='rotate_degree_holder') # degree = np.random.randint(-30,30)/180*np.pi # rotate np.pi = 180 degree images_ro = tf.contrib.image.rotate(x, degree) # # Transfer + Rotate images_ro_tr = tf.contrib.image.rotate(images_crop, degree) # # rotate + Transfer images_ro_pad = tf.image.pad_to_bounding_box(images_ro, offsets[0], offsets[1], 32+12, 32+12) images_ro_tr = tf.image.crop_to_bounding_box(images_ro_pad, offsets[2], offsets[3], 32, 32) from tensorflow.contrib.layers import flatten keep_prob_conv = tf.placeholder(tf.float32, name='keep_prob_conv') keep_prob_fc = tf.placeholder(tf.float32, name='keep_prob_conv') ###Output _____no_output_____ ###Markdown Train, Validate and Test the Model A validation set can be used to assess how well the model is performing. A low accuracy on the training and validationsets imply underfitting. A high accuracy on the training set but low accuracy on the validation set implies overfitting. ###Code ### Train your model here. ### Calculate and report the accuracy on the training and validation set. ### Once a final model architecture is selected, ### the accuracy on the test set should be calculated and reported as well. ### Feel free to use as many code cells as needed. 
from helper import * rate = 0.001 EPOCHS = 10 BATCH_SIZE = 128 # logits = LeNet(x) # logits_ro = LeNet(images_ro) # ogits = LeNet(x_5x) # logits = LeNet_dropout(x) # nodrop, just check the function if work, should has same result as LeNet(x) # logits = LeNet_4x(x) # logits = LeNet_4x_dropout(x) # logits = LeNet_4x_3c(x) # logits_ro = LeNet_4x_3c(images_ro) # logits_tr = LeNet_4x_c(images_) # logits = LeNet_4x_3c_dropout(x) # logits = LeNet_4x_3c(x) # logits = LeNet_16x(x) # logits = LeNet_16x_3c(x) # logits = LeNet_16x_dropout(x) # logits = LeNet_MS(x) # logits = LeNet_MS_2(x) # logits = LeNet_MS_3(x) cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_y, logits=logits) loss_operation = tf.reduce_mean(cross_entropy) optimizer = tf.train.AdamOptimizer(learning_rate = rate) training_operation = optimizer.minimize(loss_operation) cross_entropy_ro = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_y, logits=logits_ro) loss_operation_ro = tf.reduce_mean(cross_entropy_ro) optimizer_ro = tf.train.AdamOptimizer(learning_rate = rate) training_operation_ro = optimizer_ro.minimize(loss_operation_ro) correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1)) accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) saver = tf.train.Saver() def evaluate(X_data, y_data): num_examples = len(X_data) total_accuracy = 0 sess = tf.get_default_session() for offset in range(0, num_examples, BATCH_SIZE): batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE] accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y, keep_prob_conv: 1.0, keep_prob_fc: 1.0}) total_accuracy += (accuracy * len(batch_x)) return total_accuracy / num_examples def pipeline(network, images): logits = network(images) cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_y, logits=logits) loss_operation = tf.reduce_mean(cross_entropy) optimizer = tf.train.AdamOptimizer(learning_rate = rate) training_operation = optimizer.minimize(loss_operation) return training_operation network = LeNet_4x_3c training_operation = pipeline(network, x) training_operation_ro = pipeline(network, images_ro) # cross_entropy_ro = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_y, logits=logits_ro) # loss_operation_ro = tf.reduce_mean(cross_entropy_ro) # optimizer_ro = tf.train.AdamOptimizer(learning_rate = rate) # training_operation_ro = optimizer_ro.minimize(loss_operation_ro) correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1)) accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) saver = tf.train.Saver() def evaluate(X_data, y_data): num_examples = len(X_data) total_accuracy = 0 sess = tf.get_default_session() for offset in range(0, num_examples, BATCH_SIZE): batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE] accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y, keep_prob_conv: 1.0, keep_prob_fc: 1.0}) total_accuracy += (accuracy * len(batch_x)) return total_accuracy / num_examples def train(X_train,y_train,X_valid,y_valid,X_test,y_test): with tf.Session() as sess: sess.run(tf.global_variables_initializer()) num_examples = len(X_train) print("Training...") # print() for i in range(EPOCHS): X_train, y_train = shuffle(X_train, y_train) for offset in range(0, num_examples, BATCH_SIZE): end = offset + BATCH_SIZE batch_x, batch_y = X_train[offset:end], y_train[offset:end] sess.run(training_operation, feed_dict={x: batch_x, y: 
batch_y, keep_prob_conv: 1.0, keep_prob_fc: 1.0}) train_accuracy = evaluate(X_train, y_train) validation_accuracy = evaluate(X_valid, y_valid) test_accuracy = evaluate(X_test, y_test) print("EPOCH {} ...".format(i+1)) print("Train Accuracy = {:.3f} Validation Accuracy = {:.3f} Test Accuracy = {:.3f}"\ .format(train_accuracy,validation_accuracy,test_accuracy)) # print() saver.save(sess, './lenet') print("Model saved") def train_add_data(X_train,y_train,X_valid,y_valid,X_test,y_test): with tf.Session() as sess: sess.run(tf.global_variables_initializer()) num_examples = len(X_train) print("Training...") # print() for i in range(EPOCHS): X_train, y_train = shuffle(X_train, y_train) for offset in range(0, num_examples, BATCH_SIZE): end = offset + BATCH_SIZE batch_x, batch_y = X_train[offset:end], y_train[offset:end] sess.run(training_operation, feed_dict={x: batch_x, y: batch_y, keep_prob_conv: 1.0, keep_prob_fc: 1.0}) for _ in range(20): # rotation data degree_gen = [np.random.randint(-30,30)/180*np.pi] # rotate np.pi = 180 degree sess.run(training_operation_ro, feed_dict={x: batch_x, y: batch_y, keep_prob_conv: 1.0, keep_prob_fc: 1.0, degree: degree_gen}) offsets_gen = [np.random.randint(0,7),np.random.randint(0,7),np.random.randint(0,7),np.random.randint(0,7)] sess.run(training_operation_tr, feed_dict={x: batch_x, y: batch_y, keep_prob_conv: 1.0, keep_prob_fc: 1.0, degree: degree_gen, offsets: offsets_gen}) train_accuracy = evaluate(X_train, y_train) validation_accuracy = evaluate(X_valid, y_valid) test_accuracy = evaluate(X_test, y_test) print("EPOCH {} ...".format(i+1)) print("Train Accuracy = {:.3f} Validation Accuracy = {:.3f} Test Accuracy = {:.3f}"\ .format(train_accuracy,validation_accuracy,test_accuracy)) # print() saver.save(sess, './lenet') print("Model saved") def train_data_augmentation(X_train,y_train,X_valid,y_valid,X_test,y_test, augemntation_factor=1): with tf.Session() as sess: sess.run(tf.global_variables_initializer()) num_examples = len(X_train) print("Training...") # print() for i in range(EPOCHS): X_train, y_train = shuffle(X_train, y_train) for offset in range(0, num_examples, BATCH_SIZE): end = offset + BATCH_SIZE batch_x, batch_y = X_train[offset:end], y_train[offset:end] sess.run(training_operation, feed_dict={x: batch_x, y: batch_y, keep_prob_conv: 1.0, keep_prob_fc: 1.0}) # the augmentation data for _ in range(augemntation_factor): # transfer degree = np.random.randint(-30,30)/180*np.pi # rotate np.pi = 180 degree offset_height, offset_width, target_height, target_width = np.random.randint(0,7),np.random.randint(0,7),32+12,32+12 images_pad = tf.image.pad_to_bounding_box(batch_x, offset_height, offset_width, target_height, target_width) offset_height_crop, offset_width_crop = np.random.randint(0,7),np.random.randint(0,7) images_crop = tf.image.crop_to_bounding_box(images_pad, offset_height_crop, offset_width_crop,32,32) images_modified = images_crop.eval() sess.run(training_operation, feed_dict={x: images_modified, y:batch_y, keep_prob_conv: 1.0, keep_prob_fc:1.0}) train_accuracy = evaluate(X_train, y_train) validation_accuracy = evaluate(X_valid, y_valid) test_accuracy = evaluate(X_test, y_test) print("EPOCH {} ...".format(i+1)) print("Train Accuracy = {:.3f} Validation Accuracy = {:.3f} Test Accuracy = {:.3f}"\ .format(train_accuracy,validation_accuracy,test_accuracy)) # print() saver.save(sess, './lenet') print("Model saved") def get_errors(X_data, y_data): error_list = [0 for i in range(n_classes)] with tf.Session() as sess: saver.restore(sess, 
tf.train.latest_checkpoint('.')) predict = sess.run(correct_prediction, feed_dict={x: X_data, y: y_data, keep_prob_conv: 1.0, keep_prob_fc: 1.0}) for i in range(len(y_data)): if predict[i] == False: error_list[y_data[i]] += 1 return error_list # LeNet Model result RGB to Gray train(X_train_gray,y_train,X_valid_gray,y_valid,X_test_gray,y_test) # LeNet Model result RGB to Gray, add 20 x data train_add_data(X_train_gray,y_train,X_valid_gray,y_valid,X_test_gray,y_test) # LeNet Model result RGB to Gray, add 20 x data train_add_data(X_train_gray,y_train,X_valid_gray,y_valid,X_test_gray,y_test) type(y_train[0]) error_list = get_errors(X_valid_gray,y_valid) print(1-sum(error_list)/len(y_valid)) # compare with the validation Accuracy # classes_text = [sign_names[str(i)] for i in range(n_classes)] errors_serial = pd.Series(error_list, index=classes_text) errors_serial.plot(kind='bar',figsize=(16,16)) # LeNet_drop_out Model result RGB to Gray Normalized, 1.0, 1.0, should no change, the keep_prop need set manully in the train function. train(X_train,y_train,X_valid,y_valid,X_test,y_test) error_list = get_error() # LeNet_drop_out Model result RGB to Gray Normalized, 1.0, 0.5, train(X_train,y_train,X_valid,y_valid,X_test,y_test) # LeNet_drop_out Model result RGB to Gray Normalized, 0.5, 0.5, train(X_train,y_train,X_valid,y_valid,X_test,y_test) # LeNet_4x_3c import time import datetime train(X_train,y_train,X_valid,y_valid,X_test,y_test) running_time = (time2-time1)/60 print("used time : {:.2f} minutes".format(running_time)) now = datetime.datetime.now() print(now) import time time1 = time.time() train(X_train,y_train,X_valid,y_valid,X_test,y_test) time2 = time.time() print("Running time: {:.2f} minutes.".format((time2-time1)/60)) # LeNet_4x_3c, 10x, rotate only import time time1 = time.time() train_add_data(X_train,y_train,X_valid,y_valid,X_test,y_test) time2 = time.time() print("Running time: {:.2f} minutes.".format((time2-time1)/60)) # LeNet_4x_3c, 20x import time time1 = time.time() train_add_data(X_train,y_train,X_valid,y_valid,X_test,y_test) time2 = time.time() print("Running time: {:.2f} minutes.".format((time2-time1)/60)) X_train.shape # LeNet_4x_3c 5x data train(X_train,y_train,X_valid,y_valid,X_test,y_test) # LeNet 4x, conv dropout 1.0 fc dropout 0.5 train(X_train,y_train,X_valid,y_valid,X_test,y_test) # LeNet 4x, conv dropout 0.5, fc dropout 0.5 train(X_train,y_train,X_valid,y_valid,X_test,y_test) # LeNet 4x, drop out conv 0.1 fc 0.5 train(X_train,y_train,X_valid,y_valid,X_test,y_test) # expand the LeNet to 16x, train(X_train_gray,y_train,X_valid_gray,y_valid,X_test_gray,y_test) error_list = get_errors(X_valid_gray,y_valid) print(1-sum(error_list)/len(y_valid)) # compare with the validation Accuracy # classes_text = [sign_names[str(i)] for i in range(n_classes)] errors_serial = pd.Series(error_list, index=classes_text) errors_serial.plot(kind='bar',figsize=(16,16)) # LeNet_16x_3c train(X_train,y_train,X_valid,y_valid,X_test,y_test) error_list = get_errors(X_valid,y_valid) print(1-sum(error_list)/len(y_valid)) # compare with the validation Accuracy # classes_text = [sign_names[str(i)] for i in range(n_classes)] errors_serial = pd.Series(error_list, index=classes_text) errors_serial.plot(kind='bar',figsize=(16,16)) # expand the LeNet to 16x, conv dropout 1.0 fc dropout 0.5 train(X_train,y_train,X_valid,y_valid,X_test,y_test) # expand the LeNet to 16x, conv dropout 0.5 fc dropout 0.5 train(X_train,y_train,X_valid,y_valid,X_test,y_test) with tf.Session() as sess: saver.restore(sess, 
tf.train.latest_checkpoint('.')) test_accuracy = evaluate(X_test, y_test) print("Test Accuracy = {:.3f}".format(test_accuracy)) ###Output _____no_output_____ ###Markdown --- Step 3: Test a Model on New ImagesTo give yourself more insight into how your model is working, download at least five pictures of German traffic signs from the web and use your model to predict the traffic sign type.You may find `signnames.csv` useful as it contains mappings from the class id (integer) to the actual sign name. Load and Output the Images ###Code ### Load the images and plot them here. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown Predict the Sign Type for Each Image ###Code ### Run the predictions here and use the model to output the prediction for each image. ### Make sure to pre-process the images with the same pre-processing pipeline used earlier. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown Analyze Performance ###Code ### Calculate the accuracy for these 5 new images. ### For example, if the model predicted 1 out of 5 signs correctly, it's 20% accurate on these new images. ###Output _____no_output_____ ###Markdown Output Top 5 Softmax Probabilities For Each Image Found on the Web For each of the new images, print out the model's softmax probabilities to show the **certainty** of the model's predictions (limit the output to the top 5 probabilities for each image). [`tf.nn.top_k`](https://www.tensorflow.org/versions/r0.12/api_docs/python/nn.htmltop_k) could prove helpful here. The example below demonstrates how tf.nn.top_k can be used to find the top k predictions for each image.`tf.nn.top_k` will return the values and indices (class ids) of the top k predictions. So if k=3, for each sign, it'll return the 3 largest probabilities (out of a possible 43) and the correspoding class ids.Take this numpy array as an example. The values in the array represent predictions. The array contains softmax probabilities for five candidate images with six possible classes. `tf.nn.top_k` is used to choose the three classes with the highest probability:``` (5, 6) arraya = np.array([[ 0.24879643, 0.07032244, 0.12641572, 0.34763842, 0.07893497, 0.12789202], [ 0.28086119, 0.27569815, 0.08594638, 0.0178669 , 0.18063401, 0.15899337], [ 0.26076848, 0.23664738, 0.08020603, 0.07001922, 0.1134371 , 0.23892179], [ 0.11943333, 0.29198961, 0.02605103, 0.26234032, 0.1351348 , 0.16505091], [ 0.09561176, 0.34396535, 0.0643941 , 0.16240774, 0.24206137, 0.09155967]])```Running it through `sess.run(tf.nn.top_k(tf.constant(a), k=3))` produces:```TopKV2(values=array([[ 0.34763842, 0.24879643, 0.12789202], [ 0.28086119, 0.27569815, 0.18063401], [ 0.26076848, 0.23892179, 0.23664738], [ 0.29198961, 0.26234032, 0.16505091], [ 0.34396535, 0.24206137, 0.16240774]]), indices=array([[3, 0, 5], [0, 1, 4], [0, 5, 1], [1, 3, 5], [1, 4, 3]], dtype=int32))```Looking just at the first row we get `[ 0.34763842, 0.24879643, 0.12789202]`, you can confirm these are the 3 largest probabilities in `a`. You'll also notice `[3, 0, 5]` are the corresponding indices. ###Code ### Print out the top five softmax probabilities for the predictions on the German traffic sign images found on the web. ### Feel free to use as many code cells as needed. 
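# One possible way to fill in this cell (a sketch, not the project's final answer).
# `web_images` is a placeholder name for the preprocessed array of the five signs
# downloaded from the web; `logits`, `x`, `keep_prob_conv`, `keep_prob_fc`, `saver`
# and `sign_names` are assumed to be defined by the cells above.
softmax_op = tf.nn.softmax(logits)
top5_op = tf.nn.top_k(softmax_op, k=5)

with tf.Session() as sess:
    saver.restore(sess, tf.train.latest_checkpoint('.'))
    top5 = sess.run(top5_op, feed_dict={x: web_images,
                                        keep_prob_conv: 1.0, keep_prob_fc: 1.0})

for img_idx, (probs, classes) in enumerate(zip(top5.values, top5.indices)):
    print('Image', img_idx)
    for p, c in zip(probs, classes):
        print('  {:.3f}  {}'.format(p, sign_names[str(c)]))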
###Output _____no_output_____
###Markdown
Project Writeup

Once you have completed the code implementation, document your results in a project writeup using this [template](https://github.com/udacity/CarND-Traffic-Sign-Classifier-Project/blob/master/writeup_template.md) as a guide. The writeup can be in a markdown or pdf file.

> **Note**: Once you have completed all of the code implementations and successfully answered each question above, you may finalize your work by exporting the iPython Notebook as an HTML document. You can do this by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.

--- Step 4 (Optional): Visualize the Neural Network's State with Test Images

This section is not required to complete, but it acts as an additional exercise for understanding the output of a neural network's weights. While neural networks can be a great learning device, they are often referred to as a black box. We can understand what the weights of a neural network look like better by plotting their feature maps. After successfully training your neural network, you can see what its feature maps look like by plotting the output of the network's weight layers in response to a test stimulus image. From these plotted feature maps, it's possible to see what characteristics of an image the network finds interesting. For a sign, maybe the inner network feature maps react with high activation to the sign's boundary outline or to the contrast in the sign's painted symbol.

Provided for you below is the function code that allows you to get the visualization output of any tensorflow weight layer you want. The inputs to the function should be a stimulus image, one used during training or a new one you provide, and then the tensorflow variable name that represents the layer's state during the training process; for instance, if you wanted to see what the [LeNet lab's](https://classroom.udacity.com/nanodegrees/nd013/parts/fbf77062-5703-404e-b60c-95b78b2f3f9e/modules/6df7ae49-c61c-4bb2-a23e-6527e69209ec/lessons/601ae704-1035-4287-8b11-e2c2716217ad/concepts/d4aca031-508f-4e0b-b493-e7b706120f81) feature maps looked like for its second convolutional layer, you could enter conv2 as the tf_activation variable.

For an example of what feature map outputs look like, check out NVIDIA's results in their paper [End-to-End Deep Learning for Self-Driving Cars](https://devblogs.nvidia.com/parallelforall/deep-learning-self-driving-cars/) in the section Visualization of internal CNN State. NVIDIA was able to show that their network's inner weights had high activations to road boundary lines by comparing feature maps from an image with a clear path to one without. Try experimenting with a similar test to show that your trained network's weights are looking for interesting features, whether it's looking at differences in feature maps from images with or without a sign, or even what feature maps look like in a trained network vs a completely untrained one on the same sign image.

Your output should look something like the example feature-map grid referenced above (the example image is not included in this export).
###Code
### Visualize your network's feature maps here.
### Feel free to use as many code cells as needed.
# image_input: the test image being fed into the network to produce the feature maps # tf_activation: should be a tf variable name used during your training procedure that represents the calculated state of a specific weight layer # activation_min/max: can be used to view the activation contrast in more detail, by default matplot sets min and max to the actual min and max values of the output # plt_num: used to plot out multiple different weight feature map sets on the same block, just extend the plt number for each new feature map entry def outputFeatureMap(image_input, tf_activation, activation_min=-1, activation_max=-1 ,plt_num=1): # Here make sure to preprocess your image_input in a way your network expects # with size, normalization, ect if needed # image_input = # Note: x should be the same name as your network's tensorflow data placeholder variable # If you get an error tf_activation is not defined it may be having trouble accessing the variable from inside a function activation = tf_activation.eval(session=sess,feed_dict={x : image_input}) featuremaps = activation.shape[3] plt.figure(plt_num, figsize=(15,15)) for featuremap in range(featuremaps): plt.subplot(6,8, featuremap+1) # sets the number of feature maps to show on each row and column plt.title('FeatureMap ' + str(featuremap)) # displays the feature map number if activation_min != -1 & activation_max != -1: plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmin =activation_min, vmax=activation_max, cmap="gray") elif activation_max != -1: plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmax=activation_max, cmap="gray") elif activation_min !=-1: plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmin=activation_min, cmap="gray") else: plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", cmap="gray") !ls ###Output checkpoint lenet.meta sample_fig_with_label.jpg valid.p helper.py __pycache__ signnames.csv lenet.data-00000-of-00001 sample_data test.p lenet.index sample_fig.jpg train.p
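###Markdown
A possible way to call `outputFeatureMap` (a sketch with assumptions): `conv1_activation` below is a hypothetical handle to the first convolutional layer's output tensor, for instance returned by the network-building function in `helper.py` or fetched by name via `tf.get_default_graph().get_tensor_by_name(...)`; `X_test_n`, `x` and `saver` come from earlier cells.
###Code
sample = X_test_n[:1]  # one preprocessed test image, shape (1, 32, 32, 3)
with tf.Session() as sess:
    saver.restore(sess, tf.train.latest_checkpoint('.'))
    outputFeatureMap(sample, conv1_activation, plt_num=1)  # conv1_activation is a placeholder name
###Output _____no_output_____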
KerasNN/0150_MLP_IMDB.ipynb
###Markdown Introduction This notebook presents **bag-of-words** sentiment anlysis on [IMDB](http://ai.stanford.edu/~amaas/data/sentiment/) movie reviews dataset in Keras. **Contents*** [IMDB Dataset](IMDB-Dataset) - load and preprocess* [Keras Model](Keras-Model) - build and train model Imports ###Code import numpy as np import matplotlib.pyplot as plt ###Output _____no_output_____ ###Markdown Limit TensorFlow GPU memory usage ###Code import tensorflow as tf config = tf.ConfigProto() config.gpu_options.allow_growth = True with tf.Session(config=config): pass # init sessin with allow_growth ###Output _____no_output_____ ###Markdown IMDB Dataset Load IMDB movie review dataset using Keras API. Most params are set to default values. ###Code (x_train_raw, y_train), (x_test_raw, y_test) = tf.keras.datasets.imdb.load_data( path='imbd.npz', # download to '~/.keras/datasets/' + path num_words=10000, # top most frequent words to consider skip_top=0, # top most frequent words to ignore ('the', 'a', 'at', ...) maxlen=None, # truncate reviews longer than this seed=113, # data shuffling seed start_char=1, # start-of-sequence token oov_char=2, # if skip_top used, then dropped words replaced with this token index_from=3) # actual word tokens start here ###Output _____no_output_____ ###Markdown Data shapes ###Code print(x_train_raw.shape) print(x_test_raw.shape) ###Output (25000,) (25000,) ###Markdown Example data sample ###Code print('Label:', y_train[0]) print('Review:', x_train_raw[0]) ###Output Label: 1 Review: [1, 14, 22, 16, 43, 530, 973, 1622, 1385, 65, 458, 4468, 66, 3941, 4, 173, 36, 256, 5, 25, 100, 43, 838, 112, 50, 670, 2, 9, 35, 480, 284, 5, 150, 4, 172, 112, 167, 2, 336, 385, 39, 4, 172, 4536, 1111, 17, 546, 38, 13, 447, 4, 192, 50, 16, 6, 147, 2025, 19, 14, 22, 4, 1920, 4613, 469, 4, 22, 71, 87, 12, 16, 43, 530, 38, 76, 15, 13, 1247, 4, 22, 17, 515, 17, 12, 16, 626, 18, 2, 5, 62, 386, 12, 8, 316, 8, 106, 5, 4, 2223, 5244, 16, 480, 66, 3785, 33, 4, 130, 12, 16, 38, 619, 5, 25, 124, 51, 36, 135, 48, 25, 1415, 33, 6, 22, 12, 215, 28, 77, 52, 5, 14, 407, 16, 82, 2, 8, 4, 107, 117, 5952, 15, 256, 4, 2, 7, 3766, 5, 723, 36, 71, 43, 530, 476, 26, 400, 317, 46, 7, 4, 2, 1029, 13, 104, 88, 4, 381, 15, 297, 98, 32, 2071, 56, 26, 141, 6, 194, 7486, 18, 4, 226, 22, 21, 134, 476, 26, 480, 5, 144, 30, 5535, 18, 51, 36, 28, 224, 92, 25, 104, 4, 226, 65, 16, 38, 1334, 88, 12, 16, 283, 5, 16, 4472, 113, 103, 32, 15, 16, 5345, 19, 178, 32] ###Markdown As a sanity check recreate word dictionary ###Code w2i = tf.keras.datasets.imdb.get_word_index() w2i = {k:(v+2) for k,v in w2i.items()} # 0 is <PAD>; add +2 for <ST> and <UNK> w2i['<PAD>'] = 0 w2i['<ST>'] = 1 w2i['<UNK>'] = 2 i2w = {v: k for k, v in w2i.items()} ###Output _____no_output_____ ###Markdown Print subset of vocabulary ###Code vocab_10000 = [i2w[i] for i in range(10000)] # 1000 most common words (indices are sorted) print(sorted(vocab_10000)[:100]) # sort alphabeticaly and show first 100 words ###Output ["'", "'60s", "'70s", "'73", "'80s", "'a", "'cause", "'em", "'i", "'s", "'the", '0', '00', '000', '1', '10', '100', '1000', '101', '11', '12', '13', '13th', '14', '15', '150', '16', '17', '18', '18th', '19', '1920s', '1930', "1930's", '1930s', '1931', '1932', '1933', '1934', '1936', '1938', '1939', '1940', "1940's", '1940s', '1941', '1942', '1943', '1944', '1945', '1946', '1948', '1949', '1950', "1950's", '1950s', '1951', '1953', '1954', '1955', '1956', '1957', '1958', '1959', '1960', "1960's", '1960s', '1963', '1964', '1965', '1966', '1967', '1968', 
'1969', '1970', "1970's", '1970s', '1971', '1972', '1973', '1974', '1975', '1976', '1977', '1978', '1979', '1980', "1980's", '1980s', '1981', '1982', '1983', '1984', '1985', '1986', '1987', '1988', '1989', '1990', "1990's"] ###Markdown Print sample review as actual text ###Code # 16965 is a "Bromwell High is a cartoon comedy..." review, # which in original dataset is first train review print(' '.join(i2w[id] for id in x_train_raw[16965] )) ###Output <ST> <UNK> house br of party point i killers by and part she for there first frames it's getting plot something for devices would constructed old it and valley boy's humor well is course was <UNK> <UNK> survive br been drop is opening we br devices and <UNK> is context <UNK> and hangs ray so were their without should had magic <UNK> <UNK> and <UNK> to and bit dramatic at tongue well to and concepts this british a had ray very this almost and understand it only of fresh wes sorry is gotta want and getting this grade <UNK> by house of boring given horrific real these is flip all to end devices fresh cinderella is <UNK> house this thinking was being difference to would says characters was <UNK> house br making ape good of 0 was i saw ###Markdown Convert movie reviews to multi-hot vectors of length 1000, where each position corresponds to one word in vocabulary ###Code tokenizer = tf.keras.preprocessing.text.Tokenizer(num_words=10000) x_train = tokenizer.sequences_to_matrix(x_train_raw, mode='binary') x_test = tokenizer.sequences_to_matrix(x_test_raw, mode='binary') print('shape:', x_train.shape) print('data:') print(x_train[0, :100]) ###Output shape: (25000, 10000) data: [0. 1. 1. 0. 1. 1. 1. 1. 1. 1. 0. 0. 1. 1. 1. 1. 1. 1. 1. 1. 0. 1. 1. 0. 0. 1. 1. 0. 1. 0. 1. 0. 1. 1. 0. 1. 1. 0. 1. 1. 0. 0. 0. 1. 0. 0. 1. 0. 1. 0. 1. 1. 1. 0. 0. 0. 1. 0. 0. 0. 0. 0. 1. 0. 0. 1. 1. 0. 0. 0. 0. 1. 0. 0. 0. 0. 1. 1. 0. 0. 0. 0. 1. 0. 0. 0. 0. 1. 1. 0. 0. 0. 1. 0. 0. 0. 0. 0. 1. 0.] ###Markdown Keras Model Simple multi-layer perceptron ###Code from tensorflow.keras.layers import Dense model = tf.keras.Sequential() model.add(Dense(units=50, input_dim=10000, activation='sigmoid')) model.add(Dense(units=1, activation='sigmoid')) model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) model.summary() ###Output _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= dense (Dense) (None, 50) 500050 _________________________________________________________________ dense_1 (Dense) (None, 1) 51 ================================================================= Total params: 500,101 Trainable params: 500,101 Non-trainable params: 0 _________________________________________________________________ ###Markdown Keras only logs metrics every epoch, to get more datapoints we implement our own callback. 
###Code class Callback(tf.keras.callbacks.Callback): def on_train_begin(self, logs={}): self.losses = [] self.accs = [] def on_batch_end(self, batch, logs={}): self.losses.append(logs.get('loss')) self.accs.append(logs.get('acc')) ###Output _____no_output_____ ###Markdown Train model ###Code cback = Callback() history = model.fit(x=x_train, y=y_train, batch_size=250, epochs=2, callbacks=[cback]) ###Output Epoch 1/2 25000/25000 [==============================] - 2s 97us/step - loss: 0.4983 - acc: 0.7976 Epoch 2/2 25000/25000 [==============================] - 2s 76us/step - loss: 0.2991 - acc: 0.9028 ###Markdown Final results ###Code loss, acc = model.evaluate(x_train, y_train, verbose=0) print(f'Accuracy on train set: {acc:.2f}') loss, acc = model.evaluate(x_test, y_test, verbose=0) print(f'Accuracy on test set: {acc:.2f}') ###Output Accuracy on train set: 0.93 Accuracy on test set: 0.89 ###Markdown Plot loss and accuracy during training period ###Code plt.plot(cback.losses, label='loss') plt.plot(cback.accs, label='acc', color='red') plt.legend(); ###Output _____no_output_____
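###Markdown
Individual predictions can also be inspected directly (a small sketch using `model`, `x_test` and `y_test` from above); the sigmoid output is the predicted probability that a review is positive.
###Code
probs = model.predict(x_test[:5]).ravel()
for p, label in zip(probs, y_test[:5]):
    print(f'P(positive) = {p:.2f}   true label = {label}')
###Output _____no_output_____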
Python/04 Matplotlib/02.02 Ajustando linhas - Cores e tracejados.ipynb
###Markdown **Adjusting line colors** ###Code import numpy as np import matplotlib.pyplot as plt x = np.linspace(0, 10, 1000) plt.plot(x, np.sin(x - 0), color='blue') # specify color by name plt.plot(x, np.sin(x - 1), color='g') # short color code (rgbcmyk) plt.plot(x, np.sin(x - 2), color='0.75') # Grayscale between 0 and 1 plt.plot(x, np.sin(x - 3), color='#FFDD44') # Hex code (RRGGBB from 00 to FF) plt.plot(x, np.sin(x - 4), color=(1.0,0.2,0.3)) # RGB tuple, values 0 to 1 plt.plot(x, np.sin(x - 5), color='chartreuse'); # all HTML color names supported ###Output _____no_output_____ ###Markdown **Adjusting line dash styles** ###Code plt.plot(x, x + 0, linestyle='solid') plt.plot(x, x + 1, linestyle='dashed') plt.plot(x, x + 6, linestyle='-.') # dashdot plt.plot(x, x + 7, linestyle=':'); # dotted ###Output _____no_output_____ ###Markdown **Adjusting line color and dash style together** ###Code plt.plot(x, x + 0, '-g') # solid green plt.plot(x, x + 1, '--c') # dashed cyan plt.plot(x, x + 2, '-.k') # dashdot black plt.plot(x, x + 3, ':r'); # dotted red ###Output _____no_output_____
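###Markdown A natural next step, sketched here using the same `x` array: color, dash style and line width can be combined, and labelled lines can be collected into a legend. ###Code
# Combine color, dash style, width, and a legend
plt.plot(x, np.sin(x), color='purple', linestyle='--', linewidth=3, label='sin(x)')
plt.plot(x, np.cos(x), color='0.4', linestyle=':', linewidth=1.5, label='cos(x)')
plt.plot(x, np.sin(x + 1), linestyle=(0, (5, 1, 1, 1)), label='custom dash tuple')  # (offset, on-off sequence)
plt.legend();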
rosalind_workbook/genome_assembly.ipynb
###Markdown Genome AssemblyThe algorithmic reconstruction of contiguous chromosomes from short fragments of DNA.Rosalind link: [Genome Assembly](http://rosalind.info/problems/topics/genome-assembly/) Import modules ###Code import os import sys from itertools import permutations import numpy as np import pandas as pd from Bio.Seq import Seq from Bio import SeqIO from Bio.Alphabet import generic_rna print('DONE!') ###Output _____no_output_____ ###Markdown Genome Assembly as Shortest SuperstringRosalind link: [Genome Assembly as Shortest Superstring](http://rosalind.info/problems/long/) ###Code # TODO ###Output _____no_output_____ ###Markdown Error Correction in ReadsRosalind link: [Error Correction in Reads](http://rosalind.info/problems/corr/) ###Code # TODO ###Output _____no_output_____ ###Markdown Constructing a De Bruijn GraphRosalind link: [Constructing a De Bruijn Graph](http://rosalind.info/problems/dbru/) ###Code # TODO ###Output _____no_output_____ ###Markdown Genome Assembly with Perfect CoverageRosalind link: [Genome Assembly with Perfect Coverage](http://rosalind.info/problems/pcov/) ###Code # TODO ###Output _____no_output_____ ###Markdown Genome Assembly Using ReadsRosalind link: [Genome Assembly Using Reads](http://rosalind.info/problems/gasm/) ###Code # TODO ###Output _____no_output_____ ###Markdown Assessing Assembly Quality with N50 and N75Rosalind link: [Assessing Assembly Quality with N50 and N75](http://rosalind.info/problems/asmq/) ###Code # TODO ###Output _____no_output_____ ###Markdown Genome Assembly with Perfect Coverage and RepeatsRosalind link: [Genome Assembly with Perfect Coverage and Repeats](http://rosalind.info/problems/grep/) ###Code # TODO ###Output _____no_output_____
notebooks/1.1-model-exploration-deep-learning.ipynb
###Markdown Experiment 1.1 Explore models and approaches for deep learningFinetune models on left leg amplitude prediction with lying videos as input.For now this is just to quickly test the settings that Shankara got out of previous experiments, i.e.:* Cutoff first 50 frames* Interpolate when likelihood is below 0.7* Use opposite bodypart if all likelihood is below 0.7 for a bodypart* Use a standard scaler to scale We don't really know whether this is the best set of hyperparameters. We can further experiment with the following hyperparameters:* Use a StandardScaler* Include likelihood* Use different model architectures* Use interpolation* write one function that takes in parameters ###Code import numpy as np import pandas as pd from sklearn.model_selection import GroupShuffleSplit from sklearn.metrics import mean_absolute_error from sklearn.preprocessing import StandardScaler import tensorflow.keras as keras from src.helpers import read_scores from src.data_generators import RawDataGenerator from src.data_selection import MultipleScoreSelector from src.ai_func import cross_validation_generator from src.settings import LYING_VIDEOS_DATA_FOLDER, SITTING_VIDEOS_DATA_FOLDER, DATA_FOLDER from keras.models import Sequential from keras.models import Model from keras.layers import Input from keras.layers import Dense from keras.layers import Flatten from keras.layers import Dropout from keras.layers import LSTM from keras.layers import Lambda from keras.layers.convolutional import Conv1D from keras.layers.convolutional import MaxPooling1D from keras.layers.advanced_activations import LeakyReLU %load_ext autoreload %autoreload 2 scores_df = read_scores(DATA_FOLDER / 'data_clinical_scoring.xlsx') ###Output _____no_output_____ ###Markdown Definitions ###Code SCORES_TO_USE = ['D_LLP_R_tA_pscore'] SCORER_TO_USE = 1 SCORERS_TO_USE = [1, 2, 3] n_outputs = len(SCORES_TO_USE) ###Output _____no_output_____ ###Markdown Pipeline for training a deep neural network Define model architecture (here: simple CNN) ###Code def get_model(n_timesteps, n_features, n_outputs): model = Sequential() model.add(Conv1D(filters=64, kernel_size=3, activation='relu', input_shape=(n_timesteps,n_features))) model.add(Conv1D(filters=64, kernel_size=3, activation='relu')) model.add(MaxPooling1D(pool_size=2)) model.add(Conv1D(filters=32, kernel_size=5, activation='relu', input_shape=(n_timesteps,n_features))) model.add(Conv1D(filters=32, kernel_size=5, activation='relu')) model.add(MaxPooling1D(pool_size=2)) model.add(Flatten()) model.add(Dense(100)) model.add(Dense(n_outputs)) return model def get_model_old(): # simple CNN input_layer = keras.layers.Input(shape=(n_timesteps,n_features)) norm_layer = keras.layers.BatchNormalization()(input_layer) cnn_layer = keras.layers.Conv1D(filters=32, kernel_size=3, activation='relu')(norm_layer) cnn_layer = keras.layers.Conv1D(filters=32, kernel_size=3, activation='relu')(cnn_layer) cnn_layer = keras.layers.MaxPooling1D(pool_size=2)(cnn_layer) cnn_layer = keras.layers.Dropout(0.7)(cnn_layer) cnn_layer = keras.layers.Conv1D(filters=64, kernel_size=3, activation='relu')(cnn_layer) cnn_layer = keras.layers.Conv1D(filters=64, kernel_size=3, activation='relu')(cnn_layer) cnn_layer = keras.layers.MaxPooling1D(pool_size=2)(cnn_layer) cnn_layer = keras.layers.Dropout(0.7)(cnn_layer) cnn_layer = keras.layers.Flatten()(cnn_layer) cnn_layer = keras.layers.Dense(100)(cnn_layer) output_layer = keras.layers.Dense(n_outputs)(cnn_layer) return keras.Model(inputs=input_layer, outputs=output_layer) ###Output 
_____no_output_____ ###Markdown Train network ###Code def train_model(train_generator, n_timesteps, n_features, n_outputs): model = get_model(n_timesteps, n_features, n_outputs) model.compile(loss='mae', optimizer=keras.optimizers.Adam()) model.fit(train_generator, epochs=30) return model def train_cross_val(cross_val): y_pred = [] y_test = [] splits = [] for i_split, (train_scores, test_scores) in enumerate(cross_val): print(f'Fitting for 5-fold split {i_split}') train_generator = RawDataGenerator(train_scores, **data_generation_params) scaler = train_generator.get_scaler() data_generation_params['scaler'] = scaler test_generator = RawDataGenerator(test_scores, **data_generation_params) X, y = train_generator.__getitem__(0) n_timesteps, n_features = (X.shape[1], X.shape[2]) model = train_model(train_generator, n_timesteps, n_features, n_outputs) y_pred.append(model.predict(test_generator)) y_test.append(test_scores) splits.extend([i_split]*len(test_scores)) break # This results in training only for one of the 5 folds y_pred = np.vstack(y_pred) y_test = pd.concat(y_test) return y_test, y_pred, splits def evaluate(y_test, y_pred): results = [] for i_score, column in enumerate(y_test): mae = mean_absolute_error(y_test.iloc[:, i_score], y_pred[:, i_score]) results.append({'score': column, 'mae': mae}) return pd.DataFrame(results) ###Output _____no_output_____ ###Markdown Test all combinations ###Code results_df = pd.DataFrame(columns=['scorer', 'scores', 'split', 'y', 'pred_y']) for scorer in SCORERS_TO_USE: for cutoff in [0,50]: for threshold in [0, 0.7]: for scaler in [None, StandardScaler()]: for bodyparts in [None, ['ankle1', 'knee1', 'hip1','ankle2', 'knee2', 'hip2']]: data_generation_params = { 'videos_folder': LYING_VIDEOS_DATA_FOLDER, 'cutoff': cutoff, 'interpolation_threshold': threshold, 'batch_size': 1, 'scaler': scaler, 'bodyparts': bodyparts } print('params:',data_generation_params) print(f'Training model for scorer {scorer}') selector = MultipleScoreSelector(scores_to_use=SCORES_TO_USE, scorer_to_use=scorer) selected_data = selector.transform(scores_df) cross_val = cross_validation_generator(selected_data) y_test, y_pred, splits = train_cross_val(cross_val) for y_item, y_pred_item, split in zip(y_test.values, y_pred, splits): results_df = results_df.append({'scorer':scorer, 'scores': ', '.join(SCORES_TO_USE), 'split': split, 'y':y_item[0], 'pred_y': y_pred_item[0], 'videos_folder': data_generation_params['videos_folder'], 'cutoff': data_generation_params['cutoff'], 'interpolation_threshold': data_generation_params['interpolation_threshold'], 'batch_size': data_generation_params['batch_size'], 'scaler': data_generation_params['scaler'], 'bodyparts': data_generation_params['bodyparts']}, ignore_index=True) results_df['MAE'] = abs(results_df['y'] - results_df['pred_y']) results_df results_df.to_excel('results/experiment-1.1-results-validation.xlsx', index=False) for queryset in [['scorer == 1', 'scorer == 2', 'scorer == 3'], ['cutoff == 50', 'cutoff == 0'], ['interpolation_threshold==0.7', 'interpolation_threshold==0'], ['scaler!=scaler', 'scaler==scaler'], ['bodyparts!=bodyparts', 'bodyparts==bodyparts']]: for query in queryset: print(query) print('Mean:',results_df.query(query)['MAE'].mean()) print('Median:',results_df.query(query)['MAE'].median()) print() for scorer in ['scorer == 1', 'scorer == 2', 'scorer == 3']: for cutoff in ['cutoff == 50', 'cutoff == 0']: for interpolation in ['interpolation_threshold==0.7', 'interpolation_threshold==0']: for scaler in ['scaler!=scaler', 
'scaler==scaler']: for bodyparts in ['bodyparts!=bodyparts', 'bodyparts==bodyparts']: print(' and '.join([scorer, cutoff, interpolation, scaler, bodyparts])) print('Mean:', results_df.query(' and '.join([scorer, cutoff, interpolation, scaler, bodyparts]))['MAE'].mean()) print('Median:', results_df.query(' and '.join([scorer, cutoff, interpolation, scaler, bodyparts]))['MAE'].median()) print() results_df.query('cutoff == 50 and scorer==1 and interpolation_threshold==0.7 and scaler!=scaler and bodyparts!=bodyparts')['MAE'].mean() ###Output _____no_output_____ ###Markdown Run Once ###Code data_generation_params = { 'videos_folder': LYING_VIDEOS_DATA_FOLDER, 'cutoff': 50, 'interpolation_threshold': 0.7, 'batch_size': 1, 'scaler': None, 'bodyparts': None } print(f'Training model for scorer {SCORER_TO_USE}') selector = MultipleScoreSelector(scores_to_use=SCORES_TO_USE, scorer_to_use=SCORER_TO_USE) selected_data = selector.transform(scores_df) cross_val = cross_validation_generator(selected_data) y_test, y_pred, splits = train_cross_val(cross_val) print(evaluate(y_test, y_pred)) ###Output score mae 0 D_LLP_R_tA_pscore 0.101455
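###Markdown For summarizing `results_df` by experimental setting, a single `groupby` is more compact than the nested `query` loops used above. A sketch, assuming `results_df` with the `scorer`, `cutoff`, `interpolation_threshold` and `MAE` columns built earlier: ###Code
# Mean and median MAE per preprocessing setting in one call
summary = (results_df
           .groupby(['scorer', 'cutoff', 'interpolation_threshold'])['MAE']
           .agg(['mean', 'median', 'count'])
           .sort_values('mean'))
print(summary)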
intro_to_chemvae.ipynb
###Markdown Load librariesлрлор ###Code # tensorflow backend from os import environ environ['KERAS_BACKEND'] = 'tensorflow' # vae stuff from chemvae.vae_utils import VAEUtils from chemvae import mol_utils as mu # import scientific py import numpy as np import pandas as pd # rdkit stuff from rdkit.Chem import AllChem as Chem from rdkit.Chem import PandasTools # plotting stuff import matplotlib.pyplot as plt import matplotlib as mpl from IPython.display import SVG, display %config InlineBackend.figure_format = 'retina' %matplotlib inline ###Output Using TensorFlow backend. /Users/aleksandrbutenko/opt/anaconda3/envs/chemvae/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:471: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'. _np_qint8 = np.dtype([("qint8", np.int8, 1)]) /Users/aleksandrbutenko/opt/anaconda3/envs/chemvae/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:472: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'. _np_quint8 = np.dtype([("quint8", np.uint8, 1)]) /Users/aleksandrbutenko/opt/anaconda3/envs/chemvae/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:473: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'. _np_qint16 = np.dtype([("qint16", np.int16, 1)]) /Users/aleksandrbutenko/opt/anaconda3/envs/chemvae/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:474: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'. _np_quint16 = np.dtype([("quint16", np.uint16, 1)]) /Users/aleksandrbutenko/opt/anaconda3/envs/chemvae/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:475: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'. _np_qint32 = np.dtype([("qint32", np.int32, 1)]) ###Markdown Load a model ###Code vae = VAEUtils(directory='models/zinc_properties') ###Output /Users/aleksandrbutenko/opt/anaconda3/envs/chemvae/lib/python3.6/site-packages/keras/models.py:245: UserWarning: No training configuration found in save file: the model was *not* compiled. Compile it manually. 
warnings.warn('No training configuration found in save file: ' ###Markdown Using the VAE Decode/Encode The reconstruction might not be perfect (decoding is probabilistic), so try it several times. The round trip is: smiles -> x -> z -> x_r -> smiles_r ###Code smiles_1 = mu.canon_smiles('CSCC(=O)NNC(=O)c1c(C)oc(C)c1C') # smiles_1 = mu.canon_smiles('Cc1ccc(S2(=O)=NC(=O)Nc3ccccc32)cc1') # smiles_1 = mu.canon_smiles('CN(Cc1ccc2c(c1)C(=O)CC2)C(=O)OC(C)(C)C') # smiles_1 = mu.canon_smiles('COC(=O)C1CCC(Oc2ccc(NC(=O)C(=O)NN)cn2)CC1') X_1 = vae.smiles_to_hot(smiles_1,canonize_smiles=True) z_1 = vae.encode(X_1) X_r= vae.decode(z_1) print(X_1.shape) print('{:20s} : {}'.format('Input',smiles_1)) print('{:20s} : {}'.format('Reconstruction',vae.hot_to_smiles(X_r,strip=True)[0])) print('{:20s} : {} with norm {:.3f}'.format('Z representation',z_1.shape, np.linalg.norm(z_1))) ###Output (1, 120, 35) Input : CSCC(=O)NNC(=O)c1c(C)oc(C)c1C Reconstruction : CSCC(=O)N(C(=O)c1c(C)oc(C)c1C Z representation : (1, 196) with norm 10.678 ###Markdown Property predictor ###Code print('Properties (qed,SAS,logP):') y_1 = vae.predict_prop_Z(z_1)[0] print(y_1) ###Output Properties (qed,SAS,logP): [ 0.77286768 2.43317604 0.95585614] ###Markdown Decode several attempts Since the VAE is probabilistic, repeated decoding around the same point can yield different molecules ###Code noise=5.0 print('Searching molecules randomly sampled from {:.2f} std (z-distance) from the point'.format(noise)) df = vae.z_to_smiles( z_1,decode_attempts=100,noise_norm=noise) print('Found {:d} unique mols, out of {:d}'.format(len(set(df['smiles'])),sum(df['count']))) print('SMILES\n',df.smiles) display(PandasTools.FrameToGridImage(df,column='mol', legendsCol='smiles',molsPerRow=5)) df.head() ###Output Searching molecules randomly sampled from 5.00 std (z-distance) from the point ###Markdown PCA of latent space Sample random points from the training set along with properties ###Code Z, data, smiles = vae.ls_sampler_w_prop(size=50000,return_smiles=True) prop_opt = 'qed' prop_df = pd.DataFrame(data).reset_index() prop_df['smiles']=smiles prop_df.head() ###Output _____no_output_____ ###Markdown Perform a PCA projection and color the points based on a property ###Code from sklearn.decomposition import PCA from sklearn.preprocessing import MinMaxScaler # do pca and normalize Z_pca = PCA(n_components=2).fit_transform(Z) Z_pca = MinMaxScaler().fit_transform(Z_pca) df = pd.DataFrame(np.transpose((Z_pca[:,0],Z_pca[:,1]))) df.columns = ['x','y'] df[prop_opt]=prop_df[prop_opt] plt.scatter(x=df['x'], y=df['y'], c=df[prop_opt], cmap= 'viridis', marker='.', s=10,alpha=0.5, edgecolors='none') plt.show() ###Output _____no_output_____ ###Markdown Compare with t-SNE (this will take some time) ###Code from sklearn.manifold import TSNE Z_tsne = TSNE(n_components=2).fit_transform(Z) Z_tsne = MinMaxScaler().fit_transform(Z_tsne) df = pd.DataFrame(np.transpose((Z_tsne[:,0],Z_tsne[:,1]))) df.columns = ['x','y'] df[prop_opt]=prop_df[prop_opt] plt.scatter(x=df['x'], y=df['y'], c=df[prop_opt], cmap= 'viridis', marker='.', s=10,alpha=0.5, edgecolors='none') plt.show() ###Output _____no_output_____
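###Markdown Another experiment worth trying is walking through latent space between two molecules. A rough sketch, assuming the `vae` helpers behave as in the cells above (the second SMILES is one of the commented alternatives); decoding is stochastic, so intermediate SMILES may not all be valid: ###Code
smiles_a = mu.canon_smiles('CSCC(=O)NNC(=O)c1c(C)oc(C)c1C')
smiles_b = mu.canon_smiles('CN(Cc1ccc2c(c1)C(=O)CC2)C(=O)OC(C)(C)C')

z_a = vae.encode(vae.smiles_to_hot(smiles_a, canonize_smiles=True))
z_b = vae.encode(vae.smiles_to_hot(smiles_b, canonize_smiles=True))

for alpha in np.linspace(0.0, 1.0, 5):
    z_mix = (1.0 - alpha) * z_a + alpha * z_b  # linear interpolation in latent space
    smiles_mix = vae.hot_to_smiles(vae.decode(z_mix), strip=True)[0]
    print('alpha={:.2f}: {}'.format(alpha, smiles_mix))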
data structures/pprint.ipynb
###Markdown pprint - a module for printing complex data structures that are readable to humans! ###Code sample_data = [ (1, {'a': 'A', 'b': 'B', 'c': 'C', 'd': 'D'}), (2, {'e': 'E', 'f': 'F', 'g': 'G', 'h': 'H', 'i': 'I', 'j': 'J', 'k': 'K', 'l': 'L'}), (3, ['m', 'n']), (4, ['o', 'p', 'q']), (5, ['r', 's', 't''u', 'v', 'x', 'y', 'z']), (6, ['1', '2', '3', '4', '5', '6', '7', '8', '9']), (7, ['!', '@', '#', '$', '%', '^', '&', '*']), ] import pprint print('PRINT:') print(sample_data) print('\nPPRINT') pprint.pprint(sample_data) ###Output PRINT: [(1, {'a': 'A', 'b': 'B', 'c': 'C', 'd': 'D'}), (2, {'e': 'E', 'f': 'F', 'g': 'G', 'h': 'H', 'i': 'I', 'j': 'J', 'k': 'K', 'l': 'L'}), (3, ['m', 'n']), (4, ['o', 'p', 'q']), (5, ['r', 's', 'tu', 'v', 'x', 'y', 'z']), (6, ['1', '2', '3', '4', '5', '6', '7', '8', '9']), (7, ['!', '@', '#', '$', '%', '^', '&', '*'])] PPRINT [(1, {'a': 'A', 'b': 'B', 'c': 'C', 'd': 'D'}), (2, {'e': 'E', 'f': 'F', 'g': 'G', 'h': 'H', 'i': 'I', 'j': 'J', 'k': 'K', 'l': 'L'}), (3, ['m', 'n']), (4, ['o', 'p', 'q']), (5, ['r', 's', 'tu', 'v', 'x', 'y', 'z']), (6, ['1', '2', '3', '4', '5', '6', '7', '8', '9']), (7, ['!', '@', '#', '$', '%', '^', '&', '*'])] ###Markdown Limiting nested outputs ###Code print('Single depth:') pprint.pprint(sample_data, depth=1) print() print('Double depth:') pprint.pprint(sample_data, depth=2) print() print('Triple depth:') pprint.pprint(sample_data, depth=3) ###Output Single depth: [(...), (...), (...), (...), (...), (...), (...)] Double depth: [(1, {...}), (2, {...}), (3, [...]), (4, [...]), (5, [...]), (6, [...]), (7, [...])] Triple depth: [(1, {'a': 'A', 'b': 'B', 'c': 'C', 'd': 'D'}), (2, {'e': 'E', 'f': 'F', 'g': 'G', 'h': 'H', 'i': 'I', 'j': 'J', 'k': 'K', 'l': 'L'}), (3, ['m', 'n']), (4, ['o', 'p', 'q']), (5, ['r', 's', 'tu', 'v', 'x', 'y', 'z']), (6, ['1', '2', '3', '4', '5', '6', '7', '8', '9']), (7, ['!', '@', '#', '$', '%', '^', '&', '*'])] ###Markdown Compact flag - fitting more data on each line ###Code print('Without using compact flag:') pprint.pprint(sample_data, compact=False) print() print('With using compact flag:') pprint.pprint(sample_data, compact=True) ###Output Without using compact flag: [(1, {'a': 'A', 'b': 'B', 'c': 'C', 'd': 'D'}), (2, {'e': 'E', 'f': 'F', 'g': 'G', 'h': 'H', 'i': 'I', 'j': 'J', 'k': 'K', 'l': 'L'}), (3, ['m', 'n']), (4, ['o', 'p', 'q']), (5, ['r', 's', 'tu', 'v', 'x', 'y', 'z']), (6, ['1', '2', '3', '4', '5', '6', '7', '8', '9']), (7, ['!', '@', '#', '$', '%', '^', '&', '*'])] With using compact flag: [(1, {'a': 'A', 'b': 'B', 'c': 'C', 'd': 'D'}), (2, {'e': 'E', 'f': 'F', 'g': 'G', 'h': 'H', 'i': 'I', 'j': 'J', 'k': 'K', 'l': 'L'}), (3, ['m', 'n']), (4, ['o', 'p', 'q']), (5, ['r', 's', 'tu', 'v', 'x', 'y', 'z']), (6, ['1', '2', '3', '4', '5', '6', '7', '8', '9']), (7, ['!', '@', '#', '$', '%', '^', '&', '*'])]
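###Markdown pprint can also return the formatted text as a string instead of printing it, via `pformat`, which is handy for logging; `width` controls the target line length. A short sketch using the same `sample_data`: ###Code
# Get the pretty-printed representation as a string
formatted = pprint.pformat(sample_data, width=60, depth=2)
print(type(formatted))
print(formatted)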
Regression/Simple_linear_regression.ipynb
###Markdown Simple Linear Regression Importing the libraries ###Code import numpy as np import matplotlib.pyplot as plt import pandas as pd ###Output _____no_output_____ ###Markdown Importing the dataset ###Code dataset = pd.read_csv('Salary_Data.csv') X = dataset.iloc[:, :-1].values y = dataset.iloc[:, -1].values ###Output _____no_output_____ ###Markdown X takes all columns except the last one; the last column goes into y. Splitting the dataset into the Training set and Test set ###Code from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 1/3, random_state = 0) ###Output _____no_output_____ ###Markdown The test size is taken to be 1/3, but it can be varied based on preference. Training the Simple Linear Regression model on the Training set ###Code from sklearn.linear_model import LinearRegression regressor = LinearRegression() regressor.fit(X_train, y_train) ###Output _____no_output_____ ###Markdown Once instantiated, the regressor is fitted to the training data. Predicting the Test set results ###Code y_pred = regressor.predict(X_test) ###Output _____no_output_____ ###Markdown Visualising the Training set results ###Code plt.scatter(X_train, y_train, color = 'red') plt.plot(X_train, regressor.predict(X_train), color = 'blue') plt.title('Salary vs Experience (Training set)') plt.xlabel('Years of Experience') plt.ylabel('Salary') plt.show() ###Output _____no_output_____ ###Markdown Visualising the Test set results ###Code plt.scatter(X_test, y_test, color = 'red') plt.plot(X_train, regressor.predict(X_train), color = 'blue') plt.title('Salary vs Experience (Test set)') plt.xlabel('Years of Experience') plt.ylabel('Salary') plt.show() ###Output _____no_output_____
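###Markdown Beyond the plots, it is worth inspecting the fitted line and quantifying the fit. A sketch using the `regressor`, `y_test` and `y_pred` defined above together with scikit-learn's metrics module: ###Code
from sklearn.metrics import mean_absolute_error, r2_score

# Fitted parameters of the line salary = slope * years + intercept
print('Slope (salary increase per year of experience):', regressor.coef_[0])
print('Intercept (estimated starting salary):', regressor.intercept_)

# Error metrics on the held-out test set
print('Test MAE:', mean_absolute_error(y_test, y_pred))
print('Test R^2:', r2_score(y_test, y_pred))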
1 Big data essentials/Homeworks/Week 3/4 Word Groups.ipynb
###Markdown Assignment 4: Word GroupsCalculate statistics for groups of words which are equal up to permutations of letters. For example, ‘emit’, ‘item’ and ‘time’ are the same words up to a permutation of letters. Determine such groups of words and sum all their counts. Apply stop words filter. Filter out groups that consist of only one word.Output: count of occurrences for the group of words, number of unique words in the group, comma-separated list of the words in the group in lexicographical order:```sum group size word1,word2,...```Example: assume ‘emit’ occurred 3 times, 'item' -- 2 times, 'time' -- 5 times; 3 + 2 + 5 = 10, group contains 3 words, so for this group result is:```10 3 emit,item,time```The result of the task is the output line with word ‘english’.***NB:*** *Do not forget about the lexicographical order of words in the group: 'emit,item,time' is OK, 'emit,time,item' is not.* ###Code %%writefile mapper.py import sys import re reload(sys) sys.setdefaultencoding('utf-8') # required to convert to unicode with open('stop_words_en.txt') as f: stop_words = set(f.read().split()) for line in sys.stdin: try: article_id, text = unicode(line.strip()).split('\t', 1) except ValueError as e: continue words = re.split("\W*\s+\W*", text, flags=re.UNICODE) for word in words: word = word.lower() if word in stop_words: continue word_sorted = ''.join(sorted(word)) print "%s\t%d\t%s" % (word_sorted, 1, word) %%writefile reducer.py import sys import re reload(sys) sys.setdefaultencoding('utf-8') # required to convert to unicode current_key = None current_cnt = 0 words_set = set() for line in sys.stdin: try: key, cnt, word = unicode(line.strip()).split('\t') cnt = int(cnt) except ValueError as e: continue if current_key != key: if current_key and (len(words_set) > 1): print "%d\t%d\t%s" % (current_cnt, len(words_set), ','.join(sorted(words_set))) current_key = key words_set = set() words_set.add(word) current_cnt = cnt else: words_set.add(word) current_cnt += cnt print "%d\t%d\t%s" % (current_cnt, len(words_set), ','.join(sorted(words_set))) %%bash OUT_DIR="word_groups" NUM_REDUCERS=8 hdfs dfs -rm -r -skipTrash ${OUT_DIR} > /dev/null yarn jar /opt/cloudera/parcels/CDH/lib/hadoop-mapreduce/hadoop-streaming.jar \ -D mapred.jab.name="Streaming word groups" \ -D mapreduce.job.reduces=${NUM_REDUCERS} \ -files mapper.py,reducer.py,/datasets/stop_words_en.txt \ -mapper "python2 mapper.py" \ -reducer "python2 reducer.py" \ -input /data/wiki/en_articles_part \ -output ${OUT_DIR} > /dev/null hdfs dfs -cat word_groups/* | grep -P '(,|\t)english($|,)' ###Output 7820 5 english,helsing,hesling,shengli,shingle
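###Markdown The grouping logic can be checked locally (without Hadoop) on the toy counts from the problem statement; a small sketch in plain Python: ###Code
from collections import defaultdict

# Toy word counts from the assignment example
toy_counts = {'emit': 3, 'item': 2, 'time': 5}

# Group words by their sorted letters (the same key the mapper emits)
groups = defaultdict(list)
for word, cnt in toy_counts.items():
    groups[''.join(sorted(word))].append((word, cnt))

for key, members in groups.items():
    if len(members) > 1:  # skip single-word groups
        total = sum(cnt for _, cnt in members)
        words = ','.join(sorted(w for w, _ in members))
        print("%d\t%d\t%s" % (total, len(members), words))  # expected: 10  3  emit,item,time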
notebooks/vaccine_clinic_model4.ipynb
###Markdown Improving our initial vaccine clinic modelNow that we have a rough first model working, let's think about some possible improvements. There are many as we've taken numerous shortcuts to get this model working. In particular, we'll take on these three improvements:* Specifying the global sim inputs through a command line interface (CLI)* Getting rid of hard coded processing time distributions* Automating the post-processing of simulation outputs to create summary statistics. Redesign for easier use Hard coded input parametersThe current version of the model has many hard coded input parameters, including:* patient arrival rate,* percent of patients requiring 2nd dose,* capacity of the various resources (e.g. vaccinators),* processing time distributions for the various stages of the vaccination process,* length of time patient needs to remain in observation after being vaccinated (i.e. the 15 minutes),* length of time to run the simulation model.This makes it cumbersome to try out different scenarios relating to patient volume and resource capacity. Better post-processingWe should make it easy to specify that some post-processing should just happen automatically. In general, we want better control over post-processing and think through how we want to create and store output files. Moving code from a Jupyter notebook to a Python scriptAs our code grows, working inside a Jupyter notebook becomes less practical. It's fine for testing out little code snippets, but the production code should be in a `.py` file and we should use an IDE for further development. Adding a command line interfaceBy moving the code to a `.py` file, it will also be easier to add and use a simple command line interface. This will be useful for automating the process of running numerous scenarios. Creating a CLI and using config filesThese two enhancements are actually related and we'll address them together. Instead of hard coding input parameter values into our code, we could store them in a simple configuration file. Before getting into configuration files, we need to step back and review passing command line arguments to Python scripts. Basic handling of command line arguments (learning about `argv`)Back in the pcda class, we used some materials from the [Software Carpentry Python Programming lesson](). There were several parts that we left as optional, including the last part on [Command Line Programs](https://swcarpentry.github.io/python-novice-inflammation/12-cmdline/index.html). I highly encourage you to review that first if you've never worked with command line arguments in Python.**Bottom line:** `sys.argv` is a **list** of command line arguments passed to a Python program when running the program. Let's create a small program that takes a few command line arguments and just repeats them back when the program is run. Here's what the program looks like. I've saved it as `echo_args.py`. We'll interpret the command line arguments as follows:1. mean interarrival times of patients2. number of greeters3. number of registration staff4. number of vaccinators5. number of schedulersWe call these *positional arguments* in that our program will infer the meaning of each passed in argument from its position on the command line. 
###Code # echo_args.py import sys def main(): print(f"Command line args: {sys.argv}\n") for i, arg in enumerate(sys.argv): print(f"sys.argv[{i}]: {arg}") if __name__ == '__main__': main() !python ../src/vaccine_clinic/echo_args.py 3.0 2 4 15 4 ###Output Command line args: ['../src/vaccine_clinic/echo_args.py', '3.0', '2', '4', '15', '4'] sys.argv[0]: ../src/vaccine_clinic/echo_args.py sys.argv[1]: 3.0 sys.argv[2]: 2 sys.argv[3]: 4 sys.argv[4]: 15 sys.argv[5]: 4 ###Markdown So, `sys.argv` is a list of the command line arguments passed in to `echo_args.py`. Note that `sys.argv[0]` is just the name of the program itself, including the relative path from this working directory to the program file. Also notice the use of `enumerate` with `sys.argv` which allows us to get both the index and argument value from the `sys.argv` list - no need to make our own index loop.At this point, our program doesn't do any checking of the presence or validity of the input arguments. It just prints back out whatever values we input on the command line. If all of these arguments were required, we could add a check like this: ###Code # echo_args.py import sys def main(): print(f"Command line args: {sys.argv}\n") if len(sys.argv) != 6: print(f"Five args required, only {len(sys.argv) - 1} specified.") exit(1) for i, arg in enumerate(sys.argv): print(f"sys.argv[{i}]: {arg}") if __name__ == '__main__': main() !python ../src/vaccine_clinic/echo_args.py 3.0 2 4 ###Output Command line args: ['../src/vaccine_clinic/echo_args.py', '3.0', '2', '4'] Five positional args required, only 3 specified. ###Markdown What's up with the `__name__` thing? ###Code __name__ ###Output _____no_output_____ ###Markdown If you've forgotten what the purpose of the following is:```pythonif __name__ == '__main__':```then, make sure you review the [Command Line Programs](https://swcarpentry.github.io/python-novice-inflammation/12-cmdline/index.html) mentioned above. In a nutshell, when a Python program is run (as opposed to being imported), the special Python variable `__name__` is equal to `'__main__'`. When a Python program is imported, `__name__` is equal to the name of the module. So, the code snippet above is often included so that a Python program can be both run and imported. The example above is highly simplified and the world of command line arguments and command line processing, known as *argument parsing*, is much richer than this. Not only do we have *arguments* such as in this example, we also might have *options* (also called *flags*). In the pcda class we saw this when using things like the `ls` command:```ls -l -a```We might want to define options for our Python program. These options might appear in any order, if at all. Furthermore,by convention there are short form flags that start with a single `-` such as `-a` in the example above, and long form options that start with `--` such as `--version`. Often we can use either a short or long form option such as `-h` or `--help`. Before checking out argument parsing tools, let's do it ourselves for the following simple case. Let's assume we just want the following command line options. They can be in any order but each must be followed by a numeric value for that option. 
So,`argv[i]` will be one of the following for odd values of `i` and `argv[i + 1]` will be the corresponding value.* `--iat` : mean patient interarrival time* `--greet` : number of greeters* `--reg` : number of registration clerks* `--vacc` : number of vaccinators* `--sched` : number of schedulersThe following is just a code snippet to illustrate how one might get the input values using standard Python. Once we have the input values, we could pass them on to other parts of our simulation model. Notice that this code doesn't do any input validity checking other than checking for invalid option names. The user input values are stored in a dictionary and this code just prints out that dictionary. ###Code # get_option_values.py import sys def main(): input_params = {'mean_interarrival_time': 0.0, 'num_greeters': 0, 'num_reg_staff': 0, 'num_vaccinators': 0, 'num_schedulers': 0} for i, arg in enumerate(sys.argv): if arg.startswith('--') and i % 2 > 0: if sys.argv[i] == '--iat': input_params['mean_interarrival_time'] = sys.argv[i + 1] elif sys.argv[i] == '--greet': input_params['num_greeters'] = sys.argv[i + 1] elif sys.argv[i] == '--reg': input_params['num_reg_staff'] = sys.argv[i + 1] elif sys.argv[i] == '--vacc': input_params['num_vaccinators'] = sys.argv[i + 1] elif sys.argv[i] == '--sched': input_params['num_schedulers'] = sys.argv[i + 1] else: print(f"Unrecognized argument: {sys.argv[i]}") print(input_params) if __name__ == '__main__': main() !python ../src/vaccine_clinic/get_option_values.py --iat 3.0 --greet 2 --reg 4 --vacc 15 --sched 4 ###Output {'mean_interarrival_time': '3.0', 'num_greeters': '2', 'num_reg_staff': '4', 'num_vaccinators': '15', 'num_schedulers': '4'} ###Markdown Here we change the order and include one bad option. ###Code !python ../src/vaccine_clinic/get_option_values.py --vacc 15 --iat 3.0 --greet 2 --reg 4 --schedulers 4 ###Output Unrecognized argument: --schedulers {'mean_interarrival_time': '3.0', 'num_greeters': '2', 'num_reg_staff': '4', 'num_vaccinators': '15', 'num_schedulers': 0} ###Markdown Learning more about command line argumentsIf you are interested in a deeper dive into command line arguments, check out this tutorial done by the folks at Real Python:* [Python Command Line Arguments](https://realpython.com/python-command-line-arguments/) Tools for argument parsing and building CLI's. Clearly, it is going to be (potentially) difficult to deal with all the complexity of command line *argument parsing* manually by checking all the values in the `sys.argv` list. Thankfully, there are numerous tools for doing command line argument parsing and helping us create command line interfaces for our Python programs. Some of these tools include:* [argparse](https://docs.python.org/3/library/argparse.html) - part of the Python standard library* [click](https://click.palletsprojects.com/en/7.x/) - a popular library for CLIs that uses [function decorators]()* [fire](https://github.com/google/python-fire) - a newer CLI toolFor our simulation model, we'll use `argparse` to build our CLI since it's built in to Python and is a good thing to learn if you are new to creating CLIs. It's plenty powerful enough for our simple application. 
Some learning resources for `argparse` include:* [A "gentle" argparse tutorial](https://docs.python.org/3/howto/argparse.htmlid1)* https://docs.python.org/3/library/argparse.html - the official docs* https://realpython.com/command-line-interfaces-python-argparse/ - tutorial from Real Python (different than the one on Command Line Arguments mentioned above - this one focuses on argparse) Creating a CLI with argparse As discussed in the tutorials above, there are four main steps to creating a CLI with argparse.1. Import the **argparse** library2. Create a **parser object**3. **Add arguments** of the desired types to the parser4. Use it by **calling the `parse_args` method** of the parser objectAfter calling `parse_args` you'll get what is known as a [Namespace object](https://docs.python.org/dev/library/argparse.htmlargparse.Namespace) with attributes corresponding to the input arguments.I rewrote the `echo_args.py` example using argparse. You can find it in `echo_args_argparse.py`. It just prints out `args` (the Namespace object) and `vars(args)` which gives a dictionary version of the arguments and their values. To access an individual item, say 'iat', in `args`, you just use `args.iat`. ###Code !python ../src/vaccine_clinic/echo_args_argparse.py --iat 3.0 --greet 2 --reg 4 --vacc 15 --sched 4 ###Output args.iat: 3.0 args: Namespace(greet=2, iat=3.0, reg=4, sched=4, vacc=15) vars(args): {'iat': 3.0, 'greet': 2, 'reg': 4, 'vacc': 15, 'sched': 4}
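###Markdown The source of `echo_args_argparse.py` isn't listed in this notebook. As a sketch only (not necessarily the actual file in the repo), a version that follows the four steps above and is consistent with the output of the run above might look like this: ###Code
# echo_args_argparse.py -- illustrative sketch, option names inferred from the earlier examples
import argparse


def main():
    # 1) import argparse (above), 2) create a parser object
    parser = argparse.ArgumentParser(description='Echo vaccine clinic simulation inputs.')

    # 3) add arguments of the desired types
    parser.add_argument('--iat', type=float, help='mean patient interarrival time')
    parser.add_argument('--greet', type=int, help='number of greeters')
    parser.add_argument('--reg', type=int, help='number of registration staff')
    parser.add_argument('--vacc', type=int, help='number of vaccinators')
    parser.add_argument('--sched', type=int, help='number of schedulers')

    # 4) parse the command line into a Namespace object
    args = parser.parse_args()

    print(f"args.iat: {args.iat}")
    print(f"args: {args}")
    print(f"vars(args): {vars(args)}")


if __name__ == '__main__':
    main()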
Data Structures and Algorithms in Python/ch04-Recursion.ipynb
###Markdown 4.7 Exercises For help with the exercises, please visit the [site](http://www.wiley.com/college/goodrich). Reinforcement __R-4.1__ Describe a recursive algorithm for finding the maximum element in a sequence, S, of n elements. What is your running time and space usage? ###Code def recursive_max(arr, maximum=None): if len(arr) == 0: return maximum if maximum == None: return recursive_max(arr[1:], maximum=arr[0]) else: new_max = maximum if maximum > arr[0] else arr[0] return recursive_max(arr[1:], maximum=new_max) assert recursive_max(arr=[100,20,34,33,12,78,43,3,200]) == 200 ###Output _____no_output_____ ###Markdown The recursion makes $O(n)$ calls, but each call copies the remaining elements with `arr[1:]`, so as written the running time is $O(n^2)$ and the slices held on the call stack use $O(n^2)$ space. Recursing on an index into the original sequence instead of slicing would reduce both to $O(n)$. __R-4.2__ Draw the recursion trace for the computation of power(2, 5), using the traditional function implemented in Code Fragment 4.11. ###Code def power(n, m, trace=False): if m == 0: return 1 if trace: print(f"power({n},{m-1})") return n * power(n, m-1, trace=trace) power(2,5, trace=True) ###Output power(2,4) power(2,3) power(2,2) power(2,1) power(2,0) ###Markdown __R-4.3__ Draw the recursion trace for the computation of power(2, 18), using the repeated squaring algorithm, as implemented in Code Fragment 4.12. ###Code power(2,18,trace=True) ###Output power(2,17) power(2,16) power(2,15) power(2,14) power(2,13) power(2,12) power(2,11) power(2,10) power(2,9) power(2,8) power(2,7) power(2,6) power(2,5) power(2,4) power(2,3) power(2,2) power(2,1) power(2,0) ###Markdown Note that the trace above comes from the linear-recursion `power`, not the repeated squaring algorithm the exercise refers to; a repeated-squaring version (sketched at the end of this notebook) makes only about five recursive calls for power(2, 18). Creativity __C-4.9__ Write a short recursive Python function that finds the minimum and maximum values in a sequence without using any loops. ###Code def recursive_min_max(arr, minimum=None, maximum=None): if len(arr) == 0: return minimum, maximum if maximum == None and minimum == None: return recursive_min_max(arr[1:], minimum=arr[0], maximum=arr[0]) else: new_max = maximum if maximum > arr[0] else arr[0] new_min = minimum if minimum < arr[0] else arr[0] return recursive_min_max(arr[1:], minimum=new_min, maximum=new_max) # Testing and Plotting from random import randint import sys from time import time, sleep import matplotlib.pyplot as plt import numpy as np sys.setrecursionlimit(20000) run_times = [] loop = list(range(1000,13001, 1000)) for element in loop: stime = time() recursive_min_max(arr=[randint(1,1e20) for _ in range(int(element))]) run_times.append(time() - stime) #sleep(0.5) plt.plot(loop, run_times) run_dict = {k:v for k,v in zip(loop, run_times)} run_dict ###Output _____no_output_____ ###Markdown __C-4.10__ Describe a recursive algorithm to compute the integer part of the base-two logarithm of n using only addition and integer division. ###Code def int_log(n): if n == 1: return 0 return 1 + int_log(n//2) # Testing import math for i in range(100,15000): assert int_log(i) == int(math.log2(i)) ###Output _____no_output_____ ###Markdown __P-4.23__ Implement a recursive function with signature find(path, filename) that reports all entries of the file system rooted at the given path having the given file name. 
###Code import os def find(path, filename): content = os.listdir(path) current = None for f in content: current = os.path.join(path, f) try: if os.path.isfile(os.path.join(path, f)) and f == filename: print(os.path.join(path, filename)) elif os.path.isdir(os.path.join(path, f)): find(os.path.join(path, f), filename) if f == filename: print(os.path.join(path, filename), "/", sep="") except PermissionError: print("PermissionError occurred in processing:", current) except Exception: print("Error occurred in processing:", current) ### find(".", "README.md") ###Output ./README.md ###Markdown __P-4.27__ Python’s os module provides a function with signature walk(path) that is a generator yielding the tuple (dirpath, dirnames, filenames) for each subdirectory of the directory identified by string path, such that string dirpath is the full path to the subdirectory, dirnames is a list of the names of the subdirectories within dirpath, and filenames is a list of the names of non-directory entries of dirpath. For example, when visiting the cs016 subdirectory of the file system shown in Figure 4.6, the walk would yield ( /user/rt/courses/cs016 , \[ homeworks , programs \], \[ grades \]). Give your own implementation of such a walk function. ###Code import os def walk(path, topdown=True, onerror=None, followlinks=False): """ Directory tree generator. Mimicking the os.walk() function. For each directory in the directory tree rooted at top (including top itself, but excluding '.' and '..'), yields a 3-tuple dirpath, dirnames, filenames """ output = (path, [], []) # (dirpath, dirnames, filenames) try: for f in os.listdir(path): if os.path.isdir(os.path.join(path, f)): output[1].append(f) else: output[2].append(f) if topdown: yield output for f in output[1]: if followlinks and os.path.islink(os.path.join(path, f)): yield from walk(os.path.join(path, f), topdown, onerror, followlinks) elif not os.path.islink(os.path.join(path, f)): yield from walk(os.path.join(path, f), topdown, onerror, followlinks) if not topdown: yield output except PermissionError as e: #if onerror: print(e) except Exception as e: #if onerror: print(e) # Comparing to the os.walk() for e in walk(".", topdown=True, followlinks=True): print(e) print("****************************") for e in os.walk(".", topdown=True, followlinks=True): print(e) # Using the walk method for path, _, files in walk("/home/fred/Videos/"): for file in files: if file.endswith(".mp4"): print(os.path.join(path, file)) ###Output /home/fred/Videos/Classical_RL.mp4
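###Markdown One loose end from R-4.3 earlier: the trace there reuses the linear-recursion `power`, which makes 18 recursive calls for power(2, 18). A sketch of a repeated-squaring version (my own version, not the book's Code Fragment 4.12 verbatim), which halves the exponent at each step: ###Code
def power_squaring(n, m, trace=False):
    """Compute n ** m with repeated squaring, using O(log m) recursive calls."""
    if m == 0:
        return 1
    if trace:
        print(f"power({n},{m // 2})")
    half = power_squaring(n, m // 2, trace=trace)
    result = half * half
    if m % 2 == 1:  # an odd exponent needs one extra factor of n
        result *= n
    return result


assert power_squaring(2, 18) == 2 ** 18
power_squaring(2, 18, trace=True)  # prints only 5 recursive calls instead of 18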
CLIP_Patch_Detection.ipynb
###Markdown CLIP Patch DetectionThis Colab notebook demos crude object detection by spliting an image into patches and finding the highest patch-caption similarity in CLIP embedding space. ###Code #@title Install dependencies #@markdown Please execute this cell by pressing the _Play_ button #@markdown on the left. #@markdown **Note**: This installs the software on the Colab #@markdown notebook in the cloud and not on your computer. %%capture !pip install ftfy regex tqdm matplotlib !pip install git+https://github.com/openai/CLIP.git import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import math import urllib.request import matplotlib.pyplot as plt import clip from PIL import Image from torchvision import transforms #@title Helper functions #@markdown Some helper functions for loading, patchifying and visualizing images. def load_image(img_path, resize=None, pil=False): image = Image.open(image_path).convert("RGB") if resize is not None: image = image.resize((resize, resize)) if pil: return image return np.asarray(image).astype(np.float32) / 255. def viz_patches(x, figsize=(20, 20), patch_idx=None, topk=None, t=5): # x: num_patches, 3, patch_size, patch_size n = x.shape[0] nrows = int(math.sqrt(n)) fig, axes = plt.subplots(nrows, nrows, figsize=figsize) for i, ax in enumerate(axes.flatten()): im = x[i].permute(1, 2, 0).numpy() im = (im * 255.).round().astype(np.uint8) if patch_idx is not None and i == patch_idx: im[0:t] = (255, 0, 0) im[im.shape[0]-t:] = (255, 0, 0) im[:, 0:t] = (255, 0, 0) im[:, im.shape[1]-t:] = (255, 0, 0) if topk is not None: if i in topk and i != patch_idx: im[0:t] = (255, 255, 0) im[im.shape[0]-t:] = (255, 255, 0) im[:, 0:t] = (255, 255, 0) im[:, im.shape[1]-t:] = (255, 255, 0) ax.imshow(im) ax.axis("off") plt.show() def patchify(image_path, resolution, patch_size, patch_stride=None): img_tensor = transforms.ToTensor()(load_image(image_path, resolution, True)) if patch_stride is None: patch_stride = patch_size patches = img_tensor.unfold( 1, patch_size, patch_stride).unfold(2, patch_size, patch_stride) patches = patches.reshape(3, -1, patch_size, patch_size).permute(1, 0, 2, 3) return patches # N, 3, patch_size, patch_size #@title Image and Patch Settings { run: "auto" } image_url = 'https://images2.minutemediacdn.com/image/upload/c_crop,h_706,w_1256,x_0,y_64/f_auto,q_auto,w_1100/v1554995050/shape/mentalfloss/516438-istock-637689912.jpg' #@param {type:"string"} image_resolution = 900#@param {type:"integer"} patch_size = 224#@param {type:"integer"} # integer_input = 10 #@param {type:"integer"} # integer_slider = 21 #@param {type:"slider", min:0, max:100, step:1} # Download the image from the web. image_path = 'image.png' urllib.request.urlretrieve(image_url, image_path) patches = patchify(image_path, image_resolution, patch_size) print("patches: ", patches.shape) viz_patches(patches, figsize=(8, 8)) #@title Detect clip_model = "RN50" #@param ["RN50", "RN101", "RN50x4", "RN50x16", "ViT-B/32", "ViT-B/16"] image_caption = 'the dog' #@param {type:"string"} topk = 6#@param {type:"integer"} # Load CLIP model. device = "cuda" if torch.cuda.is_available() else "cpu" model, preprocess = clip.load(clip_model, device=device, jit=False) text_input = clip.tokenize([image_caption]).to(device) # Pad in case not equal to model expected input resolution. 
p = model.visual.input_resolution - patch_size patches_pad = torch.nn.functional.pad( patches, (p//2, p//2, p//2, p//2), "constant", 0).to(device) with torch.no_grad(): patch_embs = model.encode_image(patches_pad) text_embs = model.encode_text(text_input) patch_embs = patch_embs / patch_embs.norm(dim=-1, keepdim=True) text_embs = text_embs / text_embs.norm(dim=-1, keepdim=True) sim = patch_embs @ text_embs.t() idx_max = sim.argmax().item() topk_idxs = torch.topk(sim.flatten(), topk)[-1].cpu().numpy().tolist() viz_patches(patches, figsize=(10, 10), patch_idx=idx_max, topk=topk_idxs, t=int(0.05*patch_size)) ###Output _____no_output_____
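###Markdown To see how confident the match is, one can also print the raw similarity scores of the best patches rather than only highlighting them. A small sketch using the `sim` tensor, `topk` and `idx_max` computed above: ###Code
# Report the top-k patch-caption similarities
top_vals, top_idxs = torch.topk(sim.flatten(), topk)
for rank, (val, idx) in enumerate(zip(top_vals.tolist(), top_idxs.tolist()), start=1):
    marker = ' <- best patch' if idx == idx_max else ''
    print(f'rank {rank}: patch {idx}, cosine similarity {val:.3f}{marker}')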
Scikit.ipynb
###Markdown Scikit-Learn Decision trees In order to use the scikit-learn machine learning library we need to import it first. A few other libraries are used as well. ###Code # Numerical library import numpy as np # Machine learning tools from sklearn import tree # For plotting import matplotlib.pyplot as plt ###Output _____no_output_____ ###Markdown Regression Fit to a sine wave **STEPS** 1) Random data 2) Select regression model 3) Predict 4) Results ###Code # 1) Create a random dataset # Create a random number generator (numbers drawn from a given distribution) rng = np.random.RandomState(1) #print("Random state", rng) #help(np.random.RandomState) # Generate random data # The arguments of rand are the dimensions of the output array X = np.sort(5 * rng.rand(80, 1), axis=0) #help(np.sort) #print("X-variable", X) ## the ravel method flattens the array, i.e. to one dimension y = np.sin(X).ravel() #print("Y-variable", y) # Add noise to every 5th element (remember the i:j:k slicing notation) y[::5] += 3 * (0.5 - rng.rand(y[::5].size)) #print("Modified Y", y) # 2) Fit regression model # Build regression tree of depth 2 regr_1 = tree.DecisionTreeRegressor(max_depth=2) # Build regression tree of depth 5 number_of_search = 5 regr_2 = tree.DecisionTreeRegressor(max_depth= number_of_search) regr_1.fit(X, y) regr_2.fit(X, y) # 3) Predict X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis] y_1 = regr_1.predict(X_test) y_2 = regr_2.predict(X_test) # 4) Plot the results plt.figure() plt.scatter(X, y, s=20, edgecolor="black", c="darkorange", label="data") plt.plot(X_test, y_1, color="cornflowerblue", label="max_depth=2", linewidth=2) plt.plot(X_test, y_2, color="yellowgreen", label= "max_depth=" + str(number_of_search) , linewidth=2) plt.xlabel("data") plt.ylabel("target") plt.title("Decision Tree Regression") plt.legend() plt.show() ###Output _____no_output_____ ###Markdown SCIKIT-IMAGE Contour plot ###Code from skimage import measure # Construct some test data x, y = np.ogrid[-np.pi:np.pi:100j, -np.pi:np.pi:100j] r = np.sin(np.exp((np.sin(x)**3 + np.cos(y)**2))) # Find contours at a constant value of 0.8 contours = measure.find_contours(r, 0.8) # Display the image and plot all contours found fig, ax = plt.subplots() ax.imshow(r, interpolation='nearest', cmap=plt.cm.gray) for n, contour in enumerate(contours): ax.plot(contour[:, 1], contour[:, 0], linewidth=2) ax.axis('image') ax.set_xticks([]) ax.set_yticks([]) plt.show() ###Output _____no_output_____ ###Markdown Classification with sklearn*from Python Machine Learning by Sebastian Raschka under the MIT License (MIT)*This code might be directly from the book, mine, or a mix. Import iris dataset ###Code from sklearn import datasets import numpy as np iris = datasets.load_iris() X = iris.data[:, [2,3]] y = iris.target print('Class labels:', np.unique(y)) ###Output Class labels: [0 1 2] ###Markdown Split dataset into separate training and test datasets. Scale features. 
###Code from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) sc = StandardScaler() sc.fit(X_train) X_train_std = sc.transform(X_train) X_test_std = sc.transform(X_test) ###Output _____no_output_____ ###Markdown Perceptron with Scikit ###Code from sklearn.linear_model import Perceptron from sklearn.metrics import accuracy_score ppn = Perceptron(n_iter=40, eta0=0.1, random_state=0) ppn.fit(X_train_std, y_train) y_pred = ppn.predict(X_test_std) print('Misclassified samples: %d' % (y_test != y_pred).sum()) print('Accuracy: %2f' % accuracy_score(y_test, y_pred)) ###Output Misclassified samples: 4 Accuracy: 0.911111 ###Markdown Plot ###Code from matplotlib.colors import ListedColormap import matplotlib.pyplot as plt def plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02): # setup marker generator and color map markers = ('s', 'x', 'o', '^', 'v') colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan') cmap = ListedColormap(colors[:len(np.unique(y))]) # plot the decision surface x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1 x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution), np.arange(x2_min, x2_max, resolution)) Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T) Z = Z.reshape(xx1.shape) plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap) plt.xlim(xx1.min(), xx1.max()) plt.ylim(xx2.min(), xx2.max()) # plot all samples for idx, cl in enumerate(np.unique(y)): plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1], alpha=0.8, c=cmap(idx), marker=markers[idx], label=cl) if test_idx: X_test, y_test = X[test_idx, :], y[test_idx] plt.scatter(X_test[:, 0], X_test[:, 1], c='', alpha=1.0, linewidths=1, marker='o', s=55, label='test set') X_combined_std = np.vstack((X_train_std, X_test_std)) y_combined = np.hstack((y_train, y_test)) plot_decision_regions(X=X_combined_std, y=y_combined, classifier=ppn, test_idx=range(105, 150)) plt.xlabel('petal length [std]') plt.ylabel('petal width [std]') plt.legend(loc='upper left') plt.show() ###Output _____no_output_____ ###Markdown Logistic regression model ###Code from sklearn.linear_model import LogisticRegression lr = LogisticRegression(C=1000.0, random_state=0) lr.fit(X_train_std, y_train) plot_decision_regions(X_combined_std, y_combined, classifier=lr, test_idx=range(105, 150)) plt.xlabel('petal length [std]') plt.ylabel('petal width [std]') plt.legend(loc='upper left') plt.show() lr.predict_proba(X_test_std[0, :].reshape(1, -1)) ###Output _____no_output_____ ###Markdown SVM ###Code from sklearn.svm import SVC svm = SVC(kernel='linear', C=1.0, random_state=0) svm.fit(X_train_std, y_train) plot_decision_regions(X_combined_std, y_combined, classifier=svm, test_idx=range(105, 150)) plt.xlabel('petal length [std]') plt.ylabel('petal width [std]') plt.legend(loc='upper left') plt.show() ###Output _____no_output_____ ###Markdown RFB Kernel SVM ###Code svm = SVC(kernel='rbf', random_state=0, gamma=0.4, C=1.0) svm.fit(X_train_std, y_train) plot_decision_regions(X_combined_std, y_combined, classifier=svm, test_idx=range(105, 150)) plt.xlabel('petal length [std]') plt.ylabel('petal width [std]') plt.legend(loc='upper left') plt.show() ###Output _____no_output_____ ###Markdown Decision trees ###Code from sklearn.tree import DecisionTreeClassifier tree = DecisionTreeClassifier(criterion='entropy', max_depth=3, random_state=0) tree.fit(X_train, y_train) 
X_combined = np.vstack((X_train, X_test)) y_combined = np.hstack((y_train, y_test)) plot_decision_regions(X_combined, y_combined, classifier=tree, test_idx=range(105, 150)) plt.xlabel('petal length [cm]') plt.ylabel('petal width [cm]') plt.legend(loc='upper left') plt.show() ###Output _____no_output_____ ###Markdown Random forest ###Code from sklearn.ensemble import RandomForestClassifier forest = RandomForestClassifier(criterion='entropy', n_estimators=10, random_state=1, n_jobs=2) forest.fit(X_train, y_train) plot_decision_regions(X_combined, y_combined, classifier=forest, test_idx=range(105, 150)) plt.xlabel('petal length [cm]') plt.ylabel('petal width [cm]') plt.legend(loc='upper left') plt.show() ###Output _____no_output_____ ###Markdown K-nearest neighbors ###Code from sklearn.neighbors import KNeighborsClassifier knn = KNeighborsClassifier(n_neighbors=5, p=2, metric='minkowski') knn.fit(X_train_std, y_train) plot_decision_regions(X_combined_std, y_combined, classifier=knn, test_idx=range(105, 150)) plt.xlabel('petal length [standardized]') plt.ylabel('petal width [standardized]') plt.legend(loc='upper left') plt.show() ###Output _____no_output_____
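###Markdown The notebook fits several classifiers but never compares them side by side. A sketch of a compact comparison on the held-out set, using the fitted objects above — note that the tree-based models were fitted on the unscaled features, the others on the standardized ones: ###Code
from sklearn.metrics import accuracy_score

# (name, fitted classifier, matching test features)
fitted_models = [('Perceptron', ppn, X_test_std),
                 ('Logistic regression', lr, X_test_std),
                 ('SVM (rbf kernel)', svm, X_test_std),
                 ('Decision tree', tree, X_test),
                 ('Random forest', forest, X_test),
                 ('KNN', knn, X_test_std)]

for name, clf, features in fitted_models:
    acc = accuracy_score(y_test, clf.predict(features))
    print('%-20s test accuracy: %.3f' % (name, acc))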
Analysis_Lorenz96.ipynb
###Markdown Data Analysis for Inverse Observation Data Assimilation of Lorenz96This notebook analyzes the paper's data and reproduces the plots. ###Code import os os.environ["CUDA_VISIBLE_DEVICES"]="" # everything here can run on CPU import warnings warnings.filterwarnings('ignore') from functools import partial import numpy as np import numpy.linalg as la import scipy import jax import jax.numpy as jnp import jax.numpy.linalg as jla import xarray as xr import seaborn as sns import matplotlib.pyplot as plt from cycler import cycler %matplotlib inline from dynamical_system import Lorenz96 from analysis_util import ( integrate_lorenz96_xr, compute_l1_error_lorenz96, adjust_row_labels, plot_colors, load_da_results, ) # create figure directory ! mkdir -p figures ###Output _____no_output_____ ###Markdown Copy data from Google cloudThis requires [gsutil](https://cloud.google.com/storage/docs/gsutil). ###Code !gsutil cp -r gs://gresearch/jax-cfd/projects/invobs-data-assimilation/invobs-da-results/ /tmp ###Output Copying gs://gresearch/jax-cfd/projects/invobs-data-assimilation/invobs-da-results/kolmogorov_baselineinit_hybridopt.nc... Copying gs://gresearch/jax-cfd/projects/invobs-data-assimilation/invobs-da-results/kolmogorov_baselineinit_obsopt.nc... Copying gs://gresearch/jax-cfd/projects/invobs-data-assimilation/invobs-da-results/kolmogorov_invobsinit_hybridopt.nc... Copying gs://gresearch/jax-cfd/projects/invobs-data-assimilation/invobs-da-results/kolmogorov_invobsinit_obsopt.nc... \ [4 files][ 43.7 MiB/ 43.7 MiB] ==> NOTE: You are performing a sequence of gsutil operations that may run significantly faster if you instead use gsutil -m cp ... Please see the -m section under "gsutil help options" for further information about when gsutil -m can be advantageous. Copying gs://gresearch/jax-cfd/projects/invobs-data-assimilation/invobs-da-results/lorenz96_baselineinit_hybridopt.nc... Copying gs://gresearch/jax-cfd/projects/invobs-data-assimilation/invobs-da-results/lorenz96_baselineinit_obsopt.nc... Copying gs://gresearch/jax-cfd/projects/invobs-data-assimilation/invobs-da-results/lorenz96_invobsinit_hybridopt.nc... Copying gs://gresearch/jax-cfd/projects/invobs-data-assimilation/invobs-da-results/lorenz96_invobsinit_obsopt.nc... | [8 files][ 47.0 MiB/ 47.0 MiB] Operation completed over 8 objects/47.0 MiB. ###Markdown Load data ###Code path = '/tmp/invobs-da-results' filenames = [ 'lorenz96_baselineinit_obsopt.nc', 'lorenz96_baselineinit_hybridopt.nc', 'lorenz96_invobsinit_obsopt.nc', 'lorenz96_invobsinit_hybridopt.nc', ] retained_variables = [ 'f_vals', 'eval_vals', 'X0_ground_truth', 'X0_opt', 'X0_init', ] retained_attrs = [ 'observe_every', 'grid_size', 'num_time_steps', 'dt', ] full_filenames = [os.path.join(path, filename) for filename in filenames] ds = load_da_results(full_filenames, retained_variables, retained_attrs) ###Output _____no_output_____ ###Markdown Data assimilation initialization samplesComparison of initialization schemes. 
###Code sns.set(font_scale=2.6) sns.set_style('white') plt.rc('font', **{'family': 'Times New Roman'}) plt.rc( 'axes', prop_cycle=( cycler('color', [plot_colors['r'], plot_colors['b']]) + cycler('linestyle', ['-', '--']) ), ) da_init = ds[['X0_init','X0_ground_truth']].to_array() g = ( da_init .sel(opt_space='observation', n=5, init=['invobs', 'baseline']) .plot(x='x', col='init', hue='variable', lw=4, size=5, add_legend=False) ) observed = da_init.sel( opt_space='observation', n=5, variable='X0_ground_truth', )[:, ::4] observed_grid_points = np.arange(0, 40, 4) plt.xlim(-1,40) col_labels = ['inverse init', 'average init'] g.set_axis_labels('', '') [ax.set_title(t) for ax, t in zip(g.axes.ravel(), col_labels)] [ax.set_yticks([-5, 0, 5]) for ax in g.axes.ravel()] [ax.set_xticks([0, 40]) for ax in g.axes.ravel()] [ax.set_xlabel('grid', labelpad=-25) for ax in g.axes.ravel()] # g.set_titles('') for i, init_method in enumerate(da_init.init.values): g.axes[0,i].plot( observed_grid_points, observed.sel(init=init_method).data, ls='None', marker='o', markersize=10, color=plot_colors['y'], markeredgecolor='k', ) g.fig.tight_layout() plt.savefig('figures/da_init_lorenz96.pdf', bbox_inches='tight', pad_inches=0.1) ###Output _____no_output_____ ###Markdown Forecast quality ###Code lorenz96 = Lorenz96( grid_size=ds.attrs['grid_size'], observe_every=ds.attrs['observe_every'], dt=ds.attrs['dt'], ) X0_da = ds[['X0_ground_truth', 'X0_init', 'X0_opt']].to_array('data_type') \ .assign_coords({'data_type': ['gt', 'init', 'opt']}) X_da = integrate_lorenz96_xr(lorenz96, X0_da, 20) relative_scale = 166.28 # average L1 norm over independent samples l1_error = compute_l1_error_lorenz96(X_da, 'gt', scale=relative_scale) l1_error_stacked = ( l1_error .sel(opt_space=['observation', 'hybrid']) .mean(dim='n') .sel(data_type='opt', drop=True) .assign_coords( { 't': lorenz96.dt * np.arange(l1_error.sizes['t']), 'init': [s.split('_')[0] for s in l1_error.init.values], }, ) .stack(opt_method=['init', 'opt_space']) ) tuple_labels = l1_error_stacked.opt_method.values concat_labels = [ a + ' init' + ' / ' + b + ' opt' for a,b in tuple_labels] l1_error_stacked = l1_error_stacked.assign_coords({'opt_method': concat_labels}) # select to have a custom sort of the optimization methods l1_error_stacked = l1_error_stacked.sel( opt_method=[ 'invobs init / observation opt', 'invobs init / hybrid opt', 'baseline init / observation opt', 'baseline init / hybrid opt', ] ) plt.figure(figsize=(10, 7.5)) sns.set(font_scale=2.2) sns.set_style('ticks') plt.rc('font', **{'family': 'Times New Roman'}) plt.rc( 'axes', prop_cycle=( cycler('color', [plot_colors['r']]*2 + [plot_colors['b']]*2) + cycler('linestyle', ['-', 'dotted']*2) + cycler('marker', ['o', 'o', 'v', 'v']) ), ) time_steps = l1_error_stacked.coords['t'].values ax = plt.subplot(1,1,1) for opt_method in l1_error_stacked.opt_method.values: ax.plot( time_steps, l1_error_stacked.sel(opt_method=opt_method).values, markersize=13, markeredgecolor='white', lw=4, label=opt_method, ) sns.despine() plt.xlabel('time') plt.ylabel('mean relative $L_1$ error') plt.ylim(0, 0.6) plt.axvline(x=9.5 * lorenz96.dt, ymax=0.6, color='k', ls='--') plt.title('') plt.xticks(np.arange(0, 2.1, 0.5)) plt.yticks(np.arange(0, 0.61, 0.2)) # plt.legend(frameon=False) handles, labels = ax.get_legend_handles_labels() line_ordering = [2, 0, 3, 1] # legend ordering according to appearance in plot reordered_handles = [handles[i] for i in line_ordering] reordered_labels = [labels[i] for i in line_ordering] 
ax.legend(reordered_handles, reordered_labels, frameon=False) plt.savefig( 'figures/da_lorenz96_invobs.pdf', bbox_inches='tight', pad_inches=0.1, ) ###Output _____no_output_____ ###Markdown Summary statsCompare forecast performance on the first forecast state relative to baseline init and optimization method. ###Code summary_stats = l1_error.sel(data_type='opt', t=11).mean(dim='n') / l1_error.sel(data_type='opt', t=11, init='baseline', opt_space='observation').mean(dim='n') print( summary_stats.sel(opt_space='observation', init='baseline').values, summary_stats.sel(opt_space='hybrid', init='baseline').values, summary_stats.sel(opt_space='observation', init='invobs').values, summary_stats.sel(opt_space='hybrid', init='invobs').values, ) ###Output 1.0 0.07642817 0.25266525 0.07028136 ###Markdown Significance test between trajectoriesPerform a Z-test to evaluate significance level between optimization methods for the two initialization schemes. Inverse observation initialization ###Code time_step = 11 # beginning of forecast window num_samples = l1_error.sizes['n'] l1_error_inv = l1_error.sel(init='invobs', data_type='opt') diff_l1_error = ( l1_error_inv.sel(opt_space='observation') - l1_error_inv.sel(opt_space='hybrid') ) m = diff_l1_error.sel(t=time_step).mean(dim='n') s = diff_l1_error.sel(t=time_step).std(dim='n') Z = m / (s / np.sqrt(num_samples)) p = scipy.stats.norm.sf(np.abs(Z)) print('Z-value', Z.values) print('p-value', p) ###Output Z-value 3.7605415818595267 p-value 8.477294378820626e-05 ###Markdown Baseline initialization ###Code time_step = 11 # beginning of forecast window num_samples = l1_error.sizes['n'] l1_error_inv = l1_error.sel(init='baseline', data_type='opt') diff_l1_error = ( l1_error_inv.sel(opt_space='observation') - l1_error_inv.sel(opt_space='hybrid') ) m = diff_l1_error.sel(t=time_step).mean(dim='n') s = diff_l1_error.sel(t=time_step).std(dim='n') Z = m / (s / np.sqrt(num_samples)) p = scipy.stats.norm.sf(np.abs(Z)) print('Z-value', Z.values) print('p-value', p) ###Output Z-value 13.75386441553203 p-value 2.4140100201680256e-43 ###Markdown Assimilated trajectories ###Code baseline = ( X_da .sel(data_type=['opt', 'gt'], opt_space='observation', init='baseline') .reset_coords(['opt_space', 'init'], drop=True) ) invobs = ( X_da .sel(data_type=['opt', 'gt'], opt_space='hybrid', init='invobs') .reset_coords(['opt_space', 'init'], drop=True) ) forecast_comparison = ( xr.concat([invobs, baseline], dim='da_method') .assign_coords( da_method=['invobs', 'baseline'], t=lorenz96.dt * np.arange(baseline.sizes['t']), ) ) sns.set(font_scale=2.8) sns.set_style('white') plt.rc('font', **{'family': 'Times New Roman'}) plt.rc( 'axes', prop_cycle=( cycler('color', [plot_colors['r'], plot_colors['b']]) + cycler('linestyle', ['-', '--']) ), ) g = ( forecast_comparison .isel(n=5, t=[0, 10, 18]) .plot( x='x', row='t', hue='data_type', col='da_method', lw=4, size=5, add_legend=False, ) ) g.set_axis_labels('', '') col_labels = ['proposed', 'baseline'] [ax.set_title(t) for ax, t in zip(g.axes.ravel(), col_labels)] row_labels = [ 'initial state, t=0', 'start forecast, t=1.0', 'end forecast, t=1.8', ] adjust_row_labels(g, row_labels) [ax.set_yticks([-5, 0, 5]) for ax in g.axes.ravel()] [ax.set_xticks([0, 40]) for ax in g.axes.ravel()] for ax in g.axes.ravel()[-2:]: ax.set_xlabel('grid', labelpad=-30) plt.subplots_adjust(hspace=0, wspace=0.1) plt.savefig( 'figures/forecast_results_lorenz96.pdf', bbox_inches='tight', pad_inches=0.1, ) ###Output _____no_output_____
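###Markdown If the assembled forecast comparison is needed again, for example to restyle the figure without re-integrating the model, it can be written to disk. This is an optional sketch; the output filename is an arbitrary choice. ###Code
# persist the assembled comparison so the figure can be regenerated later
forecast_comparison.to_netcdf('figures/forecast_comparison_lorenz96.nc')
###Output _____no_output_____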
ph2_intro-glove/Intro_Tutorial_2-remove_list_elements.ipynb
###Markdown Intro. to Snorkel: Extracting Spouse Relations from the News Part II: Generating _and modeling_ noisy training labelsIn this part of the tutorial, we will write **labeling functions** which express various heuristics, patterns, and [_weak supervision_](http://hazyresearch.github.io/snorkel/blog/weak_supervision.html) strategies to label our data.In the wild, hand-labeled training data is rare and expensive. A common scenario is to have access to tons of unlabeled training data, and have some idea of how to label them programmatically. For example:* We may be able to think of text patterns that would indicate to people mention in a sentence are married, such as seeing the word "spouse" between the mentions.* We may have access to an external knowledge base that lists some pairs of famous people who are married, and can use these to noisily label some of our mention pairs, such as Barack and Michelle Obama.Our labeling functions will capture these types of strategies. We know that these labeling functions will not be perfect, and some may be quite low-quality, so we will _model_ their accuracies with a generative model, which Snorkel will help us easily apply.This will ultimately produce a single set of **noise-aware training labels**, which we will then use to train an end extraction model in the next notebook. For more technical details of this overall approach, see our [NIPS 2016 paper](https://arxiv.org/abs/1605.07723). ###Code %load_ext autoreload %autoreload 2 %matplotlib inline import os # TO USE A DATABASE OTHER THAN SQLITE, USE THIS LINE # Note that this is necessary for parallel execution amongst other things... # os.environ['SNORKELDB'] = 'postgres:///snorkel-intro' import numpy as np from snorkel import SnorkelSession session = SnorkelSession() ###Output _____no_output_____ ###Markdown We repeat our definition of the `Spouse` `Candidate` subclass from Parts II and III. ###Code from snorkel.models import candidate_subclass Spouse = candidate_subclass('Spouse', ['person1', 'person2']) print("Number of candidates:", session.query(Spouse).filter(Spouse.split == 1).count()) ###Output ('Number of candidates:', 2796) ###Markdown Using a _development set_In our setting here, we will use the phrase "development set" to refer to a set of examples (here, a subset of our training set) which we label by hand and use to help us develop and refine labeling functions. Unlike the _test set_, which we do not look at and use for final evaluation, we can inspect the development set while writing labeling functions.In our case, we already loaded labels for a development set (`split` 1), so we can load them again now: ###Code from snorkel.annotations import load_gold_labels L_gold_dev = load_gold_labels(session, annotator_name='gold', split=1) ###Output _____no_output_____ ###Markdown Creating and Modeling a Noisy Training SetOur biggest step in the data programming pipeline is the creation - _and modeling_ - of a noisy training set. We'll approach this in three main steps:1. **Creating labeling functions (LFs):** This is where most of our development time would actually go into if this were a real application. Labeling functions encode our heuristics and weak supervision signals to generate (noisy) labels for our training candidates.2. **Applying the LFs:** Here, we actually use them to label our candidates!3. **Training a generative model of our training set:** Here we learn a model over our LFs, learning their respective accuracies automatically. 
This will allow us to combine them into a single, higher-quality label set.We'll also add some detail on how to go about _developing labeling functions_ and then _debugging our model_ of them to improve performance. 1. Creating Labeling FunctionsIn Snorkel, our primary interface through which we provide training signal to the end extraction model we are training is by writing **labeling functions (LFs)** (as opposed to hand-labeling massive training sets). We'll go through some examples for our spouse extraction task below.A labeling function isn't anything special. It's just a Python function that accepts a `Candidate` as the input argument and returns `1` if it says the `Candidate` should be marked as true, `-1` if it says the `Candidate` should be marked as false, and `0` if it doesn't know how to vote and abstains. In practice, many labeling functions are unipolar: it labels only `1`s and `0`s, or it labels only `-1`s and `0`s.Recall that our goal is to ultimately train a high-performance classification model that predicts which of our `Candidate`s are true mentions of spouse relations. It turns out that we can do this by writing potentially low-quality labeling functions! ###Code import re from snorkel.lf_helpers import ( get_left_tokens, get_right_tokens, get_between_tokens, get_text_between, get_tagged_text, ) ###Output _____no_output_____ ###Markdown Pattern-based LFsThese LFs express some common sense text patterns which indicate that a person pair might be married. For example, `LF_husband_wife` looks for words in `spouses` between the person mentions, and `LF_same_last_name` checks to see if the two people have the same last name (but aren't the same whole name). ###Code from random import randint print(randint(0, 6)) #spouses = {'spouse', 'wife', 'husband', 'ex-wife', 'ex-husband'} #spouses = {'spouse', 'wife', 'ex-wife','ex-husband'} # one fourth #spouses = { 'wife', 'ex-wife','ex-husband'} #half #spouses = { 'husband','ex-wife'} # three fourth -2 spouses = { 'ex-wife'} # three fourth #family = {'father', 'mother', 'sister', 'brother', 'son', 'daughter', # 'grandfather', 'grandmother', 'uncle', 'aunt', 'cousin'} #family = {'father', 'mother', 'brother', 'daughter', # 'grandfather', 'grandmother', 'aunt', 'cousin'} #one fourth #family = { 'mother', 'brother', # 'grandfather', 'aunt', 'cousin'} #half family = { 'brother', 'grandfather', 'aunt'} #three fourth family = family | {f + '-in-law' for f in family} #other = {'boyfriend', 'girlfriend', 'boss', 'employee', 'secretary', 'co-worker'} #other = {'boyfriend', 'girlfriend', 'employee', 'secretary' } # one fourth #other = {'boyfriend', 'employee', 'secretary' } # half #other = {'girlfriend','secretary' } # three fourth -2 other = {'secretary'} # Helper function to get last name def last_name(s): name_parts = s.split(' ') return name_parts[-1] if len(name_parts) > 1 else None def LF_husband_wife(c): return 1 if len(spouses.intersection(get_between_tokens(c))) > 0 else 0 def LF_husband_wife_left_window(c): if len(spouses.intersection(get_left_tokens(c[0], window=2))) > 0: return 1 elif len(spouses.intersection(get_left_tokens(c[1], window=2))) > 0: return 1 else: return 0 def LF_same_last_name(c): p1_last_name = last_name(c.person1.get_span()) p2_last_name = last_name(c.person2.get_span()) if p1_last_name and p2_last_name and p1_last_name == p2_last_name: if c.person1.get_span() != c.person2.get_span(): return 1 return 0 def LF_no_spouse_in_sentence(c): return -1 if np.random.rand() < 0.75 and 
len(spouses.intersection(c.get_parent().words)) == 0 else 0 def LF_and_married(c): return 1 if 'and' in get_between_tokens(c) and 'married' in get_right_tokens(c) else 0 def LF_familial_relationship(c): return -1 if len(family.intersection(get_between_tokens(c))) > 0 else 0 def LF_family_left_window(c): if len(family.intersection(get_left_tokens(c[0], window=2))) > 0: return -1 elif len(family.intersection(get_left_tokens(c[1], window=2))) > 0: return -1 else: return 0 def LF_other_relationship(c): return -1 if len(other.intersection(get_between_tokens(c))) > 0 else 0 ###Output _____no_output_____ ###Markdown Distant Supervision LFs In addition to writing labeling functions that describe text pattern-based heuristics for labeling training examples, we can also write labeling functions that distantly supervise examples. Here, we'll load in a list of known spouse pairs and check to see if the candidate pair matches one of these. ###Code import bz2 # Function to remove special characters from text def strip_special(s): return ''.join(c for c in s if ord(c) < 128) # Read in known spouse pairs and save as set of tuples with bz2.BZ2File('data/spouses_dbpedia.csv.bz2', 'rb') as f: known_spouses = set( tuple(strip_special(x).strip().split(',')) for x in f.readlines() ) # Last name pairs for known spouses last_names = set([(last_name(x), last_name(y)) for x, y in known_spouses if last_name(x) and last_name(y)]) def LF_distant_supervision(c): p1, p2 = c.person1.get_span(), c.person2.get_span() return 1 if (p1, p2) in known_spouses or (p2, p1) in known_spouses else 0 def LF_distant_supervision_last_names(c): p1, p2 = c.person1.get_span(), c.person2.get_span() p1n, p2n = last_name(p1), last_name(p2) return 1 if (p1 != p2) and ((p1n, p2n) in last_names or (p2n, p1n) in last_names) else 0 ###Output _____no_output_____ ###Markdown For later convenience we group the labeling functions into a list. ###Code LFs = [ LF_distant_supervision, LF_distant_supervision_last_names, LF_husband_wife, LF_husband_wife_left_window, LF_same_last_name, LF_no_spouse_in_sentence, LF_and_married, LF_familial_relationship, LF_family_left_window, LF_other_relationship ] ###Output _____no_output_____ ###Markdown Developing Labeling FunctionsAbove, we've written a bunch of labeling functions already, which should give you some sense about how to go about it. While writing them, we probably want to check to make sure that they at least work as intended before adding to our set. Suppose we're thinking about writing a simple LF: ###Code def LF_wife_in_sentence(c): """A simple example of a labeling function""" return 1 if 'wife' in c.get_parent().words else 0 ###Output _____no_output_____ ###Markdown One simple thing we can do is quickly test it on our development set (or any other set), without saving it to the database. This is simple to do. For example, we can easily get every candidate that this LF labels as true: ###Code labeled = [] print("Number of candidates:", session.query(Spouse).filter(Spouse.split == 1).count()) for c in session.query(Spouse).filter(Spouse.split == 1).all(): if LF_wife_in_sentence(c) != 0: labeled.append(c) print("Number labeled:", len(labeled)) ###Output ('Number of candidates:', 2796) ('Number labeled:', 2796) ###Markdown We can then easily put this into the Viewer as usual (try it out!):```SentenceNgramViewer(labeled, session)```We also have a simple helper function for getting the empirical accuracy of a single LF with respect to the development set labels for example. 
This function also returns the evaluation buckets of the candidates (true positive, false positive, true negative, false negative): ###Code from snorkel.lf_helpers import test_LF tp, fp, tn, fn = test_LF(session, LF_wife_in_sentence, split=1, annotator_name='gold') from snorkel.lf_helpers import test_LF for lf in LFs: print lf.__name__ tp, fp, tn, fn = test_LF(session, lf, split=1, annotator_name='gold') ###Output LF_distant_supervision ======================================== Scores (Un-adjusted) ======================================== Pos. class accuracy: 1.0 Neg. class accuracy: 0.0 Precision 0.667 Recall 1.0 F1 0.8 ---------------------------------------- TP: 2 | FP: 1 | TN: 0 | FN: 0 ======================================== LF_distant_supervision_last_names ======================================== Scores (Un-adjusted) ======================================== Pos. class accuracy: 1.0 Neg. class accuracy: 0.0 Precision 0.389 Recall 1.0 F1 0.56 ---------------------------------------- TP: 7 | FP: 11 | TN: 0 | FN: 0 ======================================== LF_husband_wife ======================================== Scores (Un-adjusted) ======================================== Pos. class accuracy: 1.0 Neg. class accuracy: 0.0 Precision 0.377 Recall 1.0 F1 0.548 ---------------------------------------- TP: 97 | FP: 160 | TN: 0 | FN: 0 ======================================== LF_husband_wife_left_window ======================================== Scores (Un-adjusted) ======================================== Pos. class accuracy: 1.0 Neg. class accuracy: 0.0 Precision 0.439 Recall 1.0 F1 0.61 ---------------------------------------- TP: 86 | FP: 110 | TN: 0 | FN: 0 ======================================== LF_same_last_name ======================================== Scores (Un-adjusted) ======================================== Pos. class accuracy: 1.0 Neg. class accuracy: 0.0 Precision 0.279 Recall 1.0 F1 0.437 ---------------------------------------- TP: 19 | FP: 49 | TN: 0 | FN: 0 ======================================== LF_no_spouse_in_sentence ======================================== Scores (Un-adjusted) ======================================== Pos. class accuracy: 0.0 Neg. class accuracy: 1.0 Precision 0.0 Recall 0.0 F1 0.0 ---------------------------------------- TP: 0 | FP: 0 | TN: 1688 | FN: 53 ======================================== LF_and_married ======================================== Scores (Un-adjusted) ======================================== Pos. class accuracy: 1.0 Neg. class accuracy: 0.0 Precision 0.125 Recall 1.0 F1 0.222 ---------------------------------------- TP: 1 | FP: 7 | TN: 0 | FN: 0 ======================================== LF_familial_relationship ======================================== Scores (Un-adjusted) ======================================== Pos. class accuracy: 0.0 Neg. class accuracy: 1.0 Precision 0.0 Recall 0.0 F1 0.0 ---------------------------------------- TP: 0 | FP: 0 | TN: 314 | FN: 15 ======================================== LF_family_left_window ======================================== Scores (Un-adjusted) ======================================== Pos. class accuracy: 0.0 Neg. class accuracy: 1.0 Precision 0.0 Recall 0.0 F1 0.0 ---------------------------------------- TP: 0 | FP: 0 | TN: 208 | FN: 1 ======================================== LF_other_relationship ======================================== Scores (Un-adjusted) ======================================== Pos. class accuracy: 0.0 Neg. 
class accuracy: 1.0 Precision 0.0 Recall 0.0 F1 0.0 ---------------------------------------- TP: 0 | FP: 0 | TN: 20 | FN: 4 ======================================== LF_dont_rely_on_LF ======================================== Scores (Un-adjusted) ======================================== Pos. class accuracy: 0.0 Neg. class accuracy: 1.0 Precision 0.0 Recall 0.0 F1 0.0 ---------------------------------------- TP: 0 | FP: 0 | TN: 2600 | FN: 196 ======================================== ###Markdown 2. Applying the Labeling FunctionsNext, we need to actually run the LFs over all of our training candidates, producing a set of `Labels` and `LabelKeys` (just the names of the LFs) in the database. We'll do this using the `LabelAnnotator` class, a UDF which we will again run with `UDFRunner`. **Note that this will delete any existing `Labels` and `LabelKeys` for this candidate set.** We start by setting up the class: ###Code from snorkel.annotations import LabelAnnotator labeler = LabelAnnotator(lfs=LFs) ###Output _____no_output_____ ###Markdown Finally, we run the `labeler`. Note that we set a random seed for reproducibility, since some of the LFs involve random number generators. Again, this can be run in parallel, given an appropriate database like Postgres is being used: ###Code import numpy as np np.random.seed(1701) %time L_train = labeler.apply(split=0) L_train ###Output Clearing existing... Running UDF... [========================================] 100% CPU times: user 4min 32s, sys: 677 ms, total: 4min 33s Wall time: 4min 33s ###Markdown If we've already created the labels (saved in the database), we can load them in as a sparse matrix here too: ###Code L_train = labeler.load_matrix(session, split=0) L_train ###Output _____no_output_____ ###Markdown Note that the returned matrix is a special subclass of the `scipy.sparse.csr_matrix` class, with some special features which we demonstrate below: ###Code L_train.get_candidate(session, 0) L_train.get_key(session, 0) ###Output _____no_output_____ ###Markdown We can also view statistics about the resulting label matrix.* **Coverage** is the fraction of candidates that the labeling function emits a non-zero label for.* **Overlap** is the fraction candidates that the labeling function emits a non-zero label for and that another labeling function emits a non-zero label for.* **Conflict** is the fraction candidates that the labeling function emits a non-zero label for and that another labeling function emits a *conflicting* non-zero label for. ###Code L_train.lf_stats(session) #original L_train.lf_stats(session) #removed one fourth L_train.lf_stats(session) #removed half ###Output _____no_output_____ ###Markdown 3. Fitting the Generative ModelNow, we'll train a model of the LFs to estimate their accuracies. Once the model is trained, we can combine the outputs of the LFs into a single, noise-aware training label set for our extractor. Intuitively, we'll model the LFs by observing how they overlap and conflict with each other. ###Code from snorkel.learning import GenerativeModel gen_model = GenerativeModel() gen_model.train(L_train, epochs=100, decay=0.95, step_size=0.1 / L_train.shape[0], reg_param=1e-6) gen_model.weights.lf_accuracy ###Output _____no_output_____ ###Markdown We now apply the generative model to the training candidates to get the noise-aware training label set. 
We'll refer to these as the training marginals: ###Code train_marginals = gen_model.marginals(L_train) ###Output _____no_output_____ ###Markdown We'll look at the distribution of the training marginals: ###Code import matplotlib.pyplot as plt plt.hist(train_marginals, bins=20) plt.show() ###Output _____no_output_____ ###Markdown We can view the learned accuracy parameters, and other statistics about the LFs learned by the generative model: ###Code gen_model.learned_lf_stats() train_marginals = gen_model.marginals(L_train) #one fourth removed from snorkel.learning import GenerativeModel gen_model = GenerativeModel() gen_model.train(L_train, epochs=100, decay=0.95, step_size=0.1 / L_train.shape[0], reg_param=1e-6) print gen_model.weights.lf_accuracy train_marginals = gen_model.marginals(L_train) import matplotlib.pyplot as plt plt.hist(train_marginals, bins=20) plt.show() gen_model.learned_lf_stats() #half removed from snorkel.learning import GenerativeModel gen_model = GenerativeModel() gen_model.train(L_train, epochs=100, decay=0.95, step_size=0.1 / L_train.shape[0], reg_param=1e-6) print gen_model.weights.lf_accuracy train_marginals = gen_model.marginals(L_train) import matplotlib.pyplot as plt plt.hist(train_marginals, bins=20) plt.show() gen_model.learned_lf_stats() #three fourth removed from snorkel.learning import GenerativeModel gen_model = GenerativeModel() gen_model.train(L_train, epochs=100, decay=0.95, step_size=0.1 / L_train.shape[0], reg_param=1e-6) print gen_model.weights.lf_accuracy train_marginals = gen_model.marginals(L_train) import matplotlib.pyplot as plt plt.hist(train_marginals, bins=20) plt.show() gen_model.learned_lf_stats() #three fourth removed -2 from snorkel.learning import GenerativeModel gen_model = GenerativeModel() gen_model.train(L_train, epochs=100, decay=0.95, step_size=0.1 / L_train.shape[0], reg_param=1e-6) print gen_model.weights.lf_accuracy train_marginals = gen_model.marginals(L_train) import matplotlib.pyplot as plt plt.hist(train_marginals, bins=20) plt.show() gen_model.learned_lf_stats() ###Output Inferred cardinality: 2 [ 0.07431849 0.07211436 0.09703035 0.09238262 0.06944212 0.81429487 0.07493895 0.08754179 0.08477781 0.08350195] ###Markdown Using the Model to Iterate on Labeling FunctionsNow that we have learned the generative model, we can stop here and use this to potentially debug and/or improve our labeling function set. First, we apply the LFs to our development set: ###Code L_dev = labeler.apply_existing(split=1) ###Output Clearing existing... Running UDF... [========================================] 100% ###Markdown And finally, we get the score of the generative model: ###Code tp, fp, tn, fn = gen_model.error_analysis(session, L_dev, L_gold_dev) # one fourth removed L_dev = labeler.apply_existing(split=1) tp, fp, tn, fn = gen_model.error_analysis(session, L_dev, L_gold_dev) # half removed L_dev = labeler.apply_existing(split=1) tp, fp, tn, fn = gen_model.error_analysis(session, L_dev, L_gold_dev) # three fourth removed L_dev = labeler.apply_existing(split=1) tp, fp, tn, fn = gen_model.error_analysis(session, L_dev, L_gold_dev) # three fourth removed -2 L_dev = labeler.apply_existing(split=1) tp, fp, tn, fn = gen_model.error_analysis(session, L_dev, L_gold_dev) ###Output Clearing existing... Running UDF... [========================================] 100% ======================================== Scores (Un-adjusted) ======================================== Pos. class accuracy: 0.245 Neg. 
class accuracy: 0.957 Precision 0.298 Recall 0.245 F1 0.269 ---------------------------------------- TP: 48 | FP: 113 | TN: 2487 | FN: 148 ======================================== ###Markdown Interpreting Generative Model PerformanceAt this point, we should be getting an F1 score of around 0.4 to 0.5 on the development set, which is pretty good! However, we should be very careful in interpreting this. Since we developed our labeling functions using this development set as a guide, and our generative model is composed of these labeling functions, we expect it to score very well here! In fact, it is probably somewhat _overfit_ to this set. However this is fine, since in the next tutorial, we'll train a more powerful end extraction model which will generalize beyond the development set, and which we will evaluate on a _blind_ test set (i.e. one we never looked at during development). Doing Some Error AnalysisAt this point, we might want to look at some examples in one of the error buckets. For example, one of the false negatives that we did not correctly label as true mentions. To do this, we can again just use the `Viewer`: ###Code from snorkel.viewer import SentenceNgramViewer # NOTE: This if-then statement is only to avoid opening the viewer during automated testing of this notebook # You should ignore this! import os if 'CI' not in os.environ: sv = SentenceNgramViewer(fn, session) else: sv = None sv ###Output _____no_output_____ ###Markdown We can save the best model from the hyperparameter search with a custom name so that we can reload it later. ###Code c = sv.get_selected() if sv else list(fp.union(fn))[0] c ###Output _____no_output_____ ###Markdown We can easily see the labels that the LFs gave to this candidate using simple ORM-enabled syntax: ###Code c.labels ###Output _____no_output_____ ###Markdown We can also now explore some of the additional functionalities of the `lf_stats` method for our dev set LF labels, `L_dev`: we can plug in the gold labels that we have, and the accuracies that our generative model has learned: ###Code L_dev.lf_stats(session, L_gold_dev, gen_model.learned_lf_stats()['Accuracy']) ###Output _____no_output_____ ###Markdown Note that for labeling functions with low coverage, our learned accuracies are closer to our prior of 70% accuracy. Saving our training labelsFinally, we'll save the `training_marginals`, which are our **probabilistic training labels**, so that we can use them in the next tutorial to train our end extraction model: ###Code from snorkel.annotations import save_marginals %time save_marginals(session, L_train, train_marginals) ###Output Saved 22195 marginals CPU times: user 11.8 s, sys: 24.6 ms, total: 11.8 s Wall time: 11.8 s
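###Markdown In the next notebook these probabilistic labels can be pulled back out of the database. A one-line sketch, assuming the `load_marginals` helper that ships with this version of Snorkel: ###Code
from snorkel.annotations import load_marginals

# reload the probabilistic training labels saved above
train_marginals = load_marginals(session, split=0)
###Output _____no_output_____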
labs/lab4/.ipynb_checkpoints/DATA3401.2021.Fall.Lab4.Instructions-checkpoint.ipynb
###Markdown Lab 4 &ndash; DATA 3401 (Fall 2021) Lab Dates: 9/24 & 10/1 Due Date 10/8 (before the beginning of lab) Lab DescriptionThe purpose of this lab is for you to code your own tic-tac-toe game that takes input from the user for each player and visualizes the state of the board after each move- As in the previous labs, copy and paste the cells below into a jupyter notebook titled Lab 4- Solve the problems and push them to a new Lab 4 folder in your GitHub repo prior to the deadlineYou will build an n x n Tic Tac Toe game. As you do the exercises, make sure your solutions work for any size Tic Tac Toe game, not just the standard 3x3. Exercise 1:Write a function that creates an n by n matrix (a list of lists) which will represent the state of a Tic Tac Toe game. Let 0, 1, and 2 represent empty, "X", or "O". ###Code #import Random p1 = 1 p2 = 2 empty = 0 n = 3 board = list() for i in range(n): row = list() for j in range(n): row.append(empty) board.append(row) for row in board: print(row) # Test your solution here tic_tac_toe = TicTacToe() ###Output _____no_output_____ ###Markdown Exercise 2:Write a function that takes a `n` by `n` matrix representing a tic-tac-toe game, and returns -1, 0, 1, or 2 indicating the game is incomplete, the game is a draw, player 1 has won, or player 2 has one, respectively. Here are some example inputs you can use to test your code: ###Code import numpy as np winner_is_2 = [[2, 2, 0], [2, 1, 0], [2, 1, 1]] winner_is_1 = [[1, 2, 0], [2, 1, 0], [2, 1, 1]] winner_is_also_1 = [[0, 1, 0], [2, 1, 0], [2, 1, 1]] no_winner = [[1, 2, 0], [2, 1, 0], [2, 1, 2]] also_no_winner = [[1, 2, 0], [2, 1, 0], [2, 1, 0]] draw = [[1, 2, 1], [2, 1, 1], [2, 1, 2]] # Write you solution here n = 3 def game_state(board): #checking for the row for row in board: if len(set(row))==1 and row[0]==1: return "Winner is 1" newboard = np.transpose(board) #print(board) #print(newboard) for column in newboard: if len(set(column)) == 1 and column[0]==2: return "Winner is 2" #checking for diagonal if len(set([board[i][i] for i in range(len(board))])) == 1 and board[0][0]==1 or (len(set([board[i][len(board)-i-1] for i in range(len(board))])) == 1 and board[0][len(board)-1]==1): return "Winner is 1" elif len(set([board[i][i] for i in range(len(board))])) == 1 and board[0][0]==2 or (len(set([board[i][len(board)-i-1] for i in range(len(board))])) == 1 and board[0][len(board)-1])==1: return "Winner is 2" for row in board: if 0 in row: return "no winner" else: return "draw" # Test your solution here print(game_state(winner_is_2)) print(game_state(winner_is_1)) print(game_state(no_winner)) print(game_state(draw)) ###Output Winner is 2 Winner is 1 no winner draw ###Markdown Exercise 3Write a function that takes 2 integers `n` and `m` as input and draws a `n` by `m` game board. For example the following is a 3x3 board:``` --- --- --- | | | | --- --- --- | | | | --- --- --- | | | | --- --- --- ``` ###Code # Write you solution here # Test your solution here ###Output _____no_output_____ ###Markdown Exercise 4:Modify exercise 3, so that it takes a matrix of the form from exercise 2 and draws a tic-tac-tie board with "X"s and "O"s. ###Code # Write you solution here # Test your solution here ###Output _____no_output_____ ###Markdown Exercise 5:Write a function that takes a game board, player number, and `(x,y)` coordinates and places "X" or "O" in the correct location of the game board. Make sure that you only allow filling previously empty locations. 
Return `True` or `False` to indicate successful placement of "X" or "O". ###Code # Write you solution here # Test your solution here ###Output _____no_output_____ ###Markdown Exercise 6:Modify Exercise 5 to show column and row labels so that players can specify location using "A2" or "C1". ###Code # Write you solution here # Test your solution here ###Output _____no_output_____ ###Markdown Exercise 7:Write a function that takes a board, player number, and location specified as in exercise 6 and then calls exercise 5 to correctly modify the board. ###Code # Write you solution here # Test your solution here ###Output _____no_output_____ ###Markdown Exercise 8:Write a function is called with a board and player number, takes input from the player using python's `input`, and modifies the board using your function from exercise 7. Note that you should keep asking for input until you have gotten a valid input that results in a valid move. ###Code # Write you solution here # Test your solution here ###Output _____no_output_____ ###Markdown Exercise 9: Use all of the previous exercises to implement a full tic-tac-toe game, where an appropriate board is drawn, 2 players are repeatedly asked for a location coordinates of where they wish to place a mark, and the game status is checked until a player wins or a draw occurs. ###Code # Write you solution here # Test your solution here ###Output _____no_output_____ ###Markdown Exercise 10:Test that your game works for 5x5 Tic Tac Toe. ###Code # Test your solution here ###Output _____no_output_____ ###Markdown Exercise 11:(Extra Credit)Develop a version of the game where one player is the computer. Note that you don't need to do an extensive seach for the best move. You can have the computer simply protect against loosing and otherwise try to win with straight or diagonal patterns. ###Code # Write you solution here # Test your solution here ###Output _____no_output_____
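###Markdown For reference when checking output formatting, here is one possible sketch of the plain `n` by `m` grid described in Exercise 3. It only illustrates the expected layout; your own solution may be structured differently and should still be written in the exercise cells above. ###Code
def draw_empty_board(n, m):
    # print an n-row by m-column grid in the dashes-and-pipes style shown in Exercise 3
    for _ in range(n):
        print(' ---' * m)
        print('|   ' * m + '|')
    print(' ---' * m)

draw_empty_board(3, 3)
###Output _____no_output_____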
econometrics-with-python/_build/jupyter_execute/ch02/ch02_1.ipynb
###Markdown 2.1 Random Variables and Probability Distributions[Book Link](https://www.econometrics-with-r.org/2-1-random-variables-and-probability-distributions.html) "We can use it to simulate the random outcome of a dice roll. Let’s roll the dice!" Note that `np.random.randint` excludes its upper bound, so the call below uses 7 to make a roll of 6 possible. ###Code import numpy as np print(np.random.randint(1,7)) ###Output 5
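###Markdown To see that each face comes up with roughly equal probability, we can simulate many rolls and tabulate the relative frequencies. This is a small sketch using only NumPy; the sample size of 10,000 is an arbitrary choice. ###Code
import numpy as np

rolls = np.random.randint(1, 7, size=10000)  # 10,000 simulated dice rolls
values, counts = np.unique(rolls, return_counts=True)
for value, count in zip(values, counts):
    print(value, count / rolls.size)  # each relative frequency should be close to 1/6
###Output _____no_output_____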
analysis scripts/Language_model.ipynb
###Markdown Check to see if jupyter lab uses the correct python interpreter with '!which python'.It should be something like '/opt/anaconda3/envs/[environment name]/bin/python' (on Mac).If not, try this: https://github.com/jupyter/notebook/issues/3146#issuecomment-352718675 ###Code import sys sys.executable #!which python #which does not seem to work on Windows ###Output _____no_output_____ ###Markdown Install dependencies: ###Code install_packages = False if install_packages: !conda install tensorflow=2 -y !conda install -c anaconda pandas -y !conda install -c conda-forge html2text -y !conda install -c conda-forge tensorflow-hub -y # !conda install -c akode html2text -y !conda install -c conda-forge tqdm -y !conda install -c anaconda scikit-learn -y !conda install -c conda-forge matplotlib -y !conda install -c anaconda seaborn -y print("Done") ###Output _____no_output_____ ###Markdown Imports ###Code #imports import pandas as pd import numpy as np import os import time import tensorflow as tf import tensorflow_hub as hub import zipfile from html2text import HTML2Text from tqdm import tqdm import re from sklearn.metrics import pairwise_distances from sklearn.preprocessing import normalize import matplotlib.pyplot as plt import seaborn as sns ###Output _____no_output_____ ###Markdown Set pandas print optionsThis will improve readability of printed pandas dataframes. ###Code pd.set_option('display.max_rows', None) pd.set_option('display.max_columns', None) pd.set_option('display.width', None) pd.set_option('display.max_colwidth', None) ###Output _____no_output_____ ###Markdown Set global ParametersSet your parameters here:data_path: In this path put the data you have downloaded with YouTube Data Tools. output_path: The files generated in this notebook will be saved here.url_dict: URLs to models on Tensorflow hub are saved here. Other models are available there.model_type: Define which model you would like to use. Choose one from url_dictnew_embeddings: If this is true, new embeddings will be generated and saved at output_path. Otherwise, embeddings are loaded from disk. 
###Code path = os.path.abspath("..") data_path = path + "\\data_raw\\all_comments.csv" output_path = "./output/" print("input path: " + data_path) new_embeddings = True url_dict = { 'Transformer' : "https://tfhub.dev/google/universal-sentence-encoder-large/5", 'DAN' : "https://tfhub.dev/google/universal-sentence-encoder/4", 'Transformer_Multilingual': "https://tfhub.dev/google/universal-sentence-encoder-multilingual-large/3" } model_type = 'DAN' #@param ['DAN','Transformer','Transformer_Multilingual'] ###Output input path: C:\Users\moritz\Downloads\social analizing\project\git\social_media_youtube_analysis_project\data_raw\all_comments.csv ###Markdown Create output directoryTry to create the directory defined by output_path ###Code try: os.mkdir(output_path) except OSError: print ("Creation of the directory %s failed" % output_path) else: print ("Successfully created the directory %s " % output_path) ###Output Creation of the directory ./output/ failed ###Markdown Load DataLoad you data as a pandas dataframe ###Code if new_embeddings: data = pd.read_csv(data_path,header=(0)) data.head() ###Output _____no_output_____ ###Markdown PreprocessingPreprocess your data:- Drop empty rows- Drop unused columns ###Code if new_embeddings: data = data.dropna(subset=['text', 'author_name']) # drop rows with no content data=data.drop(['id','likeCount'],axis=1)#, 'replyCount','authorChannelUrl','authorChannelId','isReplyTo','isReplyToName'] # drop unused columns data.head() ###Output _____no_output_____ ###Markdown - remove HTML-tags, links and usernames ###Code if new_embeddings: # Remove HTML tags tqdm.pandas() h = HTML2Text() h.ignore_links = True data['cleaned'] = data['text'].progress_apply(lambda x: h.handle(x)) print( "Removed HTML Tags.") # Remove links http_link_pattern = r'http\S+' bitly_link_pattern = r'bit.ly/\S+' data['cleaned'] = data['cleaned'].str.replace(http_link_pattern, '') data['cleaned'] = data['cleaned'].str.replace(bitly_link_pattern, '') print( "Removed Links.") # Remove user names keep_names = ["earth", "Tide", "Geologist", "A Person", "Titanic", "adventure", "Sun", "The United States Of America"] # user names we want to keep user_names = [name for name in data['author_name'].unique() if (len(name)> 3 and name not in keep_names)] data['cleaned'] = data['cleaned'].str.replace('|'.join(map(re.escape, user_names)), '') print( "Removed user names.") ###Output 100%|███████████████████████████████████████████████████████████████████████████| 9683/9683 [00:00<00:00, 11535.74it/s] ###Markdown Save or Load preprocessed dataSave your data afte preprocessing, or load preprocessed data from disc. ###Code if new_embeddings: data.to_pickle(output_path+'data_preprocessed'+'.pkl') else: data = pd.read_pickle(output_path+'data_preprocessed'+'.pkl') data.head() ###Output _____no_output_____ ###Markdown Produce Text Embeddings with Universal Sentence Encoder Load ModelLoad the model from TF-hub ###Code hub_url = url_dict[model_type] if new_embeddings: print("Loading model. This will take some time...") embed = hub.load(hub_url) ###Output Loading model. This will take some time... ###Markdown Embed DocumentsProduce embeddings of your documents. 
###Code if new_embeddings: for k,g in data.groupby(np.arange(len(data))//200): if k == 0: embeddings = embed(g['cleaned']) else: embeddings_new = embed(g['cleaned']) embeddings = tf.concat(values=[embeddings,embeddings_new],axis = 0) print(k , end =" ") print("The embeddings vector is of fixed length {}".format(embeddings.shape[1])) np.save(output_path+'/embeddings'+model_type+'.npy', embeddings, allow_pickle=True, fix_imports=True) else: embeddings = np.load(output_path+'/embeddings'+model_type+'.npy', mmap_mode=None, allow_pickle=False, fix_imports=True, encoding='ASCII') embeddings.shape ###Output 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 The embeddings vector is of fixed length 512 ###Markdown Calculate Similarity Matrix with angular distance'Following Cer et al. (2018), we first compute the sentence embeddings u, v for an STS sentence pair, and then score the sentence pair similarity based on the angular distance between the two embedding vectors d = − arccos(u·v / (||u|| ||v||)).' (The code below scores each pair with the cosine similarity u·v / (||u|| ||v||) itself; the arccos conversion to an angular distance is not applied.) ###Code from sklearn.metrics.pairwise import cosine_similarity def cos_sim(input_vectors): similarity = cosine_similarity(input_vectors) return similarity cosine_similarity_matrix = cos_sim(np.array(embeddings)) print(cosine_similarity_matrix) ###Output [[1.0000001 0.18648897 0.12007856 ... 0.20715514 0.3323246 0.08807059] [0.18648897 0.99999976 0.05149069 ... 0.03287233 0.11378209 0.08108065] [0.12007856 0.05149069 1. ... 0.20166531 0.20372054 0.07250679] ... [0.20715514 0.03287233 0.20166531 ... 0.9999999 0.37336153 0.14946935] [0.3323246 0.11378209 0.20372054 ... 0.37336153 1.0000002 0.07230518] [0.08807059 0.08108065 0.07250679 ... 0.14946935 0.07230518 1. ]] ###Markdown Plots Similarity Plot and print a heat map showing the semantic contextual similarity between comments. 
###Code def get_top_similar(sentence, sentence_list, similarity_matrix, topN): # find the index of sentence in list index = sentence_list.index(sentence) # get the corresponding row in similarity matrix similarity_row = np.array(similarity_matrix[index, :]) # get the indices of top similar indices = similarity_row.argsort()[-topN:][::-1] return [sentence_list[i] for i in indices] for i, value in enumerate(get_top_similar(comment, comment_list, cosine_similarity_matrix, 20)): print("Top similar comment {}: {}".format(i+1, value)) ###Output Top similar comment 1: Gat this to trending! And leaders, do about it! Top similar comment 2: Gat this to trending! And leaders, do about it! Top similar comment 3: Gat this to trending! And leaders, do about it! Top similar comment 4: Gat this to trending! And leaders, do about it! Top similar comment 5: @ What leaders and how do they do that exactly? Top similar comment 6: @ What leaders and how do they do that exactly? Top similar comment 7: @ What leaders and how do they do that exactly? Top similar comment 8: @ What leaders and how do they do that exactly? Top similar comment 9: Hmm..why don't the global leaders sit down and come to a unanimous agreement to improve humanity.. Top similar comment 10: Hmm..why don't the global leaders sit down and come to a unanimous agreement to improve humanity.. Top similar comment 11: American Leadership at its best! Boycott MSM Top similar comment 12: All these leaders are left-wingers who were counting on extra cash from us. Top similar comment 13: "Lead and they will follow..." Top similar comment 14: "Lead and they will follow..." Top similar comment 15: "Lead and they will follow..." Top similar comment 16: OBAMA WOULDN'T KNOW LEADERSHIP IF IT CAME UP TO HIM AND.BIT HIM IN THE ASS Top similar comment 17: Stop listning to dmfp and fox!! Top similar comment 18: Stop listning to dmfp and fox!! Top similar comment 19: Stop listning to dmfp and fox!! Top similar comment 20: omg this guys an idiot. This is going to be a shit show thanks trump and all the dumbies that voted for him
build/colab_notebook/RM_Koala_Sprint_1.ipynb
###Markdown RoboMaster AI Challenge 1.0.0Armour localisation/identification with YOLO (You Only Look Once: one-shot object detection algorithm)> Framework: darknet https://github.com/AlexeyAB/darknet> Architecture:* YOLOv3, YOLOv4 - original model focus on superior performance* YOLOv3-tiny, YOLOv4-tiny - lightweight model suitable for mobile application > Requirements:* Internet connection* Google Drive with at least 500MB free space COMP90082 - Software Project - RM-koalaTeam member (alphabetical order):* Akhtar Kurniawan (Akhtar)* Che-Hao Chang (Ryan)* Isaac Pedroza (Isaac)* Jia Yin (Jia)* Sejin Kim (Kim) System checkup Mount your Google Drive ###Code %cd .. from google.colab import drive drive.mount('/content/gdrive') ###Output _____no_output_____ ###Markdown Verify GPU and CUDA driver ###Code !ln -sf /opt/bin/nvidia-smi /usr/bin/nvidia-smi !pip install gputil !pip install psutil import psutil import os import GPUtil as GPU GPUs = GPU.getGPUs() gpu = GPUs[0] process = psutil.Process(os.getpid()) print("GPU RAM {}MB".format(gpu.memoryTotal)) !/usr/local/cuda/bin/nvcc --version ###Output _____no_output_____ ###Markdown First run setup (Execute this section only for the first run) Clone darknet repo & Configure the runtime settings & Make the executable ###Code %cd content/gdrive/My\ Drive !git clone https://github.com/AlexeyAB/darknet %cd darknet !sed -i 's/OPENCV=0/OPENCV=1/' Makefile !sed -i 's/GPU=0/GPU=1/' Makefile !sed -i 's/CUDNN=0/CUDNN=1/' Makefile !sed -i 's/CUDNN_HALF=0/CUDNN_HALF=1/' Makefile !make ###Output _____no_output_____ ###Markdown File type overview>* .weight/.conv: Initial weight or trained weight file* .cfg: Configuration file, defines the network architecture* .data: Points to the data directory* .names: Defines the class labels Retrieve necessary files ###Code # weight files !wget https://github.com/cchia790411/rm_ai_challenge_2020s2_koala/raw/master/src/trained_weight/rm_koala/yolov4-tiny_armour.weights !wget https://github.com/cchia790411/rm_ai_challenge_2020s2_koala/raw/master/src/trained_weight/rm_koala/yolov4-tiny_pose.weights # loader files %cd data !wget https://github.com/cchia790411/rm_ai_challenge_2020s2_koala/raw/master/src/file_list/rm_armour_train.txt !wget https://github.com/cchia790411/rm_ai_challenge_2020s2_koala/raw/master/src/file_list/rm_pose_train.txt # image data !mkdir images %cd images !wget https://github.com/cchia790411/rm_ai_challenge_2020s2_koala/raw/master/src/images/training/200each.zip !unzip 200each.zip %cd ../.. # configuration files %cd cfg !wget https://github.com/cchia790411/rm_ai_challenge_2020s2_koala/raw/master/src/cfg/yolov4-tiny_2class_mod.cfg !wget https://github.com/cchia790411/rm_ai_challenge_2020s2_koala/raw/master/src/cfg/yolov4-tiny_3class.cfg !wget https://github.com/cchia790411/rm_ai_challenge_2020s2_koala/raw/master/src/cfg/rm_armour.data !wget https://github.com/cchia790411/rm_ai_challenge_2020s2_koala/raw/master/src/cfg/rm_armour.names !wget https://github.com/cchia790411/rm_ai_challenge_2020s2_koala/raw/master/src/cfg/rm_pose.data !wget https://github.com/cchia790411/rm_ai_challenge_2020s2_koala/raw/master/src/cfg/rm_pose.names %cd .. 
###Output _____no_output_____ ###Markdown Preparations ###Code %cd content/gdrive/My\ Drive/ ###Output _____no_output_____ ###Markdown Define image displayer & File uploader ###Code def imShow(path): import cv2 import matplotlib.pyplot as plt %matplotlib inline image = cv2.imread(path) height, width = image.shape[:2] resized_image = cv2.resize(image,(3*width, 3*height), interpolation = cv2.INTER_CUBIC) fig = plt.gcf() fig.set_size_inches(18, 10) plt.axis("off") plt.imshow(cv2.cvtColor(resized_image, cv2.COLOR_BGR2RGB)) plt.show() def upload(): from google.colab import files uploaded = files.upload() for name, data in uploaded.items(): with open(name, 'wb') as f: f.write(data) ###Output _____no_output_____ ###Markdown darknet framework command overview>Command format* !./darknet detector test [path to .data file] [path to config] [path to weights] [path to image]>Command line flags* -ext_output: output bounding box location* -thresh: Set confidence threshold for prediction* -out: Output result as json file* -map: Show mean average percision over training when there is a validation data set * -dont_show: Don't show prediction result in place ###Code %cd darknet !chmod +x ./darknet ###Output _____no_output_____ ###Markdown Tesing User Story 1 - Armour Localisation TC001: Armour Correctly Located ###Code !./darknet detector test cfg/rm_armour.data cfg/yolov4-tiny_2class_mod.cfg yolov4-tiny_armour.weights data/images/200each/blue_2_frame0060.jpg -dont_show imShow('predictions.jpg') ###Output _____no_output_____ ###Markdown TC002: No Armour Bounding Box is Drawn ###Code !./darknet detector test cfg/rm_armour.data cfg/yolov4-tiny_2class_mod.cfg yolov4-tiny_armour.weights data/images/robot_removed/red_1_frame0200_no_robot.jpg -dont_show imShow('predictions.jpg') ###Output _____no_output_____ ###Markdown User Story 2 - Armour Identitfication TC003: Robot Pose Correctly Identified ###Code !./darknet detector test cfg/rm_pose.data cfg/yolov4-tiny_3class.cfg yolov4-tiny_pose.weights data/images/200each/red_2_frame0100.jpg -dont_show imShow('predictions.jpg') ###Output _____no_output_____ ###Markdown TC004: No Robot Bounding Box is Drawn ###Code !./darknet detector test cfg/rm_pose.data cfg/yolov4-tiny_3class.cfg yolov4-tiny_pose.weights data/images/robot_removed/red_1_frame0200_no_robot.jpg -dont_show imShow('predictions.jpg') ###Output _____no_output_____ ###Markdown Extra - Test your own image 1. Upload your image ###Code upload() ###Output _____no_output_____ ###Markdown 2. Copy the following code into a new code block, replace "YOUR_FILE_NAME_HERE" with your file name (extension included) * Armour localisation```!./darknet detector test cfg/rm_armour.data cfg/yolov4-tiny_2class_mod.cfg yolov4-tiny_armour.weights YOUR_FILE_NAME_HEREimShow('predictions.jpg')```* Armour identification```!./darknet detector test cfg/rm_pose.data cfg/yolov4-tiny_3class.cfg yolov4-tiny_pose.weights YOUR_FILE_NAME_HEREimShow('predictions.jpg')``` 3. Execution ###Code !./darknet detector test cfg/rm_armour.data cfg/yolov4-tiny_2class_mod.cfg yolov4-tiny_armour.weights blue_1_frame0495.jpg imShow('predictions.jpg') ###Output _____no_output_____
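###Markdown The command line flags listed in the overview above can be combined in the same call, for example to raise the confidence threshold and print the bounding box coordinates. The 0.6 threshold is only an illustrative choice. ###Code
!./darknet detector test cfg/rm_armour.data cfg/yolov4-tiny_2class_mod.cfg yolov4-tiny_armour.weights blue_1_frame0495.jpg -thresh 0.6 -ext_output -dont_show
imShow('predictions.jpg')
###Output _____no_output_____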
plotting/negative_symptom_plot.ipynb
###Markdown Plot histogram of PANSS score distribution ###Code sns.set_theme(style="white", rc={"axes.facecolor": (0, 0, 0, 0)}) bins = np.histogram_bin_edges(panss_neg[panss_neg.columns[0]], 6, range=(1,6)) # Initialize the FacetGrid object pal = sns.cubehelix_palette(10, rot=-.25, light=0.5) g = sns.FacetGrid(panss_neg_melt, row="symptom", hue="symptom", aspect=15, height=1, palette=pal) # Draw the densities in a few steps g.map(sns.histplot, "score", clip_on = False, alpha = 1, linewidth = 1.5, stat = 'count', bins = 6, binrange = (1,6)) g.map(sns.histplot, "score", clip_on = False, linewidth = 2, stat = 'count', bins= 6, binrange = (1,6), element = 'step') g.map(plt.axhline, y=0, lw=0.7, clip_on=False, c = 'grey') ## Define and use a simple function to label the plot in axes coordinates def label(x, color, label): ax = plt.gca() ax.text(1, .2, label, fontweight="bold", color=color, ha="right", va="bottom", transform=ax.transAxes) def bins_labels(bins, **kwargs): bin_w = (max(bins) - min(bins)) / (len(bins) - 1) plt.xticks(np.arange(min(bins)+bin_w/2, max(bins), bin_w), bins, **kwargs) plt.xlim(bins[0], bins[-1]) g.map(label, "score") # Set the subplots to overlap g.fig.subplots_adjust(hspace= 0) # Remove axes details that don't play well with overlap g.set_titles("") g.set(xticks=[]) g.despine(bottom=True, left=False) g.set_xlabels('PANSS Score', fontsize = 15) g.set(xticks=[1, 2, 3, 4, 5, 6], yticks=[20, 40]) for i, ax in enumerate(g.axes.flat): # This only works for the left ylabels ax.yaxis.set_label_position("left") ax.yaxis.set_ticks_position('left') ax.set_yticklabels([20, 40], fontsize = 13) if i == 3: ax.set_ylabel('Count', fontsize = 15) if i == 6: ax.set_xticks([1.417, 2.250, 3.083, 3.917, 4.750, 5.583]) ax.set_xticklabels([1, 2, 3, 4, 5, 6], fontsize = 13) g.fig.suptitle('Distribution of Negative Symptoms', fontsize = 20) ###Output _____no_output_____
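###Markdown A quick numeric companion to the stacked histograms: per-symptom summary statistics of the PANSS scores, assuming the long-format `panss_neg_melt` frame used for plotting above. ###Code
# per-symptom score summary to accompany the figure
print(panss_neg_melt.groupby('symptom')['score'].describe())
###Output _____no_output_____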
lab-feature-store-weather/noaa-climatology-network-daily.ipynb
###Markdown NOAA Global Historical Climatology Network Daily (GHCN-D)Find out details about AWS OpenData dataset [here](https://registry.opendata.aws/noaa-ghcn/).Detailed documentation [here](https://docs.opendata.aws/noaa-ghcn-pds/readme.html). The yearly files are formatted so that every observation is represented by a single row with the following fields:- ID = 11 character station identification code. Please see ghcnd-stations section below for an explantation- YEAR/MONTH/DAY = 8 character date in YYYYMMDD format (e.g. 19860529 = May 29, 1986)- ELEMENT = 4 character indicator of element type- DATA VALUE = 5 character data value for ELEMENT- M-FLAG = 1 character Measurement Flag- Q-FLAG = 1 character Quality Flag- S-FLAG = 1 character Source Flag- OBS-TIME = 4-character time of observation in hour-minute format (i.e. 0700 =7:00 am) ELEMENT SummaryThe five core elements are:- PRCP = Precipitation (tenths of mm)- SNOW = Snowfall (mm)- SNWD = Snow depth (mm)- TMAX = Maximum temperature (tenths of degrees C)- TMIN = Minimum temperature (tenths of degrees C) More- AWND = Average daily wind speed (tenths of meters per second)- WSF2 = Fastest 2-minute wind speed (tenths of meters per second)- WSF5 = Fastest 5-second wind speed (tenths of meters per second)- TAVG = Average temperature (tenths of degrees C) [Note that TAVG from source ’S’ corresponds to an average for the period ending at 2400 UTC rather than local midnight] Download data ###Code !aws s3 ls s3://noaa-ghcn-pds/ --no-sign-request # !aws s3 ls s3://noaa-ghcn-pds/csv.gz/ --no-sign-request # !aws s3 cp s3://noaa-ghcn-pds/csv.gz/2020.csv.gz --no-sign-request . # !aws s3 cp s3://noaa-ghcn-pds/ghcnd-stations.txt --no-sign-request . # !aws s3 ls s3://noaa-ghcn-pds/csv/ --no-sign-request # !aws s3 cp s3://noaa-ghcn-pds/csv/2020.csv --no-sign-request . ###Output _____no_output_____ ###Markdown Explore NOAA data, and establish a dataframe for all of 2020 (34 million records) ###Code import pandas as pd !ls full_2020_df = pd.read_csv('2020.csv', header=None) full_2020_df.head() full_2020_df.shape ###Output _____no_output_____ ###Markdown Map airport codes to weather station ID'sThe daily NOAA weather data is keyed by its own Station ID's. While this is helpful, there are other applications that won't have a Station ID to drive a lookup. For our example, we want to be able to find the weather at a given airport. [Here](http://www.weathergraphics.com/identifiers/) is a Master Location Identifier Database with over 45,000 government weather station identifiers worldwide. They publish a free standard version as a CSV, [here](http://www.weathergraphics.com/identifiers/master-location-identifier-database-202103_standard.csv), along with corresponding [documentation](http://www.weathergraphics.com/identifiers/master-location-identifier-database-202103.pdf).In this section of the notebook, we prepare a dataframe to map between station ID's and airport ID's. About 3,000 of the MLID records have a complete mapping between NOAA station ID's, ICAO's, and IATA's.[Here](https://en.wikipedia.org/wiki/List_of_airports_by_IATA_and_ICAO_code) is the list of IATA and ICAO codes.Here are the NOAA station columns available according to the [documentation](https://www.ncdc.noaa.gov/ghcnd-data-access):- Station ID- latitude, longitude, elevation- 2-digit State code (if applicable)- and Station name. 
Download the free MLID mapping file ###Code !head -6 master-location-identifier-database-202103_standard.csv ###Output ###Markdown Strip off the top 4 lines, leaving the first line as the CSV header ###Code !tail -n +5 master-location-identifier-database-202103_standard.csv > mlid.csv !wc -l mlid.csv !head -3 mlid.csv ###Output ###Markdown Prepare a dataframe of all mapping rows that have a GHCN station ID, an ICAO (4-char) airport code, and a IATA (3-char) airport code ###Code mlid_df = pd.read_csv('mlid.csv', encoding = "ISO-8859-1") mlid_df[['station_name','national_id','icao']].head() mlid_df.shape mlid_df = mlid_df[['country3','region','place_name','station_name','icao','national_id','ghcn']] mlid_df.columns = ['country3','region','place_name','station_name','icao','iata','ghcn'] mlid_df.head() mlid_df = mlid_df.dropna(subset=['ghcn', 'icao', 'iata']) mlid_df.shape ###Output _____no_output_____ ###Markdown Explore ground stations metadata ###Code !head ghcnd-stations.txt ###Output ACW00011604 17.1167 -61.7833 10.1 ST JOHNS COOLIDGE FLD ACW00011647 17.1333 -61.7833 19.2 ST JOHNS AE000041196 25.3330 55.5170 34.0 SHARJAH INTER. AIRP GSN 41196 AEM00041194 25.2550 55.3640 10.4 DUBAI INTL 41194 AEM00041217 24.4330 54.6510 26.8 ABU DHABI INTL 41217 AEM00041218 24.2620 55.6090 264.9 AL AIN INTL 41218 AF000040930 35.3170 69.0170 3366.0 NORTH-SALANG GSN 40930 AFM00040938 34.2100 62.2280 977.2 HERAT 40938 AFM00040948 34.5660 69.2120 1791.3 KABUL INTL 40948 AFM00040990 31.5000 65.8500 1010.0 KANDAHAR AIRPORT 40990 ###Markdown Find all international airports ###Code !grep "INTL AP" ghcnd-stations.txt ###Output BDM00078016 32.3667 -64.6833 6.1 L F WADE INTL AP KINDLEY FLD GSN 78016 CQC00914855 15.1167 145.7167 65.5 MP SAIPAN INTL AP 91232 GQW00041415 13.4836 144.7961 77.4 GU GUAM INTL AP GSN 91212 TDM00078970 10.5830 -61.3500 12.2 PIARCO INTL AP 78970 USC00052212 39.8308 -104.6867 1633.7 CO DENVER INTL AP 2SW USW00003017 39.8328 -104.6575 1650.2 CO DENVER INTL AP 72565 USW00003102 34.0561 -117.6003 289.3 CA ONTARIO INTL AP USW00003196 31.4208 -110.8458 1198.5 AZ NOGALES INTL AP USW00003822 32.1300 -81.2100 14.0 GA SAVANNAH INTL AP HCN 72207 USW00003856 34.6439 -86.7861 190.2 AL HUNTSVILLE INTL AP 72323 USW00003940 32.3206 -90.0778 100.6 MS JACKSON INTL AP 72235 USW00003947 39.2972 -94.7306 306.3 MO KANSAS CITY INTL AP 72446 USW00004724 43.1072 -78.9453 178.3 NY NIAGARA FALLS INTL AP USW00004742 44.6500 -73.4667 71.9 NY PLATTSBURGH INTL AP USW00012815 28.4339 -81.3250 27.4 FL ORLANDO INTL AP 72205 USW00012834 29.1828 -81.0483 9.4 FL DAYTONA BEACH INTL AP 74787 USW00012836 24.5550 -81.7522 1.2 FL KEY WEST INTL AP GSN HCN 72201 USW00012838 28.1011 -80.6439 8.2 FL MELBOURNE INTL AP 72204 USW00012839 25.7906 -80.3164 8.8 FL MIAMI INTL AP 72202 USW00012842 27.9619 -82.5403 5.8 FL TAMPA INTL AP GSN 72211 USW00012843 27.6531 -80.2428 8.5 FL VERO BEACH INTL AP 74793 USW00012844 26.6847 -80.0994 5.8 FL WEST PALM BEACH INTL AP 72203 USW00012873 27.9106 -82.6875 3.4 FL ST PETERSBURG INTL AP USW00012895 27.4981 -80.3767 7.3 FL FT PIERCE ST LUCIE CO INTL AP USW00012916 29.9933 -90.2511 1.2 LA NEW ORLEANS INTL AP GSN 72231 USW00012921 29.5442 -98.4839 240.5 TX SAN ANTONIO INTL AP GSN HCN 72253 USW00012932 27.7411 -98.0247 52.7 TX ALICE INTL AP USW00012959 26.1839 -98.2539 30.5 TX MCALLEN MILLER INTL AP USW00013722 35.8922 -78.7819 126.8 NC RALEIGH DURHAM INTL AP GSN 72306 USW00013723 36.0969 -79.9433 271.3 NC PIEDMONT TRIAD INTL AP 72317 USW00013737 36.9033 -76.1922 9.1 VA NORFOLK INTL AP HCN 72308 USW00013739 
39.8683 -75.2311 3.0 PA PHILADELPHIA INTL AP 72408 USW00013740 37.5050 -77.3203 50.0 VA RICHMOND INTL AP 72401 USW00013748 34.2675 -77.8997 10.1 NC WILMINGTON INTL AP 72302 USW00013809 38.7642 -87.6056 130.8 IL LAWRENCEVILLE INTL AP USW00013874 33.6300 -84.4417 307.8 GA ATLANTA HARTSFIELD INTL AP 72219 USW00013880 32.8986 -80.0403 12.2 SC CHARLESTON INTL AP GSN 72208 USW00013893 35.0564 -89.9864 77.4 TN MEMPHIS INTL AP 72334 USW00013897 36.1189 -86.6892 182.9 TN NASHVILLE INTL AP 72327 USW00013968 36.1994 -95.8872 198.1 OK TULSA INTL AP 72356 USW00013994 38.7525 -90.3736 161.8 MO ST LOUIS LAMBERT INTL AP 72434 USW00014606 44.7978 -68.8186 45.1 ME BANGOR INTL AP 72607 USW00014609 46.1236 -67.7928 145.1 ME HOULTON INTL AP USW00014711 40.1936 -76.7633 95.1 PA MIDDLETOWN HARRISBURG INTL AP 72399 USW00014734 40.6825 -74.1694 2.1 NJ NEWARK INTL AP 72502 USW00014737 40.6508 -75.4492 118.9 PA ALLENTOWN INTL AP HCN 72517 USW00014739 42.3606 -71.0106 3.7 MA BOSTON LOGAN INTL AP 72509 USW00014740 41.9381 -72.6825 57.9 CT HARTFORD BRADLEY INTL AP 72508 USW00014742 44.4683 -73.1500 100.6 VT BURLINGTON INTL AP GSN HCN 72617 USW00014768 43.1167 -77.6767 162.5 NY ROCHESTER GTR INTL AP HCN 72529 USW00014771 43.1111 -76.1039 125.9 NY SYRACUSE HANCOCK INTL AP GSN HCN 72519 USW00014777 41.3336 -75.7269 283.5 PA WILKES-BARRE INTL AP 72513 USW00014813 41.0375 -81.4642 318.2 OH AKRON FULTON INTL AP USW00014821 39.9914 -82.8808 246.9 OH COLUMBUS PORT COLUMBUS INTL AP 72428 USW00014826 42.9667 -83.7494 234.7 MI FLINT BISHOP INTL AP 72637 USW00014827 40.9706 -85.2064 241.1 IN FT WAYNE INTL AP 72533 USW00014845 43.5331 -84.0797 201.2 MI SAGINAW MBS INTL AP USW00014860 42.0800 -80.1825 222.5 PA ERIE INTL AP HCN 72526 USW00014914 46.9253 -96.8111 274.3 ND FARGO HECTOR INTL AP 72753 USW00014916 47.9428 -97.1839 256.6 ND GRAND FORKS INTL AP 72757 USW00014918 48.5614 -93.3981 360.6 MN INTL FALLS INTL AP 72747 USW00014923 41.4653 -90.5233 180.4 IL MOLINE QUAD CITY INTL AP 72544 USW00014925 43.9042 -92.4917 397.5 MN ROCHESTER INTL AP 72644 USW00014933 41.5339 -93.6531 291.7 IA DES MOINES INTL AP GSN 72546 USW00021504 19.7192 -155.0531 11.6 HI HILO INTL AP GSN 91285 USW00022010 29.3783 -100.9269 304.5 TX DEL RIO INTL AP 72261 USW00022521 21.3239 -157.9294 2.1 HI HONOLULU INTL AP 91182 USW00023044 31.8111 -106.3758 1194.2 TX EL PASO INTL AP GSN HCN 72270 USW00023050 35.0419 -106.6156 1618.5 NM ALBUQUERQUE INTL AP GSN 72365 USW00023160 32.1314 -110.9553 776.9 AZ TUCSON INTL AP 72274 USW00023174 33.9381 -118.3889 29.6 CA LOS ANGELES INTL AP 72295 USW00023183 33.4278 -112.0039 337.4 AZ PHOENIX SKY HARBOR INTL AP GSN 72278 USW00023185 39.4839 -119.7711 1344.2 NV RENO TAHOE INTL AP HCN 72488 USW00023230 37.7214 -122.2208 1.8 CA OAKLAND METRO INTL AP 72493 USW00023234 37.6197 -122.3647 2.4 CA SAN FRANCISCO INTL AP 72494 USW00024013 48.2553 -101.2733 507.5 ND MINOT INTL AP USW00024033 45.8069 -108.5422 1091.5 MT BILLINGS LOGAN INTL AP 72677 USW00024127 40.7781 -111.9694 1287.8 UT SALT LAKE CITY INTL AP 72572 USW00024143 47.4733 -111.3822 1116.8 MT GREAT FALLS INTL AP HCN 72775 USW00024153 46.9208 -114.0925 972.9 MT MISSOULA INTL AP 72773 USW00024157 47.6217 -117.5281 717.2 WA SPOKANE INTL AP HCN 72785 USW00024217 48.7939 -122.5372 45.4 WA BELLINGHAM INTL AP USW00024229 45.5908 -122.6003 5.8 OR PORTLAND INTL AP 72698 USW00024233 47.4444 -122.3139 112.8 WA SEATTLE TACOMA INTL AP 72793 USW00025309 58.3567 -134.5639 4.9 AK JUNEAU INTL AP 70381 USW00025325 55.3567 -131.7117 23.2 AK KETCHIKAN INTL AP 70395 USW00026411 64.8039 -147.8761 
131.7 AK FAIRBANKS INTL AP GSN 70261 USW00026451 61.1689 -150.0278 36.6 AK ANCHORAGE INTL AP 70273 USW00053858 30.4636 -88.5319 5.5 MS PASCAGOULA LOTT INTL AP USW00064776 44.6500 -73.4667 71.3 NY PLATTSBURGH INTL AP USW00073805 30.3489 -85.7881 17.4 FL NW FLORIDA BEACHES INTL AP USW00093193 36.7800 -119.7194 101.5 CA FRESNO YOSEMITE INTL AP GSN HCN 72389 USW00093721 39.1667 -76.6833 47.5 MD BALTIMORE WASH INTL AP 72406 USW00093730 39.4494 -74.5672 18.3 NJ ATLANTIC CITY INTL AP 72407 USW00093741 37.1319 -76.4930 12.8 VA NEWPORT NEWS INTL AP USW00093815 39.9061 -84.2186 304.8 OH DAYTON INTL AP 72429 USW00093821 38.1811 -85.7392 148.7 KY LOUISVILLE INTL AP 72423 USW00093915 31.3347 -92.5586 25.6 LA ALEXANDRIA INTL AP 74754 USW00094008 48.2138 -106.6213 696.5 MT GLASGOW INTL AP GSN HCN 72768 USW00094014 48.1739 -103.6367 579.7 ND WILLISTON SLOULIN INTL AP 72767 USW00094017 48.0944 -105.5744 605.3 MT WOLF POINT INTL AP USW00094236 42.1469 -121.7242 1244.8 OR KLAMATH FALLS INTL AP USW00094266 48.1203 -123.4983 87.8 WA PORT ANGELES INTL AP USW00094725 44.9358 -74.8458 65.2 NY MASSENA INTL AP USW00094789 40.6386 -73.7622 3.4 NY NEW YORK JFK INTL AP 74486 USW00094790 43.9922 -76.0217 96.9 NY WATERTOWN INTL AP USW00094815 42.2347 -85.5519 264.6 MI KALAMAZOO BATTLE CK INTL AP USW00094817 42.6650 -83.4181 297.5 MI PONTIAC OAKLAND CO INTL AP USW00094823 40.4847 -80.2144 366.7 PA PITTSBURGH INTL AP GSN 72520 USW00094846 41.9950 -87.9336 200.6 IL CHICAGO OHARE INTL AP 72530 USW00094961 48.7167 -94.6000 329.8 MN BAUDETTE INTL AP ###Markdown Weather history for Boston, charts by month [here](https://www.wunderground.com/history/monthly/us/ma/boston/KBOS/date/2020-2) ###Code !grep "BOSTON LOGAN" ghcnd-stations.txt target_station = 'USW00014739' # Logan airport ###Output _____no_output_____ ###Markdown Add a couple of date features ###Code def to_YYYY_MM_DD(yyyymmdd_num): yyyymmdd = str(yyyymmdd_num) y = int(yyyymmdd[0:4]) m = int(yyyymmdd[4:6]) d = int(yyyymmdd[6:8]) return f'{y}-{m:02d}-{d:02d}' full_2020_df['weather_date'] = full_2020_df[1].apply(to_YYYY_MM_DD) full_2020_df['month'] = full_2020_df['weather_date'].str[5:7].astype(int) full_2020_df.tail() full_2020_df.columns = ['station_id','date_as_num','element','value','4','5','6','7','weather_date','month'] full_2020_df.head() full_2020_df.shape ###Output _____no_output_____ ###Markdown Move from long table (34M) to wide table (12M)Instead of a separate row for each and every observation, move weather observations to columns with each row representing a single station on a single day. ###Code full_2020_df.drop(['4','5','6','7'], axis=1, inplace=True) full_2020_df.head() focused_df = full_2020_df[full_2020_df.element.isin(['PRCP','SNOW','TAVG','TMIN','AWND','WSF2','WSF5'])] focused_df.head() wide_df = focused_df.pivot(index=['station_id','weather_date'], columns='element', values='value') wide_df.shape wide_df.head() ###Output _____no_output_____ ###Markdown Look for some snowy days. ###Code wide_df[wide_df.SNOW > 0].head(20) wide_df.reset_index(inplace=True) wide_df.head() wide_df.shape ###Output _____no_output_____ ###Markdown Add features in inches, farenheit, mphThe default units of measurement don't match the typical usage needed by our models, so convert them to more usable units. 
###Code def tenths_meters_per_sec_to_mph(tm): meters_per_sec = tm / 10 meters_per_hour = meters_per_sec * 60 * 60 miles_per_hour = (meters_per_hour / 1000) / 1.6 return round(miles_per_hour, 1) def tenths_mm_to_in(tmm): mm = tmm / 10 inches = mm / 25.4 return round(inches, 1) def mm_to_in(mm): inches = mm / 25.4 return round(inches, 1) def c_tenths_to_f(c_temp): return ((c_temp/10) * 1.8) + 32.0 c_tenths_to_f(41) wide_df['WSF2_MPH'] = wide_df['WSF2'].apply(tenths_meters_per_sec_to_mph) wide_df['WSF5_MPH'] = wide_df['WSF5'].apply(tenths_meters_per_sec_to_mph) wide_df['AWND_MPH'] = wide_df['AWND'].apply(tenths_meters_per_sec_to_mph) wide_df['PRCP_IN'] = wide_df['PRCP'].apply(tenths_mm_to_in) wide_df['SNOW_IN'] = wide_df['SNOW'].apply(mm_to_in) wide_df['TAVG_F'] = wide_df['TAVG'].apply(c_tenths_to_f) wide_df['TMIN_F'] = wide_df['TMIN'].apply(c_tenths_to_f) wide_df.head() ###Output _____no_output_____ ###Markdown Ingest this dataset in Feature Store ###Code airport_weather_df = wide_df.copy() airport_weather_df.columns airport_weather_df.shape ###Output _____no_output_____ ###Markdown Browse the weather at a couple of locations ###Code airport_weather_df[airport_weather_df.station_id == 'USW00014739'].head() airport_weather_df[airport_weather_df.station_id == 'USW00013897'].head() ###Output _____no_output_____ ###Markdown Handle missing data as zeros for temperature, precipitation, wind speed ###Code airport_weather_df.fillna(0, inplace=True) airport_weather_df.head() ###Output _____no_output_____ ###Markdown Join in MLID database to map station_id to iata (3-char airport code)Note that since only 3,000 stations are covered, this cuts our weather dataset from 12M rows down to 464K rows of weather data covering airports that we can look up by 3-character airport codes. ###Code mlid_df.head() mlid_df.shape airport_weather_df.shape airport_weather_df = pd.merge(airport_weather_df, mlid_df[['iata','icao','ghcn']], left_on='station_id', right_on='ghcn', how='inner') airport_weather_df.head() airport_weather_df.shape ###Output _____no_output_____ ###Markdown Add an event time to make this dataset ready to ingest to a feature groupWe will use the existing weather date (in 'YYYY-MM-DD' format) as a base, and extend it to ISO 8601 format with the time of day. For each of 3,000 airports, we have daily weather observations for all days in 2020. Once ingested, the latest feature values will be for December 31, 2020. The online store will hold these values to help with inference. The offline store will hold the history of each feature, with a separate record for each day. All times are flagged as start of day. 
###Code airport_weather_df['event_time'] = airport_weather_df['weather_date']+'T00:00:00Z' airport_weather_df[['iata','event_time','AWND','TMIN_F']].head() airport_weather_df[airport_weather_df.station_id == 'USW00013897'].head() ###Output _____no_output_____ ###Markdown Create and load the feature group ###Code import Utils Utils.create_fg_from_df('airport-weather-fg-v6', airport_weather_df, id_name='iata', event_time_name='event_time', tags={'Category':'Weather'}, online=True) Utils.ingest_from_df('airport-weather-fg-v6', airport_weather_df, max_processes=24, max_workers=4, wait=True) ###Output _____no_output_____ ###Markdown Confirm the features have been loaded ###Code Utils.get_latest_feature_values('airport-weather-fg-v6', ['BOS','BNA','LAX']) ###Output _____no_output_____ ###Markdown Get the record count from the offline store, including all historyNote that there is a delay between the ingestion and the full replication of all records to the offline store, typically on the order of 5 minutes. ###Code Utils.get_historical_record_count('airport-weather-fg-v6') ###Output _____no_output_____ ###Markdown Get offline history for a specific record id (Boston Logan airport) ###Code Utils.get_historical_offline_feature_values('airport-weather-fg-v6', record_ids=['BOS']) ###Output Running query: SELECT * FROM "airport-weather-fg-v6-1622809160" WHERE iata IN ('BOS') ###Markdown Show how to ingest a CSV with one line using Data Wrangler ###Code # airport_weather_df.to_csv('tmp.csv', index=False) # !head tmp.csv # airport_weather_df.to_csv('s3://sagemaker-us-east-1-355151823911/weather/bos-weather-2020-v3.csv', index=False) # Utils.ingest_with_dw('s3://sagemaker-us-east-1-355151823911/weather/bos-weather-2020.csv', 'airport-weather-fg') ###Output _____no_output_____ ###Markdown Do some exploratory data analysis Explore wind speed data ###Code wide_df['WSF2'].hist() wide_df['AWND'].hist() tenths_meters_per_sec_to_mph(69) wide_df.head(20) ###Output _____no_output_____ ###Markdown Explore precipitation data ###Code wide_df['SNOW_IN'].hist() wide_df['PRCP_IN'].hist() wide_df[wide_df.PRCP_IN > 0.2].head(20) wide_df[wide_df.SNOW_IN >= 0.1].head(20) ###Output _____no_output_____
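###Markdown As a quick spot check of what the online-store lookup above should return, the same latest-value-per-airport view can be pulled straight from the dataframe built in this notebook. This is only an illustrative pandas sketch, not part of the feature-store API:

```python
# Illustrative check (pandas only): the most recent 2020 record per airport,
# which the online store lookup for BOS/BNA/LAX should mirror
latest = airport_weather_df.sort_values('weather_date').groupby('iata').tail(1)
print(latest.loc[latest.iata.isin(['BOS', 'BNA', 'LAX']),
                 ['iata', 'weather_date', 'TAVG_F', 'PRCP_IN', 'AWND_MPH']])
```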
hw02/04_basic_arithmetic.ipynb
###Markdown Basic Arithmetic in PythonThis set of exercises gives you practice writing basic arithmetic expressions in Python. **Question 1.** In the next cell, write a Python expression equivalent to this math expression:$$1 - \frac{2^2}{2^{20}}$$The value of the expression is around 0.999996. After you have something that works, try to improve your solution by using only as many parentheses as you need. ###Code ... ###Output _____no_output_____ ###Markdown **Question 2.** Try to predict the value of the expression in the next cell. Before you run the cell, write your prediction in the cell below it. ###Code 1 - 2 ** 3 / 4 + 5 * 6 # Your prediction here: ... ###Output _____no_output_____ ###Markdown **Question 3.** Try to predict whether the value of the expression below is larger or smaller than the value of the expression in the previous question. You can use the next cell to record your answer. Then run the cell to see what happens. If you were wrong, figure out why, and include that in your written answer. ###Code 1 - 2 ** (3 / 4 + 5 * 6) ###Output _____no_output_____ ###Markdown *Write your answer here, replacing this text.* **Question 4.** A famous fact in mathematics is that$$(1 - \frac{1}{n})^{-n}$$gets very close to the number $e$, which is roughly $2.718$, when $n$ is large. Verify that it gets closer to $e$ as $n$ gets larger by writing this expression in Python and trying different values for $n$.*Hint:* If you write several expressions in a cell, only the last will be printed. If you want to see the values of several copies of the expression at once, put `print(...)` around them, as in Lab 1. ###Code ... ###Output _____no_output_____
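###Markdown For Question 4, a minimal sketch of the kind of check being asked for (the values of n below are arbitrary; the printed numbers should drift toward 2.718…):

```python
# Illustration only: (1 - 1/n)**(-n) approaches e as n grows
for n in (10, 100, 10000, 1000000):
    print(n, (1 - 1/n) ** (-n))
```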
_notebooks/2021-02-17-Primeiro-post.ipynb
###Markdown My page > Learning to create a blog page with Jupyter Notebook - toc: false - branch: master - badges: false - comments: false - author: Bruno - categories: [blog] ###Code import pandas as pd import numpy as np data = np.random.randint(0,1000,(10,10)) data df1 = pd.DataFrame(data) df1 #hide_input df2 = pd.DataFrame(data) df2 #hide df3 = pd.DataFrame(data) df3 ###Output _____no_output_____
Convolutional Neural Networks Prev/week1/Convolution+model+-+Step+by+Step+-+v2.ipynb
###Markdown Convolutional Neural Networks: Step by StepWelcome to Course 4's first assignment! In this assignment, you will implement convolutional (CONV) and pooling (POOL) layers in numpy, including both forward propagation and (optionally) backward propagation. **Notation**:- Superscript $[l]$ denotes an object of the $l^{th}$ layer. - Example: $a^{[4]}$ is the $4^{th}$ layer activation. $W^{[5]}$ and $b^{[5]}$ are the $5^{th}$ layer parameters.- Superscript $(i)$ denotes an object from the $i^{th}$ example. - Example: $x^{(i)}$ is the $i^{th}$ training example input. - Lowerscript $i$ denotes the $i^{th}$ entry of a vector. - Example: $a^{[l]}_i$ denotes the $i^{th}$ entry of the activations in layer $l$, assuming this is a fully connected (FC) layer. - $n_H$, $n_W$ and $n_C$ denote respectively the height, width and number of channels of a given layer. If you want to reference a specific layer $l$, you can also write $n_H^{[l]}$, $n_W^{[l]}$, $n_C^{[l]}$. - $n_{H_{prev}}$, $n_{W_{prev}}$ and $n_{C_{prev}}$ denote respectively the height, width and number of channels of the previous layer. If referencing a specific layer $l$, this could also be denoted $n_H^{[l-1]}$, $n_W^{[l-1]}$, $n_C^{[l-1]}$. We assume that you are already familiar with `numpy` and/or have completed the previous courses of the specialization. Let's get started! 1 - PackagesLet's first import all the packages that you will need during this assignment. - [numpy](www.numpy.org) is the fundamental package for scientific computing with Python.- [matplotlib](http://matplotlib.org) is a library to plot graphs in Python.- np.random.seed(1) is used to keep all the random function calls consistent. It will help us grade your work. ###Code import numpy as np import h5py import matplotlib.pyplot as plt %matplotlib inline plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' %load_ext autoreload %autoreload 2 np.random.seed(1) ###Output _____no_output_____ ###Markdown 2 - Outline of the AssignmentYou will be implementing the building blocks of a convolutional neural network! Each function you will implement will have detailed instructions that will walk you through the steps needed:- Convolution functions, including: - Zero Padding - Convolve window - Convolution forward - Convolution backward (optional)- Pooling functions, including: - Pooling forward - Create mask - Distribute value - Pooling backward (optional) This notebook will ask you to implement these functions from scratch in `numpy`. In the next notebook, you will use the TensorFlow equivalents of these functions to build the following model:**Note** that for every forward function, there is its corresponding backward equivalent. Hence, at every step of your forward module you will store some parameters in a cache. These parameters are used to compute gradients during backpropagation. 3 - Convolutional Neural NetworksAlthough programming frameworks make convolutions easy to use, they remain one of the hardest concepts to understand in Deep Learning. A convolution layer transforms an input volume into an output volume of different size, as shown below. In this part, you will build every step of the convolution layer. You will first implement two helper functions: one for zero padding and the other for computing the convolution function itself. 
3.1 - Zero-PaddingZero-padding adds zeros around the border of an image: **Figure 1** : **Zero-Padding** Image (3 channels, RGB) with a padding of 2. The main benefits of padding are the following:- It allows you to use a CONV layer without necessarily shrinking the height and width of the volumes. This is important for building deeper networks, since otherwise the height/width would shrink as you go to deeper layers. An important special case is the "same" convolution, in which the height/width is exactly preserved after one layer. - It helps us keep more of the information at the border of an image. Without padding, very few values at the next layer would be affected by pixels as the edges of an image.**Exercise**: Implement the following function, which pads all the images of a batch of examples X with zeros. [Use np.pad](https://docs.scipy.org/doc/numpy/reference/generated/numpy.pad.html). Note if you want to pad the array "a" of shape $(5,5,5,5,5)$ with `pad = 1` for the 2nd dimension, `pad = 3` for the 4th dimension and `pad = 0` for the rest, you would do:```pythona = np.pad(a, ((0,0), (1,1), (0,0), (3,3), (0,0)), 'constant', constant_values = (..,..))``` ###Code # GRADED FUNCTION: zero_pad def zero_pad(X, pad): """ Pad with zeros all images of the dataset X. The padding is applied to the height and width of an image, as illustrated in Figure 1. Argument: X -- python numpy array of shape (m, n_H, n_W, n_C) representing a batch of m images pad -- integer, amount of padding around each image on vertical and horizontal dimensions Returns: X_pad -- padded image of shape (m, n_H + 2*pad, n_W + 2*pad, n_C) """ ### START CODE HERE ### (≈ 1 line) X_pad = np.pad(X,((0,0),(pad,pad),(pad,pad),(0,0)),"constant") ### END CODE HERE ### return X_pad np.random.seed(1) x = np.random.randn(4, 3, 3, 2) x_pad = zero_pad(x, 2) print ("x.shape =", x.shape) print ("x_pad.shape =", x_pad.shape) print ("x[1,1] =", x[1,1]) print ("x_pad[1,1] =", x_pad[1,1]) fig, axarr = plt.subplots(1, 2) axarr[0].set_title('x') axarr[0].imshow(x[0,:,:,0]) axarr[1].set_title('x_pad') axarr[1].imshow(x_pad[0,:,:,0]) ###Output x.shape = (4, 3, 3, 2) x_pad.shape = (4, 7, 7, 2) x[1,1] = [[ 0.90085595 -0.68372786] [-0.12289023 -0.93576943] [-0.26788808 0.53035547]] x_pad[1,1] = [[ 0. 0.] [ 0. 0.] [ 0. 0.] [ 0. 0.] [ 0. 0.] [ 0. 0.] [ 0. 0.]] ###Markdown **Expected Output**: **x.shape**: (4, 3, 3, 2) **x_pad.shape**: (4, 7, 7, 2) **x[1,1]**: [[ 0.90085595 -0.68372786] [-0.12289023 -0.93576943] [-0.26788808 0.53035547]] **x_pad[1,1]**: [[ 0. 0.] [ 0. 0.] [ 0. 0.] [ 0. 0.] [ 0. 0.] [ 0. 0.] [ 0. 0.]] 3.2 - Single step of convolution In this part, implement a single step of convolution, in which you apply the filter to a single position of the input. This will be used to build a convolutional unit, which: - Takes an input volume - Applies a filter at every position of the input- Outputs another volume (usually of different size) **Figure 2** : **Convolution operation** with a filter of 2x2 and a stride of 1 (stride = amount you move the window each time you slide) In a computer vision application, each value in the matrix on the left corresponds to a single pixel value, and we convolve a 3x3 filter with the image by multiplying its values element-wise with the original matrix, then summing them up and adding a bias. In this first step of the exercise, you will implement a single step of convolution, corresponding to applying a filter to just one of the positions to get a single real-valued output. 
Later in this notebook, you'll apply this function to multiple positions of the input to implement the full convolutional operation. **Exercise**: Implement conv_single_step(). [Hint](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.sum.html). ###Code # GRADED FUNCTION: conv_single_step def conv_single_step(a_slice_prev, W, b): """ Apply one filter defined by parameters W on a single slice (a_slice_prev) of the output activation of the previous layer. Arguments: a_slice_prev -- slice of input data of shape (f, f, n_C_prev) W -- Weight parameters contained in a window - matrix of shape (f, f, n_C_prev) b -- Bias parameters contained in a window - matrix of shape (1, 1, 1) Returns: Z -- a scalar value, result of convolving the sliding window (W, b) on a slice x of the input data """ ### START CODE HERE ### (≈ 2 lines of code) # Element-wise product between a_slice and W. Do not add the bias yet. s = np.multiply(a_slice_prev,W) # Sum over all entries of the volume s. Z = np.sum(s) # Add bias b to Z. Cast b to a float() so that Z results in a scalar value. Z = Z+float(b) ### END CODE HERE ### return Z np.random.seed(1) a_slice_prev = np.random.randn(4, 4, 3) W = np.random.randn(4, 4, 3) b = np.random.randn(1, 1, 1) Z = conv_single_step(a_slice_prev, W, b) print("Z =", Z) ###Output Z = -6.99908945068 ###Markdown **Expected Output**: **Z** -6.99908945068 3.3 - Convolutional Neural Networks - Forward passIn the forward pass, you will take many filters and convolve them on the input. Each 'convolution' gives you a 2D matrix output. You will then stack these outputs to get a 3D volume: **Exercise**: Implement the function below to convolve the filters W on an input activation A_prev. This function takes as input A_prev, the activations output by the previous layer (for a batch of m inputs), F filters/weights denoted by W, and a bias vector denoted by b, where each filter has its own (single) bias. Finally you also have access to the hyperparameters dictionary which contains the stride and the padding. **Hint**: 1. To select a 2x2 slice at the upper left corner of a matrix "a_prev" (shape (5,5,3)), you would do:```pythona_slice_prev = a_prev[0:2,0:2,:]```This will be useful when you will define `a_slice_prev` below, using the `start/end` indexes you will define.2. To define a_slice you will need to first define its corners `vert_start`, `vert_end`, `horiz_start` and `horiz_end`. This figure may be helpful for you to find how each of the corner can be defined using h, w, f and s in the code below. **Figure 3** : **Definition of a slice using vertical and horizontal start/end (with a 2x2 filter)** This figure shows only a single channel. **Reminder**:The formulas relating the output shape of the convolution to the input shape is:$$ n_H = \lfloor \frac{n_{H_{prev}} - f + 2 \times pad}{stride} \rfloor +1 $$$$ n_W = \lfloor \frac{n_{W_{prev}} - f + 2 \times pad}{stride} \rfloor +1 $$$$ n_C = \text{number of filters used in the convolution}$$For this exercise, we won't worry about vectorization, and will just implement everything with for-loops. 
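###Markdown As a quick numeric illustration of the shape formulas above (a sketch only, reusing the test values that appear in the check cell further below: n_H_prev = n_W_prev = 4, f = 2, pad = 2 and stride = 2):

```python
# Sketch: the output height/width predicted by the formulas for that test case
import numpy as np
n_prev, f, pad, stride = 4, 2, 2, 2
n_out = int(np.floor((n_prev - f + 2 * pad) / stride)) + 1
print(n_out)  # 4 -- consistent with the (10, 4, 4, 8) output checked below
```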
###Code # GRADED FUNCTION: conv_forward def conv_forward(A_prev, W, b, hparameters): """ Implements the forward propagation for a convolution function Arguments: A_prev -- output activations of the previous layer, numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev) W -- Weights, numpy array of shape (f, f, n_C_prev, n_C) b -- Biases, numpy array of shape (1, 1, 1, n_C) hparameters -- python dictionary containing "stride" and "pad" Returns: Z -- conv output, numpy array of shape (m, n_H, n_W, n_C) cache -- cache of values needed for the conv_backward() function """ ### START CODE HERE ### # Retrieve dimensions from A_prev's shape (≈1 line) (m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape # Retrieve dimensions from W's shape (≈1 line) (f, f, n_C_prev, n_C) = W.shape # Retrieve information from "hparameters" (≈2 lines) stride = hparameters['stride'] pad = hparameters['pad'] # Compute the dimensions of the CONV output volume using the formula given above. Hint: use int() to floor. (≈2 lines) n_H = int(np.floor((n_H_prev-f+2*pad)/stride)) + 1 n_W = int(np.floor((n_W_prev-f+2*pad)/stride)) + 1 # Initialize the output volume Z with zeros. (≈1 line) Z = np.zeros((m,n_H,n_W,n_C)) # Create A_prev_pad by padding A_prev A_prev_pad = zero_pad(A_prev,pad) for i in range(m): # loop over the batch of training examples a_prev_pad = A_prev_pad[i] # Select ith training example's padded activation for h in range(n_H): # loop over vertical axis of the output volume for w in range(n_W): # loop over horizontal axis of the output volume for c in range(n_C): # loop over channels (= #filters) of the output volume # Find the corners of the current "slice" (≈4 lines) vert_start = h*stride vert_end = vert_start+f horiz_start = w*stride horiz_end = horiz_start+f # Use the corners to define the (3D) slice of a_prev_pad (See Hint above the cell). (≈1 line) a_slice_prev = a_prev_pad[vert_start:vert_end,horiz_start:horiz_end,:] # Convolve the (3D) slice with the correct filter W and bias b, to get back one output neuron. (≈1 line) Z[i, h, w, c] = conv_single_step(a_slice_prev,W[:,:,:,c],b[:,:,:,c]) ### END CODE HERE ### # Making sure your output shape is correct assert(Z.shape == (m, n_H, n_W, n_C)) # Save information in "cache" for the backprop cache = (A_prev, W, b, hparameters) return Z, cache np.random.seed(1) A_prev = np.random.randn(10,4,4,3) W = np.random.randn(2,2,3,8) b = np.random.randn(1,1,1,8) hparameters = {"pad" : 2, "stride": 2} Z, cache_conv = conv_forward(A_prev, W, b, hparameters) print("Z's mean =", np.mean(Z)) print("Z[3,2,1] =", Z[3,2,1]) print("cache_conv[0][1][2][3] =", cache_conv[0][1][2][3]) ###Output Z's mean = 0.0489952035289 Z[3,2,1] = [-0.61490741 -6.7439236 -2.55153897 1.75698377 3.56208902 0.53036437 5.18531798 8.75898442] cache_conv[0][1][2][3] = [-0.20075807 0.18656139 0.41005165] ###Markdown **Expected Output**: **Z's mean** 0.0489952035289 **Z[3,2,1]** [-0.61490741 -6.7439236 -2.55153897 1.75698377 3.56208902 0.53036437 5.18531798 8.75898442] **cache_conv[0][1][2][3]** [-0.20075807 0.18656139 0.41005165] Finally, CONV layer should also contain an activation, in which case we would add the following line of code:```python Convolve the window to get back one output neuronZ[i, h, w, c] = ... Apply activationA[i, h, w, c] = activation(Z[i, h, w, c])```You don't need to do it here. 4 - Pooling layer The pooling (POOL) layer reduces the height and width of the input. It helps reduce computation, as well as helps make feature detectors more invariant to its position in the input. 
The two types of pooling layers are: - Max-pooling layer: slides an ($f, f$) window over the input and stores the max value of the window in the output.- Average-pooling layer: slides an ($f, f$) window over the input and stores the average value of the window in the output.These pooling layers have no parameters for backpropagation to train. However, they have hyperparameters such as the window size $f$. This specifies the height and width of the fxf window you would compute a max or average over. 4.1 - Forward PoolingNow, you are going to implement MAX-POOL and AVG-POOL, in the same function. **Exercise**: Implement the forward pass of the pooling layer. Follow the hints in the comments below.**Reminder**:As there's no padding, the formulas binding the output shape of the pooling to the input shape is:$$ n_H = \lfloor \frac{n_{H_{prev}} - f}{stride} \rfloor +1 $$$$ n_W = \lfloor \frac{n_{W_{prev}} - f}{stride} \rfloor +1 $$$$ n_C = n_{C_{prev}}$$ ###Code # GRADED FUNCTION: pool_forward def pool_forward(A_prev, hparameters, mode = "max"): """ Implements the forward pass of the pooling layer Arguments: A_prev -- Input data, numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev) hparameters -- python dictionary containing "f" and "stride" mode -- the pooling mode you would like to use, defined as a string ("max" or "average") Returns: A -- output of the pool layer, a numpy array of shape (m, n_H, n_W, n_C) cache -- cache used in the backward pass of the pooling layer, contains the input and hparameters """ # Retrieve dimensions from the input shape (m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape # Retrieve hyperparameters from "hparameters" f = hparameters["f"] stride = hparameters["stride"] # Define the dimensions of the output n_H = int(1 + (n_H_prev - f) / stride) n_W = int(1 + (n_W_prev - f) / stride) n_C = n_C_prev # Initialize output matrix A A = np.zeros((m, n_H, n_W, n_C)) ### START CODE HERE ### for i in range(m): # loop over the training examples for h in range(n_H): # loop on the vertical axis of the output volume for w in range(n_W): # loop on the horizontal axis of the output volume for c in range (n_C): # loop over the channels of the output volume # Find the corners of the current "slice" (≈4 lines) vert_start = (h)*stride vert_end = vert_start+f horiz_start = (w)*stride horiz_end = horiz_start+f # Use the corners to define the current slice on the ith training example of A_prev, channel c. (≈1 line) a_prev_slice = A_prev[i,vert_start:vert_end,horiz_start:horiz_end,c] #print(a_prev_slice.shape) # Compute the pooling operation on the slice. Use an if statment to differentiate the modes. Use np.max/np.mean. 
if mode == "max": A[i, h, w, c] = np.max(a_prev_slice) elif mode == "average": A[i, h, w, c] = np.mean(a_prev_slice) ### END CODE HERE ### # Store the input and hparameters in "cache" for pool_backward() cache = (A_prev, hparameters) # Making sure your output shape is correct assert(A.shape == (m, n_H, n_W, n_C)) return A, cache np.random.seed(1) A_prev = np.random.randn(2, 4, 4, 3) hparameters = {"stride" : 2, "f": 3} A, cache = pool_forward(A_prev, hparameters) print("mode = max") print("A =", A) print() A, cache = pool_forward(A_prev, hparameters, mode = "average") print("mode = average") print("A =", A) ###Output mode = max A = [[[[ 1.74481176 0.86540763 1.13376944]]] [[[ 1.13162939 1.51981682 2.18557541]]]] mode = average A = [[[[ 0.02105773 -0.20328806 -0.40389855]]] [[[-0.22154621 0.51716526 0.48155844]]]] ###Markdown **Expected Output:** A = [[[[ 1.74481176 0.86540763 1.13376944]]] [[[ 1.13162939 1.51981682 2.18557541]]]] A = [[[[ 0.02105773 -0.20328806 -0.40389855]]] [[[-0.22154621 0.51716526 0.48155844]]]] Congratulations! You have now implemented the forward passes of all the layers of a convolutional network. The remainder of this notebook is optional, and will not be graded. 5 - Backpropagation in convolutional neural networks (OPTIONAL / UNGRADED)In modern deep learning frameworks, you only have to implement the forward pass, and the framework takes care of the backward pass, so most deep learning engineers don't need to bother with the details of the backward pass. The backward pass for convolutional networks is complicated. If you wish, however, you can work through this optional portion of the notebook to get a sense of what backprop in a convolutional network looks like. When in an earlier course you implemented a simple (fully connected) neural network, you used backpropagation to compute the derivatives with respect to the cost to update the parameters. Similarly, in convolutional neural networks you can calculate the derivatives with respect to the cost in order to update the parameters. The backprop equations are not trivial and we did not derive them in lecture, but we briefly presented them below. 5.1 - Convolutional layer backward pass Let's start by implementing the backward pass for a CONV layer. 5.1.1 - Computing dA:This is the formula for computing $dA$ with respect to the cost for a certain filter $W_c$ and a given training example:$$ dA += \sum _{h=0} ^{n_H} \sum_{w=0} ^{n_W} W_c \times dZ_{hw} \tag{1}$$Where $W_c$ is a filter and $dZ_{hw}$ is a scalar corresponding to the gradient of the cost with respect to the output of the conv layer Z at the hth row and wth column (corresponding to the dot product taken at the ith stride left and jth stride down). Note that at each time, we multiply the same filter $W_c$ by a different dZ when updating dA. We do so mainly because when computing the forward propagation, each filter is dotted and summed by a different a_slice. Therefore when computing the backprop for dA, we are just adding the gradients of all the a_slices. In code, inside the appropriate for-loops, this formula translates into:```pythonda_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :] += W[:,:,:,c] * dZ[i, h, w, c]``` 5.1.2 - Computing dW:This is the formula for computing $dW_c$ ($dW_c$ is the derivative of one filter) with respect to the loss:$$ dW_c += \sum _{h=0} ^{n_H} \sum_{w=0} ^ {n_W} a_{slice} \times dZ_{hw} \tag{2}$$Where $a_{slice}$ corresponds to the slice which was used to generate the activation $Z_{ij}$.
Hence, this ends up giving us the gradient for $W$ with respect to that slice. Since it is the same $W$, we will just add up all such gradients to get $dW$. In code, inside the appropriate for-loops, this formula translates into:```pythondW[:,:,:,c] += a_slice * dZ[i, h, w, c]``` 5.1.3 - Computing db:This is the formula for computing $db$ with respect to the cost for a certain filter $W_c$:$$ db = \sum_h \sum_w dZ_{hw} \tag{3}$$As you have previously seen in basic neural networks, db is computed by summing $dZ$. In this case, you are just summing over all the gradients of the conv output (Z) with respect to the cost. In code, inside the appropriate for-loops, this formula translates into:```pythondb[:,:,:,c] += dZ[i, h, w, c]```**Exercise**: Implement the `conv_backward` function below. You should sum over all the training examples, filters, heights, and widths. You should then compute the derivatives using formulas 1, 2 and 3 above. ###Code def conv_backward(dZ, cache): """ Implement the backward propagation for a convolution function Arguments: dZ -- gradient of the cost with respect to the output of the conv layer (Z), numpy array of shape (m, n_H, n_W, n_C) cache -- cache of values needed for the conv_backward(), output of conv_forward() Returns: dA_prev -- gradient of the cost with respect to the input of the conv layer (A_prev), numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev) dW -- gradient of the cost with respect to the weights of the conv layer (W) numpy array of shape (f, f, n_C_prev, n_C) db -- gradient of the cost with respect to the biases of the conv layer (b) numpy array of shape (1, 1, 1, n_C) """ ### START CODE HERE ### # Retrieve information from "cache" (A_prev, W, b, hparameters) = cache # Retrieve dimensions from A_prev's shape (m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape # Retrieve dimensions from W's shape (f, f, n_C_prev, n_C) = W.shape # Retrieve information from "hparameters" stride = hparameters["stride"] pad = hparameters["pad"] # Retrieve dimensions from dZ's shape (m, n_H, n_W, n_C) = dZ.shape # Initialize dA_prev, dW, db with the correct shapes dA_prev = np.zeros((m, n_H_prev, n_W_prev, n_C_prev)) dW = np.zeros((f, f, n_C_prev, n_C)) db = np.zeros((1, 1, 1, n_C)) # Pad A_prev and dA_prev A_prev_pad = zero_pad(A_prev, pad) dA_prev_pad = zero_pad(dA_prev, pad) for i in range(m): # loop over the training examples # select ith training example from A_prev_pad and dA_prev_pad a_prev_pad = A_prev_pad[i] da_prev_pad = dA_prev_pad[i] for h in range(n_H): # loop over vertical axis of the output volume for w in range(n_W): # loop over horizontal axis of the output volume for c in range(n_C): # loop over the channels of the output volume # Find the corners of the current "slice" vert_start = h vert_end = vert_start + f horiz_start = w horiz_end = horiz_start + f # Use the corners to define the slice from a_prev_pad a_slice = a_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :] # Update gradients for the window and the filter's parameters using the code formulas given above da_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :] += W[:,:,:,c] * dZ[i, h, w, c] dW[:,:,:,c] += a_slice * dZ[i, h, w, c] db[:,:,:,c] += dZ[i, h, w, c] # Set the ith training example's dA_prev to the unpaded da_prev_pad (Hint: use X[pad:-pad, pad:-pad, :]) dA_prev[i, :, :, :] = da_prev_pad[pad:-pad, pad:-pad, :] ### END CODE HERE ### # Making sure your output shape is correct assert(dA_prev.shape == (m, n_H_prev, n_W_prev, n_C_prev)) return dA_prev, dW, db np.random.seed(1) dA, 
dW, db = conv_backward(Z, cache_conv) print("dA_mean =", np.mean(dA)) print("dW_mean =", np.mean(dW)) print("db_mean =", np.mean(db)) ###Output dA_mean = 0.634770447265 dW_mean = 1.55726574285 db_mean = 7.83923256462 ###Markdown ** Expected Output: ** **dA_mean** 1.45243777754 **dW_mean** 1.72699145831 **db_mean** 7.83923256462 5.2 Pooling layer - backward passNext, let's implement the backward pass for the pooling layer, starting with the MAX-POOL layer. Even though a pooling layer has no parameters for backprop to update, you still need to backpropagation the gradient through the pooling layer in order to compute gradients for layers that came before the pooling layer. 5.2.1 Max pooling - backward pass Before jumping into the backpropagation of the pooling layer, you are going to build a helper function called `create_mask_from_window()` which does the following: $$ X = \begin{bmatrix}1 && 3 \\4 && 2\end{bmatrix} \quad \rightarrow \quad M =\begin{bmatrix}0 && 0 \\1 && 0\end{bmatrix}\tag{4}$$As you can see, this function creates a "mask" matrix which keeps track of where the maximum of the matrix is. True (1) indicates the position of the maximum in X, the other entries are False (0). You'll see later that the backward pass for average pooling will be similar to this but using a different mask. **Exercise**: Implement `create_mask_from_window()`. This function will be helpful for pooling backward. Hints:- [np.max()]() may be helpful. It computes the maximum of an array.- If you have a matrix X and a scalar x: `A = (X == x)` will return a matrix A of the same size as X such that:```A[i,j] = True if X[i,j] = xA[i,j] = False if X[i,j] != x```- Here, you don't need to consider cases where there are several maxima in a matrix. ###Code def create_mask_from_window(x): """ Creates a mask from an input matrix x, to identify the max entry of x. Arguments: x -- Array of shape (f, f) Returns: mask -- Array of the same shape as window, contains a True at the position corresponding to the max entry of x. """ ### START CODE HERE ### (≈1 line) mask = (x==np.max(x)) ### END CODE HERE ### return mask np.random.seed(1) x = np.random.randn(2,3) mask = create_mask_from_window(x) print('x = ', x) print("mask = ", mask) ###Output x = [[ 1.62434536 -0.61175641 -0.52817175] [-1.07296862 0.86540763 -2.3015387 ]] mask = [[ True False False] [False False False]] ###Markdown **Expected Output:** **x =**[[ 1.62434536 -0.61175641 -0.52817175] [-1.07296862 0.86540763 -2.3015387 ]] **mask =**[[ True False False] [False False False]] Why do we keep track of the position of the max? It's because this is the input value that ultimately influenced the output, and therefore the cost. Backprop is computing gradients with respect to the cost, so anything that influences the ultimate cost should have a non-zero gradient. So, backprop will "propagate" the gradient back to this particular input value that had influenced the cost. 5.2.2 - Average pooling - backward pass In max pooling, for each input window, all the "influence" on the output came from a single input value--the max. In average pooling, every element of the input window has equal influence on the output. 
So to implement backprop, you will now implement a helper function that reflects this.For example if we did average pooling in the forward pass using a 2x2 filter, then the mask you'll use for the backward pass will look like: $$ dZ = 1 \quad \rightarrow \quad dZ =\begin{bmatrix}1/4 && 1/4 \\1/4 && 1/4\end{bmatrix}\tag{5}$$This implies that each position in the $dZ$ matrix contributes equally to output because in the forward pass, we took an average. **Exercise**: Implement the function below to equally distribute a value dz through a matrix of dimension shape. [Hint](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.ones.html) ###Code def distribute_value(dz, shape): """ Distributes the input value in the matrix of dimension shape Arguments: dz -- input scalar shape -- the shape (n_H, n_W) of the output matrix for which we want to distribute the value of dz Returns: a -- Array of size (n_H, n_W) for which we distributed the value of dz """ ### START CODE HERE ### # Retrieve dimensions from shape (≈1 line) (n_H, n_W) = shape # Compute the value to distribute on the matrix (≈1 line) average = dz/(n_H*n_W) #print(average) # Create a matrix where every entry is the "average" value (≈1 line) a = np.ones(shape)*average ### END CODE HERE ### return a a = distribute_value(2, (2,2)) print('distributed value =', a) ###Output distributed value = [[ 0.5 0.5] [ 0.5 0.5]] ###Markdown **Expected Output**: distributed_value =[[ 0.5 0.5] [ 0.5 0.5]] 5.2.3 Putting it together: Pooling backward You now have everything you need to compute backward propagation on a pooling layer.**Exercise**: Implement the `pool_backward` function in both modes (`"max"` and `"average"`). You will once again use 4 for-loops (iterating over training examples, height, width, and channels). You should use an `if/elif` statement to see if the mode is equal to `'max'` or `'average'`. If it is equal to 'average' you should use the `distribute_value()` function you implemented above to create a matrix of the same shape as `a_slice`. Otherwise, the mode is equal to '`max`', and you will create a mask with `create_mask_from_window()` and multiply it by the corresponding value of dZ. 
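###Markdown Before implementing it, here is a tiny hand-checkable illustration (a sketch with made-up numbers, not part of the graded code) of how a mask routes an upstream gradient back to the max position of a single window:

```python
import numpy as np
# Made-up 2x2 window with an upstream gradient of 3.0 for this window
window = np.array([[1., 4.],
                   [2., 3.]])
mask = (window == np.max(window))  # True only where the max sits
print(mask * 3.0)                  # [[0. 3.] [0. 0.]] -- all of the gradient goes to the max entry
```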
###Code def pool_backward(dA, cache, mode = "max"): """ Implements the backward pass of the pooling layer Arguments: dA -- gradient of cost with respect to the output of the pooling layer, same shape as A cache -- cache output from the forward pass of the pooling layer, contains the layer's input and hparameters mode -- the pooling mode you would like to use, defined as a string ("max" or "average") Returns: dA_prev -- gradient of cost with respect to the input of the pooling layer, same shape as A_prev """ ### START CODE HERE ### # Retrieve information from cache (≈1 line) (A_prev, hparameters) = cache # Retrieve hyperparameters from "hparameters" (≈2 lines) stride = hparameters["stride"] f = hparameters["f"] # Retrieve dimensions from A_prev's shape and dA's shape (≈2 lines) m, n_H_prev, n_W_prev, n_C_prev = A_prev.shape m, n_H, n_W, n_C = dA.shape # Initialize dA_prev with zeros (≈1 line) dA_prev = np.zeros(A_prev.shape) for i in range(m): # loop over the training examples # select training example from A_prev (≈1 line) a_prev = A_prev[i] for h in range(n_H): # loop on the vertical axis for w in range(n_W): # loop on the horizontal axis for c in range(n_C): # loop over the channels (depth) # Find the corners of the current "slice" (≈4 lines) vert_start = h vert_end = vert_start + f horiz_start = w horiz_end = horiz_start + f # Compute the backward propagation in both modes. if mode == "max": # Use the corners and "c" to define the current slice from a_prev (≈1 line) a_prev_slice = a_prev[vert_start:vert_end, horiz_start:horiz_end, c] # Create the mask from a_prev_slice (≈1 line) mask = create_mask_from_window(a_prev_slice) # Set dA_prev to be dA_prev + (the mask multiplied by the correct entry of dA) (≈1 line) dA_prev[i, vert_start:vert_end, horiz_start:horiz_end, c] += np.multiply(mask, dA[i, h, w, c]) elif mode == "average": # Get the value a from dA (≈1 line) da = dA[i, h, w, c] # Define the shape of the filter as fxf (≈1 line) shape = (f, f) # Distribute it to get the correct slice of dA_prev. i.e. Add the distributed value of da. (≈1 line) dA_prev[i, vert_start:vert_end, horiz_start:horiz_end, c] += distribute_value(da, shape) ### END CODE ### # Making sure your output shape is correct assert(dA_prev.shape == A_prev.shape) return dA_prev np.random.seed(1) A_prev = np.random.randn(5, 5, 3, 2) hparameters = {"stride" : 1, "f": 2} A, cache = pool_forward(A_prev, hparameters) dA = np.random.randn(5, 4, 2, 2) dA_prev = pool_backward(dA, cache, mode = "max") print("mode = max") print('mean of dA = ', np.mean(dA)) print('dA_prev[1,1] = ', dA_prev[1,1]) print() dA_prev = pool_backward(dA, cache, mode = "average") print("mode = average") print('mean of dA = ', np.mean(dA)) print('dA_prev[1,1] = ', dA_prev[1,1]) ###Output mode = max mean of dA = 0.145713902729 dA_prev[1,1] = [[ 0. 0. ] [ 5.05844394 -1.68282702] [ 0. 0. ]] mode = average mean of dA = 0.145713902729 dA_prev[1,1] = [[ 0.08485462 0.2787552 ] [ 1.26461098 -0.25749373] [ 1.17975636 -0.53624893]]
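###Markdown One extra sanity check that is easy to run here (a sketch reusing `dA` and `cache` from the cell above; it is not part of the original assignment): pooling backprop only redistributes gradient, so the total gradient mass should be unchanged in both modes.

```python
import numpy as np
# Each dA entry is passed back in full (to the max position, or split evenly across the window),
# so the sums should match in both modes.
for mode in ("max", "average"):
    dA_prev = pool_backward(dA, cache, mode=mode)
    print(mode, np.isclose(dA.sum(), dA_prev.sum()))  # expected: True for both
```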
notebooks/nlp_tutorial_part3.ipynb
###Markdown NLP Tutorial Elena KochkinaNESTA HackSTIR22.10.2019 Part III. Topic modelling Imports ###Code import nltk nltk.download('punkt') import re nltk.download('reuters') from nltk.corpus import reuters from sklearn.feature_extraction.text import CountVectorizer from sklearn.decomposition import LatentDirichletAllocation import warnings warnings.filterwarnings("ignore") ###Output _____no_output_____ ###Markdown Documents ###Code documents_example = ['I like cats and dogs', 'Cats are furry animals', 'Dogs are good friends', 'Apples and carrots are healthy foods', 'Humans should maintain a healthy diet', 'If your diet consists of burgers it is not very healthy'] vectorizer = CountVectorizer(stop_words='english') bow_matrix = vectorizer.fit_transform(documents_example) vocabulary = vectorizer.get_feature_names() ###Output _____no_output_____ ###Markdown Latent Dirichlet Allocation[documentation](https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.LatentDirichletAllocation.html) ###Code no_topics = 2 lda = LatentDirichletAllocation(n_components=no_topics).fit(bow_matrix) no_top_words = 3 for topic_idx, topic in enumerate(lda.components_): print ("Topic ", topic_idx) print (" ".join([vocabulary[i] for i in topic.argsort()[:-no_top_words - 1:-1]])) for topic_idx, topic in enumerate(lda.components_): print ("Topic ", topic_idx) for i in range(len(vocabulary)): print (vocabulary[i],topic[i]) lda.transform(bow_matrix) ###Output _____no_output_____ ###Markdown Get Reuters dataset ###Code documents_train = [] labels_train = [] documents_test = [] labels_test = [] #categories = reuters.categories() categories = ['wheat','gold','ship','coffee','grain'] for cat in categories: print (cat) print (len(reuters.fileids(cat))) for fileid in reuters.fileids(cat): if fileid.startswith('training'): documents_train.append(reuters.raw(fileid)) labels_train.append(cat) else: documents_test.append(reuters.raw(fileid)) labels_test.append(cat) documents_train_preprocessed = [] for d in documents_train: newd = d.lower() newd = re.sub(r'[^A-Za-z0-9 ]+', '', newd) documents_train_preprocessed.append(newd) documents_test_preprocessed = [] for d in documents_test: newd = d.lower() newd = re.sub(r'[^A-Za-z0-9 ]+', '', newd) documents_test_preprocessed.append(newd) vectorizer = CountVectorizer(stop_words='english') X_train_bow = vectorizer.fit_transform(documents_train_preprocessed) X_test_bow = vectorizer.transform(documents_test_preprocessed) vocabulary = vectorizer.get_feature_names() ###Output _____no_output_____ ###Markdown LDA ###Code no_topics = 5 lda = LatentDirichletAllocation(n_components=no_topics).fit(X_train_bow) no_top_words = 5 for topic_idx, topic in enumerate(lda.components_): print ("Topic ", topic_idx) print (" ".join([vocabulary[i] for i in topic.argsort()[:-no_top_words - 1:-1]])) lda.transform(X_train_bow[0]) lda.transform(X_test_bow[0]) ###Output _____no_output_____
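###Markdown A small follow-up sketch (it reuses `lda`, `X_test_bow` and `labels_test` from the cells above; the topic indices themselves will vary between runs): assign each test document its dominant topic and compare it with the known Reuters category.

```python
import numpy as np
# Dominant LDA topic per test document vs. the known Reuters category
doc_topics = lda.transform(X_test_bow)      # shape: (n_documents, no_topics)
dominant = np.argmax(doc_topics, axis=1)
for i in range(5):
    print(labels_test[i], '-> topic', dominant[i], doc_topics[i].round(2))
```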
Classification/Image_proceesing/Image Classification using scikit-learn.ipynb
###Markdown Image Classification using `Supervised Learning` ###Code !pip install scikit-image from pathlib import Path import matplotlib.pyplot as plt import numpy as np import pandas as pd from sklearn.utils import Bunch from sklearn import svm, metrics from sklearn.model_selection import GridSearchCV %matplotlib inline from skimage.io import imread from skimage.transform import resize ###Output _____no_output_____ ###Markdown Load images from a structured directory, like an sklearn sample dataset ###Code def load_image_files(container_path, dimension=(64, 64)): """ Load image files whose categories are given by subfolders, resizing every image to 64x64. Parameters ---------- container_path : string or unicode Path to the main folder holding one subfolder per category [sunflower, pizza, dollar_bill] dimension : tuple Size to which each image is resized (64 x 64) Returns ------- Bunch holding the flattened data, targets, category names and images """ image_dir = Path(container_path) folders = [directory for directory in image_dir.iterdir() if directory.is_dir()] categories = [fo.name for fo in folders] descr = """An image classification dataset. All images are resized to the same 64*64 dimension. Number of images in the subfolders = 195. Category names = ['sunflower','pizza','dollar_bill']. The source images have different dimensions, so each one is converted to 64*64. """ images = [] flat_data = [] target = [] for i, direc in enumerate(folders): for file in direc.iterdir(): img = skimage.io.imread(file) #plt.imshow(img) # original image, before resizing #plt.show() img_resized = resize(img, dimension, anti_aliasing=True, mode='reflect') plt.imshow(img_resized) # image after resizing to 64x64 flat_data.append(img_resized.flatten()) images.append(img_resized) target.append(i) flat_data = np.array(flat_data) target = np.array(target) images = np.array(images) return Bunch(data=flat_data, target=target, target_names=categories, images=images, DESCR=descr) import skimage image_dataset = load_image_files("Data/images/") print(image_dataset.DESCR) image_dataset.data[0:5] image_dataset.images[0].shape image_dataset.target image_dataset.target_names ###Output _____no_output_____ ###Markdown Split data ###Code from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( image_dataset.data, image_dataset.target,test_size= 0.5,random_state=109) X_train.shape X_train[0:5] y_test.shape X_test.shape y_test.shape from sklearn.svm import SVC Svc = SVC(gamma='auto') Svc.fit(X_train,y_train) Svc.score(X_train,y_train) Svc.score(X_test,y_test) New_predict =Svc.predict(X_test) New_predict[0:5] y_test[0:5] pd.DataFrame({'Actual_Data':y_test, 'New_predict':New_predict}) from sklearn.metrics import classification_report,confusion_matrix cm = confusion_matrix(New_predict,y_test) import seaborn as sns sns.heatmap(cm,annot=True) print(classification_report(New_predict,y_test)) ###Output _____no_output_____ ###Markdown Train data with parameter optimization ###Code param_grid = [ {'C': [1, 10, 100, 1000], 'kernel': ['linear']}, {'C': [1, 10, 100, 1000], 'gamma': [0.001, 0.01], 'kernel': ['rbf','poly','sigmoid']} ] svc = svm.SVC() svc ###Output _____no_output_____ ###Markdown Grid search is the process of performing `hyperparameter tuning` in order to determine the optimal values for a given model ###Code clf = GridSearchCV(svc, param_grid) clf clf.fit(X_train, y_train) ###Output _____no_output_____ ###Markdown Predict ###Code y_pred = clf.predict(X_test) y_pred y_test ###Output _____no_output_____ ###Markdown Report ###Code
print("Classification report for - \n{}:\n{}\n".format( clf, metrics.classification_report(y_test, y_pred))) metrics.confusion_matrix(y_test,y_pred) metrics.accuracy_score(y_test,y_pred) from sklearn.model_selection import cross_val_score cv = cross_val_score(clf,X_train,y_train,cv=10) np.min(cv) np.max(cv) np.mean(cv) clf.predict(image_dataset.data[[190]]) ###Output _____no_output_____
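###Markdown To score a brand-new image with the tuned model, it has to go through the same resize-and-flatten preprocessing used in `load_image_files`. A sketch is shown below; the file path is hypothetical.

```python
from skimage.io import imread
from skimage.transform import resize
# Hypothetical path -- any image file on disk would do
new_img = imread('Data/images/sunflower/some_new_image.jpg')
flat = resize(new_img, (64, 64), anti_aliasing=True, mode='reflect').flatten().reshape(1, -1)
print(image_dataset.target_names[clf.predict(flat)[0]])  # predicted category name
```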
submodules/resource/d2l-zh/tensorflow/chapter_deep-learning-computation/parameters.ipynb
###Markdown 参数管理在选择了架构并设置了超参数后,我们就进入了训练阶段。此时,我们的目标是找到使损失函数最小化的模型参数值。经过训练后,我们将需要使用这些参数来做出未来的预测。此外,有时我们希望提取参数,以便在其他环境中复用它们,将模型保存下来,以便它可以在其他软件中执行,或者为了获得科学的理解而进行检查。之前的介绍中,我们只依靠深度学习框架来完成训练的工作,而忽略了操作参数的具体细节。本节,我们将介绍以下内容:* 访问参数,用于调试、诊断和可视化。* 参数初始化。* 在不同模型组件间共享参数。(**我们首先看一下具有单隐藏层的多层感知机。**) ###Code import tensorflow as tf net = tf.keras.models.Sequential([ tf.keras.layers.Flatten(), tf.keras.layers.Dense(4, activation=tf.nn.relu), tf.keras.layers.Dense(1), ]) X = tf.random.uniform((2, 4)) net(X) ###Output _____no_output_____ ###Markdown [**参数访问**]我们从已有模型中访问参数。当通过`Sequential`类定义模型时,我们可以通过索引来访问模型的任意层。这就像模型是一个列表一样,每层的参数都在其属性中。如下所示,我们可以检查第二个全连接层的参数。 ###Code print(net.layers[2].weights) ###Output [<tf.Variable 'dense_1/kernel:0' shape=(4, 1) dtype=float32, numpy= array([[ 0.23318756], [-1.0404987 ], [-0.48204058], [ 1.0638199 ]], dtype=float32)>, <tf.Variable 'dense_1/bias:0' shape=(1,) dtype=float32, numpy=array([0.], dtype=float32)>] ###Markdown 输出的结果告诉我们一些重要的事情:首先,这个全连接层包含两个参数,分别是该层的权重和偏置。两者都存储为单精度浮点数(float32)。注意,参数名称允许唯一标识每个参数,即使在包含数百个层的网络中也是如此。 [**目标参数**]注意,每个参数都表示为参数类的一个实例。要对参数执行任何操作,首先我们需要访问底层的数值。有几种方法可以做到这一点。有些比较简单,而另一些则比较通用。下面的代码从第二个全连接层(即第三个神经网络层)提取偏置,提取后返回的是一个参数类实例,并进一步访问该参数的值。 ###Code print(type(net.layers[2].weights[1])) print(net.layers[2].weights[1]) print(tf.convert_to_tensor(net.layers[2].weights[1])) ###Output <class 'tensorflow.python.ops.resource_variable_ops.ResourceVariable'> <tf.Variable 'dense_1/bias:0' shape=(1,) dtype=float32, numpy=array([0.], dtype=float32)> tf.Tensor([0.], shape=(1,), dtype=float32) ###Markdown [**一次性访问所有参数**]当我们需要对所有参数执行操作时,逐个访问它们可能会很麻烦。当我们处理更复杂的块(例如,嵌套块)时,情况可能会变得特别复杂,因为我们需要递归整个树来提取每个子块的参数。下面,我们将通过演示来比较访问第一个全连接层的参数和访问所有层。 ###Code print(net.layers[1].weights) print(net.get_weights()) ###Output [<tf.Variable 'dense/kernel:0' shape=(4, 4) dtype=float32, numpy= array([[ 0.22265357, 0.69465953, 0.08816117, 0.5380345 ], [-0.24957252, 0.506622 , -0.31157494, 0.04067379], [-0.10570258, -0.00699437, -0.7334142 , -0.3944154 ], [ 0.727436 , 0.53911453, -0.8468247 , -0.64365757]], dtype=float32)>, <tf.Variable 'dense/bias:0' shape=(4,) dtype=float32, numpy=array([0., 0., 0., 0.], dtype=float32)>] [array([[ 0.22265357, 0.69465953, 0.08816117, 0.5380345 ], [-0.24957252, 0.506622 , -0.31157494, 0.04067379], [-0.10570258, -0.00699437, -0.7334142 , -0.3944154 ], [ 0.727436 , 0.53911453, -0.8468247 , -0.64365757]], dtype=float32), array([0., 0., 0., 0.], dtype=float32), array([[ 0.23318756], [-1.0404987 ], [-0.48204058], [ 1.0638199 ]], dtype=float32), array([0.], dtype=float32)] ###Markdown 这为我们提供了另一种访问网络参数的方式,如下所示。 ###Code net.get_weights()[1] ###Output _____no_output_____ ###Markdown [**从嵌套块收集参数**]让我们看看,如果我们将多个块相互嵌套,参数命名约定是如何工作的。我们首先定义一个生成块的函数(可以说是“块工厂”),然后将这些块组合到更大的块中。 ###Code def block1(name): return tf.keras.Sequential([ tf.keras.layers.Flatten(), tf.keras.layers.Dense(4, activation=tf.nn.relu)], name=name) def block2(): net = tf.keras.Sequential() for i in range(4): # 在这里嵌套 net.add(block1(name=f'block-{i}')) return net rgnet = tf.keras.Sequential() rgnet.add(block2()) rgnet.add(tf.keras.layers.Dense(1)) rgnet(X) ###Output _____no_output_____ ###Markdown [**设计了网络后,我们看看它是如何工作的。**] ###Code print(rgnet.summary()) ###Output Model: "sequential_1" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= sequential_2 (Sequential) (2, 4) 80 _________________________________________________________________ dense_6 (Dense) (2, 1) 5 
================================================================= Total params: 85 Trainable params: 85 Non-trainable params: 0 _________________________________________________________________ None ###Markdown 因为层是分层嵌套的,所以我们也可以像通过嵌套列表索引一样访问它们。下面,我们访问第一个主要的块中、第二个子块的第一层的偏置项。 ###Code rgnet.layers[0].layers[1].layers[1].weights[1] ###Output _____no_output_____ ###Markdown 参数初始化知道了如何访问参数后,现在我们看看如何正确地初始化参数。我们在 :numref:`sec_numerical_stability`中讨论了良好初始化的必要性。深度学习框架提供默认随机初始化,也允许我们创建自定义初始化方法,满足我们通过其他规则实现初始化权重。 默认情况下,Keras会根据一个范围均匀地初始化权重矩阵,这个范围是根据输入和输出维度计算出的。偏置参数设置为0。TensorFlow在根模块和`keras.initializers`模块中提供了各种初始化方法。 [**内置初始化**]让我们首先调用内置的初始化器。下面的代码将所有权重参数初始化为标准差为0.01的高斯随机变量,且将偏置参数设置为0。 ###Code net = tf.keras.models.Sequential([ tf.keras.layers.Flatten(), tf.keras.layers.Dense( 4, activation=tf.nn.relu, kernel_initializer=tf.random_normal_initializer(mean=0, stddev=0.01), bias_initializer=tf.zeros_initializer()), tf.keras.layers.Dense(1)]) net(X) net.weights[0], net.weights[1] ###Output _____no_output_____ ###Markdown 我们还可以将所有参数初始化为给定的常数,比如初始化为1。 ###Code net = tf.keras.models.Sequential([ tf.keras.layers.Flatten(), tf.keras.layers.Dense( 4, activation=tf.nn.relu, kernel_initializer=tf.keras.initializers.Constant(1), bias_initializer=tf.zeros_initializer()), tf.keras.layers.Dense(1), ]) net(X) net.weights[0], net.weights[1] ###Output _____no_output_____ ###Markdown 我们还可以[**对某些块应用不同的初始化方法**]。例如,下面我们使用Xavier初始化方法初始化第一个神经网络层,然后将第三个神经网络层初始化为常量值42。 ###Code net = tf.keras.models.Sequential([ tf.keras.layers.Flatten(), tf.keras.layers.Dense( 4, activation=tf.nn.relu, kernel_initializer=tf.keras.initializers.GlorotUniform()), tf.keras.layers.Dense( 1, kernel_initializer=tf.keras.initializers.Constant(1)), ]) net(X) print(net.layers[1].weights[0]) print(net.layers[2].weights[0]) ###Output <tf.Variable 'dense_11/kernel:0' shape=(4, 4) dtype=float32, numpy= array([[-0.7255311 , -0.6564781 , 0.14149016, -0.4179717 ], [ 0.71612567, 0.18838316, 0.22772092, 0.03770161], [-0.05643088, -0.50282514, -0.23198879, 0.52859443], [ 0.8345596 , 0.5165376 , -0.84090835, 0.69502836]], dtype=float32)> <tf.Variable 'dense_12/kernel:0' shape=(4, 1) dtype=float32, numpy= array([[1.], [1.], [1.], [1.]], dtype=float32)> ###Markdown [**自定义初始化**]有时,深度学习框架没有提供我们需要的初始化方法。在下面的例子中,我们使用以下的分布为任意权重参数$w$定义初始化方法:$$\begin{aligned} w \sim \begin{cases} U(5, 10) & \text{ 可能性 } \frac{1}{4} \\ 0 & \text{ 可能性 } \frac{1}{2} \\ U(-10, -5) & \text{ 可能性 } \frac{1}{4} \end{cases}\end{aligned}$$ 在这里,我们定义了一个`Initializer`的子类,并实现了`__call__`函数。该函数返回给定形状和数据类型的所需张量。 ###Code class MyInit(tf.keras.initializers.Initializer): def __call__(self, shape, dtype=None): data=tf.random.uniform(shape, -10, 10, dtype=dtype) factor=(tf.abs(data) >= 5) factor=tf.cast(factor, tf.float32) return data * factor net = tf.keras.models.Sequential([ tf.keras.layers.Flatten(), tf.keras.layers.Dense( 4, activation=tf.nn.relu, kernel_initializer=MyInit()), tf.keras.layers.Dense(1), ]) net(X) print(net.layers[1].weights[0]) ###Output <tf.Variable 'dense_13/kernel:0' shape=(4, 4) dtype=float32, numpy= array([[ 6.8615303, -0. , 7.944557 , -0. ], [-0. , 7.2525196, 9.074877 , -0. ], [ 9.185171 , -5.4909086, 5.9858913, -0. ], [ 7.4548893, 0. , -0. 
, 9.655563 ]], dtype=float32)> ###Markdown 注意,我们始终可以直接设置参数。 ###Code net.layers[1].weights[0][:].assign(net.layers[1].weights[0] + 1) net.layers[1].weights[0][0, 0].assign(42) net.layers[1].weights[0] ###Output _____no_output_____ ###Markdown [**参数绑定**]有时我们希望在多个层间共享参数:我们可以定义一个稠密层,然后使用它的参数来设置另一个层的参数。 ###Code # tf.keras的表现有点不同。它会自动删除重复层 shared = tf.keras.layers.Dense(4, activation=tf.nn.relu) net = tf.keras.models.Sequential([ tf.keras.layers.Flatten(), shared, shared, tf.keras.layers.Dense(1), ]) net(X) # 检查参数是否不同 print(len(net.layers) == 3) ###Output True
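###Markdown A small verification sketch appended to this chapter (not part of the original text; it assumes the `net` and `shared` objects from the parameter-tying cell above are still in scope and that the model has already been called on `X`): it confirms that the repeated layer is stored only once and that its weights are genuinely shared. ###Code
# Sketch under the assumptions stated above.
import tensorflow as tf

# tf.keras keeps a single copy of the repeated layer, so net.layers is
# [Flatten, shared, Dense(1)] and there is only one kernel/bias pair for `shared`.
print(net.layers[1] is shared)          # True: the remaining middle layer is the shared object
print(len(net.trainable_variables))     # 4: shared kernel+bias plus the output layer kernel+bias

# Mutating the shared kernel through one handle is visible through the other.
shared.kernel.assign(tf.zeros_like(shared.kernel))
print(float(tf.reduce_sum(net.layers[1].kernel)))   # 0.0

# Inventory of every trainable parameter and its shape.
for v in net.trainable_variables:
    print(v.name, v.shape)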
study_sessions/study_session6_ctrlstr_functions.ipynb
###Markdown Study session 6 - Control structures - functions BIOINF 575 - Fall 2020 _____ Function Definition - creating the funtion ```pythondef function_name(arg1, arg2, darg=None): instructions to compute result return result``` Function Call - running the function ```pythonfunction_result = function_name(val1, val2, dval)``` ____ Gene regulatory network"Formally speaking, a gene regulatory network or genetic regulatory network (GRN) is a collection of DNA segments in a cell which interact with each other (indirectly through their RNA and protein expression products) and with other substances in the cell, thereby governing the rates at which genes in the network are transcribed into mRNA. In general, each mRNA molecule goes on to make a specific protein (or set of proteins)." https://link.springer.com/referenceworkentry/10.1007%2F978-1-4419-9863-7_364 _____ Exercise- Create a list with tuples of 2 elements where the first elelment is a gene from the network above and the second element is a tuple of the genes it directly regulates (oranage links).- Write a function that returns the set of the genes that a given gene indirectly regulated through exactly one intermediate gene. _____ Exercise- Apply the function you wrote before for every pair of genes in the network that are regulating other genes and compute the intersection of the two sets. ____ Exercise- Write a function that counts the number of direct interactions (downstream and upstream) for a gene in a given network.- Test your function for at least 4 cases. _______ Exercise Explain what the following code does and decsribe how it computes the result it displays: ###Code genetic_code = { 'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M', 'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T', 'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K', 'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R', 'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L', 'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P', 'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q', 'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R', 'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V', 'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A', 'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E', 'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G', 'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S', 'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L', 'TAC':'Y', 'TAT':'Y', 'TAA':'_', 'TAG':'_', 'TGC':'C', 'TGT':'C', 'TGA':'_', 'TGG':'W'} def translate_seq(seq, trans_dict): peptide = [] for i in range(0,len(seq)-2,3): peptide.append(trans_dict.get(seq[i:i+3],"")) return "".join(peptide) DNA_sequence = "GCCGCGCGTAGGATGCC TCCGCAACCCCAGCGTaa" peptide_set = set() for sequence in DNA_sequence.split(): peptide_set.add(translate_seq(sequence, genetic_code)) peptide_set ###Output _____no_output_____
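###Markdown A minimal sketch of the data structure the first exercise asks for, with made-up gene names since the figure with the orange links is not reproduced here; `one_step_indirect` is a hypothetical helper name, not part of the course material. ###Code
# Hypothetical network: a list of (gene, tuple-of-directly-regulated-genes) pairs.
network = [
    ("geneA", ("geneB", "geneC")),
    ("geneB", ("geneD",)),
    ("geneC", ("geneD", "geneE")),
    ("geneD", ()),
    ("geneE", ()),
]

def one_step_indirect(gene, network):
    """Return the set of genes regulated through exactly one intermediate gene."""
    direct = dict(network)
    indirect = set()
    for middle in direct.get(gene, ()):
        indirect.update(direct.get(middle, ()))
    return indirect

print(one_step_indirect("geneA", network))   # {'geneD', 'geneE'}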
18-05-28-Complete-Guide-to-Tensorflow-for-Deep-Learning-with-Python/00-Crash-Course-Basics/01-Pandas-Crash-Course.ipynb
###Markdown Pandas Crash CourseWe'll use numpy a lot more than pandas, but here is a quick taste in case you haven't seen it before. ###Code import pandas as pd # Import a CSV as pandas DataFrame df = pd.read_csv('salaries.csv') df # Get a specific column from the DataFrame df['Name'] df['Salary'] df[['Name', 'Salary']] df['Age'] # Calculate mean value for the column df['Age'].mean() # Mask df['Age'] > 30 age_filter = df['Age'] > 30 df[age_filter] # Filter the DataFrame # And return values that comply with the filtering requirements df[df['Age'] > 30] ###Output _____no_output_____
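###Markdown A couple of extra filtering patterns that follow naturally from the cells above (a sketch; it assumes the same salaries.csv with Name, Salary and Age columns, and the salary threshold is an arbitrary illustration). ###Code
import pandas as pd

df = pd.read_csv('salaries.csv')

# Combine boolean masks with & (and) / | (or); each condition needs its own parentheses.
df[(df['Age'] > 30) & (df['Salary'] > 50000)]

# Sort by a column and keep only selected columns.
df[['Name', 'Salary']].sort_values(by='Salary', ascending=False)

# Quick summary statistics for the numeric columns.
df.describe()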
1.DeepLearning/07.CNN/tf20/transfer_learning_keras_tf20.ipynb
###Markdown Transfer Learning with InceptionV3 (From ImageNet to Cifar-10)- https://gogul09.github.io/software/flower-recognition-deep-learning ###Code # boilerplate code import tensorflow as tf print(tf.__version__) from tensorflow import keras import cv2 #python -m pip install opencv-python import numpy as np from tensorflow.keras.datasets import cifar10 from tensorflow.keras.utils import to_categorical from tensorflow.keras.models import Model from tensorflow.keras.layers import Dense, GlobalAveragePooling2D from tensorflow.keras.optimizers import SGD from tensorflow.keras.callbacks import LearningRateScheduler import math num_classes = 10 def load_cifar10_data(img_rows, img_cols): # Load cifar10 training and test sets (X_train, Y_train), (X_test, Y_test) = cifar10.load_data() # Resize training images X_train = np.array([cv2.resize(img, (img_rows, img_cols)) for img in X_train[:, :, :, :]]) X_test = np.array([cv2.resize(img, (img_rows, img_cols)) for img in X_test[:, :, :, :]]) # X_train = X_train.astype('float16') / 255.0 # X_test = X_test.astype('float16') / 255.0 # Transform targets to keras compatible format Y_train = to_categorical(Y_train, num_classes) Y_test = to_categorical(Y_test, num_classes) print("X_train: {0}".format(X_train.shape)) print("Y_train: {0}".format(Y_train.shape)) print("X_test: {0}".format(X_test.shape)) print("Y_test: {0}".format(Y_test.shape)) return X_train, Y_train, X_test, Y_test X_train, y_train, X_test, y_test = load_cifar10_data(299, 299) from tensorflow.keras.applications.inception_v3 import InceptionV3 def build_model(nb_classes): base_model = InceptionV3(weights='imagenet', include_top=False, input_shape=[299, 299, 3]) # add a global spatial average pooling layer x = base_model.output x = GlobalAveragePooling2D()(x) # let's add a fully-connected layer x = Dense(1024, activation='relu')(x) # and a logistic layer predictions = Dense(nb_classes, activation='softmax')(x) # this is the model we will train model = Model(inputs=base_model.input, outputs=predictions) # first: train only the top layers (which were randomly initialized) # i.e. freeze all convolutional InceptionV3 layers for layer in base_model.layers: layer.trainable = False return model model = build_model(10) model.summary() initial_lrate = 0.01 def decay(epoch, steps=100): drop = 0.96 epochs_drop = 8 lrate = initial_lrate * math.pow(drop, math.floor((1 + epoch) / epochs_drop)) return lrate lr_sc = LearningRateScheduler(decay, verbose=1) sgd = SGD(lr=initial_lrate, momentum=0.9, nesterov=True) model.compile( loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'] ) epochs = 35 history = model.fit( x=X_train, y=y_train, validation_data=(X_test, y_test), epochs=epochs, batch_size=256, callbacks=[lr_sc] ) ###Output Train on 50000 samples, validate on 10000 samples Epoch 00001: LearningRateScheduler reducing learning rate to 0.01. Epoch 1/35 50000/50000 [==============================] - 173s 3ms/sample - loss: 0.9602 - accuracy: 0.6899 - val_loss: 75.5155 - val_accuracy: 0.1106 Epoch 00002: LearningRateScheduler reducing learning rate to 0.01. Epoch 2/35 50000/50000 [==============================] - 164s 3ms/sample - loss: 0.6719 - accuracy: 0.7707 - val_loss: 79.6195 - val_accuracy: 0.1105 Epoch 00003: LearningRateScheduler reducing learning rate to 0.01. Epoch 3/35 50000/50000 [==============================] - 167s 3ms/sample - loss: 0.6184 - accuracy: 0.7882 - val_loss: 99.9694 - val_accuracy: 0.1073 Epoch 00004: LearningRateScheduler reducing learning rate to 0.01. 
Epoch 4/35 50000/50000 [==============================] - 168s 3ms/sample - loss: 0.5885 - accuracy: 0.7982 - val_loss: 95.5520 - val_accuracy: 0.1095 Epoch 00005: LearningRateScheduler reducing learning rate to 0.01. Epoch 5/35 50000/50000 [==============================] - 168s 3ms/sample - loss: 0.5680 - accuracy: 0.8039 - val_loss: 96.9085 - val_accuracy: 0.1118 Epoch 00006: LearningRateScheduler reducing learning rate to 0.01. Epoch 6/35 50000/50000 [==============================] - 167s 3ms/sample - loss: 0.5489 - accuracy: 0.8102 - val_loss: 120.0123 - val_accuracy: 0.1062 Epoch 00007: LearningRateScheduler reducing learning rate to 0.01. Epoch 7/35 50000/50000 [==============================] - 168s 3ms/sample - loss: 0.5322 - accuracy: 0.8161 - val_loss: 120.5379 - val_accuracy: 0.1116 Epoch 00008: LearningRateScheduler reducing learning rate to 0.0096. Epoch 8/35 50000/50000 [==============================] - 167s 3ms/sample - loss: 0.5186 - accuracy: 0.8216 - val_loss: 137.5567 - val_accuracy: 0.1023 Epoch 00009: LearningRateScheduler reducing learning rate to 0.0096. Epoch 9/35 50000/50000 [==============================] - 168s 3ms/sample - loss: 0.5067 - accuracy: 0.8250 - val_loss: 135.9308 - val_accuracy: 0.1071 Epoch 00010: LearningRateScheduler reducing learning rate to 0.0096. Epoch 10/35 50000/50000 [==============================] - 168s 3ms/sample - loss: 0.4931 - accuracy: 0.8290 - val_loss: 135.0156 - val_accuracy: 0.1075 Epoch 00011: LearningRateScheduler reducing learning rate to 0.0096. Epoch 11/35 50000/50000 [==============================] - 166s 3ms/sample - loss: 0.4871 - accuracy: 0.8297 - val_loss: 119.6107 - val_accuracy: 0.1249 Epoch 00012: LearningRateScheduler reducing learning rate to 0.0096. Epoch 12/35 50000/50000 [==============================] - 167s 3ms/sample - loss: 0.4720 - accuracy: 0.8368 - val_loss: 133.6296 - val_accuracy: 0.1099 Epoch 00013: LearningRateScheduler reducing learning rate to 0.0096. Epoch 13/35 50000/50000 [==============================] - 166s 3ms/sample - loss: 0.4627 - accuracy: 0.8401 - val_loss: 137.7038 - val_accuracy: 0.1115 Epoch 00014: LearningRateScheduler reducing learning rate to 0.0096. Epoch 14/35 50000/50000 [==============================] - 165s 3ms/sample - loss: 0.4529 - accuracy: 0.8440 - val_loss: 139.3622 - val_accuracy: 0.1120 Epoch 00015: LearningRateScheduler reducing learning rate to 0.0096. Epoch 15/35 50000/50000 [==============================] - 164s 3ms/sample - loss: 0.4383 - accuracy: 0.8471 - val_loss: 159.8944 - val_accuracy: 0.1095 Epoch 00016: LearningRateScheduler reducing learning rate to 0.009216. Epoch 16/35 50000/50000 [==============================] - 164s 3ms/sample - loss: 0.4269 - accuracy: 0.8524 - val_loss: 156.2141 - val_accuracy: 0.1092 Epoch 00017: LearningRateScheduler reducing learning rate to 0.009216. Epoch 17/35 50000/50000 [==============================] - 165s 3ms/sample - loss: 0.4172 - accuracy: 0.8556 - val_loss: 141.9600 - val_accuracy: 0.1108 Epoch 00018: LearningRateScheduler reducing learning rate to 0.009216. Epoch 18/35 50000/50000 [==============================] - 164s 3ms/sample - loss: 0.4067 - accuracy: 0.8596 - val_loss: 158.9483 - val_accuracy: 0.1139 Epoch 00019: LearningRateScheduler reducing learning rate to 0.009216. Epoch 19/35 50000/50000 [==============================] - 165s 3ms/sample - loss: 0.3942 - accuracy: 0.8645 - val_loss: 157.3088 - val_accuracy: 0.1111 Epoch 00020: LearningRateScheduler reducing learning rate to 0.009216. 
Epoch 20/35 50000/50000 [==============================] - 162s 3ms/sample - loss: 0.3854 - accuracy: 0.8674 - val_loss: 171.4200 - val_accuracy: 0.1055 Epoch 00021: LearningRateScheduler reducing learning rate to 0.009216. Epoch 21/35 50000/50000 [==============================] - 162s 3ms/sample - loss: 0.3739 - accuracy: 0.8720 - val_loss: 149.3375 - val_accuracy: 0.1153 Epoch 00022: LearningRateScheduler reducing learning rate to 0.009216. Epoch 22/35 50000/50000 [==============================] - 162s 3ms/sample - loss: 0.3622 - accuracy: 0.8768 - val_loss: 157.7161 - val_accuracy: 0.1247 Epoch 00023: LearningRateScheduler reducing learning rate to 0.009216. Epoch 23/35 50000/50000 [==============================] - 163s 3ms/sample - loss: 0.3507 - accuracy: 0.8794 - val_loss: 157.6633 - val_accuracy: 0.1165 Epoch 00024: LearningRateScheduler reducing learning rate to 0.008847359999999999. Epoch 24/35 50000/50000 [==============================] - 162s 3ms/sample - loss: 0.3390 - accuracy: 0.8849 - val_loss: 167.3696 - val_accuracy: 0.1098 Epoch 00025: LearningRateScheduler reducing learning rate to 0.008847359999999999. Epoch 25/35 50000/50000 [==============================] - 162s 3ms/sample - loss: 0.3278 - accuracy: 0.8920 - val_loss: 166.1665 - val_accuracy: 0.1176 Epoch 00026: LearningRateScheduler reducing learning rate to 0.008847359999999999. Epoch 26/35 50000/50000 [==============================] - 163s 3ms/sample - loss: 0.3165 - accuracy: 0.8953 - val_loss: 162.0232 - val_accuracy: 0.1153 Epoch 00027: LearningRateScheduler reducing learning rate to 0.008847359999999999. Epoch 27/35 50000/50000 [==============================] - 161s 3ms/sample - loss: 0.3059 - accuracy: 0.9012 - val_loss: 158.9673 - val_accuracy: 0.1154 Epoch 00028: LearningRateScheduler reducing learning rate to 0.008847359999999999. Epoch 28/35 50000/50000 [==============================] - 163s 3ms/sample - loss: 0.2946 - accuracy: 0.9040 - val_loss: 168.1538 - val_accuracy: 0.1211 Epoch 00029: LearningRateScheduler reducing learning rate to 0.008847359999999999. Epoch 29/35 50000/50000 [==============================] - 161s 3ms/sample - loss: 0.2847 - accuracy: 0.9083 - val_loss: 162.2235 - val_accuracy: 0.1226 Epoch 00030: LearningRateScheduler reducing learning rate to 0.008847359999999999. Epoch 30/35 50000/50000 [==============================] - 162s 3ms/sample - loss: 0.2734 - accuracy: 0.9118 - val_loss: 171.6387 - val_accuracy: 0.1206 Epoch 00031: LearningRateScheduler reducing learning rate to 0.008847359999999999. Epoch 31/35 50000/50000 [==============================] - 162s 3ms/sample - loss: 0.2630 - accuracy: 0.9166 - val_loss: 185.3488 - val_accuracy: 0.1155 Epoch 00032: LearningRateScheduler reducing learning rate to 0.008493465599999998. Epoch 32/35 50000/50000 [==============================] - 161s 3ms/sample - loss: 0.2500 - accuracy: 0.9231 - val_loss: 187.1196 - val_accuracy: 0.1176 Epoch 00033: LearningRateScheduler reducing learning rate to 0.008493465599999998. Epoch 33/35 50000/50000 [==============================] - 161s 3ms/sample - loss: 0.2411 - accuracy: 0.9261 - val_loss: 175.4048 - val_accuracy: 0.1178 Epoch 00034: LearningRateScheduler reducing learning rate to 0.008493465599999998. Epoch 34/35 50000/50000 [==============================] - 162s 3ms/sample - loss: 0.2306 - accuracy: 0.9304 - val_loss: 177.7775 - val_accuracy: 0.1156 Epoch 00035: LearningRateScheduler reducing learning rate to 0.008493465599999998. 
Epoch 35/35 50000/50000 [==============================] - 162s 3ms/sample - loss: 0.2187 - accuracy: 0.9365 - val_loss: 173.0359 - val_accuracy: 0.1231
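###Markdown The validation metrics above stay near chance while training accuracy keeps climbing. The sketch below is the standard second stage from the Keras InceptionV3 fine-tuning example (unfreeze the top blocks, recompile with a much smaller learning rate); it assumes `model`, `X_train`, `y_train`, `X_test`, `y_test` from the cells above are still in memory, and the cut-off index 249 is the value used in that example, not something fixed by these runs. Feeding the resized images through `inception_v3.preprocess_input` (in batches, to avoid a full float32 copy of the arrays) is another commonly suggested step; neither is a verified fix for the numbers shown here. ###Code
# Sketch of a fine-tuning stage, under the assumptions stated above.
from tensorflow.keras.optimizers import SGD

for layer in model.layers[:249]:
    layer.trainable = False
for layer in model.layers[249:]:
    layer.trainable = True

# Small learning rate so the pretrained filters move slowly.
model.compile(optimizer=SGD(lr=1e-4, momentum=0.9, nesterov=True),
              loss='categorical_crossentropy',
              metrics=['accuracy'])

history_ft = model.fit(x=X_train, y=y_train,
                       validation_data=(X_test, y_test),
                       epochs=5, batch_size=256)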
Assignment1/IGA-411 Assignment1 RW.ipynb
###Markdown IGA 411A The Energy-Climate ChallengeAssignment 1, Sept. 15, 2020Ramon Weber 1. a Assumptions:US Energy use 2018 = 103 exajoule / per yearExajoule (1 EJ = 10^18 J)Terawatt (1 TW = 1000 GW) is 10^(12) J/sec ###Code num_sec_year = 365*24*60*60 j = 103 * (10**18) # joule per year W = j/num_sec_year TW = W / (10**12) kW = W / 1000 print(TW) num_people = 327075203 #Source: https://www.census.gov/popclock/ for September 13, 2018 us_usage = kW/num_people print("kW/person ", us_usage) world_people = 7631091040 # Source https://www.worldometers.info/world-population/world-population-by-year/ for 2018 world_usage_kW = us_usage*world_people world_usage_TW = (world_usage_kW*1000)/(10**12) print("us_usage", TW, " world_usage", world_usage_TW) print("world_usage_kW", world_usage_kW) ###Output 3.2661085743277525 kW/person 9.98580309473278 us_usage 3.2661085743277525 world_usage 76.20257252341959 world_usage_kW 76202572523.41959 ###Markdown 1. b1850: 25 EJ1950: 100 EJ2000: 450 EJ ###Code def convEJTW(year, EJ, num_people): j = EJ * (10**18) # joule per year W = j/num_sec_year TW = W / (10**12) kW = W / 1000 energy_usage = kW/num_people print(year, "- energy usage per person in kW", energy_usage) print(year, "- energy usage in TW: ", TW) print() convEJTW(2000, 450, 6143493823) convEJTW(1950, 100, 2584034261) convEJTW(1850, 25, 1200000000) convEJTW(2018, 630, 7594000000) def energyTot(num_people, energy_per_p): world_people = num_people world_usage_kW = energy_per_p*world_people world_usage_TW = (world_usage_kW*1000)/(10**12) print(" world_usage", world_usage_TW, "TW") energyTot(7594000000, 2.630651692095298) energyTot(9700000000, 9.98580309473278) energyTot(9700000000, 3.5) ###Output world_usage 19.97716894977169 TW world_usage 96.86229001890797 TW world_usage 33.95 TW ###Markdown 2. a ###Code years = 30 increase = 1.01 newtot = 2.630651692095298 for i in range(years): newtot *= increase print(newtot) ###Output 3.5457210298093202
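###Markdown A compact restatement of the unit conversions used in this assignment (same arithmetic as above, wrapped in two helpers so individual cases are easy to check; the printed figures reproduce the 2018 world numbers computed earlier). ###Code
SECONDS_PER_YEAR = 365 * 24 * 60 * 60

def ej_per_year_to_tw(ej_per_year):
    """Convert primary energy use in EJ/year to average power in TW."""
    joules = ej_per_year * 10**18
    return joules / SECONDS_PER_YEAR / 10**12

def per_capita_kw(ej_per_year, population):
    """Average continuous power per person, in kW."""
    return ej_per_year_to_tw(ej_per_year) * 10**9 / population

print(ej_per_year_to_tw(630))              # ~20 TW for 2018
print(per_capita_kw(630, 7594000000))      # ~2.63 kW per person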
examples/resizing_tutorial.ipynb
###Markdown Ordinary situation ###Code X_train, y_train = load_basic_motions(split='TRAIN', return_X_y=True) X_test, y_test = load_basic_motions(split='TEST', return_X_y=True) steps = [ ('concatenate', ColumnConcatenator()), ('classify', TimeSeriesForestClassifier(n_estimators=100))] clf = Pipeline(steps) clf.fit(X_train, y_train) clf.score(X_test, y_test) ###Output D:\Python37\lib\site-packages\sklearn\base.py:197: FutureWarning: From version 0.24, get_params will raise an AttributeError if a parameter cannot be retrieved as an instance attribute. Previously it would return None. FutureWarning) ###Markdown If time serial are unequal length -> error inside algorithm ###Code # randomly cut the data series def random_cut(df): for row_i in range(df.shape[0]): for dim_i in range(df.shape[1]): ts = df.at[row_i, f'dim_{dim_i}'] df.at[row_i, f'dim_{dim_i}'] = pd.Series(ts.tolist()[:random.randint(len(ts)-5, len(ts)-3)]) # here is a problem X_train, y_train = load_basic_motions(split='TRAIN', return_X_y=True) X_test, y_test = load_basic_motions(split='TEST', return_X_y=True) for df in [X_train, X_test]: random_cut(df) try: steps = [ ('concatenate', ColumnConcatenator()), ('classify', TimeSeriesForestClassifier(n_estimators=100))] clf = Pipeline(steps) clf.fit(X_train, y_train) clf.score(X_test, y_test) except IndexError as e: print(f"IndexError: {e}") ###Output _____no_output_____ ###Markdown lets investigate the error There are two errors. First is in the way that np.hstack stacks columns that have inequal sized arrays in cells. ###Code # all ok np.hstack([ [[1,1],[2,2],[3,3]], [[4,4],[5,5],[6,6]] ]).shape # this throws an error np.hstack([ [[1,1,6],[2,2],[3,3]], [[4,4],[5,5],[6,6]] ]).shape # and this not this thing stacks not horizontally but vertically... np.hstack([ [[1,1,6],[2,2],[3,3]], [[4,4],[5,5, 2],[6,6]] ]).shape ###Output _____no_output_____ ###Markdown second error may appear if you use unidimensional time series ###Code # here error is in the algorithm itself - index out of range from sktime.datasets import load_gunpoint X_train, y_train = load_gunpoint(split='TRAIN', return_X_y=True) X_test, y_test = load_gunpoint(split='TEST', return_X_y=True) for df in [X_train, X_test]: random_cut(df) clf = TimeSeriesForestClassifier(n_estimators=100) clf.fit(X_train, y_train) clf.score(X_test, y_test) ###Output _____no_output_____ ###Markdown Now the resizing enrolls ###Code from sktime.transformers.resizing import TSResizeTransform X_train, y_train = load_basic_motions(split='TRAIN', return_X_y=True) X_test, y_test = load_basic_motions(split='TEST', return_X_y=True) for df in [X_train, X_test]: random_cut(df) steps = [ ('transform', TSResizeTransform(50)), ('concatenate', ColumnConcatenator()), ('classify', TimeSeriesForestClassifier(n_estimators=100))] clf = Pipeline(steps) clf.fit(X_train, y_train) clf.score(X_test, y_test) # code import numpy as np import pandas as pd from scipy import interpolate from sktime.transformers.base import BaseTransformer class TSResizeTransform(BaseTransformer): """Transformer that get casual dataframe of time series and resizes Series to user length via scipy interp1d between received points. """ def __init__(self, length): """ Parameters ---------- length : integer, the length of time series to resize to. 
""" assert(length>0) self.length = length super(TSResizeTransform).__init__() def __resizeCell(self, cell): f = interpolate.interp1d(list(np.linspace(0, 1, len(cell))), list(cell)) return f(np.linspace(0, 1, self.length)) def __resizeCol(self, coll): return coll.apply(self.__resizeCell) def transform(self, X, y=None): """Resizes time series in each cell of dataframe and returns it. Parameters ---------- X : nested pandas DataFrame of shape [n_samples, n_features] Nested dataframe with time-series in cells. Returns ------- Xt : pandas DataFrame Transformed pandas DataFrame with same number of rows and columns """ return X.apply(self.__resizeCol) ###Output _____no_output_____
experiment4_1/Convex_SNNs.ipynb
###Markdown Build train and test sets ###Code # generate training data P_train = 100 x_lim = 4 x1 = np.linspace(-x_lim, x_lim, P_train) x2 = np.linspace(-x_lim, x_lim, P_train) X1_train, X2_train = np.meshgrid(x1, x2) def func(x1, x2): y = 0.3*(x1**2 + x2**2) return y Y_targ_train = func(X1_train, X2_train) Y_ravelled = Y_targ_train.ravel() X_train = np.vstack((X1_train.ravel(), X2_train.ravel())).T Y_train = Y_ravelled[:, None] T = 4 # simulation time dt = 3e-03 # time step t_span = np.arange(0, T, dt) num_bins = t_span.size buffer_bins = int(1/dt) buffer_zeros = int(buffer_bins/2) K = 2 x_sample = np.zeros((K, num_bins)) data_index = 1 print(X_train) print(X_train[data_index, :][:, None]) x_sample[:, buffer_zeros:] = X_train[data_index, :][:, None] print(x_sample) print(x_sample.shape) # create test data P_test = 500 x_lim = 4 x1 = np.linspace(-x_lim, x_lim, P_test) x2 = np.linspace(-x_lim, x_lim, P_test) X1_test, X2_test = np.meshgrid(x1, x2) Y_targ_test = func(X1_test, X2_test) Y_ravelled_test = Y_targ_test.ravel() X_test = np.vstack((X1_test.ravel(), X2_test.ravel())).T Y_test = Y_ravelled_test[:, None] # Plot a 3D surface fig = plt.figure(figsize=(8, 9)) ax1 = fig.add_subplot(111, projection='3d') ax1.set_xlabel('$x_1$') ax1.set_ylabel('$x_2$') ax1.set_zlabel('$y$') # Plot a 3D surface target_surface = ax1.plot_surface(X1_test, X2_test, Y_targ_test, alpha=0.5) fig.suptitle('Target surface', fontsize=18) plt.show() ###Output _____no_output_____ ###Markdown Network parameters ###Code # setting up dimensions and initial parameters M = 1 K = 2 N = 50 leak = 2 # initialize my gamma matrix random_state = np.random.RandomState(seed=3) D_weights_init = random_state.rand(M, N) D_weights_init = D_weights_init / np.linalg.norm(D_weights_init, axis=0) G_weights_init = D_weights_init.copy().T F_weights_init = random_state.randn(K, N).T omega_init = -G_weights_init @ D_weights_init thresholds_init = 2*random_state.rand(N) - 1 F_weights_2_init = random_state.randn(N, N).T G_weights_2_init = G_weights_init omega_2_init = omega_init thresholds_2_init = 2*random_state.rand(N) - 1 ###Output _____no_output_____ ###Markdown Before learningWe run the network with the inital parameters and compute an average readout for each input sample. Thus, we can get the SNN surface before learning.Also, since the output dimension of the network is one, we can use a maxout function - refer to equation (10) of the paper - to get the network output (without discretization error). We also check which neuron contributes to the code for each input sample (active constraints) by looking at the argument that maximizes the maxout function. 
###Code T = 4 # simulation time dt = 3e-03 # time step t_span = np.arange(0, T, dt) num_bins = t_span.size buffer_bins = int(1/dt) buffer_zeros = int(buffer_bins/2) x_sample = np.zeros((K, num_bins)) # initialize network parameters D_weights = D_weights_init.copy() G_weights = G_weights_init.copy() F_weights = F_weights_init.copy() omega = omega_init.copy() thresholds = thresholds_init.copy() G_weights_2 = G_weights_2_init.copy() F_weights_2 = F_weights_2_init.copy() omega_2 = omega_2_init.copy() thresholds_2 = thresholds_2_init.copy() y_readout = [] for data_index in range(X_train.shape[0]): x_sample[:, buffer_zeros:] = X_train[data_index, :][:, None] rates = snn_cvx.run_snn_trial( x_sample, F_weights, omega, thresholds, dt, leak, ) y_readout += [np.copy(D_weights[0, :] @ rates)] average_readouts = np.array(y_readout)[:, buffer_zeros + 500:].mean(axis=1) # Plot a 3D surface fig = plt.figure(figsize=(8, 9)) ax1 = fig.add_subplot(111, projection='3d') ax1.set_xlabel('$x_1$') ax1.set_ylabel('$x_2$') ax1.set_zlabel('$y$') # Plot a 3D surface readout_surf_snn = ax1.plot_surface(X1_train, X2_train, average_readouts.reshape(Y_targ_train.shape), alpha=0.5) fig.suptitle('SNN surface before learning', fontsize=18) plt.show() # plot contours and active-inactive neurons active_neurons_init = np.zeros(X_test.shape[0]) * np.nan y_predict_init = np.zeros(X_test.shape[0]) for i, x in enumerate(X_test): y_out_init, n_act = snn_cvx.run_maxout(x, F_weights_init, G_weights_init, thresholds_init) y_predict_init[i] = y_out_init active_neurons_init[i] = n_act y_predict_init_reshaped = y_predict_init.reshape(Y_targ_test.shape) active_neurons_init_reshaped = active_neurons_init.reshape(Y_targ_test.shape) # make contour plots zlim =14 ticks = [(i, i) for i in np.linspace(x_lim, -x_lim, 3)] c_ticks = [(i, i) for i in np.linspace(0, zlim, 2)] clim = (0, zlim) bounds = (-x_lim, -x_lim, x_lim, x_lim) nlevels = 40 cmap_neurons = 'glasbey_dark' cmap_contour='gray' alpha=1 img = hv.Image(y_predict_init_reshaped, kdims=['$x_1$', '$x_2$'], vdims='$y$', bounds=bounds).opts( cmap=cmap_contour, invert_zaxis=True, clims=clim) levels = list(np.linspace(-zlim, zlim, nlevels)) img_contour_init = hv.operation.contours(img, group='Y', levels=levels).opts( xticks=3, yticks=3, colorbar=True, cmap=cmap_contour, cbar_ticks=c_ticks, clim=clim, alpha=alpha, linewidth=1.5, ) # show which neuron is active img_nactive_init = hv.Image(active_neurons_init_reshaped, kdims=['$x_1$', '$x_2$'], vdims='$y$', bounds=bounds).opts(cmap=cmap_neurons, alpha=0.6) # plot contours and active neurons img_nactive_init*img_contour_init ###Output _____no_output_____ ###Markdown Train network parameters ###Code T = 4 # simulation time dt = 3e-03 # time step t_span = np.arange(0, T, dt) num_bins = t_span.size buffer_bins = int(1/dt) buffer_zeros = int(buffer_bins/2) x_sample = np.zeros((K, num_bins)) # initialize network parameters D_weights = D_weights_init.copy() G_weights = G_weights_init.copy() F_weights = F_weights_init.copy() omega = omega_init.copy() thresholds = thresholds_init.copy() G_weights_2 = G_weights_2_init.copy() F_weights_2 = F_weights_2_init.copy() omega_2 = omega_2_init.copy() thresholds_2 = thresholds_2_init.copy() # run supervised learning alpha_thresh_init = 1e-03 alpha_F_init = 1e-03 leak_thresh = 0. 
num_epochs = 100 thresholds_array_fit = np.zeros((N, num_epochs)) F_weights_array_fit = np.zeros((N, K, num_epochs)) decrease_learning_rate = True for epoch in range(num_epochs): print ('iteration: ',epoch+1) data_index_list = np.arange(X_train.shape[0]) np.random.shuffle(data_index_list) if decrease_learning_rate: alpha_thresh = alpha_thresh_init * np.exp(-0.0001 * (epoch + 1)) alpha_F = alpha_F_init * np.exp(-0.0001 * (epoch + 1)) else: alpha_thresh = alpha_thresh_init alpha_F = alpha_F_init for data_index in data_index_list: x_sample[:, buffer_zeros:] = X_train[data_index, :][:, None] y_sample = Y_train[data_index, :] thresholds, F_weights, thresholds_2, F_weights_2 = snn_cvx.update_weights_2( x_sample, y_sample, F_weights,F_weights_2, G_weights,G_weights_2, omega,omega_2, thresholds,thresholds_2, buffer_bins, dt, leak, leak_thresh, alpha_thresh, alpha_F, mu=0., sigma_v=0. ) thresholds_array_fit[:, epoch] = thresholds F_weights_array_fit[:, :, epoch] = F_weights ###Output _____no_output_____ ###Markdown After learning ###Code # run snn with learnt parameters x_sample = np.zeros((K, num_bins)) # call learnt parameters F_weights_fit = F_weights_array_fit[:, :, -1] thresholds_fit = thresholds_array_fit[:, -1] y_readout = [] for data_index in range(X_train.shape[0]): x_sample[:, buffer_zeros:] = X_train[data_index, :][:, None] rates = snn_cvx.run_snn_trial( x_sample, F_weights_fit, omega, thresholds_fit, dt, leak, ) y_readout += [np.copy(D_weights[0, :] @ rates)] average_readouts_fit = np.array(y_readout)[:, buffer_zeros + 500:].mean(axis=1) # Plot a 3D surface fig = plt.figure(figsize=(8, 9)) ax1 = fig.add_subplot(111, projection='3d') ax1.set_xlabel('$x_1$') ax1.set_ylabel('$x_2$') ax1.set_zlabel('$y$') # Plot a 3D surface readout_surf_snn = ax1.plot_surface(X1_train, X2_train, average_readouts_fit.reshape(Y_targ_train.shape), alpha=0.5) fig.suptitle('SNN surface after learning', fontsize=18) plt.show() # plot contours and active-inactive neurons after learning parameters active_neurons_fit = np.zeros(X_test.shape[0]) * np.nan y_predict_fit = np.zeros(X_test.shape[0]) for i, x in enumerate(X_test): y_out_fit, n_act = snn_cvx.run_maxout(x, F_weights_fit, G_weights, thresholds_fit) y_predict_fit[i] = y_out_fit active_neurons_fit[i] = n_act y_predict_fit_reshaped = y_predict_fit.reshape(Y_targ_test.shape) active_neurons_fit_reshaped = active_neurons_fit.reshape(Y_targ_test.shape) # make contour plots zlim =14 ticks = [(i, i) for i in np.linspace(x_lim, -x_lim, 3)] c_ticks = [(i, i) for i in np.linspace(0, zlim, 2)] clim = (0, zlim) bounds = (-x_lim, -x_lim, x_lim, x_lim) nlevels = 40 cmap_neurons = 'glasbey_dark' cmap_contour='gray' alpha=1 img = hv.Image(y_predict_fit_reshaped, kdims=['$x_1$', '$x_2$'], vdims='$y$', bounds=bounds).opts( cmap=cmap_contour, invert_zaxis=True, clims=clim) levels = list(np.linspace(-zlim, zlim, nlevels)) img_contour_fit = hv.operation.contours(img, group='Y', levels=levels).opts( xticks=3, yticks=3, colorbar=True, cmap=cmap_contour, cbar_ticks=c_ticks, clim=clim, alpha=alpha, linewidth=1.5, ) # show which neuron is active img_nactive_fit = hv.Image(active_neurons_fit_reshaped, kdims=['$x_1$', '$x_2$'], vdims='$y$', bounds=bounds).opts(cmap=cmap_neurons, alpha=0.6) # plot contours and active neurons img_nactive_fit*img_contour_fit ###Output _____no_output_____
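###Markdown A small end-of-run comparison added here (a sketch that reuses the arrays computed above): root-mean-square error of the spiking readout against the target surface, before and after learning. ###Code
import numpy as np

# Assumes average_readouts, average_readouts_fit and Y_targ_train from the cells above.
rmse_before = np.sqrt(np.mean((average_readouts - Y_targ_train.ravel()) ** 2))
rmse_after = np.sqrt(np.mean((average_readouts_fit - Y_targ_train.ravel()) ** 2))
print(f"RMSE before learning: {rmse_before:.3f}")
print(f"RMSE after learning:  {rmse_after:.3f}")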
10 - Splitting test and training data with scikit-learn/10 - Train Test split.ipynb
###Markdown Splitting test and training dataWhen you train a data model you may need to split up your data into test and training data setsTo accomplish this task we will use the [scikit-learn](https://scikit-learn.org/stable/) libraryscikit-learn is an open source, BSD licensed library for data science for preprocessing and training models. Before we can split our data test and training data, we need to do some data preparation ###Code import pandas as pd ###Output _____no_output_____ ###Markdown Let's load our csv file with information about flights and flight delaysUse **shape** to find out how many rows and columns are in the original DataFrame ###Code delays_df = pd.read_csv('Data/Lots_of_flight_data.csv') delays_df.shape ###Output _____no_output_____ ###Markdown Split data into features and labelsCreate a DataFrame called X containing only the features we want to use to train our model.**Note** You can only use numeric values as features, if you have non-numeric values you must apply different techniques such as Hot Encoding to convert these into numeric values before using them as features to train a model. Check out Data Science courses for more information on these techniques! ###Code X = delays_df.loc[:,['DISTANCE', 'CRS_ELAPSED_TIME']] X.head() ###Output _____no_output_____ ###Markdown Create a DataFrame called y containing only the value we want to predict with our model. In our case we want to predict how many minutes late a flight will arrive. This information is in the ARR_DELAY column. ###Code y = delays_df.loc[:,['ARR_DELAY']] y.head() ###Output _____no_output_____ ###Markdown Split into test and training dataUse **scikitlearn train_test_split** to move 30% of the rows into Test DataFramesThe other 70% of the rows into DataFrames we can use to train our modelNOTE: by specifying a value for *random_state* we ensure that if we run the code again the same rows will be moved into the test DataFrame. This makes our results repeatable. 
###Code from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.3, random_state=42 ) ###Output _____no_output_____ ###Markdown We now have a DataFrame **X_train** which contains 70% of the rowsWe will use this DataFrame to train our model ###Code X_train.shape ###Output _____no_output_____ ###Markdown The DataFrame **X_test** contains the remaining 30% of the rowsWe will use this DataFrame to test our trained model, so we can check it's accuracy ###Code X_test.shape ###Output _____no_output_____ ###Markdown **X_train** and **X_test** contain our featuresThe features are the columns we think can help us predict how late a flight will arrive: **DISTANCE** and **CRS_ELAPSED_TIME** ###Code X_train.head() ###Output _____no_output_____ ###Markdown The DataFrame **y_train** contains 70% of the rowsWe will use this DataFrame to train our model If you don't need to keep the original DataFrame, you can just delete the rows within the existing DataFrame instead of creating a new one**inplace=*True*** indicates you want to drop the rows in the specified DataFrame ###Code y_train.shape ###Output _____no_output_____ ###Markdown The DataFrame **y_test** contains the remaining 30% of the rowsWe will use this DataFrame to test our trained model, so we can check it's accuracy ###Code y_test.shape ###Output _____no_output_____ ###Markdown **y_train** and **y_test** contain our labelThe label is the columns we want to predict with our trained model: **ARR_DELAY****NOTE:** a negative value for ARR_DELAY indicates a flight arrived early ###Code y_train.head() ###Output _____no_output_____
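###Markdown One extra sanity check that usually accompanies this split (a sketch; it assumes X, y and the four split DataFrames from the cells above): confirm the 70/30 proportions and that the label looks statistically similar in both halves. ###Code
print(len(X_train) / len(X), len(X_test) / len(X))   # roughly 0.7 and 0.3

# A random split should leave the label distribution roughly unchanged.
print(y_train['ARR_DELAY'].mean(), y_test['ARR_DELAY'].mean())
print(y_train['ARR_DELAY'].std(), y_test['ARR_DELAY'].std())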
demos/error_and_fp/Floating Point and the Series for the Exponential Function.ipynb
###Markdown Floating Point Arithmetic and the Series for the Exponential FunctionCopyright (C) 2010-2020 Luke OlsonCopyright (C) 2020 Andreas KloecknerMIT LicensePermission is hereby granted, free of charge, to any person obtaining a copyof this software and associated documentation files (the "Software"), to dealin the Software without restriction, including without limitation the rightsto use, copy, modify, merge, publish, distribute, sublicense, and/or sellcopies of the Software, and to permit persons to whom the Software isfurnished to do so, subject to the following conditions:The above copyright notice and this permission notice shall be included inall copies or substantial portions of the Software.THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS ORIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THEAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHERLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS INTHE SOFTWARE. ###Code import numpy as np import matplotlib.pyplot as pt ###Output _____no_output_____ ###Markdown What this demo does is sum the series$$ \exp(x) \approx \sum_{i=0}^n \frac{x^i}{i!},$$for varying $n$, and varying $x$. It then prints the partial sum, the true value, and the final term of the series. ###Code a = 0.0 x = 1e0 # flip sign true_f = np.exp(x) e = [] for i in range(0, 10): # crank up d = np.prod( np.arange(1, i+1).astype(np.float)) # series for exp a += x**i / d print(a, np.exp(x), x**i / d) e.append(abs(true_f-a)/true_f) pt.semilogy(e) ###Output _____no_output_____
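###Markdown A follow-up experiment in the same spirit (a sketch; "flip the sign" as the comment in the loop above suggests): for a moderately large negative argument the alternating series loses most of its accuracy to cancellation, while summing the series for |x| and taking the reciprocal stays accurate. ###Code
import numpy as np

x = -20.0

def factorial(i):
    # np.prod of an empty range is 1.0, matching the convention used above
    return np.prod(np.arange(1, i + 1, dtype=float))

naive = sum(x**i / factorial(i) for i in range(100))              # alternating terms cancel badly
via_reciprocal = 1.0 / sum(abs(x)**i / factorial(i) for i in range(100))

print(naive, via_reciprocal, np.exp(x))
print("relative error (naive):     ", abs(naive - np.exp(x)) / np.exp(x))
print("relative error (reciprocal):", abs(via_reciprocal - np.exp(x)) / np.exp(x))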
Scripts/IDEA_3/Energy_Preserving_Neural_Network_3_deep.ipynb
###Markdown This notebook explores the Energy Preserving Neural Network Idea!------------------------------------------------------------------------------------------------------------------- Dataset used => MNIST------------------------------------------------------------------------------------------------------------------- Technology used => TensorFlow ###Code # import all the required packages: # packages used for processing: from __future__ import print_function # making backward compatible import matplotlib.pyplot as plt # for visualization import numpy as np # THE TensorFlow framework import tensorflow as tf # use the tensorflow's archived version of the MNIST dataset from tensorflow.examples.tutorials.mnist import input_data # for operating system related stuff import os import sys # for memory usage of objects from subprocess import check_output # to plot the images inline %matplotlib inline # Input data files are available in the "../Data/" directory. def exec_command(cmd): ''' function to execute a shell command and see it's output in the python console @params cmd = the command to be executed along with the arguments ex: ['ls', '../input'] ''' print(check_output(cmd).decode("utf8")) # check the structure of the project directory exec_command(['ls', '../..']) # set a seed value for the script seed_value = 3 np.random.seed(seed_value) # set this seed for a device independant consistent behaviour ''' Set the constants for the script ''' # various paths of the files base_data_path = "../../Data" # the data path base_model_path = "../../Models/IDEA_3" # constant values for the script num_digits = 10 # This is defined. There are 10 labels for 10 digits img_dim = 28 # images are 28 x 28 sized num_channels = 1 # images are grayscale # Hyper parameters for tweaking. # ================================================================================================================= training_batch_size = 64 # 64 images in each batch no_of_epochs = 50 # network architecture related parameters: ''' Note that the number of layers will be fixed. you can tweak the number of hidden neurons in these layers: ''' num_hidden_lay_1 = 512 num_hidden_lay_2 = 512 num_hidden_lay_3 = num_digits # learning rate required for other optimizers: learning_rate = 3e-4 # lolz! 
the karpathy constant # ================================================================================================================= mnist = input_data.read_data_sets(os.path.join(base_data_path, "MNIST_data"), seed=seed_value, one_hot=True) train_X = mnist.train.images; train_Y = mnist.train.labels dev_X = mnist.validation.images; dev_Y = mnist.validation.labels test_X = mnist.test.images; test_Y = mnist.test.labels # print all the shapes: print("Training Data shapes: ", train_X.shape, train_Y.shape) print("Development Data shapes: ", dev_X.shape, dev_Y.shape) print("Test Data shapes: ", test_X.shape, test_Y.shape) # define the total_train_examples, total_dev_examples and the total_test_examples using the above arrays total_train_examples = train_X.shape[0] total_dev_examples = dev_X.shape[0] total_test_examples = test_X.shape[0] input_dimension = train_X.shape[1] # just double checking if all the values are correct: print("Training_data_size =", total_train_examples) print("Development_data_size =", total_dev_examples) print("Test_data_size =", total_test_examples) print("Input data dimensions =", input_dimension) ''' Randomized cell: Behaviour changes upon running multiple times ''' random_index = np.random.randint(total_train_examples) # bring the random image from the training data random_image = train_X[random_index].reshape((img_dim, img_dim)) label_for_random_image = np.argmax(train_Y[random_index]) # display this random image: plt.figure().suptitle("Image for digit: " + str(label_for_random_image)) plt.imshow(random_image); ###Output _____no_output_____ ###Markdown Experimentation: In this notebook, I'll compare three different versions of neural networks. First is the usual neural network that uses batch_normalization and ReLU's everywhere. Second is the scattering network that fully preserves the energy (norm) of the input vector. Third is the hybrid of the two. ###Code tf.reset_default_graph() # define the placeholders for the model: with tf.name_scope("Input_Placeholders"): tf_input_images = tf.placeholder(tf.float32, shape=(None, input_dimension), name="input_images") tf_input_labels = tf.placeholder(tf.float32, shape=(None, num_digits), name="input_labels") # add input images summary: input_image_summary = tf.summary.image("Input_images", tf.reshape(tf_input_images, shape=(-1, img_dim, img_dim, num_channels))) ###Output _____no_output_____ ###Markdown define the three layers for the modified neural network: ###Code # define the first layer: with tf.variable_scope("layer_1"): # create the matrix variable lay_1_connections = tf.get_variable("lay1_connections", shape=(num_hidden_lay_1, input_dimension), dtype=tf.float32, initializer=tf.truncated_normal_initializer(seed=seed_value)) with tf.name_scope("transformed_weights"): transformed_lay_1_connections = tf.nn.softmax(lay_1_connections, dim=0, name="softmax") # define the outputs of the layer1: lay_1_out = tf.matmul(transformed_lay_1_connections, tf.transpose(tf_input_images)) # This is a simple matmul! no biases req. 
# add histogram summary over the lay_1_connections: lay_1_connections_summary = tf.summary.histogram("lay_1_connections", lay_1_connections) # print the tensor shape of the lay_1_out print("Layer_1 output:", lay_1_out) # define the second layer: with tf.variable_scope("layer_2"): # create the matrix variable lay_2_connections = tf.get_variable("lay2_connections", shape=(num_hidden_lay_2, num_hidden_lay_1), dtype=tf.float32, initializer=tf.truncated_normal_initializer(seed=seed_value)) with tf.name_scope("transformed_weights"): transformed_lay_2_connections = tf.nn.softmax(lay_2_connections, dim=0, name="softmax") # define the outputs of the layer2: lay_2_out = tf.matmul(transformed_lay_2_connections, lay_1_out) # This is a simple matmul! no biases req. lay_2_connections_summary = tf.summary.histogram("lay_2_connections", lay_2_connections) # print the tensor shape of the lay_2_out print("Layer_2 output:", lay_2_out) # define the final layer: with tf.variable_scope("layer_3"): # create the matrix variable lay_3_connections = tf.get_variable("lay3_connections", shape=(num_hidden_lay_3, num_hidden_lay_2), dtype=tf.float32, initializer=tf.truncated_normal_initializer(seed=seed_value)) with tf.name_scope("transformed_weights"): transformed_lay_3_connections = tf.nn.softmax(lay_3_connections, dim=0, name="softmax") # define the outputs of the layer3: lay_3_out = tf.matmul(transformed_lay_3_connections, lay_2_out) # This is a simple matmul! no biases req. lay_3_connections_summary = tf.summary.histogram("lay_3_connections", lay_3_connections) # print the tensor shape of the lay_3_out print("Layer_3 output:", lay_3_out) # define the predictions obtained from these computations: with tf.name_scope("Predictions"): predictions = tf.nn.softmax(tf.transpose(lay_3_out)) # print the shape of the predictions: print("Predictions:", predictions) ###Output Predictions: Tensor("Predictions/Softmax:0", shape=(?, 10), dtype=float32) ###Markdown define the loss for optimization: ###Code # loss definition: with tf.name_scope("Loss"): loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=tf.transpose(lay_3_out), labels=tf_input_labels)) # attach a scalar summary over this: loss_summary = tf.summary.scalar("Loss", loss) # define the accuracy calculation module: with tf.name_scope("Accuracy"): correct = tf.equal(tf.argmax(predictions, axis=-1), tf.argmax(tf_input_labels, axis=-1)) accuracy = tf.reduce_mean(tf.cast(correct, dtype=tf.float32)) # attach a scalar summary for the accuracy calculates accuracy_summary = tf.summary.scalar("Accuracy", accuracy) # define the trainer of the operation with tf.name_scope("Trainer"): train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss) # define the tensorflow errands with tf.name_scope("Errands"): init = tf.global_variables_initializer() all_summaries = tf.summary.merge_all() ###Output _____no_output_____ ###Markdown Start the session to train the model ###Code # define the model to train: model_name = "Model_fully_energy_preserving" # generate the model saving path: model_save_path = os.path.join(base_model_path, model_name) with tf.Session() as sess: # create a tensorboard writer tensorboard_writer = tf.summary.FileWriter(logdir=model_save_path, graph=sess.graph, filename_suffix=".bot") # create a saver saver = tf.train.Saver(max_to_keep=3) # restore the session if the checkpoint exists: if(os.path.isfile(os.path.join(model_save_path, "checkpoint"))): saver.restore(sess, tf.train.latest_checkpoint(model_save_path)) else: # initialize all the 
variables: sess.run(init) global_step = 0 print("Starting the training process . . .") for epoch in range(no_of_epochs): # run through the batches of the data: accuracies = [] # initialize this to an empty list for batch in range(int(np.ceil(float(total_train_examples) / training_batch_size))): start = batch * training_batch_size; end = start + training_batch_size # extract the relevant data: batch_data_X = train_X[start: end] batch_data_Y = train_Y[start: end] # This is batch gradient descent: _, cost, acc, sums = sess.run([train_step, loss, accuracy, all_summaries], feed_dict={tf_input_images: batch_data_X, tf_input_labels: batch_data_Y}) # append the acc to the accuracies list accuracies.append(acc) # save the summarys tensorboard_writer.add_summary(sums, global_step) # increment the global step global_step += 1 print("epoch =", epoch, "\tcost =", cost) # evaluate the accuracy of the training epoch: print("accuracy =", sum(accuracies) / len(accuracies)) # evaluate the dev-set accuracy: dev_acc = sess.run(accuracy, feed_dict={tf_input_images: dev_X, tf_input_labels: dev_Y}) print("Obtained Dev accuracy = ", dev_acc) # save the model after every epoch saver.save(sess, os.path.join(model_save_path, model_name), global_step=(epoch + 10)) print("Training complete . . .") # Once, the training is complete: # print the test accuracy: test_acc = sess.run(accuracy, feed_dict={tf_input_images: test_X, tf_input_labels: test_Y}) print("Obtained Test accuracy = ", test_acc) ###Output Starting the training process . . . epoch = 0 cost = 2.2703 accuracy = 0.0989946705441 Obtained Dev accuracy = 0.0958 epoch = 1 cost = 2.25064 accuracy = 0.0989401647301 Obtained Dev accuracy = 0.096 epoch = 2 cost = 2.2099 accuracy = 0.119688711242 Obtained Dev accuracy = 0.169 epoch = 3 cost = 2.13676 accuracy = 0.266388081395 Obtained Dev accuracy = 0.3342 epoch = 4 cost = 2.02401 accuracy = 0.505171996112 Obtained Dev accuracy = 0.521 epoch = 5 cost = 1.87112 accuracy = 0.602246850798 Obtained Dev accuracy = 0.5954 epoch = 6 cost = 1.69004 accuracy = 0.626931928271 Obtained Dev accuracy = 0.6278 epoch = 7 cost = 1.50509 accuracy = 0.651423207341 Obtained Dev accuracy = 0.6544 epoch = 8 cost = 1.33793 accuracy = 0.679832848837 Obtained Dev accuracy = 0.6896 epoch = 9 cost = 1.19599 accuracy = 0.710707364318 Obtained Dev accuracy = 0.7154 epoch = 10 cost = 1.07689 accuracy = 0.735555959302 Obtained Dev accuracy = 0.739 epoch = 11 cost = 0.975672 accuracy = 0.754742005814 Obtained Dev accuracy = 0.7592 epoch = 12 cost = 0.888021 accuracy = 0.770058139535 Obtained Dev accuracy = 0.7756 epoch = 13 cost = 0.810884 accuracy = 0.782588420566 Obtained Dev accuracy = 0.7896 epoch = 14 cost = 0.742258 accuracy = 0.793435077542 Obtained Dev accuracy = 0.7988 epoch = 15 cost = 0.680881 accuracy = 0.801786579434 Obtained Dev accuracy = 0.8082 epoch = 16 cost = 0.625961 accuracy = 0.809538517442 Obtained Dev accuracy = 0.8184 epoch = 17 cost = 0.576974 accuracy = 0.816224563953 Obtained Dev accuracy = 0.8246 epoch = 18 cost = 0.533506 accuracy = 0.822202034884 Obtained Dev accuracy = 0.83 epoch = 19 cost = 0.49515 accuracy = 0.82757994186 Obtained Dev accuracy = 0.8344 epoch = 20 cost = 0.461461 accuracy = 0.831813226744 Obtained Dev accuracy = 0.8374 epoch = 21 cost = 0.43195 accuracy = 0.835864825581 Obtained Dev accuracy = 0.8426 epoch = 22 cost = 0.406106 accuracy = 0.83957122093 Obtained Dev accuracy = 0.847 epoch = 23 cost = 0.383442 accuracy = 0.842114825581 Obtained Dev accuracy = 0.8508 epoch = 24 cost = 0.363507 accuracy 
= 0.84429505814 Obtained Dev accuracy = 0.8524 epoch = 25 cost = 0.345906 accuracy = 0.846202761628 Obtained Dev accuracy = 0.8546 epoch = 26 cost = 0.330298 accuracy = 0.84765625 Obtained Dev accuracy = 0.856 epoch = 27 cost = 0.316394 accuracy = 0.84898255814 Obtained Dev accuracy = 0.858 epoch = 28 cost = 0.303952 accuracy = 0.850181686047 Obtained Dev accuracy = 0.8592 epoch = 29 cost = 0.29277 accuracy = 0.851653343023 Obtained Dev accuracy = 0.8602 epoch = 30 cost = 0.282678 accuracy = 0.852925145349 Obtained Dev accuracy = 0.8612 epoch = 31 cost = 0.273533 accuracy = 0.854342296512 Obtained Dev accuracy = 0.8624 epoch = 32 cost = 0.265214 accuracy = 0.855632267442 Obtained Dev accuracy = 0.8646 epoch = 33 cost = 0.25762 accuracy = 0.856922238372 Obtained Dev accuracy = 0.8646 epoch = 34 cost = 0.250663 accuracy = 0.857885174419 Obtained Dev accuracy = 0.8648 epoch = 35 cost = 0.24427 accuracy = 0.85913880814 Obtained Dev accuracy = 0.8656 epoch = 36 cost = 0.238378 accuracy = 0.860192587209 Obtained Dev accuracy = 0.8674 epoch = 37 cost = 0.232933 accuracy = 0.860992005814 Obtained Dev accuracy = 0.868 epoch = 38 cost = 0.227888 accuracy = 0.861936773256 Obtained Dev accuracy = 0.8686 epoch = 39 cost = 0.223202 accuracy = 0.863026889535 Obtained Dev accuracy = 0.8692 epoch = 40 cost = 0.21884 accuracy = 0.863753633721 Obtained Dev accuracy = 0.869 epoch = 41 cost = 0.21477 accuracy = 0.864226017442 Obtained Dev accuracy = 0.8694 epoch = 42 cost = 0.210965 accuracy = 0.864916424419 Obtained Dev accuracy = 0.8704 epoch = 43 cost = 0.2074 accuracy = 0.865588662791 Obtained Dev accuracy = 0.8708 epoch = 44 cost = 0.204056 accuracy = 0.866242732558 Obtained Dev accuracy = 0.8716 epoch = 45 cost = 0.200911 accuracy = 0.866842296512 Obtained Dev accuracy = 0.8718 epoch = 46 cost = 0.19795 accuracy = 0.867296511628 Obtained Dev accuracy = 0.8726 epoch = 47 cost = 0.195157 accuracy = 0.86773255814 Obtained Dev accuracy = 0.8726 epoch = 48 cost = 0.192519 accuracy = 0.868295784884 Obtained Dev accuracy = 0.8732 epoch = 49 cost = 0.190022 accuracy = 0.869058866279 Obtained Dev accuracy = 0.8744 Training complete . . . Obtained Test accuracy = 0.8759
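###Markdown A numerical note on the construction above (a sketch, independent of the TensorFlow graph, and reading "energy" as the sum of the activations is my own interpretation): softmax along dim 0 makes every column of a connection matrix sum to one, and multiplying by such a column-stochastic matrix leaves the sum of the input components unchanged from layer to layer. ###Code
import numpy as np

rng = np.random.RandomState(3)

def column_softmax(w):
    e = np.exp(w - w.max(axis=0, keepdims=True))
    return e / e.sum(axis=0, keepdims=True)

W = column_softmax(rng.randn(512, 784))   # same shape as lay1_connections above
x = rng.rand(784)

print(W.sum(axis=0)[:5])                  # every column sums to 1
print(x.sum(), (W @ x).sum())             # the two sums agree up to float rounding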
Chapter3_MLIntroduction/KnnSklearn/knn.ipynb
###Markdown Dataset Prerp ###Code iris = datasets.load_iris() x = iris.data[:, :2] # we only take the first two features. y = iris.target class_names = iris.target_names descriptions = iris.DESCR print(descriptions) x.shape ###Output _____no_output_____ ###Markdown Dataset split ###Code num_sample, num_feature = x.shape num_class = len(class_names) test_size = num_sample // 3 random_idxs = np.random.permutation(num_sample) x_train = x[random_idxs[:-test_size]] y_train = y[random_idxs[:-test_size]] x_test = x[random_idxs[-test_size:]] y_test = y[random_idxs[-test_size:]] x_test.shape ###Output _____no_output_____ ###Markdown KNN Model ###Code from sklearn.neighbors import KNeighborsClassifier clf = KNeighborsClassifier(n_neighbors=3) clf.fit(x_train, y_train) y_pred = clf.predict(x_test) y_pred s = 0 for y, p in zip(y_test, y_pred): if y == p: s += 1 print(s/len(y_test)) clf.score(x_test, y_test) ###Output _____no_output_____ ###Markdown Try different hyperparameter ###Code weights = ['uniform', 'distance'] ns = range(1, 20) best = {'score': 0, 'n': 0, 'w': 0} for w in weights: for n in ns: clf = KNeighborsClassifier(n_neighbors=n, weights=w) clf.fit(x_train, y_train) acc = clf.score(x_test, y_test) print(acc) if acc > best['score']: best['score'] = acc best['n'] = n best['w'] = w print("Best result:") print("Accuracy:", best['score']) print("n_neighbors:", best['n']) print("weights:", best['w']) clf = KNeighborsClassifier(n_neighbors=5) clf.fit(x_train, y_train) clf.predict_proba(x_test) ###Output _____no_output_____ ###Markdown Visualizing ###Code from typing import Any import matplotlib.pyplot as plt from matplotlib.colors import ListedColormap cmap_background = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF']) cmap_points = ListedColormap(['#FF0000', '#00FF00', '#0000FF']) def make_mesh_grid(x0: np.ndarray, x1: np.ndarray) -> np.ndarray: step_width = 0.05 offset = 0.5 x0a = np.arange(np.min(x0) - offset, np.max(x0) + offset, step_width) x1a = np.arange(np.min(x1) - offset, np.max(x1) + offset, step_width) xx0, xx1 = np.meshgrid(x0a, x1a) return xx0, xx1 def plot_decision_border( clf: KNeighborsClassifier, x_train: np.ndarray, y_train: np.ndarray, x_test: np.ndarray, y_test: np.ndarray, ) -> None: fig, ax = plt.subplots(figsize=(12, 8)) X0 = x_train[:,0] X1 = x_train[:,1] xx0, xx1, make_mesh_grid(X0, X1) plot_decision_border(clf, x_train, x_test, y_train, y_test) ###Output _____no_output_____
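###Markdown The final cell above stops before anything is drawn (`xx0, xx1, make_mesh_grid(X0, X1)` is incomplete and the call's argument order does not match the signature). Below is one possible completion — a sketch of my own, not the author's finished code — that fills the mesh with the classifier's predictions and overlays the training and test points; it assumes `clf`, `make_mesh_grid` and the split arrays from the cells above. ###Code
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap

cmap_background = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_points = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])

def plot_decision_border(clf, x_train, y_train, x_test, y_test):
    fig, ax = plt.subplots(figsize=(12, 8))
    xx0, xx1 = make_mesh_grid(x_train[:, 0], x_train[:, 1])
    # Classify every mesh point to shade the decision regions.
    zz = clf.predict(np.c_[xx0.ravel(), xx1.ravel()]).reshape(xx0.shape)
    ax.contourf(xx0, xx1, zz, cmap=cmap_background, alpha=0.6)
    ax.scatter(x_train[:, 0], x_train[:, 1], c=y_train, cmap=cmap_points, edgecolor='k', label='train')
    ax.scatter(x_test[:, 0], x_test[:, 1], c=y_test, cmap=cmap_points, marker='*', label='test')
    ax.legend()
    plt.show()

plot_decision_border(clf, x_train, y_train, x_test, y_test)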
experiments/aaron/topic_modeling.ipynb
###Markdown Transform ###Code original_top_1000 = pd.read_feather('../../data/transform/bag_of_words_top_1000.feather') bag_of_words_top_1000_topics = bag_of_words_top_1000 #scale by word count bag_of_words_top_1000_topics['sum'] = original_top_1000.iloc[:,16:].sum(axis=1) bag_of_words_top_1000_topics['love'] = bag_of_words_top_1000_topics['love']/bag_of_words_top_1000_topics['sum'] bag_of_words_top_1000_topics['religion'] = bag_of_words_top_1000_topics['religion']/bag_of_words_top_1000_topics['sum'] bag_of_words_top_1000_topics['death'] = bag_of_words_top_1000_topics['death']/bag_of_words_top_1000_topics['sum'] bag_of_words_top_1000_topics.drop('sum', axis=1, inplace=True) #rank and scale between 0 and 1 rank_order = np.array(range(len(bag_of_words_top_1000_topics)))/(len(bag_of_words_top_1000_topics)-1) bag_of_words_top_1000_topics = bag_of_words_top_1000_topics.sort_values(by='religion') bag_of_words_top_1000_topics = bag_of_words_top_1000_topics.reset_index(drop=True) bag_of_words_top_1000_topics['religion'] = rank_order bag_of_words_top_1000_topics = bag_of_words_top_1000_topics.sort_values(by='death') bag_of_words_top_1000_topics = bag_of_words_top_1000_topics.reset_index(drop=True) bag_of_words_top_1000_topics['death'] = rank_order bag_of_words_top_1000_topics = bag_of_words_top_1000_topics.sort_values(by='love') bag_of_words_top_1000_topics = bag_of_words_top_1000_topics.reset_index(drop=True) bag_of_words_top_1000_topics['love'] = rank_order #bag_of_words_top_1000_topics.to_feather('../../data/clean/bag_of_words_top_1000_weightedtopics.feather') #yearly topics print('Identify topic weights each year') bag_with_year = bag_of_words_top_1000_topics[bag_of_words_top_1000_topics['release_year_'].notnull()].reset_index() weights_yearly_clean = {} weights_yearly_clean['song_count'] = [0] * len(bag_with_year['release_year_'].unique()) weights_yearly_clean['love'] = [0] * len(bag_with_year['release_year_'].unique()) weights_yearly_clean['death'] = [0] * len(bag_with_year['release_year_'].unique()) weights_yearly_clean['religion'] = [0] * len(bag_with_year['release_year_'].unique()) weights_yearly_clean['year'] = [0] * len(bag_with_year['release_year_'].unique()) for i, val in enumerate(bag_with_year['release_year_'].unique()): temp = bag_with_year[bag_with_year['release_year_']==val] weights_yearly_clean['song_count'][i] = len(temp) weights_yearly_clean['year'][i] = val weights_yearly_clean['love'][i] = temp.love.mean() weights_yearly_clean['death'][i] = temp.death.mean() weights_yearly_clean['religion'][i] = temp.religion.mean() topics_per_year = pd.DataFrame(weights_yearly_clean).sort_values(by='year').reset_index(drop=True) #artist weights print('Identify topic weights for each artist (~21k iterations)') weights_artist_clean = {} weights_artist_clean['love'] = [0] * len(bag_of_words_top_1000_topics.artist_id_.unique()) weights_artist_clean['death'] = [0] * len(bag_of_words_top_1000_topics.artist_id_.unique()) weights_artist_clean['religion'] = [0] * len(bag_of_words_top_1000_topics.artist_id_.unique()) weights_artist_clean['artist_id'] = [0] * len(bag_of_words_top_1000_topics.artist_id_.unique()) weights_artist_clean['artist_name'] = [0] * len(bag_of_words_top_1000_topics.artist_id_.unique()) weights_artist_clean['longitude'] = [0] * len(bag_of_words_top_1000_topics.artist_id_.unique()) weights_artist_clean['latitude'] = [0] * len(bag_of_words_top_1000_topics.artist_id_.unique()) weights_artist_clean['song_count'] = [0] * len(bag_of_words_top_1000_topics.artist_id_.unique()) for i, val 
in tqdm(enumerate(bag_of_words_top_1000_topics.artist_id_.unique())): temp = bag_of_words_top_1000_topics[bag_of_words_top_1000_topics.artist_id_==val] weights_artist_clean['artist_id'][i] = val weights_artist_clean['artist_name'][i] = temp.artist_name_.iloc[0] weights_artist_clean['longitude'][i] = temp.longitude_.iloc[0] weights_artist_clean['latitude'][i] = temp.latitude_.iloc[0] weights_artist_clean['love'][i] = temp.love.mean() weights_artist_clean['death'][i] = temp.death.mean() weights_artist_clean['religion'][i] = temp.religion.mean() weights_artist_clean['song_count'][i] = len(temp) topics_per_artist = pd.DataFrame(weights_artist_clean).sort_values(by='artist_name').reset_index(drop=True) topics_per_artist.to_feather('../../data/transform/artist_topic_weights.feather') topics_per_year.to_feather('../../data/transform/year_topic_weights.feather') ###Output _____no_output_____
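The rank-and-scale block above (sort by each topic, reset the index, overwrite with `rank_order`) can also be written with `pandas.Series.rank`, which assigns the same scaled ranks without reordering the dataframe. A minimal sketch of that alternative, assuming the same dataframe and topic columns (not part of the original pipeline):
```python
# Equivalent rank-then-scale step: method='first' breaks ties by appearance order,
# similar to the sort-based version above.
n = len(bag_of_words_top_1000_topics)
for topic in ['love', 'death', 'religion']:
    bag_of_words_top_1000_topics[topic] = (
        bag_of_words_top_1000_topics[topic].rank(method='first') - 1) / (n - 1)
```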
cnn_vs_dense/cnn_versus_dense.ipynb
###Markdown OutlineCNN-based architectures have become integral to virtually all existing computer-vision solutions. Given their central role, it is important to better understand some of their limitations. From a theoretical perspective, CNN-based architectures can be viewed as ordinary DNN's albeit with strong weight-sharing structure.In the explorations presented here, we focus on the MNIST due mostly to the small size of the input images. For a list of results on the MNIST task, see [this page](http://yann.lecun.com/exdb/mnist/).Below, we will explore the following aspects: - How much does input normalization matter? - Are CNNs less suscpetibile to overfitting due to having smaller model capacities compared to fully-connected DNNs? - Last and perhaps most important of all, can CNNs be effective when the input image is completely scrambled, albeit in a reversable way? ###Code import os import sys import json import numpy as np import matplotlib.pyplot as plt import tensorflow as tf ###Output _____no_output_____ ###Markdown Utilities ###Code def dataset_splitter(x_set, y_set, fraction=0.2): """Splits the data set into train and validation sets. Args: x_set (np.ndarray): y_set (np.ndarray): fraction (float): The fraction of the data to be Returns: (x_train, y_trin), (x_validation, y_validation) """ dataset_size = y_set.shape[0] split_mask = np.where( np.random.uniform(size=dataset_size) > fraction, True, False) np.random.shuffle(split_mask) x_train = x_set[split_mask] y_train = y_set[split_mask] x_validation = x_set[np.logical_not(split_mask)] y_validation = y_set[np.logical_not(split_mask)] return ( (x_train, y_train), (x_validation, y_validation)) class Scrambler(): def __init__(self, target_shape): self._target_shape = target_shape self._initialize_scramble() def _initialize_scramble(self): self._scrambling_array = np.arange(np.prod(self._target_shape)) np.random.shuffle(self._scrambling_array) self._scrambling_array_inverse = np.argsort(self._scrambling_array) def scramble(self, x): x_shape = x.shape if np.prod(x[0].shape) == np.prod(self._target_shape): return x.reshape( (x_shape[0], self._scrambling_array.shape[0]) )[:, self._scrambling_array].reshape(x_shape) elif np.prod(x.shape) == np.prod(self._target_shape): return x.flatten()[self._scrambling_array].reshape(x_shape) else: raise ValueError(f"The input shape, {x.shape}, is not valid!") def unscramble(self, x): x_shape = x.shape if np.prod(x[0].shape) == np.prod(self._target_shape): return x.reshape( (x_shape[0], self._scrambling_array.shape[0]) )[:, self._scrambling_array_inverse].reshape(x_shape) elif np.prod(x.shape) == np.prod(self._target_shape): return x.flatten()[self._scrambling_array_inverse].reshape(x_shape) else: raise ValueError(f"The input shape, {x.shape}, is not valid!") def set_scrambling_array(self, scrambing_array, copy=True): if scrambing_array.shape != self._scrambling_array.shape: raise ValueError(f"The array does not have the right shape:\n\ {scrambing_array.shape} vs. 
{self._scrambling_array.shape}!") if copy: self._scrambling_array = np.copy(scrambing_array) self._scrambling_array_inverse = np.argsort(self._scrambling_array) else: self._scrambling_array = scrambing_array self._scrambling_array_inverse = np.argsort(self._scrambling_array) def get_scrambling_array(self, copy=True): if copy: return np.copy(self._scrambling_array) else: return self._scrambling_array class SequenceGenerator(tf.keras.utils.Sequence): def __init__( self, x_set, y_set, batch_size, preprocessor_func=None ): """Initializes the instance of Sequence. Args: x_set (np.ndarray): Inputs/Features y_set (np.ndarray): Labels batch_size (int): Size of batches to be yielded. preprocessor_func: A callable that if provided with be applied on the feature batches. """ self._x, self._y = x_set, y_set self._batch_size = int(batch_size) self._preprocessor_func = preprocessor_func def set_batch_size(self, batch_size): self._batch_size = int(batch_size) def set_preprocessor(self, preprocessor_func): self._preprocessor_func = preprocessor_func def __len__(self): return (len(self._y) // self._batch_size) def __getitem__(self, idx): batch_x = self._x[idx * self._batch_size: (idx + 1) * self._batch_size] if self._preprocessor_func is not None: batch_x = self._preprocessor_func(batch_x) batch_y = self._y[idx * self._batch_size: (idx + 1) * self._batch_size] return (batch_x, batch_y) def get_config(self): """Not used for serialization! """ return { "x shape": str(self._x.shape), "y shape": str(self._y.shape), "batch size": self._batch_size, "input preprocessing": self._preprocessor_func is not None} def evaluate_model(model, x, y=None): """Evaluates model on the provided dataset. Accepts both x, y np.ndarrays and generators. Args: model (tf.keras.Model) x (np.ndarray or generator) y (np.ndarray or None) """ if y is None: eval_results = model.evaluate( x, return_dict=True, verbose=0) else: eval_results = model.evaluate( x, y, return_dict=True, verbose=0) for name, val in eval_results.items(): print(f"\t{name:16}{round(float(val), 5)}") def training_history_plots(history_callback, **kwargs): """Plots the training history from an instance of the history callback. Args: history_callback (tf.keras.callbacks.History): Model training history. 
""" train_losses = history_callback.history["loss"] val_losses = history_callback.history.get("val_loss", None) train_accuracy = history_callback.history.get("accuracy", None) val_accuracy = history_callback.history.get("val_accuracy", None) num_epochs = len(train_losses) epochs = np.linspace(start=1, stop=num_epochs, num=num_epochs, endpoint=True) fig = plt.figure(figsize=kwargs.get("figsize", (18., 13.))) plt.suptitle("Training History", fontsize=18) # ax1 = plt.subplot(2, 1, 1) ax1.set_ylabel("Crossentropy", fontsize=14., color="gray") ax1.plot(epochs, train_losses, marker="", lw=2.0, color="orange", label="Train") if val_losses is not None : ax1.plot(epochs, val_losses, marker="", lw=2.0, color="steelblue", label="Validation") ax1.grid() ax1.set_axisbelow(True) ax1.legend(loc="upper right", fontsize=12) # ax2 = plt.subplot(2, 1, 2) ax2.set_xlabel("Epoch", fontsize=14., color="gray") ax2.set_ylabel("Accuracy", fontsize=14., color="gray") if train_accuracy is not None : ax2.plot(epochs, train_accuracy, marker="", lw=2.0, color="orange", label="Train") if val_accuracy is not None : ax2.plot(epochs, val_accuracy, marker="", lw=2.0, color="steelblue", label="Validation") ax2.grid() ax2.set_axisbelow(True) ax2.legend(loc="upper right", fontsize=12) # plt.show() def lr_schedule(epoch, lr, num_initial_epochs=5, decay_rate=0.75, min_lr=0.0001): """Learning rate schedule. """ if (epoch < num_initial_epochs): return lr elif lr > min_lr: return decay_rate * lr else: return lr ###Output _____no_output_____ ###Markdown Load MNIST Dataset ###Code (x_train_set, y_train_set), (x_test, y_test) = tf.keras.datasets.mnist.load_data() print(f"\t Train Set: {x_train_set.shape}, {y_train_set.shape}") print(f"\t Test Set: {x_test.shape}, {y_test.shape}") ###Output Train Set: (60000, 28, 28), (60000,) Test Set: (10000, 28, 28), (10000,) ###Markdown Split Train Set into Train and Validation Sets ###Code (x_train, y_train), (x_validation, y_validation) = dataset_splitter( x_train_set, y_train_set, 0.2) train_set_size_frac = round(y_train.shape[0] / y_train_set.shape[0], 3) valid_set_size_frac = round(y_validation.shape[0] / y_train_set.shape[0], 3) print(f"\t Training Set: {x_train.shape}, {y_train.shape} is {train_set_size_frac}\ fracton of the original set.") print(f"\t Validation Set: {x_validation.shape}, {y_validation.shape} is {valid_set_size_frac}\ fracton of the original set.") ###Output Training Set: (47946, 28, 28), (47946,) is 0.799 fracton of the original set. Validation Set: (12054, 28, 28), (12054,) is 0.201 fracton of the original set. 
###Markdown Instantiate Sequence Generators ###Code # test_sequence_generator = SequenceGenerator( # x_test, y_test, batch_size=32) # print("Test Generator Config:\t", json.dumps( # test_sequence_generator.get_config(), indent=4), end="\n\n") train_sequence_generator = SequenceGenerator( x_train, y_train, batch_size=32) # print("Train Generator Config:\t", json.dumps( # train_sequence_generator.get_config(), indent=4), end="\n\n") validation_sequence_generator = SequenceGenerator( x_validation, y_validation, batch_size=32) # print("Validation Generator Config:\t", json.dumps( # validation_sequence_generator.get_config(), indent=4)) ###Output Train Generator Config: { "x shape": "(47946, 28, 28)", "y shape": "(47946,)", "batch size": 32, "input preprocessing": false } Validation Generator Config: { "x shape": "(12054, 28, 28)", "y shape": "(12054,)", "batch size": 32, "input preprocessing": false } ###Markdown Explorations LeNet-5: ###Code def lenet5(name="LeNet-5"): inputs = tf.keras.Input(shape=(28, 28, 1), name="input") x = tf.keras.layers.Conv2D( filters=6, kernel_size=(5, 5), strides=(1, 1), padding="same", data_format="channels_last", dilation_rate=(1, 1), groups=1, activation=tf.keras.activations.tanh, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, name="conv_1")(inputs) x = tf.keras.layers.AveragePooling2D( pool_size=(2, 2), strides=(2, 2), padding='valid', data_format=None, name="avg_pooling_1")(x) x = tf.keras.layers.Conv2D( filters=16, kernel_size=(5, 5), strides=(1, 1), padding="valid", data_format="channels_last", dilation_rate=(1, 1), groups=1, activation=tf.keras.activations.tanh, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, name="conv_2")(x) x = tf.keras.layers.AveragePooling2D( pool_size=(2, 2), strides=(2, 2), padding='valid', data_format=None, name="avg_pooling_2")(x) x = tf.keras.layers.Conv2D( filters=120, kernel_size=(5, 5), strides=(1, 1), padding="valid", data_format="channels_last", dilation_rate=(1, 1), groups=1, activation=tf.keras.activations.tanh, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, name="conv_3")(x) x = tf.keras.layers.Flatten(name="flatten")(x) x = tf.keras.layers.Dense( units=84, activation=tf.keras.activations.tanh, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, name="dense_1")(x) outputs = tf.keras.layers.Dense( units=10, activation=tf.keras.activations.softmax, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, name="output")(x) return tf.keras.Model(inputs=inputs, outputs=outputs, name=name) lenet5_model = lenet5() lenet5_model.summary(print_fn=(lambda *args: print("\t", *args))) optimizer = tf.keras.optimizers.SGD(learning_rate=0.1, momentum=0.0) # optimizer = tf.keras.optimizers.Adam( # learning_rate=0.001, beta_1=0.9, beta_2=0.999, # epsilon=1e-07, amsgrad=False, name="Adam") loss = 
tf.keras.losses.SparseCategoricalCrossentropy( from_logits=False, name="crossentropy") metrics = [ tf.keras.metrics.SparseCategoricalAccuracy(name="accuracy")] lenet5_model.compile(optimizer=optimizer, loss=loss, metrics=metrics) num_epochs = 100 # Learning rate schedule lr_schedule_callback = tf.keras.callbacks.LearningRateScheduler( schedule=lambda epoch, lr: lr_schedule( epoch, lr, num_initial_epochs=5, decay_rate=0.8, min_lr=0.001), verbose=0) # Early stoppying callback: early_stopping_callback = tf.keras.callbacks.EarlyStopping( monitor="val_loss", min_delta=0.001, patience=20, verbose=1, mode="min", baseline=None, restore_best_weights=True) train_sequence_generator.set_preprocessor(lambda x: x[..., np.newaxis]) train_sequence_generator.set_batch_size(32) validation_sequence_generator.set_preprocessor(lambda x: x[..., np.newaxis]) validation_sequence_generator.set_batch_size(32) # Fit model fit_history = lenet5_model.fit( train_sequence_generator, epochs=num_epochs, verbose=0, validation_data=validation_sequence_generator, shuffle=True, class_weight=None, workers=8, callbacks=[ lr_schedule_callback, early_stopping_callback ]) training_history_plots(fit_history) evaluate_model(lenet5_model, x_test[..., np.newaxis], y_test) ###Output loss 0.0296 accuracy 0.9898 ###Markdown A Fully-Connected Model: ###Code def construct_dense_model(name="FCM"): inputs = tf.keras.Input(shape=(28, 28), name="input") x = tf.keras.layers.Flatten(name="flatten")(inputs) x = tf.keras.layers.Dense( units=512, activation=tf.keras.activations.tanh, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, name="dense_1")(x) # x = tf.keras.layers.Dropout(rate=0.1, name="dropout_1")(x) x = tf.keras.layers.Dense( units=128, activation=tf.keras.activations.tanh, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, name="dense_3")(x) outputs = tf.keras.layers.Dense( units=10, activation=tf.keras.activations.softmax, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, name="output")(x) return tf.keras.Model(inputs=inputs, outputs=outputs, name=name) dense_model = construct_dense_model() dense_model.summary(print_fn=(lambda *args: print("\t", *args))) optimizer = tf.keras.optimizers.SGD(learning_rate=0.1, momentum=0.0) # optimizer = tf.keras.optimizers.Adam( # learning_rate=0.001, beta_1=0.9, beta_2=0.999, # epsilon=1e-07, amsgrad=False, name="Adam") loss = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=False, name="crossentropy") metrics = [ tf.keras.metrics.SparseCategoricalAccuracy(name="accuracy")] dense_model.compile(optimizer=optimizer, loss=loss, metrics=metrics) num_epochs = 100 lr_schedule_callback = tf.keras.callbacks.LearningRateScheduler( schedule=lambda epoch, lr: lr_schedule( epoch, lr, num_initial_epochs=5, decay_rate=0.8, min_lr=0.001), verbose=0) # Early stoppying callback: early_stopping_callback = tf.keras.callbacks.EarlyStopping( monitor="val_loss", min_delta=0.0005, patience=20, verbose=1, mode="min", baseline=None, restore_best_weights=True) train_sequence_generator.set_preprocessor(lambda x: x / 127.5 - 1.0) train_sequence_generator.set_batch_size(32) 
validation_sequence_generator.set_preprocessor(lambda x: x / 127.5 - 1.0) validation_sequence_generator.set_batch_size(32) # Fit model fit_history = dense_model.fit( train_sequence_generator, epochs=num_epochs, verbose=0, validation_data=validation_sequence_generator, shuffle=True, class_weight=None, workers=8, callbacks=[ lr_schedule_callback, early_stopping_callback ]) training_history_plots(fit_history) evaluate_model(dense_model, (x_test / 127.5 - 1.0), y_test) ###Output loss 0.06946 accuracy 0.978 ###Markdown Scrambled Input This time, we scramble the input images to see if the models retain their predictive power. Test the Scrambling Function ###Code num_rows = 2 num_columns = 9 scrambler = Scrambler(x_test.shape[-2:]) scrambled_images = scrambler.scramble(x_test[0:num_columns, ...]) unscrambled_images = scrambler.unscramble(scrambled_images) fig = plt.figure(figsize=(18., 4.)) for col in range(num_columns): ax = plt.subplot(num_rows, num_columns, col + 1) ax.imshow(scrambled_images[col,...], cmap="gray") ax.set_axis_off() # ax = plt.subplot(num_rows, num_columns, num_columns + col + 1) ax.imshow(unscrambled_images[col,...], cmap="gray") ax.set_axis_off() plt.show() ###Output _____no_output_____ ###Markdown Train LeNet-5 ###Code # Create Model lenet5_model_2 = lenet5() # Compile: optimizer = tf.keras.optimizers.SGD(learning_rate=0.1, momentum=0.0) loss = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=False, name="crossentropy") metrics = [tf.keras.metrics.SparseCategoricalAccuracy(name="accuracy")] lenet5_model_2.compile(optimizer=optimizer, loss=loss, metrics=metrics) num_epochs = 100 # Learning rate schedule lr_schedule_callback = tf.keras.callbacks.LearningRateScheduler( schedule=lambda epoch, lr: lr_schedule( epoch, lr, num_initial_epochs=5, decay_rate=0.8, min_lr=0.001), verbose=0) # Early stoppying callback: early_stopping_callback = tf.keras.callbacks.EarlyStopping( monitor="val_loss", min_delta=0.001, patience=20, verbose=1, mode="min", baseline=None, restore_best_weights=True) scrambler = Scrambler(x_test.shape[-2:]) train_sequence_generator.set_preprocessor( lambda x: scrambler.scramble(x[..., np.newaxis])) train_sequence_generator.set_batch_size(32) # validation_sequence_generator.set_preprocessor( lambda x: scrambler.scramble(x[..., np.newaxis])) validation_sequence_generator.set_batch_size(32) # Fit model fit_history = lenet5_model_2.fit( train_sequence_generator, epochs=num_epochs, verbose=0, validation_data=validation_sequence_generator, shuffle=True, class_weight=None, workers=8, callbacks=[ lr_schedule_callback, early_stopping_callback ]) training_history_plots(fit_history) evaluate_model( lenet5_model_2, scrambler.scramble(x_test[..., np.newaxis]), y_test) ###Output loss 0.12035 accuracy 0.9625 ###Markdown Train the Dense Model: ###Code # Construct Model dense_model_2 = construct_dense_model() # Compile optimizer = tf.keras.optimizers.SGD(learning_rate=0.1, momentum=0.0) loss = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=False, name="crossentropy") metrics = [tf.keras.metrics.SparseCategoricalAccuracy(name="accuracy")] dense_model_2.compile(optimizer=optimizer, loss=loss, metrics=metrics) num_epochs = 100 # Learning rate schedule lr_schedule_callback = tf.keras.callbacks.LearningRateScheduler( schedule=lambda epoch, lr: lr_schedule( epoch, lr, num_initial_epochs=5, decay_rate=0.8, min_lr=0.001), verbose=0) # Early stoppying callback: early_stopping_callback = tf.keras.callbacks.EarlyStopping( monitor="val_loss", min_delta=0.001, 
patience=20, verbose=1, mode="min", baseline=None, restore_best_weights=True) scrambler = Scrambler(x_test.shape[-2:]) train_sequence_generator.set_preprocessor( lambda x: scrambler.scramble(x / 127.5 - 1.0)) train_sequence_generator.set_batch_size(32) # validation_sequence_generator.set_preprocessor( lambda x: scrambler.scramble(x / 127.5 - 1.0)) validation_sequence_generator.set_batch_size(32) # Fit model fit_history = dense_model_2.fit( train_sequence_generator, epochs=num_epochs, verbose=0, validation_data=validation_sequence_generator, shuffle=True, class_weight=None, workers=8, callbacks=[ lr_schedule_callback, early_stopping_callback ]) training_history_plots(fit_history) evaluate_model( dense_model_2, scrambler.scramble(x_test / 127.5 - 1.0), y_test) ###Output loss 0.06231 accuracy 0.9796
courses/datacamp/notes/python/matplotlibTMP/scatterplot.ipynb
###Markdown ---title: "Show errors in bar, plot, and boxplot"date: 2020-04-12T14:41:32+02:00author: "Othmane Rifki"type: technical_notedraft: false--- ###Code import matplotlib.pyplot as plt import pandas as pd climate_change = pd.read_csv('climate_change.csv', parse_dates=['date'], index_col='date') ###Output _____no_output_____ ###Markdown Adding error-bars to a bar chart ###Code fig, ax = plt.subplots() # Add data: "co2", "relative_temp" as x-y, index as color ax.scatter(climate_change['co2'], climate_change['relative_temp'], c=climate_change.index) # Set the x-axis label to "CO2 (ppm)" ax.set_xlabel('CO2 (ppm)') # Set the y-axis label to "Relative temperature (C)" ax.set_ylabel('Relative temperature (C)') plt.show() ###Output _____no_output_____
4_synthetic_data_attention/Error_modes/distribution_4/m_9/Error_Mode3_non_linear_clssy.ipynb
###Markdown Generate dataset ###Code y = np.random.randint(0,7,2100) idx= [] for i in range(7): print(i,sum(y==i)) idx.append(y==i) x = np.zeros((2100,2)) x[idx[0],:] = np.random.uniform(low=[1.5,8],high=[2,7],size=(sum(idx[0]),2)) x[idx[1],:] = np.random.uniform(low=[1.5,5],high=[2,6],size=(sum(idx[1]),2)) x[idx[2],:] = np.random.uniform(low=[1.5,3],high=[2,4],size=(sum(idx[2]),2)) x[idx[3],:] = np.random.uniform(low=[1.5,1],high=[2,2],size=(sum(idx[3]),2)) x[idx[4],:] = np.random.uniform(low=[1.5,-1],high=[2,0],size=(sum(idx[4]),2)) x[idx[5],:] = np.random.uniform(low=[1.5,-2],high=[2,-3],size=(sum(idx[5]),2)) x[idx[6],:] = np.random.uniform(low=[2.1,-0.3],high=[2.2,-0.2],size=(sum(idx[6]),2)) #x[idx[7],:] = np.random.uniform(low=[2.5,1],high=[3.5,4],size=(sum(idx[7]),2)) for i in range(7): plt.scatter(x[idx[i],0],x[idx[i],1],label="class_"+str(i)) plt.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig("dist_4.png",bbox_inches="tight") plt.savefig("dist_4.pdf",bbox_inches="tight") foreground_classes = {'class_0','class_1'} background_classes = {'class_2'} fg_class = np.random.randint(0,6) fg_idx = np.random.randint(0,9) #m=2 a = [] for i in range(9): #m=2 if i == fg_idx: b = np.random.choice(np.where(idx[fg_class]==True)[0],size=1) a.append(x[b]) print("foreground "+str(fg_class)+" present at " + str(fg_idx)) else: bg_class = np.random.randint(6,7) b = np.random.choice(np.where(idx[bg_class]==True)[0],size=1) a.append(x[b]) print("background "+str(bg_class)+" present at " + str(i)) a = np.concatenate(a,axis=0) print(a.shape) print(fg_class , fg_idx) a.shape np.reshape(a,(18,1)) desired_num = 3000 mosaic_list =[] mosaic_label = [] fore_idx=[] for j in range(desired_num): fg_class = np.random.randint(0,6) fg_idx = np.random.randint(0,9) #m=2 a = [] np.random.seed(i+j) for i in range(9): #m=2 if i == fg_idx: b = np.random.choice(np.where(idx[fg_class]==True)[0],size=1) a.append(x[b]) # print("foreground "+str(fg_class)+" present at " + str(fg_idx)) else: bg_class = np.random.randint(6,7) b = np.random.choice(np.where(idx[bg_class]==True)[0],size=1) a.append(x[b]) # print("background "+str(bg_class)+" present at " + str(i)) a = np.concatenate(a,axis=0) mosaic_list.append(np.reshape(a,(18,1))) mosaic_label.append(fg_class) fore_idx.append(fg_idx) mosaic_list = np.concatenate(mosaic_list,axis=1).T # print(mosaic_list) print(np.shape(mosaic_label)) print(np.shape(fore_idx)) class MosaicDataset(Dataset): """MosaicDataset dataset.""" def __init__(self, mosaic_list, mosaic_label, fore_idx): """ Args: csv_file (string): Path to the csv file with annotations. root_dir (string): Directory with all the images. transform (callable, optional): Optional transform to be applied on a sample. 
""" self.mosaic = mosaic_list self.label = mosaic_label self.fore_idx = fore_idx def __len__(self): return len(self.label) def __getitem__(self, idx): return self.mosaic[idx] , self.label[idx], self.fore_idx[idx] batch = 250 msd = MosaicDataset(mosaic_list, mosaic_label , fore_idx) train_loader = DataLoader( msd,batch_size= batch ,shuffle=True) class Wherenet(nn.Module): def __init__(self): super(Wherenet,self).__init__() self.linear1 = nn.Linear(2,1) #self.linear2 = nn.Linear(100,200) #self.linear3 = nn.Linear(200,1) def forward(self,z): x = torch.zeros([batch,9],dtype=torch.float64) #m=2 y = torch.zeros([batch,2], dtype=torch.float64) #x,y = x.to("cuda"),y.to("cuda") for i in range(9): #m=9 x[:,i] = self.helper(z[:,2*i:2*i+2])[:,0] #print(k[:,0].shape,x[:,i].shape) x = F.softmax(x,dim=1) # alphas x1 = x[:,0] for i in range(9): #m=9 x1 = x[:,i] #print() y = y+torch.mul(x1[:,None],z[:,2*i:2*i+2]) return y , x def helper(self,x): #x = F.relu(self.linear1(x)) #x = F.relu(self.linear2(x)) x = self.linear1(x) return x trainiter = iter(train_loader) input1,labels1,index1 = trainiter.next() torch.manual_seed(1236) where = Wherenet().double() where = where out_where,alphas = where(input1) out_where.shape,alphas.shape class Whatnet(nn.Module): def __init__(self): super(Whatnet,self).__init__() self.linear1 = nn.Linear(2,2000) self.linear2 = nn.Linear(2000,4000) self.linear3 = nn.Linear(4000,6) #self.linear4 = nn.Linear(15,20) #self.linear5 = nn.Linear(20,25) #self.linear6 = nn.Linear(25,6) def forward(self,x): x = F.relu(self.linear1(x)) x = F.relu(self.linear2(x)) #x = F.relu(self.linear3(x)) #x = F.relu(self.linear4(x)) #x = F.relu(self.linear5(x)) x = self.linear3(x) return x torch.manual_seed(1236) what = Whatnet().double() # what(out_where) test_data_required = 1000 mosaic_list_test =[] mosaic_label_test = [] fore_idx_test=[] for j in range(test_data_required): fg_class = np.random.randint(0,6) fg_idx = np.random.randint(0,9) #m=2 a = [] for i in range(9): #m=2 np.random.seed(i+j+3000) if i == fg_idx: b = np.random.choice(np.where(idx[fg_class]==True)[0],size=1) a.append(x[b]) # print("foreground "+str(fg_class)+" present at " + str(fg_idx)) else: bg_class = np.random.randint(6,7) b = np.random.choice(np.where(idx[bg_class]==True)[0],size=1) a.append(x[b]) # print("background "+str(bg_class)+" present at " + str(i)) a = np.concatenate(a,axis=0) mosaic_list_test.append(np.reshape(a,(18,1))) mosaic_label_test.append(fg_class) fore_idx_test.append(fg_idx) mosaic_list_test = np.concatenate(mosaic_list_test,axis=1).T print(mosaic_list_test.shape) test_data = MosaicDataset(mosaic_list_test,mosaic_label_test,fore_idx_test) test_loader = DataLoader( test_data,batch_size= batch ,shuffle=False) col1=[] col2=[] col3=[] col4=[] col5=[] col6=[] col7=[] col8=[] col9=[] col10=[] col11=[] col12=[] col13=[] correct = 0 total = 0 count = 0 flag = 1 focus_true_pred_true =0 focus_false_pred_true =0 focus_true_pred_false =0 focus_false_pred_false =0 argmax_more_than_half = 0 argmax_less_than_half =0 with torch.no_grad(): for data in train_loader: inputs, labels , fore_idx = data avg_inp,alphas = where(inputs) outputs = what(avg_inp) _, predicted = torch.max(outputs.data, 1) for j in range(labels.size(0)): count += 1 focus = torch.argmax(alphas[j]) if alphas[j][focus] >= 0.5 : argmax_more_than_half += 1 else: argmax_less_than_half += 1 if(focus == fore_idx[j] and predicted[j] == labels[j]): focus_true_pred_true += 1 elif(focus != fore_idx[j] and predicted[j] == labels[j]): focus_false_pred_true += 1 elif(focus == 
fore_idx[j] and predicted[j] != labels[j]): focus_true_pred_false += 1 elif(focus != fore_idx[j] and predicted[j] != labels[j]): focus_false_pred_false += 1 total += labels.size(0) correct += (predicted == labels).sum().item() print('Accuracy of the network on the 30000 train images: %d %%' % ( 100 * correct / total)) print("total correct", correct) print("total train set images", total) print("focus_true_pred_true %d =============> FTPT : %d %%" % (focus_true_pred_true , (100 * focus_true_pred_true / total) ) ) print("focus_false_pred_true %d =============> FFPT : %d %%" % (focus_false_pred_true, (100 * focus_false_pred_true / total) ) ) print("focus_true_pred_false %d =============> FTPF : %d %%" %( focus_true_pred_false , ( 100 * focus_true_pred_false / total) ) ) print("focus_false_pred_false %d =============> FFPF : %d %%" % (focus_false_pred_false, ( 100 * focus_false_pred_false / total) ) ) print("argmax_more_than_half ==================> ",argmax_more_than_half) print("argmax_less_than_half ==================> ",argmax_less_than_half) print(count) print("="*100) col1.append(0) col2.append(argmax_more_than_half) col3.append(argmax_less_than_half) col4.append(focus_true_pred_true) col5.append(focus_false_pred_true) col6.append(focus_true_pred_false) col7.append(focus_false_pred_false) correct = 0 total = 0 count = 0 flag = 1 focus_true_pred_true =0 focus_false_pred_true =0 focus_true_pred_false =0 focus_false_pred_false =0 argmax_more_than_half = 0 argmax_less_than_half =0 with torch.no_grad(): for data in test_loader: inputs, labels , fore_idx = data avg_inp,alphas = where(inputs) outputs = what(avg_inp) _, predicted = torch.max(outputs.data, 1) for j in range(labels.size(0)): focus = torch.argmax(alphas[j]) if alphas[j][focus] >= 0.5 : argmax_more_than_half += 1 else: argmax_less_than_half += 1 if(focus == fore_idx[j] and predicted[j] == labels[j]): focus_true_pred_true += 1 elif(focus != fore_idx[j] and predicted[j] == labels[j]): focus_false_pred_true += 1 elif(focus == fore_idx[j] and predicted[j] != labels[j]): focus_true_pred_false += 1 elif(focus != fore_idx[j] and predicted[j] != labels[j]): focus_false_pred_false += 1 total += labels.size(0) correct += (predicted == labels).sum().item() print('Accuracy of the network on the 10000 test images: %d %%' % ( 100 * correct / total)) print("total correct", correct) print("total train set images", total) print("focus_true_pred_true %d =============> FTPT : %d %%" % (focus_true_pred_true , (100 * focus_true_pred_true / total) ) ) print("focus_false_pred_true %d =============> FFPT : %d %%" % (focus_false_pred_true, (100 * focus_false_pred_true / total) ) ) print("focus_true_pred_false %d =============> FTPF : %d %%" %( focus_true_pred_false , ( 100 * focus_true_pred_false / total) ) ) print("focus_false_pred_false %d =============> FFPF : %d %%" % (focus_false_pred_false, ( 100 * focus_false_pred_false / total) ) ) print("argmax_more_than_half ==================> ",argmax_more_than_half) print("argmax_less_than_half ==================> ",argmax_less_than_half) col8.append(argmax_more_than_half) col9.append(argmax_less_than_half) col10.append(focus_true_pred_true) col11.append(focus_false_pred_true) col12.append(focus_true_pred_false) col13.append(focus_false_pred_false) focus_true_pred_true =0 focus_false_pred_true =0 focus_true_pred_false =0 focus_false_pred_false =0 argmax_more_than_half = 0 argmax_less_than_half =0 criterion = nn.CrossEntropyLoss() optimizer_where = optim.Adam(where.parameters(), lr=0.01)#,momentum=0.9) 
optimizer_what = optim.Adam(what.parameters(), lr=0.01)#, momentum=0.9) nos_epochs = 150 train_loss=[] test_loss =[] train_acc = [] test_acc = [] for epoch in range(nos_epochs): # loop over the dataset multiple times focus_true_pred_true =0 focus_false_pred_true =0 focus_true_pred_false =0 focus_false_pred_false =0 argmax_more_than_half = 0 argmax_less_than_half =0 running_loss = 0.0 cnt=0 iteration = desired_num // batch #training data set for i, data in enumerate(train_loader): inputs , labels , fore_idx = data #inputs,labels,fore_idx = inputs.to(device),labels.to(device),fore_idx.to(device) # zero the parameter gradients optimizer_what.zero_grad() optimizer_where.zero_grad() avg_inp,alphas = where(inputs) outputs = what(avg_inp) _, predicted = torch.max(outputs.data, 1) loss = criterion(outputs, labels) loss.backward() optimizer_what.step() optimizer_where.step() running_loss += loss.item() if cnt % 6 == 5: # print every 6 mini-batches print('[%d, %5d] loss: %.3f' %(epoch + 1, cnt + 1, running_loss / 6)) running_loss = 0.0 cnt=cnt+1 if epoch % 5 == 4: for j in range (batch): focus = torch.argmax(alphas[j]) if(alphas[j][focus] >= 0.5): argmax_more_than_half +=1 else: argmax_less_than_half +=1 if(focus == fore_idx[j] and predicted[j] == labels[j]): focus_true_pred_true += 1 elif(focus != fore_idx[j] and predicted[j] == labels[j]): focus_false_pred_true +=1 elif(focus == fore_idx[j] and predicted[j] != labels[j]): focus_true_pred_false +=1 elif(focus != fore_idx[j] and predicted[j] != labels[j]): focus_false_pred_false +=1 if epoch % 5 == 4: col1.append(epoch) col2.append(argmax_more_than_half) col3.append(argmax_less_than_half) col4.append(focus_true_pred_true) col5.append(focus_false_pred_true) col6.append(focus_true_pred_false) col7.append(focus_false_pred_false) #************************************************************************ #testing data set with torch.no_grad(): focus_true_pred_true =0 focus_false_pred_true =0 focus_true_pred_false =0 focus_false_pred_false =0 argmax_more_than_half = 0 argmax_less_than_half =0 for data in test_loader: inputs, labels , fore_idx = data #inputs,labels,fore_idx = inputs.to(device),labels.to(device),fore_idx.to(device) # print(inputs.shtorch.save(where.state_dict(),"model_epoch"+str(epoch)+".pt")ape,labels.shape) avg_inp,alphas = where(inputs) outputs = what(avg_inp) _, predicted = torch.max(outputs.data, 1) for j in range (batch): focus = torch.argmax(alphas[j]) if(alphas[j][focus] >= 0.5): argmax_more_than_half +=1 else: argmax_less_than_half +=1 if(focus == fore_idx[j] and predicted[j] == labels[j]): focus_true_pred_true += 1 elif(focus != fore_idx[j] and predicted[j] == labels[j]): focus_false_pred_true +=1 elif(focus == fore_idx[j] and predicted[j] != labels[j]): focus_true_pred_false +=1 elif(focus != fore_idx[j] and predicted[j] != labels[j]): focus_false_pred_false +=1 col8.append(argmax_more_than_half) col9.append(argmax_less_than_half) col10.append(focus_true_pred_true) col11.append(focus_false_pred_true) col12.append(focus_true_pred_false) col13.append(focus_false_pred_false) #torch.save(where.state_dict(),"where_model_epoch"+str(epoch)+".pt") #torch.save(what.state_dict(),"what_model_epoch"+str(epoch)+".pt") print('Finished Training') #torch.save(where.state_dict(),"where_model_epoch"+str(nos_epochs)+".pt") #torch.save(what.state_dict(),"what_model_epoch"+str(epoch)+".pt") columns = ["epochs", "argmax > 0.5" ,"argmax < 0.5", "focus_true_pred_true", "focus_false_pred_true", "focus_true_pred_false", "focus_false_pred_false" ] df_train 
= pd.DataFrame() df_test = pd.DataFrame() df_train[columns[0]] = col1 df_train[columns[1]] = col2 df_train[columns[2]] = col3 df_train[columns[3]] = col4 df_train[columns[4]] = col5 df_train[columns[5]] = col6 df_train[columns[6]] = col7 df_test[columns[0]] = col1 df_test[columns[1]] = col8 df_test[columns[2]] = col9 df_test[columns[3]] = col10 df_test[columns[4]] = col11 df_test[columns[5]] = col12 df_test[columns[6]] = col13 df_train plt.plot(col1,col2, label='argmax > 0.5') plt.plot(col1,col3, label='argmax < 0.5') plt.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.xlabel("epochs") plt.ylabel("training data") plt.title("On Training set") plt.show() plt.plot(col1,col4, label ="focus_true_pred_true ") plt.plot(col1,col5, label ="focus_false_pred_true ") plt.plot(col1,col6, label ="focus_true_pred_false ") plt.plot(col1,col7, label ="focus_false_pred_false ") plt.title("On Training set") plt.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.xlabel("epochs") plt.ylabel("training data") plt.savefig("dist_4_train.png",bbox_inches="tight") plt.savefig("dist_4_train.pdf",bbox_inches="tight") plt.show() df_test plt.plot(col1,col8, label='argmax > 0.5') plt.plot(col1,col9, label='argmax < 0.5') plt.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.xlabel("epochs") plt.ylabel("Testing data") plt.title("On Testing set") plt.show() plt.plot(col1,col10, label ="focus_true_pred_true ") plt.plot(col1,col11, label ="focus_false_pred_true ") plt.plot(col1,col12, label ="focus_true_pred_false ") plt.plot(col1,col13, label ="focus_false_pred_false ") plt.title("On Testing set") plt.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.xlabel("epochs") plt.ylabel("Testing data") plt.savefig("dist_4_test.png",bbox_inches="tight") plt.savefig("dist_4_test.pdf",bbox_inches="tight") plt.show() # where.state_dict()["linear1.weight"][:] = torch.Tensor(np.array([[ 0, -1]])) # where.state_dict()["linear1.bias"][:] = torch.Tensor(np.array([0])) for param in where.named_parameters(): print(param) # what.state_dict()["linear1.weight"][:] = torch.Tensor(np.array([[ 5, 0], # [0,5], # [ 0, 0]])) # what.state_dict()["linear1.bias"][:] = torch.Tensor(np.array([0, 0, 0])) # for param in what.named_parameters(): # print(param) xx,yy= np.meshgrid(np.arange(1.4,3,0.03),np.arange(-3.5,8.5,0.03)) X = np.concatenate((xx.reshape(-1,1),yy.reshape(-1,1)),axis=1) X = torch.Tensor(X).double() Y = where.helper(X) Y1 = what(X) X.shape,Y.shape X = X.detach().numpy() Y = Y[:,0].detach().numpy() fig = plt.figure(figsize=(6,6)) cs = plt.contourf(X[:,0].reshape(xx.shape),X[:,1].reshape(yy.shape),Y.reshape(xx.shape)) plt.xlabel("X1") plt.ylabel("X2") fig.colorbar(cs) for i in range(7): plt.scatter(x[idx[i],0],x[idx[i],1],label="class_"+str(i)) #plt.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig("dist_4_fc.png")#,bbox_inches='tight') plt.savefig("dist_4_fc.pdf") Y1 = Y1.detach().numpy() Y1 = torch.softmax(torch.Tensor(Y1),dim=1) _,Z4= torch.max(Y1,1) Z1 = Y1[:,0] Z2 = Y1[:,1] #Z3 = Y1[:,2] np.unique(Z4) #fig = plt.figure(figsize=(6,6)) # plt.scatter(X[:,0],X[:,1],c=Z1) # plt.scatter(X[:,0],X[:,1],c=Z2) # plt.scatter(X[:,0],X[:,1],c=Z3) #cs = plt.contourf(X[:,0].reshape(xx.shape),X[:,1].reshape(yy.shape),Z1.reshape(xx.shape)) # #plt.colorbar(cs) # cs = plt.contourf(X[:,0].reshape(xx.shape),X[:,1].reshape(yy.shape),Z2.reshape(xx.shape)) # #plt.colorbar(cs) # cs = plt.contourf(X[:,0].reshape(xx.shape),X[:,1].reshape(yy.shape),Z3.reshape(xx.shape)) #plt.colorbar(cs) # plt.xlabel("X1") # plt.ylabel("X2") 
#ax.view_init(60,100) #plt.savefig("non_interpretable_class_2d.pdf",bbox_inches='tight') avrg = [] lbl = [] with torch.no_grad(): for i, data in enumerate(train_loader): inputs , labels , fore_idx = data avg_inp,alphas = where(inputs) avrg.append(avg_inp) lbl.append(labels.numpy()) avrg= np.concatenate(avrg,axis=0) lbl = np.concatenate(lbl,axis=0) indices = [] for i in range(6): print(i,sum(lbl==i)) indices.append(lbl==i) cs = plt.contourf(X[:,0].reshape(xx.shape),X[:,1].reshape(yy.shape),Z4.reshape(xx.shape)) for i in range(6): plt.scatter(avrg[indices[i],0],avrg[indices[i],1],label="class_"+str(i)) # plt.legend(loc='center left', bbox_to_anchor=(1, 0.5)) #plt.scatter(avrg[:,0],avrg[:,1],c="c") plt.savefig("dist_4_db.png",bbox_inches="tight") plt.savefig("dist_4_db.pdf",bbox_inches="tight") true = [] pred = [] acc= 0 for i, data in enumerate(train_loader): inputs , labels , fore_idx = data avg_inp,alphas = where(inputs) outputs = what(avg_inp) _, predicted = torch.max(outputs.data, 1) true.append(labels) pred.append(predicted) acc+=sum(predicted == labels) true = np.concatenate(true,axis=0) pred = np.concatenate(pred,axis=0) from sklearn.metrics import confusion_matrix confusion_matrix(true,pred) sum(true==pred)/3000 ###Output _____no_output_____
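The core of `Wherenet` above is a soft-attention step: one scalar score per patch, a softmax over the 9 scores, and an alpha-weighted average of the patches that is then passed to `Whatnet`. A self-contained sketch of just that step (my addition; the linear scorer here is a random stand-in for `where.helper`):
```python
import torch

# Batch of 4 mosaics, each flattened to 9 patches x 2 features, as in the dataset above.
z = torch.randn(4, 18, dtype=torch.float64)
patches = z.view(4, 9, 2)
scorer = torch.randn(2, 1, dtype=torch.float64)    # stand-in for the learned linear layer
scores = patches @ scorer                          # (4, 9, 1): scalar score per patch
alphas = torch.softmax(scores, dim=1)              # attention weights over the 9 patches
avg = (alphas * patches).sum(dim=1)                # weighted average, shape (4, 2)
print(alphas.squeeze(-1).shape, avg.shape)
```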
notebooks/reco-tut-vrr-01-data-ingestion.ipynb
###Markdown --- ###Code import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import nltk import re from nltk.corpus import stopwords from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.preprocessing import LabelEncoder import cufflinks as cf nltk.download('wordnet') nltk.download('stopwords') df = pd.read_csv('./data/bronze/reviews.csv', na_values=' ') df.head() df.info() print('Unique counts:', df.nunique()) print('Kind of ratings:',df.Rating.unique()) #remove extra columns and keep the necessary ones for analysis df = pd.DataFrame(df.drop(['Bubble_Count', 'Review_Count'], axis=1)) #remove new line characters from variable columns df = df.replace(r'\n',' ', regex=True) #remove the numbers in the review column df.Reviews = df.Reviews.str.replace('\d+', '') #fills the rating and Review_counts variable missing values with the mean and median respectively df = df.fillna({'Rating': df.Rating.median(), 'Review_counts': df.Review_counts.mean()}) #drop all missing values df.dropna(axis=0, how='any', inplace=True) #reset index df.reset_index(drop=True, inplace=True) df.head() df.info() # Let's understand the two lists: reviews (text_train) and their labels (y_train) print("Type : ", type(df.Reviews)) print("Length of reviews: ", len(df.Reviews)) print("Review at index 6:\n ", df.Reviews[6]) print("Label of the review at Index 6: ", df.Rating[6]) ###Output Type : <class 'pandas.core.series.Series'> Length of reviews: 571 Review at index 6: vacation spot! There are three levels and so each family got their very own Label of the review at Index 6: 5.0 ###Markdown Vacation Rentals Reviews Word Count Distribution ###Code df['word_count'] = df['Reviews'].apply(lambda x: len(str(x).split())) reviews_lengths = list(df['word_count']) print("Number of descriptions:",len(reviews_lengths), "\nAverage word count", np.average(reviews_lengths), "\nMinimum word count", min(reviews_lengths), "\nMaximum word count", max(reviews_lengths)) cf.go_offline() cf.set_config_file(offline=False, world_readable=True) df['word_count'].iplot( kind='hist', bins = 50, linecolor='black', xTitle='word count', yTitle='count', title='Word Count Distribution in rental reviews') #Shows the average ratings of each city and province and the number of reviews obtained from each city #The higher number of ratings indicates what cities are getting more visits and in which province visits are more df.groupby(['Province', 'City']).agg({'Rating':'mean','Review_counts':'sum'}).sort_values(by= ['Province','Review_counts'], ascending=False) #Showing the best performing cities in terms of Review Ratings a = df.groupby(['City']).agg({'Rating':'mean','Review_counts':'sum'}).sort_values(by= ['Rating'], ascending=False) b = a[:10] #the 10 best performing cities b z = a.tail(10) #the 10 lowest performing cities z #Label Encoding for Categorical Target Variable lb = LabelEncoder() df['Rating'] = lb.fit_transform(df['Rating']) y = df.Rating Rating = df.Rating df.Rating.value_counts() !mkdir -p ./data/silver df.to_parquet('./data/silver/reviews.parquet.gzip', compression='gzip') ###Output _____no_output_____
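A quick round-trip check (my addition, assuming the cells above have been run): reload the saved parquet file and map a few encoded ratings back to their original values with the fitted `LabelEncoder`.
```python
# Reload the cleaned data and invert the label encoding as a sanity check.
check = pd.read_parquet('./data/silver/reviews.parquet.gzip')
print(check.shape)
print(lb.inverse_transform(check.Rating.head().to_numpy()))
```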
sphinx/scikit-intro/source/xgboost.ipynb
###Markdown XGBoost[XGBoost](https://xgboost.readthedocs.io/), or `eXtreme Gradient Boosting`, is gradient boosting library. Although `scikit-learn` has several [boosting algorithms available](https://scikit-learn.org/stable/modules/classes.htmlmodule-sklearn.ensemble), XGBoost's implementations are parallelized and takes advantage of GPU computing. A few of the types of learners XGBoost has include gradient boosting for regression, classification and survival analysis (e.g. Accelerated Failure Time `AFT`). There are no shortages of boosting libraries; here's a few more.- [LightGBM](https://lightgbm.readthedocs.io)- [CatBoost](https://catboost.ai/) RegressionLet's see how XGBoost works on regression problems by first simulating data. ###Code import numpy as np import random from sklearn.datasets import make_regression random.seed(37) np.random.seed(37) X, y = make_regression(**{ 'n_samples': 1000, 'n_features': 10, 'n_targets': 1, 'bias': 5.3, 'random_state': 37 }) print(f'X shape = {X.shape}, y shape {y.shape}') ###Output X shape = (1000, 10), y shape (1000,) ###Markdown We will split the generated data into training and testing sets. ###Code from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=37) print(X_train.shape, y_train.shape) print(X_test.shape, y_test.shape) ###Output (800, 10) (800,) (200, 10) (200,) ###Markdown The `XGBRegressor` class is used to learn a boosted regression model. Note that the objective is specified to `reg:squarederror`. A list of objectives is [available](https://xgboost.readthedocs.io/en/latest/parameter.htmllearning-task-parameters). Here, we specify only 10 estimators. ###Code import xgboost as xgb model = xgb.XGBRegressor(objective='reg:squarederror', n_estimators=10, seed=37) model.fit(X_train, y_train) ###Output _____no_output_____ ###Markdown We will measure the performance of the model using `mean absolute error` (MAE) and `root mean squared error` (RMSE). ###Code from sklearn.metrics import mean_squared_error, mean_absolute_error y_pred = model.predict(X_test) mae = mean_absolute_error(y_test, y_pred) rmse = np.sqrt(mean_squared_error(y_test, y_pred)) print(mae) print(rmse) ###Output 35.00964208044854 44.165107049077356 ###Markdown As you increase the number of estimators, the performance should increase, as measured by `MAE` and `RMSE`. There is a point of diminishing returns, however. ###Code import pandas as pd def get_performance(n_estimators): model = xgb.XGBRegressor(objective='reg:squarederror', n_estimators=n_estimators, seed=37) model.fit(X_train, y_train) y_pred = model.predict(X_test) mae = mean_absolute_error(y_test, y_pred) rmse = np.sqrt(mean_squared_error(y_test, y_pred)) return {'mae': mae, 'rmse': rmse} n_estimators = list(range(10, 101, 1)) results = pd.DataFrame([get_performance(n) for n in n_estimators], index=n_estimators) import matplotlib.pyplot as plt plt.style.use('ggplot') fig, axes = plt.subplots(1, 2, figsize=(15, 3)) _ = results.mae.plot(ax=axes[0]) _ = results.rmse.plot(ax=axes[1]) axes[0].set_title('XGBoost Regression Performance') axes[1].set_title('XGBoost Regression Performance') axes[0].set_ylabel('MAE') axes[1].set_ylabel('RMSE') plt.tight_layout() ###Output _____no_output_____ ###Markdown ClassificationLet's turn our attention to using XGBoost for classification by generating data for a classification problem. 
###Code from sklearn.datasets import make_classification X, y = make_classification(**{ 'n_samples': 2000, 'n_features': 20, 'n_informative': 10, 'n_redundant': 0, 'n_repeated': 0, 'n_classes': 2, 'random_state': 37 }) print(f'X shape = {X.shape}, y shape {y.shape}') ###Output X shape = (2000, 20), y shape (2000,) ###Markdown We will split the data into training and testing sets. ###Code X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=37) print(X_train.shape, y_train.shape) print(X_test.shape, y_test.shape) ###Output (1600, 20) (1600,) (400, 20) (400,) ###Markdown The input data must be transformed from numpy arrays into `DMatrix`. ###Code dtrain = xgb.DMatrix(X_train, y_train) dtest = xgb.DMatrix(X_test, y_test) print(dtrain.num_row(), dtrain.num_col()) print(dtest.num_row(), dtest.num_col()) ###Output 1600 20 400 20 ###Markdown Now we are ready to learn a classification model with XGBoost. ###Code param = { 'max_depth':2, 'eta':1, 'objective':'binary:logistic', 'eval_metric': 'logloss', 'seed': 37 } num_round = 20 model = xgb.train(param, dtrain, num_round) ###Output _____no_output_____ ###Markdown We will measure the performance of the model using `Area Under the Curve` (the Receiver Operating Characteristic curve) and the `Average Precision Score`. ###Code from sklearn.metrics import roc_auc_score, average_precision_score y_pred = model.predict(dtest) auc = roc_auc_score(y_test, y_pred) aps = average_precision_score(y_test, y_pred) print(auc) print(aps) ###Output 0.951115116017121 0.9450649144817633 ###Markdown SurvivalLet's see how survival regression works with XGBoost. Let's sample some data. ###Code X, y = make_regression(**{ 'n_samples': 1000, 'n_features': 4, 'n_targets': 0, 'random_state': 37 }) coef = np.array([2.0, -1.0, 3.5, 4.4]) baseline = np.e / np.power(1 + np.e, 2.0) y = np.exp(-X.dot(coef)) * baseline print(f'X shape = {X.shape}, y shape {y.shape}, coef shape = {coef.shape}') ###Output X shape = (1000, 4), y shape (1000,), coef shape = (4,) ###Markdown Here, we will split the data into training and testing sets. ###Code X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=37) print(X_train.shape, y_train.shape) print(X_test.shape, y_test.shape) ###Output (800, 4) (800,) (200, 4) (200,) ###Markdown Let's turn the inputs into the appropriate format. Note that we do not supply the duration into the `DMatrix` but set the lower and upper bound using the `set_float_info()` method. ###Code dtrain = xgb.DMatrix(X_train) dtest = xgb.DMatrix(X_test) dtrain.set_float_info('label_lower_bound', y_train) dtest.set_float_info('label_lower_bound', y_test) dtrain.set_float_info('label_upper_bound', y_train) dtest.set_float_info('label_upper_bound', y_test) print(dtrain.num_row(), dtrain.num_col()) print(dtest.num_row(), dtest.num_col()) ###Output 800 4 200 4 ###Markdown The following shows how to specify a survival model with AFT. 
###Code params = { 'objective': 'survival:aft', 'eval_metric': 'aft-nloglik', 'aft_loss_distribution': 'logistic', 'aft_loss_distribution_scale': 1.0, 'tree_method': 'exact', 'learning_rate': 0.05, 'max_depth': 5 } model = xgb.train(params, dtrain, num_boost_round=40, evals=[(dtrain, 'train')]) ###Output [0] train-aft-nloglik:2.58474 [1] train-aft-nloglik:2.13443 [2] train-aft-nloglik:1.76805 [3] train-aft-nloglik:1.51551 [4] train-aft-nloglik:1.30791 [5] train-aft-nloglik:1.15456 [6] train-aft-nloglik:1.01593 [7] train-aft-nloglik:0.90408 [8] train-aft-nloglik:0.80288 [9] train-aft-nloglik:0.71937 [10] train-aft-nloglik:0.64163 [11] train-aft-nloglik:0.57327 [12] train-aft-nloglik:0.51536 [13] train-aft-nloglik:0.46241 [14] train-aft-nloglik:0.41428 [15] train-aft-nloglik:0.37151 [16] train-aft-nloglik:0.33085 [17] train-aft-nloglik:0.29569 [18] train-aft-nloglik:0.26403 [19] train-aft-nloglik:0.23559 [20] train-aft-nloglik:0.20816 [21] train-aft-nloglik:0.18294 [22] train-aft-nloglik:0.16056 [23] train-aft-nloglik:0.13968 [24] train-aft-nloglik:0.11968 [25] train-aft-nloglik:0.10160 [26] train-aft-nloglik:0.08428 [27] train-aft-nloglik:0.06922 [28] train-aft-nloglik:0.05431 [29] train-aft-nloglik:0.04075 [30] train-aft-nloglik:0.02782 [31] train-aft-nloglik:0.01519 [32] train-aft-nloglik:0.00409 [33] train-aft-nloglik:-0.00611 [34] train-aft-nloglik:-0.01670 [35] train-aft-nloglik:-0.02580 [36] train-aft-nloglik:-0.03482 [37] train-aft-nloglik:-0.04267 [38] train-aft-nloglik:-0.05033 [39] train-aft-nloglik:-0.05770 ###Markdown Below, we evaluate the model using the [c-index](https://medium.com/analytics-vidhya/concordance-index-72298c11eac7). ###Code from itertools import combinations def get_status(p1, p2): x1, y1 = p1[0], p1[1] x2, y2 = p2[0], p2[1] r = (y2 - y1) * (x2 - x1) if r > 0: return 1 elif r < 0: return -1 else: return 0 y_pred = model.predict(dtest) pairs = combinations([(y_t, y_p) for y_t, y_p in zip(y_test, y_pred)], 2) results = [get_status(p1, p2) for p1, p2 in pairs] c = sum([1 for r in results if r == 1]) n = len(results) print(c / n) ###Output 0.9332160804020101 ###Markdown The c-index of the predictions from the training is as expected; it's higher than the testing set. ###Code y_pred = model.predict(dtrain) pairs = combinations([(y_t, y_p) for y_t, y_p in zip(y_train, y_pred)], 2) results = [get_status(p1, p2) for p1, p2 in pairs] c = sum([1 for r in results if r == 1]) n = len(results) print(c / n) ###Output 0.9664893617021276
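The pairwise loop above is quadratic-time pure Python; the same concordant-pair count can be computed with NumPy broadcasting (a sketch reproducing the same definition as the loop, assuming the variables from the cells above):
```python
# Vectorized c-index on the test set: concordant pairs / all pairs.
y_pred = model.predict(dtest)
iu = np.triu_indices(len(y_test), k=1)              # indices of all unordered pairs
dt = (y_test[:, None] - y_test[None, :])[iu]
dp = (y_pred[:, None] - y_pred[None, :])[iu]
print((dt * dp > 0).sum() / len(dt))
```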
notebooks/viewlims.ipynb
###Markdown ViewlimsCreates two identical panels. Zooming in on the right panel will showa rectangle in the first panel, denoting the zoomed region. ###Code import numpy as np import matplotlib.pyplot as plt from matplotlib.patches import Rectangle # We just subclass Rectangle so that it can be called with an Axes # instance, causing the rectangle to update its shape to match the # bounds of the Axes class UpdatingRect(Rectangle): def __call__(self, ax): self.set_bounds(*ax.viewLim.bounds) ax.figure.canvas.draw_idle() # A class that will regenerate a fractal set as we zoom in, so that you # can actually see the increasing detail. A box in the left panel will show # the area to which we are zoomed. class MandelbrotDisplay(object): def __init__(self, h=500, w=500, niter=50, radius=2., power=2): self.height = h self.width = w self.niter = niter self.radius = radius self.power = power def __call__(self, xstart, xend, ystart, yend): self.x = np.linspace(xstart, xend, self.width) self.y = np.linspace(ystart, yend, self.height).reshape(-1, 1) c = self.x + 1.0j * self.y threshold_time = np.zeros((self.height, self.width)) z = np.zeros(threshold_time.shape, dtype=complex) mask = np.ones(threshold_time.shape, dtype=bool) for i in range(self.niter): z[mask] = z[mask]**self.power + c[mask] mask = (np.abs(z) < self.radius) threshold_time += mask return threshold_time def ax_update(self, ax): ax.set_autoscale_on(False) # Otherwise, infinite loop # Get the number of points from the number of pixels in the window dims = ax.patch.get_window_extent().bounds self.width = int(dims[2] + 0.5) self.height = int(dims[2] + 0.5) # Get the range for the new area xstart, ystart, xdelta, ydelta = ax.viewLim.bounds xend = xstart + xdelta yend = ystart + ydelta # Update the image object with our new data and extent im = ax.images[-1] im.set_data(self.__call__(xstart, xend, ystart, yend)) im.set_extent((xstart, xend, ystart, yend)) ax.figure.canvas.draw_idle() md = MandelbrotDisplay() Z = md(-2., 0.5, -1.25, 1.25) fig1, (ax1, ax2) = plt.subplots(1, 2) ax1.imshow(Z, origin='lower', extent=(md.x.min(), md.x.max(), md.y.min(), md.y.max())) ax2.imshow(Z, origin='lower', extent=(md.x.min(), md.x.max(), md.y.min(), md.y.max())) rect = UpdatingRect([0, 0], 0, 0, facecolor='None', edgecolor='black', linewidth=1.0) rect.set_bounds(*ax2.viewLim.bounds) ax1.add_patch(rect) # Connect for changing the view limits ax2.callbacks.connect('xlim_changed', rect) ax2.callbacks.connect('ylim_changed', rect) ax2.callbacks.connect('xlim_changed', md.ax_update) ax2.callbacks.connect('ylim_changed', md.ax_update) ax2.set_title("Zoom here") plt.show() ###Output _____no_output_____
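Note that the zoom interaction above only works with an interactive backend (for example `%matplotlib widget`, assuming the ipympl package is installed); with the static inline backend the figure is rendered once. The callback mechanism itself can still be exercised programmatically, since changing the limits fires `xlim_changed` (a small sketch, my addition):
```python
fig, ax = plt.subplots()
ax.plot([0, 1], [0, 1])
# Any change of the x-limits, whether from zooming or from code, triggers the callback.
ax.callbacks.connect('xlim_changed', lambda a: print("new xlim:", a.get_xlim()))
ax.set_xlim(0, 2)
plt.show()
```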
QuasiNewton/quasi_newton_en.ipynb
###Markdown Quasi Newton methods Newton method vs. GD (B.T. Polyak Introduction to optimization, Ch. 3, $\S$ 1 )Method | Convergence speed | Complexity | Affine invariance | Restrictions to $f(x)$:---: | :---: | :---: | :---: | :---GD | Global linear | $O(n) + $ step size search | No | Differentiable, Lipschits gradientNewton method | Local quadratic | $O(n^3) + $ step size search | Yes | Twice differentiable; Lipschitz and positive definite hessian How reduce computational and memory complexity?- Computational complexity can be reduced with - quasi Newton methods aka methods of variable metric - they require storing of $n \times n$ matrix - Computational **and** memory complexity can be reduced with - limited memory quasi Newton methods, e.g. [L-BFGS](https://en.wikipedia.org/wiki/Limited-memory_BFGS) (Limited Broyden-Fletcher-Goldfarb-Shanno) - they **do not** require storing of any matrtix - instead of matrix they require storing $k \ll n$ vectors from из $\mathbb{R}^n$ Unified approach to get Newton method and gradient descent- Gradient descent is obtained from the linear approximation (or quadratic upper bound):$$f_G(x) \approx f(y) + \langle f'(y), x - y \rangle + \frac{1}{2}(x-y)^{\top} \frac{1}{\alpha}I(x - y)$$where $\alpha \in (0, 1/L], f(x) \leq f_G(x)$, which means $f_G$ is global upper estimate of $f(x)$- Newton method is obtained from second order approximation$$f_N(x) \approx f(y) + \langle f'(y), x - y \rangle + \frac{1}{2} (x-y)^{\top}f''(y)(x-y)$$ **Idea:** use intermediate approximation in the form$$f_q(x) \approx f(y) + \langle f'(y), x - y \rangle + \frac{1}{2} (x-y)^{\top}{\color{red}{B(y)}}(x-y),$$which gives the following rule for update current point:$$x_{k+1} = x_k - \alpha_k B^{-1}_k f'(x_k) = x_k - \alpha_k H_k f'(x_k)$$ Some history...- The first quasi Newton method was proposed by physicist William Davidon in the middle of 1950s for accelerating of computations using unstable computers, which crashed before the calculation was finished- His paper about this method was not accepted for publication and it was just technical report more than 30 years- It was finally [published](http://epubs.siam.org/doi/abs/10.1137/0801001) in 1991 in the first volume of [SIAM Journal on Optimization](https://www.siam.org/journals/siopt.php) General scheme of quasi Newton methods```pythondef QuasiNewtonMethod(f, x0, epsilon, **kwargs): x = x0 H = I while True: h = -H.dot(grad_f(x)) if StopCriterion(x, f, h, **kwargs) < epsilon: break alpha = SelectStepSize(x, h, f, **kwargs) x = x + alpha * h H = UpdateH(H, f(x), grad_f(x)) return x``` How to find $B_{k+1}$?In the point $x_{k+1}$ the following relation holds:$$f_q(h) \approx f(x_{k+1}) + \langle f'(x_{k+1}), h \rangle + \frac{1}{2}h^{\top}B_{k+1}h$$From the definition follows that $B_{k+1} \in \mathbb{S}^n_{++}$.What are the natural restrictions for $f_q(h)$? $$f_q'(-\alpha_k h_k) = f'(x_k) \qquad f'_q(0) = f'(x_{k+1}),$$where the first equation gives$$f'(x_{k+1}) - \alpha_k B_{k+1}h_k = f'(x_k),$$and the second one holds by construction. Secant equationThe first equation gives$$B_{k+1}s_k = y_k,$$where $s_k = x_{k+1} - x_k$ and $y_k = f'(x_{k+1}) - f'(x_k)$.This equation has solution only if $s^{\top}_k y_k > 0$. Why? **Q:** does the relation between vectors $s_k$ and $y_k$ always hold?**Hint**: remember about Wolfe condition **Q:** is matrix $B_{k+1}$ unique? How define $B_{k+1}$ uniquely?\begin{align*}& \min_B \| B_k - B \| \\\text{s.t. 
**Q:** is the matrix $B_{k+1}$ unique? How can $B_{k+1}$ be defined uniquely?\begin{align*}& \min_B \| B_k - B \| \\\text{s.t. } & B = B^{\top}\\& Bs_k = y_k\end{align*} DFP (Davidon-Fletcher-Powell)$$B_{k+1} = (I - \rho_k y_k s^{\top}_k)B_k(I - \rho_k s_ky^{\top}_k) + \rho_k y_k y^{\top}_k,$$where $\rho_k = \dfrac{1}{y^{\top}_k s_k}$, or, with the Sherman-Morrison-Woodbury formula,$$B^{-1}_{k+1} = H_{k+1} = H_k - \dfrac{H_ky_k y_k^{\top}H_k}{y^{\top}_kH_ky_k} + \dfrac{s_ks^{\top}_k}{y^{\top}_ks_k}$$**Q:** what is the rank of the difference between $B_{k+1}$ ($H_{k+1}$) and $B_{k}$ ($H_{k}$)? SummaryThe general idea of quasi Newton methods: instead of computing the complete hessian in every iteration, update the current hessian or its approximation with cheaply computable transformations BFGS**Q:** what is a natural modification of the DFP method? \begin{align*}& \min_H \| H_k - H \| \\\text{s.t. } & H = H^{\top}\\& Hy_k = s_k\end{align*} Update equation for BFGS:$$H_{k+1} = (I - \rho_k s_ky^{\top}_k)H_k(I - \rho_k y_k s^{\top}_k) + \rho_k s_k s^{\top}_k,$$where $\rho_k = \dfrac{1}{y^{\top}_k s_k}$ Implementation details- No operations with complexity $O(n^3)$, i.e. avoid matrix-matrix multiplication and solving linear systems (cf. [implementation in SciPy](https://github.com/scipy/scipy/blob/v0.18.1/scipy/optimize/optimize.py#L874-L976))- Only the Wolfe rule guarantees that $y_k^{\top}s_k > 0$- Parameters in the Wolfe rule are usually the following - $\alpha_0 = 1$ is necessary for superlinear convergence - $\beta_1 = 10^{-4}$, $\beta_2 = 0.9$- Initialization of $H_0$ - identity matrix - $H_0 = \frac{y_0^{\top}s_0}{y_0^{\top}y_0}I$ **after** the first step, but before the update to $H_1$; in computing $x_1$, $H_0 = I$ is used - $H_0 = \delta \|g_0\|^{-1}_2 I$, where the parameter $\delta$ has to be given in advance and $g_0$ is the gradient at the point $x_0$- When using $B$ instead of $H$, one has to store $B$ implicitly in the form of an $LDL^{\top}$ decomposition and update this decomposition rather than the matrix itself. This is performed in $O(n^2)$. Computing $h_k$ is equivalent to solving a linear system with an already factorized matrix, therefore it also requires $O(n^2)$. This approach controls the stability of the method through the diagonal values of the matrix $D$. In practice the standard choice is to use $H$
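As an added illustration (not part of the original notebook), here is a minimal NumPy sketch of the BFGS update of $H$ written above; `H`, `s`, `y` are assumed to be an $n \times n$ array and two $n$-vectors with $y^{\top}s > 0$.

```python
import numpy as np

def bfgs_update(H, s, y):
    """One BFGS update of the inverse hessian approximation: H_{k+1} from H_k, s_k, y_k."""
    rho = 1.0 / (y @ s)                        # rho_k = 1 / (y_k^T s_k), requires y^T s > 0
    I = np.eye(len(s))
    V = I - rho * np.outer(s, y)               # I - rho_k s_k y_k^T
    return V @ H @ V.T + rho * np.outer(s, s)

# quick check on a quadratic f(x) = 0.5 x^T A x, whose gradient is A x
A = np.array([[3.0, 1.0], [1.0, 2.0]])
x_prev, x_next = np.array([1.0, 1.0]), np.array([0.4, 0.7])
s, y = x_next - x_prev, A @ (x_next - x_prev)
H_next = bfgs_update(np.eye(2), s, y)
print(np.allclose(H_next @ y, s))              # the secant equation H_{k+1} y_k = s_k holds
```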
Convergence**Theorem**Let $f$ be twice continuously differentiable with a Lipschitz hessian. Also assume that the sequence generated by the BFGS method converges to a minimizer $x^*$ such that $$\sum_{k=1}^{\infty} \|x_k - x^*\| < \infty.$$Then $x_k \to x^*$ superlinearly. Self-correcting property- If BFGS produces a poor hessian approximation in some iteration, then after some iterations this faulty approximation will be fixed **automatically**, i.e. the method corrects itself- This property is active only in the case of appropriate step size selection, for example with the Wolfe rule- The DFP method is significantly worse at correcting poor hessian approximations- This property is illustrated in the experiments below Limited-memory BFGS (L-BFGS)- BFGS requires not the matrix $H$ itself, but a function that computes the product of this matrix with the antigradient - Since we need a locally accurate hessian approximation in every iteration, old vectors $s$ and $y$ can decrease the quality of the hessian approximation**Idea**- Store only the $k \ll n$ most recent vectors $s$ and $y$ - memory reduction from $n^2$ to $kn$- Compute the matrix-vector product with the two-loop recursion, without explicitly forming the matrix $H$
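A minimal sketch of that two-loop recursion, added here for illustration (it is not the notebook's own `LBFGS` implementation): `s_list` and `y_list` are assumed to hold the $k$ most recent pairs $(s_i, y_i)$ with the newest last, and `g` is the current gradient.

```python
import numpy as np

def lbfgs_direction(g, s_list, y_list):
    """Return H_k g via the L-BFGS two-loop recursion, with implicit H_0 = gamma * I."""
    q = np.array(g, dtype=float)
    history = []
    # first loop: from the newest pair to the oldest
    for s, y in zip(reversed(s_list), reversed(y_list)):
        rho = 1.0 / (y @ s)
        alpha = rho * (s @ q)
        q = q - alpha * y
        history.append((rho, alpha, s, y))
    # implicit initial matrix H_0 = (s^T y / y^T y) I built from the newest pair
    s, y = s_list[-1], y_list[-1]
    r = (s @ y) / (y @ y) * q
    # second loop: from the oldest pair back to the newest
    for rho, alpha, s, y in reversed(history):
        beta = rho * (y @ r)
        r = r + (alpha - beta) * s
    return r  # the quasi Newton step is then h = -r
```

Only the $2k$ stored vectors and a handful of scalars are touched, which is where the $kn$ memory bound above comes from.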
Relationship with the non-linear conjugate gradient method- In the Hestenes-Stiefel method$$h_{k+1} = -f'(x_{k+1}) + \beta_{k+1} h_{k}, \quad \beta_{k+1} = \frac{y_k^{\top}f'(x_{k+1})}{y_k^{\top} h_k}$$or$$h_{k+1} = -\left(I - \frac{s_k y_k^{\top}}{y_k^{\top}s_k}\right)f'(x_{k+1}) = -\hat{H}_{k+1} f'(x_{k+1})$$- The matrix $\hat{H}_{k+1}$ is nonsymmetric and not positive definite, but the matrix$$H_{k+1} = \left(I - \frac{s_k y_k^{\top}}{y_k^{\top}s_k}\right)\left(I - \frac{y_k s_k^{\top}}{y_k^{\top}s_k}\right) + \frac{s_ks_k^{\top}}{y_k^{\top}s_k}$$satisfies all the requirements on the matrix in BFGS, and this expression coincides with the update of $H_k$ if $H_k = I$, i.e. $k=1$ in L-BFGS with $H_0 = I$- Moreover, if the step size is selected as in the steepest descent method, the Hestenes-Stiefel formula and the L-BFGS formula for $k=1$ are the same Barzilai-Borwein method- The first [paper](http://pages.cs.wisc.edu/~swright/726/handouts/barzilai-borwein.pdf) about this method was published in 1988 in the IMA Journal of Numerical Analysis- The [paper](http://papers.nips.cc/paper/6286-barzilai-borwein-step-size-for-stochastic-gradient-descent.pdf) from NIPS-2016 proposed its modification for the case of stochastic gradient estimation- Idea: a combination of the steepest descent method and a quasi Newton method Method idea- Steepest descent: $x_{k+1} = x_k - \alpha_k f'(x_k)$, $\alpha_k = \arg \min\limits_{\alpha > 0} f(x_{k+1})$- Newton method $x_{k+1} = x_k - (f''(x_k))^{-1} f'(x_k)$- Approximate the hessian with a diagonal matrix$$\alpha_k f'(x_k) = \alpha_k I f'(x_k) = \left( \frac{1}{\alpha_k} I \right)^{-1} f'(x_k) \approx (f''(x_k))^{-1} f'(x_k)$$- How to find $\alpha_k$? Secant equation again- For the exact hessian $$f''(x_{k})(x_{k} - x_{k-1}) = f'(x_{k}) - f'(x_{k-1})$$- For the approximate hessian$$\alpha_k^{-1} s_{k-1} \approx y_{k-1}$$- This is the problem of approximating one vector by a scaling of another vector- The simplest quasi Newton method is thus reduced to a method of step size selection Three ways to find $\alpha_k$- The first way - Problem $$ \min_{\beta} \|\beta s_{k-1} - y_{k-1} \|^2_2 $$ - Solution $$ \alpha = \frac{1}{\beta} = \frac{s^{\top}_{k-1} s_{k-1}}{s^{\top}_{k-1} y_{k-1}} $$ - The second way - Problem $$ \min_{\alpha} \| s_{k-1} - \alpha y_{k-1} \|^2_2 $$ - Solution $$ \alpha = \frac{s^{\top}_{k-1} y_{k-1}}{y^{\top}_{k-1} y_{k-1}} $$ - The third way is called non-monotone line search: a specific modification of the Armijo rule that takes into account the history of objective function values; see more details in the 2004 [paper](https://www.math.lsu.edu/~hozhang/papers/nonmonotone.pdf) in SIAM Journal on Optimization Experiments Analytical center of the linear inequality system$$f(x) = - \sum_{i=1}^m \log(1 - a_i^{\top}x) - \sum\limits_{i = 1}^n \log (1 - x^2_i) \to \min_x$$ ###Code import numpy as np import liboptpy.unconstr_solvers as methods import liboptpy.step_size as ss %matplotlib inline import matplotlib.pyplot as plt import scipy.optimize as scopt plt.rc("text", usetex=True) n = 3000 m = 100 x0 = np.zeros(n) max_iter = 100 tol = 1e-5 A = np.random.rand(m, n) * 10 f = lambda x: -np.sum(np.log(1 - A.dot(x))) - np.sum(np.log(1 - x*x)) grad_f = lambda x: np.sum(A.T / (1 - A.dot(x)), axis=1) + 2 * x / (1 - np.power(x, 2)) def bb_method(f, gradf, x0, tol=1e-6, maxiter=100, callback=None, alpha_type=1): it = 0 x_prev = x0.copy() current_tol = np.linalg.norm(gradf(x_prev)) alpha = 1e-4 while current_tol > tol and it < maxiter: it += 1 current_grad = gradf(x_prev) if it != 1: g = current_grad - prev_grad if alpha_type == 1: alpha = g.dot(s) / g.dot(g) elif alpha_type == 2: alpha = s.dot(s) / g.dot(s) if callback: callback(x_prev) x_next = x_prev - alpha * current_grad current_tol = np.linalg.norm(gradf(x_next)) prev_grad = current_grad s = x_next - x_prev x_prev = x_next if callback: callback(x_prev) return x_next method = { "BB 1": methods.fo.BarzilaiBorweinMethod(f, grad_f, init_alpha=1e-4, type=1), "BFGS": methods.fo.BFGS(f, grad_f), "DFP": methods.fo.DFP(f, grad_f), "LBFGS": methods.fo.LBFGS(f, grad_f), } for m in method: print("\t Method {}".format(m)) _ = method[m].solve(x0=x0, tol=tol, max_iter=max_iter, disp=True) print("\t Method BFGS Scipy") scopt_conv = [] scopt_res = scopt.minimize(f, x0, method="BFGS", jac=grad_f, callback=lambda x: scopt_conv.append(x), tol=tol, options={"maxiter": max_iter}) print("Result: {}".format(scopt_res.message)) if scopt_res.success: print("Convergence in {} iterations".format(scopt_res.nit)) print("Function value = {}".format(f(scopt_res.x))) plt.figure(figsize=(8, 6)) for m in method: plt.semilogy([np.linalg.norm(grad_f(x)) for x in method[m].get_convergence()], label=m) plt.semilogy([np.linalg.norm(grad_f(x)) for x in [x0] + scopt_conv], label="BFGS SciPy") plt.ylabel("$\|f'(x_k)\|_2$", fontsize=18) plt.xlabel("Number of iterations, $k$", fontsize=18) plt.legend(fontsize=18) plt.xticks(fontsize=18) _ = plt.yticks(fontsize=18) for m in method: print("\t Method {}".format(m)) %timeit method[m].solve(x0=x0, tol=tol, max_iter=max_iter) %timeit scopt.minimize(f, x0, method="BFGS", jac=grad_f, tol=tol, options={"maxiter": max_iter}) ###Output Method BB 1 6.03 ms ± 59.3 µs per loop (mean ± std. dev. 
of 7 runs, 100 loops each) Method BFGS ###Markdown Ill-conditioned problem ###Code n = 50 D = np.arange(1, n+1) U = np.random.randn(n, n) U, _ = np.linalg.qr(U) A = U.dot(np.diag(D)).dot(U.T) b = np.random.randn(n) eig_vals = np.linalg.eigvals(A) print("Condition number = {}".format(np.max(eig_vals) / np.min(eig_vals))) f = lambda x: 0.5 * x.T.dot(A.dot(x)) - b.dot(x) gradf = lambda x: A.dot(x) - b x0 = np.random.randn(n) method = { "BB 1": methods.fo.BarzilaiBorweinMethod(f, gradf, init_alpha=1e-4, type=1), "BB 2": methods.fo.BarzilaiBorweinMethod(f, gradf, init_alpha=1e-4, type=2), "BFGS": methods.fo.BFGS(f, gradf), "DFP": methods.fo.DFP(f, gradf), "GD": methods.fo.GradientDescent(f, gradf, ss.ExactLineSearch4Quad(A, b)), "LBFGS": methods.fo.LBFGS(f, gradf, hist_size=10), } for m in method: print("\t Method {}".format(m)) _ = method[m].solve(x0=x0, tol=tol, max_iter=max_iter, disp=True) print("\t Method BFGS Scipy") scopt_conv = [] scopt_res = scopt.minimize(f, x0, method="BFGS", jac=gradf, callback=lambda x: scopt_conv.append(x), tol=tol, options={"maxiter": max_iter}) print("Result: {}".format(scopt_res.message)) if scopt_res.success: print("Convergence in {} iterations".format(scopt_res.nit)) print("Function value = {}".format(f(scopt_res.x))) plt.figure(figsize=(12, 8)) fontsize = 26 for m in method: plt.semilogy([np.linalg.norm(gradf(x)) for x in method[m].get_convergence()], label=m) plt.semilogy([np.linalg.norm(gradf(x)) for x in [x0] + scopt_conv], label='BFGS SciPy') plt.legend(fontsize=fontsize) plt.ylabel("$\|f'(x_k)\|_2$", fontsize=fontsize) plt.xlabel("Number of iterations, $k$", fontsize=fontsize) plt.xticks(fontsize=fontsize) _ = plt.yticks(fontsize=fontsize) ###Output _____no_output_____
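The non-monotone line search mentioned in the Barzilai-Borwein discussion above is not implemented in this notebook. Below is a rough added sketch of the classical max-type variant (Grippo et al.); the 2004 paper cited above replaces the max over recent values with a weighted average. `f`, `gradf`, the direction `h`, and the list `f_hist` of the last few objective values are assumed inputs, and `h` is assumed to be a descent direction.

```python
import numpy as np

def nonmonotone_armijo(f, gradf, x, h, f_hist, c1=1e-4, eta=0.5, alpha0=1.0, max_iter=50):
    """Backtracking that accepts alpha once
    f(x + alpha h) <= max(f_hist) + c1 * alpha * gradf(x)^T h,
    where f_hist holds the last few objective values (the non-monotone reference)."""
    slope = gradf(x) @ h
    f_ref = max(f_hist)              # a monotone Armijo rule would use f(x) here
    alpha = alpha0
    for _ in range(max_iter):
        if f(x + alpha * h) <= f_ref + c1 * alpha * slope:
            break
        alpha *= eta
    return alpha

# toy usage on f(x) = ||x||^2 with the steepest descent direction
f = lambda x: x @ x
gradf = lambda x: 2 * x
x = np.array([1.0, -2.0])
print(nonmonotone_armijo(f, gradf, x, -gradf(x), f_hist=[f(x)]))
```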
ml/01-numpy/08-diagonals.ipynb
###Markdown DiagonalsHere we use a different import style from before: to call a numpy function we now need the `np.` prefix. ###Code import numpy as np a = np.array([11,21,31,12,22,32,13,23,33]) a.shape = 3,3 a ###Output _____no_output_____ ###Markdown View its diagonal elements: ###Code a.diagonal() ###Output _____no_output_____ ###Markdown An offset lets you view the off-diagonals; a positive offset moves to the right of the main diagonal, a negative one to the left: ###Code a.diagonal(offset=1) a.diagonal(offset=-1) ###Output _____no_output_____ ###Markdown Fancy indexing can also be used to get the diagonal ###Code i = [0,1,2] a[i, i] ###Output _____no_output_____ ###Markdown The diagonal values can be updated: ###Code a[i, i] = 2 a ###Output _____no_output_____ ###Markdown Modify the values on an off-diagonal ###Code i = np.array([0,1]) a[i, i + 1] = 1 a a[i + 1, i] = -1 a ###Output _____no_output_____
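As a small added aside (not part of the original notebook), `np.diag`, `np.fill_diagonal`, and `np.diag_indices` cover the same operations without building the index arrays by hand:

```python
import numpy as np

a = np.arange(1, 10).reshape(3, 3)
print(np.diag(a))            # main diagonal as a 1-D array
print(np.diag(a, k=1))       # first superdiagonal, same as a.diagonal(offset=1)
np.fill_diagonal(a, 0)       # set the main diagonal in place
rows, cols = np.diag_indices(3)
a[rows, cols] = 7            # the equivalent fancy-indexing update
print(a)
```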
ReproduceDegScore/degscore_ensemble.ipynb
###Markdown Code to reproduce DegScore from Kaggle datasets ###Code def get_ensemble(sequence, n=1000): structs = sample_structures(sequence, n_samples=n, package='vienna') structs = [list(write_loop_assignments(s)) for s in structs] return np.array(structs) def encode_input(df, window_size=1, pad=10, seq=True, struct=True, ensemble_size=0): '''Creat input/output for regression model for predicting structure probing data. Inputs: dataframe (in EternaBench RDAT format) window_size: size of window (in one direction). so window_size=1 is a total window size of 3 pad: number of nucleotides at start to not include seq (bool): include sequence encoding struct (bool): include bpRNA structure encoding Outputs: Input array (n_samples x n_features): array of windowed input features feature_names (list): feature names ''' #MAX_LEN = 68 BASES = ['A','U','G','C'] STRUCTS = ['H','E','I','M','B','S'] inpts = [] labels = [] feature_kernel=[] if seq: feature_kernel.extend(BASES) if struct: feature_kernel.extend(STRUCTS) feature_names = ['%s_%d' % (k, val) for val in range(-1*window_size, window_size+1) for k in feature_kernel] for i, row in tqdm(df.iterrows(), desc='Encoding inputs', total=len(df)): MAX_LEN = len(row['sequence'])-39 #68 for RYOS-I arr = np.zeros([MAX_LEN,len(feature_kernel)]) if ensemble_size > 0: # stochastically sample ensemble ensemble = get_ensemble(row['sequence'], n=ensemble_size) else: # use MEA structure ensemble = np.array([list(row['predicted_loop_type'])]) for index in range(pad,MAX_LEN): ctr=0 #encode sequence if seq: for char in BASES: if row['sequence'][index]==char: arr[index,ctr]+=1 ctr+=1 if struct: loop_assignments = ''.join(ensemble[:,index]) for char in STRUCTS: prob = loop_assignments.count(char) / len(loop_assignments) arr[index,ctr]+=prob ctr+=1 # add zero padding to the side padded_arr = np.vstack([np.zeros([window_size,len(feature_kernel)]),arr[pad:], np.zeros([window_size,len(feature_kernel)])]) for index in range(pad,MAX_LEN): new_index = index+window_size-pad tmp = padded_arr[new_index-window_size:new_index+window_size+1] inpts.append(tmp.flatten()) labels.append('%s_%d' % (row['id'], index)) return np.array(inpts), feature_names, labels def encode_output(df, data_type='reactivity', pad=10): '''Creat input/output for regression model for predicting structure probing data. Inputs: dataframe (in EternaBench RDAT format) data_type: column name for degradation window_size: size of window (in one direction). so window_size=1 is a total window size of 3 pad: number of nucleotides at start to not include Outputs: output array (n_samples): array of reactivity values ''' #MAX_LEN = 68 outpts = [] labels = [] # output identity should be in form id_00073f8be_0 for i, row in df.iterrows(): MAX_LEN = len(row['sequence'])-39 for index in range(pad,MAX_LEN): outpts.append(row[data_type][index]) labels.append('%s_%d' % (row['id'], index)) return outpts, labels def encode_input_construct(df, window_size=1, pad=10, seq=True, struct=True, ensemble_size=0): '''Creat input/output for regression model for predicting structure probing data. Inputs: dataframe (in EternaBench RDAT format) window_size: size of window (in one direction). 
so window_size=1 is a total window size of 3 pad: number of nucleotides at start to not include seq (bool): include sequence encoding struct (bool): include bpRNA structure encoding Outputs: Input array (n_samples x n_features): array of windowed input features feature_names (list): feature names ''' #MAX_LEN = 68 BASES = ['A','U','G','C'] STRUCTS = ['H','E','I','M','B','S'] inpts = [] labels = [] feature_kernel=[] if seq: feature_kernel.extend(BASES) if struct: feature_kernel.extend(STRUCTS) feature_names = ['%s_%d' % (k, val) for val in range(-1*window_size, window_size+1) for k in feature_kernel] for i, row in tqdm(df.iterrows(), desc='Encoding inputs', total=len(df)): MAX_LEN = len(row['sequence'])-39 #68 for RYOS-I arr = np.zeros([MAX_LEN,len(feature_kernel)]) if ensemble_size > 0: # stochastically sample ensemble ensemble = get_ensemble(row['sequence'], n=ensemble_size) else: # use MEA structure ensemble = np.array([list(row['predicted_loop_type'])]) for index in range(pad,MAX_LEN): ctr=0 #encode sequence if seq: for char in BASES: if row['sequence'][index]==char: arr[index,ctr]+=1 ctr+=1 if struct: loop_assignments = ''.join(ensemble[:,index]) for char in STRUCTS: prob = loop_assignments.count(char) / len(loop_assignments) arr[index,ctr]+=prob ctr+=1 # add zero padding to the side padded_arr = np.vstack([np.zeros([window_size,len(feature_kernel)]),arr[pad:], np.zeros([window_size,len(feature_kernel)])]) tmp_construct_holder = np.zeros((window_size*2+1)*len(feature_kernel)) for index in range(pad,MAX_LEN): new_index = index+window_size-pad tmp = padded_arr[new_index-window_size:new_index+window_size+1] tmp_construct_holder = tmp_construct_holder+tmp.flatten() labels.append('%s_%d' % (row['id'], index)) inpts.append(tmp_construct_holder) return np.array(inpts), feature_names, labels def encode_output_construct(df, data_type='reactivity', pad=10): '''Creat input/output for regression model for predicting structure probing data. Inputs: dataframe (in EternaBench RDAT format) data_type: column name for degradation window_size: size of window (in one direction). so window_size=1 is a total window size of 3 pad: number of nucleotides at start to not include Outputs: output array (n_samples): array of reactivity values ''' #MAX_LEN = 68 outpts = [] labels = [] # output identity should be in form id_00073f8be_0 for i, row in df.iterrows(): MAX_LEN = len(row['sequence'])-39 outpts.append(np.sum([row[data_type][index] for index in range(pad,MAX_LEN)])) return outpts, labels ###Output _____no_output_____ ###Markdown Load data ###Code kaggle_train = pd.read_json('train.json',lines=True) kaggle_train = kaggle_train.loc[kaggle_train['SN_filter']==1] kaggle_test = pd.read_json('test.json',lines=True) #kaggle_test = pd.read_csv('test_labels.csv') ###Output _____no_output_____ ###Markdown Encode data Max. 
expected accuracy ###Code mea_inputs_train, mea_feature_names, _ = encode_input(kaggle_train, window_size=2) mea_inputs_test, _, mea_test_labels = encode_input(kaggle_test, window_size=2) mea_inputs_train_construct, mea_feature_names, _ = encode_input_construct(kaggle_train, window_size=12) #mea_inputs_test, _, mea_test_labels = encode_input(kaggle_test, window_size=12) ###Output _____no_output_____ ###Markdown Ensemble-averaged encoding ###Code ensemble_size = 100 ens_inputs_train, ens_feature_names, _ = encode_input(kaggle_train, window_size=12, ensemble_size=ensemble_size) ens_inputs_test, _, ens_test_labels = encode_input(kaggle_test, window_size=12, ensemble_size=ensemble_size) ###Output _____no_output_____ ###Markdown Visualize two encodings for an example nucleotide ###Code figure(figsize=(10,4)) subplot(1,2,1) title('MFE encoding') imshow(np.array(mea_inputs_train[33].reshape(25,10)).T,cmap='gist_heat_r') yticks(range(10), ['A','U','G','C','H','E','I','M','B','S']) xlabel('window position') subplot(1,2,2) title('ensemble encoding') imshow(np.array(ens_inputs_train[33].reshape(25,10)).T,cmap='gist_heat_r') yticks(range(10), ['A','U','G','C','H','E','I','M','B','S']) xlabel('window position') ###Output _____no_output_____ ###Markdown Setup kaggle submissions ###Code sample_submission = pd.read_csv('sample_submission.csv.zip') mask = sample_submission['id_seqpos'].isin(mea_test_labels) ens_sample_submission = pd.read_csv('sample_submission.csv.zip') mask = ens_sample_submission['id_seqpos'].isin(ens_test_labels) ###Output _____no_output_____ ###Markdown Train models MEA models ###Code mea_models = {} for output_type in ['deg_Mg_pH10']: #['reactivity', 'deg_Mg_pH10', 'deg_pH10', 'deg_Mg_50C','deg_50C']: mea_outputs_train, mea_outputs_labels = encode_output(kaggle_train, data_type=output_type) #mea_outputs_train, mea_outputs_labels = encode_output_construct(kaggle_train, data_type=output_type) # Clip negative values to 0 #mea_outputs_train = np.clip(mea_outputs_train, -1, 100) reg = Ridge(alpha=0.15, fit_intercept=False) print('Fitting %s ...' % output_type) #reg.fit(mea_inputs_train_construct, mea_outputs_train) reg.fit(mea_inputs_train, mea_outputs_train) mea_models[output_type] = reg # test inputs, add to sample submission df test_prediction = reg.predict(mea_inputs_test) #sample_submission.loc[mask, output_type] = test_prediction ','.join(["%.6f" % x for x in reg.coef_]) mdl = DegScore('AAA') mdl.__dict__ figure(figsize=(20,5)) imshow(np.vstack([x[:68] for x in kaggle_train['deg_Mg_pH10']]).T, aspect=10,cmap='RdYlBu_r') colorbar(fraction=0.01) ###Output _____no_output_____ ###Markdown Ensemble-averaged models ###Code ens_models = {} for output_type in ['reactivity', 'deg_Mg_pH10', 'deg_pH10', 'deg_Mg_50C','deg_50C']: ens_outputs_train, ens_outputs_labels = encode_output(kaggle_train, data_type=output_type) # Clip negative values to 0 ens_outputs_train = np.clip(ens_outputs_train, 0, 100) reg = Ridge(alpha=0.15) print('Fitting %s ...' 
% output_type) reg.fit(ens_inputs_train, ens_outputs_train) ens_models[output_type] = reg # test inputs, add to sample submission df test_prediction = reg.predict(ens_inputs_test) ens_sample_submission.loc[mask, output_type] = test_prediction # save to csv sample_submission.to_csv('test_mea_code_jan28.csv', index=False) ens_sample_submission.to_csv('test_ens_code_jan28.csv', index=False) ###Output _____no_output_____ ###Markdown Using the existing class that contains "DegScore-2.1" ###Code def score_mcrmse(models, test_df): scored_data_types = ['reactivity', 'deg_Mg_pH10', 'deg_Mg_50C'] seq_scores = [] for i, row in test_df.iterrows(): scores = [] for data_type in scored_data_types: model = models[data_type] coeffs = model.coef_ inter = model.intercept_ mdl = DegScore.DegScore(row['sequence'], structure=row['structure'], coeffs=coeffs, intercept=inter) seq_scored = row['seq_scored'] pred = mdl.degscore_by_position[:seq_scored] target = np.array(json.loads(row[data_type])[:seq_scored]) scores.extend(np.square(pred - target)) seq_scores.append(scores) return np.mean(seq_scores) score_mcrmse(mea_models, kaggle_test) score_mcrmse(ens_models, kaggle_test) ###Output _____no_output_____
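For reference, here is a small added sketch (not from the original notebook) of the column-wise MCRMSE, i.e. the mean over scored columns of each column's RMSE, which is the usual reading of the metric named in `score_mcrmse` above; note that the helper above averages the squared errors directly, without the per-column square root. `pred` and `target` are assumed to be arrays of shape `(n_positions, n_outputs)`.

```python
import numpy as np

def mcrmse(pred, target):
    """Mean column-wise RMSE: average over columns of sqrt(mean squared error per column)."""
    pred = np.asarray(pred, dtype=float)
    target = np.asarray(target, dtype=float)
    return float(np.mean(np.sqrt(np.mean((pred - target) ** 2, axis=0))))

# toy example: 4 positions, 3 scored output columns
print(mcrmse(np.zeros((4, 3)), np.ones((4, 3))))  # 1.0
```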
udacity-dlnd-project-4/dlnd_language_translation.ipynb
###Markdown Language TranslationIn this project, you’re going to take a peek into the realm of neural network machine translation. You’ll be training a sequence to sequence model on a dataset of English and French sentences that can translate new sentences from English to French. Get the DataSince translating the whole language of English to French will take lots of time to train, we have provided you with a small portion of the English corpus. ###Code """ DON'T MODIFY ANYTHING IN THIS CELL """ import helper import problem_unittests as tests source_path = 'data/small_vocab_en' target_path = 'data/small_vocab_fr' source_text = helper.load_data(source_path) target_text = helper.load_data(target_path) ###Output _____no_output_____ ###Markdown Explore the DataPlay around with view_sentence_range to view different parts of the data. ###Code view_sentence_range = (0, 10) """ DON'T MODIFY ANYTHING IN THIS CELL """ import numpy as np print('Dataset Stats') print('Roughly the number of unique words: {}'.format(len({word: None for word in source_text.split()}))) sentences = source_text.split('\n') word_counts = [len(sentence.split()) for sentence in sentences] print('Number of sentences: {}'.format(len(sentences))) print('Average number of words in a sentence: {}'.format(np.average(word_counts))) print() print('English sentences {} to {}:'.format(*view_sentence_range)) print('\n'.join(source_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]])) print() print('French sentences {} to {}:'.format(*view_sentence_range)) print('\n'.join(target_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]])) ###Output Dataset Stats Roughly the number of unique words: 227 Number of sentences: 137861 Average number of words in a sentence: 13.225277634719028 English sentences 0 to 10: new jersey is sometimes quiet during autumn , and it is snowy in april . the united states is usually chilly during july , and it is usually freezing in november . california is usually quiet during march , and it is usually hot in june . the united states is sometimes mild during june , and it is cold in september . your least liked fruit is the grape , but my least liked is the apple . his favorite fruit is the orange , but my favorite is the grape . paris is relaxing during december , but it is usually chilly in july . new jersey is busy during spring , and it is never hot in march . our least liked fruit is the lemon , but my least liked is the grape . the united states is sometimes busy during january , and it is sometimes warm in november . French sentences 0 to 10: new jersey est parfois calme pendant l' automne , et il est neigeux en avril . les états-unis est généralement froid en juillet , et il gèle habituellement en novembre . california est généralement calme en mars , et il est généralement chaud en juin . les états-unis est parfois légère en juin , et il fait froid en septembre . votre moins aimé fruit est le raisin , mais mon moins aimé est la pomme . son fruit préféré est l'orange , mais mon préféré est le raisin . paris est relaxant en décembre , mais il est généralement froid en juillet . new jersey est occupé au printemps , et il est jamais chaude en mars . notre fruit est moins aimé le citron , mais mon moins aimé est le raisin . les états-unis est parfois occupé en janvier , et il est parfois chaud en novembre . ###Markdown Implement Preprocessing Function Text to Word IdsAs you did with other RNNs, you must turn the text into a number so the computer can understand it. 
In the function `text_to_ids()`, you'll turn `source_text` and `target_text` from words to ids. However, you need to add the `` word id at the end of `target_text`. This will help the neural network predict when the sentence should end.You can get the `` word id by doing:```pythontarget_vocab_to_int['']```You can get other word ids using `source_vocab_to_int` and `target_vocab_to_int`. ###Code def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int): """ Convert source and target text to proper word ids :param source_text: String that contains all the source text. :param target_text: String that contains all the target text. :param source_vocab_to_int: Dictionary to go from the source words to an id :param target_vocab_to_int: Dictionary to go from the target words to an id :return: A tuple of lists (source_id_text, target_id_text) """ source = [[source_vocab_to_int[w] for w in s.split()] for s in source_text.split('\n')] target = [[target_vocab_to_int[w] for w in (t + ' <EOS>').split()] for t in target_text.split('\n')] return source, target """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_text_to_ids(text_to_ids) ###Output Tests Passed ###Markdown Preprocess all the data and save itRunning the code cell below will preprocess all the data and save it to file. ###Code """ DON'T MODIFY ANYTHING IN THIS CELL """ helper.preprocess_and_save_data(source_path, target_path, text_to_ids) ###Output _____no_output_____ ###Markdown Check PointThis is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk. ###Code """ DON'T MODIFY ANYTHING IN THIS CELL """ import numpy as np import helper import problem_unittests as tests (source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess() ###Output _____no_output_____ ###Markdown Check the Version of TensorFlow and Access to GPUThis will check to make sure you have the correct version of TensorFlow and access to a GPU ###Code """ DON'T MODIFY ANYTHING IN THIS CELL """ from distutils.version import LooseVersion import warnings import tensorflow as tf from tensorflow.python.layers.core import Dense # Check TensorFlow Version assert LooseVersion(tf.__version__) >= LooseVersion('1.1'), 'Please use TensorFlow version 1.1 or newer' print('TensorFlow Version: {}'.format(tf.__version__)) # Check for a GPU if not tf.test.gpu_device_name(): warnings.warn('No GPU found. Please use a GPU to train your neural network.') else: print('Default GPU Device: {}'.format(tf.test.gpu_device_name())) ###Output TensorFlow Version: 1.3.0 Default GPU Device: /gpu:0 ###Markdown Build the Neural NetworkYou'll build the components necessary to build a Sequence-to-Sequence model by implementing the following functions below:- `model_inputs`- `process_decoder_input`- `encoding_layer`- `decoding_layer_train`- `decoding_layer_infer`- `decoding_layer`- `seq2seq_model` InputImplement the `model_inputs()` function to create TF Placeholders for the Neural Network. 
It should create the following placeholders:- Input text placeholder named "input" using the TF Placeholder name parameter with rank 2.- Targets placeholder with rank 2.- Learning rate placeholder with rank 0.- Keep probability placeholder named "keep_prob" using the TF Placeholder name parameter with rank 0.- Target sequence length placeholder named "target_sequence_length" with rank 1- Max target sequence length tensor named "max_target_len" getting its value from applying tf.reduce_max on the target_sequence_length placeholder. Rank 0.- Source sequence length placeholder named "source_sequence_length" with rank 1Return the placeholders in the following the tuple (input, targets, learning rate, keep probability, target sequence length, max target sequence length, source sequence length) ###Code def model_inputs(): """ Create TF Placeholders for input, targets, learning rate, and lengths of source and target sequences. :return: Tuple (input, targets, learning rate, keep probability, target sequence length, max target sequence length, source sequence length) """ inputs = tf.placeholder(tf.int32, [None, None], name='input') targets = tf.placeholder(tf.int32, [None, None], name='target') learning_rate = tf.placeholder(tf.float32, name='learning_rate') keep_prob = tf.placeholder(tf.float32, name='keep_prob') target_sequence_length = tf.placeholder(tf.int32, shape=[None], name='target_sequence_length') max_target_seq_len = tf.reduce_max(target_sequence_length, name='max_target_seq_len') source_sequence_length = tf.placeholder(tf.int32, shape=[None], name='source_sequence_length') return inputs, targets, learning_rate, keep_prob, target_sequence_length, max_target_seq_len, source_sequence_length """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_model_inputs(model_inputs) ###Output ERROR:tensorflow:================================== Object was never used (type <class 'tensorflow.python.framework.ops.Operation'>): <tf.Operation 'assert_rank_2/Assert/Assert' type=Assert> If you want to mark it as used call its "mark_used()" method. 
It was originally created here: ['File "/usr/lib/python3.5/runpy.py", line 184, in _run_module_as_main\n "__main__", mod_spec)', 'File "/usr/lib/python3.5/runpy.py", line 85, in _run_code\n exec(code, run_globals)', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/ipykernel_launcher.py", line 16, in <module>\n app.launch_new_instance()', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/traitlets/config/application.py", line 658, in launch_instance\n app.start()', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/ipykernel/kernelapp.py", line 477, in start\n ioloop.IOLoop.instance().start()', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/zmq/eventloop/ioloop.py", line 177, in start\n super(ZMQIOLoop, self).start()', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/tornado/ioloop.py", line 888, in start\n handler_func(fd_obj, events)', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/tornado/stack_context.py", line 277, in null_wrapper\n return fn(*args, **kwargs)', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/zmq/eventloop/zmqstream.py", line 440, in _handle_events\n self._handle_recv()', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/zmq/eventloop/zmqstream.py", line 472, in _handle_recv\n self._run_callback(callback, msg)', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/zmq/eventloop/zmqstream.py", line 414, in _run_callback\n callback(*args, **kwargs)', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/tornado/stack_context.py", line 277, in null_wrapper\n return fn(*args, **kwargs)', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/ipykernel/kernelbase.py", line 283, in dispatcher\n return self.dispatch_shell(stream, msg)', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/ipykernel/kernelbase.py", line 235, in dispatch_shell\n handler(stream, idents, msg)', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/ipykernel/kernelbase.py", line 399, in execute_request\n user_expressions, allow_stdin)', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/ipykernel/ipkernel.py", line 196, in do_execute\n res = shell.run_cell(code, store_history=store_history, silent=silent)', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/ipykernel/zmqshell.py", line 533, in run_cell\n return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/IPython/core/interactiveshell.py", line 2698, in run_cell\n interactivity=interactivity, compiler=compiler, result=result)', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/IPython/core/interactiveshell.py", line 2808, in run_ast_nodes\n if self.run_code(code, result):', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/IPython/core/interactiveshell.py", line 2862, in run_code\n exec(code_obj, self.user_global_ns, self.user_ns)', 'File "<ipython-input-7-383563ce154c>", line 21, in <module>\n tests.test_model_inputs(model_inputs)', 'File "/home/cenk/Documents/udacity-dlnd-project-4/problem_unittests.py", line 106, in test_model_inputs\n assert tf.assert_rank(lr, 0, message=\'Learning Rate has wrong rank\')', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/tensorflow/python/ops/check_ops.py", line 617, in assert_rank\n dynamic_condition, data, summarize)', 'File 
"/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/tensorflow/python/ops/check_ops.py", line 571, in _assert_rank_condition\n return control_flow_ops.Assert(condition, data, summarize=summarize)', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/tensorflow/python/util/tf_should_use.py", line 175, in wrapped\n return _add_should_use_warning(fn(*args, **kwargs))', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/tensorflow/python/util/tf_should_use.py", line 144, in _add_should_use_warning\n wrapped = TFShouldUseWarningWrapper(x)', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/tensorflow/python/util/tf_should_use.py", line 101, in __init__\n stack = [s.strip() for s in traceback.format_stack()]'] ================================== ERROR:tensorflow:================================== Object was never used (type <class 'tensorflow.python.framework.ops.Operation'>): <tf.Operation 'assert_rank_3/Assert/Assert' type=Assert> If you want to mark it as used call its "mark_used()" method. It was originally created here: ['File "/usr/lib/python3.5/runpy.py", line 184, in _run_module_as_main\n "__main__", mod_spec)', 'File "/usr/lib/python3.5/runpy.py", line 85, in _run_code\n exec(code, run_globals)', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/ipykernel_launcher.py", line 16, in <module>\n app.launch_new_instance()', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/traitlets/config/application.py", line 658, in launch_instance\n app.start()', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/ipykernel/kernelapp.py", line 477, in start\n ioloop.IOLoop.instance().start()', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/zmq/eventloop/ioloop.py", line 177, in start\n super(ZMQIOLoop, self).start()', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/tornado/ioloop.py", line 888, in start\n handler_func(fd_obj, events)', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/tornado/stack_context.py", line 277, in null_wrapper\n return fn(*args, **kwargs)', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/zmq/eventloop/zmqstream.py", line 440, in _handle_events\n self._handle_recv()', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/zmq/eventloop/zmqstream.py", line 472, in _handle_recv\n self._run_callback(callback, msg)', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/zmq/eventloop/zmqstream.py", line 414, in _run_callback\n callback(*args, **kwargs)', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/tornado/stack_context.py", line 277, in null_wrapper\n return fn(*args, **kwargs)', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/ipykernel/kernelbase.py", line 283, in dispatcher\n return self.dispatch_shell(stream, msg)', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/ipykernel/kernelbase.py", line 235, in dispatch_shell\n handler(stream, idents, msg)', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/ipykernel/kernelbase.py", line 399, in execute_request\n user_expressions, allow_stdin)', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/ipykernel/ipkernel.py", line 196, in do_execute\n res = shell.run_cell(code, store_history=store_history, silent=silent)', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/ipykernel/zmqshell.py", line 533, in run_cell\n return 
super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/IPython/core/interactiveshell.py", line 2698, in run_cell\n interactivity=interactivity, compiler=compiler, result=result)', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/IPython/core/interactiveshell.py", line 2808, in run_ast_nodes\n if self.run_code(code, result):', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/IPython/core/interactiveshell.py", line 2862, in run_code\n exec(code_obj, self.user_global_ns, self.user_ns)', 'File "<ipython-input-7-383563ce154c>", line 21, in <module>\n tests.test_model_inputs(model_inputs)', 'File "/home/cenk/Documents/udacity-dlnd-project-4/problem_unittests.py", line 107, in test_model_inputs\n assert tf.assert_rank(keep_prob, 0, message=\'Keep Probability has wrong rank\')', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/tensorflow/python/ops/check_ops.py", line 617, in assert_rank\n dynamic_condition, data, summarize)', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/tensorflow/python/ops/check_ops.py", line 571, in _assert_rank_condition\n return control_flow_ops.Assert(condition, data, summarize=summarize)', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/tensorflow/python/util/tf_should_use.py", line 175, in wrapped\n return _add_should_use_warning(fn(*args, **kwargs))', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/tensorflow/python/util/tf_should_use.py", line 144, in _add_should_use_warning\n wrapped = TFShouldUseWarningWrapper(x)', 'File "/home/cenk/.virtualenvs/pytorch/lib/python3.5/site-packages/tensorflow/python/util/tf_should_use.py", line 101, in __init__\n stack = [s.strip() for s in traceback.format_stack()]'] ================================== ###Markdown Process Decoder InputImplement `process_decoder_input` by removing the last word id from each batch in `target_data` and concat the GO ID to the begining of each batch. 
###Code def process_decoder_input(target_data, target_vocab_to_int, batch_size): """ Preprocess target data for encoding :param target_data: Target Placehoder :param target_vocab_to_int: Dictionary to go from the target words to an id :param batch_size: Batch Size :return: Preprocessed target data """ ends = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1]) return tf.concat([tf.fill([batch_size, 1], target_vocab_to_int['<GO>']), ends], 1) """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_process_encoding_input(process_decoder_input) ###Output Tests Passed ###Markdown EncodingImplement `encoding_layer()` to create a Encoder RNN layer: * Embed the encoder input using [`tf.contrib.layers.embed_sequence`](https://www.tensorflow.org/api_docs/python/tf/contrib/layers/embed_sequence) * Construct a [stacked](https://github.com/tensorflow/tensorflow/blob/6947f65a374ebf29e74bb71e36fd82760056d82c/tensorflow/docs_src/tutorials/recurrent.mdstacking-multiple-lstms) [`tf.contrib.rnn.LSTMCell`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/LSTMCell) wrapped in a [`tf.contrib.rnn.DropoutWrapper`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/DropoutWrapper) * Pass cell and embedded input to [`tf.nn.dynamic_rnn()`](https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn) ###Code from imp import reload reload(tests) def encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob, source_sequence_length, source_vocab_size, encoding_embedding_size): """ Create encoding layer :param rnn_inputs: Inputs for the RNN :param rnn_size: RNN Size :param num_layers: Number of layers :param keep_prob: Dropout keep probability :param source_sequence_length: a list of the lengths of each sequence in the batch :param source_vocab_size: vocabulary size of source data :param encoding_embedding_size: embedding size of source data :return: tuple (RNN output, RNN state) """ embedding_input = tf.contrib.layers.embed_sequence(rnn_inputs, source_vocab_size, encoding_embedding_size) cell = lambda x: tf.contrib.rnn.DropoutWrapper( tf.contrib.rnn.LSTMCell(x, initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=3)) , output_keep_prob=keep_prob) multi_cell = tf.contrib.rnn.MultiRNNCell([cell(rnn_size) for _ in range(num_layers)]) return tf.nn.dynamic_rnn(multi_cell, embedding_input, sequence_length=source_sequence_length, dtype=tf.float32) """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_encoding_layer(encoding_layer) ###Output Tests Passed ###Markdown Decoding - TrainingCreate a training decoding layer:* Create a [`tf.contrib.seq2seq.TrainingHelper`](https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/TrainingHelper) * Create a [`tf.contrib.seq2seq.BasicDecoder`](https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/BasicDecoder)* Obtain the decoder outputs from [`tf.contrib.seq2seq.dynamic_decode`](https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/dynamic_decode) ###Code def decoding_layer_train(encoder_state, dec_cell, dec_embed_input, target_sequence_length, max_summary_length, output_layer, keep_prob): """ Create a decoding layer for training :param encoder_state: Encoder State :param dec_cell: Decoder RNN Cell :param dec_embed_input: Decoder embedded input :param target_sequence_length: The lengths of each sequence in the target batch :param max_summary_length: The length of the longest sequence in the batch :param output_layer: Function to apply the output layer :param keep_prob: Dropout keep 
probability :return: BasicDecoderOutput containing training logits and sample_id """ with tf.variable_scope("dec"): training_helper = tf.contrib.seq2seq.TrainingHelper(inputs=dec_embed_input, sequence_length=target_sequence_length, time_major=False) training_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell, training_helper, encoder_state, output_layer) training_decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(training_decoder, impute_finished=True, maximum_iterations=max_summary_length) return training_decoder_output """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_decoding_layer_train(decoding_layer_train) ###Output Tests Passed ###Markdown Decoding - InferenceCreate inference decoder:* Create a [`tf.contrib.seq2seq.GreedyEmbeddingHelper`](https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/GreedyEmbeddingHelper)* Create a [`tf.contrib.seq2seq.BasicDecoder`](https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/BasicDecoder)* Obtain the decoder outputs from [`tf.contrib.seq2seq.dynamic_decode`](https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/dynamic_decode) ###Code def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id, max_target_sequence_length, vocab_size, output_layer, batch_size, keep_prob): """ Create a decoding layer for inference :param encoder_state: Encoder state :param dec_cell: Decoder RNN Cell :param dec_embeddings: Decoder embeddings :param start_of_sequence_id: GO ID :param end_of_sequence_id: EOS Id :param max_target_sequence_length: Maximum length of target sequences :param vocab_size: Size of decoder/target vocabulary :param decoding_scope: TenorFlow Variable Scope for decoding :param output_layer: Function to apply the output layer :param batch_size: Batch size :param keep_prob: Dropout keep probability :return: BasicDecoderOutput containing inference logits and sample_id """ with tf.variable_scope("dec") : start_tokens = tf.tile(tf.constant([start_of_sequence_id],dtype=tf.int32), [batch_size], name='start_tokens') inf_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(dec_embeddings, start_tokens, end_of_sequence_id) inf_dec = tf.contrib.seq2seq.BasicDecoder(dec_cell, inf_helper, encoder_state, output_layer) dec_out, _, _ = tf.contrib.seq2seq.dynamic_decode(inf_dec, impute_finished=True, maximum_iterations=max_target_sequence_length) return dec_out """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_decoding_layer_infer(decoding_layer_infer) ###Output Tests Passed ###Markdown Build the Decoding LayerImplement `decoding_layer()` to create a Decoder RNN layer.* Embed the target sequences* Construct the decoder LSTM cell (just like you constructed the encoder cell above)* Create an output layer to map the outputs of the decoder to the elements of our vocabulary* Use the your `decoding_layer_train(encoder_state, dec_cell, dec_embed_input, target_sequence_length, max_target_sequence_length, output_layer, keep_prob)` function to get the training logits.* Use your `decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id, max_target_sequence_length, vocab_size, output_layer, batch_size, keep_prob)` function to get the inference logits.Note: You'll need to use [tf.variable_scope](https://www.tensorflow.org/api_docs/python/tf/variable_scope) to share variables between training and inference. 
###Code def decoding_layer(dec_input, encoder_state, target_sequence_length, max_target_sequence_length, rnn_size, num_layers, target_vocab_to_int, target_vocab_size, batch_size, keep_prob, decoding_embedding_size): """ Create decoding layer :param dec_input: Decoder input :param encoder_state: Encoder state :param target_sequence_length: The lengths of each sequence in the target batch :param max_target_sequence_length: Maximum length of target sequences :param rnn_size: RNN Size :param num_layers: Number of layers :param target_vocab_to_int: Dictionary to go from the target words to an id :param target_vocab_size: Size of target vocabulary :param batch_size: The size of the batch :param keep_prob: Dropout keep probability :param decoding_embedding_size: Decoding embedding size :return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput) """ embeddings = tf.Variable(tf.random_uniform([target_vocab_size, decoding_embedding_size])) embed_input = tf.nn.embedding_lookup(embeddings, dec_input) cell = lambda x: tf.contrib.rnn.LSTMCell(x, initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=3)) multi_cell = tf.contrib.rnn.MultiRNNCell([cell(rnn_size) for _ in range(num_layers)]) output_layer = Dense(target_vocab_size, kernel_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1)) with tf.variable_scope("dec"): training_logits = decoding_layer_train(encoder_state, multi_cell, embed_input, target_sequence_length, max_target_sequence_length, output_layer, keep_prob) vocab_size = len(target_vocab_to_int) with tf.variable_scope("dec", reuse=True): inference_logits = decoding_layer_infer(encoder_state, multi_cell, embeddings, target_vocab_to_int['<GO>'], target_vocab_to_int['<EOS>'], max_target_sequence_length, vocab_size, output_layer, batch_size, keep_prob) return training_logits, inference_logits """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_decoding_layer(decoding_layer) ###Output Tests Passed ###Markdown Build the Neural NetworkApply the functions you implemented above to:- Encode the input using your `encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob, source_sequence_length, source_vocab_size, encoding_embedding_size)`.- Process target data using your `process_decoder_input(target_data, target_vocab_to_int, batch_size)` function.- Decode the encoded input using your `decoding_layer(dec_input, enc_state, target_sequence_length, max_target_sentence_length, rnn_size, num_layers, target_vocab_to_int, target_vocab_size, batch_size, keep_prob, dec_embedding_size)` function. 
###Code def seq2seq_model(input_data, target_data, keep_prob, batch_size, source_sequence_length, target_sequence_length, max_target_sentence_length, source_vocab_size, target_vocab_size, enc_embedding_size, dec_embedding_size, rnn_size, num_layers, target_vocab_to_int): """ Build the Sequence-to-Sequence part of the neural network :param input_data: Input placeholder :param target_data: Target placeholder :param keep_prob: Dropout keep probability placeholder :param batch_size: Batch Size :param source_sequence_length: Sequence Lengths of source sequences in the batch :param target_sequence_length: Sequence Lengths of target sequences in the batch :param source_vocab_size: Source vocabulary size :param target_vocab_size: Target vocabulary size :param enc_embedding_size: Decoder embedding size :param dec_embedding_size: Encoder embedding size :param rnn_size: RNN Size :param num_layers: Number of layers :param target_vocab_to_int: Dictionary to go from the target words to an id :return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput) """ _, state = encoding_layer(input_data, rnn_size, num_layers, keep_prob, source_sequence_length, source_vocab_size, enc_embedding_size) inp = process_decoder_input(target_data, target_vocab_to_int, batch_size) # Pass encoder state and decoder inputs to the decoders dec_output, inf_dec_output = decoding_layer(inp, state, target_sequence_length, max_target_sentence_length, rnn_size, num_layers, target_vocab_to_int, target_vocab_size, batch_size, keep_prob, dec_embedding_size) return dec_output, inf_dec_output """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_seq2seq_model(seq2seq_model) ###Output Tests Passed ###Markdown Neural Network Training HyperparametersTune the following parameters:- Set `epochs` to the number of epochs.- Set `batch_size` to the batch size.- Set `rnn_size` to the size of the RNNs.- Set `num_layers` to the number of layers.- Set `encoding_embedding_size` to the size of the embedding for the encoder.- Set `decoding_embedding_size` to the size of the embedding for the decoder.- Set `learning_rate` to the learning rate.- Set `keep_probability` to the Dropout keep probability- Set `display_step` to state how many steps between each debug output statement ###Code # Number of Epochs epochs = 8 # Batch Size batch_size = 256 # RNN Size rnn_size = 512 # Number of Layers num_layers = 2 # Embedding Size encoding_embedding_size = 256 decoding_embedding_size = 256 # Learning Rate learning_rate = 0.001 # Dropout Keep Probability keep_probability = 0.9 display_step = 100 ###Output _____no_output_____ ###Markdown Build the GraphBuild the graph using the neural network you implemented. 
###Code """ DON'T MODIFY ANYTHING IN THIS CELL """ save_path = 'checkpoints/dev' (source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess() max_target_sentence_length = max([len(sentence) for sentence in source_int_text]) train_graph = tf.Graph() with train_graph.as_default(): input_data, targets, lr, keep_prob, target_sequence_length, max_target_sequence_length, source_sequence_length = model_inputs() #sequence_length = tf.placeholder_with_default(max_target_sentence_length, None, name='sequence_length') input_shape = tf.shape(input_data) train_logits, inference_logits = seq2seq_model(tf.reverse(input_data, [-1]), targets, keep_prob, batch_size, source_sequence_length, target_sequence_length, max_target_sequence_length, len(source_vocab_to_int), len(target_vocab_to_int), encoding_embedding_size, decoding_embedding_size, rnn_size, num_layers, target_vocab_to_int) training_logits = tf.identity(train_logits.rnn_output, name='logits') inference_logits = tf.identity(inference_logits.sample_id, name='predictions') masks = tf.sequence_mask(target_sequence_length, max_target_sequence_length, dtype=tf.float32, name='masks') with tf.name_scope("optimization"): # Loss function cost = tf.contrib.seq2seq.sequence_loss( training_logits, targets, masks) # Optimizer optimizer = tf.train.AdamOptimizer(lr) # Gradient Clipping gradients = optimizer.compute_gradients(cost) capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None] train_op = optimizer.apply_gradients(capped_gradients) ###Output _____no_output_____ ###Markdown Batch and pad the source and target sequences ###Code """ DON'T MODIFY ANYTHING IN THIS CELL """ def pad_sentence_batch(sentence_batch, pad_int): """Pad sentences with <PAD> so that each sentence of a batch has the same length""" max_sentence = max([len(sentence) for sentence in sentence_batch]) return [sentence + [pad_int] * (max_sentence - len(sentence)) for sentence in sentence_batch] def get_batches(sources, targets, batch_size, source_pad_int, target_pad_int): """Batch targets, sources, and the lengths of their sentences together""" for batch_i in range(0, len(sources)//batch_size): start_i = batch_i * batch_size # Slice the right amount for the batch sources_batch = sources[start_i:start_i + batch_size] targets_batch = targets[start_i:start_i + batch_size] # Pad pad_sources_batch = np.array(pad_sentence_batch(sources_batch, source_pad_int)) pad_targets_batch = np.array(pad_sentence_batch(targets_batch, target_pad_int)) # Need the lengths for the _lengths parameters pad_targets_lengths = [] for target in pad_targets_batch: pad_targets_lengths.append(len(target)) pad_source_lengths = [] for source in pad_sources_batch: pad_source_lengths.append(len(source)) yield pad_sources_batch, pad_targets_batch, pad_source_lengths, pad_targets_lengths ###Output _____no_output_____ ###Markdown TrainTrain the neural network on the preprocessed data. If you have a hard time getting a good loss, check the forms to see if anyone is having the same problem. 
###Code """ DON'T MODIFY ANYTHING IN THIS CELL """ def get_accuracy(target, logits): """ Calculate accuracy """ max_seq = max(target.shape[1], logits.shape[1]) if max_seq - target.shape[1]: target = np.pad( target, [(0,0),(0,max_seq - target.shape[1])], 'constant') if max_seq - logits.shape[1]: logits = np.pad( logits, [(0,0),(0,max_seq - logits.shape[1])], 'constant') return np.mean(np.equal(target, logits)) # Split data to training and validation sets train_source = source_int_text[batch_size:] train_target = target_int_text[batch_size:] valid_source = source_int_text[:batch_size] valid_target = target_int_text[:batch_size] (valid_sources_batch, valid_targets_batch, valid_sources_lengths, valid_targets_lengths ) = next(get_batches(valid_source, valid_target, batch_size, source_vocab_to_int['<PAD>'], target_vocab_to_int['<PAD>'])) with tf.Session(graph=train_graph) as sess: sess.run(tf.global_variables_initializer()) for epoch_i in range(epochs): for batch_i, (source_batch, target_batch, sources_lengths, targets_lengths) in enumerate( get_batches(train_source, train_target, batch_size, source_vocab_to_int['<PAD>'], target_vocab_to_int['<PAD>'])): _, loss = sess.run( [train_op, cost], {input_data: source_batch, targets: target_batch, lr: learning_rate, target_sequence_length: targets_lengths, source_sequence_length: sources_lengths, keep_prob: keep_probability}) if batch_i % display_step == 0 and batch_i > 0: batch_train_logits = sess.run( inference_logits, {input_data: source_batch, source_sequence_length: sources_lengths, target_sequence_length: targets_lengths, keep_prob: 1.0}) batch_valid_logits = sess.run( inference_logits, {input_data: valid_sources_batch, source_sequence_length: valid_sources_lengths, target_sequence_length: valid_targets_lengths, keep_prob: 1.0}) train_acc = get_accuracy(target_batch, batch_train_logits) valid_acc = get_accuracy(valid_targets_batch, batch_valid_logits) print('Epoch {:>3} Batch {:>4}/{} - Train Accuracy: {:>6.4f}, Validation Accuracy: {:>6.4f}, Loss: {:>6.4f}' .format(epoch_i, batch_i, len(source_int_text) // batch_size, train_acc, valid_acc, loss)) # Save Model saver = tf.train.Saver() saver.save(sess, save_path) print('Model Trained and Saved') ###Output Epoch 0 Batch 100/538 - Train Accuracy: 0.5338, Validation Accuracy: 0.5595, Loss: 1.0015 Epoch 0 Batch 200/538 - Train Accuracy: 0.6406, Validation Accuracy: 0.6303, Loss: 0.5681 Epoch 0 Batch 300/538 - Train Accuracy: 0.7976, Validation Accuracy: 0.7939, Loss: 0.2803 Epoch 0 Batch 400/538 - Train Accuracy: 0.8958, Validation Accuracy: 0.8699, Loss: 0.1396 Epoch 0 Batch 500/538 - Train Accuracy: 0.9343, Validation Accuracy: 0.9192, Loss: 0.0604 Epoch 1 Batch 100/538 - Train Accuracy: 0.9379, Validation Accuracy: 0.9402, Loss: 0.0486 Epoch 1 Batch 200/538 - Train Accuracy: 0.9570, Validation Accuracy: 0.9403, Loss: 0.0346 Epoch 1 Batch 300/538 - Train Accuracy: 0.9524, Validation Accuracy: 0.9585, Loss: 0.0359 Epoch 1 Batch 400/538 - Train Accuracy: 0.9593, Validation Accuracy: 0.9551, Loss: 0.0321 Epoch 1 Batch 500/538 - Train Accuracy: 0.9734, Validation Accuracy: 0.9544, Loss: 0.0203 Epoch 2 Batch 100/538 - Train Accuracy: 0.9611, Validation Accuracy: 0.9719, Loss: 0.0216 Epoch 2 Batch 200/538 - Train Accuracy: 0.9793, Validation Accuracy: 0.9682, Loss: 0.0153 Epoch 2 Batch 300/538 - Train Accuracy: 0.9766, Validation Accuracy: 0.9657, Loss: 0.0186 Epoch 2 Batch 400/538 - Train Accuracy: 0.9714, Validation Accuracy: 0.9709, Loss: 0.0202 Epoch 2 Batch 500/538 - Train Accuracy: 0.9831, Validation 
Accuracy: 0.9698, Loss: 0.0122 Epoch 3 Batch 100/538 - Train Accuracy: 0.9723, Validation Accuracy: 0.9753, Loss: 0.0128 Epoch 3 Batch 200/538 - Train Accuracy: 0.9846, Validation Accuracy: 0.9602, Loss: 0.0118 Epoch 3 Batch 300/538 - Train Accuracy: 0.9784, Validation Accuracy: 0.9787, Loss: 0.0136 Epoch 3 Batch 400/538 - Train Accuracy: 0.9855, Validation Accuracy: 0.9773, Loss: 0.0156 Epoch 3 Batch 500/538 - Train Accuracy: 0.9950, Validation Accuracy: 0.9702, Loss: 0.0090 Epoch 4 Batch 100/538 - Train Accuracy: 0.9920, Validation Accuracy: 0.9734, Loss: 0.0111 Epoch 4 Batch 200/538 - Train Accuracy: 0.9807, Validation Accuracy: 0.9686, Loss: 0.0103 Epoch 4 Batch 300/538 - Train Accuracy: 0.9905, Validation Accuracy: 0.9728, Loss: 0.0105 Epoch 4 Batch 400/538 - Train Accuracy: 0.9801, Validation Accuracy: 0.9711, Loss: 0.0123 Epoch 4 Batch 500/538 - Train Accuracy: 0.9961, Validation Accuracy: 0.9664, Loss: 0.0067 Epoch 5 Batch 100/538 - Train Accuracy: 0.9908, Validation Accuracy: 0.9808, Loss: 0.0087 Epoch 5 Batch 200/538 - Train Accuracy: 0.9928, Validation Accuracy: 0.9707, Loss: 0.0059 Epoch 5 Batch 300/538 - Train Accuracy: 0.9909, Validation Accuracy: 0.9787, Loss: 0.0095 Epoch 5 Batch 400/538 - Train Accuracy: 0.9903, Validation Accuracy: 0.9778, Loss: 0.0076 Epoch 5 Batch 500/538 - Train Accuracy: 0.9949, Validation Accuracy: 0.9661, Loss: 0.0073 Epoch 6 Batch 100/538 - Train Accuracy: 0.9930, Validation Accuracy: 0.9703, Loss: 0.0069 Epoch 6 Batch 200/538 - Train Accuracy: 0.9973, Validation Accuracy: 0.9680, Loss: 0.0057 Epoch 6 Batch 300/538 - Train Accuracy: 0.9883, Validation Accuracy: 0.9698, Loss: 0.0083 Epoch 6 Batch 400/538 - Train Accuracy: 0.9950, Validation Accuracy: 0.9746, Loss: 0.0064 Epoch 6 Batch 500/538 - Train Accuracy: 0.9991, Validation Accuracy: 0.9734, Loss: 0.0042 Epoch 7 Batch 100/538 - Train Accuracy: 0.9916, Validation Accuracy: 0.9755, Loss: 0.0062 Epoch 7 Batch 200/538 - Train Accuracy: 0.9906, Validation Accuracy: 0.9748, Loss: 0.0070 Epoch 7 Batch 300/538 - Train Accuracy: 0.9896, Validation Accuracy: 0.9739, Loss: 0.0083 Epoch 7 Batch 400/538 - Train Accuracy: 0.9953, Validation Accuracy: 0.9741, Loss: 0.0067 Epoch 7 Batch 500/538 - Train Accuracy: 0.9989, Validation Accuracy: 0.9783, Loss: 0.0044 Model Trained and Saved ###Markdown Save ParametersSave the `batch_size` and `save_path` parameters for inference. ###Code """ DON'T MODIFY ANYTHING IN THIS CELL """ # Save parameters for checkpoint helper.save_params(save_path) ###Output _____no_output_____ ###Markdown Checkpoint ###Code """ DON'T MODIFY ANYTHING IN THIS CELL """ import tensorflow as tf import numpy as np import helper import problem_unittests as tests _, (source_vocab_to_int, target_vocab_to_int), (source_int_to_vocab, target_int_to_vocab) = helper.load_preprocess() load_path = helper.load_params() ###Output _____no_output_____ ###Markdown Sentence to SequenceTo feed a sentence into the model for translation, you first need to preprocess it. Implement the function `sentence_to_seq()` to preprocess new sentences.- Convert the sentence to lowercase- Convert words into ids using `vocab_to_int` - Convert words not in the vocabulary, to the `` word id. 
###Code def sentence_to_seq(sentence, vocab_to_int): """ Convert a sentence to a sequence of ids :param sentence: String :param vocab_to_int: Dictionary to go from the words to an id :return: List of word ids """ return [vocab_to_int[w] if w in vocab_to_int else vocab_to_int['<UNK>'] for w in sentence.lower().split()] """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_sentence_to_seq(sentence_to_seq) ###Output Tests Passed ###Markdown TranslateThis will translate `translate_sentence` from English to French. ###Code translate_sentence = 'he saw a old yellow truck .' """ DON'T MODIFY ANYTHING IN THIS CELL """ translate_sentence = sentence_to_seq(translate_sentence, source_vocab_to_int) loaded_graph = tf.Graph() with tf.Session(graph=loaded_graph) as sess: # Load saved model loader = tf.train.import_meta_graph(load_path + '.meta') loader.restore(sess, load_path) input_data = loaded_graph.get_tensor_by_name('input:0') logits = loaded_graph.get_tensor_by_name('predictions:0') target_sequence_length = loaded_graph.get_tensor_by_name('target_sequence_length:0') source_sequence_length = loaded_graph.get_tensor_by_name('source_sequence_length:0') keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0') translate_logits = sess.run(logits, {input_data: [translate_sentence]*batch_size, target_sequence_length: [len(translate_sentence)*2]*batch_size, source_sequence_length: [len(translate_sentence)]*batch_size, keep_prob: 1.0})[0] print('Input') print(' Word Ids: {}'.format([i for i in translate_sentence])) print(' English Words: {}'.format([source_int_to_vocab[i] for i in translate_sentence])) print('\nPrediction') print(' Word Ids: {}'.format([i for i in translate_logits])) print(' French Words: {}'.format(" ".join([target_int_to_vocab[i] for i in translate_logits]))) ###Output INFO:tensorflow:Restoring parameters from checkpoints/dev Input Word Ids: [201, 135, 102, 116, 105, 71, 70] English Words: ['he', 'saw', 'a', 'old', 'yellow', 'truck', '.'] Prediction Word Ids: [90, 135, 250, 30, 340, 45, 238, 37, 1] French Words: il a vu un vieux camion jaune . <EOS>
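###Markdown With `sentence_to_seq` defined above, its `<UNK>` fallback is easy to sanity-check on a made-up toy vocabulary (the ids below are illustrative only, not the real `source_vocab_to_int`). ###Code
# Toy vocabulary, invented purely for illustration.
toy_vocab_to_int = {'<PAD>': 0, '<EOS>': 1, '<UNK>': 2,
                    'he': 3, 'saw': 4, 'a': 5, 'truck': 6, '.': 7}

seq = sentence_to_seq('He saw a shiny new truck .', toy_vocab_to_int)
print(seq)

# 'shiny' and 'new' are not in the toy vocabulary, so both map to the <UNK> id (2);
# the remaining words are lower-cased and looked up directly.
assert seq == [3, 4, 5, 2, 2, 6, 7]
###Output _____no_output_____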
Modelagem/implementacao_dscore.ipynb
###Markdown 1. Data pre-processing ###Code def get_cheques(js): res = js.get("cheques") if not res is None: df = pd.DataFrame(res.get("itens")) df = df[["data_ocorrencia", "uf", "quantidade_cheques"]] df["tipo"] = "cheques" return df else: return None def get_restricoes_financeiras(js): res = js.get("restricoes_financeiras") if not res is None: df = pd.DataFrame(res.get("itens")) if "uf" in list(df.columns): df = df[["data_ocorrencia", "modalidade_natureza", "natureza", "uf", "valor"]] else: df = df[["data_ocorrencia", "modalidade_natureza", "natureza", "uf", "valor"]] df['uf'] = None df["tipo"] = "restricoes_financeiras" return df return None def get_protestos(js): res = js.get("protestos") if not res is None: df = pd.DataFrame(res.get("itens")) if "uf" in list(df.columns): df = df[["data_anotacao", "natureza", "sub_judice_descricao", "uf", "valor"]] df.columns = ["data_ocorrencia", "natureza", "modalidade_natureza", "uf", "valor"] else: df = df[["data_anotacao", "natureza", "sub_judice_descricao", "valor"]] df.columns = ["data_ocorrencia", "natureza", "modalidade_natureza", "valor"] df["uf"] = None df['tipo'] = "protestos" return df return None def get_pendencias(js): res = js.get("pendencias") if not res is None: df = pd.DataFrame(res.get("itens")) if "uf" in list(df.columns): df = df[["data_ocorrencia", "modalidade", "natureza", "valor", "uf"]] else: df = df[["data_ocorrencia", "modalidade", "natureza", "valor"]] df["uf"] = None df.rename(columns={"modalidade" : "modalidade_natureza"}, inplace=True) df['tipo'] = "pendencias" return df return None def get_processos(js): res = js.get("processos") if not res is None: df = pd.DataFrame(res.get('itens')) if "uf" in list(df.columns): df = df[["data_ocorrencia", "descricao_natureza", "natureza", "uf", "valor"]] else: df = df[["data_ocorrencia", "descricao_natureza", "natureza", "valor"]] df["uf"] = None df.rename(columns = {"descricao_natureza" : "modalidade_natureza"}) df["tipo"] = "processos" return df return None dict_campos = {"restricoes" : ["data_ocorrencia", "modalidade_natureza", "natureza", "valor"], "protestos" : ["data_anotacao", "natureza", "sub_judice_descricao", "valor"], "pendencias" : ["data_ocorrencia", "modalidade", "natureza", "valor"], "processos" : ["data_ocorrencia", "descricao_natureza", "natureza", "valor"], "restricoes_financeiras" : ["data_ocorrencia", "modalidade_natureza", "natureza", "valor"] } dict_rename = {"processos" : {"descricao_natureza" : "modalidade_natureza"}, "pendencias" : {"modalidade" : "modalidade_natureza"}, "protestos" : {'sub_judice_descricao' : "modalidade_natureza", "data_anotacao" : "data_ocorrencia"} } def get_infos_dividas(js, tp_pendencia): res = js.get("result").get(tp_pendencia) if not res is None: df = pd.DataFrame(res.get('itens')) cols = dict_campos.get(tp_pendencia) if "uf" in list(df.columns): cols = cols + ["uf"] df = df[cols].copy() else: df = df[cols] df["uf"] = None rename = dict_rename.get(tp_pendencia) if not rename is None: df.rename(columns = rename, inplace=True) df["tipo"] = tp_pendencia return df return None def get_numero_consulta(doc): engine = create_engine("mysql+pymysql://capMaster:#jackpot123#@captalys.cmrbivuuu7sv.sa-east-1.rds.amazonaws.com:23306/varejo") con = engine.connect() query = "select data_ref, numero_consulta from consultas_idwall_operacoes where cnpj_cpf='{}'".format(doc) df = pd.read_sql(query, con) numero = df[df['data_ref']==df['data_ref'].max()]["numero_consulta"].iloc[0] con.close() return numero def get_details(numero): URL = 
"https://api-v2.idwall.co/relatorios" authorization = "b3818f92-5807-4acf-ade8-78a1f6d7996b" url_details = URL + "/{}".format(numero) + "/dados" while True: dets = requests.get(url_details, headers={"authorization": authorization}) djson = dets.json() sleep(1) if djson['result']['status'] == "CONCLUIDO": break return dets.json() def formata_dados(df): df['modalidade_natureza'] = df.apply(lambda x : x['modalidade_natureza'].replace(" ", "") if isinstance(x['modalidade_natureza'], str) else "OUTROS", axis=1) df['valor'] = df.apply(lambda x : x['valor'].split("R$ ")[1].replace(",", "."), axis=1) df["valor"] = df.apply(lambda x : float(x["valor"]), axis=1) return df def gera_dados(doc): numero = get_numero_consulta(doc) js = get_details(numero) fr = [] lista_pendencias = ["restricoes", "processos", "protestos", "pendencias", "restricoes_financeiras"] for el in lista_pendencias: res = get_infos_dividas(js, el) if not res is None: fr.append(res) df = pd.concat(fr) df = formata_dados(df) return df def atribui_segmento(df): segmento_infra = ['FATAGUA', 'TELEFFX', 'TELEFFIXA', 'TELEFMOVEL', 'CONDOMINIO', 'ENERGIAELET', 'ALUGUEL', 'SERVTELEFON'] segmento_credito = ['EMPRESCONTA', 'EMPRESTIMO', 'CREDCARTAO', 'FINANCIAMENT', 'CREDITOEFINANCIAMENTO-FINANC'] segmento_processos = ['EXCJUDTRAB', 'FISCALESTADUAL', 'EXECUCAO', 'FISCALFEDERAL', 'FISCALMUNICIPAL', 'EXECUCAO-JE', 'BUSCAEAPREENSAO'] df['segmento'] = df.apply(lambda x : 'processos' if x['tipo']=='processos' else('credito' if x['modalidade_natureza'] in segmento_credito else ('infra' if x['modalidade_natureza'] in segmento_infra else "outros")), axis=1) return df # hortifruti 26203839000110 df = gera_dados("26203839000110") df["modalidade_natureza"].unique().tolist() df = atribui_segmento(df) df.head() ###Output _____no_output_____ ###Markdown Part 2 - Calculations ###Code # calculos das metricas # 1 - probabilidade def calcula_probabilidade(df): dt = df.groupby("segmento").count().reset_index()[["segmento", "valor"]] dt.columns = ["segmento", "ocorrencias"] dt["probabilidade"] = dt["ocorrencias"]/dt["ocorrencias"].sum() return dt # 2 - composicao da divida def calcula_composicao(df): dt = df.groupby("segmento").sum().reset_index() dt.columns = ["segmento", "valor_divida"] dt["composicao"] = dt["valor_divida"]/dt["valor_divida"].sum() return dt dfp = calcula_probabilidade(df) dfc = calcula_composicao(df) dfcalc = dfp.merge(dfc, left_on="segmento", right_on="segmento", how='left') dfcalc # 1 - probabilidade def calcula_probabilidade(df): dt = df.groupby("segmento").count().reset_index()[["segmento", "valor"]] dt.columns = ["segmento", "ocorrencias"] dt["probabilidade"] = dt["ocorrencias"]/dt["ocorrencias"].sum() return dt # 2 - composicao da divida def calcula_composicao(df): dt = df.groupby("segmento").sum().reset_index() dt.columns = ["segmento", "valor_divida"] dt["composicao"] = dt["valor_divida"]/dt["valor_divida"].sum() return dt def get_lscore(cnpj, produto): ls = LScoring(cnpj=cnpj, produto=produto) score = ls.calcula().get('score') fat_medio = ls.faturamentos['valor'].mean() return score, fat_medio def calcula_fat_medio(cnpj, produto): df_fat = get_faturamento(cnpj, produto) df_fat.index = pd.to_datetime(df_fat.data) _df = df_fat.resample('MS').sum().reset_index() _df = _df.sort_values('data', ascending=False).iloc[:12, :] return _df['valor'].mean() def calcula_pi(dfcalc): dfcalc['pi'] = dfcalc['valor_divida']/dfcalc['fat_medio'] dfcalc['pi'] = (2/3)*dfcalc['pi'] return dfcalc escala_impacto = {"credito" : {"i0" : 0.75, "i1" : 1}, 
"processos" : {"i0" : 0.5, "i1" : 0.75}, "infra" : {"i0" : 0.25, "i1" : 0.5}, "outros" : {"i0" : 0, "i1" : 0.25}, } def calcula_lambda(dfcalc): dfcalc["lambda"] = dfcalc['composicao']*dfcalc['pi'] return dfcalc def impacto_segmento(lambda_, segmento, escala_impacto): escala = escala_impacto.get(segmento) i0 = escala.get("i0") i1 = escala.get("i1") return (i1 - i0)*lambda_ + i0 def calcula_impacto_segmento(dfcalc, escala_impacto): dfcalc['impacto_segmento'] = dfcalc.apply(lambda x : impacto_segmento(x['lambda'], x["segmento"], escala_impacto), axis=1) return dfcalc def calcula_risco(dfcalc): dfcalc["risco"] = dfcalc["probabilidade"]*dfcalc["impacto_segmento"] return dfcalc def d_score(risco_, score_limite): return -score_limite*risco_ + score_limite def calcula_escala_score(lscore): delta = int(np.floor(lscore/4)) escala = {"credito" : delta, "processos" : 2*delta, "infra" : 3*delta, "outros" : 4*delta} return escala def calcula_dscore(dfcalc, lscore): escala = calcula_escala_score(lscore) dfcalc["dscore"] = dfcalc.apply(lambda x : d_score(x["risco"], escala.get(x["segmento"])), axis=1) return dfcalc def calcula(cnpj, produto): df = gera_dados(cnpj) df = atribui_segmento(df) dfp = calcula_probabilidade(df) dfc = calcula_composicao(df) dfcalc = dfp.merge(dfc, left_on="segmento", right_on="segmento", how='left') lscore, fat_medio = get_lscore(cnpj, produto) dfcalc['fat_medio'] = fat_medio dfcalc = calcula_pi(dfcalc) dfcalc = calcula_lambda(dfcalc) dfcalc = calcula_impacto_segmento(dfcalc, escala_impacto) dfcalc = calcula_risco(dfcalc) dfcalc = calcula_dscore(dfcalc, lscore) dscore = dfcalc['dscore'].mean() segmentos = dfcalc["segmento"].tolist() dscores = dfcalc["dscore"].tolist() res = dict(zip(segmentos, dscores)) res["lscore"] = int(lscore) res['dscore'] = int(dscore) res['score'] = int((lscore + dscore)/2) return res, dfcalc # %%timeit cnpj = "26203839000110" cnpj='04741728000125' produto = "tomatico" res, d = calcula(cnpj, produto) res d from pricing.service.scoring.lscore import LScoring import numpy as np import requests from time import sleep class DScoring(object): def __init__(self, cnpj, produto): self.cnpj = cnpj self.produto = produto self.lscore = None self.faturamento_medio = None self.calibracao_segmento = None def score_mestre(self): ls = LScoring(cnpj=self.cnpj, produto=self.produto) lscore = ls.calcula().get('score') fat_medio = ls.faturamentos['valor'].mean() self.lscore = lscore self.faturamento_medio = fat_medio return def set_calibracao(self): delta = int(np.floor(self.lscore/4)) escala_score = { "credito" : delta, "processos" : 2*delta, "infra" : 3*delta, "outros" : 4*delta } if self.calibracao_segmento is None: self.calibracao_segmento = escala_score return @property def campos_divida(self): return { "restricoes" : ["data_ocorrencia", "modalidade_natureza", "natureza", "valor"], "protestos" : ["data_anotacao", "natureza", "sub_judice_descricao", "valor"], "pendencias" : ["data_ocorrencia", "modalidade", "natureza", "valor"], "processos" : ["data_ocorrencia", "descricao_natureza", "natureza", "valor"], "restricoes_financeiras" : ["data_ocorrencia", "modalidade_natureza", "natureza", "valor"] } @property def campos_rename(self): return { "processos" : {"descricao_natureza" : "modalidade_natureza"}, "pendencias" : {"modalidade" : "modalidade_natureza"}, "protestos" : {'sub_judice_descricao' : "modalidade_natureza", "data_anotacao" : "data_ocorrencia"} } @property def segmentos(self): return {"credito" : ['EMPRESCONTA', 'EMPRESTIMO', 'CREDCARTAO', 'FINANCIAMENT', 
'CREDITOEFINANCIAMENTO-FINANC'], "processos" : ['EXCJUDTRAB', 'FISCALESTADUAL', 'EXECUCAO', 'FISCALFEDERAL', 'FISCALMUNICIPAL','EXECUCAO-JE', 'BUSCAEAPREENSAO'], "infra" : ['FATAGUA', 'TELEFFX', 'TELEFFIXA', 'TELEFMOVEL', 'CONDOMINIO', 'ENERGIAELET', 'ALUGUEL', 'SERVTELEFON'] } @property def escala_impacto(self): return {"credito" : {"i0" : 0.75, "i1" : 1}, "processos" : {"i0" : 0.5, "i1" : 0.75}, "infra" : {"i0" : 0.25, "i1" : 0.5}, "outros" : {"i0" : 0, "i1" : 0.25}, } def get_numero_consulta(self): engine = create_engine("mysql+pymysql://capMaster:#jackpot123#@captalys.cmrbivuuu7sv.sa-east-1.rds.amazonaws.com:23306/varejo") con = engine.connect() query = "select data_ref, numero_consulta from consultas_idwall_operacoes where cnpj_cpf='{}'".format(self.cnpj) df = pd.read_sql(query, con) numero = df[df['data_ref']==df['data_ref'].max()]["numero_consulta"].iloc[0] con.close() self.numero_consulta = numero return numero @staticmethod def get_details(numero): URL = "https://api-v2.idwall.co/relatorios" authorization = "b3818f92-5807-4acf-ade8-78a1f6d7996b" url_details = URL + "/{}".format(numero) + "/dados" while True: dets = requests.get(url_details, headers={"authorization": authorization}) djson = dets.json() sleep(1) if djson['result']['status'] == "CONCLUIDO": break return dets.json() @staticmethod def formata_dados(df): df['modalidade_natureza'] = df.apply(lambda x : x['modalidade_natureza'].replace(" ", "") if isinstance(x['modalidade_natureza'], str) else "OUTROS", axis=1) df['valor'] = df.apply(lambda x : x['valor'].split("R$ ")[1].replace(",", "."), axis=1) df["valor"] = df.apply(lambda x : float(x["valor"]), axis=1) return df def get_infos_dividas(self, js, tp_pendencia): res = js.get("result").get(tp_pendencia) if not res is None: df = pd.DataFrame(res.get('itens')) cols = self.campos_divida.get(tp_pendencia) if "uf" in list(df.columns): cols = cols + ["uf"] df = df[cols].copy() else: df = df[cols] df["uf"] = None rename = self.campos_rename.get(tp_pendencia) if not rename is None: df.rename(columns = rename, inplace=True) df["tipo"] = tp_pendencia return df return None def gera_dados(self): numero = self.get_numero_consulta() js = self.get_details(numero) fr = [] lista_pendencias = ["restricoes", "processos", "protestos", "pendencias", "restricoes_financeiras"] for el in lista_pendencias: res = self.get_infos_dividas(js, el) if not res is None: fr.append(res) df = pd.concat(fr) df = self.formata_dados(df) return df def atribui_segmento(self, df): df['segmento'] = df.apply(lambda x : 'processos' if x['tipo']=='processos' else('credito' if x['modalidade_natureza'] in self.segmentos.get("credito") else ('infra' if x['modalidade_natureza'] in self.segmentos.get("infra") else "outros")), axis=1) return df @staticmethod def calcula_probabilidade(df): dt = df.groupby("segmento").count().reset_index()[["segmento", "valor"]] dt.columns = ["segmento", "ocorrencias"] dt["probabilidade"] = dt["ocorrencias"]/dt["ocorrencias"].sum() return dt @staticmethod def calcula_composicao(df): dt = df.groupby("segmento").sum().reset_index() dt.columns = ["segmento", "valor_divida"] dt["composicao"] = dt["valor_divida"]/dt["valor_divida"].sum() return dt @staticmethod def calcula_pi(dfcalc): dfcalc['pi'] = dfcalc['valor_divida']/dfcalc['fat_medio'] dfcalc['pi'] = (2/3)*dfcalc['pi'] return dfcalc @staticmethod def calcula_lambda(dfcalc): dfcalc["lambda"] = dfcalc['composicao']*dfcalc['pi'] return dfcalc @staticmethod def impacto_segmento(lambda_, segmento, escala): escala = escala.get(segmento) i0 = 
escala.get("i0") i1 = escala.get("i1") return (i1 - i0)*lambda_ + i0 def calcula_impacto_segmento(self, dfcalc): dfcalc['impacto_segmento'] = dfcalc.apply(lambda x : self.impacto_segmento(x['lambda'], x["segmento"], self.escala_impacto), axis=1) return dfcalc @staticmethod def calcula_risco(dfcalc): dfcalc["risco"] = dfcalc["probabilidade"]*dfcalc["impacto_segmento"] return dfcalc @staticmethod def d_score(risco_, score_limite): return -score_limite*risco_ + score_limite #set_calibracao #self.calibracao_segmento # def calcula_escala_score(lscore): # delta = int(np.floor(lscore/4)) # escala = {"credito" : delta, "processos" : 2*delta, "infra" : 3*delta, "outros" : 4*delta} # return escala def calcula_dscore(self, dfcalc): escala = self.calibracao_segmento dfcalc["dscore"] = dfcalc.apply(lambda x : d_score(x["risco"], escala.get(x["segmento"])), axis=1) return dfcalc def calcula(self): self.score_mestre() self.set_calibracao() df = self.gera_dados() df = self.atribui_segmento(df) dfp = self.calcula_probabilidade(df) dfc = self.calcula_composicao(df) dfcalc = dfp.merge(dfc, left_on="segmento", right_on="segmento", how='left') dfcalc['fat_medio'] = self.faturamento_medio dfcalc = self.calcula_pi(dfcalc) dfcalc = self.calcula_lambda(dfcalc) dfcalc = self.calcula_impacto_segmento(dfcalc) dfcalc = self.calcula_risco(dfcalc) dfcalc = self.calcula_dscore(dfcalc) dscore = dfcalc['dscore'].mean() lista_segmentos = dfcalc["segmento"].tolist() lista_dscore = dfcalc["dscore"].tolist() lista_dscore = [int(el) for el in lista_dscore] res = dict(zip(lista_segmentos, lista_dscore)) res["lscore"] = int(self.lscore) res['dscore'] = int(dscore) res['score'] = int((self.lscore + dscore)/2) return res, dfcalc cnpj = "26203839000110" produto = "tomatico" ds = DScoring(cnpj, produto) ret = ds.calcula() ret[0] ret[1] cnpj = "04741728000125" produto = "tomatico" ds = DScoring(cnpj, produto) ret, dfcalc = ds.calcula() ret dfcalc cnpj = "24247971000107" produto = "pagueveloz" ds = DScoring(cnpj=cnpj, produto=produto) ret, dfcalc = ds.calcula() ret dfcalc ###Output _____no_output_____
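###Markdown The segment-impact interpolation used above can be sanity-checked in isolation: each segment maps its lambda (composition times pi) linearly into an impact band [i0, i1]. The sketch below reuses the same `escala_impacto` bands with made-up lambda values. ###Code
# Standalone check of the impact interpolation: lambda = 0 sits at the band
# floor i0, lambda = 1 reaches the band ceiling i1.
escala_impacto = {"credito":   {"i0": 0.75, "i1": 1.00},
                  "processos": {"i0": 0.50, "i1": 0.75},
                  "infra":     {"i0": 0.25, "i1": 0.50},
                  "outros":    {"i0": 0.00, "i1": 0.25}}

def impacto(lambda_, segmento):
    i0 = escala_impacto[segmento]["i0"]
    i1 = escala_impacto[segmento]["i1"]
    return (i1 - i0) * lambda_ + i0

for seg in escala_impacto:
    print(seg, impacto(0.0, seg), impacto(1.0, seg))
###Output _____no_output_____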
notebooks/.ipynb_checkpoints/1.0-ah-gcn_workflow-checkpoint.ipynb
###Markdown Prep data 1. Get cobre timeseries 2. Get cobre connectomes 3. Get group average connectome 4. Build 8 k-NN graph from avg connectome 5. Split data: 70 training, 10 validation, 20 test 6. All data from same subject assigned to same Split 7. Cut time-series into bins of length time window ###Code ts_path = '/home/harveyaa/Documents/fMRI/data/cobre/difumo/timeseries' conn_path = '/home/harveyaa/Documents/fMRI/data/cobre/difumo/connectomes' pheno_path = '/home/harveyaa/nilearn_data/cobre/phenotypic_data.tsv' timeseries = [np.load(os.path.join(ts_path,p)) for p in os.listdir(ts_path)] ids = [int(p.split('_')[1]) for p in os.listdir(ts_path)] # One subject has different length timeseries, ignore them for now not_150 = np.array([t.shape[0]!=150 for t in timeseries]) print('Bad sub ID: {}'.format(np.array(ids)[not_150][0])) ###Output Bad sub ID: 40075 ###Markdown Make Graph- Load connectomes- Get avg connectome- Get 8 knn graph from avg connectome ###Code def make_undirected(mat): """Takes an input adjacency matrix and makes it undirected (symmetric).""" m = mat.copy() mask = mat != mat.transpose() vals = mat[mask] + mat.transpose()[mask] m[mask] = vals return m def knn_graph(mat,k=8,directed=False): """Takes an input matrix and returns a k-Nearest Neighbour weighted adjacency matrix.""" m = mat.copy() np.fill_diagonal(m,0) slices = [] for i in range(m.shape[0]): s = m[:,i] not_neighbours = s.argsort()[:-k] s[not_neighbours] = 0 slices.append(s) if directed: return np.array(slices) else: return make_undirected(np.array(slices)) def make_group_graph(conn_path): # Load connectomes connectomes = [np.load(os.path.join(conn_path,p)) for p in os.listdir(conn_path)] # Group average connectome avg_conn = np.array(connectomes).mean(axis=0) # Undirected 8 k-NN graph as matrix avg_conn8 = knn_graph(avg_conn,directed=False) # Format matrix into graph for torch_geometric graph = nx.convert_matrix.from_numpy_array(avg_conn8) return tg.utils.from_networkx(graph) ###Output _____no_output_____ ###Markdown Get train/test/validation data- Load timeseries and ids- Split timeseries of 150 volumes into time windows- Split data into train/test/validation - All data from a given subject goes in the same bin ###Code def split_timeseries(ts,n_timepoints=50): """Takes an input timeseries and splits it into time windows of specified length. Need to choose a number that splits evenly.""" if ts.shape[0] % n_timepoints != 0: raise ValueError('Yikes choose a divisor for now') else: n_splits = ts.shape[0] / n_timepoints return np.split(ts,n_splits) def split_ts_labels(timeseries,labels,n_timepoints=50): """ timeseries: list of timeseries labels: list of lists (of accompanying labels) n_timepoints: n_timepoints of split (must be an even split) """ # Split the timeseries split_ts = [] for ts in map(split_timeseries,timeseries): split_ts = split_ts + ts #keep track of the corresponding labels n = int(timeseries[0].shape[0]/n_timepoints) split_labels = [] for l in labels: split_labels.append(np.repeat(l,n)) #add a label for each split split_labels.append(list(range(n))*len(timeseries)) return split_ts, split_labels def train_test_val_splits(split_ids,test_size=0.20,val_size=0.10,random_state=111): """Train test val split the data (in splits) so splits from a subject are in the same group. 
returns INDEX for each split """ # Train test validation split of ids, then used to split dataframe X = np.unique(split_ids) y = list(range(len(X))) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size+val_size, random_state=random_state) X_test, X_val, y_test, y_val = train_test_split(X_test, y_test, test_size=val_size/(test_size+val_size), random_state=random_state) train_idx = [] test_idx = [] val_idx = [] for i in range(len(split_ids)): if split_ids[i] in X_train: train_idx.append(i) elif split_ids[i] in X_test: test_idx.append(i) elif split_ids[i]in X_val: val_idx.append(i) return train_idx,test_idx,val_idx class cobreTimeWindows(Dataset): def __init__(self,ts_path,pheno_path,test_size=0.20,val_size=0.10,random_state=111,n_timepoints=50): self.pheno_path = pheno_path pheno = pd.read_csv(pheno_path,delimiter='\t') pheno = pheno[pheno['ID']!=40075] pheno.sort_values('ID',inplace=True) self.labels = pheno['Subject Type'].map({'Patient':1,'Control':0}).tolist() self.ts_path = ts_path self.timeseries = [np.load(os.path.join(ts_path,p)) for p in natsort.natsorted(os.listdir(ts_path))] self.sub_ids = [int(p.split('_')[1]) for p in natsort.natsorted(os.listdir(ts_path))] #filter out bad sub idx = self.sub_ids.index(40075) del self.sub_ids[idx] del self.timeseries[idx] #split timeseries self.split_timeseries,split_labs = split_ts_labels(self.timeseries,[self.sub_ids,self.labels],n_timepoints=n_timepoints) self.split_sub_ids = split_labs[0] self.split_labels = split_labs[1] self.split_ids = split_labs[-1] #train test val split the data (each sub's splits in one category only) self.train_idx,self.test_idx,self.val_idx = train_test_val_splits(self.split_sub_ids, test_size=test_size, val_size=val_size, random_state=random_state) def __len__(self): return len(self.split_sub_ids) def __getitem__(self,idx): ts = torch.from_numpy(self.split_timeseries[idx]).transpose(0,1) sub_id = self.split_sub_ids[idx] label = self.split_labels[idx] split_id = self.split_ids[idx] #return {'timeseries':ts, #"sub_id":sub_id, # 'label':label, #"split_id":split_id # } return ts,label ###Output _____no_output_____ ###Markdown Model - C input channels (n time points of timeseries) - 6 GCN layers - 32 graph filters at each layer - Global average pooling layer - 2 fully connected layers - 256, 128 units - ReLU activation - Softmax last layer ###Code class GCN(torch.nn.Module): def __init__(self,edge_index,edge_weight,n_timepoints = 50): super().__init__() #forward(x, edge_index, edge_weight: Optional[torch.Tensor] = None self.edge_index = edge_index self.edge_weight = edge_weight self.conv1 = tg.nn.ChebConv(in_channels=n_timepoints,out_channels=32,K=2,bias=True) self.conv2 = tg.nn.ChebConv(in_channels=32,out_channels=32,K=2,bias=True) self.conv3 = tg.nn.ChebConv(in_channels=32,out_channels=32,K=2,bias=True) self.conv4 = tg.nn.ChebConv(in_channels=32,out_channels=32,K=2,bias=True) self.conv5 = tg.nn.ChebConv(in_channels=32,out_channels=32,K=2,bias=True) self.conv6 = tg.nn.ChebConv(in_channels=32,out_channels=32,K=2,bias=True) #self.fc1 = nn.Linear(512, 256) #self.fc2 = nn.Linear(256, 128) #self.fc3 = nn.Linear(128,2) self.fc1 = nn.Linear(512*32, 256) self.fc2 = nn.Linear(256, 128) self.fc3 = nn.Linear(128, 2) self.dropout = nn.Dropout(0.5) def forward(self,x): #print(x.size()) x = self.conv1(x,self.edge_index,self.edge_weight) x = F.relu(x) x = self.conv2(x,self.edge_index,self.edge_weight) x = F.relu(x) x = self.conv3(x,self.edge_index,self.edge_weight) x = F.relu(x) x = 
self.conv4(x,self.edge_index,self.edge_weight) x = F.relu(x) x = self.conv5(x,self.edge_index,self.edge_weight) x = F.relu(x) x = self.conv6(x,self.edge_index,self.edge_weight) #print(x.size()) x = tg.nn.global_mean_pool(x,torch.from_numpy(np.array(range(x.size(0)),dtype=int))) #print(x.size()) ####x = torch.transpose(x,1,2) x = x.view(-1, 512*32) x = self.fc1(x) #print(x.size()) x = self.dropout(x) x = self.fc2(x) #print(x.size()) x = self.dropout(x) x = self.fc3(x) #print(x.size()) #x = F.softmax(x,dim=0) return x np.array(range(3)) def train_loop(dataloader, model, loss_fn, optimizer): size = len(dataloader.sampler) for batch, (X, y) in enumerate(dataloader): # Compute prediction and loss pred = model(X) #print(pred.size()) #print(y) loss = loss_fn(pred, y) # Backpropagation optimizer.zero_grad() loss.backward() optimizer.step() loss, current = loss.item(), batch * len(X) print(batch) print(f"loss: {loss:>7f} [{current:>5d}/{size:>5d}]") def test_loop(dataloader, model, loss_fn): size = len(dataloader.sampler) test_loss, correct = 0, 0 with torch.no_grad(): for X, y in dataloader: pred = model.forward(X) test_loss += loss_fn(pred, y).item() correct += (pred.argmax(1) == y).type(torch.float).sum().item() test_loss /= size correct /= size print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n") graph = make_group_graph(conn_path) data = cobreTimeWindows(ts_path,pheno_path,n_timepoints=15) batch_size = 128 # Creating PT data samplers and loaders: train_sampler = SubsetRandomSampler(data.train_idx) test_sampler = SubsetRandomSampler(data.test_idx) val_sampler = SubsetRandomSampler(data.val_idx) train_loader = torch.utils.data.DataLoader(data, batch_size=batch_size, sampler=train_sampler) test_loader = torch.utils.data.DataLoader(data, batch_size=batch_size, sampler=test_sampler) val_loader = torch.utils.data.DataLoader(data, batch_size=batch_size, sampler=val_sampler) gcn = GCN(graph.edge_index,graph.weight,n_timepoints=15) data[0][0].size() learning_rate = 0.1 loss_fn = nn.CrossEntropyLoss() optimizer = torch.optim.SGD(gcn.parameters(), lr=learning_rate) epochs = 10 for t in range(epochs): print(f"Epoch {t+1}\n-------------------------------") train_loop(train_loader, gcn, loss_fn, optimizer) test_loop(test_loader, gcn, loss_fn) print("Done!") gcn np.unique(graph.edge_index) ###Output _____no_output_____
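###Markdown Before trusting the group graph, `knn_graph` can be exercised on a small made-up matrix (this assumes the `make_undirected` and `knn_graph` helpers defined earlier in this notebook): each column should keep at least its k strongest connections, and the undirected result should be symmetric. ###Code
import numpy as np

# Made-up 5x5 "connectome" for a quick check of the graph construction.
rng = np.random.RandomState(0)
toy = rng.rand(5, 5)
toy = (toy + toy.T) / 2          # symmetrize, like an average connectome
np.fill_diagonal(toy, 1.0)

toy_knn = knn_graph(toy, k=2, directed=False)

# Each column keeps at least k = 2 nonzero entries (symmetrization can add more),
# and the undirected graph is symmetric.
print(np.count_nonzero(toy_knn, axis=0))
print(np.allclose(toy_knn, toy_knn.T))
###Output _____no_output_____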
part02_spontaneous_decay.ipynb
###Markdown Usage Instructions* Use "ALT+r" key combination to go to slideshow mode* Use Spacebar (SHIFT+Spacebar) to go forward (backward) through the slideshow. [More info is available in the RISE documentation](https://damianavila.github.io/RISE/usage.html).This notebook is available from https://github.com/stephensekula/joy_of_the_muon. To run in Binder, click the "Launch Binder" badge[![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/stephensekula/joy_of_the_muon/master?filepath=part02_spontaneous_decay.ipynb) The Joy of the Muon Spontaneous Decay Professor Stephen Sekula (SMU) Overview * We will define spontaneous decay * We'll build a visual model of this process that allows you to play with the concept. * We'll explore the calculus of spontaneous decay to determine a mathematical model of the process. Spontaneous Decay A Toy Model * Let's build a model of spontaneous decay. The elements of the model are as follows. * We have a system consisting of $N$ independent elements at time zero ($t=0$) * Time then advances forward in uniform steps, each of size $dt$ * In each time step, each element of the system has a contant probability $P(t) = P(t+dt) = P$ of spontaneously decaying. Since all elements are independent of each other, the decay of one has no influence on any other. * For the model on the next slide, $N=225$ (15 per row on the game board) and $P=0.25$. Use the left button to advance time. Use the right button to reset the game and start over. ###Code # The code here allows a game board to be defined, and objects placed on the board that can be blue/solid (not decayed) # or red/unfilled (decayed). The user can step time forward using a slider, emulating evolving the system by # a time step dt. In each time step, since each object has a constant and uncorrelated probability P of decaying, # we'll see more and more objects decay. The user will see that the rate of decay is proportional to the number # of objects class Avatar: def __init__(self, coordinates=(0,0), size=1.0): self.coordinates = coordinates self.size = size def print(self): print(f" coordinates: {self.coordinates}, size={self.size}") class Muon: def __init__(self, decay_prob=0.1, avatar=None): self.decay_prob = decay_prob self.decay_state = 0 self.avatar = avatar def step(self): if self.decay_state == 1: return action = random.uniform(0, 1) if action < self.decay_prob: self.decay_state = 1 def print(self): print(f" Decay probability: {self.decay_prob}") print(f" Muon decayed? 
{self.decay_state}") self.avatar.print() def draw(self): color = 'b' fill = True if self.decay_state == 1: color='r' fill=False return plt.Circle(self.avatar.coordinates, self.avatar.size, color=color, fill=fill) global muon_list, muon_counts, muon_figure, muon_axis, muon_gameboard, muon_histogram muon_list = [] muon_counts = np.array([]) reset_button = widgets.Button( description = 'Reset Game', layout=widgets.Layout(width='25%', height='30px') ) advance_button = widgets.Button( description='Advance Time by dt', layout=widgets.Layout(width='25%', height='30px') ) def draw_gameboard(): global muon_list, muon_counts, muon_figure, muon_axis, muon_gameboard, muon_histogram muon_gameboard.cla() for muon in muon_list: muon_gameboard.add_artist(muon.draw()) plt.xlabel('Elements') muon_histogram.cla() muon_histogram.bar(x=np.arange(0, np.size(muon_counts), 1), height=muon_counts, color='b') plt.xlabel('Time Step') plt.ylabel('Surviving Elements') muon_histogram.text(0.25,0.85,f"Surviving: {100*muon_counts[-1]/muon_counts[0]:.1f}%", fontsize=16, transform=muon_histogram.transAxes, ma='left', bbox=dict(boxstyle="round",ec=(204/255, 0, 53/255), fc=(0.9, 0.9, 0.9)) ) muon_figure.canvas.draw() @reset_button.on_click def reset(button_object): global muon_list, muon_counts, muon_figure, muon_axis, muon_gameboard, muon_histogram muon_gameboard.cla() for muon in muon_list: muon.decay_state=0 muon_counts = np.array([len(muon_list)]) draw_gameboard() @advance_button.on_click def advance(button_object): global muon_list, muon_counts, muon_figure, muon_axis, muon_gameboard, muon_histogram count_undecayed = 0 for muon in muon_list: muon.step() if muon.decay_state == 0: count_undecayed += 1 muon_counts = np.append(muon_counts, count_undecayed) draw_gameboard() ###Output _____no_output_____ ###Markdown Decay Game ###Code # Code for the Decay Game muon_list = [] muon_counts = np.array([]) muon_figure, muon_axis = plt.subplots(figsize=(9,5)) plt.axis('off') gridspec = muon_figure.add_gridspec(ncols=2, nrows=1, width_ratios=[1,1], height_ratios=[1]) N_per_row = 40 mu_spacing = 0.005 max_size = 1.0 decay_prob = 0.25 mu_size = (max_size - (N_per_row-1)*mu_spacing)/(2*N_per_row) for xrow in np.arange( mu_size, max_size, 2*mu_size+mu_spacing ): for yrow in np.arange( mu_size, max_size, 2*mu_size+mu_spacing ): muon_list.append(Muon( decay_prob = decay_prob, avatar=Avatar(coordinates=(xrow,yrow), size=mu_size))) #print(muon_list[-1].print()) muon_gameboard = muon_figure.add_subplot(gridspec[0,0]) muon_histogram = muon_figure.add_subplot(gridspec[0,1]) muon_counts = np.array([len(muon_list)]) draw_gameboard() plt.show() plt.tight_layout() widgets.VBox(children=[advance_button, reset_button]) ###Output _____no_output_____ ###Markdown What to Watch for When Playing the Decay Game* How many time steps pass before the population declines to half its original number?* How many time steps pass before the population declines to one-quarter its original number? What is the difference in the time steps between 100% and 50%, vs. 50% and 25%? Are they similar or very different?* How many times steps pass before the population declines to about 36.8% of its original number? We'll come back to this strange question in a bit. * **Go back to the game and play it again. Try answering these questions. Don't be afraid to reset the game and take a number of trials to answer each question. 
Average the results of your trials for each question.** ###Code # Data can go here: time_to_half=2.5 time_to_quarter=(5.0+4.5+4.5)/3 time_to_368=3.5 print(time_to_half/math.log(2)) ###Output _____no_output_____ ###Markdown My Observations* I observed that it took about 2.5 time steps to get to 50% of the population surviving.* I observed that it took about 4.5-5.0 time steps to get to 25% of the population surviving. * I note that the time step gap between 100% and 50% is about the same as the time step gap between 50% and 25%, within the limits of the precision of this game.* I observed that it took about 3.5 steps to get to 36.8% of the population. The Calculus of Spontaneous Decay How does $\Delta N(t+dt)$ relate to $N(t)$? ###Code # This code plots the change per time step against the number remaining at the beginning of that step N = [] dN = [] for i in np.arange(0, len(muon_counts)-1,1): N.append(muon_counts[i]) dN.append(muon_counts[i+1] - muon_counts[i]) #print(N, dN) figure, axis = plt.subplots(figsize=(5,5)) plt.scatter(N, dN) axis.set_xlim(N[0], 0) plt.ylabel('$\Delta N$') plt.xlabel('N') plt.grid(which='both') plt.tight_layout() ###Output _____no_output_____
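###Markdown For the toy model's per-step decay probability of P = 0.25, the survival fraction after n steps is (1 - P)^n, so the expected timescales can be computed directly and compared with the (noisy) observations above; this is a rough sketch of that comparison. ###Code
import numpy as np

P = 0.25                      # per-step decay probability used in the game above
survival_per_step = 1.0 - P   # fraction expected to survive each step

# Number of steps for the population to fall to 50% (half-life).
half_life = np.log(0.5) / np.log(survival_per_step)

# Number of steps to fall to 1/e ~ 36.8% of the population (mean lifetime).
mean_lifetime = -1.0 / np.log(survival_per_step)

print(f"expected half-life : {half_life:.2f} steps")      # ~2.4, close to the observed 2.5
print(f"expected 1/e time  : {mean_lifetime:.2f} steps")  # ~3.5, close to the observed 3.5
print(f"half-life / 1/e time = {half_life / mean_lifetime:.3f} (ln 2 = {np.log(2):.3f})")
###Output _____no_output_____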
Chapter11/LSTM_working_details.ipynb
###Markdown https://colab.research.google.com/drive/1McyZC6yB1mbUqfpUQXccvTTReSwAm04e ###Code from keras.preprocessing.text import one_hot from keras.preprocessing.sequence import pad_sequences from keras.models import Sequential from keras.layers import Dense from keras.layers import Flatten from keras.layers.recurrent import SimpleRNN from keras.layers.embeddings import Embedding from keras.layers import LSTM import numpy as np from keras.utils import to_categorical #define documents docs = ['this is','is an'] # define class labels labels = ['an','example'] from collections import Counter counts = Counter() for i,review in enumerate(docs+labels): counts.update(review.split()) words = sorted(counts, key=counts.get, reverse=True) vocab_size=len(words) word_to_int = {word: i for i, word in enumerate(words, 1)} encoded_docs = [] for doc in docs: encoded_docs.append([word_to_int[word] for word in doc.split()]) encoded_labels = [] for label in labels: encoded_labels.append([word_to_int[word] for word in label.split()]) # pad documents to a max length of 2 words max_length = 2 padded_docs = pad_sequences(encoded_docs, maxlen=max_length, padding='pre') print(padded_docs) # processing the output dataset one_hot_encoded_labels = to_categorical(encoded_labels, num_classes=5) print(one_hot_encoded_labels) # define the model embed_length=1 max_length=2 model = Sequential() model.add(LSTM(1,activation='tanh', return_sequences=False,recurrent_initializer='Zeros',recurrent_activation='sigmoid', input_shape=(max_length,embed_length),unroll=True)) model.add(Dense(5, activation='softmax')) # compile the model model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc']) # summarize the model print(model.summary()) model.fit(padded_docs.reshape(2,2,1),np.array(one_hot_encoded_labels),epochs=500) model.weights model.get_weights() padded_docs[0] model.predict(padded_docs[0].reshape(1,2,1)) model.get_weights()[0] input_t0 = 3 cell_state0 = 0 forget0 = input_t0*model.get_weights()[0][0][1] + model.get_weights()[2][1] forget1 = 1/(1+np.exp(-(forget0))) cell_state1 = forget1 * cell_state0 input_t0_1 = input_t0*model.get_weights()[0][0][0] + model.get_weights()[2][0] input_t0_2 = 1/(1+np.exp(-(input_t0_1))) input_t0_cell1 = input_t0*model.get_weights()[0][0][2] + model.get_weights()[2][2] input_t0_cell2 = np.tanh(input_t0_cell1) input_t0_cell3 = input_t0_cell2*input_t0_2 input_t0_cell4 = input_t0_cell3 + cell_state1 output_t0_1 = input_t0*model.get_weights()[0][0][3] + model.get_weights()[2][3] output_t0_2 = 1/(1+np.exp(-output_t0_1)) hidden_layer_1 = np.tanh(input_t0_cell4)*output_t0_2 input_t1 = 1 cell_state1 = input_t0_cell4 forget21 = hidden_layer_1*model.get_weights()[1][0][1] + model.get_weights()[2][1] + input_t1*model.get_weights()[0][0][1] forget_22 = 1/(1+np.exp(-(forget21))) cell_state2 = cell_state1 * forget_22 input_t1_1 = input_t1*model.get_weights()[0][0][0] + model.get_weights()[2][0] + hidden_layer_1*model.get_weights()[1][0][0] input_t1_2 = 1/(1+np.exp(-(input_t1_1))) input_t1_cell1 = input_t1*model.get_weights()[0][0][2] + model.get_weights()[2][2]+ hidden_layer_1*model.get_weights()[1][0][2] input_t1_cell2 = np.tanh(input_t1_cell1) input_t1_cell3 = input_t1_cell2*input_t1_2 input_t1_cell4 = input_t1_cell3 + cell_state2 output_t1_1 = input_t1*model.get_weights()[0][0][3] + model.get_weights()[2][3]+ hidden_layer_1*model.get_weights()[1][0][3] output_t1_2 = 1/(1+np.exp(-output_t1_1)) hidden_layer_2 = np.tanh(input_t1_cell4)*output_t1_2 final_output = hidden_layer_2 * 
model.get_weights()[3][0] + model.get_weights()[4] np.exp(final_output)/np.sum(np.exp(final_output)) ###Output _____no_output_____
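###Markdown The cell-by-cell arithmetic above can be collected into one helper that replays the single-unit LSTM and the softmax layer by hand. This is a sketch that assumes the weight layout used above — kernel `[0]`, recurrent kernel `[1]`, bias `[2]`, dense kernel `[3]`, dense bias `[4]` — and the gate ordering input/forget/cell/output; its output should match the hand-computed softmax. ###Code
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def manual_lstm_softmax(model, inputs):
    # Weight slices for a 1-dimensional input and a single LSTM unit.
    W = model.get_weights()[0][0]   # input kernel, one value per gate
    U = model.get_weights()[1][0]   # recurrent kernel, one value per gate
    b = model.get_weights()[2]      # bias, one value per gate
    h, c = 0.0, 0.0                 # hidden state and cell state start at zero
    for x in inputs:
        i = sigmoid(x * W[0] + h * U[0] + b[0])   # input gate
        f = sigmoid(x * W[1] + h * U[1] + b[1])   # forget gate
        g = np.tanh(x * W[2] + h * U[2] + b[2])   # candidate cell state
        o = sigmoid(x * W[3] + h * U[3] + b[3])   # output gate
        c = f * c + i * g
        h = o * np.tanh(c)
    logits = h * model.get_weights()[3][0] + model.get_weights()[4]
    return np.exp(logits) / np.sum(np.exp(logits))

print(manual_lstm_softmax(model, padded_docs[0]))
###Output _____no_output_____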
Lesson_5/convolution_visualization/maxpooling_visualization.ipynb
###Markdown Maxpooling LayerIn this notebook, we add and visualize the output of a maxpooling layer in a CNN. A convolutional layer + activation function, followed by a pooling layer, and a linear layer (to create a desired output size) make up the basic layers of a CNN. Import the image ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # TODO: Feel free to try out your own images here by changing img_path # to a file path to another image on your computer! img_path = 'data/udacity_sdc.png' # load color image bgr_img = cv2.imread(img_path) # convert to grayscale gray_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2GRAY) # normalize, rescale entries to lie in [0,1] gray_img = gray_img.astype("float32")/255 # plot image plt.imshow(gray_img, cmap='gray') plt.show() ###Output _____no_output_____ ###Markdown Define and visualize the filters ###Code import numpy as np ## TODO: Feel free to modify the numbers here, to try out another filter! filter_vals = np.array([[-1, -1, 1, 1], [-1, -1, 1, 1], [-1, -1, 1, 1], [-1, -1, 1, 1]]) print('Filter shape: ', filter_vals.shape) # Defining four different filters, # all of which are linear combinations of the `filter_vals` defined above # define four filters filter_1 = filter_vals filter_2 = -filter_1 filter_3 = filter_1.T filter_4 = -filter_3 filters = np.array([filter_1, filter_2, filter_3, filter_4]) # For an example, print out the values of filter 1 print('Filter 1: \n', filter_1) ###Output Filter 1: [[-1 -1 1 1] [-1 -1 1 1] [-1 -1 1 1] [-1 -1 1 1]] ###Markdown Define convolutional and pooling layersYou've seen how to define a convolutional layer, next is a:* Pooling layerIn the next cell, we initialize a convolutional layer so that it contains all the created filters. Then add a maxpooling layer, [documented here](http://pytorch.org/docs/stable/_modules/torch/nn/modules/pooling.html), with a kernel size of (2x2) so you can see that the image resolution has been reduced after this step!A maxpooling layer reduces the x-y size of an input and only keeps the most *active* pixel values. Below is an example of a 2x2 pooling kernel, with a stride of 2, appied to a small patch of grayscale pixel values; reducing the x-y size of the patch by a factor of 2. Only the maximum pixel values in 2x2 remain in the new, pooled output. 
###Code import torch import torch.nn as nn import torch.nn.functional as F # define a neural network with a convolutional layer with four filters # AND a pooling layer of size (2, 2) class Net(nn.Module): def __init__(self, weight): super(Net, self).__init__() # initializes the weights of the convolutional layer to be the weights of the 4 defined filters k_height, k_width = weight.shape[2:] # assumes there are 4 grayscale filters self.conv = nn.Conv2d(1, 4, kernel_size=(k_height, k_width), bias=False) self.conv.weight = torch.nn.Parameter(weight) # define a pooling layer(Here we use 2x2 MaxPool) self.pool = nn.MaxPool2d(2, 2) def forward(self, x): # calculates the output of a convolutional layer # pre- and post-activation conv_x = self.conv(x) activated_x = F.relu(conv_x) # applies pooling layer pooled_x = self.pool(activated_x) # returns all layers return conv_x, activated_x, pooled_x # instantiate the model and set the weights weight = torch.from_numpy(filters).unsqueeze(1).type(torch.FloatTensor) model = Net(weight) # print out the layer in the network print(model) ###Output Net( (conv): Conv2d(1, 4, kernel_size=(4, 4), stride=(1, 1), bias=False) (pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) ) ###Markdown Visualize the output of each filterFirst, we'll define a helper function, `viz_layer` that takes in a specific layer and number of filters (optional argument), and displays the output of that layer once an image has been passed through. ###Code # helper function for visualizing the output of a given layer # default number of filters is 4 def viz_layer(layer, n_filters= 4): fig = plt.figure(figsize=(20, 20)) for i in range(n_filters): ax = fig.add_subplot(1, n_filters, i+1) # grab layer outputs ax.imshow(np.squeeze(layer[0,i].data.numpy()), cmap='gray') ax.set_title('Output %s' % str(i+1)) ###Output _____no_output_____ ###Markdown Let's look at the output of a convolutional layer after a ReLu activation function is applied. ReLu activationA ReLu function turns all negative pixel values in 0's (black). See the equation pictured below for input pixel values, `x`. ###Code # plot original image plt.imshow(gray_img, cmap='gray') # visualize all filters fig = plt.figure(figsize=(12, 6)) fig.subplots_adjust(left=0, right=1.5, bottom=0.8, top=1, hspace=0.05, wspace=0.05) for i in range(4): ax = fig.add_subplot(1, 4, i+1, xticks=[], yticks=[]) ax.imshow(filters[i], cmap='gray') ax.set_title('Filter %s' % str(i+1)) # convert the image into an input Tensor gray_img_tensor = torch.from_numpy(gray_img).unsqueeze(0).unsqueeze(1) # get all the layers conv_layer, activated_layer, pooled_layer = model(gray_img_tensor) # visualize the output of the activated conv layer viz_layer(activated_layer) ###Output _____no_output_____ ###Markdown Visualize the output of the pooling layerThen, take a look at the output of a pooling layer. The pooling layer takes as input the feature maps pictured above and reduces the dimensionality of those maps, by some pooling factor, by constructing a new, smaller image of only the maximum (brightest) values in a given kernel area.Take a look at the values on the x, y axes to see how the image has changed size. ###Code # visualize the output of the pooling layer viz_layer(pooled_layer) ###Output _____no_output_____
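###Markdown To make the pooling arithmetic concrete, here is a made-up 4x4 patch run through the same kind of `MaxPool2d(2, 2)` layer: each non-overlapping 2x2 block collapses to its maximum value, halving the height and width. ###Code
import torch
import torch.nn as nn

# Made-up 4x4 "feature map", shaped (batch, channel, height, width).
patch = torch.tensor([[1., 9., 2., 0.],
                      [4., 3., 5., 6.],
                      [7., 0., 1., 1.],
                      [2., 8., 3., 4.]]).reshape(1, 1, 4, 4)

pool = nn.MaxPool2d(2, 2)
print(pool(patch).squeeze())
# Each 2x2 block keeps only its maximum:
# tensor([[9., 6.],
#         [8., 4.]])
###Output _____no_output_____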
02-Variaveis_Tipo_Estrutura_Dados/08-Exercicios.ipynb
###Markdown Exercises ###Code
# Exercise 1 - Print the numbers from 1 to 10. Use a list to store the numbers.
lista = list(range(1,11))
print(lista)
# Exercise 2 - Create a list with 5 objects and print it
lista2 = [15, 1.6, "aline", 'carlos', ("teste")]
print(lista2)
# Exercise 3 - Create two strings and concatenate them into a third string
string1 = "é muito bom aprender"
string2 = "Data science"
string3 = string1 + " " + string2
print(string3)
# Exercise 4 - Create a tuple with the elements 1, 2, 2, 3, 4, 4, 4, 5 and then use the tuple's
# count method to check how many times the number 4 appears in the tuple
tupla1 = (1, 2, 2, 3, 4, 4, 4, 5)
tupla1.count(4)
# Exercise 5 - Create an empty dictionary and print it
dicionario = {}
print(dicionario)
# Exercise 6 - Create a dictionary with 3 keys and 3 values and print it
dicionario2 = {"key1": "Aline", "key2": 1, "Key3": 6.4}
print(dicionario2)
# Exercise 7 - Add one more element to the dictionary created in the previous exercise and print it
dicionario2["Key4"] = "Novo"
print(dicionario2)
# Exercise 8 - Create a dictionary with 3 keys and 3 values. One of the values must be a list of 2 numeric elements.
# Print the dictionary.
dicionario3 = {"elm1": "teste", "elm2": 52, "elm3": [1,17]}
type(dicionario3["elm3"])
print(dicionario3)
# Exercise 9 - Create a list with 4 elements. The first element must be a string,
# the second a tuple of 2 elements, the third a dictionary with 2 keys and 2 values and
# the fourth element a float value.
# Print the list.
dicionario4 = {"key1": "string", "Key2": ("tupla", 15), "key3":{"k1":"novo dict", "k2": 15}, "key4": 1.5}
type(dicionario4["key4"])
print(dicionario4)
# Exercise 10 - Consider the string below. Print only the characters from position 1 to 18.
frase = 'Cientista de Dados é o profissional mais sexy do século XXI'
frase[0:18]
###Output _____no_output_____
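###Markdown A note on exercise 10: Python slices are zero-indexed and the stop index is exclusive, so `frase[0:18]` keeps the characters at indices 0 through 17, that is, the first 18 characters of the string. ###Code
frase = 'Cientista de Dados é o profissional mais sexy do século XXI'

# Indices 0..17 are kept; index 18 (the space after "Dados") is excluded.
print(frase[0:18])
print(len(frase[0:18]))   # 18 characters
###Output _____no_output_____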
examples/EarlyStopping.ipynb
###Markdown Early stopping callback stop if objective function is below a certain threshold ###Code from mango import Tuner param_dict = dict(x=range(-10, 10)) def objfunc(p_list): return [p['x'] ** 2 for p in p_list] def early_stop(results): ''' stop if best objective is below 2 results: dict (same keys as dict returned by tuner.minimize/maximize) ''' return results['best_objective'] <= 2 config = dict(early_stopping=early_stop) tuner = Tuner(param_dict, objfunc, conf_dict=config) results = tuner.minimize() results ###Output _____no_output_____ ###Markdown stop if objective function does not improve for n iterations ###Code from mango import Tuner param_dict = dict(x=range(-10, 10)) def objfunc(p_list): return [p['x'] ** 2 for p in p_list] def early_stop(results): ''' stop if best objective does not improve for 2 iterations results: dict (same keys as dict returned by tuner.minimize/maximize) ''' current_best = results['best_objective'] patience_window = results['objective_values'][-3:] return min(patience_window) > current_best config = dict(early_stopping=early_stop) tuner = Tuner(param_dict, objfunc, conf_dict=config) results = tuner.minimize() results ###Output _____no_output_____ ###Markdown stop if objective function does not improve for n secs ###Code import time import numpy as np from mango import Tuner param_dict = dict(x=range(-10, 10)) def objfunc(p_list): time.sleep(0.5) return [p['x'] ** 2 for p in p_list] class context: previous_best = -1.0 previous_best_time = None min_improvement_secs = 0.1 def early_stop(results): ''' stop if objective does not improve for 0.1 seconds ''' current_best = results['best_objective'] current_time = time.time() if current_best == context.previous_best and \ (current_time - context.previous_best_time > context.min_improvement_secs): print("no improvement in %f seconds: stopping early." 
% context.min_improvement_secs) return True context.previous_best = current_best context.previous_best_time = current_time return False config = dict(early_stopping=early_stop) tuner = Tuner(param_dict, objfunc, conf_dict=config) results = tuner.minimize() results ###Output _____no_output_____ ###Markdown Early stopping callback stop if objective function is below a certain threshold ###Code from mango import Tuner param_dict = dict(x=range(-10, 10)) def objfunc(p_list): return [p['x'] ** 2 for p in p_list] def early_stop(results): ''' stop if best objective is below 2 results: dict (same keys as dict returned by tuner.minimize/maximize) ''' return results['best_objective'] <= 2 config = dict(early_stopping=early_stop) tuner = Tuner(param_dict, objfunc, conf_dict=config) results = tuner.minimize() results ###Output _____no_output_____ ###Markdown stop if objective function does not improve for n iterations ###Code from mango import Tuner param_dict = dict(x=range(-10, 10)) def objfunc(p_list): return [p['x'] ** 2 for p in p_list] def early_stop(results): ''' stop if best objective does not improve for 2 iterations results: dict (same keys as dict returned by tuner.minimize/maximize) ''' current_best = results['best_objective'] patience_window = results['objective_values'][-3:] return min(patience_window) > current_best config = dict(early_stopping=early_stop) tuner = Tuner(param_dict, objfunc, conf_dict=config) results = tuner.minimize() results ###Output _____no_output_____ ###Markdown stop if objective function does not improve for n secs ###Code import time import numpy as np from mango import Tuner param_dict = dict(x=range(-10, 10)) def objfunc(p_list): time.sleep(0.5) return [p['x'] ** 2 for p in p_list] class context: previous_best = None previous_best_time = None min_improvement_secs = 0.1 def early_stop(results): ''' stop if objective does not improve for 0.1 seconds ''' current_best = results['best_objective'] current_time = time.time() _stop = False if context.previous_best is None: context.previous_best = current_best context.previous_best_time = current_time elif current_best == context.previous_best and \ (current_time - context.previous_best_time > context.min_improvement_secs): print("no improvement in %f seconds: stopping early." % context.min_improvement_secs) _stop = True else: context.previous_best = current_best context.previous_best_time = current_time return _stop config = dict(early_stopping=early_stop) tuner = Tuner(param_dict, objfunc, conf_dict=config) results = tuner.minimize() results ###Output _____no_output_____
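###Markdown The same callback pattern also works for a plain wall-clock budget. The sketch below uses an arbitrary 5-second budget and mirrors the class-attribute bookkeeping above; it assumes only the callback signature shown in these examples. ###Code
import time
from mango import Tuner

param_dict = dict(x=range(-10, 10))

def objfunc(p_list):
    return [p['x'] ** 2 for p in p_list]

class budget:
    start_time = None
    max_secs = 5.0   # arbitrary wall-clock budget for the whole search

def early_stop(results):
    '''
    stop once the search has been running for more than budget.max_secs seconds
    (measured from the first time the callback is invoked)
    '''
    if budget.start_time is None:
        budget.start_time = time.time()
        return False
    return (time.time() - budget.start_time) > budget.max_secs

config = dict(early_stopping=early_stop)
tuner = Tuner(param_dict, objfunc, conf_dict=config)
results = tuner.minimize()
results
###Output _____no_output_____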
jupyter/dAnalysis/c_pandas_class/Ex05_groupby2_titanic.ipynb
###Markdown Titanic dataset
* **Survival** - Survival status: 0 means the passenger died, 1 means they survived.
* **Pclass** - Ticket class: 1st (1), 2nd (2), and 3rd (3); 1st class is the best and 3rd class the worst.
* **Sex** - Gender: male or female.
* **Age** - Age. Values are missing here and there, and fractional values also appear.
* **SibSp** - Total number of siblings and spouses who boarded with the passenger.
* **Parch** - Total number of parents and children who boarded with the passenger.
* **Ticket** - Ticket number, made up of assorted text (strings).
* **Fare** - Fare paid, given as a decimal number.
* **Cabin** - Cabin number. Many values are missing; made up of assorted text (strings).
* **Embarked** - Port of embarkation: C is Cherbourg in France, Q is Queenstown in the UK, and S is Southampton in the UK.
###Code
import pandas as pd
titanic = pd.read_csv('data/titanic.csv')
titanic
# check rows and columns
titanic.shape
# check data types
titanic.info()
# check basic statistics
titanic.describe()
# (1) number of 1st-class passengers under 10 years old
titanic[(titanic["Pclass"] == 1) & (titanic["Age"] < 10)]
# (2) number of passengers per cabin class
titanic.groupby('Pclass').count()
# (3) number of survivors per cabin class
titanic[titanic['Survived'] == 1].groupby('Pclass').count()
"""
groupby().agg()
A DataFrame's groupby() is inevitably less flexible than SQL's GROUP BY.
Still, when you want to apply a different aggregation function to each of several
columns within groupby(), use the agg() function.
"""
# to get the maximum age value and the mean fare for each cabin class
agg_format = {'Age':'max', 'SibSp':'sum', 'Fare':'mean'}
titanic.groupby('Pclass').agg(agg_format)
# to classify by age: under 15 as 'Child', 15 to under 60 as 'Adult', and 60 and over as 'Elderly'
def get_age_cate(age):
    cat = ''
    if age < 15: cat = 'Child'
    elif age < 60 : cat = 'Adult'
    else : cat = 'Elderly'
    return cat
Age_cate = titanic['Age'].apply(get_age_cate)
Age_cate
# add the Age_cate column using the function
titanic['Age_cate'] = Age_cate
titanic
# extract the number of survivors per cabin class and age category
titanic[titanic['Survived'] == 1].groupby(['Pclass', 'Age_cate']).count()
###Output _____no_output_____
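###Markdown Since `Survived` is a 0/1 flag, the same groupby machinery also gives survival rates rather than counts; this assumes the `titanic` DataFrame (with the `Age_cate` column added above) is still in memory. ###Code
# Survival rate by cabin class and age category (mean of the 0/1 Survived flag).
titanic.groupby(['Pclass', 'Age_cate'])['Survived'].mean()

# The same view as a class-by-age-category table.
titanic.pivot_table(index='Pclass', columns='Age_cate',
                    values='Survived', aggfunc='mean')
###Output _____no_output_____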
train_BlazeFace256.ipynb
###Markdown set up person only VOC dataset ###Code # load files vocpath = os.path.join("..", "VOCdevkit", "VOC2007") train_img_list, train_anno_list, val_img_list, val_anno_list = make_datapath_list(vocpath, cls="person") # extend with VOC2012 vocpath = "../VOCdevkit/VOC2012" train_img_list2, train_anno_list2, _, _ = make_datapath_list(vocpath, cls="person", VOC2012=True) train_img_list.extend(train_img_list2) train_anno_list.extend(train_anno_list2) # make Dataset voc_classes = ['person'] color_mean = (104, 117, 123) # (BGR)の色の平均値 print("trainlist: ", len(train_img_list)) print("vallist: ", len(val_img_list)) ## DatasetTransformを適応 transform = DatasetTransform(input_size, color_mean) transform_anno = Anno_xml2list(voc_classes) train_dataset = VOCDataset(train_img_list, train_anno_list, phase = "train", transform=transform, transform_anno = transform_anno) val_dataset = VOCDataset(val_img_list, val_anno_list, phase="val", transform=DatasetTransform( input_size, color_mean), transform_anno=Anno_xml2list(voc_classes)) batch_size = 32 train_dataloader = data.DataLoader( train_dataset, batch_size=batch_size, shuffle=True, collate_fn=od_collate_fn, num_workers=8) val_dataloader = data.DataLoader( val_dataset, batch_size=batch_size, shuffle=False, collate_fn=od_collate_fn, num_workers=8) dataloaders_dict = {"train": train_dataloader, "val": val_dataloader} train_dataset[0] # check operation batch_iterator = iter(dataloaders_dict["train"]) # iter images, targets = next(batch_iterator) # get first element print(images.size()) # torch.Size([4, 3, 300, 300]) print(len(targets)) print(targets[1].shape) # check targets targets[1] ###Output _____no_output_____ ###Markdown test with ssd model. ###Code from utils.blazeface import SSD256 # SSD300の設定 ssd_cfg = { 'num_classes': 2, # 背景クラスを含めた合計クラス数 'input_size': 256, # 画像の入力サイズ 'bbox_aspect_num': [4, 6], # 出力するDBoxのアスペクト比の種類 'feature_maps': [16, 8], # 各sourceの画像サイズ 'steps': [8, 16], # DBOXの大きさを決める 'min_sizes': [16, 32], # DBOXの大きさを決める 'max_sizes': [32, 100], # DBOXの大きさを決める 'aspect_ratios': [[2], [2, 3], [2, 3], [2, 3], [2], [2]], } net = SSD256(phase="train", cfg=ssd_cfg) # SSDのweightsを設定 def weights_init(m): if isinstance(m, nn.Conv2d): init.kaiming_normal_(m.weight.data) if m.bias is not None: nn.init.constant_(m.bias, 0.0) # set inits for loc and conf net.loc.apply(weights_init) net.conf.apply(weights_init) # GPUが使えるか確認 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") print("using:", device) print("set weights!") print(net) from utils.ssd_model import MultiBoxLoss # define loss criterion = MultiBoxLoss(jaccard_thresh=0.5,neg_pos=3, device=device) # optim import torch.optim as optim optimizer = optim.Adam(net.parameters(), lr=1e-4, weight_decay=5e-4) def get_current_lr(epoch): lr = 1e-4 for i,lr_decay_epoch in enumerate([120,180]): if epoch >= lr_decay_epoch: lr *= 0.1 return lr def adjust_learning_rate(optimizer, epoch): lr = get_current_lr(epoch) print("lr is:", lr) for param_group in optimizer.param_groups: param_group['lr'] = lr # モデルを学習させる関数を作成 def train_model(net, dataloaders_dict, criterion, optimizer, num_epochs): # GPUが使えるかを確認 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") print("used device:", device) # ネットワークをGPUへ net.to(device) # ネットワークがある程度固定であれば、高速化させる torch.backends.cudnn.benchmark = True # イテレーションカウンタをセット iteration = 1 epoch_train_loss = 0.0 # epochの損失和 epoch_val_loss = 0.0 # epochの損失和 logs = [] # epochのループ for epoch in range(num_epochs+1): adjust_learning_rate(optimizer, epoch) # 開始時刻を保存 
t_epoch_start = time.time() t_iter_start = time.time() print('-------------') print('Epoch {}/{}'.format(epoch+1, num_epochs)) print('-------------') # training and validation loop for each epoch for phase in ['train', 'val']: if phase == 'train': net.train() # put the model in training mode print('train') else: if((epoch+1) % 5 == 0): net.eval() # put the model in evaluation mode print('-------------') print('val') else: # run validation only once every 5 epochs continue # loop over minibatches from the dataloader for images, targets in dataloaders_dict[phase]: # send the data to the GPU if one is available images = images.to(device) targets = [ann.to(device) for ann in targets] # move each tensor in the list to the GPU # reset the optimizer's gradients optimizer.zero_grad() # forward pass with torch.set_grad_enabled(phase == 'train'): # forward pass outputs = net(images) # compute the loss loss_l, loss_c = criterion(outputs, targets) loss = loss_l + loss_c # backpropagate during training if phase == 'train': loss.backward() # compute gradients # clip gradients at 2.0 so they cannot blow up and destabilise training nn.utils.clip_grad_value_( net.parameters(), clip_value=2.0) optimizer.step() # update the parameters if (iteration % 10 == 0): # print the loss once every 10 iterations t_iter_finish = time.time() duration = t_iter_finish - t_iter_start print('Iteration {} || Loss: {:.4f} || 10iter: {:.4f} sec.'.format( iteration, loss.item(), duration)) t_iter_start = time.time() epoch_train_loss += loss.item() iteration += 1 # validation else: epoch_val_loss += loss.item() # per-phase loss for this epoch t_epoch_finish = time.time() print('-------------') print('epoch {} || Epoch_TRAIN_Loss:{:.4f} ||Epoch_VAL_Loss:{:.4f}'.format( epoch+1, epoch_train_loss, epoch_val_loss)) print('timer: {:.4f} sec.'.format(t_epoch_finish - t_epoch_start)) t_epoch_start = time.time() # save the log log_epoch = {'epoch': epoch+1, 'train_loss': epoch_train_loss, 'val_loss': epoch_val_loss} logs.append(log_epoch) df = pd.DataFrame(logs) df.to_csv("log/log_output.csv") epoch_train_loss = 0.0 # reset the running training loss epoch_val_loss = 0.0 # reset the running validation loss # save the network if ((epoch+1) % 10 == 0): torch.save(net.state_dict(), 'weights/blazeface256_' + str(epoch+1) + '.pth') ###Output _____no_output_____ ###Markdown start training here ###Code num_epochs = 200 train_model(net, dataloaders_dict, criterion, optimizer, num_epochs=num_epochs) ###Output used device: cuda:0 lr is: 0.0001 ------------- Epoch 1/200 ------------- train Iteration 10 || Loss: 65.4531 || 10iter: 7.2785 sec. Iteration 20 || Loss: 49.4294 || 10iter: 3.5289 sec. Iteration 30 || Loss: 64.4441 || 10iter: 3.9851 sec. Iteration 40 || Loss: 37.6062 || 10iter: 4.2480 sec. Iteration 50 || Loss: 56.9393 || 10iter: 4.0935 sec. Iteration 60 || Loss: 50.8827 || 10iter: 4.1093 sec. Iteration 70 || Loss: 60.1804 || 10iter: 4.0369 sec. Iteration 80 || Loss: 43.3787 || 10iter: 3.9698 sec. Iteration 90 || Loss: 48.7653 || 10iter: 3.9149 sec. Iteration 100 || Loss: 49.1596 || 10iter: 4.0868 sec. Iteration 110 || Loss: 39.0327 || 10iter: 4.1437 sec. Iteration 120 || Loss: 46.1667 || 10iter: 4.0960 sec. Iteration 130 || Loss: 58.0489 || 10iter: 4.1082 sec. Iteration 140 || Loss: 49.2085 || 10iter: 3.9622 sec. Iteration 150 || Loss: 48.5656 || 10iter: 4.1017 sec. Iteration 160 || Loss: 39.4221 || 10iter: 3.7092 sec. Iteration 170 || Loss: 47.2192 || 10iter: 3.7537 sec. Iteration 180 || Loss: 42.5922 || 10iter: 3.7061 sec. Iteration 190 || Loss: 40.4525 || 10iter: 3.5844 sec. Iteration 200 || Loss: 38.3090 || 10iter: 3.3327 sec. ------------- epoch 1 || Epoch_TRAIN_Loss:10714.4430 ||Epoch_VAL_Loss:0.0000 timer: 82.9053 sec. lr is: 0.0001 ------------- Epoch 2/200 ------------- train Iteration 210 || Loss: 48.8379 || 10iter: 5.7393 sec. 
Iteration 220 || Loss: 35.9089 || 10iter: 3.6852 sec. Iteration 230 || Loss: 37.3149 || 10iter: 3.7270 sec. Iteration 240 || Loss: 40.6810 || 10iter: 3.7706 sec. Iteration 250 || Loss: 43.4674 || 10iter: 3.6401 sec. Iteration 260 || Loss: 47.3659 || 10iter: 3.7846 sec. Iteration 270 || Loss: 36.9680 || 10iter: 3.7297 sec. Iteration 280 || Loss: 30.0206 || 10iter: 3.6035 sec. Iteration 290 || Loss: 26.0999 || 10iter: 3.6699 sec. Iteration 300 || Loss: 32.3027 || 10iter: 3.6979 sec. Iteration 310 || Loss: 36.4805 || 10iter: 3.6716 sec. Iteration 320 || Loss: 30.9623 || 10iter: 3.6738 sec. Iteration 330 || Loss: 36.0357 || 10iter: 3.6935 sec. Iteration 340 || Loss: 32.1889 || 10iter: 3.6056 sec. Iteration 350 || Loss: 38.8257 || 10iter: 3.5936 sec. Iteration 360 || Loss: 33.5170 || 10iter: 3.6954 sec. Iteration 370 || Loss: 34.8093 || 10iter: 3.7186 sec. Iteration 380 || Loss: 30.3271 || 10iter: 4.0283 sec. Iteration 390 || Loss: 39.2620 || 10iter: 3.6747 sec. Iteration 400 || Loss: 32.0571 || 10iter: 3.3872 sec. ------------- epoch 2 || Epoch_TRAIN_Loss:7498.0603 ||Epoch_VAL_Loss:0.0000 timer: 77.6139 sec. lr is: 0.0001 ------------- Epoch 3/200 ------------- train Iteration 410 || Loss: 27.7411 || 10iter: 4.3920 sec. Iteration 420 || Loss: 27.7113 || 10iter: 4.0320 sec. Iteration 430 || Loss: 32.8583 || 10iter: 3.6873 sec. Iteration 440 || Loss: 30.6177 || 10iter: 3.7061 sec. Iteration 450 || Loss: 27.9105 || 10iter: 3.8089 sec. Iteration 460 || Loss: 33.8062 || 10iter: 3.6799 sec. Iteration 470 || Loss: 30.1208 || 10iter: 3.7576 sec. Iteration 480 || Loss: 25.6990 || 10iter: 3.6587 sec. Iteration 490 || Loss: 26.9587 || 10iter: 3.6759 sec. Iteration 500 || Loss: 28.8140 || 10iter: 3.6906 sec. Iteration 510 || Loss: 26.1486 || 10iter: 3.7936 sec. Iteration 520 || Loss: 32.1701 || 10iter: 3.6670 sec. Iteration 530 || Loss: 31.0261 || 10iter: 3.8067 sec. Iteration 540 || Loss: 27.1796 || 10iter: 3.7112 sec. Iteration 550 || Loss: 24.2448 || 10iter: 3.7638 sec. Iteration 560 || Loss: 27.7339 || 10iter: 3.7543 sec. Iteration 570 || Loss: 23.3498 || 10iter: 3.6529 sec. Iteration 580 || Loss: 32.3050 || 10iter: 3.7352 sec. Iteration 590 || Loss: 27.8575 || 10iter: 3.7011 sec. Iteration 600 || Loss: 21.6059 || 10iter: 3.5214 sec. ------------- epoch 3 || Epoch_TRAIN_Loss:5652.9033 ||Epoch_VAL_Loss:0.0000 timer: 78.0294 sec. lr is: 0.0001 ------------- Epoch 4/200 ------------- train Iteration 610 || Loss: 23.8166 || 10iter: 2.9462 sec. Iteration 620 || Loss: 24.8189 || 10iter: 4.4583 sec. Iteration 630 || Loss: 26.4088 || 10iter: 3.6203 sec. Iteration 640 || Loss: 27.3839 || 10iter: 3.7240 sec. Iteration 650 || Loss: 22.4067 || 10iter: 3.7042 sec. Iteration 660 || Loss: 28.5184 || 10iter: 3.7153 sec. Iteration 670 || Loss: 29.9719 || 10iter: 3.6892 sec. Iteration 680 || Loss: 17.7375 || 10iter: 3.6219 sec. Iteration 690 || Loss: 28.9028 || 10iter: 3.7485 sec. Iteration 700 || Loss: 23.0245 || 10iter: 3.7214 sec. Iteration 710 || Loss: 22.2653 || 10iter: 3.7364 sec. Iteration 720 || Loss: 32.3164 || 10iter: 3.6569 sec. Iteration 730 || Loss: 30.7008 || 10iter: 3.7129 sec. Iteration 740 || Loss: 34.7682 || 10iter: 3.6934 sec. Iteration 750 || Loss: 27.6331 || 10iter: 3.6920 sec. Iteration 760 || Loss: 26.6333 || 10iter: 3.6563 sec. Iteration 770 || Loss: 28.2584 || 10iter: 3.7518 sec. Iteration 780 || Loss: 22.3539 || 10iter: 3.7785 sec. Iteration 790 || Loss: 22.2334 || 10iter: 3.6418 sec. Iteration 800 || Loss: 22.9183 || 10iter: 3.5588 sec. Iteration 810 || Loss: 30.4287 || 10iter: 3.3993 sec. 
------------- epoch 4 || Epoch_TRAIN_Loss:5398.4665 ||Epoch_VAL_Loss:0.0000 timer: 77.6978 sec. lr is: 0.0001 ------------- Epoch 5/200 ------------- train Iteration 820 || Loss: 21.7738 || 10iter: 6.1706 sec. Iteration 830 || Loss: 24.7067 || 10iter: 3.7465 sec. Iteration 840 || Loss: 23.0346 || 10iter: 3.9047 sec. Iteration 850 || Loss: 25.1425 || 10iter: 3.8610 sec. Iteration 860 || Loss: 22.2893 || 10iter: 3.7681 sec. Iteration 870 || Loss: 24.6029 || 10iter: 3.7965 sec. Iteration 880 || Loss: 28.9262 || 10iter: 3.5842 sec. Iteration 890 || Loss: 22.1613 || 10iter: 3.6501 sec. Iteration 900 || Loss: 19.9273 || 10iter: 3.6386 sec. Iteration 910 || Loss: 23.3924 || 10iter: 3.6513 sec. Iteration 920 || Loss: 31.6503 || 10iter: 3.7264 sec. Iteration 930 || Loss: 30.3737 || 10iter: 3.6577 sec. Iteration 940 || Loss: 28.3220 || 10iter: 3.6876 sec. Iteration 950 || Loss: 29.8245 || 10iter: 3.6231 sec. Iteration 960 || Loss: 22.3884 || 10iter: 3.6718 sec. Iteration 970 || Loss: 25.8995 || 10iter: 3.7057 sec. Iteration 980 || Loss: 29.6855 || 10iter: 3.7506 sec. Iteration 990 || Loss: 21.9468 || 10iter: 3.6755 sec. Iteration 1000 || Loss: 23.9600 || 10iter: 3.6474 sec. Iteration 1010 || Loss: 25.7840 || 10iter: 3.3764 sec. ------------- val ------------- epoch 5 || Epoch_TRAIN_Loss:5264.1710 ||Epoch_VAL_Loss:1450.5376 timer: 89.4748 sec. lr is: 0.0001 ------------- Epoch 6/200 ------------- train Iteration 1020 || Loss: 22.5001 || 10iter: 5.0447 sec. Iteration 1030 || Loss: 25.0749 || 10iter: 3.7316 sec. Iteration 1040 || Loss: 29.6452 || 10iter: 3.6837 sec. Iteration 1050 || Loss: 25.7257 || 10iter: 3.6741 sec. Iteration 1060 || Loss: 27.7900 || 10iter: 3.7240 sec. Iteration 1070 || Loss: 26.8757 || 10iter: 3.6883 sec. Iteration 1080 || Loss: 20.8379 || 10iter: 3.6849 sec. Iteration 1090 || Loss: 23.5449 || 10iter: 3.6817 sec. Iteration 1100 || Loss: 23.2037 || 10iter: 3.7220 sec. Iteration 1110 || Loss: 17.9627 || 10iter: 3.7103 sec. Iteration 1120 || Loss: 28.4751 || 10iter: 3.6421 sec. Iteration 1130 || Loss: 25.7973 || 10iter: 3.6217 sec. Iteration 1140 || Loss: 27.6130 || 10iter: 3.6806 sec. Iteration 1150 || Loss: 29.4225 || 10iter: 3.7231 sec. Iteration 1160 || Loss: 30.1665 || 10iter: 3.6930 sec. Iteration 1170 || Loss: 24.2821 || 10iter: 3.6794 sec. Iteration 1180 || Loss: 25.2625 || 10iter: 3.7185 sec. Iteration 1190 || Loss: 29.1724 || 10iter: 3.6529 sec. Iteration 1200 || Loss: 24.4612 || 10iter: 3.6940 sec. Iteration 1210 || Loss: 27.7585 || 10iter: 3.4037 sec. ------------- epoch 6 || Epoch_TRAIN_Loss:5155.2018 ||Epoch_VAL_Loss:0.0000 timer: 77.3692 sec. lr is: 0.0001 ------------- Epoch 7/200 ------------- train Iteration 1220 || Loss: 22.8126 || 10iter: 3.5321 sec. Iteration 1230 || Loss: 25.3862 || 10iter: 4.2789 sec. Iteration 1240 || Loss: 22.2603 || 10iter: 3.6760 sec. Iteration 1250 || Loss: 21.1276 || 10iter: 3.6515 sec. Iteration 1260 || Loss: 30.5461 || 10iter: 3.6758 sec. Iteration 1270 || Loss: 26.6565 || 10iter: 3.8240 sec. Iteration 1280 || Loss: 23.4265 || 10iter: 3.8353 sec. Iteration 1290 || Loss: 26.0472 || 10iter: 3.7313 sec. Iteration 1300 || Loss: 24.8107 || 10iter: 3.7191 sec. Iteration 1310 || Loss: 21.1861 || 10iter: 3.6871 sec. Iteration 1320 || Loss: 21.8057 || 10iter: 3.7782 sec.
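###Markdown Monitoring training (a minimal sketch, not part of the original notebook): train_model above appends one row per epoch to log/log_output.csv with exactly the columns defined in log_epoch (epoch, train_loss, val_loss). Assuming that file exists, the curves could be plotted as below; dropping the placeholder 0.0 validation rows is an assumption, since validation only runs every 5 epochs. ###Code
import pandas as pd
import matplotlib.pyplot as plt

# Load the per-epoch log written by train_model (columns: epoch, train_loss, val_loss).
log = pd.read_csv("log/log_output.csv")

fig, ax = plt.subplots(figsize=(8, 4))
ax.plot(log["epoch"], log["train_loss"], label="train loss")

# Validation only runs every 5 epochs; the other rows hold a placeholder 0.0, so drop them.
val_log = log[log["val_loss"] > 0]
ax.plot(val_log["epoch"], val_log["val_loss"], label="val loss")

ax.set_xlabel("epoch")
ax.set_ylabel("summed MultiBox loss")
ax.legend()
plt.show()
###Output _____no_output_____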
wids-datathon-2020/notebooks/7.1.0-iwong-feature-eng.ipynb
###Markdown Read Data ###Code import pandas as pd from pathlib import Path import pickle from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import LabelEncoder import bisect import numpy as np from itertools import combinations input_filepath = '../data/interim/' output_filepath = '../data/processed/' # cols BINARY_COLS = Path.cwd().joinpath(input_filepath).joinpath('binary-cols.pickle') CATEGORICAL_COLS = Path.cwd().joinpath(input_filepath).joinpath('categorical-cols.pickle') CONTINUOUS_COLS = Path.cwd().joinpath(input_filepath).joinpath('continuous-cols.pickle') TARGET_COL = Path.cwd().joinpath(input_filepath).joinpath('target-col.pickle') BINARY_COLS_OUT = Path.cwd().joinpath(output_filepath).joinpath('binary-cols.pickle') CATEGORICAL_COLS_OUT = Path.cwd().joinpath(output_filepath).joinpath('categorical-cols.pickle') CONTINUOUS_COLS_OUT = Path.cwd().joinpath(output_filepath).joinpath('continuous-cols.pickle') TARGET_COL_OUT = Path.cwd().joinpath(output_filepath).joinpath('target-col.pickle') # data TRAIN_CSV = Path.cwd().joinpath(input_filepath).joinpath('train.csv') VAL_CSV = Path.cwd().joinpath(input_filepath).joinpath('val.csv') TEST_CSV = Path.cwd().joinpath(input_filepath).joinpath('test.csv') TRAIN_CSV_OUT = Path.cwd().joinpath(output_filepath).joinpath('train.csv') VAL_CSV_OUT = Path.cwd().joinpath(output_filepath).joinpath('val.csv') TEST_CSV_OUT = Path.cwd().joinpath(output_filepath).joinpath('test.csv') # metadata BINARY_ENCODERS = Path.cwd().joinpath(output_filepath).joinpath('binary-encoders.pickle') CATEGORICAL_ENCODERS = Path.cwd().joinpath(output_filepath).joinpath('categorical-encoders.pickle') TARGET_ENCODERS = Path.cwd().joinpath(output_filepath).joinpath('target-encoders.pickle') CONTINUOUS_SCALERS = Path.cwd().joinpath(output_filepath).joinpath('continuous-scalers.pickle') def read_obj(path): with open(path, 'rb') as f: return pickle.load(f) return None binary_cols = read_obj(BINARY_COLS) categorical_cols = read_obj(CATEGORICAL_COLS) continuous_cols = read_obj(CONTINUOUS_COLS) target_col = read_obj(TARGET_COL) train = pd.read_csv(TRAIN_CSV) val = pd.read_csv(VAL_CSV) test = pd.read_csv(TEST_CSV) [x for x in categorical_cols if 'id' in x] +[x for x in continuous_cols if 'id' in x] len(list(combinations(categorical_cols, 2))) len(list(combinations(binary_cols, 2))) len(list(combinations(continuous_cols, 2))) ###Output _____no_output_____ ###Markdown Label Engineering ###Code pair_cols = ['ethnicity', 'gender', 'hospital_admit_source', 'icu_admit_source', 'icu_stay_type', 'icu_type', 'apache_3j_bodysystem', 'apache_2_bodysystem'] cmbs = list(combinations(pair_cols, 2)) len(list(combinations(pair_cols, 3))) def concat_columns(df, columns): value = df[columns[0]].astype(str) + ' ' for col in columns[1:]: value += df[col].astype(str) + ' ' return value len(cmbs) combo_cols = list() for cols in cmbs: col_name = f'paired_{"_".join(cols)}' combo_cols.append(col_name) train[col_name] = concat_columns(train, cols) val[col_name] = concat_columns(val, cols) test[col_name] = concat_columns(test, cols) categorical_cols.extend(combo_cols) combo_cols = list() for cols in list(combinations(binary_cols, 2)): col_name = f'paired_{"_".join(cols)}' combo_cols.append(col_name) train[col_name] = concat_columns(train, cols) val[col_name] = concat_columns(val, cols) test[col_name] = concat_columns(test, cols) categorical_cols.extend(combo_cols) # aggregate icu train['hospital_admit_source_is_icu'] = train['hospital_admit_source'].apply( lambda x: 'True' if x in [ 
'Other ICU', 'ICU to SDU', 'ICU' ] else 'False') val['hospital_admit_source_is_icu'] = val['hospital_admit_source'].apply( lambda x: 'True' if x in [ 'Other ICU', 'ICU to SDU', 'ICU' ] else 'False') test['hospital_admit_source_is_icu'] = test['hospital_admit_source'].apply( lambda x: 'True' if x in [ 'Other ICU', 'ICU to SDU', 'ICU' ] else 'False') categorical_cols.append('hospital_admit_source_is_icu') # aggregate ethnicity common_cols = [np.nan, 'Other/Unknown'] train['ethnicity_is_unknown'] = train['ethnicity'].apply(lambda x: True if x in common_cols else False) val['ethnicity_is_unknown'] = val['ethnicity'].apply(lambda x: True if x in common_cols else False) test['ethnicity_is_unknown'] = test['ethnicity'].apply(lambda x: True if x in common_cols else False) categorical_cols.append('ethnicity_is_unknown') # aggregate cardiac common_cols = ['CTICU', 'CCU-CTICU', 'Cardiac ICU', 'CSICU'] train['icu_type_is_cardiac'] = train['icu_type'].apply(lambda x: True if x in common_cols else False) val['icu_type_is_cardiac'] = val['icu_type'].apply(lambda x: True if x in common_cols else False) test['icu_type_is_cardiac'] = test['icu_type'].apply(lambda x: True if x in common_cols else False) categorical_cols.append('icu_type_is_cardiac') # aggregate apache_2_bodysystem common_cols = ['Undefined Diagnoses', np.nan, 'Undefined diagnoses'] train['apache_2_bodysystem_is_undefined'] = train['apache_2_bodysystem'].apply(lambda x: True if x in common_cols else False) val['apache_2_bodysystem_is_undefined'] = val['apache_2_bodysystem'].apply(lambda x: True if x in common_cols else False) test['apache_2_bodysystem_is_undefined'] = test['apache_2_bodysystem'].apply(lambda x: True if x in common_cols else False) categorical_cols.append('apache_2_bodysystem_is_undefined') ###Output _____no_output_____ ###Markdown Typify ###Code train[continuous_cols] = train[continuous_cols].astype('float32') val[continuous_cols] = val[continuous_cols].astype('float32') test[continuous_cols] = test[continuous_cols].astype('float32') train[categorical_cols] = train[categorical_cols].astype('str').astype('category') val[categorical_cols] = val[categorical_cols].astype('str').astype('category') test[categorical_cols] = test[categorical_cols].astype('str').astype('category') train[binary_cols] = train[binary_cols].astype('str').astype('category') val[binary_cols] = val[binary_cols].astype('str').astype('category') test[binary_cols] = test[binary_cols].astype('str').astype('category') train[target_col] = train[target_col].astype('str').astype('category') val[target_col] = val[target_col].astype('str').astype('category') test[target_col] = test[target_col].astype('str').astype('category') ###Output _____no_output_____ ###Markdown Dropna ###Code train = train.dropna(how='all') val = val.dropna(how='all') test = test.dropna(how='all') confounding_cols = ['apache_4a_hospital_death_prob', 'apache_4a_icu_death_prob'] # remove confounding vars - biases and undue variance for x in confounding_cols: continuous_cols.remove(x) train = train[[target_col] + continuous_cols + categorical_cols + binary_cols] val = val[[target_col] + continuous_cols + categorical_cols + binary_cols] test = test[[target_col] + continuous_cols + categorical_cols + binary_cols] ###Output _____no_output_____ ###Markdown Fill Data ###Code def fill(df, cols, fillers=None): if None is fillers: fillers = dict() for col in cols: for hospital_id in df['hospital_id'].unique(): subset = df[df['hospital_id'] == hospital_id] hospital_col_key = f'{col}_{hospital_id}' if 
hospital_col_key not in fillers: fillers[hospital_col_key] = subset[col].dropna().median() fillers[col] = df[col].dropna().median() for col in cols: print(f'filling {col}') for hospital_id in df['hospital_id'].unique(): print(f'filling {col} - {hospital_id}') subset = df[df['hospital_id'] == hospital_id] hospital_col_key = f'{col}_{hospital_id}' if hospital_col_key in fillers: df.loc[df['hospital_id'] == hospital_id, col] = subset[col].fillna(fillers[hospital_col_key]) else: df.loc[df['hospital_id'] == hospital_id, col] = subset[col].fillna(fillers[col]) df.loc[df['hospital_id'] == hospital_id, f'{col}_na'] = pd.isnull(subset[col]) return df, fillers train, fillers = fill(train, continuous_cols) val, _ = fill(val, continuous_cols, fillers) test, _ = fill(test, continuous_cols, fillers) categorical_cols.extend([f'{x}_na' for x in continuous_cols]) ###Output _____no_output_____ ###Markdown Normalize ###Code def normalize(df, cols, scalers=None): if None is scalers: scalers = dict() for col in cols: if col not in scalers: scalers[col] = StandardScaler(with_mean=True, with_std=True) scalers[col].fit(df[col].values.reshape(-1,1)) scaler = scalers[col] df[col] = scaler.transform(df[col].values.reshape(-1,1)) return df, scalers train, scalers = normalize(train, continuous_cols) val, _ = normalize(val, continuous_cols, scalers) test, _ = normalize(test, continuous_cols, scalers) train[continuous_cols].head() ###Output _____no_output_____ ###Markdown Label Encode ###Code train[categorical_cols] = train[categorical_cols].astype('str').astype('category') val[categorical_cols] = val[categorical_cols].astype('str').astype('category') test[categorical_cols] = test[categorical_cols].astype('str').astype('category') def labelencode(df, cols, encoders=None, unknown_value='UNK'): if None is encoders: encoders = dict() for col in cols: if col not in encoders: le = LabelEncoder() le.fit(df[col].values) # add unknown val to cats cats = le.classes_.tolist() bisect.insort_left(cats, unknown_value) # redefine cats on le le.classes_ = np.asarray(cats) encoders[col] = le le = encoders[col] df[col] = df[col].map(lambda x: unknown_value if x not in le.classes_ else x) df[col] = le.transform(df[col].values) return df, encoders train, label_encoders = labelencode(train, categorical_cols) val, _ = labelencode(val, categorical_cols, label_encoders) test, _ = labelencode(test, categorical_cols, label_encoders) train[categorical_cols].head() ###Output _____no_output_____ ###Markdown One-Hot Encode ###Code # todo - not necessary with CatBoost, plus CatBoost will tune the cats which would become ohe train, ohe_encoders = labelencode(train, binary_cols) val, _ = labelencode(val, binary_cols, ohe_encoders) test, _ = labelencode(test, binary_cols, ohe_encoders) train[binary_cols].head() ###Output _____no_output_____ ###Markdown Label Encode Targets ###Code train, target_encoders = labelencode(train, [target_col]) val, _ = labelencode(val, [target_col], target_encoders) test, _ = labelencode(test, [target_col], target_encoders) train[target_col].head() ###Output _____no_output_____ ###Markdown Persist Data and Metadata ###Code def pickle_obj(path, obj): with open(path, 'wb') as f: pickle.dump(obj, f) # cols pickle_obj(BINARY_COLS_OUT, binary_cols) pickle_obj(CATEGORICAL_COLS_OUT, categorical_cols) pickle_obj(CONTINUOUS_COLS_OUT, continuous_cols) pickle_obj(TARGET_COL_OUT, target_col) # metadata pickle_obj(BINARY_ENCODERS, ohe_encoders) pickle_obj(CATEGORICAL_ENCODERS, label_encoders) pickle_obj(TARGET_ENCODERS, target_encoders) 
pickle_obj(CONTINUOUS_SCALERS, scalers) # data train.to_csv(TRAIN_CSV_OUT, index=False) val.to_csv(VAL_CSV_OUT, index=False) test.to_csv(TEST_CSV_OUT, index=False) ###Output _____no_output_____
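###Markdown Reusing the persisted artifacts (a minimal sketch, not part of the original notebook): the pickled scalers and encoders saved above are what a scoring pipeline would reload to preprocess new encounters and to decode model outputs. The example assumes the notebook namespace above (read_obj, the artifact paths, continuous_cols, target_col) and uses made-up input values purely for illustration; real use would first re-apply the same label-engineering and fill steps to the new data. ###Code
import numpy as np

# Reload the fitted preprocessing artifacts persisted above.
scalers = read_obj(CONTINUOUS_SCALERS)
target_encoders = read_obj(TARGET_ENCODERS)

# Scale a couple of made-up raw values for one continuous column with the scaler fit on train.
col = continuous_cols[0]
print(col, scalers[col].transform(np.array([[45.0], [70.0]])))

# Map integer predictions from a downstream model back to the original target labels
# (the classes include the injected 'UNK' placeholder alongside the original label values).
le = target_encoders[target_col]
print(le.classes_)
print(le.inverse_transform(np.array([0, 1, 1, 0])))
###Output _____no_output_____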