path (string, length 7 to 265)
concatenated_notebook (string, length 46 to 17M)
Improving Performance with Algorithm Tuning.ipynb
###Markdown Improving Performance with Algorithm Tuning
* Models are parametrized so that their behaviour can be tuned for a given problem.
* Models can have many parameters, and finding the best combination of parameters can be treated as a search problem.
* Algorithm tuning is a final step in the process of applied machine learning, before finalizing our model.
* Phrased as a search problem, we can use different search strategies to find a good and robust parameter or set of parameters for an algorithm on a given problem.

Two simple methods for algorithm parameter tuning:
1. **Grid Search Parameter Tuning**
2. **Random Search Parameter Tuning**

1. Grid Search Parameter Tuning
* It will methodically build and evaluate a model for each combination of algorithm parameters specified in a grid. ###Code
# Grid Search for Algorithm Tuning
import numpy
from pandas import read_csv
from sklearn.linear_model import Ridge
from sklearn.model_selection import GridSearchCV

filename = 'pima-indians-diabetes.data.csv'
names = ['preg','plas','pres','skin','test','mass','pedi','age','class']
dataframe = read_csv(filename, names=names)
array = dataframe.values
X = array[:,0:8]
Y = array[:,8]

alphas = numpy.array([1,0.1,0.01,0.001,0.0001,0])
param_grid = dict(alpha=alphas)
model = Ridge()
grid = GridSearchCV(estimator=model, param_grid=param_grid)
grid.fit(X,Y)
print(grid.best_score_)
print(grid.best_estimator_.alpha)
# alpha = 1 achieved the best score
###Output 0.2761084412929244 1.0 ###Markdown 2. Random Search Parameter Tuning
* It will sample algorithm parameters from a random (uniform) distribution for a fixed number of iterations.
* A model is constructed and evaluated for each combination of parameters chosen. ###Code
# Randomized Search for Algorithm Tuning
import numpy
from pandas import read_csv
from scipy.stats import uniform
from sklearn.linear_model import Ridge
from sklearn.model_selection import RandomizedSearchCV

param_grid = {'alpha': uniform()}
model = Ridge()
rsearch = RandomizedSearchCV(estimator=model, param_distributions=param_grid, n_iter=100, random_state=None)
rsearch.fit(X,Y)
print(rsearch.best_score_)
print(rsearch.best_estimator_.alpha)
###Output 0.2761083261885774 0.9971291645907566
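###Markdown A short follow-up sketch (assuming the `grid` object fitted above is still in memory): rather than reading off only the best alpha, the full cross-validation table can be inspected per candidate value. ###Code
import pandas as pd

# cv_results_ is a standard attribute of a fitted GridSearchCV object.
results = pd.DataFrame(grid.cv_results_)
print(results[['param_alpha', 'mean_test_score', 'std_test_score', 'rank_test_score']])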
TESS_Prep.ipynb
###Markdown At this point, I like to open the sheet and clean it up:1. Delete the empty "Sheet1" default table, or else rename it and repurpose it for whatever (e.g., pasting in a Gaia EDR3 cross-match).2. Go to "Targets" and bold the column names, then go to View>Freeze>1row to lock it in place so you can see columns while scrolling down. You can do these things from python here, but I haven't coded that in. ###Code # Once the Sheet is populated, you can download the TESS FFI data. This will create a number of subfolders in the project folder. # I'm setting a brighter threshold to reduce the number of files downloaded while testing tess_tools.download_tess(project_name,mag_cut=16.0) # This will use the FFIs to create CPM light curves, saved as *csv files stored in the Projects/[project]/CPM/ folder. tess_tools.make_cpm(project_name) # This will use the FFIs to create SAP light curves (simple aperture photometry), saved as *csv files stored in the Projects/[project]/SAP/ folder. tess_tools.make_sap(project_name) ###Output _____no_output_____
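###Markdown A quick sketch for checking the files written by the steps above (it only assumes the Projects/[project]/CPM/ layout described in the comments; no column names are assumed, since they depend on what `tess_tools` writes): ###Code
import glob
import pandas as pd

# List the CPM light-curve CSVs created by make_cpm for this project.
cpm_files = sorted(glob.glob('Projects/{}/CPM/*.csv'.format(project_name)))
print(len(cpm_files), 'CPM light curves found')

# Peek at the first file without assuming its column layout.
if cpm_files:
    print(pd.read_csv(cpm_files[0]).head())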
src/stacking/notebooks/cross_section/RealOperation/zz800_run_realtime.ipynb
###Markdown 基础因子加alpha191实时计算---中证800 中证800选股策略 ###Code %matplotlib inline import sys sys.path.append('../') sys.path.append('../../') sys.path.append('../../../') sys.path.append('../../../../') import pandas as pd import numpy as np import seaborn as sns from matplotlib import pyplot as plt from PyFin.api import * from alphamind.api import * from conf.models import * from conf.config import* from data.engines.model import Record from alphamind.execution.naiveexecutor import NaiveExecutor from stacking import factor_store, feature_list from optimization.bayes_optimization_xgb import * pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', None) pd.set_option('max_colwidth',100) # 指定需要执行的日期和保存的文件夹 # start_date = '2010-01-19' # 训练集的起始时间 start_date = '2019-10-18' # 训练集的起始时间 end_date = '2019-12-27' # 设置保存的文件目录 weekly = '800st' # s1: 周一, s2: 周二, s3: 周三, s4: 周四, s5: 周五 universe = Universe('zz500')+ Universe('hs300') freq = '5b' benchmark_code = 906 ref_dates = makeSchedule(start_date, end_date, freq, 'china.sse') horizon = map_freq(freq) industry_name = 'sw' industry_level = 1 # 根据索引删除某些日期 trash_date_list = [datetime(2020, 1, 1, 0, 0)] a = [] for i in trash_date_list: try: del(ref_dates[ref_dates.index(i)]) except: continue # 前一个调仓日, 用于获取前一个调仓日的持仓信息 ref_date_pre = ref_dates[-2] # 当前调仓日 ref_date = ref_dates[-1] # 因子数据库 data_source = alpha_db engine = SqlEngine(data_source) ref_dates # uqer因子列表 basic_factor_store = factor_store.basic_factor_store # alpha191因子列表 alpha_factor_store = factor_store.alpha_factor_store %%time # 提取Uqer因子 basic_factor_org = engine.fetch_factor_range(universe, basic_factor_store, dates=ref_dates) # 提取alpha191因子 # alpha191_factor_org = engine.fetch_factor_range(universe, # alpha_factor_store, # dates=ref_dates, # used_factor_tables=[Alpha191]).drop(['chgPct','secShortName'], axis=1) # # 合并所有的因子 # factor_data_org = pd.merge(basic_factor_org, alpha191_factor_org, on=['trade_date', 'code'], how='outer') factor_data_org = basic_factor_org set(factor_data_org['trade_date']) len(factor_data_org) assert len(set(factor_data_org['trade_date'])) == len(ref_dates) # 因子预处理 ## 确失值填充 factor_mean = factor_data_org.mean() factor_std = factor_data_org.std() factor_data_org = factor_data_org.fillna(factor_mean) %%time # 获取行业数据 industry = engine.fetch_industry_range(universe, dates=ref_dates) # factor_data = pd.merge(factor_data_org, industry, on=['trade_date', 'code']).fillna(0.) 
factor_data = pd.merge(factor_data_org, industry, on=['trade_date', 'code']) # 获取风险因子 risk_total = engine.fetch_risk_model_range(universe, dates=ref_dates)[1] %%time return_data = engine.fetch_dx_return_range(universe, dates=ref_dates, horizon=horizon, offset=0,benchmark = benchmark_code) return_data.tail() %%time benchmark_total = engine.fetch_benchmark_range(dates=ref_dates, benchmark=benchmark_code) industry_total = engine.fetch_industry_matrix_range(universe, dates=ref_dates, category=industry_name, level=industry_level) train_data = pd.merge(factor_data, return_data, on=['trade_date', 'code']).dropna() len(train_data) # 获取特征名 # uqer feature features = list(basic_factor_store.keys()) # alpha features # alpha_features = list(alpha_factor_store.keys()) # features = feature_list.uqer_features # alpha_features = feature_list.alpha_features # features.extend(alpha_features) label = ['dx'] from datetime import datetime, timedelta from models.m1_xgb import * from conf.configuration import xgb_conf from data.engines.model import Record import xgboost as xgb import gc alpha_logger.info('{0} is start'.format(ref_date)) # machine learning model ## Filter Training data ## 训练集构造 trade_date_pre = ref_date - timedelta(days=1) # trade_date_pre_80 = ref_date - timedelta(days=80) ## 1、选择调仓日当天之前(不含当天)并且在80天以内的因子数据作为训练集. # train = train_data[(train_data.trade_date <= trade_date_pre) & (trade_date_pre_80 <= train_data.trade_date)].dropna() ## 2、选择调仓日当天之前(不含当天)的因子数据作为训练集. train = train_data[train_data.trade_date <= trade_date_pre].dropna() alpha_logger.info('trade_date_pre {0}'.format(trade_date_pre)) if len(train) <= 0: alpha_logger.info('{0} HAS NO TRAIN DATA!!!'.format(ref_date)) x_train = train[features] y_train = train[label] alpha_logger.info('len_x_train: {0}, len_y_train: {1}'.format(len(x_train.values), len(y_train.values))) alpha_logger.info('X_train.shape={0}, X_test.shape = {1}'.format(np.shape(x_train), np.shape(y_train))) # load xgboost regression configuration xgb_conf.xgb_config_r() xgb_conf.cv_folds = None xgb_conf.early_stop_round = 100 xgb_conf.max_round = 800 xgb_conf.params.update({'nthread': 12}) GPU_device = False if GPU_device: # use GPUs xgb_conf.params.update({'tree_method': 'gpu_hist'}) alpha_logger.info("params before: {}".format(xgb_conf.params)) tic = time.time() # hyper_parameters optimization # opt_parameters = {'max_depth': (2, 12), # 'gamma': (0.001, 10.0), # 'min_child_weight': (0, 20), # 'max_delta_step': (0, 10), # 'subsample': (0.01, 0.99), # 'colsample_bytree': (0.01, 0.99) # } # opt_xgb = BayesOptimizationXGB('regression', x_train, y_train) # params_op = opt_xgb.train_opt(opt_parameters) # xgb_conf.params.update(params_op) alpha_logger.info("params after: {}".format(xgb_conf.params)) alpha_logger.info("hyper params optimize time : {}".format(time.time() - tic)) # model training xgb_model = XGBooster(xgb_conf) alpha_logger.info('xgb_model params: \n{0}'.format(xgb_model.get_params())) best_score, best_round, best_model = xgb_model.fit(x_train, y_train) alpha_logger.info('Training time cost {}s'.format(time.time() - tic)) alpha_logger.info('best_score = {}, best_round = {}'.format(best_score, best_round)) # 取调仓日当天的因子数据作为输入. 
# total_data_test_excess = train_data[train_data.trade_date == str(ref_date)] total_data_test_excess = factor_data[factor_data.trade_date == ref_date] if len(total_data_test_excess) <=0: alpha_logger.info('{} HAS NO DATA!!!'.format(ref_date)) sys.exit() alpha_logger.info('{0} total_data_test_excess: {1}'.format(ref_date, len(total_data_test_excess))) # 获取调仓日当天的行业, 风险模型和基准权重数据 industry_matrix = industry_total[industry_total.trade_date == ref_date] benchmark_weight = benchmark_total[benchmark_total.trade_date == ref_date] risk_matrix = risk_total[risk_total.trade_date == ref_date] total_data = pd.merge(industry_matrix, benchmark_weight, on=['code'], how='left').fillna(0.) total_data = pd.merge(total_data, risk_matrix, on=['code']) alpha_logger.info('{0} type_of_total_data: {1}'.format(ref_date, type(total_data))) alpha_logger.info('{0} shape_of_total_data: {1}'.format(ref_date, np.shape(total_data))) total_data_test_excess = pd.merge(total_data, total_data_test_excess, on=['code']) alpha_logger.info('{0} len_of_total_data_test_excess: {1}'.format(ref_date, len(total_data_test_excess))) # 股票代码 codes = total_data_test_excess.code.values.tolist() # predict # alpha_logger.info('total_data_test_excess: \n{}'.format(total_data_test_excess[['weight', 'code', 'industry']])) x_pred = total_data_test_excess[features] predict_xgboost = xgb_model.predict(best_model, x_pred) # alpha_logger.info('predict_xgboost: {}'.format(predict_xgboost)) a = np.shape(predict_xgboost) predict_xgboost = np.reshape(predict_xgboost, (a[0], -1)).astype(np.float64) alpha_logger.info('shape_of_predict_xgboost: {}'.format(np.shape(predict_xgboost))) # 收益率预测结果 predict_xgboost_df = pd.DataFrame({'xgb_pre': list(predict_xgboost.reshape(-1))}) predict_xgboost_df['trade_date'] = ref_date predict_xgboost_df['code'] = codes predict_xgboost_df['code'] = predict_xgboost_df['code'].apply(lambda x: "{:06d}".format(x) + '.XSHG' if len(str(x))==6 and str(x)[0] in '6' else "{:06d}".format(x) + '.XSHE') from data.engines.sqlengine import SQLEngine # 获取当前持仓 record_engine = SQLEngine('sqlite:///./{}/real_tune_record_without_alpha.db'.format(weekly)) try: pos_record = record_engine.fetch_record('pos_record') previous_pos = pos_record[pos_record['trade_date'] == ref_date_pre] except Exception as e: alpha_logger.info('pos_record Exception:{0}'.format(e)) previous_pos = pd.DataFrame({'trade_date':[], 'weight':[],'industry':[], 'er':[],'code':[]}) alpha_logger.info('previous_pos_data: {0}, pos_len: {1}'.format(ref_date_pre, len(previous_pos))) # 股票过滤, 组合优化之前过滤掉(未完成) ## 9:00--9:25之间进行涨跌停股票的实时筛选 # 导入昨持仓并与股票池中所有股票合并, if len(previous_pos) <= 0: current_position = None else: previous_pos = total_data_test_excess[['code']].merge(previous_pos, on=['code'], how='left',).fillna(0) current_position = previous_pos.weight.values alpha_logger.info('previous_pos:\n {}'.format(previous_pos)) # previous_pos = total_data_test_excess[['code']].merge(previous_pos, on=['code'], how='left').fillna(0) # current_position = previous_pos.weight.values # print(current_position.shape) # print(total_data_test_excess.shape) # print(previous_pos.shape) current_position # Constraintes settings industry_names = industry_list(industry_name, industry_level) constraint_risk = ['EARNYILD', 'LIQUIDTY', 'GROWTH', 'SIZE', 'SIZENL', 'BETA', 'MOMENTUM'] + industry_names filter_names = industry_names = ['银行','房地产','保险','证券','多元金融'] total_risk_names = constraint_risk + ['benchmark', 'total'] b_type = [] l_val = [] u_val = [] for name in total_risk_names: if name == 'benchmark': 
b_type.append(BoundaryType.RELATIVE) l_val.append(0.0) u_val.append(1.0) elif name == 'total': b_type.append(BoundaryType.ABSOLUTE) l_val.append(-0.0) u_val.append(0.0) elif name == 'SIZE': b_type.append(BoundaryType.ABSOLUTE) l_val.append(-1.0) u_val.append(1.0) elif name == 'SIZENL': b_type.append(BoundaryType.ABSOLUTE) l_val.append(-1.0) u_val.append(1.0) elif name in industry_names: b_type.append(BoundaryType.ABSOLUTE) l_val.append(-0.005) u_val.append(0.005) else: b_type.append(BoundaryType.ABSOLUTE) l_val.append(-2.0) u_val.append(2.0) bounds = create_box_bounds(total_risk_names, b_type, l_val, u_val) benchmark_w = total_data_test_excess.weight.values alpha_logger.info('type_of_benchmark_w: {}, shape_of_benchmark_w: {}'.format(type(benchmark_w), np.shape(benchmark_w))) is_in_benchmark = (benchmark_w > 0.).astype(float).reshape((-1, 1)) # 风险模型数据合并 weight_gap = 0.02 total_risk_exp = np.concatenate([total_data_test_excess[constraint_risk].values.astype(float), is_in_benchmark, np.ones_like(is_in_benchmark)], axis=1) alpha_logger.info('shape_of_total_risk_exp_pre: {}'.format(np.shape(total_risk_exp))) total_risk_exp = pd.DataFrame(total_risk_exp, columns=total_risk_names) alpha_logger.info('shape_of_total_risk_exp: {}'.format(np.shape(total_risk_exp))) constraints = LinearConstraints(bounds, total_risk_exp, benchmark_w) alpha_logger.info('constraints: {0} in {1}'.format(np.shape(constraints.risk_targets()), ref_date)) lbound = np.maximum(0., benchmark_w - weight_gap) ubound = weight_gap + benchmark_w alpha_logger.info('lbound: {0} in {1}'.format(np.shape(lbound), ref_date)) alpha_logger.info('ubound: {0} in {1}'.format(np.shape(ubound), ref_date)) # 组合优化 executor = NaiveExecutor() current_pos = pd.DataFrame() target_pos, _ = er_portfolio_analysis(predict_xgboost, total_data_test_excess['industry'].values, None, constraints, False, benchmark_w, method='risk_neutral', lbound=lbound, ubound=ubound, turn_over_target=0.5, current_position=current_position) alpha_logger.info('shape_of_target_pos: {}'.format(np.shape(target_pos))) alpha_logger.info('len_codes:{}'.format(np.shape(codes))) target_pos['code'] = codes # alpha_logger.info('target_pos: \n{}'.format(target_pos)) # 换手率计算 executor.set_current(previous_pos) turn_over_org, current_pos = executor.execute(target_pos=target_pos) alpha_logger.info('turn_over_org: {}'.format(turn_over_org)) turn_over = turn_over_org / sum(target_pos.weight.values) alpha_logger.info('turn_over: {}'.format(turn_over)) # 优化后仓位信息 current_pos['trade_date'] = ref_date alpha_logger.info('{} is finished'.format(ref_date)) # 修改code格式 # 取TOP N作为真实的下单股票 real_pos = current_pos.sort_values(by='weight', ascending=False)[:50] real_pos['weight'] = real_pos['weight'] / real_pos['weight'].sum() real_pos # 保存记录当前持仓信息, 写入数据库 previous_record = record_engine.fetch_record_meta(Record, ref_date) if len(previous_record) == 0: record_engine.write_data('pos_record', real_pos) else: record_engine.del_historical_data(Record, ref_date) # 删除同日期的历史数据 tmp_record = record_engine.fetch_record_meta(Record, ref_date) if len(tmp_record) == 0: # 删除成功 record_engine.write_data('pos_record', real_pos) else: print('{} 的数据没有删除: {}'.format(ref_date, len(previous_record))) # 生成交易记录 ## 修改code格式 real_pos['code'] = real_pos['code'].apply(lambda x: "{:06d}".format(x) + '.SH' if len(str(x))==6 and str(x)[0] in '6' else "{:06d}".format(x) + '.SZ') real_pos = real_pos.loc[:, ['code', 'weight', 'trade_date']] real_pos.rename(columns={"code": "证券代码", "weight": "持仓权重", "trade_date": "成分日期"}, inplace=True) 
real_pos['交易价格'] = 0 real_pos = real_pos[['证券代码', '持仓权重', '交易价格', '成分日期']].copy() real_pos.to_csv('./{}/800-base-{}.csv'.format(weekly, end_date), encoding='utf_8_sig', index=False) previous_record = record_engine.fetch_record_meta(Record, ref_date) # previous_record = previous_record[previous_record['trade_date'] == ref_date] previous_record ###Output _____no_output_____
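###Markdown A small sanity check on the record written above (it assumes the `real_pos` frame from the previous cells is still in memory): the top-50 normalized weights should sum to roughly 1. ###Code
# The weight column was renamed to '持仓权重' before the CSV was written.
print(len(real_pos), 'holdings')
print('total weight:', real_pos['持仓权重'].sum())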
Investigate-Shapley-Values.ipynb
###Markdown This notebook investigates Shapley Values proposed for tree based ensembles in [Consistent Individualized Feature Attribution for TreeEnsembles](https://arxiv.org/pdf/1802.03888.pdf). Table of Contents1&nbsp;&nbsp;Create dataset2&nbsp;&nbsp;XGBoost models2.1&nbsp;&nbsp;Model A2.1.1&nbsp;&nbsp;Set up data2.1.2&nbsp;&nbsp;Build model2.1.3&nbsp;&nbsp;Check predictions2.1.4&nbsp;&nbsp;Tree structure2.1.5&nbsp;&nbsp;Calculate prediction contribution2.2&nbsp;&nbsp;Model B2.2.1&nbsp;&nbsp;Set up data2.2.2&nbsp;&nbsp;Build model2.2.3&nbsp;&nbsp;Check predictions2.2.4&nbsp;&nbsp;Tree structure2.2.5&nbsp;&nbsp;Calculate prediction contribution2.3&nbsp;&nbsp;Comment3&nbsp;&nbsp;Sklean decision trees3.1&nbsp;&nbsp;Model A3.1.1&nbsp;&nbsp;MAE3.1.2&nbsp;&nbsp;MSE3.2&nbsp;&nbsp;Model B3.2.1&nbsp;&nbsp;MAE3.2.2&nbsp;&nbsp;MSE3.3&nbsp;&nbsp;Comment ###Code import pandas as pd import xgboost as xgb import pygbmexpl as pygbmexpl from sklearn.tree import DecisionTreeRegressor, plot_tree ###Output _____no_output_____ ###Markdown Create dataset ###Code data = pd.DataFrame( { 'response_a':[80,0,0,0], 'response_b':[90,10,0,0], 'fever':[1,0,1,0], 'cough':[1,1,0,0] } ) data ###Output _____no_output_____ ###Markdown XGBoost models Define params s.t. we have 'vanilla' trees - no regularisation, no scaling of learning rate. ###Code params = { 'objective': 'reg:squarederror', 'max_depth': 2, 'subsample': 1, 'colsample_bytree': 1, 'eta': 1, 'lambda': 0, 'gamma': 0, 'alpha': 0 } ###Output _____no_output_____ ###Markdown Model A Set up data ###Code data_xgb_a = xgb.DMatrix(data[['fever', 'cough']], label = data[['response_a']]) data_xgb_a.set_base_margin([0] * data.shape[0]) ###Output _____no_output_____ ###Markdown Build model ###Code xgb_model_a = xgb.train( params = params, dtrain = data_xgb_a, num_boost_round = 1 ) ###Output _____no_output_____ ###Markdown Check predictions ###Code xgb_model_a.predict(data_xgb_a) ###Output _____no_output_____ ###Markdown Tree structure ###Code xgb_model_a_df = pygbmexpl.xgb.parser.extract_model_predictions(xgb_model_a) xgb_model_a_df ###Output _____no_output_____ ###Markdown Calculate prediction contributionUsing Saabas method ###Code pygbmexpl.xgb.explainer.decompose_prediction(xgb_model_a_df, data.iloc[0]) ###Output _____no_output_____ ###Markdown Model B Set up data ###Code data_xgb_b = xgb.DMatrix(data[['fever', 'cough']], label = data[['response_b']]) data_xgb_b.set_base_margin([0] * data.shape[0]) ###Output _____no_output_____ ###Markdown Build model ###Code xgb_model_b = xgb.train( params = params, dtrain = data_xgb_b, num_boost_round = 1 ) ###Output _____no_output_____ ###Markdown Check predictions ###Code xgb_model_b.predict(data_xgb_b) ###Output _____no_output_____ ###Markdown Tree structure ###Code xgb_model_b_df = pygbmexpl.xgb.parser.extract_model_predictions(xgb_model_b) xgb_model_b_df ###Output _____no_output_____ ###Markdown Calculate prediction contributionUsing Saabas method ###Code pygbmexpl.xgb.explainer.decompose_prediction(xgb_model_b_df, data.iloc[0]) ###Output _____no_output_____ ###Markdown Comment We have reproduced the results presented in Figure 1 of [Consistent Individualized Feature Attribution for TreeEnsembles](https://arxiv.org/pdf/1802.03888.pdf). As we can see Model A attributes more importance to `cough` and Model B attributes more to `fever`. However Model A can be written as `output = [cough & fever]*80` and Model B can be written as `output = [cough & fever]*80 + [cough]*10` so 'cough' should be more important in Model B. 
We can also see that some of the contribution that would otherwise be attributed to the first split in each model is attributed to the `base`. This is the starting point for each tree; here it is the average of the response, as we're using a squared error loss function.
Sklearn decision trees
Note: if we used a different loss function (e.g. mean absolute error) then the base value, i.e. the prediction for the root node, would be different. We cannot use MAE as a loss function in xgboost, as it requires derivatives of the loss function and the MAE derivative is not defined at x = 0 - however we can use the decision tree regressor from scikit-learn. ###Code
tree_model_mae = DecisionTreeRegressor(
    criterion = 'mae',
    max_depth = 2,
    min_samples_leaf = 1,
    min_samples_split = 2,
    random_state = 100
)
tree_model_mse = DecisionTreeRegressor(
    criterion = 'mse',
    max_depth = 2,
    min_samples_leaf = 1,
    min_samples_split = 2,
    random_state = 100
)
###Output _____no_output_____ ###Markdown Model A MAE ###Code
tree_mae_model_a = tree_model_mae.fit(X = data[['fever', 'cough']], y = data[['response_a']])
plot_tree(tree_mae_model_a, filled = True)
###Output _____no_output_____ ###Markdown MSE ###Code
tree_mse_model_a = tree_model_mse.fit(X = data[['fever', 'cough']], y = data[['response_a']])
plot_tree(tree_mse_model_a, filled = True)
###Output _____no_output_____ ###Markdown Model B MAE ###Code
tree_mae_model_b = tree_model_mae.fit(X = data[['fever', 'cough']], y = data[['response_b']])
plot_tree(tree_mae_model_b, filled = True)
###Output _____no_output_____ ###Markdown MSE ###Code
tree_mse_model_b = tree_model_mse.fit(X = data[['fever', 'cough']], y = data[['response_b']])
plot_tree(tree_mse_model_b, filled = True)
###Output _____no_output_____
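###Markdown A minimal sketch of computing the consistent attributions from the paper directly, using the separate `shap` package (an extra dependency that is not imported anywhere above, so treat this as optional): ###Code
import shap

X = data[['fever', 'cough']]

# TreeExplainer implements the TreeSHAP algorithm described in the cited paper.
explainer_a = shap.TreeExplainer(xgb_model_a)
explainer_b = shap.TreeExplainer(xgb_model_b)

print(explainer_a.shap_values(X))
print(explainer_b.shap_values(X))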
module2/Instacart.ipynb
###Markdown “The Instacart Online Grocery Shopping Dataset 2017”, Accessed from https://www.instacart.com/datasets/grocery-shopping-2017 on Sep 16, 2019 ###Code import numpy as np import pandas as pd #I don't need all this data. I'm commenting out the stuff I'm not using #aisles=pd.read_csv('instacart_2017_05_01/aisles.csv') #departments=pd.read_csv('instacart_2017_05_01/departments.csv') order_products1=pd.read_csv('instacart_2017_05_01/order_products__prior.csv') order_products2=pd.read_csv('instacart_2017_05_01/order_products__train.csv') orders=pd.read_csv('instacart_2017_05_01/orders.csv') products=pd.read_csv('instacart_2017_05_01/products.csv') orders.head() order_products=pd.concat([order_products1, order_products2]) order_products=order_products2.copy() order_products.drop('reordered',axis=1,inplace=True) orders.drop(['user_id','eval_set','order_number','days_since_prior_order'], axis=1, inplace=True) #this is the Df that I'm going to do my predicting on. I still need to figure out how #to create a feature that is the 'next item ordered. ' df=order_products.merge(orders, on='order_id') df.head() products.head() #nextItem=df[['nextItem']] #nextItem.merge(products, left_on='nextItem', right_on='product_id') products #this code is slow and long. Vishnu gave me a way around it def getNext(data): order_id=data[0] nextItem=data[1]+1 orderContent=df[df['order_id']==order_id] Item=orderContent.loc[orderContent['add_to_cart_order']==nextItem, 'product_id'] try: return int(Item) except: return np.NaN getNext([4,2]) ###Output _____no_output_____ ###Markdown df['NextOrder']=df[['order_id','add_to_cart_order']].apply(getNext, axis=1) ###Code df['order_id'].describe() df[df['order_id']<1000000].shape df['nextItem']=df.groupby('order_id')['product_id'].shift(-1) df=df.dropna() df.shape df.head() df.head() subset=df.loc[0:1000] df.shape ###Output _____no_output_____ ###Markdown subset[['order_id','add_to_cart_order']].apply(getNext, axis=1) ###Code #this eliminates all items that are not ordered more than 1000 times. #This helps to deal with the cardinality of the data items=df['nextItem'].value_counts() frequentItems=items[items>1000].index.tolist() df=df[df['nextItem'].isin(frequentItems)] from sklearn.model_selection import train_test_split from sklearn.pipeline import make_pipeline from xgboost import XGBClassifier from sklearn.ensemble import RandomForestClassifier #I'm going to use a train/val/test split #this dataset is so huge that there are items that are getting put into the test and validation #sets that aren't going into train. I'm going to clean this up by getting rid of the low frequency items target='nextItem' features=df.columns.tolist()[1:5] X=df[features] y=df[target].astype('category') X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y) X_train, X_val, y_train, y_val= train_test_split(X_train, y_train, stratify=y_train) X_train.shape, X_val.shape, X_test.shape #begin with majority class baseline y_train.value_counts(normalize=True) ###Output _____no_output_____ ###Markdown The majority class is only 4.5% of the data, that should be easy enough to beat with even a little modeling. ###Code features=X_train.columns.tolist()[1:] features model=RandomForestClassifier(n_estimators=100, n_jobs=-1) evals=[(X_train, y_train),(X_val, y_val)] model.fit(X_train, y_train) model.score(X_val, y_val) ###Output _____no_output_____
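###Markdown A quick follow-up sketch (assuming the fitted `model` from the cell above): a random forest exposes per-feature importances, which is a cheap way to see which of the four inputs drives the next-item prediction. ###Code
import pandas as pd

# feature_importances_ is a standard attribute of a fitted RandomForestClassifier.
importances = pd.Series(model.feature_importances_, index=X_train.columns).sort_values(ascending=False)
print(importances)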
examples/expert_section/notebooks/netflow.ipynb
###Markdown Infeasibility assessment with gurobipy and ticdat For this example, we'll demonstrate using `ticdat` and `gurobipy` to troubleshoot an infeasible model. In particular, we will use the combination of foregin key relationships and `obfusimplify` to rename the parent entities of the data instance. This example is based on the Gurobi [netflow model](http://www.gurobi.com/documentation/6.0/examples/netflow_py.html). We created our own version of [netflow.py](https://github.com/ticdat/ticdat/blob/master/examples/gurobipy/netflow/netflow.py) in order to turn the netflow solve engine into a modular component. The two code instances are close to identical, and are a good demonstration of the ease with which data scientists new to Python can use both `ticdat` and `gurobipy` to make readable, modular, solve engines.We begin by importing the netflow components we need to do our work. `dataFactory` defines the netflow schema, `solve` tries to solve a data instance, and `create_model` is a troubleshooting routine (and sub-step of `solve`) that creates a `gurobipy.Model` and a dictionary of variables from that `Model`. ###Code from netflowmodel import dataFactory, solve, create_model ###Output _____no_output_____ ###Markdown The `netflow.xlsx` file has data sheets that we think would populate a sound netflow data instance. Lets quickly perform the three basic integrity checks - row duplication, cross table reference failure, and data field validation. ###Code assert not dataFactory.xls.find_duplicates("netflow.xlsx") dat = dataFactory.xls.create_tic_dat("netflow.xlsx") assert not dataFactory.find_foreign_key_failures(dat) assert not dataFactory.find_data_type_failures(dat) ###Output _____no_output_____ ###Markdown So far so good! No errors. Lets do a quick look at row counts for each table. ###Code {t:len(getattr(dat, t)) for t in dataFactory.all_tables} ###Output _____no_output_____ ###Markdown This is a pretty big model. Lets take care not to display it all at once. At any rate, lets be brave, and try and solve it. But lets freeze it first, to be sure the `solve` routine doesn't inadvertently edit it's input data. ###Code dataFactory.freeze_me(dat) soln = solve(dat) ###Output Optimize a model with 25900 rows, 225001 columns and 700000 nonzeros Concurrent LP optimizer: dual simplex and barrier Showing barrier log only... Presolve removed 899 rows and 1 columns Presolve time: 0.55s Solved with barrier Solved in 0 iterations and 0.63 seconds Infeasible model ###Markdown Infreasible? Inconceivable!Ok, I suppose infeasible models are not only conceivable, but also inevitable. Now we have to troubleshoot. A good first step for infeasibility troubleshooting is creating the Irreducible Inconsistent Subsystem (IIS) of the underlying MIP model. Lucky for us, `netflowmodel` makes it easy us to turn a data instance into a `gurobipy` model, and `gurobipy` makes it easy for us to create the IIS of that model. ###Code model,_ = create_model(dat) model.computeIIS() ###Output Iteration Objective Primal Inf. Dual Inf. Time 0 0.0000000e+00 2.160368e+07 0.000000e+00 0s IIS computed: 99 constraints and 11 bounds ###Markdown 99 constraints and 11 bounds is big enough that it won't fit nicely in this notebook. Let's write out the IIS into a text file, and copy a few lines here. Again, `gurobipy` makes this so easy you hardly need to consult the docs. ###Code model.write("firstFail.ilp") ###Output _____no_output_____ ###Markdown Here are a few snippets from `firstFail.ilp`. 
It's not very easy to read, but it looks like a lot of conservation of flow constraints. ``` node_00000567234-H7493804610407033_111BFZK2233-7788453265: flow_00000567234-H7493804610407033_111BFZK2233-778845329_111BFZK2233-7788453265 + flow_00000567234-H7493804610407033_111BFZK2233-7788453210_111BFZK2233-7788453265 + flow_00000567234-H7493804610407033_111BFZK2233-778845321_111BFZK2233-7788453265 + flow_00000567234-H7493804610407033_111BFZK2233-778845328_111BFZK2233-7788453265 + flow_00000567234-H7493804610407033_111BFZK2233-778845327_111BFZK2233-7788453265 + flow_00000567234-H7493804610407033_111BFZK2233-778845323_111BFZK2233-7788453265 + flow_00000567234-H7493804610407033_111BFZK2233-778845324_111BFZK2233-7788453265 + flow_00000567234-H7493804610407033_111BFZK2233-778845325_111BFZK2233-7788453265 + flow_00000567234-H7493804610407033_111BFZK2233-778845322_111BFZK2233-7788453265 + flow_00000567234-H7493804610407033_111BFZK2233-778845326_111BFZK2233-7788453265 - forcedToZero = 415 ``` ```node_00000567234-H7493804610407033_111BFZK2233-7788453212: flow_00000567234-H7493804610407033_111BFZK2233-778845329_111BFZK2233-7788453212 + flow_00000567234-H7493804610407033_111BFZK2233-778845324_111BFZK2233-7788453212 + flow_00000567234-H7493804610407033_111BFZK2233-778845325_111BFZK2233-7788453212 + flow_00000567234-H7493804610407033_111BFZK2233-778845321_111BFZK2233-7788453212 + flow_00000567234-H7493804610407033_111BFZK2233-778845326_111BFZK2233-7788453212 + flow_00000567234-H7493804610407033_111BFZK2233-778845323_111BFZK2233-7788453212 + flow_00000567234-H7493804610407033_111BFZK2233-778845328_111BFZK2233-7788453212 + flow_00000567234-H7493804610407033_111BFZK2233-778845322_111BFZK2233-7788453212 + flow_00000567234-H7493804610407033_111BFZK2233-778845327_111BFZK2233-7788453212 + flow_00000567234-H7493804610407033_111BFZK2233-7788453210_111BFZK2233-7788453212 - forcedToZero = 357``` And on like that. There are some constraints that have the signs reversed, like this one (I'm truncating the middle). ```node_00000567234-H7493804610407033_111BFZK2233-778845327: - flow_00000567234-H7493804610407033_111BFZK2233-778845327_111BFZK2233-7788453296 - flow_00000567234-H7493804610407033_111BFZK2233-778845327_111BFZK2233-7788453264 - flow_00000567234-H7493804610407033_111BFZK2233-778845327_111BFZK2233-7788453218 - flow_00000567234-H7493804610407033_111BFZK2233-778845327_111BFZK2233-7788453211 - flow_00000567234-H7493804610407033_111BFZK2233-778845327_111BFZK2233-7788453255 - flow_00000567234-H7493804610407033_111BFZK2233-778845327_111BFZK2233-7788453292.... - flow_00000567234-H7493804610407033_111BFZK2233-778845327_111BFZK2233-7788453241 - flow_00000567234-H7493804610407033_111BFZK2233-778845327_111BFZK2233-7788453258 - flow_00000567234-H7493804610407033_111BFZK2233-778845327_111BFZK2233-7788453251 - flow_00000567234-H7493804610407033_111BFZK2233-778845327_111BFZK2233-7788453234 - flow_00000567234-H7493804610407033_111BFZK2233-778845327_111BFZK2233-7788453283 + forcedToZero = -5220 ``` So what's going on here? `forcedToZero` is a placeholder variable `netflowmodel` uses in order to promote readability. It is fixed at zero by lower and upper bounds. I use it insure that each conservation of flow constraint has at least one positive and one negative variable.But the real source of confusion here is the rest of the variable and constraint names. Why are they so long and cryptic? It's not `gurobipy`'s fault - they provide options to create readable names for each variable and constraint. 
And it's not `netflowmodel`'s fault. That code is crafting names for the variables and constraint based on the names of the nodes and commodities in the `dat` object. The readability problem stems from the data in `netflow.xlsx`. The commodity and node names in this file aren't human readable at all. They look like they're computer codes designed for use by some ERP system.But of course they are! This data set comes from an ERP system. Ok, not literally, as this data set was home brewed for testing purposes. But we see these types of names all the time. It's a common issue when troubleshooting MIPs. Even after you go through all the work of giving good names to your MIP variables and constraints, the actual entity names of the core data obscures them with all but illegible machine codes. What would be really handy here would be a way to rename the nodes and commodities for testing purposes. If only we could make a copy of our data set that retained all the original numerical data, but substituted simple, readable names for all the entities. Then we could find this exact same infeasibility problem in the copy, and study a truly human readable `.ilp` file. (The whole point of diagnostic files and naming routines is to achieve human readability so as to create insight).Lucky for us, `ticdat` provides just such functionality. ###Code dat_better, dat_renamings = dataFactory.obfusimplify(dat) ###Output _____no_output_____ ###Markdown The relevant function here is called `obfusimplify`. It returns two objects. The first is a copy of `dat` with clean entity names. The second (`dat_renamings`) is a dictionary that maps from the new entities back to the table and entity name of the original data. For example, `'C5'` is the new name for the fifth commodity entry (which is `''00000567234-H74938046104070102'`), and `'N5'` is the new name for the fifth node entry (which is `''111BFZK2233-7788453212''`). ###Code dat_renamings['C5'] dat_renamings['N5'] ###Output _____no_output_____ ###Markdown Just to be clear, these renamings are propagating in an intelligent way throughout the `dat_better` object. ###Code [[c, n1, n2] for c, n1, n2 in dat_better.cost if n1 == 'N5' or n2 == 'N5' and c == 'C5'] [[n1, n2, v["capacity"]] for (n1, n2), v in dat_better.arcs.items() if n1 == 'N5' or n2 == 'N5'] ###Output _____no_output_____ ###Markdown How does `obfusimplify` know how to correctly populate the secondary tables like `arcs` and `cost`? It takes advantage of the foreign key relationships that `netflowmodel` created when it built `dataFactory`. In the [diet example](https://github.com/ticdat/ticdat/blob/master/examples/expert_section/notebooks/diet.ipynb) we saw how the small investment of defining these relationships yielded big dividends in recognizing a data set with a misspelled entry. Here, we see how performing this relatively easy task (which really just documents the nature of the input data) can help us troubleshoot an infeasible model with unreadable names.Now that we have a renamed data set, we can generate a `.ilp` file with better names. ###Code model,_ = create_model(dat_better) model.computeIIS() model.write("betterFail.ilp") ###Output _____no_output_____ ###Markdown Now let's take a look at the `betterFail.ilp` file. 
```node_C257_N63: flow_C257_N1_N63 + flow_C257_N57_N63 + flow_C257_N46_N63 + flow_C257_N24_N63 + flow_C257_N13_N63 + flow_C257_N90_N63 + flow_C257_N35_N63 + flow_C257_N79_N63 + flow_C257_N68_N63 + flow_C257_N2_N63 - forcedToZero = 415``` ```node_C257_N5: flow_C257_N1_N5 + flow_C257_N57_N5 + flow_C257_N35_N5 + flow_C257_N46_N5 + flow_C257_N2_N5 + flow_C257_N24_N5 + flow_C257_N13_N5 + flow_C257_N68_N5 + flow_C257_N90_N5 + flow_C257_N79_N5 - forcedToZero = 357``` ```node_C257_N68: - flow_C257_N68_N88 - flow_C257_N68_N81 - flow_C257_N68_N15 - flow_C257_N68_N6 - flow_C257_N68_N64 - flow_C257_N68_N53 - flow_C257_N68_N54 - flow_C257_N68_N48 - flow_C257_N68_N62 - flow_C257_N68_N19 - flow_C257_N68_N12 - flow_C257_N68_N3 - flow_C257_N68_N26 - flow_C257_N68_N56 - flow_C257_N68_N39 - flow_C257_N68_N50 - flow_C257_N68_N76 - flow_C257_N68_N43 - flow_C257_N68_N87 - flow_C257_N68_N52 - flow_C257_N68_N16 - flow_C257_N68_N94 - flow_C257_N68_N7 - flow_C257_N68_N69 - flow_C257_N68_N72 - flow_C257_N68_N47 - flow_C257_N68_N30 - flow_C257_N68_N22 - flow_C257_N68_N65 - flow_C257_N68_N91... - flow_C257_N68_N5 - flow_C257_N68_N70 - flow_C257_N68_N45 - flow_C257_N68_N89 - flow_C257_N68_N84 - flow_C257_N68_N82 - flow_C257_N68_N67 - flow_C257_N68_N14 - flow_C257_N68_N93 - flow_C257_N68_N9 - flow_C257_N68_N23 - flow_C257_N68_N34 - flow_C257_N68_N20 - flow_C257_N68_N75 - flow_C257_N68_N40 - flow_C257_N68_N49 - flow_C257_N68_N37 - flow_C257_N68_N32 - flow_C257_N68_N63 - flow_C257_N68_N11 - flow_C257_N68_N97 - flow_C257_N68_N38 - flow_C257_N68_N27 - flow_C257_N68_N86 - flow_C257_N68_N25 - flow_C257_N68_N28 - flow_C257_N68_N71 - flow_C257_N68_N44 - flow_C257_N68_N55 - flow_C257_N68_N66 + forcedToZero = -5220``` Now we're getting somewhere. These constraints and variables names are fit for man, not machine. This is something I can study for a while and actually get a feel for what's going on. The first thing that jumps out is that all the flow conservation constraints involve commodity `C257`. I wonder if there is an aggregate `inflow` imbalance there? (I.e. total supply, total demand mismatch).There is no reason to check for this problem exclusively for `C257`. Lets whip out a little Python function that checks all aggregate `inflow` imbalances for all commodities for a given data set. ###Code from collections import defaultdict def find_flow_imbalance(dat): rtn = defaultdict(float) for (k,n),v in dat.inflow.items(): rtn[k] += v["quantity"] return {k:v for k,v in rtn.items() if abs(v) > 0} find_flow_imbalance(dat_better) ###Output _____no_output_____ ###Markdown Sure enough, there is an aggregate `inflow` imbalance for commodity `C257`. There is also one for commodity `C15`. Why did the `betterFail.ilp` file describe one and not the other? The answer is it wouldn't be able to put the imbalance for both commodities into the same IIS, since they exist independently of each other. An IIS that captured both at the same time would fail the IIS requirement that removing just one constraint from the IIS results in a feasible sub-model.Just for fun, let's look at the `inflow` imbalances for the original data. ###Code find_flow_imbalance(dat) ###Output _____no_output_____ ###Markdown If you pay attention to the commodity name endings, you should be able to confirm by inspection that the `obfusimplify` did its job correctly. ###Code dat_renamings['C15'] dat_renamings['C257'] ###Output _____no_output_____
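###Markdown The same lookup can be done in one pass. A short sketch, using only objects already defined above, that reports every imbalanced commodity under both its simplified and original name: ###Code
# Map each imbalanced commodity back to its original (table, entity) entry via dat_renamings.
imbalances = find_flow_imbalance(dat_better)
for simple_name, imbalance in imbalances.items():
    print(simple_name, dat_renamings[simple_name], imbalance)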
Principal Component Analysis (PCA)/Principal_Component_Analysis.ipynb
###Markdown Principal Component Analysis Task 2: Load the Data and Libraries--- ###Code %matplotlib inline import pandas as pd import matplotlib.pyplot as plt import numpy as np import seaborn as sns plt.style.use("ggplot") plt.rcParams["figure.figsize"] = (12,8) # data URL: https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data ###Output _____no_output_____
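###Markdown A minimal sketch of actually loading the data from the URL in the comment above (the file has no header row, so the column names below are the conventional iris labels, supplied by hand): ###Code
# Conventional column names for the UCI iris data file (assumed, not read from the file itself).
columns = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'species']
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris = pd.read_csv(url, header=None, names=columns)
iris.head()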
notebooks/HOT_global_stats.ipynb
###Markdown Get summaries of a lot of projects ###Code summaries_file = os.path.join(get_data_dir(), 'summaries.json') summaries = {} with open(summaries_file) as f: summaries = json.load(f) for project_id in tqdm(range(5636, 5650)): if project_id not in summaries or summaries[project_id]['status'] != 'ARCHIVED': summaries[project_id] = download_summary_data(project_id) time.sleep(0.5 + random.random()) for project_id in tqdm(range(5650, 6100)): if project_id not in summaries or summaries[project_id]['status'] != 'ARCHIVED': summaries[project_id] = download_summary_data(project_id) time.sleep(0.5 + random.random()) with open(summaries_file, 'w') as outfile: json.dump(summaries, outfile) ###Output _____no_output_____ ###Markdown Data extraction and cleaning ###Code summary_df = pd.DataFrame() for project_id in summaries: if 'Error' in summaries[project_id]: print(str(project_id) + ' : ' + summaries[project_id]['Error']) continue summary_df = pd.concat([summary_df, pd.DataFrame(data=[(project_id, summaries[project_id]['created'], summaries[project_id]['lastUpdated'], summaries[project_id]['status'], summaries[project_id]['percentValidated'], summaries[project_id]['organisationTag'], summaries[project_id]['projectArea(in sq.km)'])], columns=['project_id', 'created', 'lastUpdated', 'status', 'percentValidated', 'organisation', 'area'])], axis=0, ignore_index=True) summary_df = summary_df.set_index('project_id') summary_df['created'] = pd.to_datetime(summary_df['created']) summary_df['lastUpdated'] = pd.to_datetime(summary_df['lastUpdated']) summary_df.info() summary_df.head() ###Output _____no_output_____ ###Markdown Qualitative analysis ###Code summary_df.hist('percentValidated') summary_df['status'].unique() summary_df['organisation'].unique() summary_df['organisation'] = summary_df['organisation'].apply(lambda s: 'MSF' if s=='Médecins Sans Frontières' else s) summary_df['organisation'].unique() ###Output _____no_output_____ ###Markdown Restrict to S1 2019 Definitions of the restriction- lastUpdated after 1st January 2019- lastUpdated before 1st July 2019- validated > 90% ###Code summary_df[summary_df['organisation'] == 'CartONG'] summary_df[summary_df['organisation'] == 'AIT'] df = summary_df[(summary_df['lastUpdated'] > '2019-01-01') & (summary_df['lastUpdated'] < '2019-07-19') & (summary_df['percentValidated'] >= 90)] df.head() df['organisation'].unique() pd.DataFrame(df.groupby('organisation').count().area.sort_values(ascending=False)) fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 10), dpi=100, sharex=True) g = df.groupby('organisation').count().area.sort_values() ax.bar(np.arange(len(g)), g.values, color=['black' if index == 'CartONG' else 'lightgray' for index in g.index]) plt.xticks(np.arange(len(g)), g.index, rotation=90) ax.set_xlabel('Organisations') ax.set_ylabel('Number of finished projects') ax.set_title('Number of finished projects in S1 2019') plt.savefig('finished_project_nb.png', dpi=100) df.groupby('organisation').sum().sort_values('area', ascending=False) fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 10), dpi=100, sharex=True) g = df.groupby('organisation').sum().sort_values('area') ax.bar(np.arange(len(g)), g['area'], color=['black' if index == 'CartONG' else 'lightgray' for index in g.index]) plt.xticks(np.arange(len(g)), g.index, rotation=90) ax.set_xlabel('Organisations') ax.set_ylabel('Area in km²') ax.set_title('Total area of finished projects in S1 2019') plt.savefig('finished_project_total_area.png', dpi=100) fig, ax = plt.subplots(nrows=1, ncols=1, 
figsize=(10, 10), dpi=100, sharex=True) g = df.groupby('organisation').sum().sort_values('area')[:6] ax.bar(np.arange(len(g)), g['area'], color=['black' if index == 'CartONG' else 'lightgray' for index in g.index]) plt.xticks(np.arange(len(g)), g.index, rotation=90) ax.set_xlabel('Organisations') ax.set_ylabel('Area in km²') ax.set_title('[ZOOM IN] Total area of finished projects in S1 2019') plt.savefig('finished_project_total_area_bis.png', dpi=100) df.groupby('organisation').mean().sort_values('area', ascending=False) fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 10), dpi=100, sharex=True) g = df.groupby('organisation').mean().sort_values('area') ax.bar(np.arange(len(g)), g['area'], color=['black' if index == 'CartONG' else 'lightgray' for index in g.index]) plt.xticks(np.arange(len(g)), g.index, rotation=90) ax.set_xlabel('Organisations') ax.set_ylabel('Area in km²') ax.set_title('Mean area of finished projects in S1 2019') plt.savefig('finished_project_mean_area.png', dpi=100) fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 10), dpi=100, sharex=True) g = df.groupby('organisation').mean().sort_values('area')[:4] ax.bar(np.arange(len(g)), g['area'], color=['black' if index == 'CartONG' else 'lightgray' for index in g.index]) plt.xticks(np.arange(len(g)), g.index, rotation=90) ax.set_xlabel('Organisations') ax.set_ylabel('Area in km²') ax.set_title('[ZOOM IN] Mean area of finished projects in S1 2019') plt.savefig('finished_project_mean_area_bis.png', dpi=100) ###Output _____no_output_____
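###Markdown The three groupings above (project count, total area, mean area) can also be produced in a single pass, which gives a convenient table to keep next to the plots. A short sketch using the same filtered frame: ###Code
# One aggregation per organisation: number of finished projects plus total and mean area in km².
summary_by_org = df.groupby('organisation')['area'].agg(['count', 'sum', 'mean']).sort_values('sum', ascending=False)
summary_by_org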
datashader-work/datashader-examples/user_guide/7_Networks.ipynb
###Markdown The point and line-segment plotting provided by Datashader can be put together in different ways to visualize specific types of data. For instance, network graph data, i.e., networks of nodes connected by edges, can very naturally be represented by points and lines. Here we will show examples of using Datashader's graph-specific plotting tools, focusing on how to visualize very large graphs while allowing any portion of the rendering pipeline to replaced with components suitable for specific problems.First, we'll import the packages we are using and demonstrating here. ###Code import math import numpy as np import pandas as pd import datashader as ds import datashader.transfer_functions as tf from datashader.layout import random_layout, circular_layout, forceatlas2_layout from datashader.bundling import connect_edges, hammer_bundle from itertools import chain ###Output _____no_output_____ ###Markdown Graph (node) layoutSome graph data is inherently spatial, such as connections between geographic locations, and these graphs can simply be plotted by connecting each location with line segments. However, most graphs are more abstract, with nodes having no natural position in space, and so they require a "layout" operation to choose a 2D location for each node before the graph can be visualized. Unfortunately, choosing such locations is an [open-ended problem involving a complex set of tradeoffs and complications](http://www.hiveplot.com).Datashader provides a few tools for doing graph layout, while also working with external layout tools. As a first example, let's generate a random graph, with 100 points normally distributed around the origin and 20000 random connections between them: ###Code np.random.seed(0) n=100 m=20000 nodes = pd.DataFrame(["node"+str(i) for i in range(n)], columns=['name']) nodes.tail() edges = pd.DataFrame(np.random.randint(0,len(nodes), size=(m, 2)), columns=['source', 'target']) edges.tail() ###Output _____no_output_____ ###Markdown Here you can see that the nodes list is a columnar dataframe with an index value and name for every node. The edges list is a columnar dataframe listing the index of the source and target in the nodes dataframe. To make this abstract graph plottable, we'll need to choose an x,y location for each node. There are two simple and fast layout algorithms included: ###Code circular = circular_layout(nodes, uniform=False) randomloc = random_layout(nodes) randomloc.tail() cvsopts = dict(plot_height=400, plot_width=400) def nodesplot(nodes, name=None, canvas=None, cat=None): canvas = ds.Canvas(**cvsopts) if canvas is None else canvas aggregator=None if cat is None else ds.count_cat(cat) agg=canvas.points(nodes,'x','y',aggregator) return tf.spread(tf.shade(agg, cmap=["#FF3333"]), px=3, name=name) tf.Images(nodesplot(randomloc,"Random layout"), nodesplot(circular, "Circular layout")) ###Output _____no_output_____ ###Markdown The circular layout provides an option to distribute the nodes randomly along the circle or evenly, and here we've chosen the former.The two layouts above ignore the connectivity structure of the graph, focusing only on the nodes. 
The [ForceAtlas2](http://journals.plos.org/plosone/article/file?id=10.1371/journal.pone.0098679&type=printable) algorithm is a more complex approach that treats connections like physical forces (a force-directed approach) in order to construct a layout for the nodes based on the network connectivity: ###Code %time forcedirected = forceatlas2_layout(nodes, edges) tf.Images(nodesplot(forcedirected, "ForceAtlas2 layout")) ###Output _____no_output_____ ###Markdown This algorithm is designed to place densely connected nodes closer to each other, but of course we will only be able to evaluate how well it has done so once we plot edges (below). Edge rendering/bundlingAssuming that we have a suitable layout for the nodes, we can now plot the connections between them. There are currently two bundling algorithms provided: drawing a line directly between any connected nodes (``connect_edges``), and an iterative "bundling" algorithm ``hammer_bundle`` (a variant of [Hurter, Ersoy, & Telea, ECV-2012](http://www.cs.rug.nl/~alext/PAPERS/EuroVis12/kdeeb.pdf)) that allows edges to curve and then groups nearby ones together to help convey structure. Rendering direct connections should be very quick, even for large graphs, but bundling can be quite computationally intensive. ###Code def edgesplot(edges, name=None, canvas=None): canvas = ds.Canvas(**cvsopts) if canvas is None else canvas return tf.shade(canvas.line(edges, 'x','y', agg=ds.count()), name=name) def graphplot(nodes, edges, name="", canvas=None, cat=None): if canvas is None: xr = nodes.x.min(), nodes.x.max() yr = nodes.y.min(), nodes.y.max() canvas = ds.Canvas(x_range=xr, y_range=yr, **cvsopts) np = nodesplot(nodes, name + " nodes", canvas, cat) ep = edgesplot(edges, name + " edges", canvas) return tf.stack(ep, np, how="over", name=name) cd = circular fd = forcedirected %time cd_d = graphplot(cd, connect_edges(cd,edges), "Circular layout") %time fd_d = graphplot(fd, connect_edges(fd,edges), "Force-directed") %time cd_b = graphplot(cd, hammer_bundle(cd,edges), "Circular layout, bundled") %time fd_b = graphplot(fd, hammer_bundle(fd,edges), "Force-directed, bundled") tf.Images(cd_d,fd_d,cd_b,fd_b).cols(2) ###Output _____no_output_____ ###Markdown The four examples above plot the same network structure by either connecting the nodes directly with lines or bundling the connections, and by using a random layout or a force-directed layout. As you can see, these options have a big effect on the resulting visualization. Here we'll look more closely at the bundling algorithm, using a simple example where we know the structure: a single node at the center, with random points on a circle around it that connect to the central node (a star graph topology): ###Code n = 75 np.random.seed(0) x = np.random.random(n) snodes = pd.DataFrame(np.stack((np.cos(2*math.pi*x), np.sin(2*math.pi*x))).T, columns=['x','y']) snodes.iloc[0] = (0.0,0.0) sedges = pd.DataFrame(list(zip((range(1,n)),[0]*n)),columns=['source', 'target']) star = snodes,sedges tf.Images(graphplot(snodes, connect_edges(*star),"Star"), graphplot(snodes, hammer_bundle(*star),"Star bundled")) ###Output _____no_output_____ ###Markdown Here you can see the bundling algorithm forms groups of nearby connnections, which helps make the structure at a particular scale clear. 
The scale of this structure, i.e., how much bundling is done, is determined by an effective "bandwidth", which is a combination of an `initial_bandwidth` parameter and a `decay` time constant for annealing this bandwidth over time: ###Code %%time grid = [graphplot(snodes, hammer_bundle(*star, iterations=5, decay=decay, initial_bandwidth=bw), "d={:0.2f}, bw={:0.2f}".format(decay, bw)) for decay in [0.1, 0.25, 0.5, 0.9] for bw in [0.1, 0.2, 0.5, 1]] tf.Images(*grid).cols(4) ###Output _____no_output_____ ###Markdown Starting from the bottom left and moving diagonally to the upper right, the scale of the bundling increases along a diagonal to the upper right, with higher initial bandwidth and higher decay time constants leading to larger-scale bundling. For the largest decay time constant, the algorithm has failed to converge for large initial bandwidths (the bw 0.5 and 1.0 plots on the bottom row), because the algorithm stops at a specified maximum `iterations`, rather than reaching a fully organized state.Of course, even when the algorithm does converge, larger amounts of bundling can magnify small amounts of clumping over large scales, which may or may not be relevant to the questions being asked of this data, so it is important to set these parameters appropriately for the types of structures of interest.<!--max_iterations=10hmap = hv.HoloMap({(it, bw, decay): hv.Curve(hammer_bundle(nodes.data, edges.data, decay=decay, initial_bandwidth=bw, iterations=it)) for decay in [0.1, 0.25, 0.5, 1, 2] for bw in [0.1, 0.2, 0.5, 1] for it in range(max_iterations)}, kdims=['Iteration', 'Initial bandwidth', 'Decay']) nodes_ds = datashade(nodes,cmap=["cyan"])datashade(hmap.grid(['Initial bandwidth', 'Decay']), **sz).map(lambda e_ds: e_ds * nodes, hv.DynamicMap)--> Graphs with categoriesOne of the main uses for visualizations of large graphs is to examine the connectivity patterns from nodes of different categories. Let's consider an artificial example with four groups of highly interconnected nodes: ###Code np.random.seed(1) cats,n,m = 4,80,1000 cnodes = pd.concat([ pd.DataFrame.from_records([("node"+str(i+100*c),"c"+str(c)) for i in range(n)], columns=['name','cat']) for c in range(cats)], ignore_index=True) cnodes.cat=cnodes.cat.astype('category') cedges = pd.concat([ pd.DataFrame(np.random.randint(n*c,n*(c+1), size=(m, 2)), columns=['source', 'target']) for c in range(cats)], ignore_index=True) ###Output _____no_output_____ ###Markdown The ``cnodes`` and ``cedges`` data structures form a graph that has clear structure not visible in a random layout, but is easily extracted using the force-directed approach: ###Code rd = random_layout( cnodes, cedges) fd = forceatlas2_layout(cnodes, cedges) %time rd_d = graphplot(rd, connect_edges(rd,cedges), "Random layout", cat="cat") %time fd_d = graphplot(fd, connect_edges(fd,cedges), "Force-directed", cat="cat") %time rd_b = graphplot(rd, hammer_bundle(rd,cedges), "Random layout, bundled", cat="cat") %time fd_b = graphplot(fd, hammer_bundle(fd,cedges), "Force-directed, bundled",cat="cat") tf.Images(rd_d,fd_d,rd_b,fd_b).cols(2) ###Output _____no_output_____ ###Markdown As you can see, the highly interconnected subgroups are laid out in separate locations in the plane, mostly non-overlapping, allowing these groups to be detected visually in a way that they aren't in a random layout, with or without bundling. Using graphs from NetworkXThe above examples constructed networks by hand. 
A convenient way to get access to a large number of [graph types](https://networkx.github.io/documentation/stable/reference/generators.html) is the separate [NetworkX](https://networkx.readthedocs.io) package. Here, we will select several standard graph structures, lay them each out in the same fixed circular shape using NetworkX, and then show how they will appear without bundling, with moderate levels of bundling, and with high amounts of bundling. ###Code import networkx as nx def ng(graph,name): graph.name = name return graph def nx_layout(graph): layout = nx.circular_layout(graph) data = [[node]+layout[node].tolist() for node in graph.nodes] nodes = pd.DataFrame(data, columns=['id', 'x', 'y']) nodes.set_index('id', inplace=True) edges = pd.DataFrame(list(graph.edges), columns=['source', 'target']) return nodes, edges def nx_plot(graph, name=""): print(graph.name, len(graph.edges)) nodes, edges = nx_layout(graph) direct = connect_edges(nodes, edges) bundled_bw005 = hammer_bundle(nodes, edges) bundled_bw030 = hammer_bundle(nodes, edges, initial_bandwidth=0.30) return [graphplot(nodes, direct, graph.name), graphplot(nodes, bundled_bw005, "Bundled bw=0.05"), graphplot(nodes, bundled_bw030, "Bundled bw=0.30")] n=50 plots = [nx_plot(g) for g in [ng(nx.complete_graph(n), name="Complete"), ng(nx.lollipop_graph(n, 5), name="Lollipop"), ng(nx.barbell_graph(n,2), name="Barbell"), ng(nx.ladder_graph(n), name="Ladder"), ng(nx.circular_ladder_graph(n), name="Circular Ladder"), ng(nx.star_graph(n), name="Star"), ng(nx.cycle_graph(n), name="Cycle")]] tf.Images(*chain.from_iterable(plots)).cols(3) ###Output _____no_output_____ ###Markdown As you can see, both bundled and unbundled representations reflect important aspects of the graph structure, but the bundling results do depend on the parameters chosen. Bundling is also very computationally expensive; nearly all of the time taken to render these plots is for the bundling step.Note that the `star_graph` example above differs from the one in the previous sections, in that all nodes here connect to a node on the outer circle instead of one in the center, which shows clearly how the layout can affect the resulting visualization. Interactive graphsThe above plots all show static images of nodes and edges, with optional category information, but there's no way to see the specific identity of individual nodes. With small numbers of nodes you can try coloring them to convey identity, but in general the only practical way to reveal identity of nodes or edges is typically interactively, as a user inspects individual items. Thus interactive plots are often necessary for doing any exploration of real-world graph data.The simplest way to work with interactive datashaded graphs is to use [HoloViews](http://holoviews.org), which includes specific support for [plotting graphs with and without Datashader](http://holoviews.org/user_guide/Network_Graphs.html): ###Code import holoviews.operation.datashader as hd import holoviews as hv hv.extension("bokeh") circle = hv.Graph(edges, label='Bokeh edges').opts(node_size=5) hnodes = circle.nodes.opts(size=5) dscirc = (hd.dynspread(hd.datashade(circle))*hnodes).relabel("Datashader edges") circle + dscirc ###Output _____no_output_____
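###Markdown For use outside the notebook, any of the shaded images above can be written to a PNG file. A brief sketch (it assumes one of the earlier results, here `fd_b`, is still in memory): ###Code
from datashader.utils import export_image

# Write the bundled force-directed rendering to fd_bundled.png in the working directory.
export_image(fd_b, 'fd_bundled', background='white')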
42_ETEC.ipynb
###Markdown ###Code from datetime import date, datetime
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LinearRegression

def yday(dt):
    # Return the day of the year for a date/datetime.
    return dt.timetuple().tm_yday

yday(date.today())
df = pd.DataFrame()  # unused placeholder DataFrame

# Observation dates and the level reached on each date
dates = [
    "04-11-2019", "21-11-2019", "28-11-2019", "17-12-2019", "27-12-2019",
    "17-02-2020", "20-02-2020", "01-07-2020", "23-07-2020", "12-08-2020",
    "03-09-2020", "14-10-2020", "20-10-2020",
]
level = [0, 1.23, 1.69, 2.03, 2.30, 3.11, 3.31, 3.90, 4.04, 4.81, 5.04, 5.86, 6.60]

fdates = [datetime.strptime(dt, '%d-%m-%Y') for dt in dates]
beginning = fdates[0]
fdates

# Convert each date to the number of days elapsed since the first observation
ndates = [yday(x) + (x.year * 365 - 2019 * 365) - 308 for x in fdates]
ndates

# Level progression over elapsed days (the original plotted an undefined
# variable `grades`; `level` is the intended series)
plt.plot(ndates, level)

# Fit a linear model mapping level -> elapsed days
lr = LinearRegression()
lr.fit(np.array(level).reshape(-1, 1), np.array(ndates).reshape(-1, 1))

# Predicted total number of days to go from level 0 to level 21
cursus_length_in_days = lr.predict(np.array([21]).reshape(-1, 1))
cursus_length_in_days = cursus_length_in_days[0][0]
print(int(cursus_length_in_days / 365))  # years
print(int(cursus_length_in_days % 365))  # days
# Warning: this is the total length of the cursus, from level 0 to level 21;
# change level[] and dates[] to compute your own result.

# Days remaining from the start of the cursus
days_remaining = cursus_length_in_days - yday(beginning)
print("You will get to level 21 in ", int(days_remaining/365), " years and ", int(days_remaining%365), " days")
###Output _____no_output_____
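###Markdown A hedged sketch appended here (not in the original notebook): the prediction above is hard-wired to level 21, but the fitted `lr` model together with `yday()` and `beginning` can be wrapped in a small helper to estimate the time needed to reach any target level. ###Code # Hedged sketch: query the fitted regression for an arbitrary target level.
# Assumes lr, beginning and yday() from the cells above are still in scope.
def days_to_level(model, target_level):
    total_days = float(model.predict(np.array([[target_level]]))[0][0])
    remaining = total_days - yday(beginning)
    return int(remaining / 365), int(remaining % 365)

years, days = days_to_level(lr, 10)
print("Estimated time to reach level 10:", years, "years and", days, "days")
###Output _____no_output_____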
jupyter/mcpi.ipynb
###Markdown Monte-Carlo Algorithm for Estimating $\pi$ in PythonThis notebook will provide a demonstration of how to estimate $\pi$ using a Monte-Carlo simulation algorithm. You can find a nice article [Here](https://www.cantorsparadise.com/estimating-%CF%80-using-monte-carlo-simulations-3459a84b5ef9) that explains how it works (check under *The Easy Way: The Unit Square and the Unit Circle*). ###Code import numpy as np import matplotlib.pyplot as plt import math ###Output _____no_output_____ ###Markdown First, we choose the number of tosses: ###Code n_tosses = 10000 ###Output _____no_output_____ ###Markdown Then, we randomly generate x and y corrdinates between 0 and 1: ###Code x = np.random.rand(n_tosses) y = np.random.rand(n_tosses) ###Output _____no_output_____ ###Markdown Now, we count the number of points inside the circle: ###Code n_in_circle = 0 for i in range(0, n_tosses - 1): if (x[i]**2 + y[i]**2 <= 1): n_in_circle += 1 ###Output _____no_output_____ ###Markdown And here is our estimated $\pi$: ###Code pi_estimate = 4 * (n_in_circle / n_tosses) print("π =", pi_estimate) ###Output π = 3.13 ###Markdown The percent error is: ###Code print(100 * math.fabs(math.pi - pi_estimate) / math.pi, "%") circle_x = x[np.sqrt(x**2 + y**2) <= 1] circle_y = y[np.sqrt(x**2 + y**2) <= 1] fig = plt.figure() plot = fig.add_subplot(111) plot.scatter(x, y, marker='.', color='blue') plot.scatter(circle_x, circle_y, marker='.', color='red') x = np.linspace(0, 1, 100) y = np.sqrt(1 - x**2) plot.plot(x, y, color='black') plot.set_aspect(1.0) plt.rcParams['figure.figsize'] = [20, 20] plt.show() ###Output _____no_output_____
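###Markdown A hedged extra cell (not in the original notebook): the same estimate can be computed without an explicit Python loop by counting the hits with NumPy, which also makes it cheap to watch the estimate converge as the number of tosses grows. ###Code # Hedged sketch: vectorized Monte-Carlo estimate of pi using NumPy only.
def estimate_pi(n_tosses):
    xs = np.random.rand(n_tosses)
    ys = np.random.rand(n_tosses)
    # Fraction of points inside the unit quarter-circle, scaled by 4
    return 4 * np.count_nonzero(xs**2 + ys**2 <= 1) / n_tosses

for n in (1000, 10000, 100000, 1000000):
    print(n, "tosses ->", estimate_pi(n))
###Output _____no_output_____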
msa/models/mnist_dataset/.ipynb_checkpoints/customized_nn_mnist-checkpoint.ipynb
###Markdown Customized Convolutional Neural Network Hyperparameters Sampling on MNIST Dataset- Show network architectures (optimization + Hyperparameter tunning)- Basically Hyperparameters tunning- Applied Dataset Normalization Techniques- Analysis with integrated Tensorboard and Pandas ###Code !pip install tensorflow !pip install tensorboard import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torchvision import torchvision.transforms as transforms from torch.utils.data import DataLoader from torch.utils.tensorboard import SummaryWriter import time import pandas as pd from IPython.display import display from IPython.display import clear_output import simplejson as json torch.set_printoptions(linewidth=120) torch.set_grad_enabled(True) from collections import OrderedDict from collections import namedtuple from itertools import product ###Output _____no_output_____ ###Markdown Non Batch Normalization Network ###Code # Class to create customized network (this is determined by the user) class CustomedNetwork(nn.Module): # CONSTRUCTOR def __init__(self): """Initialize 5 distinct layers of the network for building forward step """ super().__init__() # Convolutional layers self.conv1 = nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5, stride=1) self.conv2 = nn.Conv2d(in_channels=6, out_channels=12, kernel_size=5, stride=1) # Fully connection layers self.fc1 = nn.Linear(in_features=12*4*4, out_features=120, bias=True) self.fc2 = nn.Linear(in_features = 120, out_features=60, bias=True) self.out = nn.Linear(in_features = 60, out_features=10, bias=True) # PUBLIC METHOD def forward(self, x): """Forward propagation of the Customed Neural Network Parameters ---------- x: input batch of images """ # Input layers x = x # Convolution layer 1 x = self.conv1(x) x = F.relu(x) x = F.max_pool2d(x, kernel_size=2, stride=2) # Convolution layer 2 x = self.conv2(x) x = F.relu(x) x = F.max_pool2d(x, kernel_size=2, stride=2) # Process input from convolution input to 1 input for fully connected layer x = x.reshape(-1, 12*4*4) # Linear layer 1 x = self.fc1(x) x = F.relu(x) # Linear layer 2 x = self.fc2(x) x = F.relu(x) # Output layer x = self.out(x) return x customed_net = CustomedNetwork() print(customed_net) ###Output CustomedNetwork( (conv1): Conv2d(1, 6, kernel_size=(5, 5), stride=(1, 1)) (conv2): Conv2d(6, 12, kernel_size=(5, 5), stride=(1, 1)) (fc1): Linear(in_features=192, out_features=120, bias=True) (fc2): Linear(in_features=120, out_features=60, bias=True) (out): Linear(in_features=60, out_features=10, bias=True) ) ###Markdown Batch Normalzation Network ###Code batch_norm_network = nn.Sequential( nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5), nn.ReLU(), nn.MaxPool2d(kernel_size=2, stride=2), nn.BatchNorm2d(6), # batch norm nn.Conv2d(in_channels=6, out_channels=12, kernel_size=5), nn.ReLU(), nn.MaxPool2d(kernel_size=2, stride=2), nn.Flatten(start_dim=1), nn.Linear(in_features=12*4*4, out_features=120), nn.ReLU(), nn.BatchNorm1d(120), # batch norm 1 d since we already flatten out our images nn.Linear(in_features=120, out_features=60), nn.ReLU(), nn.Linear(in_features=60, out_features=10) ) print(batch_norm_network) # Class to create an object to run the surveying parameters lists combinations class RunBuilder(): @staticmethod def get_runs(params): """Get the lists of parameters' values Parameters ---------- params: list of parameters contained of different related parameters """ Run = namedtuple("Run", params.keys()) runs_list = [] for value in 
product(*params.values()): runs_list.append(Run(*value)) return runs_list # Class Run Manage that run the surveys of combinations of values of the RunBuilder() object class RunManager(): # CONSTRUCTORS def __init__(self): """Initialize parameters """ self.epoch_count = 0 self.epoch_loss = 0 self.epoch_num_correct = 0 self.epoch_start_time = None self.run_params = None self.run_count = 0; self.run_data = [] self.run_start_time = None self.network = None self.loader = None self.tb = None # Tensorboard # PUBLIC METHODS def begin_run(self, run, network, loaders): """Start running the values combinations surveys Parameters ---------- run: run list network: neural network loader: DataLoader - basically preprocessed data objects """ self.run_start_time = time.time() # used for keep track of run time self.run_params = run self.run_count += 1 self.network = network self.loader = loader self.tb = SummaryWriter(comment=f"-{run}") images, labels = next(iter(self.loader)) # get the first batch of images and labels grid = torchvision.utils.make_grid(images) self.tb.add_image("image", grid) # Try CUDA self.tb.add_graph(self.network, images.to(getattr(run, "device", "cpu"))) def end_run(self): """End runningthe values combinations surveys """ self.tb.close() # close tensorboard self.epoch_count = 0 # reinitialized the epoch def begin_epoch(self): """Begin the epoch, initialize related variables """ self.epoch_start_time = time.time() self.epoch_count += 1 self.epoch_loss = 0 self.epoch_num_correct = 0 def end_epoch(self): """End the epoch, calculated initialized variables above """ # Calculate run time epoch_duration = time.time() - self.epoch_start_time run_duration = time.time() - self.run_start_time # Calculate the loss and accuracy of the trained dataset loss = self.epoch_loss / len(self.loader.dataset) accuracy = self.epoch_num_correct / len(self.loader.dataset) # Calculate the average loss and accuracy self.tb.add_scalar("Loss", loss, self.epoch_count) self.tb.add_scalar("Accuracy", accuracy, self.epoch_count) # Draw historgram for name, param in self.network.named_parameters(): self.tb.add_histogram(name, param, self.epoch_count) self.tb.add_histogram(f"{name}.grad", param.grad, self.epoch_count) # Build pandas to data output of tensorboard results = OrderedDict() results["run"] = self.run_count results["epoch"] = self.epoch_count results["loss"] = loss results["accuracy"] = accuracy results["epoch duration"] = epoch_duration results["run duration"] = run_duration # Add data in the DataFrames for k,v in self.run_params._asdict().items(): results[k] = v # allow us to see what results match with what param self.run_data.append(results) df = pd.DataFrame.from_dict(self.run_data, orient="columns") # Update Dataframe in .ipynb in real time clear_output(wait=True) display(df) def track_loss(self, loss): """Track the loss Parameters ---------- loss: loss of the training process of a batch """ self.epoch_loss += loss.item() * self.loader.batch_size def track_num_correct(self, preds, labels): """Track total number of correct of a batch Parameters ---------- preds: list of predictions in training process labels: list of labels given in the dataset """ self.epoch_num_correct += self._get_num_correct(preds, labels) def save(self, file_name): """Save the Dataframe to .csv file """ pd.DataFrame.from_dict( self.run_data, orient="columns" ).to_csv(f"{file_name}.csv") # save in csv # to create in tensorboard with open(f"{file_name}.json", "w", encoding="utf-8") as f: json.dump(self.run_data, f, ensure_ascii=False, 
indent = 4) # PRIVATE METHODS @torch.no_grad() def _get_num_correct(self, preds, labels): """Get the total number that the prediction is correct with the labels Parameters ---------- preds: list of predictions labels: list of labels Return ---------- total number that prediction and label are equal when comparing 2 lists """ return preds.argmax(dim=1).eq(labels).sum().item() ###Output _____no_output_____ ###Markdown Get Dataset - MNIST- Normalization: Standardization is a specific type of normalization technique and sometime is referred to as z-score normalization or the standard score. - z = (x-mean)/std Download Original Dataset ###Code train_set = torchvision.datasets.MNIST( root="./data/MNIST", train=True, download=True, transform=transforms.Compose([ # convert image to transforms.ToTensor() ])) print(train_set.data.size()) test_set = torchvision.datasets.MNIST( root="./data/MNIST", train = False, transform=transforms.Compose([ # convert image to transforms.ToTensor() ])) print(test_set.data.size()) ###Output torch.Size([60000, 28, 28]) torch.Size([10000, 28, 28]) ###Markdown Calculate the mean and standard of deviation for normalization ###Code loader = DataLoader(train_set, batch_size=1000, num_workers=1) # create dataloader num_of_pixels = len(train_set) * 28 * 28 # number of total pixel in the image, 28x28 = height and width of the image # Mean total_sum = 0 for batch in loader: total_sum += batch[0].sum() # total sum of all pixel in 1 image mean = total_sum / num_of_pixels # Standard of Dev sum_of_squared_error = 0 for batch in loader: sum_of_squared_error += ((batch[0]-mean).pow(2)).sum() std = torch.sqrt(sum_of_squared_error / num_of_pixels) print(mean) print(std) ###Output tensor(0.1307) tensor(0.3081) ###Markdown Create a new normalized MNIST processed dataset ###Code train_set_normal = torchvision.datasets.MNIST( root="./data/MNIST", train=True, download=True, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize(mean, std) ]) ) print(train_set_normal.data.size()) test_set_normal = torchvision.datasets.MNIST( root="./data/MNIST", train = False, transform=transforms.Compose([ # convert image to transforms.ToTensor(), transforms.Normalize(mean, std) ])) print(test_set_normal.data.size()) ###Output torch.Size([60000, 28, 28]) torch.Size([10000, 28, 28]) ###Markdown Create a DataLoader and analyse the batch ###Code loader = DataLoader(train_set_normal, batch_size=len(train_set), num_workers=1) data = next(iter(loader)) data[0].mean(), data[0].std() ###Output _____no_output_____ ###Markdown Create trainsets list ###Code trainsets = { 'not_normal': train_set, 'normal': train_set_normal } ###Output _____no_output_____ ###Markdown Create networks list ###Code customed_network = CustomedNetwork()# allow to try CUDA networks = { 'no_batch_norm_net': customed_network, 'batch_norm_net': batch_norm_network } ###Output _____no_output_____ ###Markdown Training & Testing Process ###Code params = OrderedDict( lr = [0.01], batch_size = [1000], num_workers = [0], shuffle = [True], device = ['cpu'], trainset = ['not_normal', 'normal'], network = list(networks.keys()), num_epochs = [2], test_accuracy = [0] ) m = RunManager() for run in RunBuilder.get_runs(params): params['test_accuracy'] = 0 # restart writing test_accuracy device = torch.device(run.device) # allow to try CUDA network = networks[run.network].to(device) # allow to try CUDA network.train() # mark network as train train_loader = torch.utils.data.DataLoader(trainsets[run.trainset], batch_size=run.batch_size, 
shuffle=run.shuffle, num_workers=run.num_workers) # num worker to speed up process for dataloader optimizer = optim.Adam(network.parameters(), lr=run.lr) m.begin_run(run, network, train_loader) for epoch in range(run.num_epochs): m.begin_epoch() for batch in train_loader: images = batch[0].to(device) # allow to try CUDA labels = batch[1].to(device) # allow to try CUDA preds = network(images) # pass batch loss = F.cross_entropy(preds, labels) # calculate loss optimizer.zero_grad() # zero gradient loss.backward() # back prop for calculating gradient optimizer.step() # update weights m.track_loss(loss) m.track_num_correct(preds, labels) m.end_epoch() m.end_run() if(run.trainset == 'not_normal'): # Get the testing dataset test_loader = torch.utils.data.DataLoader(test_set, batch_size=run.batch_size, shuffle=run.shuffle, num_workers=run.num_workers) network.eval() with torch.no_grad(): for batch in test_loader: images = batch[0].to(device) # allow to try CUDA labels = batch[1].to(device) # allow to try CUDA preds = network(images) # pass batch accuracy = (preds.argmax(dim=1).eq(labels).sum().item()) / float(run.batch_size) print('Test Accuracy of the not-normalized-dataset model on the 10000 test images: %.2f' % accuracy) # params['test_accuracy'] = accuracy elif(run.trainset == "normal"): # Get the testing dataset test_loader_normal = torch.utils.data.DataLoader(test_set_normal, batch_size=run.batch_size, shuffle=run.shuffle, num_workers=run.num_workers) network.eval() with torch.no_grad(): for batch in test_loader: images = batch[0].to(device) # allow to try CUDA labels = batch[1].to(device) # allow to try CUDA preds = network(images) # pass batch accuracy = (preds.argmax(dim=1).eq(labels).sum().item()) / float(run.batch_size) print('Test Accuracy of the normalized-dataset model on the 10000 test images: %.2f' % accuracy) # params['test_accuracy'] = accuracy # Just write this to another data frame then append it then save m.save("results") # Sort DataFrame by Accuracy pd.DataFrame.from_dict(m.run_data, orient="columns").sort_values("accuracy", ascending=False) ###Output _____no_output_____
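###Markdown A hedged sketch added after the run (not in the original notebook): once `m.save("results")` has written results.csv, the run log can be reloaded with pandas to pick out the best epoch for every network/trainset combination. This assumes results.csv sits in the current working directory and has the columns built by RunManager above. ###Code # Hedged sketch: reload the saved run log and select the highest-accuracy
# epoch per (network, trainset) combination.
results = pd.read_csv("results.csv", index_col=0)
best_rows = results.loc[results.groupby(["network", "trainset"])["accuracy"].idxmax()]
print(best_rows[["network", "trainset", "epoch", "loss", "accuracy"]])
###Output _____no_output_____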
03-CNN-Project-Exercise-Solutions.ipynb
###Markdown CNN-Project-Exercise-SolutionsWe'll be using the CIFAR-10 dataset, which is very famous dataset for image recognition! The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes, with 6000 images per class. There are 50000 training images and 10000 test images. The dataset is divided into five training batches and one test batch, each with 10000 images. The test batch contains exactly 1000 randomly-selected images from each class. The training batches contain the remaining images in random order, but some training batches may contain more images from one class than another. Between them, the training batches contain exactly 5000 images from each class. Follow the Instructions in Bold, if you get stuck somewhere, view the solutions video! Step 0: Get the Data** *Note: If you have trouble with this just watch the solutions video. This doesn't really have anything to do with the exercise, its more about setting up your data. Please make sure to watch the solutions video before posting any QA questions.* ** ** Download the data for CIFAR from here: https://www.cs.toronto.edu/~kriz/cifar.html ****Specifically the CIFAR-10 python version link: https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz **** Remember the directory you save the file in! ** ###Code # Put file path as a string here CIFAR_DIR = 'cifar-10-batches-py/' ###Output _____no_output_____ ###Markdown The archive contains the files data_batch_1, data_batch_2, ..., data_batch_5, as well as test_batch. Each of these files is a Python "pickled" object produced with cPickle. ** Load the Data. Use the Code Below to load the data: ** ###Code def unpickle(file): import pickle with open(file, 'rb') as fo: cifar_dict = pickle.load(fo, encoding='bytes') return cifar_dict dirs = ['batches.meta','data_batch_1','data_batch_2','data_batch_3','data_batch_4','data_batch_5','test_batch'] all_data = [0,1,2,3,4,5,6] for i,direc in zip(all_data,dirs): all_data[i] = unpickle(CIFAR_DIR+direc) batch_meta = all_data[0] data_batch1 = all_data[1] data_batch2 = all_data[2] data_batch3 = all_data[3] data_batch4 = all_data[4] data_batch5 = all_data[5] test_batch = all_data[6] batch_meta ###Output _____no_output_____ ###Markdown ** Why the 'b's in front of the string? **Bytes literals are always prefixed with 'b' or 'B'; they produce an instance of the bytes type instead of the str type. They may only contain ASCII characters; bytes with a numeric value of 128 or greater must be expressed with escapes.https://stackoverflow.com/questions/6269765/what-does-the-b-character-do-in-front-of-a-string-literal ###Code data_batch1.keys() ###Output _____no_output_____ ###Markdown Loaded in this way, each of the batch files contains a dictionary with the following elements:* data -- a 10000x3072 numpy array of uint8s. Each row of the array stores a 32x32 colour image. The first 1024 entries contain the red channel values, the next 1024 the green, and the final 1024 the blue. The image is stored in row-major order, so that the first 32 entries of the array are the red channel values of the first row of the image.* labels -- a list of 10000 numbers in the range 0-9. The number at index i indicates the label of the ith image in the array data.The dataset contains another file, called batches.meta. It too contains a Python dictionary object. It has the following entries:* label_names -- a 10-element list which gives meaningful names to the numeric labels in the labels array described above. 
For example, label_names[0] == "airplane", label_names[1] == "automobile", etc. Display a single image using matplotlib.** Grab a single image from data_batch1 and ###Code import matplotlib.pyplot as plt %matplotlib inline import numpy as np X = data_batch1[b"data"] X = X.reshape(10000, 3, 32, 32).transpose(0,2,3,1).astype("uint8") X[0].max() (X[0]/255).max() plt.imshow(X[0]) plt.imshow(X[1]) plt.imshow(X[4]) ###Output _____no_output_____ ###Markdown Helper Functions for Dealing With Data.** Use the provided code below to help with dealing with grabbing the next batch once you've gotten ready to create the Graph Session. Can you break down how it works? ** ###Code def one_hot_encode(vec, vals=10): ''' For use to one-hot encode the 10- possible labels ''' n = len(vec) out = np.zeros((n, vals)) out[range(n), vec] = 1 return out class CifarHelper(): def __init__(self): self.i = 0 self.all_train_batches = [data_batch1,data_batch2,data_batch3,data_batch4,data_batch5] self.test_batch = [test_batch] self.training_images = None self.training_labels = None self.test_images = None self.test_labels = None def set_up_images(self): print("Setting Up Training Images and Labels") self.training_images = np.vstack([d[b"data"] for d in self.all_train_batches]) train_len = len(self.training_images) self.training_images = self.training_images.reshape(train_len,3,32,32).transpose(0,2,3,1)/255 self.training_labels = one_hot_encode(np.hstack([d[b"labels"] for d in self.all_train_batches]), 10) print("Setting Up Test Images and Labels") self.test_images = np.vstack([d[b"data"] for d in self.test_batch]) test_len = len(self.test_images) self.test_images = self.test_images.reshape(test_len,3,32,32).transpose(0,2,3,1)/255 self.test_labels = one_hot_encode(np.hstack([d[b"labels"] for d in self.test_batch]), 10) def next_batch(self, batch_size): x = self.training_images[self.i:self.i+batch_size].reshape(100,32,32,3) y = self.training_labels[self.i:self.i+batch_size] self.i = (self.i + batch_size) % len(self.training_images) return x, y ###Output _____no_output_____ ###Markdown ** How to use the above code: ** ###Code # Before Your tf.Session run these two lines ch = CifarHelper() ch.set_up_images() # During your session to grab the next batch use this line # (Just like we did for mnist.train.next_batch) # batch = ch.next_batch(100) ###Output Setting Up Training Images and Labels Setting Up Test Images and Labels ###Markdown Creating the Model** Import tensorflow ** ###Code import tensorflow as tf ###Output _____no_output_____ ###Markdown ** Create 2 placeholders, x and y_true. Their shapes should be: *** x shape = [None,32,32,3]* y_true shape = [None,10] ###Code x = tf.placeholder(tf.float32,shape=[None,32,32,3]) y_true = tf.placeholder(tf.float32,shape=[None,10]) ###Output _____no_output_____ ###Markdown ** Create one more placeholder called hold_prob. No need for shape here. This placeholder will just hold a single probability for the dropout. ** ###Code hold_prob = tf.placeholder(tf.float32) ###Output _____no_output_____ ###Markdown Helper Functions** Grab the helper functions from MNIST with CNN (or recreate them here yourself for a hard challenge!). 
You'll need: *** init_weights* init_bias* conv2d* max_pool_2by2* convolutional_layer* normal_full_layer ###Code def init_weights(shape): init_random_dist = tf.truncated_normal(shape, stddev=0.1) return tf.Variable(init_random_dist) def init_bias(shape): init_bias_vals = tf.constant(0.1, shape=shape) return tf.Variable(init_bias_vals) def conv2d(x, W): return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') def max_pool_2by2(x): return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') def convolutional_layer(input_x, shape): W = init_weights(shape) b = init_bias([shape[3]]) return tf.nn.relu(conv2d(input_x, W) + b) def normal_full_layer(input_layer, size): input_size = int(input_layer.get_shape()[1]) W = init_weights([input_size, size]) b = init_bias([size]) return tf.matmul(input_layer, W) + b ###Output _____no_output_____ ###Markdown Create the Layers** Create a convolutional layer and a pooling layer as we did for MNIST. **** Its up to you what the 2d size of the convolution should be, but the last two digits need to be 3 and 32 because of the 3 color channels and 32 pixels. So for example you could use:** convo_1 = convolutional_layer(x,shape=[4,4,3,32]) ###Code convo_1 = convolutional_layer(x,shape=[4,4,3,32]) convo_1_pooling = max_pool_2by2(convo_1) ###Output _____no_output_____ ###Markdown ** Create the next convolutional and pooling layers. The last two dimensions of the convo_2 layer should be 32,64 ** ###Code convo_2 = convolutional_layer(convo_1_pooling,shape=[4,4,32,64]) convo_2_pooling = max_pool_2by2(convo_2) ###Output _____no_output_____ ###Markdown ** Now create a flattened layer by reshaping the pooling layer into [-1,8 \* 8 \* 64] or [-1,4096] ** ###Code 8*8*64 convo_2_flat = tf.reshape(convo_2_pooling,[-1,8*8*64]) ###Output _____no_output_____ ###Markdown ** Create a new full layer using the normal_full_layer function and passing in your flattend convolutional 2 layer with size=1024. (You could also choose to reduce this to something like 512)** ###Code full_layer_one = tf.nn.relu(normal_full_layer(convo_2_flat,1024)) ###Output _____no_output_____ ###Markdown ** Now create the dropout layer with tf.nn.dropout, remember to pass in your hold_prob placeholder. ** ###Code full_one_dropout = tf.nn.dropout(full_layer_one,keep_prob=hold_prob) ###Output _____no_output_____ ###Markdown ** Finally set the output to y_pred by passing in the dropout layer into the normal_full_layer function. The size should be 10 because of the 10 possible labels** ###Code y_pred = normal_full_layer(full_one_dropout,10) ###Output _____no_output_____ ###Markdown Loss Function** Create a cross_entropy loss function ** ###Code cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_true,logits=y_pred)) ###Output _____no_output_____ ###Markdown Optimizer** Create the optimizer using an Adam Optimizer. ** ###Code optimizer = tf.train.AdamOptimizer(learning_rate=0.001) train = optimizer.minimize(cross_entropy) ###Output _____no_output_____ ###Markdown ** Create a variable to intialize all the global tf variables. ** ###Code init = tf.global_variables_initializer() ###Output _____no_output_____ ###Markdown Graph Session** Perform the training and test print outs in a Tf session and run your model! 
** ###Code with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for i in range(5000): batch = ch.next_batch(100) sess.run(train, feed_dict={x: batch[0], y_true: batch[1], hold_prob: 0.5}) # PRINT OUT A MESSAGE EVERY 100 STEPS if i%100 == 0: print('Currently on step {}'.format(i)) print('Accuracy is:') # Test the Train Model matches = tf.equal(tf.argmax(y_pred,1),tf.argmax(y_true,1)) acc = tf.reduce_mean(tf.cast(matches,tf.float32)) print(sess.run(acc,feed_dict={x:ch.test_images,y_true:ch.test_labels,hold_prob:1.0})) print('\n') ###Output Currently on step 0 Accuracy is:
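###Markdown A hedged sketch appended after the exercise (not part of the original solution): the accuracy check above feeds all 10,000 test images at once, which can exhaust GPU memory; the helper below evaluates the test set in smaller batches instead. It assumes the graph tensors (`x`, `hold_prob`, `y_pred`) and the `ch` CifarHelper instance from the cells above. ###Code # Hedged sketch: batched test-set evaluation for the trained CIFAR-10 model.
pred_cls = tf.argmax(y_pred, 1)
true_cls = np.argmax(ch.test_labels, axis=1)

def batched_test_accuracy(session, batch_size=500):
    correct = 0
    n = len(ch.test_images)
    for start in range(0, n, batch_size):
        end = start + batch_size
        feed = {x: ch.test_images[start:end], hold_prob: 1.0}
        preds = session.run(pred_cls, feed_dict=feed)
        correct += int((preds == true_cls[start:end]).sum())
    return correct / n

# Example usage (inside a session where the model has already been trained):
# print("Batched test accuracy:", batched_test_accuracy(sess))
###Output _____no_output_____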
06 BigData/05 sem/mobile-price-classification.ipynb
###Markdown Input ###Code # This Python 3 environment comes with many helpful analytics libraries installed # It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python # For example, here's several helpful packages to load in import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import matplotlib.pyplot as plt %matplotlib inline # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename)) # Any results you write to the current directory are saved as output. df_train = pd.read_csv('/kaggle/input/mobile-price-classification/train.csv') df_test = pd.read_csv('/kaggle/input/mobile-price-classification/test.csv') df_train.head() df_test.head() df_train.describe() df_train.info() ###Output _____no_output_____ ###Markdown Visualization ###Code sns.heatmap(df_train.isnull(),yticklabels=False,cbar=False,cmap='viridis') df_train.corr() fig = plt.figure(figsize=(15,12)) sns.heatmap(df_train.corr()) df_train['price_range'].unique() sns.pairplot(df_train,hue='price_range') plt.hist(df_train['battery_power']) plt.show() plt.hist(df_train['ram']) plt.show() sns.countplot(df_train['price_range']) plt.show() sns.boxplot(df_train['price_range'],df_train['talk_time']) sns.countplot(df_train['dual_sim']) plt.show() sns.boxplot(df_train['dual_sim'],df_train['price_range']) plt.hist(df_train['clock_speed']) sns.boxplot(df_train['price_range'],df_train['clock_speed']) sns.boxplot(df_train['fc'],df_train['price_range']) plt.show() df_train['n_cores'].unique() sns.boxplot(df_train['wifi'],df_train['price_range']) plt.show() ###Output _____no_output_____ ###Markdown Phones with 3G supported ###Code labels = ["3G-supported",'Not supported'] values = df_train['three_g'].value_counts().values fig1, ax1 = plt.subplots() colors = ['gold', 'lightskyblue'] ax1.pie(values, labels=labels, autopct='%1.1f%%',shadow=True,startangle=90,colors=colors) plt.show() ###Output _____no_output_____ ###Markdown Phones with 4G supported ###Code labels = ["4G-supported",'Not supported'] values = df_train['four_g'].value_counts().values fig1, ax1 = plt.subplots() colors = ['gold', 'lightskyblue'] ax1.pie(values, labels=labels, autopct='%1.1f%%',shadow=True,startangle=90,colors=colors) plt.show() plt.figure(figsize=(10,6)) df_train['fc'].hist(alpha=0.5,color='blue',label='Front camera') df_train['pc'].hist(alpha=0.5,color='red',label='Primary camera') plt.legend() plt.xlabel('MegaPixels') from sklearn.preprocessing import StandardScaler from sklearn.model_selection import train_test_split scaler = StandardScaler() x = df_train.drop('price_range',axis=1) y = df_train['price_range'] scaler.fit(x) x_transformed = scaler.transform(x) x_train,x_test,y_train,y_test = train_test_split(x_transformed,y,test_size=0.3) ###Output _____no_output_____ ###Markdown Linear Regression ###Code #Linear Regression from sklearn.linear_model import LinearRegression lm = LinearRegression() lm.fit(x_train,y_train) lm.score(x_train,y_train) ###Output _____no_output_____ ###Markdown Logistic Regression ###Code #Logistic Regression from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score from sklearn.metrics import confusion_matrix,classification_report model = LogisticRegression() 
model.fit(x_train,y_train) y_train_pred = model.predict(x_train) y_test_pred = model.predict(x_test) print("Train Set Accuracy:"+str(accuracy_score(y_train_pred,y_train)*100)) print("Test Set Accuracy:"+str(accuracy_score(y_test_pred,y_test)*100)) print("\nConfusion Matrix:\n%s"%confusion_matrix(y_test_pred,y_test)) print("\nClassification Report:\n%s"%classification_report(y_test_pred,y_test)) ###Output _____no_output_____ ###Markdown KNN ###Code #KNN from sklearn.neighbors import KNeighborsClassifier knn = KNeighborsClassifier(n_neighbors=15) knn.fit(x_train,y_train) knn.score(x_test,y_test) pred = knn.predict(x_test) error_rate = [] for i in range(1,20): knn = KNeighborsClassifier(n_neighbors=i) knn.fit(x_train,y_train) pred_i = knn.predict(x_test) error_rate.append(np.mean(pred_i != y_test)) plt.figure(figsize=(10,6)) plt.plot(range(1,20),error_rate,color='blue', linestyle='dashed', marker='o', markerfacecolor='red', markersize=5) plt.title('Error Rate vs. K Value') plt.xlabel('K') plt.ylabel('Error Rate') print(classification_report(y_test,pred)) matrix=confusion_matrix(y_test,pred) print(matrix) ###Output _____no_output_____ ###Markdown Decision Tree ###Code #Decision Tree from sklearn.tree import DecisionTreeClassifier dtree = DecisionTreeClassifier() dtree.fit(x_train,y_train) dtree.score(x_test,y_test) ###Output _____no_output_____ ###Markdown Random Forest ###Code #Random Forest from sklearn.ensemble import RandomForestClassifier rfc = RandomForestClassifier(n_estimators=100) rfc.fit(x_train, y_train) rfc.score(x_test,y_test) ###Output _____no_output_____ ###Markdown SVM ###Code #SVM from sklearn.svm import SVC model = SVC() model.fit(x_train,y_train) y_train_pred = model.predict(x_train) y_test_pred = model.predict(x_test) print("Train Set Accuracy:"+str(accuracy_score(y_train_pred,y_train)*100)) print("Test Set Accuracy:"+str(accuracy_score(y_test_pred,y_test)*100)) print("\nConfusion Matrix:\n%s"%confusion_matrix(y_test_pred,y_test)) print("\nClassificationReport:\n%s"%classification_report(y_test_pred,y_test)) ###Output _____no_output_____ ###Markdown Gradient Boosting ###Code #Gradient Boosting from sklearn.ensemble import GradientBoostingClassifier model = GradientBoostingClassifier() model.fit(x_train,y_train) y_train_pred = model.predict(x_train) y_test_pred = model.predict(x_test) print("Train Set Accuracy:"+str(accuracy_score(y_train_pred,y_train)*100)) print("Test Set Accuracy:"+str(accuracy_score(y_test_pred,y_test)*100)) print("\nConfusion Matrix:\n%s"%confusion_matrix(y_test_pred,y_test)) print("\nClassificationReport:\n%s"%classification_report(y_test_pred,y_test)) ###Output _____no_output_____
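###Markdown A hedged sketch appended to the comparison above (not part of the original notebook): the same classifiers can also be compared with 5-fold cross-validation on the scaled training data, which gives a ranking that depends less on a single train/test split. `cross_val_score` is imported here from scikit-learn; the estimators and `x_transformed`/`y` come from the cells above. ###Code # Hedged sketch: 5-fold cross-validation comparison of the models used above.
from sklearn.model_selection import cross_val_score

models = {
    "Logistic Regression": LogisticRegression(),
    "KNN (k=15)": KNeighborsClassifier(n_neighbors=15),
    "Decision Tree": DecisionTreeClassifier(),
    "Random Forest": RandomForestClassifier(n_estimators=100),
    "SVM": SVC(),
    "Gradient Boosting": GradientBoostingClassifier(),
}
for name, clf in models.items():
    scores = cross_val_score(clf, x_transformed, y, cv=5)
    print("{}: mean accuracy {:.3f} (+/- {:.3f})".format(name, scores.mean(), scores.std()))
###Output _____no_output_____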
.ipynb_checkpoints/newegg_laptop_webscraper_app_oop_classobj_works-Copy1-checkpoint.ipynb
###Markdown Run this below to pull single items and test parts of the scraper and the head on off ###Code executable_path = {'executable_path': './chromedriver.exe'} url = input("Step 1) Please copy and paste your laptop query that you want to webscrape, and press enter: ") # Step 2) # Function to ask users if they want to watch the Bot (headless = False) work OR not (headless = True) # Lastly, will take you directly to the webpage that was inputted head = '' browser ='' def head_on_off(executable_path): # Have moved two preset variables, head and browser that are both " = '' " # assigning these as global variables enable us to reference them outside and inside the function global head global browser # options creates a bound to an answer options = [1, 2] #executable_path = {'executable_path': './chromedriver.exe'} # for all cases where users input in a value that is not valid while head not in options: head = int(input('Do you want the desktop watch the bot work? Enter a number: 1 - YES | 2 - NO . Your Answer: ')) if head not in options: print("That was not a valid answer. Please try again. ") # For cases where users enter in valid options: if head == options[0]: print('Head is activated. Please view only the new automated Google Chrome web browser. ') print('Do not make any adjustments to this automated window while the program runs, as it may produce errors or undesired outputs. ') browser = Browser('chrome', **executable_path, headless=False) if head == options[1]: print('Headless mode activated. No web browser will pop up. Please proceeed. ') browser = Browser('chrome', **executable_path, headless=True) # visit the target site browser.visit(url) global current_url current_url = browser.url #print(current_url) return current_url #head_on_off(executable_path) #time.sleep(5) # Step 3) # Use Splinter to grab the current url, to setup request to pull URL #current_url = browser.url #+ '&Page=' #+ str(turn_page) # Use Request.get() to pull the current url current_url = browser.url print(current_url) response = requests.get(current_url) response # Step 4) # Use BeautifulSoup to grab all the HTML using the htmlparser current_page_soup = soup(response.text, 'html.parser') current_page_soup #current_page_soup.find_all('a', class_="item-title")[0]['href'] current_page_soup.find_all('div', class_="nav-x-body-top-bar fix")[0].text.split('\n')[5] current_page_soup.find_all('h1', class_="page-title-text")[0].text #current_page_soup.find_all("a", class_="item-title")[0].text #current_page_soup.find_all("div", class_="item-container") #example = current_page_soup.find_all('a', class_="item-title")[0]['href'].split('p/')[1].split('?')[0] try: example1 = current_page_soup.find_all('a', class_="item-title")[0]['href'].split('p/')[1].split('?')[0] print("example 1") print(example1) except (IndexError) as e: example2 = current_page_soup.find_all('a', class_="item-title")[0]['href'].split('p/')[1] print("example 2") print(example2) #bool(example.split('?')) # if bool(example.split('?')) == True: # #example.split('?') # if re.search('?', example) == True: # print(True) example.split('?')[0] current_page_soup.find_all('a', class_="item-title")[0]['href'].split('p/')[1] # Step 5) Are there scrappable items-contrainers on the page? 
List first, last and count, also how many pages def scrappable_y_n(current_page_soup): global containers containers = current_page_soup.find_all("div", class_="item-container") # print first and last objects so users can understand what the output will be print("Preview: expect these scrapped off this page, and for every other total results pages, if there's more than one: ") print("="*35) # max items should be 36 counter = 0 for con in containers: try: counter += 1 product_details = con.find_all("a", class_="item-title")[0].text product_price = con.find_all("li", class_="price-current")[0].text.split()[0] print(f'{counter}) {product_details} | Price: {product_price}') print("-"*35) except (IndexError) as e: print(f"{counter}) This item was not scrappable. Skipped. ") print("-"*35) print("="*60) if counter == 0: print("Unable to scrap this link. ") else: print(f"{len(containers)} Scrappable Objects on the page. ") #return current_page_soup #scrappable_y_n(current_page_soup) # Create basic classes here and then have the function create product objects AND export out to CSV class Product_catalog: all_prod_count = 0 def __init__(self, general_category): # computer systems self.general_category = general_category Product_catalog.all_prod_count += 1 def count_prod(self): return int(self.all_prod_count) #return '{}'.format(self.general_category) class Sub_category(Product_catalog): # laptops/notebooks, gaming sub_category_ct = 0 def __init__(self, general_category, sub_categ, item_num, brand, price, img_link, prod_link, model_specifications, current_promotions): super().__init__(general_category) Sub_category.sub_category_ct += 1 self.sub_categ = sub_categ self.item_num = item_num self.brand = brand self.price = price self.img_link = img_link self.prod_link = prod_link self.model_specifications = model_specifications self.current_promotions = current_promotions # TEST CREATING OBJECTS AND IT WORKS Sub_category( "Computer_Systems", "Laptops/Notebooks", 'Item=9SIA7AB8D73120', "HP", 1449.00, "//c1.neweggimages.com/ProductImageCompressAll300/A7AB_1_201811092013621813.jpg", 'https://www.newegg.com/p/1TS-000D-032G0?Item=9SIA7AB8D73120', 'HP EliteBook 840 G5 Premium School and Business Laptop (Intel 8th Gen i7-8550U Quad-Core, 16GB RAM, 256GB PCIe SSD, 14" FHD 1920x1080 Sure View Display, Thunderbolt3, NFC, Fingerprint, Win 10 Pro)', 'Free Expedited Shipping') Sub_category( "Computer_Systems", "Laptops/Notebooks", 'Item=0000000000000', "HP", 1449.00, "//c1.neweggimages.com/ProductImageCompressAll300/A7AB_1_201811092013621813.jpg", 'https://www.newegg.com/p/1TS-000D-032G0?Item=AKAKAKAKAKA', 'HP EliteBook 840 G5 Premium School and Business Laptop (Intel 8th Gen i7-8550U Quad-Core, 16GB RAM, 256GB PCIe SSD, 14" FHD 1920x1080 Sure View Display, Thunderbolt3, NFC, Fingerprint, Win 10 Pro)', 'Free Expedited Shipping') #Product_catalog.all_prod_count # learning purposes Sub_category.__dict__ #Product_catalog.__dict__ print(Product_catalog.count_prod) # need to pass in the Sub_category for it to know what to count Product_catalog.count_prod(Sub_category) lptp_1.count_prod() Sub_category.sub_category_ct keys_list = list(lptp_1.__dict__.keys()) keys_list lptp_1.__dict__.values() dict_values_list = list(lptp_1.__dict__.values()) dict_values_list df1 = pd.DataFrame({ 'keys': keys_list, 'values': dict_values_list }) df1.transpose() # handy trick - to see all data from the object. 
#print(prod_1.__dict__) #prod_1 lptp_1 = sub_category("Computer_Systems", Product_catalog.all_prod_count,"Laptops/Notebooks", sub_category.sub_cat_ct, "HP", 1449.00, "//c1.neweggimages.com/ProductImageCompressAll300/A7AB_1_201811092013621813.jpg", 'https://www.newegg.com/p/1TS-000D-032G0?Item=9SIA7AB8D73120', 'HP EliteBook 840 G5 Premium School and Business Laptop (Intel 8th Gen i7-8550U Quad-Core, 16GB RAM, 256GB PCIe SSD, 14" FHD 1920x1080 Sure View Display, Thunderbolt3, NFC, Fingerprint, Win 10 Pro)', 'Free Expedited Shipping') lptp_1.__dict__ prod_2.__dict__ ###Output _____no_output_____ ###Markdown SKIP THIS ONE - NEXT ONE ###Code # reenabled item_number def newegg_page_scraper(containers, turn_page): #before: (containers, turn_page) images = [] product_brands = [] product_models = [] product_links = [] item_numbers = [] product_categories = [] promotions = [] prices = [] shipping_terms = [] page_nums = [] for con in containers: try: page_counter = turn_page page_nums.append(int(turn_page)) image = con.a.img["src"] #print(image) images.append(image) prd_title = con.find_all('a', class_="item-title")[0].text product_models.append(prd_title) product_link = con.find_all('a', class_="item-title")[0]['href'] product_links.append(product_link) shipping = con.find_all('li', class_='price-ship')[0].text.strip().split()[0] if shipping != "Free": shipping = shipping.replace('$', '') shipping_terms.append(shipping) else: shipping = 0.00 shipping_terms.append(shipping) brand_name = con.find_all('a', class_="item-brand")[0].img["title"] product_brands.append(brand_name) except (IndexError, ValueError) as e: # if there's no item_brand container, take the Brand from product details product_brands.append(con.find_all('a', class_="item-title")[0].text.split()[0]) #print(f"{e} block 1") try: current_promo = con.find_all("p", class_="item-promo")[0].text promotions.append(current_promo) except: promotions.append('null') #print(f"{e} block 2") try: price = con.find_all('li', class_="price-current")[0].text.split()[0].replace('$','').replace(',', '') prices.append(price) except (IndexError, ValueError) as e: prices.append('null') #print(f"{e} block 3") try: item_num = con.find_all('a', class_="item-title")[0]['href'].split('p/')[1].split('?')[0] item_numbers.append(item_num) except (IndexError) as e: item_num = con.find_all('a', class_="item-title")[0]['href'].split('p/')[1] item_numbers.append(item_num) df = pd.DataFrame({ 'item_number': item_numbers, 'brand': product_brands, 'model_specifications': product_models, 'price': prices, 'current_promotions': promotions, 'shipping': shipping_terms, 'page_number': page_nums, 'product_links': product_links, 'image_link': images }) df['general_category'] = current_page_soup.find_all('div', class_="nav-x-body-top-bar fix")[0].text.split('\n')[5] df['product_category'] = current_page_soup.find_all('h1', class_="page-title-text")[0].text # rearrange columns df = df[['item_number', 'general_category','product_category', 'page_number' ,'brand','model_specifications' ,'current_promotions' ,'price' ,'shipping' ,'product_links','image_link']] global product_category product_category = df['product_category'].unique()[0] # eliminate special characters in a string if it exists product_category = ''.join(e for e in product_category if e.isalnum()) #return_list.append(product_category) global items_scraped items_scraped = len(df['model_specifications']) df.to_csv(f'./processing/{current_date}_{product_category}_{items_scraped}_scraped_page{turn_page}.csv') return 
items_scraped, product_category #df.head() #newegg_page_scraper(containers, turn_page) ###Output _____no_output_____ ###Markdown THIS ONE ###Code #print(containers[1].find_all('a', class_="item-brand")[0].img["title"]) # TEST VERSION FOR CLASSES ############################# def newegg_page_scraper(containers, turn_page): #before: (containers, turn_page) page_nums = [] general_category = [] product_categories = [] images = [] product_brands = [] product_models = [] product_links = [] item_numbers = [] promotions = [] prices = [] shipping_terms = [] global gen_category for con in containers: try: page_counter = turn_page page_nums.append(int(turn_page)) gen_category = target_page_soup.find_all('div', class_="nav-x-body-top-bar fix")[0].text.split('\n')[5] general_category.append(gen_category) prod_category = target_page_soup.find_all('h1', class_="page-title-text")[0].text product_categories.append(prod_category) prd_title = con.find_all('a', class_="item-title")[0].text product_models.append(prd_title) product_link = con.find_all('a', class_="item-title")[0]['href'] product_links.append(product_link) shipping = con.find_all('li', class_='price-ship')[0].text.strip().split()[0] if shipping != "Free": shipping = shipping.replace('$', '') shipping_terms.append(shipping) else: shipping = 0.00 shipping_terms.append(shipping) brand_name = con.find_all('a', class_="item-brand")[0].img["title"] product_brands.append(brand_name) except (IndexError, TypeError, ValueError) as e: # if there's no item_brand container, take the Brand from product details product_brands.append(con.find_all('a', class_="item-title")[0].text.split()[0]) finally: product_brands.append('no brand data available / unable to scrape - refer to screenshots') try: current_promo = con.find_all("p", class_="item-promo")[0].text promotions.append(current_promo) except (IndexError, TypeError, ValueError) as e: promotions.append('no promotion / unable to scrape - refer to screenshots') try: image = con.a.img["src"] images.append(image) except (IndexError, TypeError, ValueError) as e: images.append("unable to scrape image / not available - refer to screenshots") try: price = con.find_all('li', class_="price-current")[0].text.split()[0].replace('$','').replace(',', '') prices.append(price) except (IndexError, TypeError, ValueError) as e: prices.append('no promotion / unable to scrape - refer to screenshots') #print(f"{e} block 3") try: item_num = con.find_all('a', class_="item-title")[0]['href'].split('p/')[1].split('?')[0] item_numbers.append(item_num) except (IndexError, TypeError, ValueError) as e: item_num = con.find_all('a', class_="item-title")[0]['href'].split('p/')[1] item_numbers.append(item_num) df = pd.DataFrame({ 'item_number': item_numbers, 'general_category': general_category, 'product_category': product_categories, 'brand': product_brands, 'model_specifications': product_models, 'price': prices, 'current_promotions': promotions, 'shipping': shipping_terms, 'page_number': page_nums, 'product_links': product_links, 'image_link': images }) df = df[['item_number', 'general_category','product_category', 'page_number' ,'brand','model_specifications' ,'current_promotions' ,'price' ,'shipping' ,'product_links','image_link']] global scraped_dict scraped_dict = df.to_dict('records') global pdt_category pdt_category = df['product_category'].unique()[0] # eliminate special characters in a string if it exists pdt_category = ''.join(e for e in pdt_category if e.isalnum()) global items_scraped items_scraped = 
len(df['model_specifications']) df.to_csv(f'./processing/{current_date}_{pdt_category}_{items_scraped}_scraped_page{turn_page}.csv') return scraped_dict, items_scraped, pdt_category #df.head() #newegg_page_scraper(containers, turn_page) ####################################################################################### # create a function to return results pages, if exists, otherwise just scrape one page def results_pages(current_page_soup): # Use BeautifulSoup to extract the total results page number results_pages = current_page_soup.find_all('span', class_="list-tool-pagination-text")[0].text.strip() #print(results_pages) # Find and extract total pages + and add 1 to ensure proper length of total pages global total_results_pages total_results_pages = int(re.split("/", results_pages)[1]) + 2 # need to add 2 b/c 'range(inclusive, exclusive)' #========================================= need to remember to +2, and remove -30 #print(total_results_pages) return total_results_pages #results_pages() # Working def concatenate(total_results_pages): path = f'./processing\\' scraped_pages = glob.glob(path + "/*.csv") concatenate_pages = [] counter = 0 for page in scraped_pages: df = pd.read_csv(page, index_col=0, header=0) concatenate_pages.append(df) compiled_data = pd.concat(concatenate_pages, axis=0, ignore_index=True) total_items_scraped = len(compiled_data['brand']) # can replace this counter by creating class objects everytime it scrapes concatenated_output = compiled_data.to_csv(f"./finished_outputs/{current_date}_{total_items_scraped}_scraped_{total_results_pages}_pages_.csv") return concatenated_output # total_results_pages = 4 # concatenate(total_results_pages) ## CONSTRUCT CLASSES HERE # THis is working def clean_processing_fldr(): # delete all files in the 'processing folder' path = f'./processing\\' scraped_pages = glob.glob(path + "/*.csv") if len(scraped_pages) < 1: print("There are no files in the folder to clear. ") else: print(f"Clearing out a total of {len(scraped_pages)} scraped pages in the processing folder... ") clear_processing_files = [] for page in scraped_pages: os.remove(page) print('Clearing of "Processing" folder complete. ') ###Output _____no_output_____ ###Markdown probably won't use these but good practice of fundamentals ###Code # webscrape first page, then run page turner, then scraper for every page thereafter # path = f'./finished_outputs\\' finished_files = glob.glob(path + "/*.csv") dict = {} counter = 0 for file in finished_files: counter += 1 key = str(counter) file = file[19:] dict[key] = file print(str(counter) + ") " + file ) select = input("Which file would you like to read? Enter in the number. ") #with open(select) ## Do not plan on using this... Save for later results = [] for num in range(counter): results.append(num) correct_selection = False while correct_selection == False: select = input("Which file would you like to read? Enter in the number. ") if int(select) not in results: print("That was not a valid selection") correct_selection = False else: print("Processing now. 
") correct_selection = True ###Output _____no_output_____ ###Markdown Hold off on using this function ###Code # learning lesson is you can't call a function within a function def page_turner(total_results_pages): # This is "NEXT PAGE BUTTON CLICK" - This loops thru the total amount of pages by clicking the next page button for turn_page in range(1, total_results_pages): # set the current url as the target page (aiming the boomerang) target_url = browser.url # Use Request.get() - throw the boomerang at the target, retrieve the info, & return back to requestor response_target = requests.get(target_url) #response # Use BeautifulSoup to read grab all the HTML using the lxml parser target_page_soup = soup(response_target.text, 'html.parser') # Use BeautifulSoup to extract the total results page number #results_pages = current_page_soup.find_all('span', class_="list-tool-pagination-text")[0].text.strip() results_pages = target_page_soup.find_all('span', class_="list-tool-pagination-text")[0].text.strip() #========================================================= containers = target_page_soup.find_all("div", class_="item-container") newegg_page_scraper(containers, turn_page) #for i in range(total_results_pages): x = random.randint(3, 25) print(f"{turn_page}) | SLEEPING FOR SECONDS: {x} ") time.sleep(x) browser.find_by_xpath('//*[@id="bodyArea"]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[1]/div[2]/div/div[2]/button').click() browser.quit() # concatenate(total_results_pages) # clean_processing_fldr() # # clear out processing folder function here - as delete everything to prevent clutter # print(f'WebScraping Complete! All {total_results_pages} have been scraped and saved as {current_date}_{product_category}_scraped_{total_results_pages}_pages_.csv in the "finished_outputs" folder') # print('Thank you and hope you found this useful!') ###Output _____no_output_____ ###Markdown Resume below - just skip the top function ###Code class Sub_category: def __init__(self, **entries): self.__dict__.update(entries) scrape_again = True while scrape_again == True: return_dt() print("=== NewEgg.Com WebScraper Beta ===") print("=="*30) print(f"Date: {current_date}") print("") print("Instructions:") print("(1) Go to www.newegg.com, search for your laptop requirements (e.g. brand and specifications) ") print("(2) Copy and paste the url from your exact search ") print('(3) Activate or Disable the "Head View", webscraper bots point of view ') print('(4) Check the "final_output folder when the webscraper bot is done scraping "') print("") executable_path = {'executable_path': './chromedriver.exe'} url = input("Step 1) Please copy and paste your laptop query that you want to webscrape, and press enter: ") head = '' browser ='' head_on_off(executable_path) response = requests.get(current_url) #response current_page_soup = soup(response.text, 'html.parser') current_page_soup.find_all("div", class_="item-container") scrappable_y_n(current_page_soup) ###################################### # # Are there any pop ups / safe to proceed? safe_proceed_y_n = input(f'The Break Pedal: Answer any robot queries by NewEgg. Enter "y" when you are ready to proceed. ') if safe_proceed_y_n == 'y': print(f'Proceeding with webscrape... ') else: print("Quitting browser. You will need to press ctrl + c to quit, and then restart the program to try again. 
") browser.quit() # ################ #newegg_page_scraper(containers) # will need to UNCOMMENT AFTER results_pages(current_page_soup) #page_turner(total_results_pages) #total_results_pages = 5 for turn_page in range(1, total_results_pages): # set the current url as the target page (aiming the boomerang) target_url = browser.url # Use Request.get() - throw the boomerang at the target, retrieve the info, & return back to requestor response_target = requests.get(target_url) #response # Use BeautifulSoup to read grab all the HTML using the lxml parser target_page_soup = soup(response_target.text, 'html.parser') # Use BeautifulSoup to extract the total results page number #results_pages = current_page_soup.find_all('span', class_="list-tool-pagination-text")[0].text.strip() results_pages = target_page_soup.find_all('span', class_="list-tool-pagination-text")[0].text.strip() #========================================================= containers = target_page_soup.find_all("div", class_="item-container") newegg_page_scraper(containers, turn_page) screenshot(f"./screenshots/{page_turn_page}", suffix=".png", full=True) objects = [Sub_category(**i) for i in scraped_dict] product_catalog.append(objects) x = random.randint(3, 25) print("Emulating Human Behavior") print(f"{turn_page}) | SLEEPING FOR {x} SECONDS ") time.sleep(x) browser.find_by_xpath('//*[@id="bodyArea"]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[1]/div[2]/div/div[2]/button').click() browser.quit() ################################################################## concat_y_n = input(f'All {total_results_pages} pages have been saved in the "processing" folder (1 page = csv files). Would you like for us concatenate all the files into one? Enter "y", if so. Otherwise, enter anykey to exit the program. ') if concat_y_n == 'y': concatenate(total_results_pages) print(f'WebScraping Complete! All {total_results_pages} have been scraped and saved as {current_date}_{pdt_category}_scraped_{total_results_pages}_pages_.csv in the "finished_outputs" folder') # temporarily changed product_category to "pdt_category" # clear out processing folder function here - as delete everything to prevent clutter clear_processing_y_n = input(f'The "processing" folder has {total_results_pages} csv files of each page that was scraped. Would you like to clear the files? Enter "y", if so. Otherwise, enter anykey to exit the program. ') if clear_processing_y_n == 'y': clean_processing_fldr() scrape_again = input("Would you like to scrape again? Enter in 'y' or 'n'. ") if scrape_again == 'y': scrape_again = True scrape_again = False print('Thank you! Hope you found this useful. ') product_catalog[1][0].__dict__ target_page_soup.a.img["src"] ###Output _____no_output_____
07_Visualization/Titanic_Desaster/.ipynb_checkpoints/Exercises_code_with_solutions-checkpoint.ipynb
###Markdown Visualizing the Titanic Disaster Introduction:This exercise is based on the titanic Disaster dataset avaiable at [Kaggle](https://www.kaggle.com/c/titanic). To know more about the variables check [here](https://www.kaggle.com/c/titanic/data) Step 1. Import the necessary libraries ###Code import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import numpy as np %matplotlib inline ###Output _____no_output_____ ###Markdown Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/Visualization/Titanic_Desaster/train.csv). Step 3. Assign it to a variable titanic ###Code url = 'https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/Visualization/Titanic_Desaster/train.csv' titanic = pd.read_csv(url) titanic.head() ###Output _____no_output_____ ###Markdown Step 4. Set PassengerId as the index ###Code titanic.set_index('PassengerId').head() ###Output _____no_output_____ ###Markdown Step 5. Create a pie chart presenting the male/female proportion ###Code # sum the instances of males and females males = (titanic['Sex'] == 'male').sum() females = (titanic['Sex'] == 'female').sum() # put them into a list called proportions proportions = [males, females] # Create a pie chart plt.pie( # using proportions proportions, # with the labels being officer names labels = ['Males', 'Females'], # with no shadows shadow = False, # with colors colors = ['blue','red'], # with one slide exploded out explode = (0.15 , 0), # with the start angle at 90% startangle = 90, # with the percent listed as a fraction autopct = '%1.1f%%' ) # View the plot drop above plt.axis('equal') # Set labels plt.title("Sex Proportion") # View the plot plt.tight_layout() plt.show() ###Output _____no_output_____ ###Markdown Step 6. Create a scatterplot with the Fare payed and the Age, differ the plot color by gender ###Code # creates the plot using lm = sns.lmplot(x = 'Age', y = 'Fare', data = titanic, hue = 'Sex', fit_reg=False) # set title lm.set(title = 'Fare x Age') # get the axes object and tweak it axes = lm.axes axes[0,0].set_ylim(-5,) axes[0,0].set_xlim(-5,85) ###Output _____no_output_____ ###Markdown Step 7. How many people survived? ###Code titanic.Survived.sum() ###Output _____no_output_____ ###Markdown Step 8. Create a histogram with the Fare payed ###Code # sort the values from the top to the least value and slice the first 5 items df = titanic.Fare.sort_values(ascending = False) df # create bins interval using numpy binsVal = np.arange(0,600,10) binsVal # create the plot plt.hist(df, bins = binsVal) # Set the title and labels plt.xlabel('Fare') plt.ylabel('Frequency') plt.title('Fare Payed Histrogram') # show the plot plt.show() ###Output _____no_output_____
01_Simple_Linear_Model_zh_CN.ipynb
###Markdown TensorFlow 教程 01 简单线性模型by [Magnus Erik Hvass Pedersen](http://www.hvass-labs.org/)/ [GitHub](https://github.com/Hvass-Labs/TensorFlow-Tutorials) / [Videos on YouTube](https://www.youtube.com/playlist?list=PL9Hr9sNUjfsmEu1ZniY0XpHSzl5uihcXZ)中文翻译 [thrillerist](https://zhuanlan.zhihu.com/insight-pixel)/[Github](https://github.com/thrillerist/TensorFlow-Tutorials) 介绍这份教程示范了在TensorFlow中使用一个简单线性模型的工作流程。在载入称为MNIST的手写数字图片数据集后,我们在TensorFlow中定义并优化了一个数学模型。(我们)会画出结果并展开讨论。 你应该熟悉基本的线性代数,Python和Jupyter Notebook编辑器。如果你对机器学习和分类有基本的理解也很有帮助。 导入 ###Code %matplotlib inline import matplotlib.pyplot as plt import tensorflow as tf import numpy as np from sklearn.metrics import confusion_matrix ###Output _____no_output_____ ###Markdown 使用Python3.5.2(Anaconda)开发,TensorFlow版本是: ###Code tf.__version__ ###Output _____no_output_____ ###Markdown 载入数据 MNIST数据集大约有12MB,如果给定的地址里没有文件,它将自动下载。 ###Code from tensorflow.examples.tutorials.mnist import input_data data = input_data.read_data_sets("data/MNIST/", one_hot=True) ###Output Extracting data/MNIST/train-images-idx3-ubyte.gz Extracting data/MNIST/train-labels-idx1-ubyte.gz Extracting data/MNIST/t10k-images-idx3-ubyte.gz Extracting data/MNIST/t10k-labels-idx1-ubyte.gz ###Markdown 现在已经载入了MNIST数据集,它由70,000张图像和对应的标签(比如图像的类别)组成。数据集分成三份互相独立的子集。我们在教程中只用训练集和测试集。 ###Code print("Size of:") print("- Training-set:\t\t{}".format(len(data.train.labels))) print("- Test-set:\t\t{}".format(len(data.test.labels))) print("- Validation-set:\t{}".format(len(data.validation.labels))) ###Output Size of: - Training-set: 55000 - Test-set: 10000 - Validation-set: 5000 ###Markdown One-Hot 编码 数据集以一种称为One-Hot编码的方式载入。这意味着标签从一个单独的数字转换成一个长度等于所有可能类别数量的向量。向量中除了第$i$个元素是1,其他元素都是0,这代表着它的类别是$i$'。比如,前面五张图像标签的One-Hot编码为: ###Code data.test.labels[0:5, :] ###Output _____no_output_____ ###Markdown 在不同的比较和度量性能时,我们也需要用单独的数字表示类别,因此我们通过取最大元素的索引,将One-Hot编码的向量转换成一个单独的数字。需注意的是'class'在Python中是一个关键字,所以我们用'cls'代替它。 ###Code data.test.cls = np.array([label.argmax() for label in data.test.labels]) ###Output _____no_output_____ ###Markdown 现在我们可以看到测试集中前面五张图像的类别。将这些与上面的One-Hot编码的向量进行比较。例如,第一张图像的类别是7,对应的在One-Hot编码向量中,除了第7个元素其他都为零。 ###Code data.test.cls[0:5] ###Output _____no_output_____ ###Markdown 数据维度 在下面的源码中,有很多地方用到了数据维度。在计算机编程中,通常来说最好使用变量和常量,而不是在每次使用数值时写硬代码。这意味着数字只需要在一个地方改动就行。这些最好能从读取的数据中获取,但这里我们直接写上数值。 ###Code # We know that MNIST images are 28 pixels in each dimension. img_size = 28 # Images are stored in one-dimensional arrays of this length. img_size_flat = img_size * img_size # Tuple with height and width of images used to reshape arrays. img_shape = (img_size, img_size) # Number of classes, one class for each of 10 digits. num_classes = 10 ###Output _____no_output_____ ###Markdown 用来绘制图像的帮助函数 这个函数用来在3x3的栅格中画9张图像,然后在每张图像下面写出真实的和预测的类别。 ###Code def plot_images(images, cls_true, cls_pred=None): assert len(images) == len(cls_true) == 9 # Create figure with 3x3 sub-plots. fig, axes = plt.subplots(3, 3) fig.subplots_adjust(hspace=0.3, wspace=0.3) for i, ax in enumerate(axes.flat): # Plot image. ax.imshow(images[i].reshape(img_shape), cmap='binary') # Show true and predicted classes. if cls_pred is None: xlabel = "True: {0}".format(cls_true[i]) else: xlabel = "True: {0}, Pred: {1}".format(cls_true[i], cls_pred[i]) ax.set_xlabel(xlabel) # Remove ticks from the plot. ax.set_xticks([]) ax.set_yticks([]) ###Output _____no_output_____ ###Markdown 绘制几张图像来看看数据是否正确 ###Code # Get the first images from the test-set. images = data.test.images[0:9] # Get the true classes for those images. 
cls_true = data.test.cls[0:9] # Plot the images and labels using our helper-function above. plot_images(images=images, cls_true=cls_true) ###Output _____no_output_____ ###Markdown TensorFlow图TensorFlow的全部目的就是使用一个称之为计算图(computational graph)的东西,它会比直接在Python中进行相同计算量要高效得多。TensorFlow比Numpy更高效,因为TensorFlow了解整个需要运行的计算图,然而Numpy只知道某个时间点上唯一的数学运算。TensorFlow也能够自动地计算需要优化的变量的梯度,使得模型有更好的表现。这是由于Graph是简单数学表达式的结合,因此整个图的梯度可以用链式法则推导出来。TensorFlow还能利用多核CPU和GPU,Google也为TensorFlow制造了称为TPUs(Tensor Processing Units)的特殊芯片,它比GPU更快。一个TensorFlow图由下面几个部分组成,后面会详细描述:* 占位符变量(Placeholder)用来改变图的输入。* 模型变量(Model)将会被优化,使得模型表现得更好。* 模型本质上就是一些数学函数,它根据Placeholder和模型的输入变量来计算一些输出。* 一个cost度量用来指导变量的优化。* 一个优化策略会更新模型的变量。另外,TensorFlow图也包含了一些调试状态,比如用TensorBoard打印log数据,本教程不涉及这些。 占位符 (Placeholder)变量 Placeholder是作为图的输入,每次我们运行图的时候都可能会改变它们。将这个过程称为feeding placeholder变量,后面将会描述它。首先我们为输入图像定义placeholder变量。这让我们可以改变输入到TensorFlow图中的图像。这也是一个张量(tensor),代表一个多维向量或矩阵。数据类型设置为`float32`,形状设为`[None, img_size_flat]`,`None`代表tensor可能保存着任意数量的图像,每张图象是一个长度为`img_size_flat`的向量。 ###Code x = tf.placeholder(tf.float32, [None, img_size_flat]) ###Output _____no_output_____ ###Markdown 接下来我们为输入变量`x`中的图像所对应的真实标签定义placeholder变量。变量的形状是`[None, num_classes]`,这代表着它保存了任意数量的标签,每个标签是长度为`num_classes`的向量,本例中长度为10。 ###Code y_true = tf.placeholder(tf.float32, [None, num_classes]) ###Output _____no_output_____ ###Markdown 最后我们为变量`x`中图像的真实类别定义placeholder变量。它们是整形,并且这个变量的维度设为`[None]`,代表placeholder变量是任意长的一维向量。 ###Code y_true_cls = tf.placeholder(tf.int64, [None]) ###Output _____no_output_____ ###Markdown 需要优化的变量 除了上面定义的那些给模型输入数据的变量之外,TensorFlow还需要改变一些模型变量,使得训练数据的表现更好。第一个需要优化的变量称为权重`weight`,TensorFlow变量需要被初始化为零,它的形状是`[img_size_flat, num_classes]`,因此它是一个`img_size_flat`行、`num_classes`列的二维张量(或矩阵)。 ###Code weights = tf.Variable(tf.zeros([img_size_flat, num_classes])) ###Output _____no_output_____ ###Markdown 第二个需要优化的是偏差变量`biases`,它被定义成一个长度为`num_classes`的1维张量(或向量)。 ###Code biases = tf.Variable(tf.zeros([num_classes])) ###Output _____no_output_____ ###Markdown 模型 这个最基本的数学模型将placeholder变量`x`中的图像与权重`weight`相乘,然后加上偏差`biases`。结果是大小为`[num_images, num_classes]`的一个矩阵,由于`x`的形状是`[num_images, img_size_flat]` 并且 `weights`的形状是`[img_size_flat, num_classes]`,因此两个矩阵乘积的形状是`[num_images, num_classes]`,然后将`biases`向量添加到矩阵每一行中。 ###Code logits = tf.matmul(x, weights) + biases ###Output _____no_output_____ ###Markdown 现在`logits`是一个 `num_images` 行`num_classes`列的矩阵,第$i$行第$j$列的那个元素代表着第$i$张输入图像有多大可能性是第$j$个类别。然而,这是很粗略的估计并且很难解释,因为数值可能很小或很大,因此我们想要对它们做归一化,使得`logits`矩阵的每一行相加为1,每个元素限制在0到1之间。这是用一个称为softmax的函数来计算的,结果保存在`y_pred`中。 ###Code y_pred = tf.nn.softmax(logits) ###Output _____no_output_____ ###Markdown 可以从`y_pred`矩阵中取每行最大元素的索引值,来得到预测的类别。 ###Code y_pred_cls = tf.argmax(y_pred, dimension=1) ###Output _____no_output_____ ###Markdown 优化损失函数 为了使模型更好地对输入图像进行分类,我们必须改变`weights`和`biases`变量。首先我们需要比较模型的预测输出`y_pred`和期望输出`y_true`,来了解目前模型的性能如何。交叉熵(cross-entropy)是一个在分类中使用的性能度量。交叉熵是一个常为正值的连续函数,如果模型的预测值精准地符合期望的输出,它就等于零。因此,优化的目的就是最小化交叉熵,通过改变模型中`weights`和`biases`的值,使交叉熵越接近零越好。TensorFlow有一个内置的计算交叉熵的函数。需要注意的是它使用`logits`的值,因为在它内部也计算了softmax。 ###Code cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_true) ###Output _____no_output_____ ###Markdown 现在,我们已经为每个图像分类计算了交叉熵,所以有一个当前模型在每张图上的性能度量。但是为了用交叉熵来指导模型变量的优化,我们需要一个额外的标量值,因此我们简单地利用所有图像分类交叉熵的均值。 ###Code cost = tf.reduce_mean(cross_entropy) ###Output _____no_output_____ ###Markdown 优化方法 现在,我们有一个需要被最小化的损失度量,接着我们可以创建优化器。在这种情况中,用的是梯度下降的基本形式,步长设为0.5。优化过程并不是在这里执行。实际上,还没计算任何东西,我们只是往TensorFlow图中添加了优化器,以便之后的操作。 ###Code optimizer = 
tf.train.GradientDescentOptimizer(learning_rate=0.5).minimize(cost) ###Output _____no_output_____ ###Markdown 性能度量 我们需要另外一些性能度量,来向用户展示这个过程。这是一个布尔值向量,代表预测类型是否等于每张图片的真实类型。 ###Code correct_prediction = tf.equal(y_pred_cls, y_true_cls) ###Output _____no_output_____ ###Markdown 上面先将布尔值向量类型转换成浮点型向量,这样子False就变成0,True变成1,然后计算这些值的平均数,以此来计算分类的准确度。 ###Code accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) ###Output _____no_output_____ ###Markdown 运行TensorFlow 创建TensorFlow会话(session)一旦创建了TensorFlow图,我们需要创建一个TensorFlow session,用来运行图。 ###Code session = tf.Session() ###Output _____no_output_____ ###Markdown 初始化变量我们需要在开始优化`weights`和`biases`变量之前对它们进行初始化。 ###Code session.run(tf.global_variables_initializer()) ###Output _____no_output_____ ###Markdown 用来优化迭代的帮助函数 在训练集中有50,000张图。用这些图像计算模型的梯度会花很多时间。因此我们利用随机梯度下降的方法,它在优化器的每次迭代里只用到了一小部分的图像。 ###Code batch_size = 100 ###Output _____no_output_____ ###Markdown 函数执行了多次的优化迭代来逐步地提升模型的`weights`和`biases`。在每次迭代中,从训练集中选择一批新的数据,然后TensorFlow用这些训练样本来执行优化器。 ###Code def optimize(num_iterations): for i in range(num_iterations): # Get a batch of training examples. # x_batch now holds a batch of images and # y_true_batch are the true labels for those images. x_batch, y_true_batch = data.train.next_batch(batch_size) # Put the batch into a dict with the proper names # for placeholder variables in the TensorFlow graph. # Note that the placeholder for y_true_cls is not set # because it is not used during training. feed_dict_train = {x: x_batch, y_true: y_true_batch} # Run the optimizer using this batch of training data. # TensorFlow assigns the variables in feed_dict_train # to the placeholder variables and then runs the optimizer. session.run(optimizer, feed_dict=feed_dict_train) ###Output _____no_output_____ ###Markdown 展示性能的帮助函数 测试集数据字典被当做TensorFlow图的输入。注意,在TensorFlow图中,placeholder变量必须使用正确的名字。 ###Code feed_dict_test = {x: data.test.images, y_true: data.test.labels, y_true_cls: data.test.cls} ###Output _____no_output_____ ###Markdown 用来打印测试集分类准确度的函数。 ###Code def print_accuracy(): # Use TensorFlow to compute the accuracy. acc = session.run(accuracy, feed_dict=feed_dict_test) # Print the accuracy. print("Accuracy on test-set: {0:.1%}".format(acc)) ###Output _____no_output_____ ###Markdown Function for printing and plotting the confusion matrix using scikit-learn.用scikit-learn打印混淆矩阵。 ###Code def print_confusion_matrix(): # Get the true classifications for the test-set. cls_true = data.test.cls # Get the predicted classifications for the test-set. cls_pred = session.run(y_pred_cls, feed_dict=feed_dict_test) # Get the confusion matrix using sklearn. cm = confusion_matrix(y_true=cls_true, y_pred=cls_pred) # Print the confusion matrix as text. print(cm) # Plot the confusion matrix as an image. plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues) # Make various adjustments to the plot. plt.tight_layout() plt.colorbar() tick_marks = np.arange(num_classes) plt.xticks(tick_marks, range(num_classes)) plt.yticks(tick_marks, range(num_classes)) plt.xlabel('Predicted') plt.ylabel('True') ###Output _____no_output_____ ###Markdown 绘制测试集中误分类图像的函数。 ###Code def plot_example_errors(): # Use TensorFlow to get a list of boolean values # whether each test-image has been correctly classified, # and a list for the predicted class of each image. correct, cls_pred = session.run([correct_prediction, y_pred_cls], feed_dict=feed_dict_test) # Negate the boolean array. incorrect = (correct == False) # Get the images from the test-set that have been # incorrectly classified. 
images = data.test.images[incorrect] # Get the predicted classes for those images. cls_pred = cls_pred[incorrect] # Get the true classes for those images. cls_true = data.test.cls[incorrect] # Plot the first 9 images. plot_images(images=images[0:9], cls_true=cls_true[0:9], cls_pred=cls_pred[0:9]) ###Output _____no_output_____ ###Markdown 绘制模型权重的帮助函数 这个函数用来绘制模型的权重`weights`。画了10张图像,训练模型所识别出的每个数字对应着一张图。 ###Code def plot_weights(): # Get the values for the weights from the TensorFlow variable. w = session.run(weights) # Get the lowest and highest values for the weights. # This is used to correct the colour intensity across # the images so they can be compared with each other. w_min = np.min(w) w_max = np.max(w) # Create figure with 3x4 sub-plots, # where the last 2 sub-plots are unused. fig, axes = plt.subplots(3, 4) fig.subplots_adjust(hspace=0.3, wspace=0.3) for i, ax in enumerate(axes.flat): # Only use the weights for the first 10 sub-plots. if i<10: # Get the weights for the i'th digit and reshape it. # Note that w.shape == (img_size_flat, 10) image = w[:, i].reshape(img_shape) # Set the label for the sub-plot. ax.set_xlabel("Weights: {0}".format(i)) # Plot the image. ax.imshow(image, vmin=w_min, vmax=w_max, cmap='seismic') # Remove ticks from each sub-plot. ax.set_xticks([]) ax.set_yticks([]) ###Output _____no_output_____ ###Markdown 优化之前的性能测试集上的准确度是9.8%。这是由于模型只做了初始化,并没做任何优化,所以它通常将图像预测成数字零,正如下面绘制的图像那样,刚好测试集中9.8%的图像是数字零。 ###Code print_accuracy() plot_example_errors() ###Output _____no_output_____ ###Markdown 1次迭代优化后的性能在完成一次迭代优化之后,模型在测试集上的准确率从9.8%提高到了40.7%。这意味着它大约10次里面会误分类6次,正如下面所显示的。 ###Code optimize(num_iterations=1) print_accuracy() plot_example_errors() ###Output _____no_output_____ ###Markdown 下面绘制的是权重。正值为红色,负值为蓝色。这些权重可以直观地理解为图像滤波器。例如,权重用来确定一张数字零的图像对圆形图像有正反应(红色),对圆形图像的中间部分有负反应(蓝色)。类似的,权重也用来确定一张数字一的图像对图像中心垂直线段有正反应(红色),对线段周围有负反应(蓝色)。注意到权重大多看起来跟它要识别的数字很像。这是因为只做了一次迭代,即权重只在100张图像上训练。等经过上千张图像的训练之后,权重会变得更难分辨,因为它们需要识别出数字的许多种书写方法。 ###Code plot_weights() ###Output _____no_output_____ ###Markdown 10次优化迭代后的性能 ###Code # We have already performed 1 iteration. optimize(num_iterations=9) print_accuracy() plot_example_errors() plot_weights() ###Output _____no_output_____ ###Markdown 1000次迭代之后的性能在迭代了1000次之后,模型在十次里面大约只误识别了一次。如下图所示,有些误识别情有可原,因为即使在人类眼里,也很难确定图像(的数字),然而有一些图像是很明显的,好的模型应该能分辨出来。但这个简单的模型无法达到更好的性能,因此需要更为复杂的模型。 ###Code # We have already performed 10 iterations. optimize(num_iterations=990) print_accuracy() plot_example_errors() ###Output _____no_output_____ ###Markdown 模型经过了1000次迭代训练,每次迭代用到训练集里面的100张图像。由于图像的多样化,现在权重变得很难辨认,我们可能会怀疑这些权重是否真的理解数字是怎么由线条组成的,或者模型只是记住了许多不同的像素。 ###Code plot_weights() ###Output _____no_output_____ ###Markdown 我们也可以打印并绘制出混淆矩阵,它让我们看到误分类的更多细节。例如,它展示了描绘着数字5的图像有时会被误分类成其他可能的数字,但大多是3,6或8。 ###Code print_confusion_matrix() ###Output [[ 957 0 3 2 0 5 11 1 1 0] [ 0 1108 2 2 1 2 4 2 14 0] [ 4 9 914 19 15 5 13 14 35 4] [ 1 0 16 928 0 28 2 14 13 8] [ 1 1 3 2 939 0 10 2 6 18] [ 10 3 3 33 10 784 17 6 19 7] [ 8 3 3 2 11 14 915 1 1 0] [ 3 9 21 9 7 1 0 959 2 17] [ 8 8 8 38 11 40 14 18 825 4] [ 11 7 1 13 75 13 1 39 4 845]] ###Markdown 现在我们用TensorFlow完成了任务,关闭session,释放资源。 ###Code # This has been commented out in case you want to modify and experiment # with the Notebook without having to restart it. # session.close() ###Output _____no_output_____
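###Markdown As an optional extra (not part of the original tutorial), scikit-learn's `classification_report` gives per-class precision, recall and F1 from the same predictions used for the confusion matrix. It reuses `session`, `y_pred_cls` and `feed_dict_test` defined above and assumes the session has not been closed yet. ###Code
from sklearn.metrics import classification_report

# Re-use the test-set feed-dict and the running session from above.
cls_pred = session.run(y_pred_cls, feed_dict=feed_dict_test)
print(classification_report(y_true=data.test.cls, y_pred=cls_pred))
###Output _____no_output_____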
6_prog_dynamique/3_alignement_sequences.ipynb
###Markdown Alignement de séquences [Vidéo d'accompagnement](https://vimeo.com/534066780) Que veut dire aligner deux séquences? On dispose de deux séquences $a$ et $b$. **Aligner ces séquences** signifie y *insérer éventuellement des «blancs»* (qu'on représentera par un `-`) de façon que:1. les séquences obtenues $\hat{a}$ et $\hat{b}$ sont de *même longueur*,2. deux éléments de *même position* dans $\hat{a}$ et $\hat{b}$ *ne peuvent tout deux être des «blancs»*. *Exemple*: Si $a={\bf programmer}$ et $b={\bf dormir}$, voici trois alignements: programmer programmer pr-ogr-ammer -do-r-m-ir d-o-r-mi-r d-or--m---ir Bien sûr, certains nous semblent «meilleurs» que d'autres... Score pour un alignement Pour préciser cette idée de «meilleurs» alignements, nous allons attribuer un **score** à ces alignements. Pour chaque position dans les «séquences d'alignement» $\hat{a}$ et $\hat{b}$:1. si les deux caractères ayant cette position sont *non blancs et coincident*, on gagne un point $\bf +1$,2. s'ils sont *non blancs et diffèrent*, on est «quitte» $\bf 0$,3. si l'un d'eux est un *blanc*, on perd un point $\bf -1$. Le score pour un alignement donné s'obtient en ajoutant ces points pour chaque position. *Note*: sachez qu'en pratique - notamment en biologie lorsqu'on compare des séquences d'ADN - le «scoring» peut être beaucoup plus poussé de façon à tenir compte de la signification biologique des éléments (nucléotides) à comparer. Néanmoins, le principe reste le même qu'ici. *Exemple*: Voici les scores des alignements données précédemment: programmer programmer pr-ogr-ammer -do-r-m-ir d-o-r-mi-r d-or--m---ir !*+!+!+!*+ *!+!+!+*!+ *!!*!!!!!!*+ où !=-1 *=0 et +=1 scores 0 0 -7 Exercice 1Trouver deux alignements distincts de ${\bf genome}$ et ${\bf enorme}$ ainsi que les scores correspondants. Par exemple: genome ge-no-me geno-me enorme -enorme- -enorme ****++ !+!**!*! !+++!++ 2 -3 3 ____ Étude du problème de l'alignement de séquences **Le problème de l'alignement de séquences consiste à trouver un alignement de score maximal.** Dans la suite, nous notons $S(a, b)$ le score d'un *alignement optimal* des séquences $a$ et $b$. **Cas 1**: Examinons le *cas particulier où l'une des séquences est vide*. *Exemple*: si $a=\epsilon$ et $b={\bf truc}$ alors le meilleur alignement possible est clairement $\hat{a}={\bf {-}{-}{-}{-}}$ et $\hat{b}=b={\bf truc}$. Dans ce cas, le «meilleur» alignement possible consiste à *insérer autant de blancs que nécessaire dans la séquence vide* pour un score de: $$-1\times «\text{longueur de l'autre séquence}»\\\text{donc}\\ S(\epsilon, b)=-\text{longueur}(b)\text{ ou } S(a, \epsilon)=-\text{longueur}(a)$$ **Cas 2**: À présent, on suppose que *ni $a$ ni $b$ ne sont vides*. **Conventions de notation**: Par la suite, si $a$ désigne une séquence, $a'$ désigne celle obtenue à partir de $a$ en supprimant son dernier élément.- *Exemple*: si $a={\bf progr}$ alors $a'={\bf prog}$.De plus, si $a$ et $b$ désigne deux séquences, $ab$ désigne la séquence obtenue en concaténant $b$ à la suite de $a$.- *Exemple*: si $a={\bf truc}$ et $b={\bf machin}$ alors $ab={\bf trucmachin}$ et $ba={\bf machintruc}$. Supposons disposer d'un alignement **optimal** $\hat{a}, \hat{b}$ pour deux séquences $a$ et $b$ (non vides donc). Si on convient de noter $x$ et $y$ les **derniers caractères** des séquences réalisant l'alignement alors: $$\hat{a}=\hat{a}'x\quad\text{et}\quad \hat{b}=\hat{b}'y$$ Trois cas peuvent alors se présenter:1. 
**$x$ et $y$ ne sont pas des blancs.** Cela ne peut se produire que dans le cas où $x$ et $y$ sont les derniers caractères des chaînes $a$ et $b$. En supprimant ces caractères, on optient un alignement $\hat{a}'$ et $\hat{b}'$ de $a'$ et $b'$ et il est lui-même **optimal** ... (raisonner par l'absurde ou contentez-vous de l'admettre). Ainsi, si $x=y$, on ajoute 1 point au score de l'alignement $a', b'$ pour obtenir celui de $a, b$. Sinon, on est «quitte» donc: $$S(a,b)=\left\{\begin{array}{l}S(a',b')+1&\text{si } x=y\cr S(a',b')&\text{sinon}\end{array}\right.$$ *Exemple* avec $x=y$: $$\begin{array}{l|l|l|l} a={\bf abc}&\hat{a}={\bf abc}&a'={\bf ab}&\hat{a}'={\bf ab}\cr b={\bf ac}&\hat{b}={\bf a{-}c}&b'={\bf a}&\hat{b}'={\bf a-}\cr \text{score}& \hfill 1 \hfill && \hfill 0 \hfill \end{array} $$ 2. **$x$ est un «blanc»**. Dans ce cas, le dernier élément de $b$ ne peut pas être un blanc. Donc $\hat{a}'$ et $\hat{b}'$ forme un alignement pour $a$ et $b'$ et on peut s'assurer qu'il est lui-même *optimal*... Enfin, on doit enlever 1 point (blanc aligné avec non blanc) pour passer du score de l'alignement de $a$, $b'$ à celui de $a$, $b$ soit: $$S(a,b)=S(a,b')-1$$ *Exemple*: $$\begin{array}{l|l|l|l} a={\bf ab}&\hat{a}={\bf a{-}b{-}}&a={\bf ab}&\hat{a}'={\bf a{-}b}\cr b={\bf adbc}&\hat{b}={\bf adbc}&b'={\bf abd}&\hat{b}'={\bf adb}\cr \text{score}& \hfill 0 \hfill && \hfill 1 \hfill \end{array} $$ 3. **$y$ est un «blanc»**. Ce cas est symétrique du précédent et donc: $$S(a,b)=S(a', b)-1$$ Récurrence pour les scores d'alignement optimaux Ainsi, le score d'un alignement optimal de $a$ et $b$ de longueurs respectives $n$ et $m$ est *le maximum des trois scores ci-dessus* (pour deux séquences non vides) et, en combinant les **cas 1 et 2**, on obtient la récurrence:$$S(a,b)=\left\{\begin{array}{lr}-n&\text{si }m=0\cr-m&\text{sinon si }n=0\cr\max\{S(a',b')+1,S(a,b')-1,S(a',b)-1\}&\text{sinon si }a_n=b_m\cr\max\{S(a',b'),S(a,b')-1,S(a',b)-1\}&\text{sinon}\end{array}\right.$$ *Conseil*: Gardez en vue cette récurrence (ou mieux, notez là soigneusement) pour bien comprendre la suite. Exercice 2Dessiner l'arbre des appels pour le score avec les séquences $a={\bf aa}$ et $b={\bf ab}$. Combien de fois le sous-problème ${\bf a},{\bf a}$ est-il résolu? ![plsc_exo1.png](attachment:9a6c8311-07cb-46fe-8bed-0c12fb13775e.png) _________ Exercice 3Transposer la récurrence précédente pour écrire la fonction récursive `score_alignement_rec(a, b)` qui calcule le score maximal pour l'alignement des séquences `a` et `b`. ###Code def score_alignement_rec(a, b): n, m = len(a), len(b) if len(a) == 0: return -m if len(b) == 0: return -n x, y = a[-1], b[-1] return max( (1 if x == y else 0) + score_alignement_rec(a[:-1],b[:-1]), -1 + score_alignement_rec(a[:-1],b), -1 + score_alignement_rec(a,b[:-1]) ) score_alignement_rec("programmer", "dormir") ###Output _____no_output_____ ###Markdown _________ Calcul du meilleur score par programmation dynamique L'algorithme récursif va calculer plusieurs fois les mêmes sous-problèmes comme nous l'avons vu dans l'exercice 1. Nous allons donc utiliser la **programmation dynamique** pour mémoriser les solutions des sous-problèmes qu'on va résoudre «des plus petits vers les plus grands».Mais quels sont les «sous-problèmes» pour des séquences $a$ et $b$ de longueurs respectives $n$ et $m$? Et combien y en a-t-il? L'opération «prime» (supprimer le dernier élément d'une séquence) peut être appliquée *autant de fois que la séquence a d'éléments*. 
En comptant la séquence elle-même, cela donne $\bf n+1$ séquences possibles pour $a$ dans un sous-problème (*rappel*: $\epsilon$ désigne la séquence vide):$$a, a', a'', \dots, \epsilon$$De même, il y aura $\bf m+1$ séquences possibles pour $b$. Exercice 4Préciser la suite $a, a', a'', \dots, \epsilon$ si $a={\bf truc}$ $$a={\bf truc}, a'={\bf tru}, a''={\bf tr}, a'''={\bf t}, a''''=\epsilon \text{ (séquence vide)}$$ ______ Comme les sous-problèmes peuvent combiner deux séquences parmi celles-ci, il y en a $\bf (n+1)(m+1)$. On peut visualiser cela comme suit: m+1 colonnes | 𝜖 … b'' b' b ---------------- 𝜖 | ? … ? ? ? n+1 … | … … lignes a''| ? ? * ? avec, par exemple, * = S(a'',b') a' | ? ? ? ? a | ? ? ? ?où à chaque combinaison ligne, colonne correspond un sous-problème à résoudre. On va donc utiliser une matrice `mat` a $n+1$ lignes et $m+1$ colonnes pour mémoriser ces solutions; `mat[i][j]` contiendra: $$S(,)\qquad\text{avec } 0\leqslant i\leqslant n\text{ et } 0\leqslant j\leqslant m$$Autrement dit, `mat[i][j]` contiendra *le score du sous-problème faisant intervenir les **préfixes** de $i$ éléments de $a$ et de $j$ éléments de $b$*. Exprimé avec Python, cela donne: mat[i][j] = S(a[:i], b[:j]) Exercice 5Construire cette matrice à la main pour $a={\bf dormir}$ et $b={\bf programmer}$ en vous aidant du canevas donné ci-dessous et de la récurrence rappelée ici:$$S(a,b)=\left\{\begin{array}{lr}-n&\text{si }m=0\cr-m&\text{sinon si }n=0\cr\max\{S(a',b')+1,S(a,b')-1,S(a',b)-1\}&\text{sinon si }a_n=b_m\cr\max\{S(a',b'),S(a,b')-1,S(a',b)-1\}&\text{sinon}\end{array}\right.$$où $n$ et $m$ représentent les nombre d'éléments respectifs de $a$ et $b$.*Conseil*: Commencer par le cas de base de façon à obtenir la 1ère ligne et la 1ère colonne. Ensuite, procéder ligne par ligne. | p r o g r a m m e r | i ------------------------------------- | 0 -1 -2 . . . . . . . . | 0 d |-1 . . . . . . . . . . | 1 o |-2 . . . . . . . . . . | 2 r | . . . . . . . . . . . | 3 m | . . . . . . . . . . . | 4 i | . . . . . . . . . . . | 5 r | . . . . . . . . . . . | 6 -------------------------------------- j | 0 1 2 3 4 5 6 7 8 9 10| | p r o g r a m m e r | i ------------------------------------- | 0 -1 -2 -3 -4 -5 -6 -7 -8 -9 -10| 0 d |-1 0 -1 -2 -3 -4 -5 -6 -7 -8 -9 | 1 o |-2 -1 0 0 -1 -2 -3 -4 -5 -6 -7 | 2 r |-3 -2 0 0 0 0 -1 -2 -3 -4 -5 | 3 m |-4 -3 -1 0 0 0 0 0 -1 -2 -3 | 4 i |-5 -4 -2 -1 0 0 0 0 0 -1 -2 | 5 r |-6 -5 -3 -2 -1 1 0 0 0 0 0 | 6 -------------------------------------- j | 0 1 2 3 4 5 6 7 8 9 10|*Exemples*: pour calculer `mat[1][1]`, comme `b!=a` on doit prendre le plus grand de `mat[0][0]=0`, `mat[0][1]-1=-2`, `mat[1][0]-1=-2` donc on trouve **0**.Pour calculer `mat[2][3]`, comme on a la même lettre `o` on doit prendre le plus grand de `mat[1][2]+1=0`, `mat[2][2]-1=-1` et `mat[1][3]-1=-3` donc (encore) **0**. _________ Exercice 6Utiliser la programmation dynamique pour écrire une version itérative `score_alignement(a, b)` de la première version récursive trouvée à l'exercice 2. Je vous redonne la fonction `aff` pour afficher (à peu près) correctement la matrice (pour vérification). ###Code def aff(mat): n = len(mat) m = len(mat[0]) for i in range(n): for j in range(m): if j != m-1: print(mat[i][j], end=" ") else: print(mat[i][j]) ###Output _____no_output_____ ###Markdown En avant! 
###Code def score_alignement(a, b): pass score_alignement("dormir", "programmer") def score_alignement(a, b): n, m = len(a), len(b) tab = [[0]*(m+1) for _ in range(n+1)] for i in range(n+1): tab[i][0] = -i for j in range(m+1): tab[0][j] = -j for i in range(1, n+1): for j in range(1, m+1): x, y = a[i-1], b[j-1] tab[i][j] = max( (1 if x == y else 0) + tab[i-1][j-1], -1 + tab[i-1][j], -1 + tab[i][j-1] ) aff(tab) return tab[n][m] score_alignement("dormir", "programmer") ###Output _____no_output_____ ###Markdown _________ Calcul d'un «meilleur» alignement Reste à trouver *un meilleur alignement effectif*. Pour cela, vous allez devoir «remonter» la matrice utilisée par la fonction `score_alignement` de l'exercice 6.Cela ressemble beaucoup à ce que nous avons détaillé pour le problème de *la plus longue sous-séquence commune* à cela près qu'à chaque étape (itération), pour construire une solution $\hat{a}$, $\hat{b}$, vous devez soit «ramasser»:- les deux caractères courants (déplacement diagonal),- le caractère courant en colonne et mettre un «blanc» pour l'autre (déplacement horizontal)- le caractère courant en ligne et mettre un «blanc» pour l'autre (déplacement vertical). En cas d'ambiguité (car il y a souvent plusieurs alignements optimaux), fixez-vous une règle:- je peux remonter d'une colonne ou d'une ligne?: je choisis la ligne (par exemple),- je peux remonter d'une «diagonale» ou d'une ligne?: je choisis la diagonale, etc.Enfin lorsque ce processus se termine (soit parce que vous avez atteint la 1ère colonne ou alors la 1ère ligne), il faudra éventuellement finir de compléter vos alignements.Par *exemple*, si vous terminez dans la 1ère colonne et en dessous de sa première case, cela signifie qu'il faut ajouter les lettres situées au-dessus dans l'alignement pour $a$ (le $\hat{a}$ ...) et compléter par des «blancs» l'autre. Exercice 7Écrire une fonction `alignement_de_sequences(a,b)` qui étant donnés deux séquences renvoie le score maximal d'un alignement **ainsi qu'un alignement particulier**. Aidez-vous du plan donné ci-après et des commentaires précédents. Revoir éventuellement ce que nous avons fait pour le problème de la plus longue sous-séquence commune. ###Code def alignement_de_sequences(a, b): n, m = len(a), len(b) tab = [[0]*(m+1) for _ in range(n+1)] for i in range(n+1): tab[i][0] = -i for j in range(m+1): tab[0][j] = -j for i in range(1, n+1): for j in range(1, m+1): x, y = a[i-1], b[j-1] tab[i][j] = max( tab[i-1][j-1] + (1 if x == y else 0), tab[i-1][j] - 1, tab[i][j-1] - 1 ) # Relire le tableau à l'envers et «ramasser» les caractères # (ou insérer des blancs) au fur et à mesure # jusqu'à atteindre la première ligne i = 0 ou la première colonne j = 0 i, j = n, m pile_a = [] pile_b = [] while ...: ... # on se retrouve maintenant avec l'une des deux séquences vide while i > 0: # j == 0 soit a=a[:i], b=<vide> ... while j > 0: # i == 0 soit a=<vide>, b=b[:j] ... # remettre tout «à l'endroit» ... # terminer! return ... 
alignement_de_sequences("programmer", "dormir") def alignement_de_sequences(a, b): n, m = len(a), len(b) tab = [[0]*(m+1) for _ in range(n+1)] for i in range(n+1): tab[i][0] = -i for j in range(m+1): tab[0][j] = -j for i in range(1, n+1): for j in range(1, m+1): x, y = a[i-1], b[j-1] tab[i][j] = max( tab[i-1][j-1] + (1 if x == y else 0), tab[i-1][j] - 1, tab[i][j-1] - 1 ) # Relire le tableau à l'envers et «ramasser» les caractères # (ou insérer des blancs) au fur et à mesure # jusqu'à atteindre la première ligne i = 0 ou la première colonne j = 0 i, j = n, m pile_a = [] pile_b = [] while i > 0 and j > 0: x, y = a[i-1], b[j-1] d = tab[i-1][j-1] + (1 if x == y else 0) h, g = tab[i-1][j] - 1, tab[i][j-1] - 1 if tab[i][j] == d: pile_a.append(x) pile_b.append(y) i -= 1 j -= 1 elif tab[i][j] == h: pile_a.append(x) pile_b.append('-') i -= 1 else: pile_a.append('-') pile_b.append(y) j -= 1 # on se retrouve maintenant avec l'une des deux séquences vide while i > 0: # j == 0 soit a=a[:i], b=<vide> pile_a.append(a[i-1]) pile_b.append('-') i -= 1 while j > 0: # i == 0 soit a=<vide>, b=b[:j] pile_a.append('-') pile_b.append(b[j-1]) j -= 1 # remettre tout «à l'endroit» ch1 = "" while len(pile_a) > 0: ch1 += pile_a.pop() ch2 = "" while len(pile_b) > 0: ch2 += pile_b.pop() # terminer! return tab[n][m], ch1, ch2 alignement_de_sequences("programmer", "dormir") ###Output _____no_output_____
module_2_programming/loops.ipynb
###Markdown Learning objectives* What is a range function?* How to write for loops?* How to loop through an entire list using a few lines of code?* Performing operations over each element of a list.* Creating a new list using the range function. ###Code ##starting with a while loop i = 0 while i < 10: print(i) i = i + 1 # += is same as i = i + 1 i = 1 while i <= 10: print(19 * i) i += 1 ###Output 19 38 57 76 95 114 133 152 171 190 ###Markdown For loops ###Code range(1, 50, 2) for i in range(1, 50, 2): ans = i ** 2 print(ans) for i in range(11): print(i) new_list = [] for price in range(100, 1200, 50): new_list.append(price) print(new_list) new_prices = [] for price in new_list: updated_price = price * 0.5 new_prices.append(updated_price) new_prices new_list ###Output _____no_output_____
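###Markdown Aside (added): the two loops used above to build `new_list` and `new_prices` can also be written as list comprehensions — shown here only for comparison, since the rest of the notebook deliberately sticks to explicit `for` loops. ###Code
# Same lists as above, built with list comprehensions instead of append().
prices = [price for price in range(100, 1200, 50)]   # same as new_list
discounted = [price * 0.5 for price in prices]       # same as new_prices
print(discounted[:5])
###Output _____no_output_____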
notebooks/moraga2019_capitulo_2_3.ipynb
###Markdown Moraga, P. Modeling and Visualization of Geospatial Data Parte ICapitulos 2 y 3 Capitulo 2 1. CRS En R, el CRS se especifica usando *proj4string*. Este contiene la proyeccion, punto de referencia y elipsoide utilizado. *Ejemplo:* para designar la proyeccion Mercator para la zona 29 sur: ###Code "+proj=utm +zone=29 +ellps=WGS84 +datum=WGS84 +units=m +no_defs +south" ###Output _____no_output_____ ###Markdown Otra forma de especificar el CRS es con el EPSG. Las distintas posibilidades de CRS se pueden chequear con: ###Code rgdal::make_EPSG() ###Output _____no_output_____ ###Markdown Para ver el EPSG=4326: ###Code library(rgdal) CRS("+init=epsg:4326") ###Output Loading required package: sp rgdal: version: 1.5-23, (SVN revision 1121) Geospatial Data Abstraction Library extensions to R successfully loaded Loaded GDAL runtime: GDAL 3.2.1, released 2020/12/29 Path to GDAL shared files: C:/Program Files/R/R-4.0.4/library/rgdal/gdal GDAL binary built with GEOS: TRUE Loaded PROJ runtime: Rel. 7.2.1, January 1st, 2021, [PJ_VERSION: 721] Path to PROJ shared files: C:/Program Files/R/R-4.0.4/library/rgdal/proj PROJ CDN enabled: FALSE Linking to sp version:1.4-5 To mute warnings of possible GDAL/OSR exportToProj4() degradation, use options("rgdal_show_exportToProj4_warnings"="none") before loading rgdal. Overwritten PROJ_LIB was C:/Program Files/R/R-4.0.4/library/rgdal/proj ###Markdown Si los datos `d`no tienen info sobre su CRS, hay que setearlo:`proj4string(d) = CRS(projection)`, y en `projection` debo definir el string de los argumentos de proyeccion. Para transformar los datos a otra proyeccion uso spTransform o st_transform.*Ejemplo:* Transformar de lat/long a zona 35 UTM sur. ###Code library(rgdal) # create data with coordinates given by longitude and latitude d <- data.frame(long = rnorm(100, 0, 1), lat = rnorm(100, 0, 1)) coordinates(d) <- c("long", "lat") # assign CRS WGS84 longitude/latitude proj4string(d) <- CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs") # reproject data from longitude/latitude to UTM zone 35 south d_new <- spTransform(d, CRS("+proj=utm +zone=35 +ellps=WGS84 +datum=WGS84 +units=m +no_defs +south")) # add columns UTMx and UTMy d_new$UTMx <- coordinates(d_new)[, 1] d_new$UTMy <- coordinates(d_new)[, 2] head(d_new, 3) ###Output _____no_output_____ ###Markdown 2. ShapefilesEn el shapefile guardo la ubicacion, forma y atributos de features geograficas como lineas, poligonos y puntos.Un shapefile es una coleccion de archivos en la misma carpeta bajo el mismo nombre, donde 3 son obligatorios: .shp (contiene la geometria), .shx (indice de la geometria) y .dbf (atributos de cada forma). En R leemos los .shp con `rgdal::readOGR()` o con `sf::st_read()`.*Ejemplo:* shapefile de Carolina del Norte: ###Code library(rgdal) library(sf) filename <- system.file("shape/nc.shp", package = "sf") print(filename) map <- readOGR(filename, verbose = FALSE) ###Output [1] "C:/Users/Guillermina/Documents/R/win-library/4.0/sf/shape/nc.shp" ###Markdown Veamos los datos: ###Code class(map) head(map@data, 2) plot(map) # read shapefile with st_read() library(sf) map <- st_read(filename, quiet = TRUE) class(map) head(map, 2) plot(map, max.plot=2) # max.plot to control nr. of features to plot ###Output _____no_output_____ ###Markdown 3. Haciendo mapas con RGeneralmente usamos ggplot2, leaflet, mapview, and tmap. ###Code library(ggplot2) library(leaflet) ###Output _____no_output_____ ###Markdown 1. 
Ggplot2`ggplot2` necesita que le pasemos los datos, la forma del grafico, estetica (como representar las variables: color, forma, etc) y elementos opcionales como escalas, titulos, ..., etc. Podemos usar la `geom_sf()` para crear mapas, pasandole un objeto `sf` (simple feature). Podemos convertir un *'SpatialPolygonsDataFrame'* a `sf` con la funcion `st_as_sf()`. *Ejemplo:* Ploteemos el mapa que leimos antes de Carolina del Norte, y que cada condado tome color segun la variable *SID74*. ###Code library(ggplot2) library(viridis) map <- st_as_sf(map) ggplot(map) + geom_sf(aes(fill = SID74)) + theme_bw() + scale_fill_viridis() ###Output Loading required package: viridisLite ###Markdown Para escalas de colores discretas uso `scale_color/fill_`, donde en `` puedo poner hue, Brewer, grey o viridis. Para escalas continuas, uso `scale_color/fill_gradient`. Al plot lo guardo con `ggsave()` o con un dispositivo: ###Code png("plot.png") ggplot(map) + geom_sf(aes(fill = SID74)) + scale_fill_viridis() + theme_bw() dev.off() ###Output _____no_output_____ ###Markdown 2. LeafletCall `leaflet()` function and add layers to the map such as `addPolygons()` or `addTiles()` to add a background. Leaflet requires projection EPSG=4326. ###Code st_crs(map) map <- st_transform(map, 4326) pal <- colorNumeric("YlOrRd", domain = map$SID74) # create palette a = leaflet(map) %>% addTiles() %>% addPolygons( color = "white", # border fillColor = ~ pal(SID74), #fill fillOpacity = 1 ) %>% addLegend(pal = pal, values = ~SID74, opacity = 1) a ###Output _____no_output_____ ###Markdown Save the html with `saveWidget()` or a snapshot with `webshot()`. ###Code library(htmlwidgets) saveWidget(a, 'test_leaflet.html', selfcontained=FALSE) ###Output _____no_output_____ ###Markdown **Others:** mapview, tmap ###Code library(mapview) library(RColorBrewer) pal <- colorRampPalette(brewer.pal(9, "YlOrRd")) b = mapview(map, zcol = "SID74", map.types = "CartoDB.DarkMatter", col.regions = pal ) mapshot(b, 'test_mapview.html', selfcontained=FALSE) library(tmap) tmap_mode("view") c = tm_shape(map) + tm_polygons("SID74") tmap_save(c, 'test_tmap.html', selfcontained=FALSE) ###Output tmap mode set to interactive viewing Interactive map saved to C:\Users\Guillermina\Documents\Cursos\Estadistica Aplicada\Tesis\Espacial\cuenca_carcarania\notebooks\test_tmap.html
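###Markdown Aside (not part of Moraga's R workflow): readers working in Python can draw an equivalent choropleth of `SID74` with geopandas and matplotlib. The local path to `nc.shp` below is an assumption — point it at your own copy of the North Carolina shapefile used above. ###Code
import geopandas as gpd
import matplotlib.pyplot as plt

nc = gpd.read_file("nc.shp")  # assumed local copy of the same shapefile
nc.plot(column="SID74", cmap="viridis", legend=True, edgecolor="white")
plt.axis("off")
plt.show()
###Output _____no_output_____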
0.15/_downloads/plot_morph_data.ipynb
###Markdown Morph source estimates from one subject to another subjectA source estimate from a given subject 'sample' is morphedto the anatomy of another subject 'fsaverage'. The outputis a source estimate defined on the anatomy of 'fsaverage' ###Code # Authors: Alexandre Gramfort <[email protected]> # Eric Larson <[email protected]> # # License: BSD (3-clause) import numpy as np import matplotlib.pyplot as plt import mne from mne.datasets import sample print(__doc__) data_path = sample.data_path() subject_from = 'sample' subject_to = 'fsaverage' subjects_dir = data_path + '/subjects' fname = data_path + '/MEG/sample/sample_audvis-meg' # Read input stc file stc_from = mne.read_source_estimate(fname) # Morph using one method (supplying the vertices in fsaverage's source # space makes it faster). Note that for any generic subject, you could do: # vertices_to = mne.grade_to_vertices(subject_to, grade=5) # But fsaverage's source space was set up so we can just do this: vertices_to = [np.arange(10242), np.arange(10242)] stc_to = mne.morph_data(subject_from, subject_to, stc_from, n_jobs=1, grade=vertices_to, subjects_dir=subjects_dir) stc_to.save('%s_audvis-meg' % subject_to) # Morph using another method -- useful if you're going to do a lot of the # same inter-subject morphing operations; you could save and load morph_mat morph_mat = mne.compute_morph_matrix(subject_from, subject_to, stc_from.vertices, vertices_to, subjects_dir=subjects_dir) stc_to_2 = mne.morph_data_precomputed(subject_from, subject_to, stc_from, vertices_to, morph_mat) stc_to_2.save('%s_audvis-meg_2' % subject_to) # View source activations plt.plot(stc_from.times, stc_from.data.mean(axis=0), 'r', label='from') plt.plot(stc_to.times, stc_to.data.mean(axis=0), 'b', label='to') plt.plot(stc_to_2.times, stc_to.data.mean(axis=0), 'g', label='to_2') plt.xlabel('time (ms)') plt.ylabel('Mean Source amplitude') plt.legend() plt.show() ###Output _____no_output_____
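###Markdown A quick sanity check (added, not in the original example): both morphing routes above target the same fsaverage grade-5 source space, so the shapes should match and the data should agree up to floating-point precision. ###Code
print(stc_to.data.shape)                    # (n_fsaverage_vertices, n_times)
print([len(v) for v in stc_to.vertices])    # expected [10242, 10242]
print(np.allclose(stc_to.data, stc_to_2.data))
###Output _____no_output_____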
exercise_02.ipynb
###Markdown Professional Coding Specialist COS Pro level 2 - python `exercise 2` 11. Make a function that summates all numbers from a to b ###Code def solution(a, b): list = [i for i in range(a, b+1)] total = 0 for i in list: total += i # total = sum(list) return total solution(5, 10) ###Output _____no_output_____ ###Markdown follow the answer sheet guide- 1. calculate total summation of all integers between 1 and b- 2. calculate total summation of all integers between 1 and a - 1- 3. substract 2 from 1 ###Code def func(num): sum = 0 for i in range(1, num+1): sum += i return sum def solution(a, b): start = func(a-1) end = func(b) result = end - start return result print(func(3-1)) print(func(5)) print(solution(3, 5)) ###Output 3 15 12 ###Markdown 12. Make a function that calculates total summation without maximum and minimum value ###Code def solution(list): max = 0 min = 101 for i in list: if i > max: max = i if i < min: min = i total = 0 for i in list: if i == max or i == min: continue total += i return total ls_num = [1, 2, 3, 4, 5] solution(ls_num) ###Output _____no_output_____ ###Markdown make the same function and follow the answer sheet ###Code def get_max(list): max = 0 for i in list: if i > max: max = i return max def get_min(list): min = 101 for i in list: if i < min: min = i return min def get_sum(list): sum = 0 for i in list: sum += i return sum def solution(list): sum = get_sum(list) max = get_max(list) min = get_min(list) return sum - max - min values = [1, 2, 3, 4, 5] solution(values) ###Output _____no_output_____ ###Markdown 13. Make a function that calculates the subtraction of 1. maximum value and 2. maximum value ###Code def solution(list): max1 = 0 max2 = 0 for i in list: if i > max1: max1 = i for i in list: if max2 < i < max1: max2 = i return max1 - max2 ls_sample = [1, 2, 3, 4, 5, 10, 15] solution(ls_sample) ###Output _____no_output_____ ###Markdown make the same function based on the answer sheet ###Code def get_max(list): max = -1 for i in list: if i > max: max = i return max def except_max(list, max): ls_ret = [] for i in list: if i != max: ls_ret.append(i) return ls_ret def subtract(a, b): if a > b: return a - b else: return b - a def solution(list): max_first = get_max(list) ls_removed = except_max(list, max_first) max_second = get_max(ls_removed) answer = subtract(max_first, max_second) return answer ls_visitors = [1, 2, 3, 4, 5, 6, 7, 8, 10, 100, 150] solution(ls_visitors) ###Output _____no_output_____ ###Markdown 14. make a function that counts each person by grade ###Code def solution(scores): cnt = [0, 0, 0, 0, 0] for score in scores: if 85 <= score <= 100: cnt[0] += 1 elif 70 <= score <= 84: cnt[1] += 1 elif 55 <= score <= 69: cnt[2] += 1 elif 40 <= score <= 54: cnt[3] += 1 elif 0 <= score <= 39: cnt[4] += 1 return cnt ls_scores = [100, 90, 80, 70, 60, 50, 40] solution(ls_scores) ###Output _____no_output_____ ###Markdown based on answer sheet... ###Code def solution(list): cnt = [0 for _ in range(5)] for i in list: if i >= 85: cnt[0] += 1 elif i >= 70: cnt[1] += 1 elif i >= 55: cnt[2] += 1 elif i >= 40: cnt[3] += 1 else: cnt[4] += 1 return cnt ls_sample = [100, 90, 80, 70, 60, 50, 40, 30] solution(ls_sample) ###Output _____no_output_____ ###Markdown 15. 
Make a function that calculates total number of how many times a frog jumps ###Code def solution(list): max = len(list) now = 1 cnt = 0 while now <= max: now += list[now-1] cnt += 1 return cnt ls_jump = [1, 2, 3, 1, 1] solution(ls_jump) ###Output _____no_output_____ ###Markdown make the same function with in some different way ###Code def solution(stones): cnt = 0 current = 0 n = len(stones) while current < n: current += stones[current] cnt += 1 return cnt ls_jump = [1, 2, 3, 1, 1] solution(ls_jump) ###Output _____no_output_____ ###Markdown 16. Make a function that calculates how many persons are taller than input 'k' ###Code def solution(list, k): cnt = 0 for i in list: if i > k: cnt += 1 return cnt height = [170, 180, 190, 177, 175, 160, 168] solution(height, 175) ###Output _____no_output_____ ###Markdown model answer 1 ###Code def solution(height, k): answer = 0 n = len(height) for h in height: if h > k: answer += 1 return answer height = [170, 180, 190, 177, 175, 160, 168] solution(height, 175) ###Output _____no_output_____ ###Markdown model answer 2 ###Code def solution(height, k): answer = 0 n = len(height) index = 0 while index < n: if height[index] > k: answer += 1 index += 1 return answer height = [170, 180, 190, 177, 175, 160, 168] solution(height, 175) ###Output _____no_output_____ ###Markdown 17. Make a function that rectifies spelling 'a' to spelling 'z' ###Code def solution(string): tmp = '' for i in string: if i == 'a': tmp += 'z' elif i == 'z': tmp += 'a' else: tmp += i return tmp sent = 'zaure is z web clouding service aaa' solution(sent) ###Output _____no_output_____ ###Markdown in a different way ###Code def solution(s): s_lst = list(s) n = len(s) for i in range(n): if s_lst[i] == 'a': s_lst[i] = 'z' elif s_lst[i] == 'z': s_lst[i] = 'a' return ''.join(s_lst) sent = 'zaure is z web clouding service aaa' solution(sent) ###Output _____no_output_____ ###Markdown 18. Make a function that counts names which contains spelling 'j' and 'k' ###Code def solution(list): cnt = 0 for name in list: for c in name: if c == 'j' or c == 'k': cnt += 1 break return cnt names = ['james', 'kim john', 'dojun', 'seohyun', 'haeil', 'myeonghee'] solution(names) ###Output _____no_output_____ ###Markdown make it one more time ###Code def solution(name_list): answer = 0 for name in name_list: for n in name: if n == 'j' or n == 'k': answer += 1 break return answer names = ['james', 'kim john', 'dojun', 'seohyun', 'haeil', 'myeonghee'] solution(names) ###Output _____no_output_____ ###Markdown 19. Make a function that receives a list of price and calculates the change ###Code def solution(list, money): total = 0 for i in list: total += i result = money - total if result >= 0: return result else: return -1 prices = [1000, 2000, 3500, 3000] print(solution(prices, 10000)) print(solution(prices, 9500)) print(solution(prices, 9000)) ###Output 500 0 -1 ###Markdown one more time ###Code def solution(price, money): answer = 0 total = 0 for i in price: total += i answer = money - total if answer < 0: answer = -1 return answer prices = [1000, 2000, 3500, 3000] print(solution(prices, 10000)) print(solution(prices, 9500)) print(solution(prices, 9000)) ###Output 500 0 -1 ###Markdown 20. Make a function that finds out the 'k'th. 
smallest number from a list ###Code def get_list(list): tmp = [] for i in list: for j in i: tmp.append(j) return tmp def get_min(list): min = 101 for i in list: if i < min: min = i return min def except_min(list, min): tmp = [] for i in list: if i != min: tmp.append(i) return tmp def solution(list, k): ls_tmp = get_list(list) for _ in range(k): min = get_min(ls_tmp) ls_tmp = except_min(ls_tmp, min) return min ls_samples = [[1, 2, 3, 4, 5], [10, 11, 12, 13, 14, 15], [31, 32, 33, 34, 35]] solution(ls_samples, 2) ###Output _____no_output_____ ###Markdown follow the answer sheet ###Code def solution(list, k): tmp = [] for i in list: for j in i: tmp.append(j) tmp.sort() answer = tmp[k-1] return answer ls_samples = [[1, 2, 3, 4, 5], [10, 11, 12, 13, 14, 15], [31, 32, 33, 34, 35]] solution(ls_samples, 2) ###Output _____no_output_____
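###Markdown For cross-checking only (the exercises above intentionally avoid them), Python's built-ins reproduce two of the answers in a line or two each. ###Code
values = [1, 2, 3, 4, 5]
print(sum(values) - max(values) - min(values))   # exercise 12: drop max and min, then sum -> 9

ls_samples = [[1, 2, 3, 4, 5], [10, 11, 12, 13, 14, 15], [31, 32, 33, 34, 35]]
flat = sorted(x for row in ls_samples for x in row)
print(flat[2 - 1])                               # exercise 20 with k = 2 -> 2
###Output _____no_output_____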
04.神经网络(进阶)/9.rmsprop.ipynb
###Markdown RMSProp ###Code def rmsprop(parameters, sqrs, lr, alpha): eps = 1e-10 for param, sqr in zip(parameters, sqrs): sqr[:] = alpha * sqr + (1 - alpha) * param.grad.data ** 2 div = lr / torch.sqrt(sqr + eps) * param.grad.data param.data = param.data - div import numpy as np import torch from torchvision.datasets import MNIST # 导入 pytorch 内置的 mnist 数据 from torch.utils.data import DataLoader from torch import nn from torch.autograd import Variable import time import matplotlib.pyplot as plt %matplotlib inline def data_tf(x): x = np.array(x, dtype='float32') / 255 x = (x - 0.5) / 0.5 # 标准化,这个技巧之后会讲到 x = x.reshape((-1,)) # 拉平 x = torch.from_numpy(x) return x train_set = MNIST('./data', train=True, transform=data_tf, download=True) # 载入数据集,申明定义的数据变换 test_set = MNIST('./data', train=False, transform=data_tf, download=True) # 定义 loss 函数 criterion = nn.CrossEntropyLoss() train_data = DataLoader(train_set, batch_size=64, shuffle=True) # 使用 Sequential 定义 3 层神经网络 net = nn.Sequential( nn.Linear(784, 200), nn.ReLU(), nn.Linear(200, 10), ) # 初始化梯度平方项 sqrs = [] for param in net.parameters(): sqrs.append(torch.zeros_like(param.data)) # 开始训练 losses = [] idx = 0 start = time.time() # 记时开始 for e in range(5): train_loss = 0 for im, label in train_data: im = Variable(im) label = Variable(label) # 前向传播 out = net(im) loss = criterion(out, label) # 反向传播 net.zero_grad() loss.backward() rmsprop(net.parameters(), sqrs, 1e-3, 0.9) # 学习率设为 0.001,alpha 设为 0.9 # 记录误差 train_loss += loss.data[0] if idx % 30 == 0: losses.append(loss.data[0]) idx += 1 print('epoch: {}, Train Loss: {:.6f}' .format(e, train_loss / len(train_data))) end = time.time() # 计时结束 print('使用时间: {:.5f} s'.format(end - start)) x_axis = np.linspace(0, 5, len(losses), endpoint=True) plt.semilogy(x_axis, losses, label='alpha=0.9') plt.legend(loc='best') train_data = DataLoader(train_set, batch_size=64, shuffle=True) # 使用 Sequential 定义 3 层神经网络 net = nn.Sequential( nn.Linear(784, 200), nn.ReLU(), nn.Linear(200, 10), ) # 初始化梯度平方项 sqrs = [] for param in net.parameters(): sqrs.append(torch.zeros_like(param.data)) # 开始训练 losses = [] idx = 0 start = time.time() # 记时开始 for e in range(5): train_loss = 0 for im, label in train_data: im = Variable(im) label = Variable(label) # 前向传播 out = net(im) loss = criterion(out, label) # 反向传播 net.zero_grad() loss.backward() rmsprop(net.parameters(), sqrs, 1e-3, 0.999) # 学习率设为 0.001,alpha 设为 0.999 # 记录误差 train_loss += loss.data[0] if idx % 30 == 0: losses.append(loss.data[0]) idx += 1 print('epoch: {}, Train Loss: {:.6f}' .format(e, train_loss / len(train_data))) end = time.time() # 计时结束 print('使用时间: {:.5f} s'.format(end - start)) x_axis = np.linspace(0, 5, len(losses), endpoint=True) plt.semilogy(x_axis, losses, label='alpha=0.999') plt.legend(loc='best') ###Output _____no_output_____ ###Markdown **小练习:可以看到使用了不同的 alpha 会使得 loss 在下降过程中的震荡程度不同,想想为什么** 当然 pytorch 也内置了 rmsprop 的方法,非常简单,只需要调用 `torch.optim.RMSprop()` 就可以了,下面是例子 ###Code train_data = DataLoader(train_set, batch_size=64, shuffle=True) # 使用 Sequential 定义 3 层神经网络 net = nn.Sequential( nn.Linear(784, 200), nn.ReLU(), nn.Linear(200, 10), ) optimizer = torch.optim.RMSprop(net.parameters(), lr=1e-3, alpha=0.9) # 开始训练 start = time.time() # 记时开始 for e in range(5): train_loss = 0 for im, label in train_data: im = Variable(im) label = Variable(label) # 前向传播 out = net(im) loss = criterion(out, label) # 反向传播 optimizer.zero_grad() loss.backward() optimizer.step() # 记录误差 train_loss += loss.data[0] print('epoch: {}, Train Loss: {:.6f}' .format(e, train_loss / len(train_data))) end 
= time.time() # 计时结束 print('使用时间: {:.5f} s'.format(end - start)) ###Output epoch: 0, Train Loss: 0.372473 epoch: 1, Train Loss: 0.164288 epoch: 2, Train Loss: 0.122384 epoch: 3, Train Loss: 0.100739 epoch: 4, Train Loss: 0.088391 使用时间: 85.15531 s
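###Markdown For reference (added note, not in the original notebook), the update implemented by the `rmsprop` function above is, per parameter with gradient $g_t$: $$ s_t = \alpha s_{t-1} + (1-\alpha)\, g_t^2, \qquad \theta_t = \theta_{t-1} - \frac{\eta}{\sqrt{s_t + \epsilon}}\, g_t $$ where $\eta$ is the learning rate `lr`, $\alpha$ is the moving-average coefficient, and $\epsilon = 10^{-10}$ matches `eps` in the code.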
examples/ner_pos_tagging/ner_pos_tagging_conll.ipynb
###Markdown EXAMPLE - 5**Tasks :- NER tagging, POS tagging****Tasks Description**``NER`` :-This is a Named Entity Recognition task where individual words of the sentence are tagged with an entity label it belongs to. The words which don't belong to any entity label are simply labeled as "O".``POS`` :- This is a Part of Speech tagging task. A part of speech is a category of words that have similar grammatical properties. Each word of the sentence is tagged with the part of speech label it belongs to. The words which don't belong to any part of speech label are simply labeled as "O".**Conversational Utility** :- In conversational AI context, determining the syntactic parts of the sentence can help in extracting noun-phrases or important keyphrases from the sentence.**Data** :- In this example, we are using the coNLL 2003 data which is BIO tagged format with the POS and NER tags separated by space.The data is already present in ``coNLL_data`` directory. Step - 1: Transforming dataRaw data is in BIO tagged format with the POS and NER tags separated by space.We already provide a sample transformation function ``coNLL_ner_pos_to_tsv`` to convert this data to required tsv format. Running data transformations will save the required train, dev and test tsv data files under ``data`` directory in root of library. For more details on the data transformation process, refer to data transformations in documentation.The transformation file should have the following details which is already created ``transform_file_conll.yml``.```transform1: transform_func: coNLL_ner_pos_to_tsv read_file_names: - coNLL_train.txt - coNLL_testa.txt - coNLL_testb.txt read_dir: coNLL_data save_dir: ../../data ``` Following command can be used to run the data transformation for the tasks. ###Code !python ../../data_transformations.py \ --transform_file 'transform_file_conll.yml' ###Output _____no_output_____ ###Markdown Step -2 Data PreparationFor more details on the data preparation process, refer to data preparation in documentation.Defining the tasks file for training a single model for the NER and POS tagging tasks.
The file is already created at ``tasks_file_conll.yml`````conllner: model_type: BERT config_name: bert-base-uncased dropout_prob: 0.2 label_map_or_file: ../../data/ner_coNLL_train_label_map.joblib metrics: - seqeval_f1_score - seqeval_precision - seqeval_recall loss_type: NERLoss task_type: NER file_names: - ner_coNLL_train.tsv - ner_coNLL_testa.tsv - ner_coNLL_testb.tsvconllpos: model_type: BERT config_name: bert-base-uncased dropout_prob: 0.2 label_map_or_file: ../../data/pos_coNLL_train_label_map.joblib metrics: - seqeval_f1_score - seqeval_precision - seqeval_recall loss_type: NERLoss task_type: NER file_names: - pos_coNLL_train.tsv - pos_coNLL_testa.tsv - pos_coNLL_testb.tsv``` ###Code !python ../../data_preparation.py \ --task_file 'tasks_file_conll.yml' \ --data_dir '../../data' \ --max_seq_len 50 ###Output _____no_output_____ ###Markdown Step -3 Running Training ###Code !python ../../train.py \ --data_dir '../../data/bert-base-uncased_prepared_data' \ --task_file 'tasks_file_conll.yml' \ --out_dir 'conll_ner_pos_bert_base' \ --epochs 10 \ --train_batch_size 32 \ --eval_batch_size 32 \ --grad_accumulation_steps 1 \ --log_per_updates 50 \ --max_seq_len 50 \ --eval_while_train \ --test_while_train \ --silent ###Output _____no_output_____ ###Markdown Step - 4 InferingYou can import and use the ``inferPipeline`` to get predictions for the required tasks.The trained model and maximum sequence length to be used needs to be specified.For knowing more details about infering, refer to infer pipeline in documentation. ###Code import sys sys.path.insert(1, '../../') from infer_pipeline import inferPipeline ###Output _____no_output_____
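###Markdown Added illustration (not from the original example): a few coNLL-2003-style token lines, where each line carries the word, its POS tag, a chunk tag and a BIO NER tag separated by spaces, with blank lines separating sentences. Treat the exact column layout as an assumption and adjust the indices to match your own files. ###Code
sample = """EU NNP B-NP B-ORG
rejects VBZ B-VP O
German JJ B-NP B-MISC
call NN I-NP O
"""
for line in sample.splitlines():
    if line.strip():
        cols = line.split()
        token, pos, ner = cols[0], cols[1], cols[-1]   # word, POS tag, BIO NER tag
        print(f"{token:10s} POS={pos:4s} NER={ner}")
###Output _____no_output_____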
DL_TF20/Part 20 - post process - predict _ predict_generator-Antonio.ipynb
###Markdown TensorFlow 2.0 ###Code import os from glob import glob from datetime import datetime import numpy as np import tensorflow as tf from tensorflow.keras import layers from tensorflow.keras import datasets import matplotlib.pyplot as plt %load_ext tensorboard %matplotlib inline ###Output _____no_output_____ ###Markdown Hyperparameter Tunning ###Code num_epochs = 5 batch_size = 32 learning_rate = 0.001 dropout_rate = 0.5 input_shape = (32, 32, 3) num_classes = 10 ###Output _____no_output_____ ###Markdown Build Model ###Code inputs = layers.Input(input_shape) net = layers.Conv2D(32, (3, 3), padding='SAME')(inputs) net = layers.Activation('relu')(net) net = layers.Conv2D(32, (3, 3), padding='SAME')(net) net = layers.Activation('relu')(net) net = layers.MaxPooling2D(pool_size=(2, 2))(net) net = layers.Dropout(dropout_rate)(net) net = layers.Conv2D(64, (3, 3), padding='SAME')(net) net = layers.Activation('relu')(net) net = layers.Conv2D(64, (3, 3), padding='SAME')(net) net = layers.Activation('relu')(net) net = layers.MaxPooling2D(pool_size=(2, 2))(net) net = layers.Dropout(dropout_rate)(net) net = layers.Flatten()(net) net = layers.Dense(512)(net) net = layers.Activation('relu')(net) net = layers.Dropout(dropout_rate)(net) net = layers.Dense(num_classes)(net) net = layers.Activation('softmax')(net) model = tf.keras.Model(inputs=inputs, outputs=net, name='Basic_CNN') # Model is the full model w/o custom layers model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate), # Optimization loss='sparse_categorical_crossentropy', # Loss Function metrics=['accuracy']) # Metrics / Accuracy ###Output _____no_output_____ ###Markdown Data Preprocess ###Code train_paths = glob('dataset/cifar/train/*.png')[:100] test_paths = glob('dataset/cifar/test/*.png')[:100] def get_class_name(path): return path.split('_')[-1].replace('.png', '') train_labels = [get_class_name(path) for path in train_paths] class_names = np.unique(train_labels) def get_label(path): fname = tf.strings.split(path, '_')[-1] lbl_name = tf.strings.regex_replace(fname, '.png', '') onehot = tf.cast(lbl_name == class_names, tf.uint8) return tf.argmax(onehot) # 이번에는 onehot이 아닌 label 번호로 def load_image_label(path): gfile = tf.io.read_file(path) image = tf.io.decode_image(gfile) image = tf.cast(image, tf.float32) / 255. # rescale label = get_label(path) return image, label def image_preprocess(image, label): image = tf.image.random_flip_up_down(image) image = tf.image.random_flip_left_right(image) return image, label AUTOTUNE = tf.data.experimental.AUTOTUNE train_dataset = tf.data.Dataset.from_tensor_slices(train_paths) train_dataset = train_dataset.map(load_image_label, num_parallel_calls=AUTOTUNE) train_dataset = train_dataset.map(image_preprocess, num_parallel_calls=AUTOTUNE) train_dataset = train_dataset.batch(batch_size) train_dataset = train_dataset.shuffle(buffer_size=len(train_paths)) train_dataset = train_dataset.repeat() test_dataset = tf.data.Dataset.from_tensor_slices(test_paths) test_dataset = test_dataset.map(load_image_label, num_parallel_calls=AUTOTUNE) test_dataset = test_dataset.batch(batch_size) test_dataset = test_dataset.repeat() ###Output _____no_output_____ ###Markdown Training ###Code steps_per_epoch = len(train_paths) // batch_size validation_steps = len(test_paths) // batch_size model.fit_generator( train_dataset, steps_per_epoch=steps_per_epoch, validation_data=test_dataset, validation_steps=validation_steps, epochs=num_epochs ) ###Output WARNING: Logging before flag parsing goes to stderr. 
W0919 10:49:02.957817 23184 deprecation.py:323] From C:\ProgramData\Anaconda3\lib\site-packages\tensorflow_core\python\ops\math_grad.py:1394: where (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version. Instructions for updating: Use tf.where in 2.0, which has the same broadcast rule as np.where ###Markdown Predict 이미지를 Load 직접 load해서 넣는 방법 ###Code path = train_paths[0] test_image, test_label = load_image_label(path) test_image.shape test_image = test_image[tf.newaxis, ...] test_image.shape pred = model.predict(test_image) pred ###Output _____no_output_____ ###Markdown generator에서 데이터를 가져오는 방법 ###Code test_image, test_label = next(iter(test_dataset)) test_image.shape pred = model.predict(test_image) pred.shape pred[0] ###Output _____no_output_____ ###Markdown generator에 넣는 방법 ###Code pred = model.predict_generator(test_dataset.take(1)) pred.shape pred = model.predict_generator(test_dataset.take(2)) pred.shape ###Output _____no_output_____ ###Markdown Evaluate ###Code test_image, test_label = next(iter(test_dataset)) test_image.shape evals = model.evaluate(test_image, test_label) ###Output _____no_output_____
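###Markdown One extra post-processing step (added for illustration): mapping the softmax probabilities returned by `predict`/`predict_generator` above back to readable labels via `argmax`, using the `class_names` array built earlier from the training file names. ###Code
pred_indices = np.argmax(pred, axis=-1)   # class index per test image
pred_labels = class_names[pred_indices]   # back to the string labels
print(pred_labels[:10])
###Output _____no_output_____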
Landslide_Segmentation_with_U-Net_Landslide_Segmentation_with_U-Net:_Evaluating_Different_Sampling_Methods_and_Patch_Sizes/notebooks/Training_regular_RapidEye+Augmentation.ipynb
###Markdown Code used to train the RapidEye+Augmentation regular models The models were trained in Google Colaboratory Virtual Environment, thus, to work properly, this notebook should be loaded in google drive.* [32x32 models](32-x-32-models) * [64x64 models](64-x-64-models) * [128x128 models](128-x-128-models) ###Code # Import libraries import tensorflow as tf from __future__ import absolute_import from __future__ import division from __future__ import print_function from datetime import datetime from packaging import version %tensorflow_version 2.x from tensorflow import keras from tensorflow.keras.models import * from tensorflow.keras.layers import * from tensorflow.keras.optimizers import * import numpy as np import pandas as pd import matplotlib.pyplot as plt print("TensorFlow version: ", tf.__version__) assert version.parse(tf.__version__).release[0] >= 2, \ "This notebook requires TensorFlow 2.0 or above." # Install segmetation_models library (https://github.com/qubvel/segmentation_models) pip install segmentation_models # Load segmentation)models library import segmentation_models as sm ###Output _____no_output_____ ###Markdown 32 x 32 models ###Code # Load training data 32x32 - regular - the Strings are the directions to the .npy files in google drive X_train = np.load("/content/drive/My Drive/Mestrado/artigo/artigo_final/data/augmented/32_32/regular/arrays/X_train_32_regular.npy") Y_train = np.load("/content/drive/My Drive/Mestrado/artigo/artigo_final/data/augmented/32_32/regular/arrays/Y_train_32_regular.npy") # Load test data - Area 1 X_test_area_1 = np.load("/content/drive/My Drive/Mestrado/artigo/artigo_final/data/test/test_without_terrain/area_1/arrays/X_test_test_area_1.npy") Y_test_area_1 = np.load("/content/drive/My Drive/Mestrado/artigo/artigo_final/data/test/test_without_terrain/area_1/arrays/Y_test_test_area_1.npy") # Load test data - Area 2 X_test_area_2 = np.load("/content/drive/My Drive/Mestrado/artigo/artigo_final/data/test/test_without_terrain/area_2/arrays/X_test_test_area_2.npy") Y_test_area_2 = np.load("/content/drive/My Drive/Mestrado/artigo/artigo_final/data/test/test_without_terrain/area_2/arrays/Y_test_test_area_2.npy") # Evaluate data dimensions print(f"X_train shape: {X_train.shape}, Y_train shape: {Y_train.shape}\nX_test_area_1 shape: {X_test_area_1.shape}, Y_test_area_1 shape: {Y_test_area_1.shape},\nX_test_area_2 shape: {X_test_area_2.shape}, Y_test_area_2 shape: {Y_test_area_2.shape}") # Evaluation Metrics - Precision, Recall, FScore, IoU metrics = [sm.metrics.Precision(threshold=0.5),sm.metrics.Recall(threshold=0.5),sm.metrics.FScore(threshold=0.5,beta=1),sm.metrics.IOUScore(threshold=0.5)] # Unet Architecture def Unet_Original(lr,filtersFirstLayer, pretrained_weights = None,input_size = (32,32,5)): inputs = Input(input_size) conv1 = Conv2D(filtersFirstLayer, 3, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(inputs) conv1 = Conv2D(filtersFirstLayer, 3, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(conv1) pool1 = MaxPooling2D(pool_size=(2, 2))(conv1) conv2 = Conv2D(filtersFirstLayer*2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(pool1) conv2 = Conv2D(filtersFirstLayer*2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(conv2) pool2 = MaxPooling2D(pool_size=(2, 2))(conv2) conv3 = Conv2D(filtersFirstLayer*4, 3, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(pool2) conv3 = 
Conv2D(filtersFirstLayer*4, 3, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(conv3) pool3 = MaxPooling2D(pool_size=(2, 2))(conv3) conv4 = Conv2D(filtersFirstLayer*8, 3, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(pool3) conv4 = Conv2D(filtersFirstLayer*8, 3, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(conv4) pool4 = MaxPooling2D(pool_size=(2, 2))(conv4) conv5 = Conv2D(filtersFirstLayer*16, 3, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(pool4) conv5 = Conv2D(filtersFirstLayer*16, 3, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(conv5) up6 = Conv2D(filtersFirstLayer*8, 2, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(UpSampling2D(size = (2,2))(conv5)) merge6 = concatenate([conv4,up6], axis = 3) conv6 = Conv2D(filtersFirstLayer*8, 3, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(merge6) conv6 = Conv2D(filtersFirstLayer*8, 3, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(conv6) up7 = Conv2D(filtersFirstLayer*4, 2, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(UpSampling2D(size = (2,2))(conv6)) merge7 = concatenate([conv3,up7], axis = 3) conv7 = Conv2D(filtersFirstLayer*4, 3, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(merge7) conv7 = Conv2D(filtersFirstLayer*4, 3, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(conv7) up8 = Conv2D(filtersFirstLayer*2, 2, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(UpSampling2D(size = (2,2))(conv7)) merge8 = concatenate([conv2,up8], axis = 3) conv8 = Conv2D(filtersFirstLayer*2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(merge8) conv8 = Conv2D(filtersFirstLayer*2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(conv8) up9 = Conv2D(filtersFirstLayer, 2, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(UpSampling2D(size = (2,2))(conv8)) merge9 = concatenate([conv1,up9], axis = 3) conv9 = Conv2D(filtersFirstLayer, 3, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(merge9) conv9 = Conv2D(filtersFirstLayer, 3, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(conv9) conv9 = Conv2D(2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'glorot_normal')(conv9) conv10 = Conv2D(1, 1, activation = 'sigmoid')(conv9) model = Model(inputs, conv10) model.compile(optimizer = Adam(lr = lr), loss = 'binary_crossentropy', metrics = metrics) model.summary() if(pretrained_weights): model.load_weights(pretrained_weights) return model # Model training - Results are saved in a .csv file # size of the tiles size = 32 # Sampling method sampling = "regular" # number of filters filters = [16,32,64] # lr = 0.001 lr = [10e-4] # batch sizes batch_size = [16,32,64,128] # dictionary that will save the results dic = {} # Hyperparameters dic["model"] = [] dic["batch_size"] = [] dic["learning_rate"] = [] dic["filters"] = [] # test area 1 dic["precision_area_1"] = [] dic["recall_area_1"] = [] dic["f1_score_area_1"] = [] dic["iou_score_area_1"] = [] # test area 2 dic["precision_area_2"] = [] dic["recall_area_2"] = [] dic["f1_score_area_2"] = [] dic["iou_score_area_2"] = [] # loop over all the filters in the filter list for fiilter in filters: # loop over the 
learning rates (used to evalute 0.01 and 0.0001 without good results) for learning_rate in lr: # loop over all batch sizes in batch_size list for batch in batch_size: # load the model model = Unet_Original(filtersFirstLayer= fiilter, lr = learning_rate) # Save the models only when validation loss decrease model_checkpoint = tf.keras.callbacks.ModelCheckpoint(f'/content/drive/My Drive/Mestrado/artigo/artigo_final/results/augmented/{size}_{size}/{sampling}/model/unet/unet_{sampling}_size_{size}_filters_{fiilter}_batch_size_{batch}_lr_{learning_rate}.hdf5', monitor='val_loss', mode='min',verbose=1, save_best_only=True,save_weights_only = True) # Stop after 20 epochs without decreasing the validation loss early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=20) print(fiilter, learning_rate,batch) # fit the model 30% of the dataset was used as validation history = model.fit(X_train,Y_train,batch_size = batch,epochs=200,validation_split=0.3,callbacks=[model_checkpoint, early_stopping]) # summarize history for iou score plt.plot(history.history['iou_score']) plt.plot(history.history['val_iou_score']) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train', 'validation'], loc='upper left') # save plots plt.savefig(f"/content/drive/My Drive/Mestrado/artigo/artigo_final/results/augmented/{size}_{size}/{sampling}/plots/unet/unet_{sampling}_size_{size}_filters_{fiilter}_batch_size_{batch}_lr_{learning_rate}_iou_score.png") plt.show() # summarize history for loss plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'validation'], loc='upper left') plt.savefig(f"/content/drive/My Drive/Mestrado/artigo/artigo_final/results/augmented/{size}_{size}/{sampling}/plots/unet/unet_{sampling}_size_{size}_filters_{fiilter}_batch_size_{batch}_lr_{learning_rate}_val_loss.png") plt.show() # load unet to evaluate the test data unet_original = Unet_Original(filtersFirstLayer= fiilter, lr = learning_rate,input_size=(1024,1024,5)) # load the last saved weight from the training unet_original.load_weights(f"/content/drive/My Drive/Mestrado/artigo/artigo_final/results/augmented/{size}_{size}/{sampling}/model/unet/unet_{sampling}_size_{size}_filters_{fiilter}_batch_size_{batch}_lr_{learning_rate}.hdf5") # Evaluate test area 1 res_1 = unet_original.evaluate(X_test_area_1,Y_test_area_1) # Evaluate test area 2 res_2 = unet_original.evaluate(X_test_area_2,Y_test_area_2) # Data to plot the predicted output preds_train_1 = unet_original.predict(X_test_area_1, verbose=1) preds_train_t1 = (preds_train_1 > 0.5).astype(np.uint8) preds_train_2 = unet_original.predict(X_test_area_2, verbose=1) preds_train_t2 = (preds_train_2 > 0.5).astype(np.uint8) # save results on the dictionary dic["model"].append("Unet") dic["batch_size"].append(batch) dic["learning_rate"].append(learning_rate) dic["filters"].append(fiilter) dic["precision_area_1"].append(res_1[1]) dic["recall_area_1"].append(res_1[2]) dic["f1_score_area_1"].append(res_1[3]) dic["iou_score_area_1"].append(res_1[4]) dic["precision_area_2"].append(res_2[1]) dic["recall_area_2"].append(res_2[2]) dic["f1_score_area_2"].append(res_2[3]) dic["iou_score_area_2"].append(res_2[4]) # Plot the results and save the plots f, axarr = plt.subplots(2,3,figsize=(10,10)) axarr[0,0].imshow(X_test_area_1[0][:,:,:3]) axarr[0,1].imshow(np.squeeze(preds_train_t1[0])) axarr[0,2].imshow(np.squeeze(Y_test_area_1[0])) 
axarr[1,0].imshow(X_test_area_2[0][:,:,:3]) axarr[1,1].imshow(np.squeeze(preds_train_t2[0])) axarr[1,2].imshow(np.squeeze(Y_test_area_2[0])) f.savefig(f"/content/drive/My Drive/Mestrado/artigo/artigo_final/results/augmented/{size}_{size}/{sampling}/images/unet/unet_{sampling}_size_{size}_filters_{fiilter}_batch_size_{batch}_lr_{learning_rate}_result.png") # Convert results to a dataframe results = pd.DataFrame(dic) # Export as csv results.to_csv(f'/content/drive/My Drive/Mestrado/artigo/artigo_final/results/augmented/{size}_{size}/{sampling}/result_table/unet/unet_{sampling}_size_{size}_filters_{fiilter}_batch_size_{batch}.csv', index = False) ###Output _____no_output_____ ###Markdown 64 x 64 models ###Code # Load training data 64x64 - regular X_train = np.load("/content/drive/My Drive/Mestrado/artigo/artigo_final/data/augmented/64_64/regular/arrays/X_train_64_regular.npy") Y_train = np.load("/content/drive/My Drive/Mestrado/artigo/artigo_final/data/augmented/64_64/regular/arrays/Y_train_64_regular.npy") # Evaluate data dimensions print(f"X_train shape: {X_train.shape}, Y_train shape: {Y_train.shape}\nX_test_area_1 shape: {X_test_area_1.shape}, Y_test_area_1 shape: {Y_test_area_1.shape},\nX_test_area_2 shape: {X_test_area_2.shape}, Y_test_area_2 shape: {Y_test_area_2.shape}") # Model training - Results are saved in a .csv file # size of the tiles size = 64 # Sampling method sampling = "regular" # number of filters filters = [16,32,64] # lr = 0.001 lr = [10e-4] # batch sizes batch_size = [16,32,64,128] # dictionary that will save the results dic = {} # Hyperparameters dic["model"] = [] dic["batch_size"] = [] dic["learning_rate"] = [] dic["filters"] = [] # test area 1 dic["precision_area_1"] = [] dic["recall_area_1"] = [] dic["f1_score_area_1"] = [] dic["iou_score_area_1"] = [] # test area 2 dic["precision_area_2"] = [] dic["recall_area_2"] = [] dic["f1_score_area_2"] = [] dic["iou_score_area_2"] = [] # loop over all the filters in the filter list for fiilter in filters: # loop over the learning rates (used to evalute 0.01 and 0.0001 without good results) for learning_rate in lr: # loop over all batch sizes in batch_size list for batch in batch_size: # load the model model = Unet_Original(filtersFirstLayer= fiilter, lr = learning_rate, input_size = (64,64,5)) # Save the models only when validation loss decrease model_checkpoint = tf.keras.callbacks.ModelCheckpoint(f'/content/drive/My Drive/Mestrado/artigo/artigo_final/results/augmented/{size}_{size}/{sampling}/model/unet/unet_{sampling}_size_{size}_filters_{fiilter}_batch_size_{batch}_lr_{learning_rate}.hdf5', monitor='val_loss', mode='min',verbose=1, save_best_only=True,save_weights_only = True) # Stop after 20 epochs without decreasing the validation loss early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=20) print(fiilter, learning_rate,batch) # fit the model 30% of the dataset was used as validation history = model.fit(X_train,Y_train,batch_size = batch,epochs=200,validation_split=0.3,callbacks=[model_checkpoint, early_stopping]) # summarize history for iou score plt.plot(history.history['iou_score']) plt.plot(history.history['val_iou_score']) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train', 'validation'], loc='upper left') # save plots plt.savefig(f"/content/drive/My Drive/Mestrado/artigo/artigo_final/results/augmented/{size}_{size}/{sampling}/plots/unet/unet_{sampling}_size_{size}_filters_{fiilter}_batch_size_{batch}_lr_{learning_rate}_iou_score.png") plt.show() # 
summarize history for loss plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'validation'], loc='upper left') plt.savefig(f"/content/drive/My Drive/Mestrado/artigo/artigo_final/results/augmented/{size}_{size}/{sampling}/plots/unet/unet_{sampling}_size_{size}_filters_{fiilter}_batch_size_{batch}_lr_{learning_rate}_val_loss.png") plt.show() # load unet to evaluate the test data unet_original = Unet_Original(filtersFirstLayer= fiilter, lr = learning_rate,input_size=(1024,1024,5)) # load the last saved weight from the training unet_original.load_weights(f"/content/drive/My Drive/Mestrado/artigo/artigo_final/results/augmented/{size}_{size}/{sampling}/model/unet/unet_{sampling}_size_{size}_filters_{fiilter}_batch_size_{batch}_lr_{learning_rate}.hdf5") # Evaluate test area 1 res_1 = unet_original.evaluate(X_test_area_1,Y_test_area_1) # Evaluate test area 2 res_2 = unet_original.evaluate(X_test_area_2,Y_test_area_2) # Data to plot the predicted output preds_train_1 = unet_original.predict(X_test_area_1, verbose=1) preds_train_t1 = (preds_train_1 > 0.5).astype(np.uint8) preds_train_2 = unet_original.predict(X_test_area_2, verbose=1) preds_train_t2 = (preds_train_2 > 0.5).astype(np.uint8) # save results on the dictionary dic["model"].append("Unet") dic["batch_size"].append(batch) dic["learning_rate"].append(learning_rate) dic["filters"].append(fiilter) dic["precision_area_1"].append(res_1[1]) dic["recall_area_1"].append(res_1[2]) dic["f1_score_area_1"].append(res_1[3]) dic["iou_score_area_1"].append(res_1[4]) dic["precision_area_2"].append(res_2[1]) dic["recall_area_2"].append(res_2[2]) dic["f1_score_area_2"].append(res_2[3]) dic["iou_score_area_2"].append(res_2[4]) # Plot the results and save the plots f, axarr = plt.subplots(2,3,figsize=(10,10)) axarr[0,0].imshow(X_test_area_1[0][:,:,:3]) axarr[0,1].imshow(np.squeeze(preds_train_t1[0])) axarr[0,2].imshow(np.squeeze(Y_test_area_1[0])) axarr[1,0].imshow(X_test_area_2[0][:,:,:3]) axarr[1,1].imshow(np.squeeze(preds_train_t2[0])) axarr[1,2].imshow(np.squeeze(Y_test_area_2[0])) f.savefig(f"/content/drive/My Drive/Mestrado/artigo/artigo_final/results/augmented/{size}_{size}/{sampling}/images/unet/unet_{sampling}_size_{size}_filters_{fiilter}_batch_size_{batch}_lr_{learning_rate}_result.png") # Convert results to a dataframe results = pd.DataFrame(dic) # Export as csv results.to_csv(f'/content/drive/My Drive/Mestrado/artigo/artigo_final/results/augmented/{size}_{size}/{sampling}/result_table/unet/unet_{sampling}_size_{size}_filters_{fiilter}_batch_size_{batch}.csv', index = False) ###Output _____no_output_____ ###Markdown 128 x 128 models ###Code # Load training data 128x128 - regular X_train = np.load("/content/drive/My Drive/Mestrado/artigo/artigo_final/data/augmented/128_128/regular/arrays/X_train_128_regular.npy") Y_train = np.load("/content/drive/My Drive/Mestrado/artigo/artigo_final/data/augmented/128_128/regular/arrays/Y_train_128_regular.npy") # Evaluate data dimensions print(f"X_train shape: {X_train.shape}, Y_train shape: {Y_train.shape}\nX_test_area_1 shape: {X_test_area_1.shape}, Y_test_area_1 shape: {Y_test_area_1.shape},\nX_test_area_2 shape: {X_test_area_2.shape}, Y_test_area_2 shape: {Y_test_area_2.shape}") # Model training - Results are saved in a .csv file # size of the tiles size = 128 # Sampling method sampling = "regular" # number of filters filters = [16,32,64] # lr = 0.001 lr = [10e-4] # batch sizes batch_size = [16,32,64,128] # 
dictionary that will save the results dic = {} # Hyperparameters dic["model"] = [] dic["batch_size"] = [] dic["learning_rate"] = [] dic["filters"] = [] # test area 1 dic["precision_area_1"] = [] dic["recall_area_1"] = [] dic["f1_score_area_1"] = [] dic["iou_score_area_1"] = [] # test area 2 dic["precision_area_2"] = [] dic["recall_area_2"] = [] dic["f1_score_area_2"] = [] dic["iou_score_area_2"] = [] # loop over all the filters in the filter list for fiilter in filters: # loop over the learning rates (used to evalute 0.01 and 0.0001 without good results) for learning_rate in lr: # loop over all batch sizes in batch_size list for batch in batch_size: # load the model model = Unet_Original(filtersFirstLayer= fiilter, lr = learning_rate, input_size = (128,128,5)) # Save the models only when validation loss decrease model_checkpoint = tf.keras.callbacks.ModelCheckpoint(f'/content/drive/My Drive/Mestrado/artigo/artigo_final/results/augmented/{size}_{size}/{sampling}/model/unet/unet_{sampling}_size_{size}_filters_{fiilter}_batch_size_{batch}_lr_{learning_rate}.hdf5', monitor='val_loss', mode='min',verbose=1, save_best_only=True,save_weights_only = True) # Stop after 20 epochs without decreasing the validation loss early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=20) print(fiilter, learning_rate,batch) # fit the model 30% of the dataset was used as validation history = model.fit(X_train,Y_train,batch_size = batch,epochs=200,validation_split=0.3,callbacks=[model_checkpoint, early_stopping]) # summarize history for iou score plt.plot(history.history['iou_score']) plt.plot(history.history['val_iou_score']) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train', 'validation'], loc='upper left') # save plots plt.savefig(f"/content/drive/My Drive/Mestrado/artigo/artigo_final/results/augmented/{size}_{size}/{sampling}/plots/unet/unet_{sampling}_size_{size}_filters_{fiilter}_batch_size_{batch}_lr_{learning_rate}_iou_score.png") plt.show() # summarize history for loss plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'validation'], loc='upper left') plt.savefig(f"/content/drive/My Drive/Mestrado/artigo/artigo_final/results/augmented/{size}_{size}/{sampling}/plots/unet/unet_{sampling}_size_{size}_filters_{fiilter}_batch_size_{batch}_lr_{learning_rate}_val_loss.png") plt.show() # load unet to evaluate the test data unet_original = Unet_Original(filtersFirstLayer= fiilter, lr = learning_rate,input_size=(1024,1024,5)) # load the last saved weight from the training unet_original.load_weights(f"/content/drive/My Drive/Mestrado/artigo/artigo_final/results/augmented/{size}_{size}/{sampling}/model/unet/unet_{sampling}_size_{size}_filters_{fiilter}_batch_size_{batch}_lr_{learning_rate}.hdf5") # Evaluate test area 1 res_1 = unet_original.evaluate(X_test_area_1,Y_test_area_1) # Evaluate test area 2 res_2 = unet_original.evaluate(X_test_area_2,Y_test_area_2) # Data to plot the predicted output preds_train_1 = unet_original.predict(X_test_area_1, verbose=1) preds_train_t1 = (preds_train_1 > 0.5).astype(np.uint8) preds_train_2 = unet_original.predict(X_test_area_2, verbose=1) preds_train_t2 = (preds_train_2 > 0.5).astype(np.uint8) # save results on the dictionary dic["model"].append("Unet") dic["batch_size"].append(batch) dic["learning_rate"].append(learning_rate) dic["filters"].append(fiilter) dic["precision_area_1"].append(res_1[1]) 
dic["recall_area_1"].append(res_1[2]) dic["f1_score_area_1"].append(res_1[3]) dic["iou_score_area_1"].append(res_1[4]) dic["precision_area_2"].append(res_2[1]) dic["recall_area_2"].append(res_2[2]) dic["f1_score_area_2"].append(res_2[3]) dic["iou_score_area_2"].append(res_2[4]) # Plot the results and save the plots f, axarr = plt.subplots(2,3,figsize=(10,10)) axarr[0,0].imshow(X_test_area_1[0][:,:,:3]) axarr[0,1].imshow(np.squeeze(preds_train_t1[0])) axarr[0,2].imshow(np.squeeze(Y_test_area_1[0])) axarr[1,0].imshow(X_test_area_2[0][:,:,:3]) axarr[1,1].imshow(np.squeeze(preds_train_t2[0])) axarr[1,2].imshow(np.squeeze(Y_test_area_2[0])) f.savefig(f"/content/drive/My Drive/Mestrado/artigo/artigo_final/results/augmented/{size}_{size}/{sampling}/images/unet/unet_{sampling}_size_{size}_filters_{fiilter}_batch_size_{batch}_lr_{learning_rate}_result.png") # Convert results to a dataframe results = pd.DataFrame(dic) # Export as csv results.to_csv(f'/content/drive/My Drive/Mestrado/artigo/artigo_final/results/augmented/{size}_{size}/{sampling}/result_table/unet/unet_{sampling}_size_{size}_filters_{fiilter}_batch_size_{batch}.csv', index = False) ###Output _____no_output_____
ml-regression/week1-2/numpy-tutorial.ipynb
###Markdown Numpy Tutorial Numpy is a computational library for Python that is optimized for operations on multi-dimensional arrays. In this notebook we will use numpy to work with 1-d arrays (often called vectors) and 2-d arrays (often called matrices).For a the full user guide and reference for numpy see: http://docs.scipy.org/doc/numpy/ ###Code import numpy as np # importing this way allows us to refer to numpy as np ###Output _____no_output_____ ###Markdown Creating Numpy Arrays New arrays can be made in several ways. We can take an existing list and convert it to a numpy array: ###Code mylist = [1., 2., 3., 4.] mynparray = np.array(mylist) mynparray ###Output _____no_output_____ ###Markdown You can initialize an array (of any dimension) of all ones or all zeroes with the ones() and zeros() functions: ###Code one_vector = np.ones(4) print one_vector # using print removes the array() portion one2Darray = np.ones((2, 4)) # an 2D array with 2 "rows" and 4 "columns" print one2Darray zero_vector = np.zeros(4) print zero_vector ###Output _____no_output_____ ###Markdown You can also initialize an empty array which will be filled with values. This is the fastest way to initialize a fixed-size numpy array however you must ensure that you replace all of the values. ###Code empty_vector = np.empty(5) print empty_vector ###Output _____no_output_____ ###Markdown Accessing array elements Accessing an array is straight forward. For vectors you access the index by referring to it inside square brackets. Recall that indices in Python start with 0. ###Code mynparray[2] ###Output _____no_output_____ ###Markdown 2D arrays are accessed similarly by referring to the row and column index separated by a comma: ###Code my_matrix = np.array([[1, 2, 3], [4, 5, 6]]) print my_matrix print my_matrix[1, 2] ###Output _____no_output_____ ###Markdown Sequences of indices can be accessed using ':' for example ###Code print my_matrix[0:2, 2] # recall 0:2 = [0, 1] print my_matrix[0, 0:3] ###Output _____no_output_____ ###Markdown You can also pass a list of indices. ###Code fib_indices = np.array([1, 1, 2, 3]) random_vector = np.random.random(10) # 10 random numbers between 0 and 1 print random_vector print random_vector[fib_indices] ###Output _____no_output_____ ###Markdown You can also use true/false values to select values ###Code my_vector = np.array([1, 2, 3, 4]) select_index = np.array([True, False, True, False]) print my_vector[select_index] ###Output _____no_output_____ ###Markdown For 2D arrays you can select specific columns and specific rows. Passing ':' selects all rows/columns ###Code select_cols = np.array([True, False, True]) # 1st and 3rd column select_rows = np.array([False, True]) # 2nd row print my_matrix[select_rows, :] # just 2nd row but all columns print my_matrix[:, select_cols] # all rows and just the 1st and 3rd column ###Output _____no_output_____ ###Markdown Operations on Arrays You can use the operations '\*', '\*\*', '\\', '+' and '-' on numpy arrays and they operate elementwise. 
###Code my_array = np.array([1., 2., 3., 4.]) print my_array*my_array print my_array**2 print my_array - np.ones(4) print my_array + np.ones(4) print my_array / 3 print my_array / np.array([2., 3., 4., 5.]) # = [1.0/2.0, 2.0/3.0, 3.0/4.0, 4.0/5.0] ###Output _____no_output_____ ###Markdown You can compute the sum with np.sum() and the average with np.average() ###Code print np.sum(my_array) print np.average(my_array) print np.sum(my_array)/len(my_array) ###Output _____no_output_____ ###Markdown The dot product An important mathematical operation in linear algebra is the dot product. When we compute the dot product between two vectors we are simply multiplying them elementwise and adding them up. In numpy you can do this with np.dot() ###Code array1 = np.array([1., 2., 3., 4.]) array2 = np.array([2., 3., 4., 5.]) print np.dot(array1, array2) print np.sum(array1*array2) ###Output _____no_output_____ ###Markdown Recall that the Euclidean length (or magnitude) of a vector is the squareroot of the sum of the squares of the components. This is just the squareroot of the dot product of the vector with itself: ###Code array1_mag = np.sqrt(np.dot(array1, array1)) print array1_mag print np.sqrt(np.sum(array1*array1)) ###Output _____no_output_____ ###Markdown We can also use the dot product when we have a 2D array (or matrix). When you have an vector with the same number of elements as the matrix (2D array) has columns you can right-multiply the matrix by the vector to get another vector with the same number of elements as the matrix has rows. For example this is how you compute the predicted values given a matrix of features and an array of weights. ###Code my_features = np.array([[1., 2.], [3., 4.], [5., 6.], [7., 8.]]) print my_features my_weights = np.array([0.4, 0.5]) print my_weights my_predictions = np.dot(my_features, my_weights) # note that the weights are on the right print my_predictions # which has 4 elements since my_features has 4 rows ###Output _____no_output_____ ###Markdown Similarly if you have a vector with the same number of elements as the matrix has *rows* you can left multiply them. ###Code my_matrix = my_features my_array = np.array([0.3, 0.4, 0.5, 0.6]) print np.dot(my_array, my_matrix) # which has 2 elements because my_matrix has 2 columns ###Output _____no_output_____ ###Markdown Multiplying Matrices If we have two 2D arrays (matrices) matrix_1 and matrix_2 where the number of columns of matrix_1 is the same as the number of rows of matrix_2 then we can use np.dot() to perform matrix multiplication. ###Code matrix_1 = np.array([[1., 2., 3.],[4., 5., 6.]]) print matrix_1 matrix_2 = np.array([[1., 2.], [3., 4.], [5., 6.]]) print matrix_2 print np.dot(matrix_1, matrix_2) ###Output _____no_output_____
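###Markdown As a small illustration (not part of the original tutorial), the dot products above are exactly the kind of computation used for closed-form least-squares regression, w = (X^T X)^{-1} X^T y. The feature matrix and observations below are made up for the example. ###Code
# Minimal sketch: closed-form least squares written with np.dot()
X = np.array([[1., 0.], [1., 1.], [1., 2.], [1., 3.]])  # a column of 1s plus one feature
y = np.array([1.0, 2.9, 5.1, 7.0])                      # made-up observations
XtX = np.dot(X.T, X)                                    # 2x2 matrix
Xty = np.dot(X.T, y)                                    # vector with 2 elements
w = np.dot(np.linalg.inv(XtX), Xty)
w                                                       # intercept and slope of the fitted line
###Output _____no_output_____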
alura_imersaodados3.ipynb
###Markdown Alura Data Immersion (Imersão Dados da Alura)Exploratory data analysis on a _drug discovery_ problem Importing libraries and reading the dataset ###Code # initial config from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" # config to print all statements, not only the last one import pandas as pd import seaborn as sns import matplotlib.pyplot as plt pd.set_option('display.max_rows', 300) # set the number of rows shown pd.set_option('display.max_columns', 50) # set the number of columns shown sns.set() # load seaborn's default display settings # Read the dataset of interest dados = pd.read_csv("https://github.com/llrt/alura-imersaodados3/blob/main/dados/dados_experimentos.zip?raw=true", compression="zip") # since the file is a zipped CSV, use the option for automatic handling of the compression ###Output _____no_output_____ ###Markdown General overview of the dataset ###Code dados.shape # dimensions of the dataset (number of rows, number of columns) dados.info() # general info on the columns dados.head(n=6) # first 6 rows of the dataset dados.tail(n=6) # last 6 rows of the dataset print('Valores únicos para as principais colunas:\n') print_valores = \ lambda campo: print("%s: " % campo, dados[campo].unique(), "(%s valores)" % (len(dados[campo].unique()))) print_valores('id') print_valores('tratamento') print_valores('tempo') print_valores('dose') print_valores('droga') ###Output Valores únicos para as principais colunas: id: ['id_000644bb2' 'id_000779bfc' 'id_000a6266a' ... 'id_fffc1c3f4' 'id_fffcb9e7c' 'id_ffffdd77b'] (23814 valores) tratamento: ['com_droga' 'com_controle'] (2 valores) tempo: [24 72 48] (3 valores) dose: ['D1' 'D2'] (2 valores) droga: ['b68db1d53' 'df89a8e5a' '18bb41b2c' ... '573c787a2' 'b2fe3eca7' 'dd4a96d16'] (3289 valores) ###Markdown The _id_ field really does look like a unique identifier of the row, i.e. of that experiment.The _tratamento_ field indicates whether we are dealing with an experiment that belongs to the control group (no compound applied, or a placebo) or one in which some compound was applied.The _tempo_ field indicates for how long that drug (or placebo) was applied. It seems to be measured in hours (only the values 24, 48 and 72, all multiples of 24).The _dose_ field identifies two dosage levels of the drug, D1 and D2. It is not possible to infer which dose is larger than the other.And what about the _g-*_ and _c-*_ fields? ###Code print_valores('g-0') print_valores('c-0') ###Output g-0: [ 1.062 0.0743 0.628 ... 0.3942 0.666 -0.8598] (14367 valores) c-0: [-0.06 0.0927 -0.1312 ... 0.3509 0.9538 -2.389 ] (14421 valores) ###Markdown The _g-*_ and _c-*_ fields are both numeric, with decimal values. According to the domain specialist, their meaning is:* _g-*_ fields: they specify the gene expression for a given gene g-n, i.e. the degree of production of that gene g-n under the conditions of the experiment. It is normally a (positive) multiplier on top of the base (control) value, but since the values are decimals and there are negative and positive values around 0, there are signs that the values were normalized (and perhaps truncated) - to be checked further on.* _c-*_ fields: they express the relative effect of the experiment on a cell line c-n. There are also negative and positive values around 0, indicating a possible normalization of the values - to be checked further on ###Code dados.describe() # general statistics for all columns ###Output _____no_output_____ ###Markdown Finally, let's rename some fields for future convenience.
###Code mapa = {'droga': 'composto'} dados.rename(columns=mapa, inplace=True) ###Output _____no_output_____ ###Markdown Analysis of the columns The _tratamento_ columnHow are the experiments distributed in terms of control vs. drug? ###Code print('distribuição de valores em tratamento: \n') dados['tratamento'].value_counts() print('\n') print('distribuição de valores em tratamento (proporção): \n') dados['tratamento'].value_counts(normalize=True) dados['tratamento'].value_counts().plot.bar() ###Output distribuição de valores em tratamento: ###Markdown The _tratamento_ column indicates whether the experiment in question belongs to a control group (_com_controle_ ) or to a test group (_com_droga_). Note that the proportion is very unbalanced (92% with drug). Why? ###Code pd.crosstab(dados.composto, [dados.tempo, dados.dose]) ###Output _____no_output_____ ###Markdown Each compound was therefore tested, on average, 6 times, once for each combination of time x dose. How many control experiments were run? ###Code com_controle = dados.query('tratamento=="com_controle"') pd.crosstab(com_controle.composto, [com_controle.tempo, com_controle.dose]) ###Output _____no_output_____ ###Markdown Proportionally we had far fewer control experiments in total per combination of time x dose. Probably only the number of experiments needed to later obtain statistical significance in the remaining _g-*_ and _c-*_ fields was run. The net effect is that the _tratamento_ column ends up quite unbalanced (92% of the experiments with drug, 8% with control). The _composto_ columnThe _composto_ column (formerly _droga_) uniquely identifies the drug/compound (or control) tested in that experiment. How many drugs were tested? ###Code print('qtd de compostos únicos:\n') print_valores('composto') print('distribuição de valores em composto: \n') dados['composto'].value_counts() print('\n') print('distribuição de valores em composto (proporção): \n') dados['composto'].value_counts(normalize=True) ###Output qtd de compostos únicos: composto: ['b68db1d53' 'df89a8e5a' '18bb41b2c' ... '573c787a2' 'b2fe3eca7' 'dd4a96d16'] (3289 valores) distribuição de valores em composto: ###Markdown We have 3289 compounds, and 1 of them - the most common one, with id _cacb2b860_ - actually identifies the control experiments, accounting for 8% (1866) of the experiments. ###Code cod_compostos_mais_frequentes = dados['composto'].value_counts()[:5].index plt.figure(figsize=(8,6)) ax = sns.countplot(x='composto', data=dados.query('composto in @cod_compostos_mais_frequentes'), order=cod_compostos_mais_frequentes) ax.set_title('Top 5 Compostos') plt.show(ax) ###Output _____no_output_____ ###Markdown The dose and tempo columns How are the experiments distributed in terms of dose? ###Code print('distribuição de valores em dose: \n') dados['dose'].value_counts() print('\n') print('distribuição de valores em dose (proporção): \n') dados['dose'].value_counts(normalize=True) dados['dose'].value_counts().plot.bar() ###Output distribuição de valores em dose: ###Markdown In terms of _dose_, the samples are well balanced between D1 and D2. And in terms of time?
###Code print('distribuição de valores em dose: \n') dados['tempo'].value_counts() print('\n') print('distribuição de valores em tempo (proporção): \n') dados['tempo'].value_counts(normalize=True) dados['tempo'].value_counts().plot.bar() ###Output distribuição de valores em dose: ###Markdown In terms of _tempo_, the samples are well balanced among its possible values (24, 48 and 72).But when both fields are considered jointly, are the samples still balanced? ###Code pd.crosstab(dados.tempo, dados.dose) ###Output _____no_output_____ ###Markdown Yes, the samples are reasonably well balanced when we consider _dose_ and _tempo_ jointly - there are slightly fewer experiments with dose D2 for 48 h than with dose D1, but nothing impactful. The _g-*_ columnsThe _g-*_ columns are continuous numeric, not categorical. So let's look at the distribution of their values. ###Code dados.loc[:, dados.columns.str.startswith("g")].describe().T fig, ax = plt.subplots(2,1) sns.histplot(data=dados, x="g-0", ax=ax[0]) sns.histplot(data=dados, x="g-1", ax=ax[1]) ###Output _____no_output_____ ###Markdown What does the distribution of the means look like? Do the values really orbit around 0 in all the _g-*_ columns? ###Code dados.loc[:, dados.columns.str.startswith("g")].describe().T['mean'].hist() ###Output _____no_output_____ ###Markdown Yes, most of them are around 0.0 or very close to it. But what about the min and max of the _g-*_ columns? ###Code dados.loc[:, dados.columns.str.startswith("g")].describe().T['min'].hist() dados.loc[:, dados.columns.str.startswith("g")].describe().T['max'].hist() ###Output _____no_output_____ ###Markdown The values very frequently have their minimum at -10 and maximum at +10, indicating that a truncation at these points probably happened during the normalization process. The c-* columnsThe c-* columns are continuous numeric, not categorical. So let's look at the distribution of their values. ###Code dados.loc[:, dados.columns.str.startswith("c")].describe().T fig, ax = plt.subplots(2,1) sns.histplot(data=dados, x="c-0", ax=ax[0]) sns.histplot(data=dados, x="c-1", ax=ax[1]) ###Output _____no_output_____ ###Markdown What does the distribution of the means look like? Do the values also orbit around 0 in all the c-* columns? ###Code dados.loc[:, dados.columns.str.startswith("c")].describe().T['mean'].hist() ###Output _____no_output_____ ###Markdown Yes, most of them are around 0.0 or very close to it. But what about the min and max of the c-* columns? ###Code dados.loc[:, dados.columns.str.startswith("c")].describe().T['min'].hist() dados.loc[:, dados.columns.str.startswith("c")].describe().T['max'].hist() ###Output _____no_output_____ ###Markdown In terms of min, most of the values are at -10, revealing a possible truncation.In terms of max, the c-* columns vary quite a bit, reaching up to +6 Multivariate analyses (of several columns jointly) ###Code pd.crosstab([dados.dose, dados.tempo], dados.tratamento, normalize='index') ###Output _____no_output_____ ###Markdown Looking at _tratamento_, the proportion of _com_droga_ vs _com_controle_ is homogeneous across the values of _dose_ and _tempo_ (about 92% with drug, 8% with control). On the other hand, are the control experiments balanced across doses and times? ###Code pd.crosstab([dados.dose, dados.tempo], dados.tratamento, normalize='columns') ###Output _____no_output_____ ###Markdown Yes, the control class is quite homogeneous across the combinations of doses and times. The same holds for the drug class.
How does the mean of a given gene g-n look across the various dose / time x treatment groups?Let's consider gene g-0 ###Code pd.crosstab([dados.dose, dados.tempo], dados.tratamento, values=dados['g-0'], aggfunc='mean') ###Output _____no_output_____ ###Markdown At first sight, the use of drugs does indeed change the gene-expression values for g-0. That can be good or bad, depending on whether or not we want to stimulate the production of this gene. But there seems to be a correlation between the use of compounds and a change in gene expression. How do the _g-*_ columns vary jointly? ###Code sns.lmplot(data=dados, x='g-0', y='g-3', line_kws={"color":"red"}) sns.lmplot(data=dados, x='g-0', y='g-8', line_kws={"color":"red"}) ###Output _____no_output_____ ###Markdown In this example, there does not seem to be a correlation between g-0 and g-3, but there does seem to be a negative correlation between g-0 and g-8.Does the behaviour change, though, depending on treatment / time / dose? ###Code sns.lmplot(data=dados, x='g-0', y='g-3', col='tratamento', row="tempo", hue="dose", line_kws={"color":"red"}) sns.lmplot(data=dados, x='g-0', y='g-8', col='tratamento', row="tempo", hue="dose", line_kws={"color":"red"}) ###Output _____no_output_____ ###Markdown The behaviour differs between control and drug treatments - with drugs there is a more pronounced joint variation in gene expression, and this behaviour changes with the treatment time; on the other hand, there do not seem to be large variations with the dose. Let's evaluate more generally the correlation between the _g-*_ columns ###Code dados.loc[:, dados.columns.str.startswith("g")].corr() # correlation between the g-* columns import numpy as np d = dados.loc[:,"g-0":"g-50"] # Compute the correlation matrix corr = d.corr() # Generate a mask for the upper triangle mask = np.triu(np.ones_like(corr, dtype=bool)) # Set up the matplotlib figure f, ax = plt.subplots(figsize=(11, 9)) # Generate a custom diverging colormap cmap = sns.diverging_palette(230, 20, as_cmap=True) # Draw the heatmap with the mask and correct aspect ratio sns.heatmap(corr, mask=mask, cmap=cmap, center=0, square=True, linewidths=.5, cbar_kws={"shrink": .5}) ###Output _____no_output_____
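###Markdown A possible follow-up (not part of the original notebook): besides the heatmap, the strongest pairwise correlations in the same g-0..g-50 slice can be listed explicitly. ###Code
# Minimal sketch: rank gene-expression pairs by absolute correlation.
corr = dados.loc[:, "g-0":"g-50"].corr()
upper = corr.where(np.triu(np.ones_like(corr, dtype=bool), k=1))  # upper triangle, diagonal excluded
strongest = upper.stack().abs().sort_values(ascending=False)
strongest.head(10)
###Output _____no_output_____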
CODE/WIDS_Week_2.ipynb
###Markdown Women in Data ScienceWeek 2 - Data UnderstandingStephen RedmondEnterprise Insight Studio Lead ###Code # Connect to my Google Drive from google.colab import drive !ls '/content/drive/My Drive/WIDS' ###Output _____no_output_____ ###Markdown Loading the DataFrame from a CSV file using pandas ###Code # import the pandas library # Traditionally, this is loaded and named as "pd" import pandas as pd # Titanic data from https://www.kaggle.com/c/titanic/data # Load the Titanic data file from my Google Drive df = pd.read_csv('/content/drive/My Drive/WIDS/titanic/train.csv') ###Output _____no_output_____ ###Markdown Data Dictionary| Variable | Definition | Key ||----------|--------------------------------------------|------------------------------------------------|| survival | Survival | 0 = No, 1 = Yes || pclass | Ticket class | 1 = 1st, 2 = 2nd, 3 = 3rd || sex | Sex | || Age | Age in years | || sibsp | of siblings / spouses aboard the Titanic | || parch | of parents / children aboard the Titanic | || ticket | Ticket number | || fare | Passenger fare | || cabin | Cabin number | || embarked | Port of Embarkation | C = Cherbourg, Q = Queenstown, S = Southampton | ###Code # Have a quick look df.head() # Summary stats df.describe() # And the non-numeric fields df[["Name","Sex","Ticket","Cabin","Embarked"]].describe() # Some of these fields are less than useful: # - Cabin has many missing values # - Name, Ticket and PassengerId have too many unique values df = df.drop(columns = ['Cabin','Name','Ticket','PassengerId']) ###Output _____no_output_____ ###Markdown Contingency Tables Multivariate cross-tablesShows the number of values in each category. ###Code # Single category - we saw this last week! df['Pclass'].value_counts(dropna = False) df['Sex'].value_counts(dropna = False) # Multiple categories - using crosstable df_crosstab = pd.crosstab(df['Pclass'], df['Sex'], margins = False) df_crosstab df_crosstab = pd.crosstab(df['Survived'], df['Sex'], margins = False) df_crosstab df_crosstab = pd.crosstab(df['Survived'], df['Pclass'], margins = False) df_crosstab ###Output _____no_output_____ ###Markdown Statistical tests with scipy**scipy** is one of the fundamental Python librariesThere are several extensions to scipy, known as "kits" or "scikits".The most famous is **scikit-learn** (which we will come across!) ###Code # Compute the chi-square statistic and p-value for the hypothesis test # of independence of the observed frequencies in the contingency table, # e.g. are class and survival independent from scipy.stats import chi2_contingency chi2_contingency(df_crosstab) # returns: # chi2 # p-value # degrees of freedom # array of expected values ###Output _____no_output_____ ###Markdown Scatter plotsWe have seen these before - very useful for visualising relationships in data ###Code # remember the "magic" - using % in Jupyter notebooks %matplotlib inline import matplotlib.pyplot as plt # traditionally "plt" #Name columns x = df['Age'] y = df['Fare'] #Plot graphs plt.figure(figsize=(10,8)) plt.scatter(x, y, alpha=0.5) # alpha is the level of transparency (0=invisible, 1=fully opaque) plt.xlabel('Age') plt.ylabel('Fare') plt.title('Scatter plot of age versus fare paid') plt.show() # pandas DataFrame.plot lets us do all that in one! 
df.plot(kind = "scatter", x = "SibSp", y = "Parch", title = "Scatter of siblings / spouses versus parents / children", figsize=(10,8), alpha = 0.5).set(xlabel="siblings / spouses", ylabel="parents / children") plt.show() # we also saw the scatter matrix before from pandas.plotting import scatter_matrix attributes = ["Age", "Fare", "Pclass", "SibSp", "Parch"] scatter_matrix(df[attributes], figsize=(18,12)) plt.show() ###Output _____no_output_____ ###Markdown Check missing values ###Code df["Embarked"].value_counts(dropna = False) # The Embarked field has 2 missing - let's just assume it was Southampton df["Embarked"].fillna("S", inplace=True) df["Age"].value_counts(dropna = False) # Age has NaN values ... what should we do? df['Age'].fillna(df['Age'].mode()[0], inplace=True) ###Output _____no_output_____ ###Markdown Starting with scikit-learn A useful "add on" to scipy 😊We'll have a look at some useful initial uses of sklearn.For example, splitting a dataset into training and test (why would we do this?) ###Code from sklearn.model_selection import train_test_split train_X, test_X = train_test_split(df, test_size = 0.2) train_X.shape test_X.shape X = pd.get_dummies(train_X[["Pclass","Sex","Age","SibSp","Parch","Fare","Embarked"]],drop_first=True) y = train_X["Survived"] X from sklearn import tree dt = tree.DecisionTreeClassifier() dt = dt.fit(X, y) #plt.figure(figsize=(100,100)) #tree.plot_tree(dt) #plt.show() X_test = pd.get_dummies(test_X[["Pclass","Sex","Age","SibSp","Parch","Fare","Embarked"]],drop_first=True) #.to_numpy() y_test = test_X["Survived"] y_pred = dt.predict(X_test) y_pred # Use crosstab to display our results pd.crosstab(y_test, y_pred, rownames=['True'], colnames=['Predicted'], margins=True) # Calcualte F1 score: F1 = 2 * (precision * recall) / (precision + recall) from sklearn.metrics import f1_score f1_score(y_test, y_pred) ###Output _____no_output_____ ###Markdown Feature Engineering Adding new features based on existing ones ###Code # Create a new feature called FamilySize df['FamilySize'] = df['SibSp'] + df['Parch'] df[['FamilySize', 'Survived']].groupby(['FamilySize'], as_index=False).agg('mean') # Create a new feature called IsAlone, based on FamilySize df['IsAlone'] = 0 df.loc[df['FamilySize'] == 0, 'IsAlone'] = 1 # Retrain my models train_X, test_X = train_test_split(df, test_size = 0.2) X = pd.get_dummies(train_X[["Pclass","Sex","Age","SibSp","Parch","Fare","Embarked","FamilySize","IsAlone"]],drop_first=True) y = train_X["Survived"] dt = dt.fit(X, y) X_test = pd.get_dummies(test_X[["Pclass","Sex","Age","SibSp","Parch","Fare","Embarked","FamilySize","IsAlone"]],drop_first=True) #.to_numpy() y_test = test_X["Survived"] y_pred = dt.predict(X_test) pd.crosstab(y_test, y_pred, rownames=['True'], colnames=['Predicted'], margins=True) f1_score(y_test, y_pred) ###Output _____no_output_____ ###Markdown Feature importance Which features have most impact on the target variable ###Code feat_importances = pd.Series(dt.feature_importances_, index=X.columns) feat_importances.nlargest(10).plot(kind='barh') plt.show() # Retrain my models with less features train_X, test_X = train_test_split(df, test_size = 0.2) X = pd.get_dummies(train_X[["Pclass","Sex","Age","Fare","FamilySize"]],drop_first=True) y = train_X["Survived"] dt = dt.fit(X, y) X_test = pd.get_dummies(test_X[["Pclass","Sex","Age","Fare","FamilySize"]],drop_first=True) #.to_numpy() y_test = test_X["Survived"] y_pred = dt.predict(X_test) pd.crosstab(y_test, y_pred, rownames=['True'], colnames=['Predicted'], margins=True) 
f1_score(y_test, y_pred) ###Output _____no_output_____
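###Markdown A possible extension (not part of the original notebook): instead of scoring a single train/test split, the same decision tree can be scored with 5-fold cross-validation on the reduced feature set, which gives a more stable F1 estimate. It uses the `df` dataframe with the engineered `FamilySize` feature from above. ###Code
# Minimal sketch: 5-fold cross-validated F1 for the reduced feature set.
from sklearn.model_selection import cross_val_score

X_all = pd.get_dummies(df[["Pclass", "Sex", "Age", "Fare", "FamilySize"]], drop_first=True)
y_all = df["Survived"]
scores = cross_val_score(tree.DecisionTreeClassifier(), X_all, y_all, cv=5, scoring="f1")
print(scores)
print("mean F1:", scores.mean())
###Output _____no_output_____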
Simulated_multiresolution/Simulated_mr_scenes.ipynb
###Markdown Simulations for multi-resolution deblending In this notebook I test multi-resolution on simulated images using the galsim package. ###Code import scarlet import galsim from astropy import wcs as WCS import time from mr_tools import galsim_compare_tools as gct from mr_tools.simulations import Simulation, load_surveys, chi import proxmin import pickle # Import Packages and setup import numpy as np import scarlet.display from scarlet.display import AsinhMapping from scarlet import Starlet from scarlet.wavelet import mad_wavelet import scipy.stats as scs from functools import partial from scarlet_extensions.initialization.detection import makeCatalog, Data from scarlet_extensions.scripts.runner import Runner import warnings warnings.filterwarnings("ignore") %matplotlib inline import matplotlib import matplotlib.pyplot as plt # use a better colormap and don't interpolate the pixels matplotlib.rc('image', cmap='gist_stern') matplotlib.rc('image', interpolation='none') %pylab inline # Setup: declaring survey properties, loading catalog and making sure we have pretty colorbars data_dir=galsim.meta_data.share_dir HST, EUCLID, ROMAN, HSC, RUBIN = load_surveys() print(RUBIN) center_ra = 19.3*galsim.hours # The RA, Dec of the center of the image on the sky center_dec = -33.1*galsim.degrees cat = galsim.COSMOSCatalog(dir=data_dir, file_name = 'real_galaxy_catalog_23.5_example.fits') # Generate simulations hr_dict = EUCLID lr_dict = RUBIN nlr = 60 nhr = np.int(np.around(nlr*lr_dict['pixel']/hr_dict['pixel'], decimals = 3)) print(nlr, nhr) ngal = np.int(np.random.rand(1)*10) try: skip pics = pickle.load(open("./pictures.pkl", "rb" )) except: pics = gct.mk_scene(hr_dict, lr_dict, cat, (nhr,nhr), (nlr,nlr), 3, gal_type = 'real', pt_fraction = 0, magmin = 20, magmax = 29, shift=True) f = open("pictures.pkl","wb") pickle.dump(pics, f) f.close() pic_hr, pic_lr = pics shifts = np.array(pic_hr.shifts) wcs_hr = pic_hr.wcs wcs_lr = pic_lr.wcs hr = pic_hr.cube lr = pic_lr.cube gs_hr = pic_hr.galaxies gs_lr = pic_lr.galaxies psf_hr = np.array(pic_hr.psfs) psf_lr = np.array(pic_lr.psfs) # Channels channels_hr = hr_dict['channels'] channels_lr = lr_dict['channels'] n,n1,n2 = np.shape(hr) # Scale the HST data _,n1,n2 = np.shape(hr) # Scale the HSC data r, N1, N2 = lr.shape # Detectino of sources data_hr = Data(hr, wcs_hr, scarlet.ImagePSF(psf_hr), channels_hr) data_lr = Data(lr, wcs_lr, scarlet.ImagePSF(psf_lr), channels_lr) datas = [data_lr, data_hr] model_psf_hr = scarlet.GaussianPSF(sigma=(0.8,)*len(channels_hr), boxsize=9) model_psf_lr = scarlet.GaussianPSF(sigma=(0.8,)*len(channels_lr), boxsize=9) print(psf_hr.shape, psf_lr.shape) #Results of the detection # Create a color mapping for the HSC image lr_norm = AsinhMapping(minimum=-10, stretch=10, Q=10) hr_norm = AsinhMapping(minimum=-1, stretch=10, Q=5) # Get the source coordinates from the HST catalog xtrue, ytrue = shifts[:,0], shifts[:,1] # Convert the HST coordinates to the HSC WCS ratrue, dectrue = wcs_hr.wcs_pix2world(ytrue,xtrue,0) catalog_true = np.array([ratrue, dectrue]).T Ytrue, Xtrue = wcs_lr.wcs_world2pix(ratrue, dectrue,0) # Map the HSC image to RGB img_rgb = scarlet.display.img_to_rgb(lr, norm = lr_norm) # Apply Asinh to the HST data hr_img = scarlet.display.img_to_rgb(hr, norm=hr_norm) plt.figure(figsize=(15,30)) plt.subplot(121) plt.imshow(img_rgb) #plt.axis('off') plt.plot(Xtrue,Ytrue, 'xk', label = 'true positions') plt.legend() plt.subplot(122) plt.imshow(hr_img) #plt.axis('off') plt.plot(xtrue,ytrue, 'xk', label = 'true positions') 
plt.legend() plt.show() model_frame = scarlet.Frame( hr.shape, psf=model_psf_hr, channels=channels_hr) observation = scarlet.Observation( hr, psf=scarlet.ImagePSF(psf_hr), channels=channels_hr).match(model_frame) sources = [] for i in range(len(xtrue)): new_source = scarlet.ExtendedSource(model_frame, (ytrue[i] , xtrue[i]), observation) sources.append(new_source) blend = scarlet.Blend(sources, observation) blend.fit(200, e_rel=1e-6) scarlet.display.show_scene(sources, norm=hr_norm, observation=observation, show_rendered=True, show_observed=True, show_residual=True) plt.show() model_frame = sources[0].frame model = np.zeros(model_frame.shape) for src in sources: model += src.get_model(frame=model_frame) model = observation.render(model) extent = scarlet.display.get_extent(observation.bbox) model_frame = scarlet.Frame( lr.shape, psf=model_psf_lr, channels=channels_lr) observation = scarlet.Observation( lr, psf=scarlet.ImagePSF(psf_lr), channels=channels_lr).match(model_frame) sources = [] for i in range(len(Xtrue)): new_source = scarlet.ExtendedSource(model_frame, (Ytrue[i], Xtrue[i]), observation) sources.append(new_source) blend = scarlet.Blend(sources, observation) blend.fit(200, e_rel=1e-8) plt.plot(np.log10(np.array(np.abs(blend.loss)))) plt.show() scarlet.display.show_scene(sources, norm = AsinhMapping(minimum=-10, stretch=10, Q=10), observation=observation, show_rendered=True, show_observed=True, show_residual=True) plt.show() s = sources[0].get_model(frame=model_frame) model = observation.render(s) res = lr-model res /= np.max(res) pos = np.where(res == np.max(res)) norms = [lr_norm, hr_norm] try: runners = pickle.load(open("./runners_60.pkl", "rb" )) except: print("File not found.") run_multi = Runner(datas, model_psf_hr, ra_dec = catalog_true) run_hr = Runner([data_hr], model_psf_hr, ra_dec = catalog_true) run_lr = Runner([data_lr], model_psf_lr, ra_dec = catalog_true) runners = [run_lr, run_hr, run_multi] fr = open("./runners_60.pkl","wb") pickle.dump(runners, fr) fr.close() sim = Simulation(cat, runners, ngal = 10, cats = [True]*3, hr_dict=hr_dict, lr_dict=lr_dict, n_lr=nlr) print(sim.runners[-1].frame.shape) try: sim.results = pickle.load(open("./sim_results.pkl", "rb" )) sim.plot() except: print("File not found") sim.run(5, plot = True, norms = norms, init_param=True) sim.plot() f = open("sim_results.pkl","wb") pickle.dump(sim.results, f) f.close() for i in range(100): sim.run(5, init_param=True) sim.plot() f = open("sim_results.pkl","wb") pickle.dump(sim.results, f) f.close() diff = sim.runners[-1].observations[0]._diff_kernels[0] diff_lr = sim.runners[0].observations[0]._diff_kernels[0] diff_hr = sim.runners[1].observations[0]._diff_kernels[0] from mr_tools.pictures import Pictures import galsim dirac = galsim.Gaussian(sigma = 1.e-20).withFlux(1) star = galsim.Convolve(dirac, pic1.psfs_obj[0]).drawImage(nx=51, ny=51, method = 'real_space', use_true_center = True, scale = 0.1).array psf = pic1.psfs_obj[0].withFlux(1).drawImage(nx=51, ny=51, method = 'real_space', use_true_center = True, scale = 0.1).array plt.imshow(star) plt.colorbar() plt.show() plt.imshow((star-psf)) plt.colorbar() plt.show() ###Output _____no_output_____
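###Markdown A small visual check (not part of the original notebook): the difference kernels extracted above for the low-resolution, high-resolution and multi-resolution runners can be displayed side by side. It assumes the `diff`, `diff_hr` and `diff_lr` arrays defined in the cells above. ###Code
# Minimal sketch: show the three difference kernels next to each other.
fig, axes = plt.subplots(1, 3, figsize=(12, 4))
for ax, kernel, title in zip(axes, (diff_lr, diff_hr, diff), ('low resolution', 'high resolution', 'multi-resolution')):
    im = ax.imshow(kernel)
    ax.set_title(title)
    fig.colorbar(im, ax=ax)
plt.show()
###Output _____no_output_____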
CrossValidationExploration.ipynb
###Markdown Cross-validation explorationThis notebook analyzes the results of cross-validation and presents them in the format of the paper. ###Code import pandas as pd from collections import defaultdict from CV import get_latex, get_latex_performance, get_latex_fairness from pprint import pprint df = pd.read_csv('CV/CV5_no_protected_attribute/cross_validation.csv', sep=';') MODEL_NAMES = ('logreg', 'rf', 'logregreweight', 'rfreweight', 'prejudiceremover') def get_mean_stdev_for_single_model(selection: pd.DataFrame, model: str, deltas=False) -> pd.DataFrame: means = selection.mean() stds = selection.std() mean_std = pd.concat([means, stds], axis=1).rename(columns={0: 'Mean', 1: 'StdDev'}) # The following check for statistical significance only applies to deltas if deltas: statistically_significant = mean_std[mean_std.apply(lambda row: row['StdDev'] * 2 < abs(row['Mean']), 1)] if len(statistically_significant) > 0: print('Statistical significance in ' + model, statistically_significant.index) else: print('No statistically significant rows') mean_std[model] = mean_std.apply(lambda row: ' +/- '.join(['{:.3f}'.format(row[el]) for el in ('Mean', 'StdDev')]), 1) report = mean_std.drop(columns=['Mean', 'StdDev']).transpose() report.drop(columns=[el for el in ('Bias Mitigator', 'fold') if el in report.columns], inplace=True) return report def select_rows(df: pd.DataFrame, model: str) -> pd.DataFrame: if model in ('logreg', 'rf'): bias_mitigator = lambda frame: frame['Bias Mitigator'].isnull() else: bias_mitigator = lambda frame: frame['Bias Mitigator'] == ('Reweighting' if model.endswith('reweight') else 'Prejudice Remover') clf = 'Random Forest' if model.startswith('rf') else 'Logistic Regression' return df[(bias_mitigator(df)) & (df['Classifier'] == clf)] def get_mean_stdev(model: str) -> pd.DataFrame: selection = select_rows(df, model) return get_mean_stdev_for_single_model(selection, model) def get_classifier_name(model_name: str) -> str: if model_name.startswith('rf'): return 'Random Forest' else: return 'Logistic Regression' def get_mitigator_name(model_name: str) -> str: if model_name.endswith('reweight'): return 'Reweighing' elif model_name == 'prejudiceremover': return 'Prejudice Remover' else: return '' metrics_table = pd.concat([get_mean_stdev(el) for el in MODEL_NAMES]) metrics_table.reset_index(drop=False, inplace=True) metrics_table['Classifier'] = metrics_table['index'].apply(get_classifier_name) metrics_table['Bias Mitigator'] = metrics_table['index'].apply(get_mitigator_name) metrics_table.drop(columns=['index'], errors='ignore', inplace=True) metrics_table ###Output _____no_output_____ ###Markdown Difference between original model and bias mitigators ###Code by_clf = [] for clf in df['Classifier'].unique(): clf_df = df[df['Classifier'] == clf].reset_index(drop=True) df_by_fold = [] for fold in clf_df['fold'].unique(): clf_fold_df = clf_df[clf_df['fold'] == fold].reset_index(drop=True) mitigators = [] deltas = defaultdict(list) for mitigator in clf_fold_df[clf_fold_df['Bias Mitigator'].notnull()]['Bias Mitigator'].unique(): mitigators.append(mitigator) for metric in clf_fold_df.columns: if metric in ('Classifier', 'Bias Mitigator', 'fold'): continue effect_metric_values = clf_fold_df[clf_fold_df['Bias Mitigator'] == mitigator][metric].tolist() baseline_metric_values = clf_fold_df[clf_fold_df['Bias Mitigator'].isnull()][metric].tolist() assert len(effect_metric_values) == 1 and len(baseline_metric_values) == 1 delta = effect_metric_values[0] - baseline_metric_values[0] 
deltas[metric].append(delta) delta_df = pd.DataFrame() delta_df['Bias Mitigator'] = mitigators for metric, delta in deltas.items(): delta_df[metric] = delta delta_df['fold'] = fold df_by_fold.append(delta_df) delta_all_folds = pd.concat(df_by_fold) mean_std = [] for lbl, grp in delta_all_folds.groupby('Bias Mitigator'): mean_std_for_mitigator = get_mean_stdev_for_single_model(grp, clf + '_' + lbl, deltas=True) mean_std_for_mitigator['Bias Mitigator'] = lbl mean_std.append(mean_std_for_mitigator) mean_std = pd.concat(mean_std) mean_std['Classifier'] = clf by_clf.append(mean_std) final_diffs = pd.concat(by_clf) final_diffs ###Output _____no_output_____ ###Markdown Format tables for Latex ###Code for row in get_latex_performance(metrics_table).split('\n'): print(row) for row in get_latex_fairness(metrics_table).split('\n'): print(row) for row in get_latex_performance(final_diffs, True).split('\n'): print(row) for row in get_latex_fairness(final_diffs, True).split('\n'): print(row) ###Output _____no_output_____
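###Markdown The 2-standard-deviation rule buried inside the helper above can be hard to see at a glance, so here is a minimal, self-contained sketch of the same criterion on synthetic fold deltas; the metric names and numbers below are invented for illustration only. ###Code
import numpy as np
import pandas as pd

# Synthetic per-fold deltas (mitigated minus baseline) for two hypothetical metrics
toy = pd.DataFrame({
    'Balanced Accuracy': [-0.021, -0.018, -0.025, -0.019, -0.022],
    'Statistical Parity Difference': [0.004, -0.003, 0.006, -0.005, 0.002],
})
means, stds = toy.mean(), toy.std()
# Same criterion as get_mean_stdev_for_single_model: |mean| must exceed two standard deviations
significant = means.index[2 * stds < means.abs()]
print(pd.concat([means, stds], axis=1, keys=['Mean', 'StdDev']))
print('Statistically significant deltas:', list(significant))
###Output _____no_output_____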
storm_analysis/diagnostics/jpy_notebooks/dao_3d_2d_dwls.ipynb
###Markdown Diagnostics for 3D-DAOSTORM (2d, Data weighted least squares) Configure environment ###Code import os os.chdir("/home/hbabcock/Data/storm_analysis/sa_diagnostics/dao_3d_2d_dwls") print(os.getcwd()) import numpy numpy.random.seed(1) import storm_analysis.diagnostics.daostorm_3d.settings as settings import storm_analysis.diagnostics.daostorm_3d.configure as configure import storm_analysis.diagnostics.daostorm_3d.make_data as makeData import storm_analysis.diagnostics.daostorm_3d.analyze_data as analyzeData import storm_analysis.diagnostics.daostorm_3d.collate as collate settings.photons = [[20, 500], [20, 1000]] print(settings.photons) settings.fit_error_model = "DWLS" settings.iterations = 20 settings.model = '2d' settings.n_frames = 10 settings.peak_locations = None ###Output _____no_output_____ ###Markdown Configure ###Code configure.configure() ###Output _____no_output_____ ###Markdown Make Data ###Code makeData.makeData() ###Output _____no_output_____ ###Markdown Analyze data ###Code %time analyzeData.analyzeData() ###Output _____no_output_____ ###Markdown Collate data ###Code collate.collate() ###Output _____no_output_____ ###Markdown Reference results ###Code 2019-03-29 commit 9bdd3dca5324012b8f05e83500e725002cd3c60b Processing test_01 Using max_distance 200.0 nm for error calcuations. Processing test_02 Using max_distance 200.0 nm for error calcuations. Analysis Summary: Processed 2520 localizations in 1.40 seconds, 1796.41/sec Recall 0.94563 Noise 0.05437 XY Error Standard Deviation (nm): test_01 14.08 14.18 test_02 8.11 7.88 XY RMSE (nm): test_01 14.08 14.18 test_02 8.11 7.88 XY Width Error, Mean difference with truth, Standard deviation (pixels): test_01 0.033 0.164 0.033 0.164 test_02 0.039 0.162 0.039 0.162 ###Output _____no_output_____
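###Markdown The reference block above is what a fresh run is meant to be checked against. Below is a small hedged sketch (not part of the storm_analysis API) that hard-codes those reference numbers and compares them with placeholder values standing in for a new run; in practice the new-run values would be read from the collate output. ###Code
import numpy as np

# Reference values copied from the 2019-03-29 block above
reference = {'recall': 0.94563,
             'xy_rmse_test_01': (14.08, 14.18),
             'xy_rmse_test_02': (8.11, 7.88)}

# Placeholder numbers standing in for a new run
new_run = {'recall': 0.945,
           'xy_rmse_test_01': (14.1, 14.2),
           'xy_rmse_test_02': (8.1, 7.9)}

def within_tolerance(new, ref, rel_tol=0.05):
    new, ref = np.atleast_1d(new), np.atleast_1d(ref)
    return bool(np.all(np.abs(new - ref) <= rel_tol * np.abs(ref)))

for key in reference:
    status = 'OK' if within_tolerance(new_run[key], reference[key]) else 'CHECK'
    print('{}: {}'.format(key, status))
###Output _____no_output_____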
notebooks/2_data_preparation.ipynb
###Markdown Data preparation **Methodology**For each postal code:1. For each postal code (PLZ) perform cross-check with OSM data dump from Geofabrik to populate building object info with: 1. Region name from OSM dump 1. Geometry info (polygon coords)1. Calculate total area for all objects-------------To do list:1. Classify buildings into types (manual)1. Group buildings by type to get for each type (residential, industrial,...etc.) 1. Rectangularity (area of polygon / area of minimum bounding box of polygon) 1. Total area Initialization ###Code import warnings warnings.filterwarnings("ignore", category=DeprecationWarning) import pandas as pd import numpy as np import sys import os from pyrosm import OSM ###Output _____no_output_____ ###Markdown Load custom modules ###Code import data_preparation as dp import gemeindeverz import helpers # Reload module (incase new update) import importlib importlib.reload(gemeindeverz) pd.set_option('display.max_rows', 1000) pd.set_option('display.max_columns', None) pd.set_option('display.width', None) pd.set_option('display.max_colwidth', None) ###Output _____no_output_____ ###Markdown Input ###Code # Obtain from https://www.destatis.de/DE/Themen/Laender-Regionen/Regionales/Gemeindeverzeichnis/_inhalt.html GV_path = '../data/01_raw/GV/GV100AD_301120.asc' # plz land list plz_ags_csv = '../data/01_raw/zuordnung_plz_ort_landkreis.csv' buildings_data_location = '../data/01_raw/buildings_data/' geofabrik_location = '../data/01_raw/geofabrik/' buildings_int_location = '../data/02_intermediate/buildings_data/' # Create ags code dict for each state / region region_ags_dict = { # BE_BB 'brandenburg-latest.osm.pbf': ['11','12'], # BW 'freiburg-regbez-latest.osm.pbf': ['083'], 'karlsruhe-regbez-latest.osm.pbf': ['082'], 'stuttgart-regbez-latest.osm.pbf': ['081'], 'tuebingen-regbez-latest.osm.pbf': ['084'], # BY 'mittelfranken-latest.osm.pbf': ['095'], 'niederbayern-latest.osm.pbf': ['092'], 'oberbayern-latest.osm.pbf': ['091'], 'oberfranken-latest.osm.pbf': ['094'], 'oberpfalz-latest.osm.pbf': ['093'], 'schwaben-latest.osm.pbf': ['097'], 'unterfranken-latest.osm.pbf': ['096'], # HB 'bremen-latest.osm.pbf': ['04'], # HE 'hessen-latest.osm.pbf': ['06'], # HH 'hamburg-latest.osm.pbf': ['02'], # MV 'mecklenburg-vorpommern-latest.osm.pbf': ['13'], # NI 'niedersachsen-latest.osm.pbf': ['03'], # NW 'arnsberg-regbez-latest.osm.pbf': ['059'], 'detmold-regbez-latest.osm.pbf': ['057'], 'duesseldorf-regbez-latest.osm.pbf': ['051'], 'koeln-regbez-latest.osm.pbf': ['053'], 'muenster-regbez-latest.osm.pbf': ['055'], # RP 'rheinland-pfalz-latest.osm.pbf': ['07'], # SH 'schleswig-holstein-latest.osm.pbf': ['01'], # SL 'saarland-latest.osm.pbf': ['10'], # SN 'sachsen-latest.osm.pbf': ['14'], # ST 'sachsen-anhalt-latest.osm.pbf': ['15'], # TH 'thueringen-latest.osm.pbf': ['16'] } plz_ags = pd.read_csv(plz_ags_csv, dtype = {'plz': str, 'ags':str}) plz_ags.plz.nunique() plz_ags.ags.nunique() boundary_type = 'ags' # Community directory dataframe # Use this file to manually get ags code for region available on Geofabrik (inside state) com_dir_df = gemeindeverz.einlesen(GV_path) com_dir_df[com_dir_df.gemeinde_bez.str.contains('thüringen', case = False)] ###Output _____no_output_____ ###Markdown Process Get buildings in region ###Code # Extract plz list id_list = os.listdir(buildings_data_location) id_list = [x.split('.')[0].split('_')[2] for x in id_list if 'buildings' in x] id_list[0:10] # full path pbf region_list_path = [os.path.join(path, name) for path, subdirs, files in 
os.walk(geofabrik_location) for name in files] # pbf name pbf_list = [name for path, subdirs, files in os.walk(geofabrik_location) for name in files] pbf_list i = 8 # Get target region target_region_path = region_list_path[i] target_region = pbf_list[i] target_region # Get ags belong to the target region target_ags_list = region_ags_dict.get(target_region) target_ags_list ags_len = len(target_ags_list[0]) ags_len # Initialize the OSM parser object osm = OSM(target_region_path) %%time buildings = osm.get_buildings() ###Output _____no_output_____ ###Markdown Get boundary_ids in region ###Code # Extract info of all PLZ belong to that region region_id_list = plz_ags[(dp.left(plz_ags.ags.str, ags_len).isin(target_ags_list))][[boundary_type]].drop_duplicates().reset_index(drop=True) print(f'Number of {boundary_type}(s) in region of {target_region} is {region_id_list.shape[0]}') ###Output _____no_output_____ ###Markdown Read in boundary_type file ###Code if not os.path.exists(buildings_int_location): os.makedirs(buildings_int_location) # Check for progress of already enhanced areas name_list = os.listdir(buildings_int_location) id_list = [x.split('.')[0].split('_')[2] for x in name_list if 'buildings' in x] # Get to-be-enhanced list region_id_list = pd.DataFrame(np.setdiff1d(region_id_list, id_list), columns = [boundary_type]) logging.info(f'Total of {len(region_id_list)} {boundary_type}(s) in the region') boundary_id = region_id_list[boundary_type].iloc[0] boundary_id buildings_boundary_path = f'../data/01_raw/buildings_data/buildings_{boundary_type}_{boundary_id}.csv' buildings_boundary_path # Read in building objects data in the postal code df = pd.read_csv(buildings_boundary_path, dtype={'tags.addr:suburb': 'object', 'tags.building:levels': 'object', 'tags.source': str, 'tags.addr:postcode': str}, converters={"nodes": lambda x: x.strip("[]").split(", ")}) # read column as list # remove empty elements (no lat/lon) df = df[df['center.lat'].isna() == False].reset_index(drop=True) # replace NaN in building_levels df = df.rename(columns = {'tags.building:levels': 'building_levels', 'tags.addr:postcode' : 'postcode'}) df.building_levels = df.building_levels.fillna(1) f'Total of {len(df)} buildings in {boundary_type} {boundary_id}' ###Output _____no_output_____ ###Markdown Populate data into PLZ building objects ###Code df_res = df.merge(buildings[['id','geometry','timestamp']], how = 'left', on = 'id') df_res.geometry = df_res.geometry.fillna(np.nan) # Calculate total area for all building objects df_res['surface_area'] = df_res.geometry.apply(lambda x: dp._calculate_surface_area(x) * 10**10) df_res['total_area'] = df['building_levels'].astype(int) * df_res['surface_area'] # Classify to building types df_res['building_types'] = df_res['tags.building'].apply(lambda x: dp.manual_classify_building(x)) # # Save result to 02_intermediate/buildings_plz/buildings_<plz>.csv # output_path = '../data/02_intermediate/buildings_plz/' # # create saving location folder if not exists # if not os.path.exists(output_path): # os.makedirs(output_path) # df_res.to_csv(output_path + f'buildings_{plz}.csv', index = False) ###Output _____no_output_____
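###Markdown The area numbers above come from the project's own dp._calculate_surface_area helper, which is not shown here. As an illustration only, the sketch below computes a geodesic footprint area in m² for a lon/lat polygon with shapely and pyproj and scales it by the floor count; the coordinates and level count are made up, and this is not necessarily how the helper itself is implemented. ###Code
from shapely.geometry import Polygon
from pyproj import Geod

# A small rectangular footprint (made-up coordinates, roughly 20 m x 10 m)
footprint = Polygon([(11.57500, 48.13700), (11.57527, 48.13700),
                     (11.57527, 48.13709), (11.57500, 48.13709)])

geod = Geod(ellps='WGS84')
area_m2, _ = geod.geometry_area_perimeter(footprint)
area_m2 = abs(area_m2)  # sign depends on ring orientation

building_levels = 3  # would come from the building_levels column above
total_area_m2 = building_levels * area_m2
print(f'footprint: {area_m2:.1f} m2, total floor area: {total_area_m2:.1f} m2')
###Output _____no_output_____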
src/old_files/Classification models.ipynb
###Markdown LDA ###Code LDA_results = results_df_gen() LDA_results = gen_best_result_df('lda', 0, LDA_results) LDA_results ###Output _____no_output_____ ###Markdown Logistic ###Code log_list = [-2, -1.75, -1.5, -1.25, -1.0, -0.75, -0.5, -0.25, 0.0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0] log_results = results_df_gen() for hp in log_list: log_results = gen_best_result_df('logistic', hp, log_results) log_results ###Output _____no_output_____ ###Markdown QDA ###Code qda_list = [0.0, 0.1, 0.5, 1.0, 2.0, 5.0, 10.0] qda_results = results_df_gen() for hp in qda_list: qda_results = gen_best_result_df('qda', hp, qda_results) qda_results ###Output _____no_output_____ ###Markdown Ridge Classifier ###Code ridge_list = [-1, -0.5, 0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0] ridge_results = results_df_gen() for hp in ridge_list: ridge_results = gen_best_result_df('ridge', hp, ridge_results) ridge_results ###Output _____no_output_____ ###Markdown Gaussian Process Classifier ###Code gauz_list = [1, 3, 5, 6, 8, 10, 12, 13, 14, 15, 17, 18, 19, 21, 22, 24, 25, 26, 27, 28, 29, 30, 31, 32, 34, 35, 37, 38, 39] gauz_results = results_df_gen() for hp in range(1, 40): try: gauz_results = gen_best_result_df('gaussian', hp, gauz_results) except FileNotFoundError: i += 1 print(i) print(f'Gaussian done: {len(gauz_results)}') gauz_results gauz_list = [1, 3, 5, 6, 8, 10, 12, 13, 14, 15, 17, 18, 19, 21, 22, 24, 25, 26, 27, 28, 29, 30, 31, 32, 34, 35, 37, 38, 39] gauz_results = results_df_gen() for hp in gauz_list: gauz_results = gen_best_result_df('gaussian', hp, gauz_results) gauz_results ###Output _____no_output_____ ###Markdown K-Nearest Neighbor Classifier ###Code knn_results = results_df_gen() for hp in (range(2, 40)): knn_results = gen_best_result_df('knn', hp, knn_results) knn_results ###Output _____no_output_____ ###Markdown SVC ###Code svc_list = [-2.0, -1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5, 2.0] svc_results = results_df_gen() for hp in svc_list: svc_results = gen_best_result_df('svc', hp, svc_results) svc_results ###Output _____no_output_____ ###Markdown Ada Boost Classifier ###Code abc_list = [0.25, 0.251, 0.252, 0.253, 0.85, 0.851, 0.852, 0.853, 1.45, 1.451, 1.452, 1.453, 2.05, 2.051, 2.052, 2.053, 2.65, 2.651, 2.652, 2.653, 3.25, 3.251, 3.252, 3.253, 3.85] # 3.851, 3.852, 3.853, 4.45, 4.451, 4.452, 4.453 abc_results = results_df_gen() for hp in abc_list: abc_results = gen_best_result_df('ada_boost', hp, abc_results) abc_results ###Output _____no_output_____ ###Markdown Random Forest Classifier ###Code rf_results = results_df_gen() for hp in (range(1, 5)): rf_results = gen_best_result_df('random_forest', hp, rf_results) rf_results results_nd = pd.concat([LDA_results, log_results, qda_results, ridge_results, gauz_results, knn_results, svc_results, abc_results, rf_results], axis=0) results_nd.to_csv('../data/botbrnlys-nd.csv', index=False) ###Output _____no_output_____
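###Markdown The cells above repeat the same fit-and-collect pattern once per model family; a possible consolidation is sketched below, reusing results_df_gen and gen_best_result_df and the hyperparameter lists already defined, and simply skipping hyperparameter values whose result files are missing. ###Code
# One loop over every model family instead of one near-identical cell per family.
hyperparams = {
    'lda': [0],
    'logistic': log_list,
    'qda': qda_list,
    'ridge': ridge_list,
    'gaussian': gauz_list,
    'knn': list(range(2, 40)),
    'svc': svc_list,
    'ada_boost': abc_list,
    'random_forest': list(range(1, 5)),
}

all_results = []
for model_name, hp_values in hyperparams.items():
    results = results_df_gen()
    for hp in hp_values:
        try:
            results = gen_best_result_df(model_name, hp, results)
        except FileNotFoundError:
            continue  # no saved run for this hyperparameter value
    all_results.append(results)

results_nd = pd.concat(all_results, axis=0)
###Output _____no_output_____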
hogg-exercises.ipynb
###Markdown Fitting a model to data Hogg, Bovy, Lang exercises in Python, NumPy David Craig, 2019 January Using my slice sampler See `fit-slicer` in this directory for the Julia notebook where I did the first couple of versions of the slice sampler the first time. This is a pure Python (NumPy) version of the samplerHere I am trying to work out how to set up my own MCMC slice sample to use the procedure for rejecting outliers to a linear fit that is described in Hogg, et. al. [arXiv 1008.4686](https://arxiv.org/abs/1008.4686). ###Code import numpy as np import matplotlib.pyplot as plt from numpy.random import rand, normal import corner def slicer(g, dim, x0, xargs, N=1000, w=0.5, m=10): """MCMC slice sampler: g -- function or distribution dim -- number of dimensions x0 -- initial guess (vector of parameters) xargs -- extra arguments for g (often data-related) N -- number of values in Markov chain to return w -- step-out width for slice sampling m -- maximum for step-out scaling Returns: (xs, likelies) xs[N, dim] -- resulting Markov chain (includes initial guess as 0th) likelies[N] -- vector of log-likelihoods of chain See: Neal, "Slice Sampling," The Annals of Statistics 2003, vol. 31 (705-767). This is available online --D. Craig converted from Julia mslicer, 2019 Jan 15. """ # based on Julia mslicer, version in mslicer-keeplikes.ipynb xs = np.zeros((N, dim), dtype=np.float64) # array (Markov chain) that will be returned xs[0,:] = x0 #initial guess into the chain x1 = np.zeros(dim) L = np.zeros(dim) R = np.zeros(dim) likelies = np.zeros(N) # record log likelihoods likelies[0] = g(x0,xargs) # get log-like of initial guess; avoid fencepost error way = np.zeros(dim) # which axis to go along in space i = 1 # assumed start values for chain are recorded at xs[0,:]; this will be index of first generated point while i < N: for d in range(dim): # go one step in each dimensional direction. way = 0.0 * way #clear it way[d] = 1.0 #set nonzero in direction we go for slicing on this step y0 = g(x0,xargs) #height of distribution at x0 y = y0 + np.log(rand()) # height for slice (using log scaled distribution) #start stepping out U = rand() # between 0 and 1 L = x0 - (w * way * U) R = L + w * way V = rand() J = np.floor(m*V) K = (m - 1) - J while J > 0 and y < g(L,xargs): L = L - w * way J = J - 1 while K > 0 and y < g(R,xargs): R = R + w * way K = K - 1 #now should be stepped out beyond distribution at slice level # work back in if no value found: Lbar, Rbar = L, R while True: U = rand() x1 = Lbar + U * (Rbar - Lbar) # vector subtraction should be correct dir if y < g(x1,xargs): break # exit while loop if x1[d] < x0[d]: Lbar = x1 else: Rbar = x1 xs[i,:] = x1 # found an acceptable point, record in chain (a row) likelies[i] = y0 # record log-likelhood x0 = x1 # set initial to new point for next round. 
i += 1 if i >= N: break # catch case where we reach N in the middle of set of dimensions return xs, likelies # from Hogg, Bovy, Lang arXiv:1008.4686 # Data for fitting exercises # ID, x, y, sigy, sigx, rhoxy # the first four are "outliers" d =[ [ 1.0, 201.0, 592.0, 61.0, 9.0, -0.84], [ 2.0, 244.0, 401.0, 25.0, 4.0, 0.31], [ 3.0, 47.0, 583.0, 38.0, 11.0, 0.64], [ 4.0, 287.0, 402.0, 15.0, 7.0, -0.27], [ 5.0, 203.0, 495.0, 21.0, 5.0, -0.33], [ 6.0, 58.0, 173.0, 15.0, 9.0, 0.67], [ 7.0, 210.0, 479.0, 27.0, 4.0, -0.02], [ 8.0, 202.0, 504.0, 14.0, 4.0, -0.05], [ 9.0, 198.0, 510.0, 30.0, 11.0, -0.84], [ 10.0, 158.0, 416.0, 16.0, 7.0, -0.69], [ 11.0, 165.0, 393.0, 14.0, 5.0, 0.3 ], [ 12.0, 201.0, 442.0, 25.0, 5.0, -0.46], [ 13.0, 157.0, 317.0, 52.0, 5.0, -0.03], [ 14.0, 131.0, 311.0, 16.0, 6.0, 0.5 ], [ 15.0, 166.0, 400.0, 34.0, 6.0, 0.73], [ 16.0, 160.0, 337.0, 31.0, 5.0, -0.52], [ 17.0, 186.0, 423.0, 42.0, 9.0, 0.9 ], [ 18.0, 125.0, 334.0, 26.0, 8.0, 0.4 ], [ 19.0, 218.0, 533.0, 16.0, 6.0, -0.78], [ 20.0, 146.0, 344.0, 22.0, 5.0, -0.56] ] data = np.array(d) np.shape(data) plt.errorbar(data[4:,1], data[4:,2], xerr=data[4:,4], yerr=data[4:,3],fmt='bo') plt.errorbar(data[:4,1], data[:4,2], xerr=data[:4,4], yerr=data[:4,3],fmt='ro'); ###Output _____no_output_____ ###Markdown 2D MCMC fit for slope, intercept without the outliers ###Code x = data[:,1] y = data[:,2] sig_y = data[:,3] #scatter in y sig_x = data[:,4] #scatter in x rho_xy = data[:,5]; #covariance # this names all data nicely # ignore the first four points good = range(4,20) xt = x[good] yt = y[good] s_xt = sig_x[good] s_yt = sig_y[good]; def lnlike(parms, data): """Log-likelihood for the good points""" m,b = parms[0], parms[1] xdata, ydata, sigy = data[0], data[1], data[2] return -np.sum( ((ydata - m*xdata-b)**2)/sigy**2 ) p = [2.3, 30] # initial guesses xyd = [xt, yt, s_yt] lnlike(p, xyd) def lnprior(parms): m,b = parms[0], parms[1] return -0.5 * np.log(1 + m**2) lnprior(p) def lnposterior(parms, data): return lnprior(parms) + lnlike(parms, data) lnposterior(p, xyd) Nchain = 10000 %%time res, likes = slicer(lnposterior, 2, p, xyd, N=Nchain); plt.hist(res[:,0], bins='fd'); plt.hist(res[:,1], bins='fd'); np.shape(res) # check the size of output sampi = np.random.choice(range(Nchain), 20) # random set of indices sampi ysamp = res[sampi, :] #this is a choice from the sampling of 20 values of m, b, s xp = np.linspace(30,300)# for plotting lines for s in ysamp: plt.plot(xp, xp*s[0]+s[1], color='lightgrey',zorder=1) plt.errorbar(data[4:,1], data[4:,2], yerr=data[4:,3],fmt='bo',zorder=2) plt.errorbar(data[:4,1], data[:4,2], yerr=data[:4,3],fmt='ro', zorder=2); ###Output _____no_output_____ ###Markdown Take a look at the Markov chains: ###Code plt.plot(res[:,0]) ###Output _____no_output_____ ###Markdown Looks ergodic to me! 
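###Code
# Eyeballing the trace is a start; as a rough, optional check (not part of the
# original exercise) estimate the lag-1 autocorrelation and a crude AR(1)-style
# effective sample size for the slope chain.
def autocorr(x, lag=1):
    x = x - x.mean()
    return np.dot(x[:-lag], x[lag:]) / np.dot(x, x)

rho1 = autocorr(res[:, 0])
ess = len(res) * (1 - rho1) / (1 + rho1)
print("lag-1 autocorrelation: {:.3f}, approx. effective sample size: {:.0f}".format(rho1, ess))
###Output _____no_output_____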
###Code plt.plot(res[:,1]) cplot = corner.corner(res, labels=("slope", "intercept")) ###Output _____no_output_____ ###Markdown Model good/bad or foreground/background points We want to add to the model- $N$ binary integers $q_i$: $q_i = 1$ if the $i$th point is good, 0 if bad.- $P_b$ the _prior_ probability that any one data point is bad (that is, a proportion of sorts.)- $Y_b, V_b$ the (prior) mean and variance of the distribution of bad points in $y$, the $y$ of the fit.The gives $N+3$ extra parameters besides the ones for the slope, intercept, and noise of the simple linear mode.Remember that $\{y_i\}_{i=1}^N$ is the set of data points in $y$.Then the likelihood of the data is:$$ {\cal L} = p \left( \{y_i\}_{i=1}^N \vert m, b, \{q_i\}_{i=1}^N, Y_b,V_b, I \right)$$$$ {\cal L} = \prod_{i=1}^N \left[ p_{fg} \left( \{y_i\}_{i=1}^N \vert m, b, I \right) \right]^{q_i} \left[ p_{bg} \left( \{y_i\}_{i=1}^N \vert Y_b, V_b, I \right) \right]^{[1-q_i]}$$$$ {\cal L} = \prod_{i=1}^N \left[ \frac{1}{\sqrt{2\pi\sigma_{yi}^2} }\exp \left( -\frac{[y_i - m x_i - b]^2}{2\sigma_{yi}^2} \right) \right]^{q_i}\left[ \frac{1}{\sqrt{2\pi[V_b + \sigma_{yi}^2]} }\exp \left( -\frac{[y_i - Y_b]^2}{2[V_b +\sigma_{yi}^2]} \right) \right]^{[1-q_i]}$$where $p_{fg}(\cdot)$ and $p_{bg}(\cdot)$ are the generative mdoels for the foreground (good) and background (bad) points. **This is equation 13 in Hogg++**"Because we are permitting data rejection, there is an important prior probability on the $\{q_i\}_{i=1}^N$that penalizes each rejection:$$ p \left(m, b, \{q_i\}_{i=1}^N, P_b, Y_b, V_b \vert I \right) = p\left(\{q_i\}_{i=1}^N \vert P_b, I\right) p\left(m, b, P_b Y_b, V_b ,I\right)$$$$ p\left(\{q_i\}_{i=1}^N \vert P_b, I\right) = \prod_{i=1}^{N} \left[1 - P_b\right]^{q_i} P_b^{[1-q_i]},$$that is, the binomial probability of the particular sequence $\{q_i\}_{i=1}^N$." The above things look like they need to be logarithmed to get the log-likelihood. **NOTE** if one looks down to the paragraphs before equation (17), it is argued that the $q_i$ marginalize or sum down to (eq 17):$$ {\cal L} \propto \prod_{i=1}^N \left[\frac{1 - P_b}{\sqrt{2 \pi \sigma_{yi}^2}} \exp \left( -\frac{[y_i - m x_i - b]^2}{2\sigma_{yi}^2} \right) +\frac{P_b}{\sqrt{2\pi[V_b + \sigma_{yi}^2]} }\exp \left( -\frac{[y_i - Y_b]^2}{2[V_b +\sigma_{yi}^2]} \right)\right]$$**SEEK TO UNDERSTAND THIS MARGINALIZATION.** It's a form of "mixture model." See also [Press, 1996](https://arxiv.org/abs/astro-ph/9604126)I found Foreman-Mackey's explanation of this, see [F-M notebook](mixture-models.ipynb)This looks a lot more tractable! This only adds 3 parameters vs. $N+3$.There is not a digital parameter $q_i$ for each point$(x_i,y_i)$ now. 
The log-likelihood is just going to be: $$ \ln {\cal L} \propto \sum_{i=1}^N \ln \left[\frac{1 - P_b}{\sqrt{2 \pi \sigma_{yi}^2}} \exp \left( -\frac{[y_i - m x_i - b]^2}{2\sigma_{yi}^2} \right) +\frac{P_b}{\sqrt{2\pi[V_b + \sigma_{yi}^2]} }\exp \left( -\frac{[y_i - Y_b]^2}{2[V_b +\sigma_{yi}^2]} \right)\right]$$Now let's try this outlier rejection trick (code here for variable reference from above): x = data[:,1] y = data[:,2] sig_y = data[:,3] scatter in y sig_x = data[:,4] scatter in x rho_xy = data[:,5]; covariance this names all data nicely ###Code def lnlike(parms, d): """Log-likelihood for mixture model where there is an average probability for a bad or background point as interloper parms -- vector of parameters: m, b, Pbg, Ybg, Vbg d -- array of data to compare with model, columns: x, y, sigy""" m, b, Pbg, Ybg, Vbg = parms #slope, intercept, prob bkgrd, mean bkrg, variance bkgrd. x, y, sigy = d[:,0], d[:,1], d[:,2] # note indices vy = sigy**2 #square for variance lnL = np.sum(np.log( ( (1-Pbg)/np.sqrt(2*np.pi*vy) ) * np.exp( -((y-m*x-b)**2)/(2*vy) ) + ( Pbg/np.sqrt(2*np.pi*(Vbg+vy)) ) * np.exp( -((y-Ybg)**2)/(2*(Vbg+vy)) ) )) return lnL d3 = data[:,1:4] # take only relevant columns, drop index col and trailing cols. # p = [2.4, 10, 0.2, 500, 2000] # lnlike(p, d3) #just a check def lnprior(parms): """a somewhat scandalous prior""" m,b, Pbg, Ybg, Vbg = parms if Pbg < 0.0 or Pbg > 0.9: return -np.inf elif Ybg < 0 or Ybg > 1000: return -np.inf elif Vbg < 0: return -np.inf elif m <= 0: return -np.inf #it's pretty obviously positive else: return -0.5 * np.log(1 + m**2) lnprior(p) def lnposterior(parms, data): """log posterior = log prior + log likelihood parms -- vector of parameters data -- data for modeling""" return lnprior(parms) + lnlike(parms, data) lnposterior(p, d3) %%time p = [2.3, 30, 0.25, 500, 2000] # initial guesses res, likes = slicer(lnposterior, 5, p, d3, N=50000); cp = corner.corner(res, labels = ["slope", "intcp", "Pbg", "Ybg", "Vbg"]) hh = plt.hist2d(res[:,0], res[:,1],bins=31) ###Output _____no_output_____ ###Markdown Use the histogram binning to find the MAP value for m,b ###Code mflat = np.argmax(hh[0]) #get the array of bin values (gives flattened index) maxm, maxb = np.unravel_index(mflat,np.shape(hh[0])) #indices of hist peak print("peak of histogram at: ", maxm, maxb) hh[0][maxm,maxb] #see how many we have there (just a check) m_MAP = (hh[1][maxm]+hh[1][maxm+1])/2 # average edges of bin (? check) m_MAP b_MAP = (hh[2][maxb]+hh[2][maxb+1])/2 b_MAP sampi = np.random.choice(range(Nchain), 20) # random set of indices ysamp = res[sampi, :] #this is a choice from the sampling of 20 values of parameters xp = np.linspace(0,300)# for plotting lines plt.figure(figsize=(10,7)) for s in ysamp: plt.plot(xp, xp*s[0]+s[1], color='lightgrey',zorder=1) plt.plot(xp, xp*m_MAP + b_MAP, color='black', zorder=2, label="MAP line") plt.errorbar(data[4:,1], data[4:,2], yerr=data[4:,3],fmt='bo',zorder=3, label="foreground pts") plt.errorbar(data[:4,1], data[:4,2], yerr=data[:4,3],fmt='ro', zorder=3, label="background pts"); plt.legend(loc=4) #lower right st = "MAP m = {:4.2f}; b = {:4.1f}".format(m_MAP, b_MAP) plt.text(100,100,st); plt.grid() print(np.median(res[:,2])) # median outlier probability ###Output 0.2947266669326646
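###Markdown One standard by-product of this mixture model (an extension, not spelled out in the exercise text above) is the posterior probability that each individual point belongs to the foreground, obtained by comparing the two terms of the likelihood at a point estimate of the parameters; here the posterior medians from the chain are used. ###Code
# Per-point foreground probability at the posterior-median parameters.
m_med, b_med, Pbg_med, Ybg_med, Vbg_med = np.median(res, axis=0)
xd, yd, sigyd = d3[:, 0], d3[:, 1], d3[:, 2]
vy = sigyd**2

fg = (1 - Pbg_med) / np.sqrt(2 * np.pi * vy) * np.exp(-((yd - m_med * xd - b_med)**2) / (2 * vy))
bg = Pbg_med / np.sqrt(2 * np.pi * (Vbg_med + vy)) * np.exp(-((yd - Ybg_med)**2) / (2 * (Vbg_med + vy)))
p_fg = fg / (fg + bg)

for pid, p in zip(data[:, 0].astype(int), p_fg):
    print("point {:2d}: P(foreground) = {:.2f}".format(pid, p))
###Output _____no_output_____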
deep_learning/word2vec.ipynb
###Markdown I followed along [Aneesh Joshi's blog post](https://medium.com/towards-data-science/learn-word2vec-by-implementing-it-in-tensorflow-45641adaf2ac) on word2vec in this notebook. ###Code
import numpy as np
import tensorflow as tf ###Output _____no_output_____ ###Markdown step 1. read in the data, create word dictionary, create one-hot vectors for each word ###Code
# load data
with open('darksouls_training.txt', 'r') as fh:
    training = [sent.replace('.','').replace('\n', '').lower() for sent in fh.readlines()]

# with open('darksouls_test.txt', 'r') as fh:
#     test = [sent.replace('.','').replace('\n', '').lower() for sent in fh.readlines()]

# create vocabulary
word_list = []
for sent in training:
    for word in sent.split(' '):
        word_list.append(word)
# for sent in test:
#     for word in sent.split(' '):
#         word_list.append(word)

voc = set(word_list)

# create one-hot vector for each word
word2int = {}
int2word = {}

for ind, word in enumerate(voc):
    word2int[word] = ind
    int2word[ind] = word

# split the sentences
sent_train = []
for sent in training:
    sent_train.append(sent.split(' '))

# sent_test = []
# for sent in test:
#     sent_test.append(sent.split(' '))

# create word pairs
data_train = []
WINDOW_SIZE = 5

for sentence in sent_train:
    for ind, word in enumerate(sentence):
        for nb_word in sentence[max(ind - WINDOW_SIZE, 0) : min(ind + WINDOW_SIZE, len(sentence)) + 1] :
            if nb_word != word:
                data_train.append([word, nb_word])

# convert to one-hot
def to_one_hot(data_point_index, vocab_size):
    temp = np.zeros(vocab_size)
    temp[data_point_index] = 1
    return temp

data_train[0]

x_train = []
y_train = []

for word_pair in data_train:
    x_train.append(to_one_hot(word2int[word_pair[0]], len(voc)))
    y_train.append(to_one_hot(word2int[word_pair[1]], len(voc)))

x_train = np.asarray(x_train)
y_train = np.asarray(y_train) ###Output _____no_output_____ ###Markdown step 2. create tensorflow word2vec model ###Code
x = tf.placeholder(dtype=tf.float32, shape=(None, len(voc)))
y_label = tf.placeholder(dtype=tf.float32, shape=(None, len(voc)))

# hidden layer
EMBEDDING_DIM = 5

W1 = tf.Variable(tf.random_normal([len(voc), EMBEDDING_DIM]))
b1 = tf.Variable(tf.random_normal([EMBEDDING_DIM]))
hidden_rep = tf.add(tf.matmul(x, W1), b1)

W2 = tf.Variable(tf.random_normal([EMBEDDING_DIM, len(voc), ]))
b2 = tf.Variable(tf.random_normal([len(voc)]))
pred = tf.nn.softmax(tf.add(tf.matmul(hidden_rep, W2), b2))

# run the model
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)

#loss function
cross_entropy_loss = tf.reduce_mean(-tf.reduce_sum(y_label * tf.log(pred), reduction_indices=1))

# training step
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy_loss)

# epoch number
n_epoch = 10000

for epoch in range(n_epoch):
    sess.run(train_step, feed_dict={x: x_train, y_label: y_train})
    if epoch % 100 == 0:
        print('epoch {}: loss is '.format(epoch), sess.run(cross_entropy_loss, feed_dict={x: x_train, y_label: y_train}))

saver = tf.train.Saver()
saver.save(sess, 'dark_souls_word2vec_model.ckpt') ###Output _____no_output_____
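###Markdown As a follow-up sketch (not part of the blog post walkthrough), the learned embedding for each word is just the corresponding row of W1 plus the bias, and nearest words can be looked up by Euclidean distance using the word2int/int2word mappings built earlier. ###Code
# Pull the learned embeddings out of the session and query nearest neighbours.
vectors = sess.run(W1 + b1)  # one embedding row per vocabulary word

def closest_words(word, n=5):
    query = vectors[word2int[word]]
    dists = np.linalg.norm(vectors - query, axis=1)
    order = np.argsort(dists)
    return [int2word[i] for i in order[1:n + 1]]  # order[0] is the query word itself

print(closest_words(list(voc)[0]))
###Output _____no_output_____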
tutorials/notebook/cx_site_chart_examples/stackedpercent_5.ipynb
###Markdown Example: CanvasXpress stackedpercent Chart No. 5This example page demonstrates how to, using the Python package, create a chart that matches the CanvasXpress online example located at:https://www.canvasxpress.org/examples/stackedpercent-5.htmlThis example is generated using the reproducible JSON obtained from the above page and the `canvasxpress.util.generator.generate_canvasxpress_code_from_json_file()` function.Everything required for the chart to render is included in the code below. Simply run the code block. ###Code from canvasxpress.canvas import CanvasXpress from canvasxpress.js.collection import CXEvents from canvasxpress.render.jupyter import CXNoteBook cx = CanvasXpress( render_to="stackedpercent5", data={ "y": { "vars": [ "Females-in", "Females-out", "Males-in", "Males-out" ], "smps": [ "Top 2 Roles", "Self Reliant", "Human", "Adult with Job", "Child", "No Magic" ], "data": [ [ 35, 33, 34, 18, 14, 50 ], [ 22, 24, 23, 39, 43, 7 ], [ 56, 43, 25, 34, 12, 47 ], [ 4, 17, 35, 26, 48, 13 ] ] }, "z": { "Gender": [ "Female", "Female", "Male", "Male" ], "InOut": [ "In", "Out", "In", "Out" ] }, "x": { "Color": [ "A", "B", "C", "D", "E", "F" ] } }, config={ "background": "rgb(63,149,180)", "backgroundType": "solid", "barPath": True, "barPathColor": "rgb(48,116,154)", "barPathTransparency": 1, "colorBy": "Color", "colors": [ "rgb(255,190,179)", "rgb(251,153,134)", "rgb(244,112,96)", "rgb(230,68,72)", "rgb(210,41,63)", "rgb(173,18,58)" ], "fontName": "Waltograph", "fontsExternal": [ { "name": "Waltograph", "url": "https://www.canvasxpress.org/assets/fonts/waltograph42.otf" } ], "graphOrientation": "horizontal", "graphType": "StackedPercent", "layoutBoxShow": False, "layoutLabelColor": "rgb(255,255,255)", "marginBottom": 50, "marginLeft": 50, "marginRight": 50, "marginTop": 50, "maxTextSize": 80, "objectBorderColor": "rgba(0,0,0,0.0)", "patternBy": "InOut", "patterns": [ "solid", "stripeHorizontal", "hatchForward", "hatchReverse", "stripeVertical", "polkaDot" ], "segregateVariablesBy": [ "Gender" ], "showLegend": False, "smpLabelFontColor": "rgb(255,255,255)", "smpLabelScaleFontFactor": 3, "title": "Fewer Role Models", "titleColor": "rgb(252,157,156)", "titleScaleFontFactor": 2.5, "widthFactor": 1.1, "xAxis2Show": False, "xAxisMajorTicks": False, "xAxisMinorTicks": False, "xAxisShow": False }, width=613, height=613, events=CXEvents(), after_render=[], other_init_params={ "version": 35, "events": False, "info": False, "afterRenderInit": False, "noValidate": True } ) display = CXNoteBook(cx) display.render(output_file="stackedpercent_5.html") ###Output _____no_output_____
Model backlog/Inference/168-tweet-inference-5fold-roberta-balanced-finetun.ipynb
###Markdown Dependencies ###Code import json, glob from tweet_utility_scripts import * from tweet_utility_preprocess_roberta_scripts_aux import * from transformers import TFRobertaModel, RobertaConfig from tokenizers import ByteLevelBPETokenizer from tensorflow.keras import layers from tensorflow.keras.models import Model ###Output _____no_output_____ ###Markdown Load data ###Code test = pd.read_csv('/kaggle/input/tweet-sentiment-extraction/test.csv') print('Test samples: %s' % len(test)) display(test.head()) ###Output Test samples: 3534 ###Markdown Model parameters ###Code input_base_path = '/kaggle/input/168-tweet-train-5fold-roberta-balanced-finetune2/' with open(input_base_path + 'config.json') as json_file: config = json.load(json_file) config base_path = '/kaggle/input/qa-transformers/roberta/' vocab_path = input_base_path + 'vocab.json' merges_path = input_base_path + 'merges.txt' config['base_model_path'] = base_path + 'roberta-base-tf_model.h5' config['config_path'] = base_path + 'roberta-base-config.json' model_path_list = glob.glob(input_base_path + 'model' + '*.h5') model_path_list.sort() print('Models to predict:') print(*model_path_list, sep = "\n") ###Output Models to predict: /kaggle/input/168-tweet-train-5fold-roberta-balanced-finetune2/model_fold_1.h5 /kaggle/input/168-tweet-train-5fold-roberta-balanced-finetune2/model_fold_2.h5 /kaggle/input/168-tweet-train-5fold-roberta-balanced-finetune2/model_fold_3.h5 /kaggle/input/168-tweet-train-5fold-roberta-balanced-finetune2/model_fold_4.h5 /kaggle/input/168-tweet-train-5fold-roberta-balanced-finetune2/model_fold_5.h5 ###Markdown Tokenizer ###Code tokenizer = ByteLevelBPETokenizer(vocab_file=vocab_path, merges_file=merges_path, lowercase=True, add_prefix_space=True) ###Output _____no_output_____ ###Markdown Pre process ###Code test['text'].fillna('', inplace=True) test["text"] = test["text"].apply(lambda x: x.lower()) test["text"] = test["text"].apply(lambda x: x.strip()) x_test, x_test_aux, x_test_aux_2 = get_data_test(test, tokenizer, config['MAX_LEN'], preprocess_fn=preprocess_roberta_test) ###Output _____no_output_____ ###Markdown Model ###Code module_config = RobertaConfig.from_pretrained(config['config_path'], output_hidden_states=False) def model_fn(MAX_LEN): input_ids = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_ids') attention_mask = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='attention_mask') base_model = TFRobertaModel.from_pretrained(config['base_model_path'], config=module_config, name="base_model") last_hidden_state, _ = base_model({'input_ids': input_ids, 'attention_mask': attention_mask}) x_start = layers.Dropout(.1)(last_hidden_state) x_start = layers.Dense(1)(x_start) x_start = layers.Flatten()(x_start) y_start = layers.Activation('softmax', name='y_start')(x_start) x_end = layers.Dropout(.1)(last_hidden_state) x_end = layers.Dense(1)(x_end) x_end = layers.Flatten()(x_end) y_end = layers.Activation('softmax', name='y_end')(x_end) model = Model(inputs=[input_ids, attention_mask], outputs=[y_start, y_end]) return model ###Output _____no_output_____ ###Markdown Make predictions ###Code NUM_TEST_IMAGES = len(test) test_start_preds = np.zeros((NUM_TEST_IMAGES, config['MAX_LEN'])) test_end_preds = np.zeros((NUM_TEST_IMAGES, config['MAX_LEN'])) for model_path in model_path_list: print(model_path) model = model_fn(config['MAX_LEN']) model.load_weights(model_path) test_preds = model.predict(get_test_dataset(x_test, config['BATCH_SIZE'])) test_start_preds += test_preds[0] test_end_preds += 
test_preds[1] ###Output /kaggle/input/168-tweet-train-5fold-roberta-balanced-finetune2/model_fold_1.h5 /kaggle/input/168-tweet-train-5fold-roberta-balanced-finetune2/model_fold_2.h5 /kaggle/input/168-tweet-train-5fold-roberta-balanced-finetune2/model_fold_3.h5 /kaggle/input/168-tweet-train-5fold-roberta-balanced-finetune2/model_fold_4.h5 /kaggle/input/168-tweet-train-5fold-roberta-balanced-finetune2/model_fold_5.h5 ###Markdown Post process ###Code test['start'] = test_start_preds.argmax(axis=-1) test['end'] = test_end_preds.argmax(axis=-1) test['text_len'] = test['text'].apply(lambda x : len(x)) test['text_wordCnt'] = test['text'].apply(lambda x : len(x.split(' '))) test["end"].clip(0, test["text_len"], inplace=True) test["start"].clip(0, test["end"], inplace=True) test['selected_text'] = test.apply(lambda x: decode(x['start'], x['end'], x['text'], config['question_size'], tokenizer), axis=1) test["selected_text"].fillna(test["text"], inplace=True) ###Output _____no_output_____ ###Markdown Visualize predictions ###Code display(test.head(10)) ###Output _____no_output_____ ###Markdown Test set predictions ###Code submission = pd.read_csv('/kaggle/input/tweet-sentiment-extraction/sample_submission.csv') submission['selected_text'] = test["selected_text"] submission.to_csv('submission.csv', index=False) submission.head(10) ###Output _____no_output_____
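###Markdown A small follow-up sketch (not part of the original inference pipeline, and it does not alter the submission above): turn the summed fold outputs into an ensemble average and inspect how confident the ensemble is about its chosen start/end tokens, which can help spot tweets worth a manual look. ###Code
# Ensemble-average the fold outputs and attach a rough confidence score per tweet.
n_folds = len(model_path_list)
avg_start = test_start_preds / n_folds
avg_end = test_end_preds / n_folds

test['confidence'] = avg_start.max(axis=1) * avg_end.max(axis=1)
test[['text', 'selected_text', 'confidence']].sort_values('confidence').head(10)
###Output _____no_output_____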
notebooks/nowcast/August 2016 renewal.ipynb
###Markdown Looking at the August 2016 renewal in nowcast-green, nowcast and obs ###Code import matplotlib.pyplot as plt import numpy as np import netCDF4 as nc import datetime from dateutil import tz import os import pandas as pd from salishsea_tools import ( geo_tools, places, psu_tools, teos_tools, data_tools, tidetools, ) from nowcast import analyze from nowcast.figures import shared %matplotlib inline runs={} t_o=datetime.datetime(2016,8,1); t_f = datetime.datetime(2016,9,21) fnames = analyze.get_filenames(t_o, t_f, '1d', 'grid_T', '/results/SalishSea/nowcast-green/') grid_B=nc.Dataset('/data/nsoontie/MEOPAR/NEMO-forcing/grid/bathy_downonegrid2.nc') mesh_mask = nc.Dataset('/data/nsoontie/MEOPAR/NEMO-forcing/grid/mesh_mask_downbyone2.nc') runs = {'nowcast-green': {'grid': grid_B, 'mesh': mesh_mask, 'fnames': fnames, 'nemo36': True}} fnames = analyze.get_filenames(t_o, t_f, '1d', 'grid_T', '/results/SalishSea/nowcast/') grid_B=nc.Dataset('/data/nsoontie/MEOPAR/NEMO-forcing/grid/bathy_meter_SalishSea2.nc') mesh_mask = nc.Dataset('/data/nsoontie/MEOPAR/NEMO-forcing/grid/mesh_mask_SalishSea2.nc') runs['nowcast']= {'grid': grid_B, 'mesh': mesh_mask, 'fnames': fnames, 'nemo36': False} def get_onc_TS_time_series(station, t_o, t_f): """Grab the ONC temperature and salinity time series for a station between dates t_o and t_f. Return results as separate temperature and salinty data frames.""" numdays = (t_f-t_o).days dates = [t_o + datetime.timedelta(days=num) for num in range(0, numdays+1)] sal_pd = pd.DataFrame({'time':[], 'data': []}) temp_pd = pd.DataFrame({'time': [], 'data': []}) station_code = places.PLACES[station]['ONC stationCode'] for date in dates: onc_data = data_tools.get_onc_data( 'scalardata', 'getByStation', os.environ['ONC_USER_TOKEN'], station=station_code, deviceCategory='CTD', sensors='salinity,temperature', dateFrom=data_tools.onc_datetime(date, 'utc')) try: ctd_data=data_tools.onc_json_to_dataset(onc_data, teos=False) #keep in PSU! #quality control qc_sal = np.array(ctd_data.salinity.qaqcFlag) qc_temp = np.array(ctd_data.temperature.qaqcFlag) #average sal_pd = sal_pd.append({'time': ctd_data.salinity.sampleTime.values[0], 'data': ctd_data.salinity.values[qc_sal==1].mean()}, ignore_index=True) temp_pd = temp_pd.append({'time': ctd_data.temperature.sampleTime.values[0], 'data': ctd_data.temperature.values[qc_temp==1].mean()}, ignore_index=True) except TypeError: print('No data for {} at {}'.format(date, station)) return sal_pd, temp_pd def get_model_time_series(station, fnames, grid_B, mesh_mask, nemo_36=True): """Retrieve the density, salinity and temperature time series at a station. 
Time series is created from files listed in fnames""" if nemo_36: depth_var='gdept_0' depth_var_w = 'gdepw_0' else: depth_var='gdept' depth_var_w = 'gdepw' #station info lon = places.PLACES[station]['lon lat'][0] lat = places.PLACES[station]['lon lat'][1] depth = places.PLACES[station]['depth'] # model corresponding locations and variables bathy, X, Y = tidetools.get_bathy_data(grid_B) j, i = geo_tools.find_closest_model_point(lon,lat,X,Y, land_mask=bathy.mask) model_depths = mesh_mask.variables[depth_var][0,:,j,i] tmask = mesh_mask.variables['tmask'][0,:,j,i] wdeps = mesh_mask.variables[depth_var_w][0,:,j,i] sal, time = analyze.combine_files(fnames,'vosaline','None',j,i) temp, time = analyze.combine_files(fnames,'votemper','None',j,i) # interpolate: sal_interp=np.array( [shared.interpolate_tracer_to_depths( sal[i,:],model_depths,depth,tmask,wdeps) for i in range(sal.shape[0])]) temp_interp=np.array( [shared.interpolate_tracer_to_depths( temp[i,:],model_depths,depth,tmask,wdeps) for i in range(temp.shape[0])]) # convert to psu for using density function if nemo_36: sal_interp = teos_tools.teos_psu(sal_interp) density = psu_tools.calculate_density(temp_interp, sal_interp) return density, sal_interp, temp_interp, time rhos = {'nowcast': {}, 'nowcast-green': {}} times = {'nowcast': {}, 'nowcast-green': {}} sals={'nowcast': {}, 'nowcast-green': {}} temps={'nowcast': {}, 'nowcast-green': {}} stations = ['Central node', 'East node', 'Delta BBL node', 'Delta DDL node'] for sim in ['nowcast', 'nowcast-green']: print(sim) for station in stations: print(station) rhos[sim][station], sals[sim][station], temps[sim][station], times[sim][station] = \ get_model_time_series(station, runs[sim]['fnames'], runs[sim]['grid'], runs[sim]['mesh'], nemo_36=runs[sim]['nemo36'] ) fig, axs = plt.subplots(3,4,figsize=(20,10), sharex=True) names = ['Density [kg/m^3]', 'Salinty [psu]', 'Temperature [C]'] titles = ['density', 'salinity', 'temperature'] ticks = [[1022.6,1024.6], [29.6, 32], [9,12]] cols = ['b', 'g'] for i, station in enumerate(stations): axc = axs[:,i] obs_sal, obs_temp = get_onc_TS_time_series(station, t_o, t_f) obs_rho = pd.DataFrame({'time':[], 'data':[]}) obs_rho.data = psu_tools.calculate_density(obs_temp['data'].values, obs_sal['data'].values ) obs_rho.time = obs_temp.time obs = [obs_rho, obs_sal,obs_temp] for sim, col in zip(['nowcast', 'nowcast-green'], cols): variables = [rhos[sim], sals[sim], temps[sim]] t = times[sim] for var, name, title, ax, ob, tick in zip(variables, names, titles, axc, obs,ticks): if sim == 'nowcast': #only plot obs once ax.plot(ob.time, ob.data, 'k', label='obs'.format(station), lw=2) ax.plot(np.array(t[station]), np.array(var[station]), c=col, label=sim, lw=2) ax.set_ylabel('Daily averaged {}'.format(name)) ax.set_title('{} - {} m'.format(station, places.PLACES[station]['depth'])) ax.set_ylim(tick) for ax in axs.flatten(): ax.grid() ax.get_yaxis().get_major_formatter().set_useOffset(False) for ax in axs[:,-1]: ax.legend(loc=(1,0.25)) fig.autofmt_xdate() ###Output _____no_output_____
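###Markdown A quick follow-up sketch (not part of the original figure): compare the period-mean salinity of each simulation against the ONC observations at a single node, reusing the helper functions and arrays built above. This is only a coarse bias number over the whole window, not a matched-in-time comparison. ###Code
# Coarse salinity bias over the full window at one node.
station = 'Central node'
obs_sal, _ = get_onc_TS_time_series(station, t_o, t_f)
obs_mean = obs_sal['data'].mean()

for sim in ['nowcast', 'nowcast-green']:
    model_mean = np.nanmean(np.array(sals[sim][station]))
    print('{}: mean salinity {:.2f} psu vs observed {:.2f} psu (bias {:+.2f})'.format(
        sim, model_mean, obs_mean, model_mean - obs_mean))
###Output _____no_output_____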
初稿/Contingency tables(列联表).ipynb
###Markdown http://www.statsmodels.org/stable/contingency_tables.html

Statsmodels supports a variety of approaches for analyzing contingency tables, including methods for assessing independence, symmetry and homogeneity, and methods for working with collections of tables from a stratified population. The methods described here are mainly for two-way tables. Multi-way tables can be analyzed with log-linear models. Statsmodels does not currently have a dedicated API for log-linear modeling, but Poisson regression in statsmodels.genmod.GLM can be used for this purpose. ![image.png](attachment:image.png) statsmodels.stats.Table is the most basic class for working with contingency tables. We can create a Table object directly from any rectangular array-like object containing the contingency table cell counts: ###Code
import numpy as np
import pandas as pd
import statsmodels.api as sm

df = sm.datasets.get_rdataset("Arthritis", "vcd").data

tab = pd.crosstab(df['Treatment'], df['Improved'])
tab = tab.loc[:, ["None", "Some", "Marked"]]
table = sm.stats.Table(tab)
df.head() ###Output _____no_output_____ ###Markdown Alternatively, we can pass the raw data and let the Table class construct the array of cell counts for us: ###Code
table = sm.stats.Table.from_data(df[['Treatment','Improved']]) ###Output _____no_output_____ ###Markdown Independence Independence is the property that the row and column factors occur independently. Association is the lack of independence. If the joint distribution is independent, it can be written as the outer product of the row and column marginal distributions, i.e. $P_{ij} = r_i c_j$.![image.png](attachment:image.png)**We can obtain the best-fitting independent distribution for our observed data, and then view residuals that identify the particular cells that most strongly violate independence.** ###Code
table.table_orig
table.fittedvalues
table.resid_pearson ###Output _____no_output_____ ###Markdown **If the rows and columns of the table are unordered (i.e. nominal factors), the most common approach for formally assessing independence is Pearson's chi-squared statistic.** ###Code
rslt = table.test_nominal_association()
print(rslt.pvalue)
table.chi2_contribs ###Output _____no_output_____ ###Markdown **For tables with ordered row and column factors, we can use the linear-by-linear association test to obtain more power against alternative hypotheses that respect the ordering. The test statistic of the linear-by-linear association test is**![image.png](attachment:image.png)where $r_i$ and $c_j$ are the row and column scores. Often these scores are set to the sequence 0, 1, .... This gives the Cochran-Armitage trend test. ###Code
rslt = table.test_ordinal_association()
print(rslt.pvalue) ###Output 0.023644578093923983 ###Markdown **We can assess the association in an r×c table by constructing a series of 2×2 tables and calculating their odds ratios (OR). There are two ways to do this. The local odds ratios construct 2×2 tables from adjacent row and column categories.** ###Code
table.local_oddsratios
taloc = sm.stats.Table2x2(np.asarray([[7,29],[21,13]]))
print(taloc.oddsratio)
taloc = sm.stats.Table2x2(np.asarray([[29,7],[13,7]]))
print(taloc.oddsratio) ###Output 2.230769230769231 ###Markdown **The cumulative odds ratios construct 2×2 tables by dichotomizing the row and column factors at each possible point.** ###Code
table.cumulative_oddsratios
tab1 = np.asarray([[7, 29 + 7], [21, 13 + 7]])
tacum = sm.stats.Table2x2(tab1)
print(tacum.oddsratio)
tab1 = np.asarray([[7 + 29, 7], [21 + 13, 7]])
tacum = sm.stats.Table2x2(tab1)
print(tacum.oddsratio) ###Output 1.0588235294117647 ###Markdown **A mosaic plot is a graphical approach to informally assessing dependence in a two-way table.** ###Code
from statsmodels.graphics.mosaicplot import mosaic
mosaic(df, ['Treatment', 'Improved']) ###Output _____no_output_____ ###Markdown Symmetry and homogeneity Symmetry is the property that $P_{ij} = P_{ji}$ for every $i$ and $j$.![image.png](attachment:image.png) Homogeneity is the property that the marginal distribution of the row factor and the marginal distribution of the column factor are identical.![image.png](attachment:image.png)**Note that P (and T) must be square, and the row and column categories must be the same and must appear in the same order.** To illustrate, we load a dataset, create a contingency table, and calculate the row and column margins. The Table class contains methods for analyzing r×c contingency tables. The dataset loaded below contains assessments of visual acuity in people's left and right eyes. We first load the data and create a contingency table. ###Code
df = sm.datasets.get_rdataset("VisualAcuity", "vcd").data
df = df.loc[df.gender == "female", :]
tab = df.set_index(['left', 'right'])
del tab["gender"]
tab = tab.unstack()
tab.columns = tab.columns.get_level_values(1)
print(tab) ###Output _____no_output_____ ###Markdown Create a SquareTable object from the contingency table ###Code
sqtab = sm.stats.SquareTable(tab)
row, col = sqtab.marginal_probabilities
print(row)
print(col) ###Output Improved
Marked     0.025907
None       0.025907
Placebo    0.471503
Some       0.025907
Treated    0.450777
dtype: float64
Improved
Marked     0.316062
None       0.461140
Placebo    0.025907
Some       0.170984
Treated    0.025907
dtype: float64 ###Markdown The summary method prints the results of the tests of symmetry and homogeneity ###Code
sqtab.summary() ###Output _____no_output_____ ###Markdown If we have the individual case records in a dataframe called data, we could also perform the same analysis by passing the raw data using the **SquareTable.from_data class method**. A single 2x2 table 
sm.stats.Table2x2 provides several methods for working with a single 2x2 table. The summary method displays a number of measures of association between the rows and columns of the table. ###Code
table = np.asarray([[35,21],[25,58]])
t22 = sm.stats.Table2x2(table)
t22.summary() ###Output _____no_output_____ ###Markdown Stratified 2x2 tables Stratification occurs when we have a collection of contingency tables defined by the same row and column factors. In the example below, we have a collection of 2x2 tables reflecting the joint distribution of smoking and lung cancer in several regions of China. The tables may all have a common odds ratio, even while the marginal probabilities vary among the strata. The "Breslow-Day" procedure tests whether the data are consistent with a common odds ratio; it appears below as the test of constant OR. The Mantel-Haenszel procedure tests whether this common odds ratio is equal to one; it appears below as the test of OR = 1. It is also possible to estimate the common odds and risk ratios and obtain confidence intervals for them. The summary method displays all of these results. Individual results can be obtained from the class methods and attributes. ###Code
data = sm.datasets.china_smoking.load()
mat = np.asarray(data.data)
tables = [np.reshape(x.tolist()[1:], (2,2)) for x in mat]
st = sm.stats.StratifiedTable(tables)
st.summary() ###Output _____no_output_____
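###Markdown The summary above bundles everything together; as noted, the individual quantities can also be pulled out directly. A short sketch is below — attribute and method names are as in recent statsmodels versions, so they may differ slightly in older releases. ###Code
# Individual stratified-table results, pulled out of the same StratifiedTable object.
print('pooled odds ratio:', st.oddsratio_pooled)
print('Breslow-Day (constant OR) p-value:', st.test_equal_odds().pvalue)
print('Mantel-Haenszel (OR = 1) p-value:', st.test_null_odds().pvalue)
print('95% CI for the pooled odds ratio:', st.oddsratio_pooled_confint())
###Output _____no_output_____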
data-web-scraping.ipynb
###Markdown **Space X Falcon 9 First Stage Landing Prediction** Web scraping Falcon 9 and Falcon Heavy Launches Records from Wikipedia we will performing web scraping to collect Falcon 9 historical launch records from a Wikipedia page titled `List of Falcon 9 and Falcon Heavy launches`https://en.wikipedia.org/wiki/List_of_Falcon\_9\_and_Falcon_Heavy_launches ObjectivesWeb scrap Falcon 9 launch records with `BeautifulSoup`:* Extract a Falcon 9 launch records HTML table from Wikipedia* Parse the table and convert it into a Pandas data frame ###Code import sys import requests # Trying to import BeautifulSoup try: from bs4 import BeautifulSoup except ImportError as e: !{sys.executable} -m pip install beautifulsoup4 from bs4 import BeautifulSoup import re import unicodedata import pandas as pd def date_time(table_cells): """ This function returns the data and time from the HTML table cell Input: the element of a table data cell extracts extra row """ return [data_time.strip() for data_time in list(table_cells.strings)][0:2] def booster_version(table_cells): """ This function returns the booster version from the HTML table cell Input: the element of a table data cell extracts extra row """ out=''.join([booster_version for i,booster_version in enumerate( table_cells.strings) if i%2==0][0:-1]) return out def landing_status(table_cells): """ This function returns the landing status from the HTML table cell Input: the element of a table data cell extracts extra row """ out=[i for i in table_cells.strings][0] return out def get_mass(table_cells): mass=unicodedata.normalize("NFKD", table_cells.text).strip() if mass: mass.find("kg") new_mass=mass[0:mass.find("kg")+2] else: new_mass=0 return new_mass def extract_column_from_header(row): """ This function returns the landing status from the HTML table cell Input: the element of a table data cell extracts extra row """ if (row.br): row.br.extract() if row.a: row.a.extract() if row.sup: row.sup.extract() colunm_name = ' '.join(row.contents) # Filter the digit and empty names if not(colunm_name.strip().isdigit()): colunm_name = colunm_name.strip() return colunm_name static_url = "https://en.wikipedia.org/w/index.php?title=List_of_Falcon_9_and_Falcon_Heavy_launches&oldid=1027686922" ###Output _____no_output_____ ###Markdown Next, request the HTML page from the above URL and get a `response` object TASK 1: Request the Falcon9 Launch Wiki page from its URL First, let's perform an HTTP GET method to request the Falcon9 Launch HTML page, as an HTTP response. ###Code # use requests.get() method with the provided static_url # assign the response to a object page = requests.get(static_url) page.status_code ###Output _____no_output_____ ###Markdown Create a `BeautifulSoup` object from the HTML `response` ###Code # Use BeautifulSoup() to create a BeautifulSoup object from a response text content soup = BeautifulSoup(page.text, 'html.parser') ###Output _____no_output_____ ###Markdown Print the page title to verify if the `BeautifulSoup` object was created properly ###Code # Use soup.title attribute soup.title ###Output _____no_output_____ ###Markdown TASK 2: Extract all column/variable names from the HTML table header Next, we want to collect all relevant column names from the HTML table header Let's try to find all tables on the wiki page first. 
If you need to refresh your memory about `BeautifulSoup`, please check the external reference link towards the end of this lab ###Code # Use the find_all function in the BeautifulSoup object, with element type `table` # Assign the result to a list called `html_tables` html_tables = soup.find_all('table') ###Output _____no_output_____ ###Markdown Starting from the third table is our target table contains the actual launch records. ###Code # Let's print the third table and check its content first_launch_table = html_tables[2] print(first_launch_table) ###Output <table class="wikitable plainrowheaders collapsible" style="width: 100%;"> <tbody><tr> <th scope="col">Flight No. </th> <th scope="col">Date and<br/>time (<a href="/wiki/Coordinated_Universal_Time" title="Coordinated Universal Time">UTC</a>) </th> <th scope="col"><a href="/wiki/List_of_Falcon_9_first-stage_boosters" title="List of Falcon 9 first-stage boosters">Version,<br/>Booster</a> <sup class="reference" id="cite_ref-booster_11-0"><a href="#cite_note-booster-11">[b]</a></sup> </th> <th scope="col">Launch site </th> <th scope="col">Payload<sup class="reference" id="cite_ref-Dragon_12-0"><a href="#cite_note-Dragon-12">[c]</a></sup> </th> <th scope="col">Payload mass </th> <th scope="col">Orbit </th> <th scope="col">Customer </th> <th scope="col">Launch<br/>outcome </th> <th scope="col"><a href="/wiki/Falcon_9_first-stage_landing_tests" title="Falcon 9 first-stage landing tests">Booster<br/>landing</a> </th></tr> <tr> <th rowspan="2" scope="row" style="text-align:center;">1 </th> <td>4 June 2010,<br/>18:45 </td> <td><a href="/wiki/Falcon_9_v1.0" title="Falcon 9 v1.0">F9 v1.0</a><sup class="reference" id="cite_ref-MuskMay2012_13-0"><a href="#cite_note-MuskMay2012-13">[7]</a></sup><br/>B0003.1<sup class="reference" id="cite_ref-block_numbers_14-0"><a href="#cite_note-block_numbers-14">[8]</a></sup> </td> <td><a href="/wiki/Cape_Canaveral_Space_Force_Station" title="Cape Canaveral Space Force Station">CCAFS</a>,<br/><a href="/wiki/Cape_Canaveral_Space_Launch_Complex_40" title="Cape Canaveral Space Launch Complex 40">SLC-40</a> </td> <td><a href="/wiki/Dragon_Spacecraft_Qualification_Unit" title="Dragon Spacecraft Qualification Unit">Dragon Spacecraft Qualification Unit</a> </td> <td> </td> <td><a href="/wiki/Low_Earth_orbit" title="Low Earth orbit">LEO</a> </td> <td><a href="/wiki/SpaceX" title="SpaceX">SpaceX</a> </td> <td class="table-success" style="background: LightGreen; color: black; vertical-align: middle; text-align: center;">Success </td> <td class="table-failure" style="background: #ffbbbb; color: black; vertical-align: middle; text-align: center;">Failure<sup class="reference" id="cite_ref-ns20110930_15-0"><a href="#cite_note-ns20110930-15">[9]</a></sup><sup class="reference" id="cite_ref-16"><a href="#cite_note-16">[10]</a></sup><br/><small>(parachute)</small> </td></tr> <tr> <td colspan="9">First flight of Falcon 9 v1.0.<sup class="reference" id="cite_ref-sfn20100604_17-0"><a href="#cite_note-sfn20100604-17">[11]</a></sup> Used a boilerplate version of Dragon capsule which was not designed to separate from the second stage.<small>(<a href="#First_flight_of_Falcon_9">more details below</a>)</small> Attempted to recover the first stage by parachuting it into the ocean, but it burned up on reentry, before the parachutes even deployed.<sup class="reference" id="cite_ref-parachute_18-0"><a href="#cite_note-parachute-18">[12]</a></sup> </td></tr> <tr> <th rowspan="2" scope="row" style="text-align:center;">2 </th> <td>8 December 
2010,<br/>15:43<sup class="reference" id="cite_ref-spaceflightnow_Clark_Launch_Report_19-0"><a href="#cite_note-spaceflightnow_Clark_Launch_Report-19">[13]</a></sup> </td> <td><a href="/wiki/Falcon_9_v1.0" title="Falcon 9 v1.0">F9 v1.0</a><sup class="reference" id="cite_ref-MuskMay2012_13-1"><a href="#cite_note-MuskMay2012-13">[7]</a></sup><br/>B0004.1<sup class="reference" id="cite_ref-block_numbers_14-1"><a href="#cite_note-block_numbers-14">[8]</a></sup> </td> <td><a href="/wiki/Cape_Canaveral_Space_Force_Station" title="Cape Canaveral Space Force Station">CCAFS</a>,<br/><a href="/wiki/Cape_Canaveral_Space_Launch_Complex_40" title="Cape Canaveral Space Launch Complex 40">SLC-40</a> </td> <td><a href="/wiki/SpaceX_Dragon" title="SpaceX Dragon">Dragon</a> <a class="mw-redirect" href="/wiki/COTS_Demo_Flight_1" title="COTS Demo Flight 1">demo flight C1</a><br/>(Dragon C101) </td> <td> </td> <td><a href="/wiki/Low_Earth_orbit" title="Low Earth orbit">LEO</a> (<a href="/wiki/International_Space_Station" title="International Space Station">ISS</a>) </td> <td><div class="plainlist"> <ul><li><a href="/wiki/NASA" title="NASA">NASA</a> (<a href="/wiki/Commercial_Orbital_Transportation_Services" title="Commercial Orbital Transportation Services">COTS</a>)</li> <li><a href="/wiki/National_Reconnaissance_Office" title="National Reconnaissance Office">NRO</a></li></ul> </div> </td> <td class="table-success" style="background: LightGreen; color: black; vertical-align: middle; text-align: center;">Success<sup class="reference" id="cite_ref-ns20110930_15-1"><a href="#cite_note-ns20110930-15">[9]</a></sup> </td> <td class="table-failure" style="background: #ffbbbb; color: black; vertical-align: middle; text-align: center;">Failure<sup class="reference" id="cite_ref-ns20110930_15-2"><a href="#cite_note-ns20110930-15">[9]</a></sup><sup class="reference" id="cite_ref-20"><a href="#cite_note-20">[14]</a></sup><br/><small>(parachute)</small> </td></tr> <tr> <td colspan="9">Maiden flight of <a class="mw-redirect" href="/wiki/Dragon_capsule" title="Dragon capsule">Dragon capsule</a>, consisting of over 3 hours of testing thruster maneuvering and reentry.<sup class="reference" id="cite_ref-spaceflightnow_Clark_unleashing_Dragon_21-0"><a href="#cite_note-spaceflightnow_Clark_unleashing_Dragon-21">[15]</a></sup> Attempted to recover the first stage by parachuting it into the ocean, but it disintegrated upon reentry, before the parachutes were deployed.<sup class="reference" id="cite_ref-parachute_18-1"><a href="#cite_note-parachute-18">[12]</a></sup> <small>(<a href="#COTS_demo_missions">more details below</a>)</small> It also included two <a href="/wiki/CubeSat" title="CubeSat">CubeSats</a>,<sup class="reference" id="cite_ref-NRO_Taps_Boeing_for_Next_Batch_of_CubeSats_22-0"><a href="#cite_note-NRO_Taps_Boeing_for_Next_Batch_of_CubeSats-22">[16]</a></sup> and a wheel of <a href="/wiki/Brou%C3%A8re" title="Brouère">Brouère</a> cheese. 
</td></tr> <tr> <th rowspan="2" scope="row" style="text-align:center;">3 </th> <td>22 May 2012,<br/>07:44<sup class="reference" id="cite_ref-BBC_new_era_23-0"><a href="#cite_note-BBC_new_era-23">[17]</a></sup> </td> <td><a href="/wiki/Falcon_9_v1.0" title="Falcon 9 v1.0">F9 v1.0</a><sup class="reference" id="cite_ref-MuskMay2012_13-2"><a href="#cite_note-MuskMay2012-13">[7]</a></sup><br/>B0005.1<sup class="reference" id="cite_ref-block_numbers_14-2"><a href="#cite_note-block_numbers-14">[8]</a></sup> </td> <td><a href="/wiki/Cape_Canaveral_Space_Force_Station" title="Cape Canaveral Space Force Station">CCAFS</a>,<br/><a href="/wiki/Cape_Canaveral_Space_Launch_Complex_40" title="Cape Canaveral Space Launch Complex 40">SLC-40</a> </td> <td><a href="/wiki/SpaceX_Dragon" title="SpaceX Dragon">Dragon</a> <a class="mw-redirect" href="/wiki/Dragon_C2%2B" title="Dragon C2+">demo flight C2+</a><sup class="reference" id="cite_ref-C2_24-0"><a href="#cite_note-C2-24">[18]</a></sup><br/>(Dragon C102) </td> <td>525 kg (1,157 lb)<sup class="reference" id="cite_ref-25"><a href="#cite_note-25">[19]</a></sup> </td> <td><a href="/wiki/Low_Earth_orbit" title="Low Earth orbit">LEO</a> (<a href="/wiki/International_Space_Station" title="International Space Station">ISS</a>) </td> <td><a href="/wiki/NASA" title="NASA">NASA</a> (<a href="/wiki/Commercial_Orbital_Transportation_Services" title="Commercial Orbital Transportation Services">COTS</a>) </td> <td class="table-success" style="background: LightGreen; color: black; vertical-align: middle; text-align: center;">Success<sup class="reference" id="cite_ref-26"><a href="#cite_note-26">[20]</a></sup> </td> <td class="table-noAttempt" style="background: #ececec; color: black; vertical-align: middle; white-space: nowrap; text-align: center;">No attempt </td></tr> <tr> <td colspan="9">Dragon spacecraft demonstrated a series of tests before it was allowed to approach the <a href="/wiki/International_Space_Station" title="International Space Station">International Space Station</a>. 
Two days later, it became the first commercial spacecraft to board the ISS.<sup class="reference" id="cite_ref-BBC_new_era_23-1"><a href="#cite_note-BBC_new_era-23">[17]</a></sup> <small>(<a href="#COTS_demo_missions">more details below</a>)</small> </td></tr> <tr> <th rowspan="3" scope="row" style="text-align:center;">4 </th> <td rowspan="2">8 October 2012,<br/>00:35<sup class="reference" id="cite_ref-SFN_LLog_27-0"><a href="#cite_note-SFN_LLog-27">[21]</a></sup> </td> <td rowspan="2"><a href="/wiki/Falcon_9_v1.0" title="Falcon 9 v1.0">F9 v1.0</a><sup class="reference" id="cite_ref-MuskMay2012_13-3"><a href="#cite_note-MuskMay2012-13">[7]</a></sup><br/>B0006.1<sup class="reference" id="cite_ref-block_numbers_14-3"><a href="#cite_note-block_numbers-14">[8]</a></sup> </td> <td rowspan="2"><a href="/wiki/Cape_Canaveral_Space_Force_Station" title="Cape Canaveral Space Force Station">CCAFS</a>,<br/><a href="/wiki/Cape_Canaveral_Space_Launch_Complex_40" title="Cape Canaveral Space Launch Complex 40">SLC-40</a> </td> <td><a href="/wiki/SpaceX_CRS-1" title="SpaceX CRS-1">SpaceX CRS-1</a><sup class="reference" id="cite_ref-sxManifest20120925_28-0"><a href="#cite_note-sxManifest20120925-28">[22]</a></sup><br/>(Dragon C103) </td> <td>4,700 kg (10,400 lb) </td> <td><a href="/wiki/Low_Earth_orbit" title="Low Earth orbit">LEO</a> (<a href="/wiki/International_Space_Station" title="International Space Station">ISS</a>) </td> <td><a href="/wiki/NASA" title="NASA">NASA</a> (<a href="/wiki/Commercial_Resupply_Services" title="Commercial Resupply Services">CRS</a>) </td> <td class="table-success" style="background: LightGreen; color: black; vertical-align: middle; text-align: center;">Success </td> <td rowspan="2" style="background:#ececec; text-align:center;"><span class="nowrap">No attempt</span> </td></tr> <tr> <td><a href="/wiki/Orbcomm_(satellite)" title="Orbcomm (satellite)">Orbcomm-OG2</a><sup class="reference" id="cite_ref-Orbcomm_29-0"><a href="#cite_note-Orbcomm-29">[23]</a></sup> </td> <td>172 kg (379 lb)<sup class="reference" id="cite_ref-gunter-og2_30-0"><a href="#cite_note-gunter-og2-30">[24]</a></sup> </td> <td><a href="/wiki/Low_Earth_orbit" title="Low Earth orbit">LEO</a> </td> <td><a href="/wiki/Orbcomm" title="Orbcomm">Orbcomm</a> </td> <td class="table-partial" style="background: wheat; color: black; vertical-align: middle; text-align: center;">Partial failure<sup class="reference" id="cite_ref-nyt-20121030_31-0"><a href="#cite_note-nyt-20121030-31">[25]</a></sup> </td></tr> <tr> <td colspan="9">CRS-1 was successful, but the <a href="/wiki/Secondary_payload" title="Secondary payload">secondary payload</a> was inserted into an abnormally low orbit and subsequently lost. This was due to one of the nine <a href="/wiki/SpaceX_Merlin" title="SpaceX Merlin">Merlin engines</a> shutting down during the launch, and NASA declining a second reignition, as per <a href="/wiki/International_Space_Station" title="International Space Station">ISS</a> visiting vehicle safety rules, the primary payload owner is contractually allowed to decline a second reignition. 
NASA stated that this was because SpaceX could not guarantee a high enough likelihood of the second stage completing the second burn successfully which was required to avoid any risk of secondary payload's collision with the ISS.<sup class="reference" id="cite_ref-OrbcommTotalLoss_32-0"><a href="#cite_note-OrbcommTotalLoss-32">[26]</a></sup><sup class="reference" id="cite_ref-sn20121011_33-0"><a href="#cite_note-sn20121011-33">[27]</a></sup><sup class="reference" id="cite_ref-34"><a href="#cite_note-34">[28]</a></sup> </td></tr> <tr> <th rowspan="2" scope="row" style="text-align:center;">5 </th> <td>1 March 2013,<br/>15:10 </td> <td><a href="/wiki/Falcon_9_v1.0" title="Falcon 9 v1.0">F9 v1.0</a><sup class="reference" id="cite_ref-MuskMay2012_13-4"><a href="#cite_note-MuskMay2012-13">[7]</a></sup><br/>B0007.1<sup class="reference" id="cite_ref-block_numbers_14-4"><a href="#cite_note-block_numbers-14">[8]</a></sup> </td> <td><a href="/wiki/Cape_Canaveral_Space_Force_Station" title="Cape Canaveral Space Force Station">CCAFS</a>,<br/><a href="/wiki/Cape_Canaveral_Space_Launch_Complex_40" title="Cape Canaveral Space Launch Complex 40">SLC-40</a> </td> <td><a href="/wiki/SpaceX_CRS-2" title="SpaceX CRS-2">SpaceX CRS-2</a><sup class="reference" id="cite_ref-sxManifest20120925_28-1"><a href="#cite_note-sxManifest20120925-28">[22]</a></sup><br/>(Dragon C104) </td> <td>4,877 kg (10,752 lb) </td> <td><a href="/wiki/Low_Earth_orbit" title="Low Earth orbit">LEO</a> (<a class="mw-redirect" href="/wiki/ISS" title="ISS">ISS</a>) </td> <td><a href="/wiki/NASA" title="NASA">NASA</a> (<a href="/wiki/Commercial_Resupply_Services" title="Commercial Resupply Services">CRS</a>) </td> <td class="table-success" style="background: LightGreen; color: black; vertical-align: middle; text-align: center;">Success </td> <td class="table-noAttempt" style="background: #ececec; color: black; vertical-align: middle; white-space: nowrap; text-align: center;">No attempt </td></tr> <tr> <td colspan="9">Last launch of the original Falcon 9 v1.0 <a href="/wiki/Launch_vehicle" title="Launch vehicle">launch vehicle</a>, first use of the unpressurized trunk section of Dragon.<sup class="reference" id="cite_ref-sxf9_20110321_35-0"><a href="#cite_note-sxf9_20110321-35">[29]</a></sup> </td></tr> <tr> <th rowspan="2" scope="row" style="text-align:center;">6 </th> <td>29 September 2013,<br/>16:00<sup class="reference" id="cite_ref-pa20130930_36-0"><a href="#cite_note-pa20130930-36">[30]</a></sup> </td> <td><a href="/wiki/Falcon_9_v1.1" title="Falcon 9 v1.1">F9 v1.1</a><sup class="reference" id="cite_ref-MuskMay2012_13-5"><a href="#cite_note-MuskMay2012-13">[7]</a></sup><br/>B1003<sup class="reference" id="cite_ref-block_numbers_14-5"><a href="#cite_note-block_numbers-14">[8]</a></sup> </td> <td><a class="mw-redirect" href="/wiki/Vandenberg_Air_Force_Base" title="Vandenberg Air Force Base">VAFB</a>,<br/><a href="/wiki/Vandenberg_Space_Launch_Complex_4" title="Vandenberg Space Launch Complex 4">SLC-4E</a> </td> <td><a href="/wiki/CASSIOPE" title="CASSIOPE">CASSIOPE</a><sup class="reference" id="cite_ref-sxManifest20120925_28-2"><a href="#cite_note-sxManifest20120925-28">[22]</a></sup><sup class="reference" id="cite_ref-CASSIOPE_MDA_37-0"><a href="#cite_note-CASSIOPE_MDA-37">[31]</a></sup> </td> <td>500 kg (1,100 lb) </td> <td><a href="/wiki/Polar_orbit" title="Polar orbit">Polar orbit</a> <a href="/wiki/Low_Earth_orbit" title="Low Earth orbit">LEO</a> </td> <td><a href="/wiki/Maxar_Technologies" title="Maxar Technologies">MDA</a> </td> <td 
class="table-success" style="background: LightGreen; color: black; vertical-align: middle; text-align: center;">Success<sup class="reference" id="cite_ref-pa20130930_36-1"><a href="#cite_note-pa20130930-36">[30]</a></sup> </td> <td class="table-no2" style="background: #ffdddd; color: black; vertical-align: middle; text-align: center;">Uncontrolled<br/><small>(ocean)</small><sup class="reference" id="cite_ref-ocean_landing_38-0"><a href="#cite_note-ocean_landing-38">[d]</a></sup> </td></tr> <tr> <td colspan="9">First commercial mission with a private customer, first launch from Vandenberg, and demonstration flight of Falcon 9 v1.1 with an improved 13-tonne to LEO capacity.<sup class="reference" id="cite_ref-sxf9_20110321_35-1"><a href="#cite_note-sxf9_20110321-35">[29]</a></sup> After separation from the second stage carrying Canadian commercial and scientific satellites, the first stage booster performed a controlled reentry,<sup class="reference" id="cite_ref-39"><a href="#cite_note-39">[32]</a></sup> and an <a href="/wiki/Falcon_9_first-stage_landing_tests" title="Falcon 9 first-stage landing tests">ocean touchdown test</a> for the first time. This provided good test data, even though the booster started rolling as it neared the ocean, leading to the shutdown of the central engine as the roll depleted it of fuel, resulting in a hard impact with the ocean.<sup class="reference" id="cite_ref-pa20130930_36-2"><a href="#cite_note-pa20130930-36">[30]</a></sup> This was the first known attempt of a rocket engine being lit to perform a supersonic retro propulsion, and allowed SpaceX to enter a public-private partnership with <a href="/wiki/NASA" title="NASA">NASA</a> and its Mars entry, descent, and landing technologies research projects.<sup class="reference" id="cite_ref-40"><a href="#cite_note-40">[33]</a></sup> <small>(<a href="#Maiden_flight_of_v1.1">more details below</a>)</small> </td></tr> <tr> <th rowspan="2" scope="row" style="text-align:center;">7 </th> <td>3 December 2013,<br/>22:41<sup class="reference" id="cite_ref-sfn_wwls20130624_41-0"><a href="#cite_note-sfn_wwls20130624-41">[34]</a></sup> </td> <td><a href="/wiki/Falcon_9_v1.1" title="Falcon 9 v1.1">F9 v1.1</a><br/>B1004 </td> <td><a href="/wiki/Cape_Canaveral_Space_Force_Station" title="Cape Canaveral Space Force Station">CCAFS</a>,<br/><a href="/wiki/Cape_Canaveral_Space_Launch_Complex_40" title="Cape Canaveral Space Launch Complex 40">SLC-40</a> </td> <td><a href="/wiki/SES-8" title="SES-8">SES-8</a><sup class="reference" id="cite_ref-sxManifest20120925_28-3"><a href="#cite_note-sxManifest20120925-28">[22]</a></sup><sup class="reference" id="cite_ref-spx-pr_42-0"><a href="#cite_note-spx-pr-42">[35]</a></sup><sup class="reference" id="cite_ref-aw20110323_43-0"><a href="#cite_note-aw20110323-43">[36]</a></sup> </td> <td>3,170 kg (6,990 lb) </td> <td><a href="/wiki/Geostationary_transfer_orbit" title="Geostationary transfer orbit">GTO</a> </td> <td><a href="/wiki/SES_S.A." 
title="SES S.A.">SES</a> </td> <td class="table-success" style="background: LightGreen; color: black; vertical-align: middle; text-align: center;">Success<sup class="reference" id="cite_ref-SNMissionStatus7_44-0"><a href="#cite_note-SNMissionStatus7-44">[37]</a></sup> </td> <td class="table-noAttempt" style="background: #ececec; color: black; vertical-align: middle; white-space: nowrap; text-align: center;">No attempt<br/><sup class="reference" id="cite_ref-sf10120131203_45-0"><a href="#cite_note-sf10120131203-45">[38]</a></sup> </td></tr> <tr> <td colspan="9">First <a href="/wiki/Geostationary_transfer_orbit" title="Geostationary transfer orbit">Geostationary transfer orbit</a> (GTO) launch for Falcon 9,<sup class="reference" id="cite_ref-spx-pr_42-1"><a href="#cite_note-spx-pr-42">[35]</a></sup> and first successful reignition of the second stage.<sup class="reference" id="cite_ref-46"><a href="#cite_note-46">[39]</a></sup> SES-8 was inserted into a <a href="/wiki/Geostationary_transfer_orbit" title="Geostationary transfer orbit">Super-Synchronous Transfer Orbit</a> of 79,341 km (49,300 mi) in apogee with an <a href="/wiki/Orbital_inclination" title="Orbital inclination">inclination</a> of 20.55° to the <a href="/wiki/Equator" title="Equator">equator</a>. </td></tr></tbody></table> ###Markdown You should able to see the columns names embedded in the table header elements `` as follows: ```Flight No.Date andtime (UTC)Version,Booster [b]Launch sitePayload[c]Payload massOrbitCustomerLaunchoutcomeBoosterlanding``` Next, we just need to iterate through the `` elements and apply the provided `extract_column_from_header()` to extract column name one by one ###Code column_names = [] temp = soup.find_all('th') for x in range(len(temp)): try: name = extract_column_from_header(temp[x]) if (name is not None and len(name) > 0): column_names.append(name) except: pass ###Output _____no_output_____ ###Markdown Check the extracted column names ###Code print(column_names) ###Output ['Flight No.', 'Date and time ( )', 'Launch site', 'Payload', 'Payload mass', 'Orbit', 'Customer', 'Launch outcome', 'Flight No.', 'Date and time ( )', 'Launch site', 'Payload', 'Payload mass', 'Orbit', 'Customer', 'Launch outcome', 'Flight No.', 'Date and time ( )', 'Launch site', 'Payload', 'Payload mass', 'Orbit', 'Customer', 'Launch outcome', 'Flight No.', 'Date and time ( )', 'Launch site', 'Payload', 'Payload mass', 'Orbit', 'Customer', 'Launch outcome', 'N/A', 'Flight No.', 'Date and time ( )', 'Launch site', 'Payload', 'Payload mass', 'Orbit', 'Customer', 'Launch outcome', 'Flight No.', 'Date and time ( )', 'Launch site', 'Payload', 'Payload mass', 'Orbit', 'Customer', 'Launch outcome', 'Flight No.', 'Date and time ( )', 'Launch site', 'Payload', 'Payload mass', 'Orbit', 'Customer', 'Launch outcome', 'FH 2', 'FH 3', 'Flight No.', 'Date and time ( )', 'Launch site', 'Payload', 'Payload mass', 'Orbit', 'Customer', 'Launch outcome', 'Date and time ( )', 'Launch site', 'Payload', 'Payload mass', 'Orbit', 'Customer', 'Launch outcome', 'Date and time ( )', 'Launch site', 'Payload', 'Orbit', 'Customer', 'Date and time ( )', 'Launch site', 'Payload', 'Orbit', 'Customer', 'Date and time ( )', 'Launch site', 'Payload', 'Orbit', 'Customer', 'Date and time ( )', 'Launch site', 'Payload', 'Orbit', 'Customer', 'Demo flights', 'logistics', 'Crewed missions', 'Commercial satellites', 'Scientific satellites', 'Military satellites', 'Rideshares', 'Current', 'In development', 'Retired', 'Cancelled', 'Spacecraft', 'Cargo', 'Crewed', 'Test 
vehicles', 'Current', 'Retired', 'Unflown', 'Orbital', 'Atmospheric', 'Landing sites', 'Other facilities', 'Support', 'Contracts', 'R&D programs', 'Key people', 'Related', 'General', 'General', 'Vehicles', 'Launches by rocket type', 'Launches by spaceport', 'Agencies, companies and facilities', 'Other mission lists and timelines'] ###Markdown TASK 3: Create a data frame by parsing the launch HTML tables We will create an empty dictionary with keys from the extracted column names in the previous task. Later, this dictionary will be converted into a Pandas dataframe ###Code launch_dict= dict.fromkeys(column_names) # Remove an irrelvant column del launch_dict['Date and time ( )'] launch_dict['Flight No.'] = [] launch_dict['Launch site'] = [] launch_dict['Payload'] = [] launch_dict['Payload mass'] = [] launch_dict['Orbit'] = [] launch_dict['Customer'] = [] launch_dict['Launch outcome'] = [] launch_dict['Version Booster']=[] launch_dict['Booster landing']=[] launch_dict['Date']=[] launch_dict['Time']=[] ###Output _____no_output_____ ###Markdown Next, we just need to fill up the `launch_dict` with launch records extracted from table rows. Usually, HTML tables in Wiki pages are likely to contain unexpected annotations and other types of noises, such as reference links `B0004.1[8]`, missing values `N/A [e]`, inconsistent formatting, etc. ###Code extracted_row = 0 #Extract each table for table_number,table in enumerate(soup.find_all('table',"wikitable plainrowheaders collapsible")): # get table row for rows in table.find_all("tr"): #check to see if first table heading is as number corresponding to launch a number if rows.th: if rows.th.string: flight_number=rows.th.string.strip() flag=flight_number.isdigit() else: flag=False #get table element row=rows.find_all('td') #if it is number save cells in a dictonary if flag: extracted_row += 1 # Flight Number value launch_dict["Flight No."].append(flight_number) datatimelist=date_time(row[0]) # Date value date = datatimelist[0].strip(',') launch_dict["Date"].append(date) # Time value time = datatimelist[1] launch_dict["Time"].append(time) # Booster version bv=booster_version(row[1]) if not(bv): bv=row[1].a.string launch_dict["Version Booster"].append(bv) # Launch Site launch_site = row[2].a.string launch_dict["Launch site"].append(launch_site) # Payload payload = row[3].a.string launch_dict["Payload"].append(payload) # Payload Mass payload_mass = get_mass(row[4]) launch_dict["Payload mass"].append(payload_mass) # Orbit orbit = row[5].a.string launch_dict["Orbit"].append(orbit) # Customer customer = row[6].a.string launch_dict["Customer"].append(customer) # Launch outcome launch_outcome = list(row[7].strings)[0] launch_dict["Launch outcome"].append(launch_outcome) # Booster landing booster_landing = landing_status(row[8]) launch_dict["Booster landing"].append(booster_landing) headings = [] for key,values in dict(launch_dict).items(): if key not in headings: headings.append(key) if values is None: del launch_dict[key] def pad_dict_list(dict_list, padel): lmax = 0 for lname in dict_list.keys(): lmax = max(lmax, len(dict_list[lname])) for lname in dict_list.keys(): ll = len(dict_list[lname]) if ll < lmax: dict_list[lname] += [padel] * (lmax - ll) return dict_list pad_dict_list(launch_dict,0) df = pd.DataFrame.from_dict(launch_dict) df.head() df.to_csv('spacex_web_scraped.csv', index=False) ###Output _____no_output_____
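###Markdown A closing note on the noise mentioned above: if any Wikipedia reference markers such as `[8]` or `[e]` survive into the scraped strings, a small helper can strip them before further analysis. This is a minimal sketch and not part of the original lab; `strip_reference_markers` and `df_clean` are just illustrative names.
```
import re

def strip_reference_markers(value):
    # Drop Wikipedia-style reference markers like "[8]" or "[e]" left over from scraping
    if isinstance(value, str):
        return re.sub(r"\[[0-9a-zA-Z]+\]", "", value).strip()
    return value

# Apply the cleaner to every cell of the scraped DataFrame
df_clean = df.applymap(strip_reference_markers)
```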
tutorial/English/00-PixyzOverview.ipynb
###Markdown Pixyz API takes into account the features of deep generative models- The Deep Neural Network that composes the generative model is hidden by the probability distribution - A framework that can separate defining DNNs and operating probability distributions(Distribution API) - Model types and regularization of random variables are described as objective functions(error functions) - A framework that receives probability distribution and define objective function(Loss API) - Deep generative models learn by defining objective function and using gradient descent method - A framework in which objective function and optimization algorithm can be set independently(Model API) ###Code from __future__ import print_function import torch import torch.utils.data from torch import nn, optim from torch.nn import functional as F from torchvision import datasets, transforms from tensorboardX import SummaryWriter from tqdm import tqdm if torch.cuda.is_available(): device = "cuda" else: device = "cpu" ###Output _____no_output_____ ###Markdown Overviewing relationships between each APIs through implementing VAE 1. Distribution API- A framework that can separate defining DNNs and operating probability distributions(Distribution API)- https://pixyz.readthedocs.io/en/latest/distributions.html We define these three probability distributionsPrior: $p(z) = N(z; 0, 1)$Generator: $p_{\theta}(x|z) = B(x; \lambda = g(z))$Inference: $q_{\phi}(z|x) = N(z; µ = f_{\mu}(x), \sigma^2 = f_{\sigma^2}(x))$ ###Code from pixyz.distributions import Normal, Bernoulli from pixyz.utils import print_latex ###Output _____no_output_____ ###Markdown Define prior probability distributionprior is a gaussian distribution with mean 0 and variance 1$p(z) = N(z; 0, 1)$ ###Code # prior z_dim = 64 prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["z"], features_shape=[z_dim], name="p_{prior}").to(device) print(prior) print_latex(prior) ###Output Distribution: p_{prior}(z) Network architecture: Normal( name=p_{prior}, distribution_name=Normal, var=['z'], cond_var=[], input_var=[], features_shape=torch.Size([64]) (loc): torch.Size([1, 64]) (scale): torch.Size([1, 64]) ) ###Markdown Define generator probability distributionGenerator is a bernoulli distribution over x given z$p_{\theta}(x|z) = B(x; \lambda = g(z))$Inherit pixyz.Distribution class to define a distribution with Deep neural networks ###Code x_dim = 784 # generative model p(x|z) # inherit pixyz.Distribution Bernoulli class class Generator(Bernoulli): def __init__(self): super(Generator, self).__init__(var=["x"], cond_var=["z"], name="p") self.fc1 = nn.Linear(z_dim, 512) self.fc2 = nn.Linear(512, 512) self.fc3 = nn.Linear(512, x_dim) def forward(self, z): h = F.relu(self.fc1(z)) h = F.relu(self.fc2(h)) return {"probs": torch.sigmoid(self.fc3(h))} p = Generator().to(device) print(p) print_latex(p) ###Output Distribution: p(x|z) Network architecture: Generator( name=p, distribution_name=Bernoulli, var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([]) (fc1): Linear(in_features=64, out_features=512, bias=True) (fc2): Linear(in_features=512, out_features=512, bias=True) (fc3): Linear(in_features=512, out_features=784, bias=True) ) ###Markdown Define Inference probability distributionInference is a gaussian distribution over z given x $\mu$ and $\sigma$ are parameterized by $\phi$$q_{\phi}(z|x) = N(z; µ = f_{\mu}(x), \sigma^2 = f_{\sigma^2}(x))$ ###Code # inference model q(z|x) # inherit pixyz.Distribution Normal class class Inference(Normal): def 
__init__(self): super(Inference, self).__init__(var=["z"], cond_var=["x"], name="q") self.fc1 = nn.Linear(x_dim, 512) self.fc2 = nn.Linear(512, 512) self.fc31 = nn.Linear(512, z_dim) self.fc32 = nn.Linear(512, z_dim) def forward(self, x): h = F.relu(self.fc1(x)) h = F.relu(self.fc2(h)) return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))} q = Inference().to(device) print(q) print_latex(q) ###Output Distribution: q(z|x) Network architecture: Inference( name=q, distribution_name=Normal, var=['z'], cond_var=['x'], input_var=['x'], features_shape=torch.Size([]) (fc1): Linear(in_features=784, out_features=512, bias=True) (fc2): Linear(in_features=512, out_features=512, bias=True) (fc31): Linear(in_features=512, out_features=64, bias=True) (fc32): Linear(in_features=512, out_features=64, bias=True) ) ###Markdown Sampling from a probability distribution- Sampling can be done by .sample() in defined Distribution class regardless of DNN architecture or distribution type- In Pixyz, samples are dict type(key is variable name, value is sample) $z\sim p(z)$ ###Code # z ~ p(z) prior_samples = prior.sample(batch_n=1) print(prior_samples) print(prior_samples.keys()) print(prior_samples['z'].shape) ###Output {'z': tensor([[-0.5438, 0.5853, 0.9415, 1.0591, 1.4031, -0.0520, 0.7588, -1.3387, 0.4586, 0.2402, 0.6899, -1.4430, 0.8306, 1.6975, 0.3532, -0.3980, -1.5879, 0.8015, -0.7279, 1.2902, 0.6434, -0.4299, -0.0147, -0.7769, -0.2355, 0.8801, -0.8768, -0.0911, -0.8140, -0.2988, -0.5511, -0.1526, -0.1219, -0.3171, -0.2924, 0.3731, 1.8659, 1.3274, 2.4092, -0.4386, 0.4175, -0.9096, 0.4095, 2.1348, 0.2795, 0.4564, -2.5351, 1.5394, -1.2816, 0.4562, 0.5690, -0.8027, -0.4947, -0.7010, -1.6218, -0.7865, -0.4135, -0.4891, 0.0258, -0.3843, 0.8516, -0.1511, -0.0327, -0.9058]], device='cuda:0')} dict_keys(['z']) torch.Size([1, 64]) ###Markdown Define joint distribution- joint distribution can be difined by multiplying distributions - Sampling can be done by .sample() $p_{\theta}(x, z) = p_{\theta}(x|z)p(z)$ ###Code p_joint = p * prior print(p_joint) print_latex(p_joint) ###Output Distribution: p(x,z) = p(x|z)p_{prior}(z) Network architecture: Normal( name=p_{prior}, distribution_name=Normal, var=['z'], cond_var=[], input_var=[], features_shape=torch.Size([64]) (loc): torch.Size([1, 64]) (scale): torch.Size([1, 64]) ) Generator( name=p, distribution_name=Bernoulli, var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([]) (fc1): Linear(in_features=64, out_features=512, bias=True) (fc2): Linear(in_features=512, out_features=512, bias=True) (fc3): Linear(in_features=512, out_features=784, bias=True) ) ###Markdown Sampling from a joint distribution $x, z \sim p_{\theta}(x, z) $ ###Code p_joint_samples = p_joint.sample(batch_n=1) print(p_joint_samples) print(p_joint_samples.keys()) print(p_joint_samples['x'].shape) print(p_joint_samples['z'].shape) ###Output {'z': tensor([[ 0.1798, -0.2534, 1.9239, 0.7444, -0.2541, -0.5951, 1.1151, 0.4059, 0.5807, -0.8940, -0.7727, 0.1663, -0.0572, 2.3262, 2.4288, 1.1539, -1.7565, -0.0071, -0.7027, 0.9958, -0.5287, -1.2675, 0.7315, 0.6763, 0.2179, 0.6958, 0.2657, 0.2117, -1.2440, -0.1694, 0.9022, -1.0702, -0.3973, 0.7750, -1.2522, 0.2898, 0.3006, 0.7156, -0.0205, -0.2505, -1.0893, -1.0576, -1.1959, -0.3639, -0.5362, 0.7473, 0.0541, 2.0923, -0.4051, 0.8123, 1.8256, 0.5847, 1.4084, -0.3716, -1.0299, 1.4635, -0.0438, -0.0964, 0.4627, -1.2500, -2.2660, -0.3602, 1.6857, -0.4131]], device='cuda:0'), 'x': tensor([[1., 0., 1., 0., 0., 1., 0., 1., 1., 0., 1., 1., 1., 1., 1., 
1., 0., 1., 0., 1., 0., 1., 0., 0., 0., 1., 1., 1., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 1., 1., 0., 0., 1., 0., 0., 0., 1., 1., 0., 0., 1., 0., 1., 1., 0., 1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 0., 0., 1., 1., 0., 1., 1., 0., 0., 1., 0., 1., 1., 0., 1., 1., 1., 1., 1., 1., 0., 0., 0., 1., 0., 1., 1., 1., 0., 0., 1., 0., 1., 0., 1., 1., 1., 0., 0., 1., 0., 0., 1., 0., 0., 1., 1., 0., 1., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 0., 1., 1., 0., 0., 1., 1., 1., 0., 1., 0., 0., 0., 0., 1., 1., 1., 1., 0., 0., 1., 1., 0., 1., 1., 0., 1., 0., 1., 1., 0., 1., 1., 0., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 0., 0., 0., 1., 1., 0., 1., 0., 0., 0., 0., 1., 0., 1., 0., 1., 0., 0., 1., 1., 1., 1., 0., 0., 0., 0., 1., 1., 1., 0., 0., 1., 1., 0., 1., 0., 1., 1., 0., 0., 0., 0., 0., 0., 1., 0., 1., 0., 0., 0., 1., 0., 1., 1., 1., 1., 0., 1., 1., 1., 0., 0., 0., 0., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 0., 1., 0., 1., 0., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 0., 0., 0., 1., 0., 0., 0., 0., 1., 1., 0., 1., 0., 0., 0., 0., 1., 0., 1., 0., 1., 1., 0., 1., 0., 1., 1., 1., 0., 1., 0., 0., 1., 1., 1., 0., 1., 1., 1., 0., 0., 1., 0., 0., 1., 1., 0., 0., 1., 0., 0., 0., 1., 1., 0., 1., 1., 1., 1., 0., 1., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 1., 0., 0., 0., 0., 1., 1., 1., 1., 0., 0., 0., 0., 1., 0., 0., 0., 1., 1., 0., 0., 0., 1., 1., 0., 0., 0., 1., 1., 1., 0., 1., 1., 0., 0., 1., 0., 0., 1., 0., 0., 1., 1., 0., 1., 0., 0., 1., 0., 1., 1., 1., 0., 0., 0., 1., 1., 1., 0., 0., 0., 0., 1., 0., 0., 0., 1., 1., 1., 0., 1., 0., 0., 1., 1., 1., 0., 1., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 1., 0., 0., 1., 1., 1., 0., 1., 1., 0., 0., 1., 0., 1., 1., 0., 1., 0., 1., 0., 1., 0., 0., 0., 1., 0., 1., 0., 1., 0., 0., 1., 1., 1., 1., 0., 0., 1., 0., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 0., 0., 1., 0., 1., 1., 0., 1., 1., 1., 0., 1., 1., 0., 1., 1., 0., 1., 1., 1., 0., 0., 1., 1., 1., 1., 0., 1., 0., 0., 1., 0., 1., 1., 1., 0., 0., 1., 1., 1., 0., 1., 1., 1., 0., 0., 0., 0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 0., 1., 0., 0., 1., 1., 1., 1., 0., 0., 0., 0., 1., 0., 1., 0., 1., 1., 0., 0., 1., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 1., 0., 1., 0., 1., 1., 0., 1., 0., 0., 0., 1., 0., 1., 1., 0., 1., 1., 1., 1., 1., 1., 1., 0., 0., 1., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 1., 1., 0., 1., 1., 1., 0., 0., 0., 1., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 1., 0., 1., 1., 1., 0., 1., 0., 1., 1., 0., 1., 1., 0., 0., 0., 0., 0., 1., 0., 0., 0., 1., 0., 1., 0., 1., 1., 1., 1., 1., 1., 1., 1., 0., 1., 1., 1., 0., 0., 1., 0., 1., 0., 1., 0., 0., 1., 1., 1., 0., 1., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 0., 1., 1., 1., 0., 0., 1., 0., 1., 0., 1., 1., 0., 0., 1., 1., 1., 1., 1., 0., 0., 0., 1., 1., 1., 0., 0., 0., 1., 1., 0., 0., 1., 0., 1., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 1., 0., 1., 0., 1., 1., 0., 1., 0., 1., 1., 1., 1., 1., 1., 0., 1., 1., 0., 0., 1., 0., 0., 1., 0., 1., 1.]], device='cuda:0')} dict_keys(['z', 'x']) torch.Size([1, 784]) torch.Size([1, 64]) ###Markdown For more detailed Distribution API Turorial- 01-DistributionAPITutorial.ipynb 2. 
Loss API- A framework that receives probability distribution and define objective function(Loss API) - pixyz.Loss receives Distribution and defines Loss - Arithmetic operations can be done between Loss classes, so any Loss can be designed - -> Paper's formula can be put into codes easily- Loss value is evaluated by inputting the data - Each Loss is treated as a symbol - Independent of data or DNN, we can design probabilistic model explicitly ->Define-and-run like framework VAE Loss$$\mathcal { L } _ { \mathrm { VAE } } ( \theta , \phi ) = \mathbb { E } _ { p_{data}( x ) } \left [D _ { \mathrm { KL } } \left[ q _ \phi ( z | x ) \| p ( z ) \right] - \mathbb { E } _ { q _ { \phi } ( z | x ) } \left[\log p _ { \theta } ( x | z ) \right]\right]$$ Define loss using pixyz.loss $D _ { \mathrm { KL } } \left[ q _ \phi ( z | x ) \| p ( z ) \right]$ ###Code from pixyz.losses import KullbackLeibler kl = KullbackLeibler(q, prior) print_latex(kl) reconst = -p.log_prob().expectation(q) print_latex(reconst) ###Output _____no_output_____ ###Markdown Operations between Loss classes ###Code vae_loss = (kl + reconst).mean() print_latex(vae_loss) ###Output _____no_output_____ ###Markdown Input data and loss is evaluated- loss is calculated by .eval() ###Code # dummy_x for data dummy_x = torch.randn([4, 784]).to(device) vae_loss.eval({"x": dummy_x}) ###Output _____no_output_____ ###Markdown For more detailed Loss API Turorial- 02-LossAPITutorial.ipynb 3. Model API- A framework in which objective function and optimization algorithm can be set independently- Set loss and optimization algorithm, then train with data ###Code from pixyz.models import Model model = Model(loss=vae_loss, distributions=[p, q], optimizer=optim.Adam, optimizer_params={"lr": 1e-3}) print(model) print_latex(model) dummy_x = torch.randn([10, 784]) def train_dummy(epoch): global dummy_x dummy_x = dummy_x.to(device) loss = model.train({"x": dummy_x}) print('Epoch: {} Train Loss: {:4f}'.format(epoch, loss)) for epoch in range(10): train_dummy(epoch) ###Output Epoch: 0 Train Loss: 554.029114 Epoch: 1 Train Loss: 530.868591 Epoch: 2 Train Loss: 499.061432 Epoch: 3 Train Loss: 442.698639 Epoch: 4 Train Loss: 340.971588 Epoch: 5 Train Loss: 176.686768 Epoch: 6 Train Loss: 26.550779 Epoch: 7 Train Loss: -125.541313 Epoch: 8 Train Loss: -347.148285 Epoch: 9 Train Loss: -607.047791 ###Markdown For more detailed Model API Turorial- 03-ModelAPITutorial.ipynb Training VAE with MNIST dataset Install modules ###Code from __future__ import print_function import torch import torch.utils.data from torch import nn, optim from torch.nn import functional as F import torchvision from torchvision import datasets, transforms from tensorboardX import SummaryWriter from tqdm import tqdm import numpy as np import matplotlib.pyplot as plt %matplotlib inline batch_size = 256 epochs = 3 seed = 1 torch.manual_seed(seed) if torch.cuda.is_available(): device = "cuda" else: device = "cpu" ###Output _____no_output_____ ###Markdown Prepare MNIST dataset ###Code root = '../data' transform = transforms.Compose([transforms.ToTensor(), transforms.Lambda(lambd=lambda x: x.view(-1))]) kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True} train_loader = torch.utils.data.DataLoader( datasets.MNIST(root=root, train=True, transform=transform, download=True), shuffle=True, **kwargs) test_loader = torch.utils.data.DataLoader( datasets.MNIST(root=root, train=False, transform=transform), shuffle=False, **kwargs) ###Output _____no_output_____ ###Markdown Install Pixyz 
modules ###Code from pixyz.distributions import Normal, Bernoulli from pixyz.losses import KullbackLeibler, Expectation as E from pixyz.models import Model from pixyz.utils import print_latex ###Output _____no_output_____ ###Markdown Define probability distributions ###Code x_dim = 784 z_dim = 64 # inference model q(z|x) class Inference(Normal): def __init__(self): super(Inference, self).__init__(var=["z"], cond_var=["x"], name="q") self.fc1 = nn.Linear(x_dim, 512) self.fc2 = nn.Linear(512, 512) self.fc31 = nn.Linear(512, z_dim) self.fc32 = nn.Linear(512, z_dim) def forward(self, x): h = F.relu(self.fc1(x)) h = F.relu(self.fc2(h)) return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))} # generative model p(x|z) class Generator(Bernoulli): def __init__(self): super(Generator, self).__init__(var=["x"], cond_var=["z"], name="p") self.fc1 = nn.Linear(z_dim, 512) self.fc2 = nn.Linear(512, 512) self.fc3 = nn.Linear(512, x_dim) def forward(self, z): h = F.relu(self.fc1(z)) h = F.relu(self.fc2(h)) return {"probs": torch.sigmoid(self.fc3(h))} p = Generator().to(device) q = Inference().to(device) prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["z"], features_shape=[z_dim], name="p_{prior}").to(device) print(prior) print_latex(prior) print(p) print_latex(p) print(q) print_latex(q) ###Output Distribution: q(z|x) Network architecture: Inference( name=q, distribution_name=Normal, var=['z'], cond_var=['x'], input_var=['x'], features_shape=torch.Size([]) (fc1): Linear(in_features=784, out_features=512, bias=True) (fc2): Linear(in_features=512, out_features=512, bias=True) (fc31): Linear(in_features=512, out_features=64, bias=True) (fc32): Linear(in_features=512, out_features=64, bias=True) ) ###Markdown Define Loss ###Code kl = KullbackLeibler(q, prior) reconst = -p.log_prob().expectation(q) vae_loss = (kl + reconst).mean() print_latex(vae_loss) ###Output _____no_output_____ ###Markdown Set optimization algorithm and model ###Code model = Model(loss=vae_loss, distributions=[p, q], optimizer=optim.Adam, optimizer_params={"lr": 1e-3}) print(model) print_latex(model) def train(epoch): train_loss = 0 #for x, _ in tqdm(train_loader): for x, _ in train_loader: x = x.to(device) loss = model.train({"x": x}) train_loss += loss train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset) print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss)) return train_loss def test(epoch): test_loss = 0 #for x, _ in tqdm(test_loader): for x, _ in test_loader: x = x.to(device) loss = model.test({"x": x}) test_loss += loss test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset) print('Test loss: {:.4f}'.format(test_loss)) return test_loss ###Output _____no_output_____ ###Markdown Reconstruction ###Code def plot_reconstrunction(x): with torch.no_grad(): z = q.sample({"x": x}, return_all=False) recon_batch = p.sample_mean(z).view(-1, 1, 28, 28) comparison = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu() return comparison ###Output _____no_output_____ ###Markdown generate images from latent variable space ###Code def plot_image_from_latent(z_sample): with torch.no_grad(): sample = p.sample_mean({"z": z_sample}).view(-1, 1, 28, 28).cpu() return sample # functions to show an image def imshow(img): npimg = img.numpy() plt.imshow(np.transpose(npimg, (1, 2, 0))) plt.show() # z_sample for generating imgs from prior z_sample = 0.5 * torch.randn(64, z_dim).to(device) # fixed _x for watching reconstruction improvement _x, _ = iter(test_loader).next() _x = 
_x.to(device) for epoch in range(1, epochs + 1): train_loss = train(epoch) test_loss = test(epoch) recon = plot_reconstrunction(_x[:8]) sample = plot_image_from_latent(z_sample) print('Epoch: {}'.format(epoch)) print('Reconstruction') imshow(torchvision.utils.make_grid(recon)) print('generate from prior z:') imshow(torchvision.utils.make_grid(sample)) ###Output Epoch: 1 Train loss: 201.0661 Test loss: 172.5077 Epoch: 1 Reconstruction
ICA demo.ipynb
###Markdown 8 speakers, 8 microphones: each microphone records a mixture of the speakers, giving an observed data set x = As, where x is known while A and S are unknown; A is called the mixing matrix.

Example below: generate a few source curves (a cosine, a sine and a square wave), add noise to obtain A, then apply a random integer matrix W as a linear transform of A to get X, simulating the combined components of independent signals received by several sensors. Decomposing X back into its sources is a blind source separation problem.

Application scenario: signal extraction; the extracted signals (features) must have regular structure and must not follow a Gaussian (normal) distribution.

Difference between PCA and ICA: PCA extracts orthogonal combinations of Gaussian variables from the signal as principal components for dimensionality reduction; ICA extracts non-Gaussian signals.
###Code
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import FastICA

G = [[np.cos(0.1*i) for i in range(20, 3600+20, 5)],
     [np.sin(0.02*i) for i in range(60, 3600+60, 5)],
     [np.sign(np.sin(0.005*i)) for i in range(80, 3600+80, 5)]]
G = np.array(G).T
A = G + 0.2 * np.random.normal(size=G.shape)  # Add noise
W = np.random.randint(1, 10, (3, 3))
X = np.dot(A, W)

for i in range(G.shape[1]):
    plt.subplot(3, 1, i+1)
    plt.plot(G[:, i].T)

for i in range(A.shape[1]):
    plt.subplot(3, 1, i+1)
    plt.plot(A[:, i].T)

for i in range(X.shape[1]):
    plt.subplot(3, 1, i+1)
    plt.plot(X[:, i].T)

for i in range(X.shape[1]):
    plt.plot(X[:, i].T)

ica = FastICA(n_components=3)
S_ = ica.fit_transform(X)  # Reconstruct signals
A_ = ica.mixing_  # Get estimated mixing matrix
S_.shape

# Check that the original matrix can be rebuilt from the decomposition
assert np.allclose(X, np.dot(S_, A_.T) + ica.mean_)

# S is the reconstructed (re-mixed) matrix
S = np.dot(S_, A_.T) + ica.mean_
np.allclose(S, X)

ica.mean_

# Visualize the reconstructed matrix
for i in range(S.shape[1]):
    plt.plot(S[:, i].T)

ica.n_iter_

# Visualize the independent components
for i in range(S_.shape[1]):
    plt.subplot(3, 1, i+1)
    plt.plot(S_[:, i].T)

W
A_.T
###Output _____no_output_____
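###Markdown To make the PCA/ICA contrast above concrete, here is a minimal sketch (not part of the original demo) that decomposes the same mixed matrix `X` with PCA. PCA finds orthogonal directions of maximum variance, so its components generally remain mixtures of the sources, while FastICA recovers the non-Gaussian source signals themselves.
```
from sklearn.decomposition import PCA

pca = PCA(n_components=3)
H = pca.fit_transform(X)  # principal components of the mixed observations

for i in range(H.shape[1]):
    plt.subplot(3, 1, i + 1)
    plt.plot(H[:, i])
plt.show()
```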
5 - Sequence Models/Operations on word vectors - v2.ipynb
###Markdown Operations on word vectors
Welcome to your first assignment of this week! Because word embeddings are very computationally expensive to train, most ML practitioners will load a pre-trained set of embeddings. **After this assignment you will be able to:**- Load pre-trained word vectors, and measure similarity using cosine similarity- Use word embeddings to solve word analogy problems such as Man is to Woman as King is to ______. - Modify word embeddings to reduce their gender bias Let's get started! Run the following cell to load the packages you will need.
###Code import numpy as np from w2v_utils import * ###Output _____no_output_____
###Markdown Next, let's load the word vectors. For this assignment, we will use 50-dimensional GloVe vectors to represent words. Run the following cell to load the `word_to_vec_map`.
###Code words, word_to_vec_map = read_glove_vecs('../../readonly/glove.6B.50d.txt') ###Output _____no_output_____
###Markdown You've loaded:- `words`: set of words in the vocabulary.- `word_to_vec_map`: dictionary mapping words to their GloVe vector representation. You've seen that one-hot vectors do not do a good job capturing what words are similar. GloVe vectors provide much more useful information about the meaning of individual words. Let's now see how you can use GloVe vectors to decide how similar two words are. 1 - Cosine similarity To measure how similar two words are, we need a way to measure the degree of similarity between two embedding vectors for the two words. Given two vectors $u$ and $v$, cosine similarity is defined as follows: $$\text{CosineSimilarity(u, v)} = \frac {u . v} {||u||_2 ||v||_2} = cos(\theta) \tag{1}$$where $u.v$ is the dot product (or inner product) of two vectors, $||u||_2$ is the norm (or length) of the vector $u$, and $\theta$ is the angle between $u$ and $v$. This similarity depends on the angle between $u$ and $v$. If $u$ and $v$ are very similar, their cosine similarity will be close to 1; if they are dissimilar, the cosine similarity will take a smaller value. **Figure 1**: The cosine of the angle between two vectors is a measure of how similar they are **Exercise**: Implement the function `cosine_similarity()` to evaluate similarity between word vectors. **Reminder**: The norm of $u$ is defined as $ ||u||_2 = \sqrt{\sum_{i=1}^{n} u_i^2}$
###Code # GRADED FUNCTION: cosine_similarity def cosine_similarity(u, v): """ Cosine similarity reflects the degree of similarity between u and v Arguments: u -- a word vector of shape (n,) v -- a word vector of shape (n,) Returns: cosine_similarity -- the cosine similarity between u and v defined by the formula above.
""" distance = 0.0 ### START CODE HERE ### # Compute the dot product between u and v (≈1 line) dot = np.dot(u, v) # Compute the L2 norm of u (≈1 line) norm_u = np.sqrt(np.sum(np.square(u))) # Compute the L2 norm of v (≈1 line) norm_v = np.sqrt(np.sum(np.square(v))) # Compute the cosine similarity defined by formula (1) (≈1 line) cosine_similarity = np.divide(dot, norm_u*norm_v) ### END CODE HERE ### return cosine_similarity father = word_to_vec_map["father"] mother = word_to_vec_map["mother"] ball = word_to_vec_map["ball"] crocodile = word_to_vec_map["crocodile"] france = word_to_vec_map["france"] italy = word_to_vec_map["italy"] paris = word_to_vec_map["paris"] rome = word_to_vec_map["rome"] print("cosine_similarity(father, mother) = ", cosine_similarity(father, mother)) print("cosine_similarity(ball, crocodile) = ",cosine_similarity(ball, crocodile)) print("cosine_similarity(france - paris, rome - italy) = ",cosine_similarity(france - paris, rome - italy)) ###Output cosine_similarity(father, mother) = 0.890903844289 cosine_similarity(ball, crocodile) = 0.274392462614 cosine_similarity(france - paris, rome - italy) = -0.675147930817 ###Markdown **Expected Output**: **cosine_similarity(father, mother)** = 0.890903844289 **cosine_similarity(ball, crocodile)** = 0.274392462614 **cosine_similarity(france - paris, rome - italy)** = -0.675147930817 After you get the correct expected output, please feel free to modify the inputs and measure the cosine similarity between other pairs of words! Playing around the cosine similarity of other inputs will give you a better sense of how word vectors behave. 2 - Word analogy taskIn the word analogy task, we complete the sentence "*a* is to *b* as *c* is to **____**". An example is '*man* is to *woman* as *king* is to *queen*' . In detail, we are trying to find a word *d*, such that the associated word vectors $e_a, e_b, e_c, e_d$ are related in the following manner: $e_b - e_a \approx e_d - e_c$. We will measure the similarity between $e_b - e_a$ and $e_d - e_c$ using cosine similarity. **Exercise**: Complete the code below to be able to perform word analogies! ###Code # GRADED FUNCTION: complete_analogy def complete_analogy(word_a, word_b, word_c, word_to_vec_map): """ Performs the word analogy task as explained above: a is to b as c is to ____. Arguments: word_a -- a word, string word_b -- a word, string word_c -- a word, string word_to_vec_map -- dictionary that maps words to their corresponding vectors. Returns: best_word -- the word such that v_b - v_a is close to v_best_word - v_c, as measured by cosine similarity """ # convert words to lower case word_a, word_b, word_c = word_a.lower(), word_b.lower(), word_c.lower() ### START CODE HERE ### # Get the word embeddings v_a, v_b and v_c (≈1-3 lines) e_a, e_b, e_c = word_to_vec_map[word_a], word_to_vec_map[word_b], word_to_vec_map[word_c] ### END CODE HERE ### words = word_to_vec_map.keys() max_cosine_sim = -100 # Initialize max_cosine_sim to a large negative number best_word = None # Initialize best_word with None, it will help keep track of the word to output # loop over the whole word vector set for w in words: # to avoid best_word being one of the input words, pass on them. 
if w in [word_a, word_b, word_c] : continue ### START CODE HERE ### # Compute cosine similarity between the vector (e_b - e_a) and the vector ((w's vector representation) - e_c) (≈1 line) cosine_sim = cosine_similarity(e_b - e_a, word_to_vec_map[w] - e_c) # If the cosine_sim is more than the max_cosine_sim seen so far, # then: set the new max_cosine_sim to the current cosine_sim and the best_word to the current word (≈3 lines) if cosine_sim > max_cosine_sim: max_cosine_sim = cosine_sim best_word = w ### END CODE HERE ### return best_word ###Output _____no_output_____ ###Markdown Run the cell below to test your code, this may take 1-2 minutes. ###Code triads_to_try = [('man', 'president', 'woman')] for triad in triads_to_try: print ('{} -> {} :: {} -> {}'.format( *triad, complete_analogy(*triad,word_to_vec_map))) ###Output _____no_output_____ ###Markdown **Expected Output**: **italy -> italian** :: spain -> spanish **india -> delhi** :: japan -> tokyo **man -> woman ** :: boy -> girl **small -> smaller ** :: large -> larger Once you get the correct expected output, please feel free to modify the input cells above to test your own analogies. Try to find some other analogy pairs that do work, but also find some where the algorithm doesn't give the right answer: For example, you can try small->smaller as big->?. Congratulations!You've come to the end of this assignment. Here are the main points you should remember:- Cosine similarity a good way to compare similarity between pairs of word vectors. (Though L2 distance works too.) - For NLP applications, using a pre-trained set of word vectors from the internet is often a good way to get started. Even though you have finished the graded portions, we recommend you take a look too at the rest of this notebook. Congratulations on finishing the graded portions of this notebook! 3 - Debiasing word vectors (OPTIONAL/UNGRADED) In the following exercise, you will examine gender biases that can be reflected in a word embedding, and explore algorithms for reducing the bias. In addition to learning about the topic of debiasing, this exercise will also help hone your intuition about what word vectors are doing. This section involves a bit of linear algebra, though you can probably complete it even without being expert in linear algebra, and we encourage you to give it a shot. This portion of the notebook is optional and is not graded. Lets first see how the GloVe word embeddings relate to gender. You will first compute a vector $g = e_{woman}-e_{man}$, where $e_{woman}$ represents the word vector corresponding to the word *woman*, and $e_{man}$ corresponds to the word vector corresponding to the word *man*. The resulting vector $g$ roughly encodes the concept of "gender". (You might get a more accurate representation if you compute $g_1 = e_{mother}-e_{father}$, $g_2 = e_{girl}-e_{boy}$, etc. and average over them. But just using $e_{woman}-e_{man}$ will give good enough results for now.) ###Code g = word_to_vec_map['woman'] - word_to_vec_map['man'] print(g) ###Output _____no_output_____ ###Markdown Now, you will consider the cosine similarity of different words with $g$. Consider what a positive value of similarity means vs a negative cosine similarity. 
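As a quick aside, the averaged bias direction mentioned above (built from several female/male pairs rather than just `woman` and `man`) could be computed as in the minimal sketch below; the word pairs are only examples, and the rest of the notebook keeps using `g`.
```
pairs = [("woman", "man"), ("mother", "father"), ("girl", "boy")]
g_avg = np.mean([word_to_vec_map[a] - word_to_vec_map[b] for a, b in pairs], axis=0)
print(cosine_similarity(g_avg, g))  # expected to be close to 1, i.e. a very similar direction
```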
###Code print ('List of names and their similarities with constructed vector:') # girls and boys name name_list = ['john', 'marie', 'sophie', 'ronaldo', 'priya', 'rahul', 'danielle', 'reza', 'katy', 'yasmin'] for w in name_list: print (w, cosine_similarity(word_to_vec_map[w], g)) ###Output _____no_output_____ ###Markdown As you can see, female first names tend to have a positive cosine similarity with our constructed vector $g$, while male first names tend to have a negative cosine similarity. This is not suprising, and the result seems acceptable. But let's try with some other words. ###Code print('Other words and their similarities:') word_list = ['lipstick', 'guns', 'science', 'arts', 'literature', 'warrior','doctor', 'tree', 'receptionist', 'technology', 'fashion', 'teacher', 'engineer', 'pilot', 'computer', 'singer'] for w in word_list: print (w, cosine_similarity(word_to_vec_map[w], g)) ###Output _____no_output_____ ###Markdown Do you notice anything surprising? It is astonishing how these results reflect certain unhealthy gender stereotypes. For example, "computer" is closer to "man" while "literature" is closer to "woman". Ouch! We'll see below how to reduce the bias of these vectors, using an algorithm due to [Boliukbasi et al., 2016](https://arxiv.org/abs/1607.06520). Note that some word pairs such as "actor"/"actress" or "grandmother"/"grandfather" should remain gender specific, while other words such as "receptionist" or "technology" should be neutralized, i.e. not be gender-related. You will have to treat these two type of words differently when debiasing. 3.1 - Neutralize bias for non-gender specific words The figure below should help you visualize what neutralizing does. If you're using a 50-dimensional word embedding, the 50 dimensional space can be split into two parts: The bias-direction $g$, and the remaining 49 dimensions, which we'll call $g_{\perp}$. In linear algebra, we say that the 49 dimensional $g_{\perp}$ is perpendicular (or "othogonal") to $g$, meaning it is at 90 degrees to $g$. The neutralization step takes a vector such as $e_{receptionist}$ and zeros out the component in the direction of $g$, giving us $e_{receptionist}^{debiased}$. Even though $g_{\perp}$ is 49 dimensional, given the limitations of what we can draw on a screen, we illustrate it using a 1 dimensional axis below. **Figure 2**: The word vector for "receptionist" represented before and after applying the neutralize operation. **Exercise**: Implement `neutralize()` to remove the bias of words such as "receptionist" or "scientist". Given an input embedding $e$, you can use the following formulas to compute $e^{debiased}$: $$e^{bias\_component} = \frac{e \cdot g}{||g||_2^2} * g\tag{2}$$$$e^{debiased} = e - e^{bias\_component}\tag{3}$$If you are an expert in linear algebra, you may recognize $e^{bias\_component}$ as the projection of $e$ onto the direction $g$. If you're not an expert in linear algebra, don't worry about this.<!-- **Reminder**: a vector $u$ can be split into two parts: its projection over a vector-axis $v_B$ and its projection over the axis orthogonal to $v$:$$u = u_B + u_{\perp}$$where : $u_B = $ and $ u_{\perp} = u - u_B $!--> ###Code def neutralize(word, g, word_to_vec_map): """ Removes the bias of "word" by projecting it on the space orthogonal to the bias axis. This function ensures that gender neutral words are zero in the gender subspace. 
Arguments: word -- string indicating the word to debias g -- numpy-array of shape (50,), corresponding to the bias axis (such as gender) word_to_vec_map -- dictionary mapping words to their corresponding vectors. Returns: e_debiased -- neutralized word vector representation of the input "word" """ ### START CODE HERE ### # Select word vector representation of "word". Use word_to_vec_map. (≈ 1 line) e = None # Compute e_biascomponent using the formula give above. (≈ 1 line) e_biascomponent = None # Neutralize e by substracting e_biascomponent from it # e_debiased should be equal to its orthogonal projection. (≈ 1 line) e_debiased = None ### END CODE HERE ### return e_debiased e = "receptionist" print("cosine similarity between " + e + " and g, before neutralizing: ", cosine_similarity(word_to_vec_map["receptionist"], g)) e_debiased = neutralize("receptionist", g, word_to_vec_map) print("cosine similarity between " + e + " and g, after neutralizing: ", cosine_similarity(e_debiased, g)) ###Output _____no_output_____ ###Markdown **Expected Output**: The second result is essentially 0, up to numerical roundof (on the order of $10^{-17}$). **cosine similarity between receptionist and g, before neutralizing:** : 0.330779417506 **cosine similarity between receptionist and g, after neutralizing:** : -3.26732746085e-17 3.2 - Equalization algorithm for gender-specific wordsNext, lets see how debiasing can also be applied to word pairs such as "actress" and "actor." Equalization is applied to pairs of words that you might want to have differ only through the gender property. As a concrete example, suppose that "actress" is closer to "babysit" than "actor." By applying neutralizing to "babysit" we can reduce the gender-stereotype associated with babysitting. But this still does not guarantee that "actor" and "actress" are equidistant from "babysit." The equalization algorithm takes care of this. The key idea behind equalization is to make sure that a particular pair of words are equi-distant from the 49-dimensional $g_\perp$. The equalization step also ensures that the two equalized steps are now the same distance from $e_{receptionist}^{debiased}$, or from any other work that has been neutralized. In pictures, this is how equalization works: The derivation of the linear algebra to do this is a bit more complex. (See Bolukbasi et al., 2016 for details.) But the key equations are: $$ \mu = \frac{e_{w1} + e_{w2}}{2}\tag{4}$$ $$ \mu_{B} = \frac {\mu \cdot \text{bias_axis}}{||\text{bias_axis}||_2^2} *\text{bias_axis}\tag{5}$$ $$\mu_{\perp} = \mu - \mu_{B} \tag{6}$$$$ e_{w1B} = \frac {e_{w1} \cdot \text{bias_axis}}{||\text{bias_axis}||_2^2} *\text{bias_axis}\tag{7}$$ $$ e_{w2B} = \frac {e_{w2} \cdot \text{bias_axis}}{||\text{bias_axis}||_2^2} *\text{bias_axis}\tag{8}$$$$e_{w1B}^{corrected} = \sqrt{ |{1 - ||\mu_{\perp} ||^2_2} |} * \frac{e_{\text{w1B}} - \mu_B} {|(e_{w1} - \mu_{\perp}) - \mu_B)|} \tag{9}$$$$e_{w2B}^{corrected} = \sqrt{ |{1 - ||\mu_{\perp} ||^2_2} |} * \frac{e_{\text{w2B}} - \mu_B} {|(e_{w2} - \mu_{\perp}) - \mu_B)|} \tag{10}$$$$e_1 = e_{w1B}^{corrected} + \mu_{\perp} \tag{11}$$$$e_2 = e_{w2B}^{corrected} + \mu_{\perp} \tag{12}$$**Exercise**: Implement the function below. Use the equations above to get the final equalized version of the pair of words. Good luck! ###Code def equalize(pair, bias_axis, word_to_vec_map): """ Debias gender specific words by following the equalize method described in the figure above. Arguments: pair -- pair of strings of gender specific words to debias, e.g. 
("actress", "actor") bias_axis -- numpy-array of shape (50,), vector corresponding to the bias axis, e.g. gender word_to_vec_map -- dictionary mapping words to their corresponding vectors Returns e_1 -- word vector corresponding to the first word e_2 -- word vector corresponding to the second word """ ### START CODE HERE ### # Step 1: Select word vector representation of "word". Use word_to_vec_map. (≈ 2 lines) w1, w2 = None e_w1, e_w2 = None # Step 2: Compute the mean of e_w1 and e_w2 (≈ 1 line) mu = None # Step 3: Compute the projections of mu over the bias axis and the orthogonal axis (≈ 2 lines) mu_B = None mu_orth = None # Step 4: Use equations (7) and (8) to compute e_w1B and e_w2B (≈2 lines) e_w1B = None e_w2B = None # Step 5: Adjust the Bias part of e_w1B and e_w2B using the formulas (9) and (10) given above (≈2 lines) corrected_e_w1B = None corrected_e_w2B = None # Step 6: Debias by equalizing e1 and e2 to the sum of their corrected projections (≈2 lines) e1 = None e2 = None ### END CODE HERE ### return e1, e2 print("cosine similarities before equalizing:") print("cosine_similarity(word_to_vec_map[\"man\"], gender) = ", cosine_similarity(word_to_vec_map["man"], g)) print("cosine_similarity(word_to_vec_map[\"woman\"], gender) = ", cosine_similarity(word_to_vec_map["woman"], g)) print() e1, e2 = equalize(("man", "woman"), g, word_to_vec_map) print("cosine similarities after equalizing:") print("cosine_similarity(e1, gender) = ", cosine_similarity(e1, g)) print("cosine_similarity(e2, gender) = ", cosine_similarity(e2, g)) ###Output _____no_output_____
Day7 OOP/.ipynb_checkpoints/operator_overloading -checkpoint.ipynb
###Markdown Outline:
- Operator Overloading - comparison
- String representation

Operator Overloading: Comparison

object equality
###Code
class Customer:
    def __init__(self, name, balance):
        self.name, self.balance = name, balance

cust1 = Customer("Muneed ul Hassan", 1000)
cust2 = Customer('Mehmood ul Hassan', 5000)
cust1 == cust2

class Customer:
    def __init__(self, name, balance):
        self.name, self.balance = name, balance

cust1 = Customer("Muneed ul Hassan", 1000)
cust2 = Customer("Muneed ul Hassan", 1000)
cust1 == cust2
print(cust1, cust2, sep='\n')
###Output <__main__.Customer object at 0x00000238B66ED640> <__main__.Customer object at 0x00000238B66ED610>
###Markdown custom comparison
###Code
# two different arrays that contain the same data
import numpy as np
arr1 = np.array([1,2,3])
arr2 = np.array([1,2,3])
arr1 == arr2
###Output _____no_output_____
###Markdown Overloading \__eq__()
- **\__eq__()** is called when two objects of the same class are compared using **==**
- accepts two arguments: self and the other object to compare
- returns a boolean
###Code
class Customer:
    def __init__(self, ids, name):
        self.ids, self.name = ids, name

    # will be called when == is used
    def __eq__(self, other):
        # Diagnostic printout
        print("__eq__() is called..!!")
        # return True if all attributes match
        return (self.ids == other.ids) and \
               (self.name == other.name)

cust1 = Customer(123, "Muneed ul Hassan")
cust2 = Customer(123, "Muneed ul Hassan")
cust1 == cust2

# Two unequal objects - different ids
customer1 = Customer(123,"Maryam Azar")
customer2 = Customer(456,"Maryam Azar")
customer1 == customer2
###Output __eq__() is called..!!
###Markdown \__hash__() to use objects as dictionary keys and in sets

Operator overloading: String representation

printing an object
###Code
class Customer:
    def __init__(self, name, balance):
        self.name, self.balance = name, balance

cust = Customer('Qasim Hassan', 5000)
print(cust)
arr = np.array([1,2,3,4])
print(arr)
###Output [1 2 3 4]
###Markdown **\__str__()**
- print(obj)
- str(obj)
###Code
print(np.array([1,2,3,4]))
str(np.array([1,2,3,4]))
###Output _____no_output_____
###Markdown
- informal, for end user
- **str**ing representation

**\__repr__()**
- repr(obj)
- printing on console
###Code
repr(np.array([1,2,3,4]))
np.array([1,2,3,4])
###Output _____no_output_____
###Markdown
- formal, for developer
- **repr**oducible **repr**esentation
- fallback for print()

Implementation: str
###Code
class Customer:
    def __init__(self, name, balance):
        self.name, self.balance = name, balance

    def __str__(self):
        cust_str = """
        Customer:
            name : {name}
            balance: {balance}
        """.format(name=self.name, balance=self.balance)
        return cust_str

cust1 = Customer('Qasim Hassan', 3000)
# will implicitly call __str__()
print(cust1)
###Output Customer: name : Qasim Hassan balance: 3000
###Markdown Implementation: repr
###Code
class Customer:
    def __init__(self, name, balance):
        self.name, self.balance = name, balance

    def __repr__(self):
        # Notice the quotes around {name}
        return "Customer('{name}', {balance})".format(name=self.name, balance=self.balance)

cust = Customer('Qasim Hassan', 20000)
cust  # <-- will implicitly call __repr__()
###Output _____no_output_____
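###Markdown The \__hash__() bullet above has no accompanying code, so here is a minimal sketch (the values are just illustrative) of defining \__hash__() alongside \__eq__() so that Customer objects can be used as dictionary keys and set members:
```
class Customer:
    def __init__(self, ids, name):
        self.ids, self.name = ids, name

    def __eq__(self, other):
        return (self.ids == other.ids) and (self.name == other.name)

    def __hash__(self):
        # Hash the same attributes used by __eq__ so equal objects hash alike
        return hash((self.ids, self.name))

balances = {Customer(123, "Muneed ul Hassan"): 1000}
print(balances[Customer(123, "Muneed ul Hassan")])   # 1000: the equal key is found
print(Customer(123, "Muneed ul Hassan") in {Customer(123, "Muneed ul Hassan")})  # True
```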
ref_financial-planner.ipynb
###Markdown Unit 5 - Financial Planning ###Code # Initial imports import os import requests import pandas as pd from dotenv import load_dotenv import alpaca_trade_api as tradeapi from MCForecastTools import MCSimulation %matplotlib inline # Load .env environment variables load_dotenv() ###Output _____no_output_____ ###Markdown Part 1 - Personal Finance Planner Collect Crypto Prices Using the `requests` Library ###Code # Set current amount of crypto assets # YOUR CODE HERE! # Crypto API URLs btc_url = "https://api.alternative.me/v2/ticker/Bitcoin/?convert=CAD" eth_url = "https://api.alternative.me/v2/ticker/Ethereum/?convert=CAD" # Fetch current BTC price # YOUR CODE HERE! # Fetch current ETH price # YOUR CODE HERE! # Compute current value of my crypto # YOUR CODE HERE! # Print current crypto wallet balance print(f"The current value of your {my_btc} BTC is ${my_btc_value:0.2f}") print(f"The current value of your {my_eth} ETH is ${my_eth_value:0.2f}") ###Output The current value of your 1.2 BTC is $14309.16 The current value of your 5.3 ETH is $2096.47 ###Markdown Collect Investments Data Using Alpaca: `SPY` (stocks) and `AGG` (bonds) ###Code # Current amount of shares # YOUR CODE HERE! # Set Alpaca API key and secret # YOUR CODE HERE! # Create the Alpaca API object # YOUR CODE HERE! # Format current date as ISO format # YOUR CODE HERE! # Set the tickers tickers = ["AGG", "SPY"] # Set timeframe to '1D' for Alpaca API timeframe = "1D" # Get current closing prices for SPY and AGG # YOUR CODE HERE! # Preview DataFrame # YOUR CODE HERE! # Pick AGG and SPY close prices # YOUR CODE HERE! # Print AGG and SPY close prices print(f"Current AGG closing price: ${agg_close_price}") print(f"Current SPY closing price: ${spy_close_price}") # Compute the current value of shares # YOUR CODE HERE! # Print current value of shares print(f"The current value of your {my_spy} SPY shares is ${my_spy_value:0.2f}") print(f"The current value of your {my_agg} AGG shares is ${my_agg_value:0.2f}") ###Output The current value of your 50 SPY shares is $16727.50 The current value of your 200 AGG shares is $23889.00 ###Markdown Savings Health Analysis ###Code # Set monthly household income # YOUR CODE HERE! # Create savings DataFrame # YOUR CODE HERE! # Display savings DataFrame display(df_savings) # Plot savings pie chart # YOUR CODE HERE! # Set ideal emergency fund emergency_fund = monthly_income * 3 # Calculate total amount of savings # YOUR CODE HERE! # Validate savings health # YOUR CODE HERE! ###Output Congratulations! You have enough money in your emergency fund. ###Markdown Part 2 - Retirement Planning Monte Carlo Simulation ###Code # Set start and end dates of five years back from today. # Sample results may vary from the solution based on the time frame chosen start_date = pd.Timestamp('2015-08-07', tz='America/New_York').isoformat() end_date = pd.Timestamp('2020-08-07', tz='America/New_York').isoformat() # Get 5 years' worth of historical data for SPY and AGG # YOUR CODE HERE! # Display sample data df_stock_data.head() # Configuring a Monte Carlo simulation to forecast 30 years cumulative returns # YOUR CODE HERE! # Printing the simulation input data # YOUR CODE HERE! # Running a Monte Carlo simulation to forecast 30 years cumulative returns # YOUR CODE HERE! # Plot simulation outcomes # YOUR CODE HERE! # Plot probability distribution and confidence intervals # YOUR CODE HERE!
###Output _____no_output_____ ###Markdown Retirement Analysis ###Code # Fetch summary statistics from the Monte Carlo simulation results # YOUR CODE HERE! # Print summary statistics # YOUR CODE HERE! ###Output count 500.000000 mean 9.200762 std 6.659594 min 1.045214 25% 4.941116 50% 7.381182 75% 11.327780 max 50.993592 95% CI Lower 2.296975 95% CI Upper 26.810558 Name: 7560, dtype: float64 ###Markdown Calculate the expected portfolio return at the 95% lower and upper confidence intervals based on a `$20,000` initial investment. ###Code # Set initial investment initial_investment = 20000 # Use the lower and upper `95%` confidence intervals to calculate the range of the possible outcomes of our $20,000 # YOUR CODE HERE! # Print results print(f"There is a 95% chance that an initial investment of ${initial_investment} in the portfolio" f" over the next 30 years will end within in the range of" f" ${ci_lower} and ${ci_upper}") ###Output There is a 95% chance that an initial investment of $20000 in the portfolio over the next 30 years will end within in the range of $45939.5 and $536211.17 ###Markdown Calculate the expected portfolio return at the `95%` lower and upper confidence intervals based on a `50%` increase in the initial investment. ###Code # Set initial investment initial_investment = 20000 * 1.5 # Use the lower and upper `95%` confidence intervals to calculate the range of the possible outcomes of our $30,000 # YOUR CODE HERE! # Print results print(f"There is a 95% chance that an initial investment of ${initial_investment} in the portfolio" f" over the next 30 years will end within in the range of" f" ${ci_lower} and ${ci_upper}") ###Output There is a 95% chance that an initial investment of $30000.0 in the portfolio over the next 30 years will end within in the range of $68909.24 and $804316.75 ###Markdown Optional Challenge - Early Retirement Five Years Retirement Option ###Code # Configuring a Monte Carlo simulation to forecast 5 years cumulative returns # YOUR CODE HERE! # Running a Monte Carlo simulation to forecast 5 years cumulative returns # YOUR CODE HERE! # Plot simulation outcomes # YOUR CODE HERE! # Plot probability distribution and confidence intervals # YOUR CODE HERE! # Fetch summary statistics from the Monte Carlo simulation results # YOUR CODE HERE! # Print summary statistics # YOUR CODE HERE! # Set initial investment # YOUR CODE HERE! # Use the lower and upper `95%` confidence intervals to calculate the range of the possible outcomes of our $60,000 # YOUR CODE HERE! # Print results print(f"There is a 95% chance that an initial investment of ${initial_investment} in the portfolio" f" over the next 5 years will end within in the range of" f" ${ci_lower_five} and ${ci_upper_five}") ###Output There is a 95% chance that an initial investment of $60000 in the portfolio over the next 30 years will end within in the range of $55713.36 and $91030.38 ###Markdown Ten Years Retirement Option ###Code # Configuring a Monte Carlo simulation to forecast 10 years cumulative returns # YOUR CODE HERE! # Running a Monte Carlo simulation to forecast 10 years cumulative returns # YOUR CODE HERE! # Plot simulation outcomes # YOUR CODE HERE! # Plot probability distribution and confidence intervals # YOUR CODE HERE! # Fetch summary statistics from the Monte Carlo simulation results # YOUR CODE HERE! # Print summary statistics # YOUR CODE HERE! # Set initial investment # YOUR CODE HERE! 
# Use the lower and upper `95%` confidence intervals to calculate the range of the possible outcomes of our $60,000 # YOUR CODE HERE! # Print results print(f"There is a 95% chance that an initial investment of ${initial_investment} in the portfolio" f" over the next 10 years will end within in the range of" f" ${ci_lower_ten} and ${ci_upper_ten}") ###Output There is a 95% chance that an initial investment of $60000 in the portfolio over the next 30 years will end within in the range of $64982.9 and $119425.0
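###Markdown Editor's note: the cells in this notebook are starter-code placeholders (`# YOUR CODE HERE!`), so nothing runnable is recorded. As one hedged illustration of the very first step, fetching the BTC price with `requests`, the sketch below assumes the alternative.me v2 ticker returns JSON of the form `{"data": {"1": {"quotes": {"USD": {"price": ...}}}}}`; verify the live response before relying on that path. The 1.2 BTC holding matches the recorded output earlier in the notebook. ###Code
import requests

btc_url = "https://api.alternative.me/v2/ticker/Bitcoin/?convert=CAD"
my_btc = 1.2  # holding used in the recorded output above

# Assumed response layout: data -> "1" (Bitcoin's id) -> quotes -> USD -> price
response = requests.get(btc_url).json()
btc_price = response["data"]["1"]["quotes"]["USD"]["price"]

my_btc_value = my_btc * btc_price
print(f"The current value of your {my_btc} BTC is ${my_btc_value:0.2f}")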
notebooks/analyze-sklearn-benchmark6.ipynb
###Markdown Count the number of algorithm evaluations each model had ###Code import pandas as pd data = pd.read_csv('sklearn-benchmark6-data.tsv.gz', sep='\t', names=['dataset', 'classifier', 'parameters', 'accuracy', 'macrof1', 'bal_accuracy']).fillna('') data = data.groupby(['classifier', 'dataset', 'parameters'])['accuracy'].mean().reset_index() data.groupby('classifier')['parameters'].count() ###Output _____no_output_____ ###Markdown Rank the parameters for each model ###Code import pandas as pd data = pd.read_csv('sklearn-benchmark6-data.tsv.gz', sep='\t', names=['dataset', 'classifier', 'parameters', 'accuracy', 'macrof1', 'bal_accuracy']).fillna('') data = data.groupby(['classifier', 'dataset', 'parameters'])['accuracy'].mean().reset_index() data['accuracy'] = data['accuracy'].apply(lambda x: round(x, 3)) # find data set SVM did not finish on svm_data = data[data['classifier']=='SVC'] print([problem for problem,d in data.groupby('dataset') if problem not in svm_data['dataset'].unique()]) print('number of data sets in svm_data:',len(svm_data['dataset'].unique())) print('number of data sets:',len(data['dataset'].unique())) print('svm missing ',[p for p in data['dataset'].unique() if p not in svm_data['dataset'].unique()]) from collections import defaultdict from tqdm import tqdm import numpy as np model_param_ranks = defaultdict(list) for dataset, group_dataset in tqdm(data.groupby('dataset')): num_scores = float(len(group_dataset['accuracy'].unique())) accuracy_ranks = {} for rank, accuracy in enumerate(sorted(group_dataset['accuracy'].unique(), reverse=True)): accuracy_ranks[accuracy] = (rank + 1) / num_scores for index, row in group_dataset.iterrows(): model_param_ranks['{}-{}'.format(row['classifier'], row['parameters'])].append(accuracy_ranks[row['accuracy']]) model_average_param_ranks = defaultdict(float) for model_param in model_param_ranks: model_average_param_ranks[model_param] = np.mean(model_param_ranks[model_param]) ###Output _____no_output_____ ###Markdown Best params for each model from rankings ###Code models_seen = set() for model_param in sorted(model_average_param_ranks, key=model_average_param_ranks.get, reverse=False): model = model_param.split('-')[0] if model not in models_seen: models_seen.add(model) else: continue print('{}\t{}'.format(round(model_average_param_ranks[model_param], 3), model_param)) if len(models_seen) >= 15: break ###Output 0.081 XGBClassifier-n_estimators=500,learning_rate=0.01,gamma=0.45,max_depth=50,subsample=0.7000000000000001,seed=324089,nthread=1 0.092 RandomForestClassifier-n_estimators=500,min_impurity_decrease=0.0,max_features=sqrt,criterion=entropy,random_state=324089 0.097 GradientBoostingClassifier-n_estimators=500,min_impurity_decrease=0.005,max_features=sqrt,learning_rate=0.1,loss=deviance,random_state=324089 0.098 ExtraTreesClassifier-n_estimators=500,min_impurity_decrease=0.0,max_features=0.5,criterion=gini,random_state=324089 0.127 SVC-C=10.0,gamma=auto,kernel=poly,degree=2,coef0=0.5,random_state=324089 0.163 KNeighborsClassifier-n_neighbors=14,weights=distance 0.172 DecisionTreeClassifier-min_impurity_decrease=0.0,max_features=None,criterion=entropy,random_state=324089 0.184 LogisticRegression-C=2.0,penalty=l1,fit_intercept=True,dual=False,random_state=324089 0.188 LinearSVC-C=1.0,penalty=l1,fit_intercept=True,dual=False,random_state=324089 0.222 PassiveAggressiveClassifier-C=0.01,loss=squared_hinge,fit_intercept=True,random_state=324089 0.225 AdaBoostClassifier-learning_rate=0.5,n_estimators=50,random_state=324089 0.314 
BernoulliNB-alpha=0.1,fit_prior=True,binarize=0.5 0.331 GaussianNB- 0.343 MultinomialNB-alpha=0.0,fit_prior=True ###Markdown Average each model parameter's rankings ###Code from collections import defaultdict import numpy as np model_param_breakdown_rankings = defaultdict(lambda: defaultdict(lambda: defaultdict(list))) for model_param in model_average_param_ranks: model = model_param.split('-')[0] if model == 'GaussianNB': continue params = '-'.join(model_param.split('-')[1:]) params = params.split(',') rank = model_average_param_ranks[model_param] for param in params: model_param_breakdown_rankings[model][param.split('=')[0]][param.split('=')[-1]].append(rank) model_param_breakdown_average_rankings = defaultdict(lambda: defaultdict(lambda: defaultdict(float))) for model in sorted(model_param_breakdown_rankings): for param in model_param_breakdown_rankings[model]: for param_val in model_param_breakdown_rankings[model][param]: model_param_breakdown_average_rankings[model][param][param_val] = round(np.mean( model_param_breakdown_rankings[model][param][param_val]), 3) for model in sorted(model_param_breakdown_average_rankings): print(model) print('--------------------') for param in model_param_breakdown_average_rankings[model]: print(param) for param_val in sorted(model_param_breakdown_average_rankings[model][param], key=model_param_breakdown_average_rankings[model][param].get): print('\t={}{}{}'.format(param_val, (' ' * 25)[:25 - len(param_val)], model_param_breakdown_average_rankings[model][param][param_val])) print('') ###Output AdaBoostClassifier -------------------- learning_rate =0.5 0.233 =0.1 0.246 =1.0 0.262 =0.01 0.287 =50.0 0.679 =100.0 0.709 =10.0 0.797 n_estimators =500 0.447 =100 0.453 =50 0.458 =10 0.478 random_state =324089 0.459 BernoulliNB -------------------- alpha =10.0 0.42 =5.0 0.422 =25.0 0.424 =1.0 0.429 =0.75 0.43 =0.5 0.431 =50.0 0.431 =0.25 0.432 =0.1 0.434 =0.0 0.438 fit_prior =True 0.401 =False 0.457 binarize =0.5 0.351 =0.25 0.363 =0.75 0.394 =0.1 0.401 =0.9 0.442 =0.0 0.46 =1.0 0.593 DecisionTreeClassifier -------------------- min_impurity_decrease =0.0 0.207 =0.00025 0.221 =0.0005 0.225 =0.00075 0.228 =0.001 0.231 =0.00125 0.232 =0.0015 0.235 =0.00175 0.237 =0.002 0.238 =0.0022500000000000003 0.24 =0.0025 0.241 =0.00275 0.243 =0.003 0.244 =0.0032500000000000003 0.246 =0.0035 0.247 =0.00375 0.248 =0.004 0.25 =0.00425 0.25 =0.0045000000000000005 0.251 =0.00475 0.253 =0.005 0.254 =0.0075 0.264 =0.01 0.273 =0.0125 0.282 =0.015 0.288 =0.0175 0.295 =0.02 0.302 =0.0225 0.308 =0.025 0.314 =0.0275 0.32 =0.03 0.325 =0.0325 0.331 =0.035 0.336 =0.0375 0.34 =0.04 0.345 =0.0425 0.349 =0.045 0.355 =0.0475 0.358 =0.05 0.362 =0.1 0.417 =0.15000000000000002 0.454 =0.2 0.481 =0.25 0.501 =0.30000000000000004 0.52 =0.35000000000000003 0.537 =0.4 0.545 =0.45 0.551 =0.5 0.558 max_features =None 0.257 =0.75 0.272 =0.5 0.29 =0.25 0.341 =sqrt 0.343 =log2 0.347 =0.1 0.387 criterion =entropy 0.301 =gini 0.338 random_state =324089 0.319 ExtraTreesClassifier -------------------- n_estimators =500 0.261 =100 0.262 =50 0.264 =10 0.282 min_impurity_decrease =0.0 0.113 =0.00025 0.129 =0.0005 0.139 =0.00075 0.146 =0.001 0.152 =0.00125 0.157 =0.0015 0.161 =0.00175 0.165 =0.002 0.168 =0.0022500000000000003 0.17 =0.0025 0.173 =0.00275 0.175 =0.003 0.178 =0.0032500000000000003 0.18 =0.0035 0.182 =0.00375 0.184 =0.004 0.186 =0.00425 0.187 =0.0045000000000000005 0.188 =0.00475 0.19 =0.005 0.192 =0.0075 0.206 =0.01 0.218 =0.0125 0.227 =0.015 0.235 =0.0175 0.244 =0.02 0.252 =0.0225 0.259 =0.025 0.265 
=0.0275 0.271 =0.03 0.277 =0.0325 0.283 =0.035 0.289 =0.0375 0.294 =0.04 0.299 =0.0425 0.304 =0.045 0.308 =0.0475 0.313 =0.05 0.317 =0.1 0.396 =0.15000000000000002 0.434 =0.2 0.463 =0.25 0.486 =0.30000000000000004 0.508 =0.35000000000000003 0.521 =0.4 0.532 =0.45 0.546 =0.5 0.555 max_features =None 0.232 =0.75 0.237 =0.5 0.247 =sqrt 0.275 =log2 0.278 =0.25 0.279 =0.1 0.322 criterion =entropy 0.241 =gini 0.293 random_state =324089 0.267 GradientBoostingClassifier -------------------- n_estimators =500 0.319 =100 0.338 =50 0.354 =10 0.413 min_impurity_decrease =0.00425 0.351 =0.0045000000000000005 0.351 =0.00475 0.351 =0.00025 0.352 =0.0005 0.352 =0.00075 0.352 =0.001 0.352 =0.00125 0.352 =0.0015 0.352 =0.00175 0.352 =0.002 0.352 =0.0022500000000000003 0.352 =0.00275 0.352 =0.003 0.352 =0.0032500000000000003 0.352 =0.0035 0.352 =0.00375 0.352 =0.004 0.352 =0.0 0.353 =0.0025 0.353 =0.005 0.353 =0.0075 0.353 =0.01 0.353 =0.0125 0.354 =0.015 0.354 =0.0175 0.354 =0.02 0.354 =0.0225 0.354 =0.025 0.354 =0.0275 0.355 =0.03 0.355 =0.0325 0.355 =0.035 0.355 =0.0375 0.355 =0.04 0.356 =0.0425 0.356 =0.045 0.356 =0.0475 0.356 =0.05 0.358 =0.1 0.362 =0.15000000000000002 0.363 =0.2 0.365 =0.25 0.367 =0.30000000000000004 0.369 =0.35000000000000003 0.37 =0.4 0.371 =0.45 0.372 =0.5 0.373 max_features =0.5 0.347 =0.25 0.348 =sqrt 0.352 =log2 0.356 =0.1 0.357 =0.75 0.357 =None 0.377 learning_rate =0.5 0.142 =0.1 0.146 =1.0 0.186 =0.01 0.242 =10.0 0.56 =50.0 0.603 =100.0 0.615 loss =exponential 0.295 =deviance 0.417 random_state =324089 0.356 KNeighborsClassifier -------------------- n_neighbors =14 0.178 =12 0.179 =7 0.179 =10 0.18 =13 0.18 =11 0.181 =15 0.181 =16 0.182 =8 0.182 =9 0.183 =6 0.184 =17 0.186 =5 0.186 =18 0.187 =19 0.193 =4 0.193 =20 0.195 =3 0.195 =21 0.199 =22 0.201 =23 0.203 =24 0.203 =25 0.205 =1 0.218 =50 0.221 =2 0.226 =100 0.24 weights =distance 0.181 =uniform 0.207 LinearSVC -------------------- C =0.5 0.222 =0.6000000000000001 0.222 =0.7000000000000001 0.222 =0.8 0.222 =0.9 0.222 =1.0 0.222 =0.4 0.223 =2.0 0.223 =3.0 0.223 =0.2 0.224 =0.30000000000000004 0.224 =10.0 0.224 =4.0 0.224 =5.0 0.224 =6.0 0.224 =7.0 0.224 =8.0 0.224 =9.0 0.224 =0.1 0.228 penalty =l1 0.223 =l2 0.224 fit_intercept =True 0.193 =False 0.254 dual =False 0.223 =True 0.223 random_state =324089 0.223 LogisticRegression -------------------- C =5.0 0.219 =5.5 0.219 =6.0 0.219 =6.5 0.219 =7.0 0.219 =7.5 0.219 =10.0 0.22 =10.5 0.22 =11.0 0.22 =11.5 0.22 =12.0 0.22 =12.5 0.22 =13.0 0.22 =13.5 0.22 =14.0 0.22 =14.5 0.22 =15.0 0.22 =15.5 0.22 =16.0 0.22 =16.5 0.22 =17.0 0.22 =17.5 0.22 =2.0 0.22 =2.5 0.22 =3.0 0.22 =3.5 0.22 =4.0 0.22 =4.5 0.22 =8.0 0.22 =8.5 0.22 =9.0 0.22 =9.5 0.22 =1.5 0.221 =18.0 0.221 =18.5 0.221 =19.0 0.221 =19.5 0.221 =20.0 0.221 =1.0 0.222 =0.5 0.226 penalty =l1 0.218 =l2 0.221 fit_intercept =True 0.188 =False 0.252 dual =False 0.219 =True 0.222 random_state =324089 0.22 MultinomialNB -------------------- alpha =0.75 0.368 =0.25 0.369 =0.5 0.369 =1.0 0.369 =5.0 0.369 =0.0 0.37 =0.1 0.37 =10.0 0.373 =25.0 0.382 =50.0 0.392 fit_prior =True 0.354 =False 0.393 PassiveAggressiveClassifier -------------------- C =0.01 0.252 =0.001 0.277 =0.1 0.286 =0.0001 0.324 =0.5 0.325 =1.0 0.338 =10.0 0.353 =100.0 0.355 =50.0 0.355 =1e-05 0.366 =1e-06 0.384 loss =squared_hinge 0.326 =hinge 0.331 fit_intercept =True 0.303 =False 0.354 random_state =324089 0.329 RandomForestClassifier -------------------- n_estimators =500 0.217 =100 0.219 =50 0.221 =10 0.237 min_impurity_decrease =0.0 0.108 =0.00025 0.113 =0.0005 0.116 
=0.00075 0.119 =0.001 0.122 =0.00125 0.125 =0.0015 0.127 =0.00175 0.13 =0.002 0.132 =0.0022500000000000003 0.134 =0.0025 0.135 =0.00275 0.137 =0.003 0.139 =0.0032500000000000003 0.141 =0.0035 0.143 =0.00375 0.145 =0.004 0.146 =0.00425 0.148 =0.0045000000000000005 0.149 =0.00475 0.15 =0.005 0.152 =0.0075 0.162 =0.01 0.17 =0.0125 0.177 =0.015 0.184 =0.0175 0.19 =0.02 0.196 =0.0225 0.202 =0.025 0.208 =0.0275 0.213 =0.03 0.219 =0.0325 0.223 =0.035 0.229 =0.0375 0.234 =0.04 0.239 =0.0425 0.244 =0.045 0.248 =0.0475 0.253 =0.05 0.256 =0.1 0.334 =0.15000000000000002 0.379 =0.2 0.412 =0.25 0.441 =0.30000000000000004 0.467 =0.35000000000000003 0.489 =0.4 0.505 =0.45 0.517 =0.5 0.531 max_features =0.75 0.209 =None 0.21 =0.5 0.212 =sqrt 0.225 =log2 0.227 =0.25 0.228 =0.1 0.254 criterion =entropy 0.199 =gini 0.248 random_state =324089 0.224 SVC -------------------- C =100.0 0.288 =50.0 0.288 =10.0 0.292 =1.0 0.303 =0.5 0.308 =0.1 0.32 =0.01 0.343 gamma =auto 0.29 =0.1 0.292 =0.5 0.297 =1.0 0.301 =10.0 0.313 =50.0 0.315 =100.0 0.316 =0.01 0.325 kernel =poly 0.192 =rbf 0.34 =sigmoid 0.53 degree =3 0.194 =2 0.359 coef0 =1.0 0.293 =0.5 0.296 =10.0 0.298 =50.0 0.304 =100.0 0.305 =0.1 0.307 =0.0 0.332 random_state =324089 0.306 XGBClassifier -------------------- n_estimators =500 0.327 =100 0.334 =50 0.338 =10 0.348 learning_rate =0.1 0.181 =0.5 0.182 =1.0 0.198 =0.01 0.206 =100.0 0.49 =50.0 0.519 =10.0 0.58 gamma =0.45 0.335 =0.5 0.335 =0.30000000000000004 0.336 =0.35000000000000003 0.336 =0.4 0.336 =0.2 0.337 =0.25 0.337 =0.0 0.338 =0.05 0.338 =0.1 0.338 =0.15000000000000002 0.338 max_depth =3 0.331 =20 0.332 =2 0.333 =50 0.333 =10 0.336 =4 0.339 =5 0.34 =1 0.351 subsample =1.0 0.283 =0.9 0.286 =0.7000000000000001 0.288 =0.8 0.289 =0.6000000000000001 0.292 =0.5 0.294 =0.4 0.297 =0.30000000000000004 0.3 =0.2 0.307 =0.1 0.328 =0.0 0.74 seed =324089 0.337 nthread =1 0.337 ###Markdown Rank each model on a per-data set basis ###Code import pandas as pd data = pd.read_csv('sklearn-benchmark6-data.tsv.gz', sep='\t', names=['dataset', 'classifier', 'parameters', 'accuracy', 'macrof1', 'bal_accuracy']).fillna('') data = data.groupby(['classifier', 'dataset', 'parameters'])['accuracy'].mean().reset_index() data['accuracy'] = data['accuracy'].apply(lambda x: round(x, 3)) from collections import defaultdict from tqdm import tqdm import numpy as np model_ranks = defaultdict(list) for dataset, group_dataset in tqdm(data.groupby('dataset')): if len(group_dataset['classifier'].unique()) != 14: continue num_scores = float(len(group_dataset['accuracy'].unique())) accuracy_ranks = {} for rank, accuracy in enumerate(sorted(group_dataset['accuracy'].unique(), reverse=True)): accuracy_ranks[accuracy] = (rank + 1) / num_scores for index, row in group_dataset.iterrows(): model_ranks[row['classifier']].append(accuracy_ranks[row['accuracy']]) model_average_ranks = defaultdict(float) for model in model_ranks: model_average_ranks[model] = np.mean(model_ranks[model]) for model in sorted(model_average_ranks, key=model_average_ranks.get, reverse=False): print('{}\t{}'.format(round(model_average_ranks[model], 3), model)) ###Output 0.194 KNeighborsClassifier 0.22 LogisticRegression 0.223 LinearSVC 0.224 RandomForestClassifier 0.267 ExtraTreesClassifier 0.319 DecisionTreeClassifier 0.325 SVC 0.329 PassiveAggressiveClassifier 0.331 GaussianNB 0.336 XGBClassifier 0.372 GradientBoostingClassifier 0.373 MultinomialNB 0.429 BernoulliNB 0.459 AdaBoostClassifier ###Markdown How often is model X better than model Y? 
###Code import pandas as pd import pdb data = pd.read_csv('sklearn-benchmark6-data.tsv.gz', sep='\t', names=['dataset', 'classifier', 'parameters', 'accuracy', 'macrof1', 'bal_accuracy']).fillna('') data = data.groupby(['dataset','classifier'])['accuracy'].max().reset_index() data['accuracy'] = data['accuracy'].apply(lambda x: round(x, 3)) data.head() from collections import defaultdict from tqdm import tqdm import numpy as np from itertools import combinations import pdb model_tourneys = defaultdict(int) all_models = sorted(data['classifier'].unique()) for dataset, group_dataset in tqdm(data.groupby('dataset')): # pdb.set_trace() group_dataset.loc[:, 'accuracy']= group_dataset['accuracy'].values / group_dataset['accuracy'].max() group_dataset = group_dataset.set_index('classifier')['accuracy'].to_dict() for (model1, model2) in combinations(group_dataset.keys(), 2): if group_dataset[model1] >= group_dataset[model2] + 0.01: model_tourneys[(model1, model2)] += 1 elif group_dataset[model2] >= group_dataset[model1] + 0.01: model_tourneys[(model2, model1)] += 1 from itertools import product for model1 in all_models: print('{}{}W / L'.format(model1, ' ' * (38 - len(model1)))) print('--------------------') for model2 in all_models: if model1 == model2: continue print('\t{}{}{} / {}'.format(model2, ' ' * (30 - len(model2)), model_tourneys[(model1, model2)], model_tourneys[(model2, model1)])) print('') from itertools import product import numpy as np model_tourney_matrix = [] for pair in list(product(all_models, all_models)): model_tourney_matrix.append(model_tourneys[pair]) model_tourney_matrix = np.array(model_tourney_matrix).reshape((14, 14)) all_models = list(np.array(all_models)[np.argsort(model_tourney_matrix.sum(axis=1))[::-1]]) model_tourney_matrix = model_tourney_matrix[:, np.argsort(model_tourney_matrix.sum(axis=1))[::-1]] from itertools import product import numpy as np """ TODO: This code snippet needs to be run twice (this is the 2nd time it's being run here, with the above snippet being the first time) so the chart below will display properly. There's some weird bug going on here that we should dig in to. 
""" model_tourney_matrix = [] for pair in list(product(all_models, all_models)): model_tourney_matrix.append(model_tourneys[pair]) model_tourney_matrix = np.array(model_tourney_matrix).reshape((14, 14)) all_models = list(np.array(all_models)[np.argsort(model_tourney_matrix.sum(axis=1))[::-1]]) model_tourney_matrix = model_tourney_matrix[:, np.argsort(model_tourney_matrix.sum(axis=1))[::-1]] %matplotlib inline import matplotlib.pyplot as plt import seaborn as sb model_nice_dict = { 'AdaBoostClassifier': 'AB', 'BernoulliNB': 'BNB', 'LogisticRegression': 'LR', 'MultinomialNB': 'MNB', 'PassiveAggressiveClassifier': 'PA', 'SGDClassifier': 'SGD', 'GaussianNB': 'GNB', 'DecisionTreeClassifier': 'DT', 'ExtraTreesClassifier': 'ERF', 'RandomForestClassifier': 'RF', 'GradientBoostingClassifier':'GTB', 'KNeighborsClassifier': 'KNN', 'SVC': 'SVM', 'XGBClassifier': 'XGB', 'LinearSVC': 'LSVM' } model_nice_dict_y = { 'AdaBoostClassifier': 'AdaBoost', 'BernoulliNB': 'Bernoulli Naive Bayes', 'LogisticRegression': 'Logistic Regression', 'MultinomialNB': 'Multinomial Naive Bayes', 'PassiveAggressiveClassifier': 'Passive Aggressive', 'SGDClassifier': 'Linear Model trained via\nStochastic Gradient Descent', 'GaussianNB': 'Gaussian Naive Bayes', 'DecisionTreeClassifier': 'Decision Tree', 'ExtraTreesClassifier': 'Extra Random Forest', 'RandomForestClassifier': 'Random Forest', 'GradientBoostingClassifier':'Gradient Tree Boosting', 'KNeighborsClassifier': 'K-Nearest Neighbors', 'SVC': 'Support Vector Machine', 'XGBClassifier': 'XGBoost', 'LinearSVC': 'Linear Support Vector Machine' } model_nice = [] model_nice_y = [] for m in all_models: model_nice.append(model_nice_dict[m]) model_nice_y.append(model_nice_dict_y[m]) plt.figure(figsize=(10, 10)) sb.heatmap(model_tourney_matrix, square=True, annot=True, fmt='0.0f', cmap='Purples', xticklabels=model_nice, yticklabels=model_nice_y, cbar_kws={'shrink': 0.56}) plt.xticks(fontsize=12) plt.yticks(fontsize=12) plt.xlabel('Losses', fontsize=14) plt.ylabel('Wins', fontsize=14) plt.title('How many times model X outperformed model Y (out of 165)', fontsize=18) h = plt.gcf() plt.tight_layout() #h.savefig('figs/model_outperformance.pdf', bbox_inches='tight') #plt.savefig('figures/sklearn-model-x-outperform-model-y.pdf', bbox_inches='tight') ; ###Output _____no_output_____ ###Markdown How many models do we need to cover all data sets? 
###Code import pandas as pd import pdb data = pd.read_csv('sklearn-benchmark6-data.tsv.gz', sep='\t', names=['dataset', 'classifier', 'parameters', 'accuracy', 'macrof1', 'bal_accuracy']).fillna('') data = data.groupby(['dataset','classifier'])['accuracy'].max().reset_index() data['accuracy'] = data['accuracy'].apply(lambda x: round(x, 3)) data.head() from collections import defaultdict from tqdm import tqdm import numpy as np dataset_best_models = defaultdict(list) model_counts = defaultdict(int) for dataset, group_dataset in data.groupby('dataset'): group_dataset['accuracy'] /= group_dataset['accuracy'].max() dataset_best_models[dataset] = group_dataset.loc[ group_dataset['accuracy'] >= 0.99, 'classifier'].values for dataset in dataset_best_models: for model in dataset_best_models[dataset]: model_counts[model] += 1 print('Model','&','Data Set Coverage','\\\\ \\hline') for model in sorted(model_counts, key=model_counts.get, reverse=True): print(model,'&',model_counts[model],'\\\\') dataset_best_models = defaultdict(list) model_counts = defaultdict(int) for dataset, group_dataset in data.groupby('dataset'): group_dataset['accuracy'] /= group_dataset['accuracy'].max() dataset_best_models[dataset] = group_dataset.loc[ group_dataset['accuracy'] >= 0.99, 'classifier'].values for dataset in dataset_best_models: for model in dataset_best_models[dataset]: model_counts[model] += 1 dataset_exclude_set = set() top_models = [] while len(dataset_exclude_set) != len(data['dataset'].unique()): next_top_model = sorted(model_counts, key=model_counts.get, reverse=True)[0] top_models.append((model_counts[next_top_model], next_top_model)) for dataset in dataset_best_models: if next_top_model in dataset_best_models[dataset]: dataset_exclude_set.add(dataset) dataset_best_models = defaultdict(list) model_counts = defaultdict(int) for dataset, group_dataset in data.groupby('dataset'): if dataset in dataset_exclude_set: continue group_dataset['accuracy'] /= group_dataset['accuracy'].max() dataset_best_models[dataset] = group_dataset.loc[ group_dataset['accuracy'] >= 0.99, 'classifier'].values for dataset in dataset_best_models: for model in dataset_best_models[dataset]: model_counts[model] += 1 top_models ###Output _____no_output_____ ###Markdown How many model-parameter combinations do we need to cover all data sets? 
###Code import pandas as pd import pdb data = pd.read_csv('sklearn-benchmark6-data.tsv.gz', sep='\t', names=['dataset', 'classifier', 'parameters', 'accuracy', 'macrof1', 'bal_accuracy']).fillna('') data = data.groupby(['dataset','classifier','parameters'])['accuracy'].max().reset_index() data['classifier-params'] = data['classifier'].values + '-' + data['parameters'].values data['accuracy'] = data['accuracy'].apply(lambda x: round(x, 3)) data.head() from collections import defaultdict dataset_best_models = defaultdict(list) model_counts = defaultdict(int) for dataset, group_dataset in data.groupby('dataset'): group_dataset.loc[:, 'accuracy'] = group_dataset['accuracy'].values / group_dataset['accuracy'].max() dataset_best_models[dataset] = group_dataset.loc[ group_dataset['accuracy'] >= 0.99, 'classifier-params'].values for dataset in dataset_best_models: for model in dataset_best_models[dataset]: model_counts[model] += 1 dataset_exclude_set = set() top_models = [] while len(dataset_exclude_set) != len(data['dataset'].unique()): next_top_model = sorted(model_counts, key=model_counts.get, reverse=True)[0] top_models.append((model_counts[next_top_model], next_top_model)) if len(top_models) == 10: break # Don't allow repeat models data = data.loc[data['classifier'] != next_top_model.split('-')[0].strip()] for dataset in dataset_best_models: if next_top_model in dataset_best_models[dataset]: dataset_exclude_set.add(dataset) dataset_best_models = defaultdict(list) model_counts = defaultdict(int) for dataset, group_dataset in data.groupby('dataset'): if dataset in dataset_exclude_set: continue group_dataset.loc[:, 'accuracy'] = group_dataset.loc[:, 'accuracy'].values / group_dataset['accuracy'].max() dataset_best_models[dataset] = group_dataset.loc[ group_dataset['accuracy'] >= 0.99, 'classifier-params'].values for dataset in dataset_best_models: for model in dataset_best_models[dataset]: model_counts[model] += 1 top_models [(x, model_counts[x]) for x in sorted(model_counts, key=model_counts.get, reverse=True)[:25]] ###Output _____no_output_____ ###Markdown Model/data biclustering analysisCreate matrix of data sets vs. best model accuracy on those data sets.Cluster the matrix. 
###Code import pandas as pd data = pd.read_csv('sklearn-benchmark6-data.tsv.gz', sep='\t', names=['dataset', 'classifier', 'parameters', 'accuracy', 'macrof1', 'bal_accuracy']).fillna('') data = data.groupby(['classifier', 'dataset'])['bal_accuracy'].max().reset_index() # print("classifiers before drop:",data['classifier'].unique()) # data = data[data['classifier']!='LinearSVC'] # data = data[data['classifier']!='SVC'] print('loaded ',data['dataset'].unique().shape[0],'datasets and ', data['classifier'].unique().shape[0],'classifiers') # data['classifier-params'] = data['classifier'].values + '-' + data['parameters'].values data['bal_accuracy'] = data['bal_accuracy'].apply(lambda x: round(x, 3)) import numpy as np from tqdm import tqdm from sklearn.cluster.bicluster import SpectralBiclustering from sklearn.metrics import consensus_score import matplotlib.pyplot as plt %matplotlib inline import pdb # get model results with best parameters all_models = np.asarray((data['classifier'].unique())) all_datasets = np.asarray((data['dataset'].unique())) model_data_acc = np.zeros([len(all_models),len(all_datasets)]) model_data_acc_norm = np.zeros([len(all_models),len(all_datasets)]) ranks = np.zeros([len(all_models),len(all_datasets)]) #print("model_data_acc.shape:",model_data_acc.shape) # all_models = [] for i,(clf, group_clf) in enumerate(tqdm(data.groupby('dataset'))): print('clf:',clf) data_acc = group_clf['bal_accuracy'].transpose() model_data_acc[:,i] = data_acc # model_data_acc_norm[:,i] = (data_acc-data_acc.min())/(data_acc.max() - data_acc.min()) model_data_acc_norm[:,i] = (data_acc-data_acc.mean()) # all_models.append(clf) all_models = np.asarray(all_models) # get ranks for i,mda in enumerate(model_data_acc.transpose()): #print("mda shape:",mda.shape) temp = mda.argsort() ranks[temp,i] = np.arange(len(mda)) np.savetxt('figs/ranks',ranks) np.savetxt('figs/model_data_accuracy',model_data_acc) print("clustering...") np.random.seed(42) model = SpectralBiclustering(n_clusters=(4,10), random_state=0) model.fit(model_data_acc) #score = consensus_score(model.biclusters_, (rows[:, row_idx], columns[:, col_idx])) #print("consensus score: {:.3f}".format(score)) r = model.row_labels_[:] c = model.column_labels_[:] #rearrange data into biclusters fit_data_norm = model_data_acc_norm[np.argsort(r)] fit_data_norm = fit_data_norm[:, np.argsort(c)] fit_data = model_data_acc[np.argsort(r)] fit_data = fit_data[:, np.argsort(c)] all_models = all_models[np.argsort(r)] all_datasets = all_datasets[np.argsort(c)] print('done') model_nice_dict = { 'AdaBoostClassifier': 'AdaBoost', 'BernoulliNB': 'Bernoulli NB', 'LinearSVC': 'Linear SVC', 'LogisticRegression': 'Logistic Regression', 'MultinomialNB': 'Multinomial NB', 'PassiveAggressiveClassifier': 'Passive Aggressive', 'SGDClassifier': 'SGD', 'GaussianNB': 'Gaussian NB', 'DecisionTreeClassifier': 'Decision Tree', 'ExtraTreesClassifier': 'Extra Trees', 'RandomForestClassifier': 'Random Forest', 'GradientBoostingClassifier':'Gradient Boosting', 'KNeighborsClassifier': 'K-Nearest Neighbor', 'SVC': 'SVC', 'XGBClassifier': 'XGBoost', 'LinearSVC': 'Linear Support Vector Machine' } model_nice = [] for m in all_models: model_nice.append(model_nice_dict[m]) print("biclusters_:",len(model.biclusters_)) #plot # h = plt.figure(figsize=(4,3),sharey=True) # ax = plt.subplot(111) h,ax = plt.subplots(3,figsize=(10,9)) # ax = h.add_subplot(311) tmp = ax[0].imshow(fit_data[:,:],cmap=plt.cm.RdBu) # ax[0].set_title('A') # ax[0].set_xlabel('A') 
cbar=plt.colorbar(tmp,ax=ax[0],orientation='vertical',shrink=0.8) cbar.set_label('Balanced Accuracy') ax[0].set_yticks(range(len(all_models))) #,rotation=90 ax[0].set_yticklabels(model_nice) #,rotation=90 # ax[1].set_xlabel('Data Set',size=16) ax[0].set_xticks(np.arange(len(all_datasets),step=10)) ax[0].xaxis.tick_top() # h = plt.gcf() # ax = plt.gca( ) ax[0].set_aspect(4) # h.tight_layout() # h = plt.gcf() # h.savefig("figs/bicluster.pdf",dpi=100) # k = plt.figure(figsize=(10,3)) # ax = h.add_subplot(312) tmp = ax[1].matshow(fit_data_norm[:,:],cmap=plt.cm.RdBu) cbar=plt.colorbar(tmp,ax=ax[1],orientation='vertical',shrink=0.8) cbar.set_label('Deviation from Mean') ax[1].set_yticks(range(len(all_models))) #,rotation=90 ax[1].set_yticklabels(model_nice) #,rotation=90 # ax[1].set_xlabel('Data Set',size=16) ax[1].set_xticks(np.arange(len(all_datasets),step=10)) # ax[1].set_xlabel('B') # h = plt.gcf() # ax = plt.gca( ) ax[1].set_aspect(4) # h.tight_layout() # h = plt.gcf() # k.savefig("figs/bicluster_zeromean.pdf",dpi=100) # h2 = plt.figure(figsize=(10,3)) # ax = h.add_subplot(313) cluster_labels = np.outer(np.sort(model.row_labels_) + 1,np.sort(model.column_labels_) + 1) boundary = np.zeros((cluster_labels.shape[0],cluster_labels.shape[1])) for i,cr in enumerate(cluster_labels[1:]): for j,cc in enumerate(cr[1:]): if cluster_labels[i-1,j] != cluster_labels[i,j]: boundary[i,j] = 1 if cluster_labels[i,j-1] != cluster_labels[i,j]: boundary[i,j] = 1 tmp=ax[2].matshow(cluster_labels,cmap=plt.cm.Purples,alpha=1) # tmp = # ydata = [0,165,0,165,0,165] # tmp=ax[2].plot((0,165),(2.5,2.5)) # plt.gca().invert_yaxis() cbar=plt.colorbar(tmp,ax=ax[2],orientation='vertical',shrink=0.8) cbar.set_label('Bicluster ID') plt.yticks(range(len(all_models)), model_nice) #,rotation=90 ax[2].set_xlabel('Dataset',size=16) plt.xticks(np.arange(len(all_datasets),step=10)) # ax[2].set_xlabel('C') # h = plt.gcf() # ax = plt.gca( ) ax[2].set_aspect(4) h.tight_layout() # plt.subplots_adjust(top=0.95) #h.savefig("figs/cluster_all.pdf",dpi=100) #h.savefig("figs/cluster_all.eps",dpi=100) #h.savefig("figs/cluster_all.png",dpi=100) plt.show() j=0 for c in np.unique(cluster_labels[0,:]): print('cluster',c,':') for d in all_datasets[cluster_labels[0,:]==c]: # print('',j,":",d) print('"'+d+'"',end=',') j+=1 print(' ') ###Output _____no_output_____ ###Markdown How do the algorithms cluster?Create matrix of data sets vs. median model accuracy on those data sets.Cluster the matrix using Agglomerative Clustering. Look at the resulting dendrogram. 
###Code import pandas as pd data = pd.read_csv('sklearn-benchmark6-data.tsv.gz', sep='\t', names=['dataset', 'classifier', 'parameters', 'accuracy', 'macrof1', 'bal_accuracy']).fillna('') # data = data.groupby(['classifier', 'dataset', 'parameters'])['accuracy'].mean().reset_index() import numpy as np from tqdm import tqdm from scipy.cluster import hierarchy import matplotlib.pyplot as plt %matplotlib inline all_models = np.asarray(sorted(data['classifier'].unique())) # remove SVC from all_models all_datasets = np.asarray(sorted(data['dataset'].unique())) model_data_acc = np.zeros([len(all_models),len(all_datasets)]) ranks = np.zeros([len(all_models),len(all_datasets)]) #print("model_data_acc.shape:",model_data_acc.shape) all_models = [] for i,(clf, group_clf) in enumerate(tqdm(data.groupby('classifier'))): # if clf != 'SVC': model_best_params_acc = np.zeros(len(all_datasets)) # find best parameter setings for model, based on median cv score for each parameter setting for params,group_clf_params in group_clf.groupby('parameters'): # across data sets for j,a in enumerate(group_clf_params.groupby('dataset')['accuracy'].median()): if a > model_best_params_acc[j]: model_best_params_acc[j] = a # model i's accuracy is the median cv accuracy of the best parameter settings for that model, across data sets model_data_acc[i,:] = model_best_params_acc all_models.append(clf) all_models = np.asarray(all_models) # get ranks for i,mda in enumerate(model_data_acc.transpose()): #print("mda shape:",mda.shape) temp = mda.argsort() ranks[temp,i] = np.arange(len(mda)) model_nice_dict = { 'AdaBoostClassifier': 'AdaBoost', 'BernoulliNB': 'Bernoulli NB', 'LinearSVC': 'Linear SVC', 'LogisticRegression': 'Logistic Regression', 'MultinomialNB': 'Multinomial NB', 'PassiveAggressiveClassifier': 'Passive Aggressive', 'SGDClassifier': 'SGD', 'GaussianNB': 'Gaussian NB', 'DecisionTreeClassifier': 'Decision Tree', 'ExtraTreesClassifier': 'Extra Trees', 'RandomForestClassifier': 'Random Forest', 'GradientBoostingClassifier':'Gradient Boosting', 'KNeighborsClassifier': 'K-Nearest Neighbor', 'SVC': 'SVC', 'XGBClassifier': 'XGBoost', 'LinearSVC': 'Linear Support Vector Machine' } model_nice = [] for m in all_models: model_nice.append(model_nice_dict[m]) print("clustering...") Z = hierarchy.linkage(ranks, 'single') dn = hierarchy.dendrogram(Z, labels = model_nice, orientation='right') plt.title('Ranking Dendrogram') h = plt.gcf() plt.figure(1,figsize=(6,4)) plt.tight_layout() #h.savefig('figs/HAC_models_ranking.pdf') plt.figure() Z = hierarchy.linkage(model_data_acc, 'single') dn = hierarchy.dendrogram(Z, labels = model_nice, orientation='right') plt.title('Accuracy Dendrogram') h = plt.gcf() plt.figure(1,figsize=(6,4)) plt.tight_layout() #h.savefig('figs/HAC_models_accuracy.pdf') plt.show() ###Output _____no_output_____ ###Markdown How do the data sets cluster? ###Code from sklearn.cluster import KMeans from sklearn.metrics import silhouette_score, silhouette_samples from sklearn.decomposition import PCA import numpy from tqdm import tqdm #========== # optimal K via elbow method with silhouette score which produces a better elbow. 
#========== X = model_data_acc.transpose() Ks = np.arange(2,147,1) Inertias = [] Silhouettes = [] np.random.seed(2) # loop through k values for K in tqdm(Ks): km = KMeans(n_clusters=K, init='k-means++',copy_x=False).fit(X) labels = km.labels_ centers = km.cluster_centers_ inertia = km.inertia_ Silhouettes.append(silhouette_score(X,labels)) # Inertias[K-1] = km.inertia_ Inertias.append(km.inertia_) # line plot of K versus Silhouette score with best value marked with x plt.figure(1) plt.plot(Ks,Silhouettes,label='silhouette') plt.plot(Ks[np.argmax(Silhouettes)],Silhouettes[np.argmax(Silhouettes)],marker = 'o',color='r',markersize=7) plt.text(Ks[np.argmax(Silhouettes)]-2,Silhouettes[np.argmax(Silhouettes)],"K = "+repr(Ks[np.argmax(Silhouettes)])) plt.ylim(0.95*np.min(Silhouettes),1.05*np.max(Silhouettes)) plt.ylabel("Average silhouette score") #Y-axis label plt.xlabel("K") #X-axis label plt.title("Choice of K") #Plot title plt.tight_layout() plt.savefig("figs/k_silhouette.pdf") plt.figure(2) plt.plot(Ks,Inertias,label='inertia') plt.plot(Ks[np.argmin(Inertias)],Inertias[np.argmin(Inertias)],marker = 'o',color='r',markersize=7) plt.text(Ks[np.argmin(Inertias)]-2,Inertias[np.argmin(Inertias)],"K = "+repr(Ks[np.argmin(Inertias)])) plt.ylim(0.95*np.min(Inertias),1.05*np.max(Inertias)) plt.ylabel("Inertias") #Y-axis label plt.xlabel("K") #X-axis label plt.title("Choice of K") #Plot title plt.tight_layout() plt.savefig("figs/k_inertia.pdf") # ===== # plot cluster centers on 2 principal component axes # ===== from sklearn.decomposition import PCA from sklearn.cluster import KMeans import itertools from sklearn.preprocessing import StandardScaler marker =('+', 'x', 'o', '*','s','^','<','v','>') h = plt.figure() ss = StandardScaler() X = ss.fit_transform(model_data_acc.transpose()) pca = PCA(n_components = 2) X_pca = pca.fit_transform(X) nc=4 unique_classes = np.array((0,1,2,3)) km = KMeans(n_clusters=nc, init='k-means++',copy_x=False,random_state=0).fit(X) labels = km.labels_ centers = km.cluster_centers_ unique_labels = np.unique(labels) # centers_pca = centers centers_pca = pca.transform(centers) colors = plt.cm.Dark2(np.linspace(0, 1, len(unique_labels))) for k, col in zip(unique_labels, colors): label_mask = (k==labels) xy = X_pca[label_mask] plt.plot(xy[:,0], xy[:, 1], linestyle = '', marker=marker[k%nc], markerfacecolor=col, markersize=5, alpha=1) for k, col in zip(unique_labels, colors): plt.plot(centers_pca[k,0],centers_pca[k,1], linestyle='', marker=marker[k%nc], markerfacecolor=col,markersize=20,alpha=0.3) plt.xlabel('PC 1') plt.ylabel('PC 2') plt.tight_layout() h.savefig('figs/k_means_PCA_data.pdf') h2 = plt.figure() features = model_nice for k,col in zip(unique_labels,colors): label_mask = (k==labels) coverage = np.sum(label_mask) xk_mean = np.mean(ss.inverse_transform(X[label_mask]),axis=0) offset = k*0.1-np.mean(np.unique(labels))*0.1 print('col:',col) plt.bar(np.arange(len(features))+offset,xk_mean,align='center',width=0.1,facecolor=col,label='cluster '+marker[k%nc]+' ('+str(coverage)+' instances)') plt.ylim(0,1.1) plt.gca().set_xticks(np.arange(len(features))) plt.gca().set_xticklabels(list(features),fontsize=8,rotation=90) plt.legend(loc=3,fontsize=6) #(bbox_to_anchor=(1.05, 1), plt.tight_layout() h2.savefig('figs/data_ml_bar.pdf') plt.show() ###Output _____no_output_____ ###Markdown Comparison of tuned to un-tuned results ###Code import numpy as np from tqdm import tqdm import pandas as pd data = pd.read_csv('sklearn-benchmark6-data.tsv.gz', sep='\t', names=['dataset', 'classifier', 
'parameters', 'accuracy', 'macrof1', 'bal_accuracy']).fillna('') data = data.groupby(['classifier', 'dataset', 'parameters'])['accuracy'].mean().reset_index() data['accuracy'] = data['accuracy'].apply(lambda x: round(x, 3)) clf_defaults_dict = { 'GradientBoostingClassifier': 'n_estimators=100,min_impurity_decrease=0.0,max_features=None,learning_rate=0.1,loss=deviance,random_state=324089', 'RandomForestClassifier': 'n_estimators=10,min_impurity_decrease=0.0,max_features=sqrt,criterion=gini,random_state=324089', 'SVC': 'C=1.0,gamma=auto,kernel=rbf,degree=2,coef0=0.0,random_state=324089', 'ExtraTreesClassifier': 'n_estimators=10,min_impurity_decrease=0.0,max_features=sqrt,criterion=gini,random_state=324089', 'KNeighborsClassifier': 'n_neighbors=5,weights=uniform', 'LogisticRegression': 'C=1.0,penalty=l2,fit_intercept=True,dual=False,random_state=324089', 'DecisionTreeClassifier': 'min_impurity_decrease=0.0,max_features=None,criterion=gini,random_state=324089', 'PassiveAggressiveClassifier': 'C=1.0,loss=hinge,fit_intercept=False,random_state=324089', 'AdaBoostClassifier': 'learning_rate=1.0,n_estimators=50,random_state=324089', 'BernoulliNB': 'alpha=1.0,fit_prior=True,binarize=0.0', 'GaussianNB': '', 'MultinomialNB': 'alpha=1.0,fit_prior=True', 'XGBClassifier': 'n_estimators=100,learning_rate=0.1,gamma=0.0,max_depth=3,subsample=1.0,seed=324089,nthread=1', 'LinearSVC': 'C=1.0,penalty=l2,fit_intercept=True,dual=True,random_state=324089' } default_params_list = ['-'.join([k, v]) for k, v in clf_defaults_dict.items()] default_scores = data.loc[ data.apply( lambda record: '-'.join([record['classifier'], record['parameters']]) in default_params_list, axis=1)].drop('parameters', axis=1) best_scores = data.groupby(['dataset', 'classifier'])['accuracy'].max().reset_index() merged_scores = default_scores.merge(best_scores, on=['classifier', 'dataset'], suffixes=['_default', '_best']) merged_scores['accuracy_default_scaled'] = merged_scores['accuracy_best'] - merged_scores['accuracy_default'] model_names_dict = { 'AdaBoostClassifier': 'AdaBoost', 'BernoulliNB': 'Bernoulli Naive Bayes', 'LogisticRegression': 'Logistic Regression', 'MultinomialNB': 'Multinomial Naive Bayes', 'PassiveAggressiveClassifier': 'Passive Aggressive', 'GaussianNB': 'Gaussian Naive Bayes', 'DecisionTreeClassifier': 'Decision Tree', 'ExtraTreesClassifier': 'Extra Random Forest', 'RandomForestClassifier': 'Random Forest', 'GradientBoostingClassifier':'Gradient Tree Boosting', 'KNeighborsClassifier': 'K-Nearest Neighbors', 'SVC': 'Support Vector Machine', 'XGBClassifier': 'XGBoost', 'LinearSVC': 'Linear Support Vector Machine' } merged_scores['classifier'] = merged_scores['classifier'].apply(lambda x: model_names_dict[x]) merged_scores.head() %matplotlib inline import seaborn as sb import matplotlib.pyplot as plt sb.set_style('whitegrid') plt.figure(figsize=(9, 12)) sb.boxplot(data=merged_scores.sort_values('classifier'), y='classifier', x='accuracy_default_scaled', notch=True, palette=[sb.color_palette('Purples', n_colors=2)[1]]) plt.ylabel('') plt.xlabel('10-fold CV accuracy improvement by tuning models', fontsize=16) plt.xticks(fontsize=14) plt.yticks(fontsize=14) plt.xlim(0., 0.5) #plt.title('Tuning machine learning model parameters almost always improves\nmodel performance', fontsize=22) #plt.savefig('figs/tuned_untuned_accuracy_boxplot.pdf', bbox_inches='tight') ; ###Output _____no_output_____ ###Markdown print model abbreviation table ###Code model_nice_dict = { 'AdaBoostClassifier': 'AB', 'BernoulliNB': 'BNB', 'LinearSVC': 
'LSVC', 'LogisticRegression': 'LR', 'MultinomialNB': 'MNB', 'PassiveAggressiveClassifier': 'PAC', 'SGDClassifier': 'SGD', 'GaussianNB': 'GNB', 'DecisionTreeClassifier': 'DT', 'ExtraTreesClassifier': 'ET', 'RandomForestClassifier': 'RF', 'GradientBoostingClassifier':'GB', 'KNeighborsClassifier': 'KNN', 'XGBClassifier': 'XGBoost', 'LinearSVC': 'Linear Support Vector Machine' } model_nice = [] for m in model_nice_dict: print(model_nice_dict) import pandas as pd data = pd.read_csv('sklearn-benchmark6-data.tsv.gz', sep='\t', names=['dataset', 'classifier', 'parameters', 'accuracy', 'macrof1', 'bal_accuracy']).fillna('') data = data.groupby(['dataset','classifier'])['bal_accuracy'].max().reset_index() print(data[:5]) import seaborn as sns datan = data.groupby(['dataset']).max().sort_values('bal_accuracy',ascending=False) # print(datan) datamv = data.groupby(['dataset']).median()+data.groupby(['dataset']).var() datamv = datamv.sort_values('bal_accuracy',ascending=True) # for i in datamv.index: # print(data[i,'bal_accuracy']) print(datamv[::2]) # print(datan.index) print(data['dataset'][:5]) plt.figure(figsize=(10,5)) sns.set_style("whitegrid") s=sns.boxplot(data=data,x='dataset',y='bal_accuracy',order=datan.index,fliersize=3,linewidth=0.75) s.set_xticks(np.arange(len(np.unique(data['dataset'])),step=10)) s.set_xticklabels(np.arange(len(np.unique(data['dataset'])),step=10)) yticks = np.hstack((np.arange(0.6,step=0.1),np.arange(0.6,1.05,step=0.05))) s.set_yticks(yticks) s.set_yticklabels(['{0:.2f}'.format(x) for x in yticks],size=9) plt.ylim(0,1.1) plt.ylabel('Balanced Accuracy',size=16) plt.xlabel('Dataset',size=16) h = plt.gcf() h.tight_layout() #h.savefig('figs/boxplot_all.pdf',bbox_inches='tight') #h.savefig('figs/boxplot_all.png',bbox_inches='tight') print('90% cutoff:',len(datan[datan['bal_accuracy']>=0.9])) print('80% cutoff:',len(datan[datan['bal_accuracy']>=0.8])) print('70% cutoff:',len(datan[datan['bal_accuracy']>=0.7])) print('60% cutoff:',len(datan[datan['bal_accuracy']>=0.6])) # for i,d in enumerate(datan.index): # print('data set ',i,':',data['dataset'][d]) # plt.gca().set_xticks(np.arange(len(data),step=10)) # plt.gca().set_xticklabels(str(np.arange(len(data),step=10))) cutoff = np.empty(100) for i in np.arange(100): cutoff[i] = len(datan[datan['bal_accuracy']>=i/100])/len(datan) plt.bar(np.arange(len(cutoff)),cutoff) plt.xlim(50,100) plt.show() 149./165 ###Output _____no_output_____
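###Markdown Editor's note: the two "how many models / model-parameter combinations cover all data sets" cells above repeat the same greedy pattern by hand: pick whichever model covers the most still-uncovered data sets, drop those data sets, and repeat. A standalone version of that loop (function and variable names are my own) may be easier to reuse or test; it expects the `dataset_best_models` mapping already built above. ###Code
from collections import defaultdict

def greedy_cover(dataset_best_models):
    """Greedily pick models until every dataset is covered by at least one near-best model.

    dataset_best_models maps each dataset name to the models that reach >= 99% of the
    best accuracy on that dataset, as computed in the cells above.
    """
    uncovered = set(dataset_best_models)
    picks = []
    while uncovered:
        counts = defaultdict(int)
        for dataset in uncovered:
            for model in dataset_best_models[dataset]:
                counts[model] += 1
        if not counts:
            break  # some datasets have no qualifying model left
        best = max(counts, key=counts.get)
        picks.append((counts[best], best))
        uncovered = {d for d in uncovered if best not in dataset_best_models[d]}
    return picks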
docs/contents/Working_with_OpenMM.ipynb
###Markdown Working with OpenMM ###Code import numpy as np import matplotlib.pyplot as plt from simtk import unit from simtk import openmm as omm from simtk.openmm import app import molsysmt as msm ###Output _____no_output_____
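###Markdown Editor's note: this notebook currently stops at the imports. As a minimal, hedged illustration of the `simtk`-era OpenMM workflow those imports point to, the sketch below sets up and runs a short simulation; the PDB file name and force-field XMLs are placeholders rather than files shipped with this documentation, and MolSysMT itself is not used here. ###Code
# Hypothetical input structure; replace with a real file.
pdb = app.PDBFile('alanine_dipeptide.pdb')

forcefield = app.ForceField('amber14-all.xml', 'amber14/tip3pfb.xml')
system = forcefield.createSystem(pdb.topology,
                                 nonbondedMethod=app.NoCutoff,
                                 constraints=app.HBonds)

integrator = omm.LangevinIntegrator(300 * unit.kelvin,
                                    1.0 / unit.picosecond,
                                    2.0 * unit.femtoseconds)

simulation = app.Simulation(pdb.topology, system, integrator)
simulation.context.setPositions(pdb.positions)
simulation.minimizeEnergy()
simulation.step(1000)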
Building Good Training Dataset.ipynb
###Markdown Import Libraries ###Code import pandas as pd import numpy as np ###Output _____no_output_____ ###Markdown Read Dataset ###Code df=pd.read_csv("Iris-f.csv") df.head() ###Output _____no_output_____ ###Markdown Initial Inspection ###Code df.shape df.info() df.describe() ###Output _____no_output_____ ###Markdown Display of null values ###Code print(df[df.isna().any(axis=1)]) df1=df.copy(deep=True) df1.isna().sum() ###Output _____no_output_____ ###Markdown Dropping of Rows/Columns ###Code df1.dropna(axis=1,thresh=2,inplace=True) # inplace to overwrite in same record axis=0 - drop rows df1.isna().sum() df1.shape df.isna().sum() df2=df.copy(deep=True) df2.isna().sum() ###Output _____no_output_____ ###Markdown Imputing Missing Values ###Code m=df2["PetalLengthCm"].median() df2["PetalLengthCm"]=df2["PetalLengthCm"].fillna(m) df2.iloc[[20,37],:] df=pd.read_csv("Iris-f.csv") from sklearn.impute import SimpleImputer imp=SimpleImputer(missing_values=np.nan,strategy='mean') df_n=df.iloc[:,0:4] imputed=imp.fit_transform(df_n) imputed names=["sepal_len","sepal_width","petal_len","petal_width"] df3=pd.DataFrame(imputed,columns=names) df3["Species"]=df["Species"].fillna(method="bfill") df3.iloc[[28],:] df4=df3.copy(deep=True) df4.shape df4.isna().sum() ###Output _____no_output_____ ###Markdown Handling Categorical Data ###Code df4["Species"].value_counts() mapping={"Iris-setosa":3, "Iris-virginica":2, "Iris-versicolor":1} df4["Species"]=df4["Species"].map(mapping) df4.head() from sklearn.preprocessing import LabelEncoder le = LabelEncoder() y = le.fit_transform(df3['Species']) y ###Output _____no_output_____ ###Markdown Inverse Encoding ###Code actual=le.inverse_transform(y) actual ###Output _____no_output_____ ###Markdown One hot Encoding ###Code from sklearn.preprocessing import OneHotEncoder ohe = OneHotEncoder() X=df3[["Species"]] ohe.fit_transform(X).toarray() ###Output _____no_output_____ ###Markdown Dummies ###Code pd.get_dummies(df['Species']) ###Output _____no_output_____ ###Markdown Feature Scaling ###Code from sklearn.preprocessing import MinMaxScaler x=df4.iloc[:,0:5] mmscaler = MinMaxScaler() x_scaled = mmscaler.fit_transform(x) x_scaled ###Output _____no_output_____ ###Markdown Feature Selection ###Code from sklearn.feature_selection import SelectKBest from sklearn.feature_selection import chi2,f_regression y=df4['Species'] # numeric predictor variable so, use f_regression in selectKbest #if it is categorical use chi2 X=df4.iloc[:,0:4] selectk = SelectKBest(chi2, k=2) X_new=selectk.fit_transform(X, y) # to get the column indices cols = selectk.get_support(indices=True) #to get the dataframe with new features new_df = df4.iloc[:,cols] ###Output _____no_output_____ ###Markdown Feature Extraction ###Code df_p=pd.read_csv("Iris-f.csv") df_p.fillna(method="bfill",inplace=True) x=df_p.iloc[:,0:4] from sklearn.decomposition import PCA pca=PCA(n_components=2) x_new=pca.fit_transform(x) new_df=pd.DataFrame(x_new,columns=["comp1","comp2"]) new_df.head() ###Output _____no_output_____ ###Markdown Train Test Split - Sampling ###Code from sklearn.model_selection import train_test_split X=df3.iloc[:,0:4] y=df3['Species'] X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.20,random_state=1,shuffle=True) X_train.shape X_test.shape df3.shape y_train.value_counts() ###Output _____no_output_____ ###Markdown Stratified Sampling - Equal number of samples on each class ###Code X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.20,random_state=1,stratify=y) y_train.value_counts() 
###Output _____no_output_____
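###Markdown Editor's note: the notebook applies imputation, scaling, feature selection, and splitting as separate manual steps. Below is a hedged sketch of how the same numeric steps can be bundled into a scikit-learn `Pipeline`, so every statistic is learned from the training split only; column names follow the `df3` frame built above, and the classifier at the end is just a placeholder. ###Code
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import MinMaxScaler
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

X = df3[["sepal_len", "sepal_width", "petal_len", "petal_width"]]
y = df3["Species"]

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.20, random_state=1, stratify=y)

pipe = Pipeline([
    ("impute", SimpleImputer(strategy="mean")),   # fill missing values
    ("scale", MinMaxScaler()),                    # keeps features non-negative for chi2
    ("select", SelectKBest(chi2, k=2)),           # keep the 2 most informative features
    ("clf", LogisticRegression(max_iter=1000)),
])

pipe.fit(X_train, y_train)        # preprocessing statistics come from the training split only
print(pipe.score(X_test, y_test))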
2D Analysis/AnalysisRunComp.ipynb
###Markdown Compare particle runsIn this notebook, I compare the different Parcels output files, with the primary goal of analysing what the most efficient way is to decrease the number of particles 'stuck' on the coral. All output files are preceded by `'d2c'`, so I search the output folders for files containing this prefix, read the files and extract the metadata into a pandas dataframe. This dataframe can then be used for analysis. ###Code import pandas as pd import re import xarray as xr import os import numpy as np import matplotlib.pyplot as plt import seaborn as sns import matplotlib as mpl from matplotlib.colors import ListedColormap import cmocean plt.style.use('ggplot') sdata = data.where(data['finaldistance'][:,-1]==0,drop=True) bdata = data.where(data['closestobject'][:,-1]==len(objects),drop=True) fig = plt.figure() ax = plt.axes(xticks=[],yticks=[]) ax.set_title('Dashboard '+dfilename) ax.text(0.05,0.95,'Flow: '+flow) ax.text(0.05,0.9,'Time direction: '+fb) ax.text(0.05,0.85,'Runtime: '+str(runtime.seconds)+' seconds') ax.text(0.05,0.8,'Execution dt: '+str(dt.total_seconds())+' seconds') ax.text(0.05,0.75,'Output dt: '+str(outputdt.total_seconds())+' seconds') ax.text(0.05,0.7,'dx/dy: '+str(dx)+' m') ax.text(0.05,0.65,'Number of particles released: '+str(len(data['traj']))) ax.text(0.05,0.6,'Number of particles beached: '+str(len(bdata['traj']))) ax.text(0.05,0.55,'Percentage of particles beached: '+str(np.round(len(bdata['traj'])/len(data['traj'])*100,1))+' %') # plt.savefig('Figures/Dashboard-'+flow+fb) ###Output _____no_output_____ ###Markdown Run the next cell to create the dataframe and save it using pandas' `to_pickle`. If you want to analyse a saved dataframe, skip this cell. ###Code output = pd.DataFrame(columns=['flow','dx','time direction','dt','runtime','execution time','output dt','#particles','#stuck','%stuck','#beached','%beached','beaching scheme']) foldernames = ['16objects','21objects'] for folder in foldernames: objects = np.load(folder+'/preprocessed/objects.npy') for file in os.listdir(folder+'/pfiles'): data = xr.open_dataset(folder+'/pfiles/'+file) sdata = data.where(data['finaldistance'][:,-1]==0,drop=True) bdata = data.where(data['closestobject'][:,-1]==len(objects),drop=True) filelist = file.split('-') flow = filelist[-3] fb = filelist[-1][:-3] filename = flow+'.nc' flowdata = xr.open_dataset(folder+'/'+filename) dx = flowdata['X'][1]-flowdata['X'][0] dx = dx.values outputdtlist = data.attrs['outputdt'].split() outputdt = float(outputdtlist[0]) dtlist = data.attrs['dt'].split() dt = float(dtlist[0]) runtimelist = data.attrs['runtime'].split() runtime = float(runtimelist[0]) if 'executiontime' in data.attrs: exectimelist = data.attrs['executiontime'].split() exectime = float(exectimelist[0]) else: exectime = None if 'beaching_strategy' in data.attrs: strategy = data.attrs['beaching_strategy'] else: strategy = 0 df = pd.DataFrame([[flow,dx,fb,dt,runtime,exectime,outputdt,len(data['traj']),len(sdata['traj']),np.round(len(sdata['traj'])/len(data['traj'])*100,2),len(bdata['traj']),np.round(len(bdata['traj'])/len(data['traj'])*100,2),strategy]],columns=['flow','dx','time direction','dt','runtime','execution time','output dt','#particles','#stuck','%stuck','#beached','%beached','beaching scheme']) frames = [output,df] output = pd.concat(frames) output.to_pickle('RunsComp.pkl') output = pd.read_pickle('RunsComp.pkl') dx.values # comp = output.where(abs(output['dt']) == 0.01) # comp = output.where(output['flow'] == 'waveparabolic') # output = comp.dropna() 
output my_cmap = ListedColormap(['dodgerblue','g','purple','darkorange','red','purple']) %matplotlib qt fig = plt.figure() ax = plt.axes() ax.set_title('Boundary conditions') ax.set_ylabel('Particles stuck [%]') ax.set_xlabel('Boundary condition') bp0 = ax.scatter([0]*len(output['%beached'][output['beaching scheme']==0]),output['%beached'][output['beaching scheme']==0],c = [my_cmap(0)],alpha=0.7,label='0',edgecolors='k') bp1 = ax.scatter([1]*len(output['%beached'][output['beaching scheme']==1]),output['%beached'][output['beaching scheme']==1],c = [my_cmap(1)],alpha=0.7,label='1',edgecolors='k') bp2 = ax.scatter([2]*len(output['%beached'][output['beaching scheme']==2]),output['%beached'][output['beaching scheme']==2],c = [my_cmap(2)],alpha=0.7,label='2',edgecolors='k') ax.set_xticks([0,1,2]) ax.set_xticklabels([0,1,2]) plt.savefig('Figures/boundary_conditions') %matplotlib qt dx_cmap = plt.get_cmap('cmo.matter',len(np.unique(output['beaching scheme']))) fig = plt.figure() ax = plt.axes() ax.set_title('Beaching') ax.set_ylabel('Particles beached [%]') ax.set_xlabel('dt [s]') sc0 = ax.scatter(output['dt'][output['beaching scheme']==0],output['%beached'][output['beaching scheme']==0],c = [my_cmap(0)],alpha=0.7,label='0',edgecolors='k') sc1 = ax.scatter(output['dt'][output['beaching scheme']==1],output['%beached'][output['beaching scheme']==1],c = [my_cmap(1)],alpha=0.7,label='1',edgecolors='k') sc2 = ax.scatter(output['dt'][output['beaching scheme']==2],output['%beached'][output['beaching scheme']==2],c = [my_cmap(2)],alpha=0.7,label='2',edgecolors='k',s=50,marker='^') sc3 = ax.scatter(output['dt'][output['beaching scheme']==3],output['%beached'][output['beaching scheme']==3],c = [my_cmap(3)],alpha=0.7,label='3',edgecolors='k') # sc4 = ax.scatter(output['dt'][output['beaching scheme']==4],output['%beached'][output['beaching scheme']==4],c = [my_cmap(4)],label='4',edgecolors='k') # sc5 = ax.scatter(output['dt'][output['beaching scheme']==5],output['%beached'][output['beaching scheme']==5],c = [my_cmap(5)],label='5',edgecolors='k') ax.legend(title = 'beaching scheme') ax.set_xscale('log') ax.set_xlim(left=0.00005) ax.set_ylim(top=10) # ax.set_yscale('log') plt.savefig('Figures/runcomp-beaching-dtdx') %matplotlib qt fig = plt.figure() ax = plt.axes() ax.set_title('Stuck') ax.set_ylabel('Particles stuck [%]') ax.set_xlabel('dt [s]') sc0 = ax.scatter(output['dt'][output['beaching scheme']==0],output['%stuck'][output['beaching scheme']==0],c = [my_cmap(0)],alpha=0.7,label='0',edgecolors='k') sc1 = ax.scatter(output['dt'][output['beaching scheme']==1],output['%stuck'][output['beaching scheme']==1],c = [my_cmap(1)],alpha=0.7,label='1',edgecolors='k') sc2 = ax.scatter(output['dt'][output['beaching scheme']==2],output['%stuck'][output['beaching scheme']==2],c = [my_cmap(2)],alpha=0.7,label='2',edgecolors='k',s=50,marker='^') sc3 = ax.scatter(output['dt'][output['beaching scheme']==3],output['%stuck'][output['beaching scheme']==3],c = [my_cmap(3)],alpha=0.7,label='3',edgecolors='k') # sc4 = ax.scatter(output['dt'][output['beaching scheme']==4],output['%stuck'][output['beaching scheme']==4],c = [my_cmap(4)],label='4',edgecolors='k') ax.legend(title='beaching scheme',loc = 'upper center') ax.set_xscale('log') ax.set_xlim(left=0.00005) ax.set_ylim(bottom=0) ax.set_ylim(top=10) # plt.savefig('Figures/runcomp-stuck-dtdx') %matplotlib qt fig = plt.figure() ax = plt.axes() ax.set_title('Beached vs Stuck') ax.set_ylabel('Particles beached [%]') ax.set_xlabel('Particles stuck [%]') x = np.linspace(0,5) y = x 
ax.plot(x,y,label='beached = stuck') sc0 = ax.scatter(output['%stuck'][output['beaching scheme']==0],output['%beached'][output['beaching scheme']==0],c = [my_cmap(0)],edgecolors='k',alpha=0.7,label='0') sc1 = ax.scatter(output['%stuck'][output['beaching scheme']==1],output['%beached'][output['beaching scheme']==1],c = [my_cmap(1)],edgecolors='k',alpha=0.7,label='1') sc2 = ax.scatter(output['%stuck'][output['beaching scheme']==2],output['%beached'][output['beaching scheme']==2],c='w',edgecolors='k',alpha=0.7,label='2') sc3 = ax.scatter(output['%stuck'][output['beaching scheme']==3],output['%beached'][output['beaching scheme']==3],c = [my_cmap(3)],edgecolors='k',alpha=0.7,label='3') # sc4 = ax.scatter(output['%stuck'][output['beaching scheme']==4],output['%beached'][output['beaching scheme']==4],c='yellow',edgecolors='k',alpha=0.7,label='4') # ax.set_ylim(bottom=0) # ax.set_xlim(left=0) ax.legend(title = 'beaching scheme') # plt.savefig('Figures/runcomp-beached-vs-stuck') %matplotlib qt fig = plt.figure() ax = plt.axes() ax.set_title('Beached vs Stuck') ax.set_ylabel('Particles beached/stuck [%]') ax.set_xlabel('Beaching scheme') sc0 = ax.scatter(output['beaching scheme'][output['dt']==0.01],output['%beached'][output['dt']==0.01],c = 'orangered',edgecolors='k',alpha=0.7,label='0.01') sc1 = ax.scatter(output['beaching scheme'][output['dt']==0.001],output['%beached'][output['dt']==0.001],c = 'deepskyblue',edgecolors='k',alpha=0.7,label='0.001') sc2 = ax.scatter(output['beaching scheme'][output['dt']==0.0001],output['%beached'][output['dt']==0.0001],c = 'greenyellow',edgecolors='k',alpha=0.7,label='0.0001') # sc0 = ax.scatter(output['beaching scheme'][output['dt']==0.01],output['%stuck'][output['dt']==0.01],c = 'orangered',marker='^',edgecolors='k',alpha=0.7,label='0') # sc1 = ax.scatter(output['beaching scheme'][output['dt']==0.001],output['%stuck'][output['dt']==0.001],c = 'deepskyblue',marker='^',edgecolors='k',alpha=0.7,label='1') ax.legend(title = 'dt') ###Output _____no_output_____
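###Markdown As an optional wrap-up (a hedged sketch that is not part of the original comparison; it only reuses the columns of the dataframe built above), the scatter plots can be complemented with a small numeric summary per beaching scheme: ###Code # Hedged sketch: mean stuck/beached percentages per beaching scheme and time step
summary = (output.groupby(['beaching scheme', 'dt'])[['%stuck', '%beached']]
           .mean()
           .round(2))
summary
###Output _____no_output_____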
week10_IP.ipynb
###Markdown Group Members* Vanessa Ng'eno* Panchol Alier* Noah Kandie* Mildred Kulei Research Question This analysis aims to develop a prediction model that determines whether or not a tweet refers to a real disaster, based on the keyword and the message tweeted. Metric of Success The model will be considered successful if it predicts with at least 80% accuracy. The overall project will be considered a success when we are able to develop a reliable prediction model that has a high accuracy level. We shall make sure the model doesn't suffer from any instances of overfitting or underfitting. Context This dataset contains 7613 tweets extracted using the Twitter API. The tweets have been annotated (0 = not a disaster, 1 = disaster) and they can be used to detect if a tweet is about a disaster or not. Content It contains the following fields:* target: the label of the tweet (0 = not a disaster, 1 = disaster)* id: the id of the tweet (e.g. 2087)* location: the location the tweet was sent from* keyword: a keyword extracted from the text column* text: the text of the tweet Experimental Design and Implementation The following steps will be used in this analysis to help achieve the intended goal:* Load the data and preview preliminary characteristics* Data cleaning* EDA* Implementing the solution* Findings and recommendations ###Code import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import r2_score
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import classification_report
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.metrics.pairwise import cosine_similarity
import heapq
from collections import Counter
from nltk.corpus import stopwords
from nltk.util import ngrams
from sklearn.feature_extraction.text import CountVectorizer
from collections import defaultdict
from collections import Counter
plt.style.use('ggplot')
import re
from nltk.tokenize import word_tokenize
import gensim
import string
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from tqdm import tqdm
from keras.models import Sequential
from keras.layers import Embedding,LSTM,Dense,SpatialDropout1D
from keras.initializers import Constant
from sklearn.model_selection import train_test_split
from keras.optimizers import Adam
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import confusion_matrix, classification_report
import matplotlib.pyplot as plt
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import np_utils
from keras.models import Sequential
from keras import layers
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn import metrics
import sklearn.metrics as metrics
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.svm import SVC,LinearSVC
# Reading the data from the csv files
data_train = pd.read_csv('train.csv')
data_test = pd.read_csv('test.csv')
data_train
data_test
###Output _____no_output_____
###Markdown Reading the data
###Code # Reading the data from the csv file data_test = pd.read_csv('train.csv') data_test ###Output _____no_output_____ ###Markdown Checking the data ###Code # Checking the top 5 records data_train.head(5) data_train.tail(5) data_train.loc[1] ###Output _____no_output_____ ###Markdown 1 - real disaster0 - no disaster ###Code # Confirming number of records and columns data_train.shape # Checking the attributes data type data_train.info() # Checking number of unique variables for each column data_train.nunique() ###Output _____no_output_____ ###Markdown Data Cleaning ###Code # Checking for duplicates data_train.duplicated().any() # Check for missing data data_train.isnull().sum() data_train = data_train.drop(columns=['id']) # Drop remaining null values data_train = data_train.dropna() data_train data_train['chars_num'] = data_train['text'].apply(lambda t: len(t)) data_train #reoving the URL links example="New competition launched :https://www.kaggle.com/c/nlp-getting-started" def remove_URL(text): url = re.compile(r'https?://\S+|www\.\S+') return url.sub(r'',text) remove_URL(example) data_train['text']=data_train['text'].apply(lambda x : remove_URL(x)) data_train # Checking the shape of the new dataset data_train.shape #removing all the panctuation marks in 'text' column def remove_punct(text): table=str.maketrans('','',string.punctuation) return text.translate(table) example="I am a #king" print(remove_punct(example)) data_train['text']=data_train['text'].apply(lambda x : remove_punct(x)) #confirming that the punctuation marks have been removed. data_train #removing punctuation arks in 'location' column def remove_punct(location): table=str.maketrans('','',string.punctuation) return location.translate(table) example="I am a #king" print(remove_punct(example)) data_train['location']=data_train['location'].apply(lambda x : remove_punct(x)) data_train #putting the texts characters to lower case data_train['text'] = data_train['text'].map(lambda x: x.lower()) #lowering the characters in location column data_train['location'] = data_train['location'].map(lambda x: x.lower()) data_train #function to remove emojis def remove_emoji(text): emoji_pattern = re.compile("[" u"\U0001F600-\U0001F64F" # emoticons u"\U0001F300-\U0001F5FF" # symbols & pictographs u"\U0001F680-\U0001F6FF" # transport & map symbols u"\U0001F1E0-\U0001F1FF" # flags (iOS) u"\U00002702-\U000027B0" u"\U000024C2-\U0001F251" "]+", flags=re.UNICODE) return emoji_pattern.sub(r'', text) remove_emoji("Omg another Earthquake 😔😔") #removing the emojis in text column if any. data_train['text']=data_train['text'].apply(lambda x: remove_emoji(x)) data_train ## Countplot for the target variable Not disaster and not disaster plt.figure(figsize = (30,8)) sns.countplot(data_train["keyword"]) plt.title(" Bar Chart of keyword") plt.xticks(rotation=90) plt.show() ###Output /usr/local/lib/python3.7/dist-packages/seaborn/_decorators.py:43: FutureWarning: Pass the following variable as a keyword arg: x. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation. 
FutureWarning ###Markdown The most used keyword was "collision" ###Code data_train['keyword'].value_counts() sns.countplot(data_train['keyword'], color='blue',saturation=0.5,order=data_train['keyword'].value_counts(sort=True).iloc[:10].index) plt.xticks(rotation=90) plt.xlabel('keywords in the tweet') plt.ylabel('Count') #summary statistics data_train.describe() #Skewness data_train.skew() #checking for unique status in the dataset data_train['keyword'].unique() data_train['target'].unique x=data_train.target.value_counts() sns.barplot(x.index,x) plt.gca().set_ylabel('samples') classes = data_train.target.unique() counts = [] for i in classes: count = len(data_train[data_train.target==i]) counts.append(count) plt.bar(['disaster', 'not disaster'], counts) plt.show() # Get the top 10 keywords data_train['keyword'].value_counts(sort=True).head(10) # plotting bar charts to check distributions fig,ax=plt.subplots(1,2,figsize=(21,7)) sns.countplot('target',data=data_train,ax=ax[0]) ax[0].set_title('Target Status') sns.countplot('keyword',data=data_train,ax=ax[1],orient='ho',order=data_train['keyword'].value_counts(sort=True).iloc[:10].index) ax[1].set_title('Keyword Status') plt.show() #ploting bar charts of the tweet's characters fig,(ax1,ax2)=plt.subplots(1,2,figsize=(10,5)) tweet_len=data_train[data_train['target']==1]['text'].str.len() ax1.hist(tweet_len,color='red') ax1.set_title('disaster tweets') tweet_len=data_train[data_train['target']==0]['text'].str.len() ax2.hist(tweet_len,color='green') ax2.set_title('Not disaster tweets') fig.suptitle('Characters in tweets') plt.show() # Ploting the summaries and recording observations sns.pairplot(data_train) plt.show() #There is no relationship between target and chars_num ###Output _____no_output_____ ###Markdown Tensor flow NN ModelingTo start modelling we still need to do a couple of things:1. Split data into train and test datasets2. Tokenize it3. Pad the text4. Encode the labels ###Code # We will also download and import nlkt which is a tokenizer. # This library will help us break (messages) into individual linguistic units i.e. words. # import nltk nltk.download('punkt') # Pre-processing # Tokenizing the messages into into single words using nltk. # Applying the tokenization # data_train['text']= data_train['text'].apply(nltk.word_tokenize) data_train[:3] # We will perform some word stemming. # The idea of stemming is to normalize our text for all variations of words carry the same meaning, # regardless of the tense. One of the most popular stemming algorithms is the Porter Stemmer: # from nltk.stem import PorterStemmer stemmer = PorterStemmer() data_train['text']= data_train['text'].apply(lambda x: [stemmer.stem(y) for y in x]) # Finally, we will transform the data into occurrences, # which will be the features that we will feed into our model # from sklearn.feature_extraction.text import CountVectorizer # This converts the list of words into space-separated strings data_train['text'] = data_train['text'].apply(lambda x: ' '.join(x)) count_vect = CountVectorizer() counts = count_vect.fit_transform(data_train['text']) # We could leave it as the simple word-count per message, but it is better to use Term Frequency Inverse Document Frequency, more known as tf-idf # from sklearn.feature_extraction.text import TfidfTransformer transformer = TfidfTransformer().fit(counts) counts = transformer.transform(counts) counts # Training the Model # Now that we have performed feature extraction from our data, it is time to build our model. 
# We will start by splitting our data into training and test sets # from sklearn.model_selection import train_test_split X_train, x_test, y_train, y_test = train_test_split(data_train['text'], data_train['target'], test_size=0.1, random_state=69) x_tr,x_val,y_tr,y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=69,shuffle=True) sample_size = int(len(data_train)*1) sampleDf = data_train.sample(sample_size, random_state=23) x = sampleDf.text.values y = sampleDf.target.values x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.20, random_state=32) data_train.shape x_train.shape,x_test.shape #turning a meaningful piece of tweet character, into a random string of characters called a token tokenizer = Tokenizer() tokenizer.fit_on_texts(x_train) X_train = tokenizer.texts_to_sequences(x_train) X_test = tokenizer.texts_to_sequences(x_test) vocab_size = len(tokenizer.word_index) + 1 # Adding 1 because of reserved 0 index print(x_train[0]) print(X_train[0]) lens_train = [len(i) for i in X_train] lens_test = [len(i) for i in X_test] lens = lens_train + lens_test maxlen = np.max(lens) print('Max len:', maxlen) X_train = pad_sequences(X_train, padding='post', maxlen=maxlen) X_test = pad_sequences(X_test, padding='post', maxlen=maxlen) encoder = LabelEncoder() encoder.fit(y) encoded_Y_test = encoder.transform(y_test) encoded_Y_train = encoder.transform(y_train) # convert integers to dummy variables (i.e. one hot encoded) dummy_y_test = np_utils.to_categorical(encoded_Y_test) dummy_y_train = np_utils.to_categorical(encoded_Y_train) ###Output _____no_output_____ ###Markdown ModellingThe model that we use here is a simple Neural Network. ###Code embedding_dim = 100 # initiates the model model = Sequential() model.add(layers.Embedding(input_dim=vocab_size, output_dim=embedding_dim, input_length=maxlen)) model.add(layers.Dropout(0.2)) model.add(layers.GlobalMaxPool1D()) model.add(layers.Dropout(0.2)) model.add(layers.Dense(50, activation='relu')) # additional hidden layer model.add(layers.Dropout(0.2)) model.add(layers.Dense(50, activation='relu')) model.add(layers.Dropout(0.2)) model.add(layers.Dense(2, activation='softmax')) model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) model.summary() model2=model # Training the dataset history = model.fit(X_train, dummy_y_train, epochs=5, # times model will run through the data verbose=True, validation_data=(X_test, dummy_y_test), batch_size=128) # data is set to batches we are sent to the model to predict, imagine each batc as a step in which model tries to predict the class and then checks the right answer and corrects it's weights with backpropogation loss, accuracy = model.evaluate(X_train, dummy_y_train, verbose=False) print("Training Accuracy: {:.4f}".format(accuracy)) loss, accuracy = model.evaluate(X_test, dummy_y_test, verbose=False) print("Testing Accuracy: {:.4f}".format(accuracy)) import tensorflow as tf import datetime ###Output _____no_output_____ ###Markdown ###Code rm -rf ./logs/ ###Output _____no_output_____ ###Markdown Plot the model training logs ###Code plt.style.use('ggplot') def plot_history(history): acc = history.history['accuracy'] val_acc = history.history['val_accuracy'] loss = history.history['loss'] val_loss = history.history['val_loss'] x = range(1, len(acc) + 1) plt.figure(figsize=(12, 5)) plt.subplot(1, 2, 1) plt.plot(x, acc, 'b', label='Training acc') plt.plot(x, val_acc, 'r', label='Validation acc') plt.title('Training and validation accuracy') plt.legend() 
    plt.subplot(1, 2, 2)
    plt.plot(x, loss, 'b', label='Training loss')
    plt.plot(x, val_loss, 'r', label='Validation loss')
    plt.title('Training and validation loss')
    plt.legend()

plot_history(history)
###Output _____no_output_____
###Markdown Essentially, the point where the training curve crosses the validation curve is where you would want to stop training: beyond it the model starts memorizing the training examples instead of learning general patterns, i.e. it overfits. Here that happens around the 3rd epoch. ###Code # Training the model again, stopping at 3 epochs
epoch3 = model2.fit(X_train, dummy_y_train,
                    epochs=3, # number of passes the model makes over the training data
                    verbose=True,
                    validation_data=(X_test, dummy_y_test),
                    batch_size=128) # the data is fed to the model in batches; after each batch the predictions are compared with the true labels and the weights are corrected via backpropagation
loss, accuracy1 = model2.evaluate(X_train, dummy_y_train, verbose=False)
print("Training Accuracy: {:.4f}".format(accuracy1))
loss, accuracy1 = model2.evaluate(X_test, dummy_y_test, verbose=False)
print("Testing Accuracy: {:.4f}".format(accuracy1))
# # Visualize the logs using tensorboard.
# %load_ext tensorboard
# %tensorboard --logdir lightning_logs/
# #%tensorboard --logdir logs/fit
# these are the encoded classes
encoder.classes_
###Output _____no_output_____
###Markdown Confusion matrix ###Code # Our predictions are stored as arrays with probabilities for each class.
y_pred = model.predict(X_test)
y_pred
y_predDecoded = [encoder.classes_[np.argmax(i)] for i in y_pred] # take the class with the highest probability for each prediction
cm = confusion_matrix(y_test, y_predDecoded, labels=data_train.target.unique())
df_cm = pd.DataFrame(cm, index=data_train.target.unique(), columns=data_train.target.unique())
df_cm
###Output _____no_output_____
###Markdown The model classifies 816 tweets correctly while 200 tweets are misclassified, i.e. an accuracy of about 80% on this test split.
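###Markdown As an optional visual check (a minimal sketch, not part of the original notebook; it only reuses the df_cm frame built above), the confusion matrix can also be drawn as a heatmap: ###Code # Hedged sketch: heatmap of the confusion matrix computed above
plt.figure(figsize=(5, 4))
sns.heatmap(df_cm, annot=True, fmt='d', cmap='Blues')
plt.xlabel('Predicted')
plt.ylabel('Actual')
plt.show()
###Output _____no_output_____
###Markdown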
Implementating the other models for comparison ###Code from xgboost import plot_importance from xgboost.sklearn import XGBClassifier from sklearn.naive_bayes import MultinomialNB from sklearn.naive_bayes import BernoulliNB ###Output _____no_output_____ ###Markdown Naive Bayes modelsBernoulli Naive Bayes ###Code # Creating our Bernoulli Naive Bayes object with prior probabilities of each class model = BernoulliNB() # Train model model.fit(X_train, y_train) # model score model.score(X_train, y_train) # Predict ypredb = model.predict(X_test) # Accuracy Summary from sklearn.metrics import classification_report, confusion_matrix print(confusion_matrix(y_test, ypredb)) print(classification_report(y_test, ypredb)) ###Output [[244 331] [151 290]] precision recall f1-score support 0 0.62 0.42 0.50 575 1 0.47 0.66 0.55 441 accuracy 0.53 1016 macro avg 0.54 0.54 0.52 1016 weighted avg 0.55 0.53 0.52 1016 ###Markdown recored an accuracy of 53% for Bernoulli Random Forest classifier ###Code # Create a random forest classifier forest = RandomForestClassifier(n_estimators = 100, random_state=42, min_samples_split = 20, max_depth=5) # Fitting the model forest = forest.fit(X_train, y_train) # Predict based on the model we've trained y_pred = forest.predict(X_test) # Comparing a sample of the prediction comparison_frame = pd.DataFrame({'Actual': y_test.flatten(), 'Predicted': y_pred.flatten()}) print(comparison_frame.sample(n=5)) print(comparison_frame.describe()) # Assessing model accuracy #print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred)) print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred)) print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred))) print("Accuracy:",metrics.accuracy_score(y_test, y_pred)) confusion_matrix = confusion_matrix(y_test, y_pred) print(confusion_matrix) ###Output Mean Squared Error: 0.33661417322834647 Root Mean Squared Error: 0.5801846027156757 Accuracy: 0.6633858267716536 [[499 76] [266 175]] ###Markdown Naive Bayes model GNG ###Code # Importing from sklearn.naive_bayes import GaussianNB # Trainin our model model = GaussianNB() model.fit(X_train, y_train) # prediction predicted = model.predict(X_test) # Accuracy from sklearn.metrics import classification_report, confusion_matrix print(confusion_matrix(y_test, predicted)) print(classification_report(y_test, predicted)) ###Output [[ 22 553] [ 13 428]] precision recall f1-score support 0 0.63 0.04 0.07 575 1 0.44 0.97 0.60 441 accuracy 0.44 1016 macro avg 0.53 0.50 0.34 1016 weighted avg 0.55 0.44 0.30 1016 ###Markdown Gaussian recored an accuracy of 44% KNN Model ###Code # Training the model classifier = KNeighborsClassifier(n_neighbors=5) classifier.fit(X_train, y_train) # Making predictions y_pred1 = classifier.predict(X_test) # Comparing a sample of the prediction comparison_frame = pd.DataFrame({'Actual': y_test.flatten(), 'Predicted': y_pred1.flatten()}) print(comparison_frame.sample(n=5)) print(comparison_frame.describe()) # Evaluating the model print(confusion_matrix(y_test, y_pred1)) print(classification_report(y_test, y_pred1)) ###Output [[351 224] [226 215]] precision recall f1-score support 0 0.61 0.61 0.61 575 1 0.49 0.49 0.49 441 accuracy 0.56 1016 macro avg 0.55 0.55 0.55 1016 weighted avg 0.56 0.56 0.56 1016 ###Markdown The classifer didn't do the good job. 
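###Markdown One optional refinement before moving on (a hedged sketch, not part of the original workflow; it reuses the X_train/y_train split from the KNN cell above) is to choose k with a small cross-validated grid search instead of fixing n_neighbors=5: ###Code # Hedged sketch: cross-validated search over k for the KNN classifier
from sklearn.model_selection import GridSearchCV

knn_grid = GridSearchCV(KNeighborsClassifier(),
                        param_grid={'n_neighbors': [3, 5, 7, 9, 11]},
                        cv=5, scoring='accuracy')
knn_grid.fit(X_train, y_train)
print(knn_grid.best_params_, knn_grid.best_score_)
###Output _____no_output_____
###Markdown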
Optimazing the model by using LDA ###Code lda = LDA(n_components=2) X_train = lda.fit_transform(X_train, y_train) X_test = lda.transform(X_test) # Training and Making Predictions classifier = KNeighborsClassifier(n_neighbors=5) classifier.fit(X_train, y_train) y_pred = classifier.predict(X_test) # Comparing a sample of the prediction comparison_frame = pd.DataFrame({'Actual': y_test.flatten(), 'Predicted': y_pred.flatten()}) print(comparison_frame.sample(n=5)) print(comparison_frame.describe()) # Evaluating the model print(confusion_matrix(y_test, y_pred)) print(classification_report(y_test, y_pred)) print('Accuracy: ' + str(accuracy_score(y_test, y_pred))) ###Output [[356 219] [239 202]] precision recall f1-score support 0 0.60 0.62 0.61 575 1 0.48 0.46 0.47 441 accuracy 0.55 1016 macro avg 0.54 0.54 0.54 1016 weighted avg 0.55 0.55 0.55 1016 Accuracy: 0.5492125984251969 ###Markdown The LDA optimazation did not improve the model accuracy We shall try cosine similarity ###Code # Defining cosine similarity function def cos_knn(k, X_test, y_test, X_train, y_train): # find similarity for every point in test data between every other point in the train data cosim = cosine_similarity(X_test, X_train) # get indices of records in the train data that are most similar to any given test data point top = [(heapq.nlargest((k+1), range(len(i)), i.take)) for i in cosim] # convert indices to numbers top = [[y_train[j] for j in i[:k]] for i in top] # vote, and return prediction for every record in test data pred = [max(set(i), key=i.count) for i in top] pred = np.array(pred) # print table giving classifier accuracy using target variable in the test data print(classification_report(y_test, pred)) # Checking model accuracy for a 80-20 dataset split X_train, X_test, y_train, y_test = train_test_split(X_train, y_train, test_size=0.2, random_state=0) # Feature Scaling scaler = StandardScaler() scaler.fit(X_train) X_train = scaler.transform(X_train) X_test = scaler.transform(X_test) # Checking the model accuracy cos_knn(5, X_test, y_test, X_train, y_train) # test_predict=model.predict(data_test['text']) # #To merge these predictions back with the test df, # data_test['predicted'] =test_predict # data_test ###Output _____no_output_____
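###Markdown To generate predictions for the tweets held in data_test with the neural network (a hedged sketch that was not run in the original notebook; it assumes model2, tokenizer and maxlen from the Keras section are still in memory, and in practice the same URL/punctuation cleaning used on the training text should be applied to data_test first): ###Code # Hedged sketch: preprocess the test tweets like the training text, then predict
test_seq = tokenizer.texts_to_sequences(data_test['text'].astype(str))
test_pad = pad_sequences(test_seq, padding='post', maxlen=maxlen)
test_prob = model2.predict(test_pad)
data_test['predicted'] = np.argmax(test_prob, axis=1)
data_test[['text', 'predicted']].head()
###Output _____no_output_____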
NY_MTA/.ipynb_checkpoints/MTA Turnstile Analysis-checkpoint.ipynb
###Markdown Topic: Challenge Set 1 Subject: Explore MTA turnstile data Date: 04/13/2018 Name: student name Worked with: other students' name Location: sea18_ds10/student_submissions/challenges/01-mta/shaikh_reshama/challenge_set_1_reshama.ipynb Initial Setup-Data was collected between mid-april and early May-Code is set up where you just need to change the csv files and it will combine all 3 into one data *super_df* We also removed many columns that did not add value to the analysis. Packages pandas, numpy, matplotlib, seaborn, datetime, and dateutil were used in this analysis. ###Code import sys # imports a library 'pandas', names it as 'pd' import pandas as pd import numpy as np import matplotlib.pyplot as plt from IPython.display import Image import pprint # enables inline plots, without it plots don't show up in the notebook %matplotlib inline import dateutil.parser from datetime import * # various options in pandas pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', 500) pd.set_option('display.precision', 3) #read data from this folder df1 = pd.read_csv('turnstile_170422.csv') df2 = pd.read_csv('turnstile_170429.csv') df3 = pd.read_csv('turnstile_170506.csv') #Work with turnstile name as combo def Col_Con(mta): mta['TURNSTILE'] = mta['C/A'] + mta['UNIT'] + mta['SCP'] mta.drop('C/A', axis=1, inplace = True) mta.drop('UNIT', axis=1,inplace = True) mta.drop('SCP', axis=1, inplace = True) mta = mta.drop('LINENAME', axis=1) mta = mta.drop('DIVISION', axis=1) mta = mta.drop('DESC', axis=1) mta.columns = mta.columns.str.strip() return mta mta1 = Col_Con(df1) mta2 = Col_Con(df2) mta3 = Col_Con(df3) leng = len(mta1.index) leng3 = len(mta2.index) leng4 = len(mta3.index) # make new columns, blank #Only use if we are using turnstiles as data 'mta['Cu_ENT'] = [0] * leng' def new_cols(mta, leng): mta['ENT_COUNT'] = [0] * leng mta['EXT_COUNT'] = [0] * leng mta['DATE_TIME'] = [''] * leng mta['DATE_TIME_WD'] = [''] * leng mta['Donor_Est'] = [0] * leng return mta mta1 = new_cols(mta1, leng) mta2 = new_cols(mta2, leng3) mta3 = new_cols(mta3, leng4) ###Output _____no_output_____ ###Markdown Manipulating Data DatetimeA datetime column was added to help consolidate the dataframe later in the program. It helped us graph the mean traffic based on time of day. Summing CountsThe ENTRIES and EXITS column are set on a tally system, so it does not tell us exactly how many entries there were between the timestamp and the previous timestamp. Using the column data, a entry per time period was calculated based on the marker at the current timestamp and the marker at the previous time stamp (300,000 - 289,000) = 11,000 people passed through the time stamp. ###Code # Fill empty date time column with formula # I dont think we will use this... 
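# Added note: this helper parses the DATE and TIME strings into a single datetime
# object and records the weekday index (0 = Monday ... 6 = Sunday); the later
# groupby/aggregation steps rely on these two columns.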
def date_time(df): leng= len(df.index) for i in range(leng): datetime = df.DATE[i]+ ' ' + df.TIME[i] value = dateutil.parser.parse(datetime) df.at[i,'DATE_TIME'] = value df.at[i,'DATE_TIME_WD'] = int(value.weekday()) return df mta1 = date_time(mta1) mta2 = date_time(mta2) mta3 = date_time(mta3) #fill empty ent_count with formula def coun_ent(mta, leng): for i in range(leng): if i == 0: mta.at[i,'ENT_COUNT'] = 0 else: if mta.TURNSTILE[i] == mta.TURNSTILE[i-1]: diff = mta.ENTRIES[i] - mta.ENTRIES[i-1] if diff < 0: diff = 0 if diff > 10000: diff = 0 mta.at[i,'ENT_COUNT'] = diff else: mta.at[i,'ENT_COUNT'] = 0 return mta mta1 = coun_ent(mta1,leng) mta2 = coun_ent(mta2, leng3) mta3 = coun_ent(mta3, leng4) #fill in new exit counts def coun_ext(mta): leng = len(mta.index) for i in range(leng): if i == 0: mta.at[i,'EXT_COUNT'] = 0 else: if mta.TURNSTILE[i] == mta.TURNSTILE[i-1]: diff = mta.EXITS[i] - mta.EXITS[i-1] if diff < 0: diff = 0 if diff > 10000: diff = 0 mta.at[i,'EXT_COUNT'] = diff else: mta.at[i,'EXT_COUNT'] = 0 return mta mta1 = coun_ext(mta1) mta2 = coun_ext(mta2) mta3 = coun_ext(mta3) #checking dataframe mta1.head(10) #Removing obsolete columns def mor_cln(mta): mta = mta.drop('TIME', axis=1) mta = mta.drop('ENTRIES', axis=1) mta = mta.drop('EXITS', axis=1) return mta mta1 = mor_cln(mta1) mta2 = mor_cln(mta2) mta3 = mor_cln(mta3) ###Output _____no_output_____ ###Markdown Merging the Dataframes Initial MergeThe initial merge is for the csv dataframes that have undergone the manipulations up to the point. It merges by 'STATION', 'DATE_TIME_WD' and 'DATE_TIME' (meaning we keep the columns as indexs, and need to reset_index to move them back to columns). It also creates aggregated ENT_COUNT (entry count) and and EXT_COUNT(exit count) based on day of the week. So the order is creating the entry and exit counts based turnstiles data per datetime hour.Functions: -get_agg -merge_agg Secondary MergeUsing concat to 'stack' dataframes, then using get_agg_mean to get the mean of each station's entry count based on day of the week and datetime hour. This results to our super_df. 
-get_agg_mean -merge_agg ###Code #Creating a function to merge aggregate columns with original DF def merge_agg(ent_agg, ext_agg): ent_agg.columns = ent_agg.columns.droplevel(level=1) ext_agg.columns = ext_agg.columns.droplevel(level=1) #Creating a new data frame removing repeted values q_mta = pd.merge(ent_agg, ext_agg, on=['STATION','DATE_TIME_WD','DATE_TIME'], how='left') return q_mta #Creating a function to get aggregate sum data for two columns def get_agg(mta): mt = mta.groupby(['STATION','DATE_TIME_WD', 'DATE_TIME']) ent_agg = mt.agg({'ENT_COUNT':['sum']}) ent_agg.reset_index(inplace=True) ext_agg = mt.agg({'EXT_COUNT':['sum']}) #returns "index" as columns .reset_index() ext_agg.reset_index(inplace=True) fin_agg = merge_agg(ent_agg, ext_agg) return fin_agg #Creating a function to get aggregate mean data for two columns def get_agg_mean(mta): mt = mta.groupby(['STATION','DATE_TIME_WD', 'DATE_TIME']) ent_agg = mt.agg({'ENT_COUNT':['mean']}) ent_agg.reset_index(inplace=True) ext_agg = mt.agg({'EXT_COUNT':['mean']}) #returns "index" as columns .reset_index() ext_agg.reset_index(inplace=True) fin_agg = merge_agg(ent_agg, ext_agg) return fin_agg #Updating DF with aggregate data mta1 = get_agg(mta1) mta2 = get_agg(mta2) mta3 = get_agg(mta3) #Function that combines 3 dfs into one def super_merge(a,b,c): new_df = pd.concat([a, b]) q_mta = pd.concat([new_df, c]) q_mta=get_agg_mean(q_mta) return q_mta #Combining DFs super_df = super_merge(mta1,mta2,mta3) super_df ###Output _____no_output_____ ###Markdown Creating unique dataframes Entrys per dayWe are removing the extra datetime column and focusing solely on the weekday for dataframe no_dt (ie no datetime). We have to copy super_df and then aggregate sum the traffic over the each day (because we are summing the total entries at each datetime point). Then we merge the two series together and return a new dataframe. ###Code #Creating new functions to get aggregates over an entire day and combine with original DF def merge_agg_day(ent_agg, ext_agg): ent_agg.columns = ent_agg.columns.droplevel(level=1) ext_agg.columns = ext_agg.columns.droplevel(level=1) #Creating a new data frame removing repeted values q_mta = pd.merge(ent_agg, ext_agg, on=['STATION','DATE_TIME_WD'], how='left') return q_mta def get_agg_day(mta): mt = mta.groupby(['STATION','DATE_TIME_WD']) ent_agg = mt.agg({'ENT_COUNT':['sum']}) ent_agg.reset_index(inplace=True) ext_agg = mt.agg({'EXT_COUNT':['sum']}) #returns "index" as columns .reset_index() ext_agg.reset_index(inplace=True) fin_agg = merge_agg_day(ent_agg, ext_agg) return fin_agg #Creates a DF with cumulative entries per day per station with no datetime object no_dt = get_agg_day(super_df) no_dt #Sorting DF and getting the top 30 stations super_df.sort_values(by=['ENT_COUNT'], ascending = 0, inplace = True) uni_stations = list(super_df.STATION.unique())[:20] ###Output _____no_output_____ ###Markdown Entries per time periodThe following sets up and cleans the graph for showing the amount of riders perevery 4 hours. We start by setting the datetime to an hour so we can graph it easier. We then filter out the top 20 stations to avoid over complicating the graph. 
We then use aggregate sum on the entry counts and exit counts, then merge it to a new dataframe.Functions used: - get_agg_time - merge_agg_time ###Code #Setting values for code below super_df.reset_index(drop=True, inplace = True) f = dateutil.parser.parse('02:00:00') late = dateutil.parser.parse('23:00:00') mid = dateutil.parser.parse('00:00:00') #Creating a column to use as an axis for plotting daily trends #Creating a better df for reading traffic per date time for i in range(len(super_df.index)): val = (super_df['DATE_TIME'][i]).hour #moving the midnight values (because it returns a 0) to 11 pm values to make a better looking graph if val == mid.hour: val =late.hour super_df.at[i,'New_DATE_TIME'] = val #removing bad data if super_df.ENT_COUNT[i] >=100000 and (val<f.hour or val>late.hour): super_df.drop(i, inplace = True) if super_df.ENT_COUNT[i] <=1000 or super_df['DATE_TIME_WD'][i]>=5: super_df.drop(i, inplace = True) #Creating new data while sorting out low traffic stations, then resetting the indeces uni_df = super_df[super_df['STATION'].isin(uni_stations)] uni_df.reset_index(drop=True, inplace = True) #Creating new functions to get aggregates over an entire day and combine with original DF def merge_agg_time(ent_agg, ext_agg): ent_agg.columns = ent_agg.columns.droplevel(level=1) ext_agg.columns = ext_agg.columns.droplevel(level=1) #Creating a new data frame removing repeted values q_mta = pd.merge(ent_agg, ext_agg, on=['STATION','DATE_TIME_WD','New_DATE_TIME'], how='left') return q_mta def get_agg_time(mta): mt = mta.groupby(['STATION','DATE_TIME_WD','New_DATE_TIME']) ent_agg = mt.agg({'ENT_COUNT':['sum']}) ent_agg.reset_index(inplace=True) ext_agg = mt.agg({'EXT_COUNT':['sum']}) #returns "index" as columns .reset_index() ext_agg.reset_index(inplace=True) fin_agg = merge_agg_time(ent_agg, ext_agg) return fin_agg uni_df = get_agg_time(uni_df) uni_df ###Output _____no_output_____ ###Markdown Filtering the no_dt dataframeWe take the no_dt dataframe and filter out the top 20 stations to make a plot over. ###Code no_dt_df = no_dt[no_dt['STATION'].isin(uni_stations)] no_dt_df.sort_values(by='ENT_COUNT', ascending = 0, inplace = True) no_dt_df.reset_index(drop=True, inplace=True) no_dt_df ###Output _____no_output_____ ###Markdown Graphing Seaborn lmplotUsing the uni_df created to graph the ridership based on time of day, we make a seaborn (denoted sns in code) lmplot. the sns.set() allows us to manipulate different aspects of the graph. It's more or less self explanatory. The second block of code does the following (corresponds with line of code): - resets the graph attributes to default (negating everything in previous block of code) - creates a list for the x-axis labels - creates a fig variable that plots the lmplot, x_bins trys to clean up the graph by creating bins. 
- sets axis titles - relabels x axis with a list with len corresponding to original amount of tick marks - adds a plot title Seaborn pinpoint plotWe are creating a pinpoint plot to show the trend of ridership over a full week.The 3rd block of coding does the following: - tells ipython to create a pop up with the graph - creates a list to relabel x axis - creates pinpoint graph, also sets title using .set - relabels x and y axis ###Code # Graphs are below import seaborn as sns import matplotlib.dates as mdates #Setting graph conponents sns.set(rc={"font.style":"normal", "axes.facecolor":('white'), "figure.facecolor":'black', "grid.color":'black', "grid.linestyle":':', "axes.grid":True, 'axes.labelsize':15, 'figure.figsize':(30, 30), 'xtick.labelsize':12, 'ytick.labelsize':12}) sns.reset_orig() %matplotlib inline # build the figure hours = ['','12 am','5 am','10 am', '3 pm', '8 pm'] fig = sns.lmplot(y="ENT_COUNT", x="New_DATE_TIME", data=uni_df, hue = 'STATION', fit_reg = False, x_bins=12) fig = fig.set_axis_labels('Time', 'Total Entries') fig = fig.set(xticklabels=hours) plt.title('20 Largest Stations: Daily Traffic') %matplotlib osx week = ['Monday','Tuesday', 'Wednesday','Thursday', 'Friday','Saturday', 'Sunday'] fig, ax = plt.subplots(figsize=(15,12)) fig = sns.pointplot(y="ENT_COUNT", x="DATE_TIME_WD", data=no_dt_df, join=True, hue='STATION', ax=ax, linestyles='-').set_title('20 Largest Traffic Stations') ax.set(xlabel='Dates', ylabel='Total Entries', xticklabels=week) ###Output _____no_output_____
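###Markdown A small follow-up (a hedged sketch, not part of the original analysis; it only reuses the no_dt_df frame built above): for each of the top stations, pick the weekday with the highest total entries. ###Code # Hedged sketch: busiest weekday (0 = Monday ... 6 = Sunday) per top station
busiest = (no_dt_df.sort_values('ENT_COUNT', ascending=False)
           .groupby('STATION')
           .first()[['DATE_TIME_WD', 'ENT_COUNT']])
busiest.sort_values('ENT_COUNT', ascending=False)
###Output _____no_output_____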
application/pages/param_reference_example/Param.ipynb
###Markdown The ``panel.Param`` pane allows customizing the widgets, layout and style of the parameters of a `param.Parametrized` Class. Parameters:The basic parameters are* **`object`** (param.parameterized.Parameters): The `param` attribute of a `param.Parameterized` Class* **`parameters`** (None or List[str]): A list identifying the subset of parameters to include in the Pane.* **`widgets`** (Dict): A Dictionary specifying which parameters and widgets to use for a given parameter. You can also specify widget attributes.The more advanced parameters give you more control and they are * **`default_layout`** (ClassSelector) A layout like Column, Row etc or a Custom GridBox.* **`expand`** (bool) Whether or not to expand* **`expand_button`** (None or Button) The expand button* **`expand_layout`** (ClassSelector) A layout like Column, Row etc. or a Custom GridBox.* **`show_labels`** (bool) Whether or not to show labels* **`show_name`** (bool) Whether or not to show the name of the Parameterized Class For more layout and styling related parameters see the [customization user guide](../../user_guide/Customization.ipynb).For an alternative example of using `panel.Param` see the [parameters user guide](../../user_guide/Param.ipynb).___ Lets build a model of a cycling Athlete and his PowerCurve. The PowerCurve is a recording of his maximum power output in Watt per kg for fixed durations of time. ###Code import datetime import pandas as pd import hvplot.pandas DATE_BOUNDS = (datetime.date(1900, 1, 1), datetime.datetime.now().date()) class PowerCurve(param.Parameterized): ten_sec = param.Number(1079) ten_sec_date = param.Date(datetime.date(2018, 8, 21), bounds=DATE_BOUNDS) one_min = param.Number(684) one_min_date = param.Date(datetime.date(2017, 8, 31), bounds=DATE_BOUNDS) ten_min = param.Number(419) ten_min_date = param.Date(datetime.date(2017, 9, 22), bounds=DATE_BOUNDS) twenty_min = param.Number(398) twenty_min_date = param.Date(datetime.date(2017, 9, 22), bounds=DATE_BOUNDS) one_hour = param.Number(319) one_hour_date = param.Date(datetime.date(2017, 8, 6), bounds=DATE_BOUNDS) @param.depends("ten_sec", "one_min", "ten_min", "twenty_min", "one_hour") def plot(self): data = { "duration": [10 / 60, 1, 10, 20, 60], "power": [self.ten_sec, self.one_min, self.ten_min, self.twenty_min, self.one_hour], } dataframe = pd.DataFrame(data) line_plot = dataframe.hvplot.line( x="duration", y="power", width=300, line_color="#007BFF", line_width=3, ) scatter_plot = dataframe.hvplot.scatter(x="duration", y="power", width=300).opts( marker="o", size=6, color="#007BFF" ) fig = line_plot * scatter_plot gridstyle = {"grid_line_color": "black", "grid_line_width": 0.1} fig = fig.opts( responsive=True, toolbar=None, yticks=list(range(0, 1600, 200)), ylim=(0, 1500), gridstyle=gridstyle, show_grid=True, ) return fig class Athlete(param.Parameterized): name_ = param.String("P.A. Nelson") birthday = param.Date(datetime.date(1976, 9, 17), bounds=DATE_BOUNDS) weight = param.Number(default=82, bounds=(20,300)) power_curve = param.ClassSelector(class_=PowerCurve, default=PowerCurve()) athlete = Athlete() ###Output _____no_output_____ ###Markdown The `pn.Param` can be used to view and edit the models.Try clicking the `...` button. This will expand the PowerCurve if running in an interactive notebook. ###Code pn.Param(athlete) ###Output _____no_output_____ ###Markdown The default Name and Birthday widgets are slow to use. So lets change them to a DatePicker and a LiteralInput for ints. 
###Code pn.Param(athlete, widgets={"birthday": pn.widgets.DatePicker, "weight": pn.widgets.LiteralInput}) ###Output _____no_output_____ ###Markdown Lets expand the power curve by default. ###Code pn.Param( athlete, widgets={ "birthday": pn.widgets.DatePicker, "weight": pn.widgets.LiteralInput }, expand=True) ###Output _____no_output_____ ###Markdown Now lets try to display the Name and Birthday only and in a Row. ###Code pn.Param( athlete, widgets={"birthday": pn.widgets.DatePicker}, parameters=["name_", "birthday"], show_name=False, default_layout=pn.Row, width=400 ) ###Output _____no_output_____ ###Markdown Lets customize the view of the Athlete some more ###Code athlete_view = pn.Param( athlete, widgets={ "birthday": pn.widgets.DatePicker, "weight": {"type": pn.widgets.LiteralInput, "width": 100} }, parameters=["name_", "birthday", "weight"], show_name=False, default_layout=pn.Row, width=600 ) athlete_view ###Output _____no_output_____ ###Markdown Lets take a look at the PowerCurve ###Code pn.Param(athlete.power_curve) ###Output _____no_output_____ ###Markdown The PowerCurve layout is not that tidy. Lets change the layout to two columns. ###Code class GridBoxWithTwoColumns(pn.GridBox): def __init__(self, *objects, **params): super().__init__(*objects, **params, ncols=2) power_curve_columns_view = pn.Param( athlete.power_curve, default_layout=GridBoxWithTwoColumns, show_name=False, widgets = { "ten_sec_date": pn.widgets.DatePicker, "one_min_date": pn.widgets.DatePicker, "ten_min_date": pn.widgets.DatePicker, "twenty_min_date": pn.widgets.DatePicker, "one_hour_date": pn.widgets.DatePicker, } ) power_curve_columns_view ###Output _____no_output_____ ###Markdown Lets put a plot of the PowerCurve in the mix ###Code power_curve_view = pn.Row( power_curve_columns_view, pn.layout.VSpacer(width=50), athlete.power_curve.plot, ) power_curve_view ###Output _____no_output_____ ###Markdown And finally. Lets put the things together ###Code pn.Column( pn.pane.Markdown("### Athlete"), athlete_view, pn.pane.Markdown("#### Power Curve"), power_curve_view, ) ###Output _____no_output_____
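###Markdown To use this outside the notebook (a minimal sketch; `dashboard` below is simply the Column from the previous cell given a name), the layout can be marked as servable and launched with the `panel serve` command, or opened directly in a browser window: ###Code dashboard = pn.Column(
    pn.pane.Markdown("### Athlete"),
    athlete_view,
    pn.pane.Markdown("#### Power Curve"),
    power_curve_view,
)
dashboard.servable()   # picked up when running `panel serve` on this notebook
# dashboard.show()     # or launch a local Bokeh server directly from the notebook
###Output _____no_output_____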
Examples/torch/compression/channel_pruning.ipynb
###Markdown Torch Channel Pruning Example CodeThe following notebook is an example as to how one may compress their model via Channel Pruning using the AIMET library. The general procedure for compressing is to use AIMET's ModelCompressor, after specifying parameters determining the manner of compression, to compress the model, then finetuning it to recover lost accuracy.We now present an overview of the technique. Recall that in any model, a convolutional layer is defined by four dimensions (m, n, h, w), where m and n are the number of input and output channels, respectively; and h and w are the height and width of the convolutional kernel. Channel Pruning seeks to reduce the number of input channels in this convolutional layer. There are two steps - winnowing, which removes less informative channels, and weight reconstruction, which seeks to shift the weights such that a linear regression between the old outputs and new outputs exists with minimal error. The example code shows the following:1. Instantiate Data Pipeline for evaluation2. Load the pretrained resnet18 Pytorch model and get starting accuracy3. Compress using channel pruning and obtain resulting accuracy4. Finetune and obtain final accuracyThe first three cells below take care of all necessary imports: ###Code import warnings warnings.filterwarnings("ignore", ".*param.*") # Imports necessary for the notebook import os from typing import Tuple from datetime import datetime from decimal import Decimal import torch from torchvision.models import resnet18 # AIMET Imports from aimet_torch.compress import ModelCompressor from aimet_common.defs import CompressionScheme, CostMetric from aimet_torch.defs import GreedySelectionParameters, ChannelPruningParameters # Imports needed for the Data Pipeline from Examples.common import image_net_config from Examples.torch.utils.image_net_data_loader import ImageNetDataLoader from Examples.torch.utils.image_net_evaluator import ImageNetEvaluator from Examples.torch.utils.image_net_trainer import ImageNetTrainer ###Output _____no_output_____ ###Markdown Setting Up Our Config DictionaryThe config dictionary specifies a number of things for use in the pipeline:* dataset_dir: Path to a directory containing ImageNet dataset. This folder should contain at least 2 subfolders: 'train': for training dataset and 'val': for validation dataset.* use_cuda: A boolean var to indicate to run the test on GPU.* logdir: Path to a directory for logging.* epochs: Number of epochs to be used in fine tuning.* learning_rate: A float type learning rate for model finetuning.* learning_rate_schedule: A list of epoch indices for learning rate schedule used in finetuning. Check https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.htmlMultiStepLR for more details.The config dictionary is used for all of the remaining cells. To get a better understanding of when each of the parameters in the config dictionary is used, refer to the code in those cells.**Note**: You will have to replace the dataset_dir path with the path to your own imagenet/tinyimagenet dataset. ###Code config = {'dataset_dir': "path/to/dataset", # Replace with the directory of your dataset! 'use_cuda': True, 'logdir': os.path.join("benchmark_output", "channel_pruning_"+datetime.now().strftime("%Y-%m-%d-%H-%M-%S")), 'epochs': 15, 'learning_rate': 1e-2, 'learning_rate_schedule': [5, 10] } os.makedirs(config['logdir'], exist_ok=True) ###Output _____no_output_____ ###Markdown 1. Instantiate Data PipelineThe next cell defines the data pipeline. 
The ImageNetDataPipeline class takes care of both evaluating and finetuning a model using a dataset directory (which should contain both training data and validation data, already separated into folders) that is specified by the user. For more detail on how it works, see the relevant files under examples/torch/utils.The data pipeline class is simply a template for the user to follow. The methods for this class can be replaced by the user to fit their needs. ###Code class ImageNetDataPipeline: """ Provides APIs for model quantization using evaluation and finetuning. """ def __init__(self, config): """ :param config: """ self._config = config def data_loader(self): """ :return: ImageNetDataloader """ data_loader = ImageNetDataLoader(is_training=False, images_dir=self._config["dataset_dir"], image_size=image_net_config.dataset['image_size']).data_loader return data_loader def evaluate(self, model: torch.nn.Module, iterations: int = None, use_cuda: bool = False) -> float: """ Evaluate the specified model using the specified number of samples from the validation set. :param model: The model to be evaluated. :param iterations: The number of batches of the dataset. :param use_cuda: If True then use a GPU for inference. :return: The accuracy for the sample with the maximum accuracy. """ # Your code goes here evaluator = ImageNetEvaluator(self._config['dataset_dir'], image_size=image_net_config.dataset['image_size'], batch_size=image_net_config.evaluation['batch_size'], num_workers=image_net_config.evaluation['num_workers']) return evaluator.evaluate(model, iterations, use_cuda) def finetune(self, model: torch.nn.Module): """ Finetunes the model. The implemtation provided here is just an example, provide your own implementation if needed. :param model: The model to finetune. :return: None """ # Your code goes here instead of the example from below trainer = ImageNetTrainer(self._config['dataset_dir'], image_size=image_net_config.dataset['image_size'], batch_size=image_net_config.train['batch_size'], num_workers=image_net_config.train['num_workers']) trainer.train(model, max_epochs=self._config['epochs'], learning_rate=self._config['learning_rate'], learning_rate_schedule=self._config['learning_rate_schedule'], use_cuda=self._config['use_cuda']) torch.save(model, os.path.join(self._config['logdir'], 'finetuned_model.pth')) ###Output _____no_output_____ ###Markdown 2. Load the Model, Initialize Data Pipeline, Get Starting AccuracyThe next section will initialize the model and the data pipeline. It is also customary to log the original accuracy of the model on the dataset provided before compressing the model. ###Code data_pipeline = ImageNetDataPipeline(config) # Input image shape image_shape = (1, image_net_config.dataset['image_channels'], image_net_config.dataset['image_width'], image_net_config.dataset['image_height']) model = resnet18(pretrained=True) if config['use_cuda']: if torch.cuda.is_available(): model.to(torch.device('cuda')) else: raise Exception("use_cuda is True but cuda is unavailable") accuracy = data_pipeline.evaluate(model, use_cuda=config['use_cuda']) print(accuracy) ###Output _____no_output_____ ###Markdown 3. Compress with Channel PruningThe next cells perform the actual compression step. First, parameters related to the compression are specified in the following cell:1. **target_comp_ratio**: The desired compession ratio using Channel Pruning. This value denotes the desired compression % of the original model. To compress the model to 20% of its original size, use 0.2. 
This would compress the model by 80%. The pre-specified value that is given is 50%2. **num_comp_ratio_candidates**: The number of compression ratios used by the API at each layer. Note that the model will test multiple different compression ratios per layer to try to compress less-important layers more, in such a way such that the overall compression ratio is equal to target_comp_ratio. The specified value is 10, which means that for each layer, the API will try the values 0.1, 0.2, ... 1.0 as ratios.3. **cost_metric**: Determines in what way the model is evaluated - can either be compute (mac), or space (memory).4. **eval_iterations**: The number of batches of data used to evaluate a model while the model is compressing. It is set to 10 to speed up the compression, rather than using the whole dataset. More details are later in the notebook/elsewhere in the AIMET API documentation5. **modules_to_ignore**: The layers that should be ignored during compression. The first layer is ignored to preserve the way the input interacts with the model; if there are other layers that should be ignored, add them to the list.6. **num_reconstruction_samples**: During the last stage of Channel Pruning, the Compression API tries to map the outputs of the pruned model with that of the original model through linear regression, and uses this attempt to change the weights in the pruned layer. The regression is done with this many random samples. This should generally be in the 100s. ###Code target_comp_ratio = Decimal(0.5) num_comp_ratio_candidates = 10 cost_metric = CostMetric.mac num_eval_iterations = 10 modules_to_ignore = [model.conv1] num_reconstruction_samples = 500 ###Output _____no_output_____ ###Markdown The next cell sets up the other parameters needed to perform the compression.We first define the actual Channel Pruning Parameters. There are two methods for which you can choose parameters - Auto and Manual. For Auto, the only option is a greedy selection scheme, where the optimal compression ratio is selected for each layer among a set list of candidates to reach the target ratio (which was specified in the previous cell). For Manual, you have to specify the compression ratios for each layer; a general rule of thumb, if one is to use Manual, is to start with the ratios found by Auto Mode and use it as a starting point. ###Code # Creating a Data Loader data_loader = ImageNetDataLoader(is_training=True, images_dir=config['dataset_dir'], image_size=image_net_config.dataset['image_size']).data_loader # Creating Greedy selection parameters: greedy_params = GreedySelectionParameters(target_comp_ratio=target_comp_ratio, num_comp_ratio_candidates=num_comp_ratio_candidates) # Creating Auto mode Parameters: cp_mode = ChannelPruningParameters.Mode.auto auto_params = ChannelPruningParameters.AutoModeParams(greedy_select_params=greedy_params, modules_to_ignore=modules_to_ignore) # Creating Channel Pruning SVD parameters with Auto Mode: params = ChannelPruningParameters(data_loader=data_loader, num_reconstruction_samples=num_reconstruction_samples, allow_custom_downsample_ops=True, mode=cp_mode, params=auto_params) # Scheme is Channel Pruning: scheme = CompressionScheme.channel_pruning ###Output _____no_output_____ ###Markdown Finally, the model is compressed using AIMET's ModelCompressor paired with the parameters specified above. This returns both the new model, which is saved, as well as relevant statistics. Finally, the compressed model is evaluated on the dataset. 
Note here that the ModelCompressor evaluates the model while compressing using the same evaluate function that is in our data pipeline. ###Code compressed_model, comp_stats = ModelCompressor.compress_model(model=model, eval_callback=data_pipeline.evaluate, eval_iterations=num_eval_iterations, input_shape=image_shape, compress_scheme=scheme, cost_metric=cost_metric, parameters=params) torch.save(compressed_model, os.path.join(config['logdir'], 'compressed_model.pth')) print(comp_stats) comp_accuracy = data_pipeline.evaluate(model, use_cuda=config['use_cuda']) print(comp_accuracy) ###Output _____no_output_____ ###Markdown 4. FinetuningAfter the model is compressed, the model is finetuned, then evaluated and saved. ###Code data_pipeline.finetune(compressed_model) finetuned_accuracy = data_pipeline.evaluate(compressed_model, use_cuda=config['use_cuda']) print(finetuned_accuracy) ###Output _____no_output_____ ###Markdown Model compression using Channel Pruning This notebook shows a working code example of how to use AIMET to perform model compression. The Channel Pruning technique is used in this notebook to achieve model compression.Here is a brief introduction to the techniques. Please refer to the AIMET user guide for more details.1. **Spatial SVD**: This is a tensor-decomposition technique generally applied to convolutional layers (Conv2D). Applying this technique will decompose a single convolutional layer into two. The weight tensor of the layer to be split is flattended to a 2D matrix and singular value decomposition (SVD) is applied to this matrix. Compression is achieved by discarding the least significant singular values in the diagonal matrix. The decomposed matrices are combined back into two separate convolutional layers.2. **Channel Pruning**: In this technique AIMET will discard least significant (using a magnitude metric) input channels of a given convolutional (Conv2D) layer. The layers of the model feeding into this convolutional layer also have the channels dimension modified to get back to a working graph. This technique also uses a layer-by-layer reconstruction procedure that modifies the weights of the compressed layers to minimize the distance of the compressed layer output to the corresponding layer output of the original model.Both of the above techniques are structured pruning techniques that aim to reduce computational macs or memory requirements of the model. Subsequent to applying either of these techniques, the compressed model needs to be fine-tuned (meaning trained again for a few epochs) to recover accuracy close to the original model.This notebook shows working code example of how the technique 2 can be used to compress the model. You can find a separate notebook for 1, and 1 followed by 2 in the same folder. Overall flowThis notebook covers the following1. Instantiate the example evaluation and training pipeline2. Load the model and evaluate it to find the baseline accuracy3. Compress the model and fine-tune: 3.1 Compress model using Channel Pruning and evaluate it to find post-compression accuracy 3.2 Fine-tune the model What this notebook is not * This notebook is not designed to show state-of-the-art compression results. For example, some optimization parameters such as num_comp_ratio_candidates, num_eval_iterations and epochs are deliberately chosen to have the notebook execute more quickly. --- DatasetThis notebook relies on the ImageNet dataset for the task of image classification. 
If you already have a version of the dataset readily available, please use that. Else, please download the dataset from appropriate location (e.g. https://image-net.org/challenges/LSVRC/2012/index.php).**Note1**: The ImageNet dataset typically has the following characteristics and the dataloader provided in this example notebook rely on these- Subfolders 'train' for the training samples and 'val' for the validation samples. Please see the [pytorch dataset description](https://pytorch.org/vision/0.8/_modules/torchvision/datasets/imagenet.html) for more details.- A subdirectory per class, and a file per each image sample**Note2**: To speed up the execution of this notebook, you may use a reduced subset of the ImageNet dataset. E.g. the entire ILSVRC2012 dataset has 1000 classes, 1000 training samples per class and 50 validation samples per class. But for the purpose of running this notebook, you could perhaps reduce the dataset to say 2 samples per class. This exercise is left upto the reader and is not necessary.Edit the cell below and specify the directory where the downloaded ImageNet dataset is saved. ###Code DATASET_DIR = '/path/to/dataset/' # Please replace this with a real directory ###Output _____no_output_____ ###Markdown --- 1. Example evaluation and training pipelineThe following is an example training and validation loop for this image classification task.- **Does AIMET have any limitations on how the training, validation pipeline is written?** Not really. We will see later that AIMET will modify the user's model to compress it and the resultant model is still a PyTorch model. This compressed model can be used in place of the original model when doing inference or training.- **Does AIMET put any limitation on the interface of the evaluate() or train() methods?** Not really, but evaluate() method should return a single number representing the accuracy of the model. Ideally, You should be able to use your existing evaluate and train routines as-is. ###Code import os import torch from typing import List from Examples.common import image_net_config from Examples.torch.utils.image_net_evaluator import ImageNetEvaluator from Examples.torch.utils.image_net_trainer import ImageNetTrainer from Examples.torch.utils.image_net_data_loader import ImageNetDataLoader class ImageNetDataPipeline: @staticmethod def get_val_dataloader() -> torch.utils.data.DataLoader: """ Instantiates a validation dataloader for ImageNet dataset and returns it """ data_loader = ImageNetDataLoader(DATASET_DIR, image_size=image_net_config.dataset['image_size'], batch_size=image_net_config.evaluation['batch_size'], is_training=False, num_workers=image_net_config.evaluation['num_workers']).data_loader return data_loader @staticmethod def evaluate(model: torch.nn.Module, iterations: int, use_cuda: bool) -> float: """ Given a torch model, evaluates its Top-1 accuracy on the dataset :param model: the model to evaluate :param iterations: the number of batches to be used to evaluate the model. A value of 'None' means the model will be evaluated on the entire dataset once. :param use_cuda: whether or not the GPU should be used. 
""" evaluator = ImageNetEvaluator(DATASET_DIR, image_size=image_net_config.dataset['image_size'], batch_size=image_net_config.evaluation['batch_size'], num_workers=image_net_config.evaluation['num_workers']) return evaluator.evaluate(model, iterations=iterations, use_cuda=use_cuda) @staticmethod def finetune(model: torch.nn.Module, epochs: int, learning_rate: float, learning_rate_schedule: List, use_cuda: bool): """ Given a torch model, finetunes the model to improve its accuracy :param model: the model to finetune :param epochs: The number of epochs used during the finetuning step. :param learning_rate: The learning rate used during the finetuning step. :param learning_rate_schedule: The learning rate schedule used during the finetuning step. :param use_cuda: whether or not the GPU should be used. """ trainer = ImageNetTrainer(DATASET_DIR, image_size=image_net_config.dataset['image_size'], batch_size=image_net_config.train['batch_size'], num_workers=image_net_config.train['num_workers']) trainer.train(model, max_epochs=epochs, learning_rate=learning_rate, learning_rate_schedule=learning_rate_schedule, use_cuda=use_cuda) ###Output _____no_output_____ ###Markdown --- 2. Load the model and evaluate it to find the baseline accuracy For this example notebook, we are going to load a pretrained resnet18 model from torchvision. Similarly, you can load any pretrained PyTorch model instead. ###Code from torchvision.models import resnet18 model = resnet18(pretrained=True) ###Output _____no_output_____ ###Markdown ---We should decide whether to place the model on a CPU or CUDA device. This example code will use CUDA if available in your current execution environment. You can change this logic and force a device placement if needed. ###Code use_cuda = False if torch.cuda.is_available(): use_cuda = True model.to(torch.device('cuda')) ###Output _____no_output_____ ###Markdown ---Let's determine the FP32 (floating point 32-bit) accuracy of this model using the evaluate() routine ###Code accuracy = ImageNetDataPipeline.evaluate(model, iterations=None, use_cuda=use_cuda) print(accuracy) ###Output _____no_output_____ ###Markdown 3. Compress the model and fine-tune 3.1. Compress model using Channel Pruning and evaluate it to find post-compression accuracyNow we use AIMET to define compression parameters for Channel Pruning, few of which are explained here- **target_comp_ratio**: The desired compression ratio for Channel Pruning. We are using 0.9 to compress the model by 10%.- **num_comp_ratio_candidates**: As part of determining how compressible each layer is, AIMET performs various measurements. This number denotes the different compression ratios tried by the AIMET for each layer. We are using 3 here which translates to 0.33, 0.66 and 1.00 compression ratios at each layer. Optimal value is 10. The higher the number of candidates the more granular the measurements for each layer, but also the higher the time taken to complete these measurements.- **modules_to_ignore**: This list can contain the references of model-layers that should be ignored during compression. We have added the first layer to be ignored to preserve the way the input interacts with the model; other layers can be added too if desired.- **mode**: We are chossing **Auto** mode which means AIMET performs per-layer compressibility analysis and determines how much to compress each layer. 
The alternate choice is **Manual**.- **data_loader**: Channel Pruning uses unlabelled data samples for the layer-by-layer reconstruction procedure explained at the start. This provided data loader is used to retrieve those samples. You can just pass your existing data loader - say for the validation or training dataset.- **num_reconstruction_samples**: The number of samples used in the layer-by-layer reconstruction procedure. We are using 10 here which is a ridiculously low number but enables this notebook to execute quickly. A typical setting here would ~1000 samples.- **allow_custom_downsample_ops**: If this flag is enabled, AIMET Channel Pruning will insert downsample ops into the model graph if needed. Enabling this can enable more convolutional layers to be considered for pruning, but it may increase memory bandwidth overhead for the additional downsample layers. So there is a trade-off to be considered. We suggest disabling this by default.- **eval_callback**: The model evaluation function. The expected signature of the evaluate function should be `(model, eval_iterations, use_cuda)` and it is expected to return an accuracy metric.- **eval_iterations**: The number of batches of data to use for evaluating the model while the model is compressing. We are using 1 to speed up the notebook execution. But please choose a high enough number of samples so that we can trust the accuracy of the model given those samples. It is expected that the eval callback would use the same samples for every invocation of the callback.- **compress_scheme**: We choose the 'channel pruning' compression scheme.- **cost_metric**: Determines whether we want to target either to reduce MACs or memory by the desired compression ratio. We are chossing 'mac' here. ###Code from decimal import Decimal from aimet_torch.defs import GreedySelectionParameters, ChannelPruningParameters from aimet_common.defs import CompressionScheme, CostMetric greedy_params = GreedySelectionParameters(target_comp_ratio=Decimal(0.9), num_comp_ratio_candidates=3) modules_to_ignore = [model.conv1] auto_params = ChannelPruningParameters.AutoModeParams(greedy_select_params=greedy_params, modules_to_ignore=modules_to_ignore) data_loader = ImageNetDataPipeline.get_val_dataloader() params = ChannelPruningParameters(data_loader=data_loader, num_reconstruction_samples=10, allow_custom_downsample_ops=False, mode=ChannelPruningParameters.Mode.auto, params=auto_params) eval_callback = ImageNetDataPipeline.evaluate eval_iterations = 1 compress_scheme = CompressionScheme.channel_pruning cost_metric = CostMetric.mac ###Output _____no_output_____ ###Markdown ---We call the AIMET ModelCompressor.compress_model API using the above parameters. This call returns a compressed model as well as relevant statistics. **Note**: the ModelCompressor evaluates the model while compressing using the same evaluate function that is in our data pipeline. ###Code from aimet_torch.compress import ModelCompressor compressed_model, comp_stats = ModelCompressor.compress_model(model=model, eval_callback=eval_callback, eval_iterations=eval_iterations, input_shape=(1, 3, 224, 224), compress_scheme=compress_scheme, cost_metric=cost_metric, parameters=params) print(comp_stats) ###Output _____no_output_____ ###Markdown ---Now the compressed model is ready to be used for inference or training. First we can pass this model to the same evaluation routine we used before to calculated compressed model accuracy. 
###Code accuracy = ImageNetDataPipeline.evaluate(compressed_model, iterations=None, use_cuda=use_cuda) print(accuracy) ###Output _____no_output_____ ###Markdown ---As you can see the model accuracy fell sharply after compression. This is expected. We will use model fine-tuning to recover this accuracy back. 3.2. Fine-tune the modelAfter the model is compressed using Channel Pruning, we can simply train the model for a few more epochs (typically 15-20). As with any training job, hyper-parameters need to be searched for optimal results. Good starting points are to use a learning rate on the same order as the ending learning rate when training the original model, and to drop the learning rate by a factor of 10 every 5 epochs or so.For the purpose of this example notebook, we are going to train only for 1 epoch. But feel free to change these parameters as you see fit. ###Code ImageNetDataPipeline.finetune(compressed_model, epochs=2, learning_rate=15e-4, learning_rate_schedule=[5, 10], use_cuda=use_cuda) ###Output _____no_output_____ ###Markdown ---After we are done with finetuing the compressed model, we can check the floating point accuracy against the same validation dataset at the end to observe any improvements in accuracy. ###Code accuracy = ImageNetDataPipeline.evaluate(compressed_model, iterations=None, use_cuda=use_cuda) print(accuracy) ###Output _____no_output_____ ###Markdown ---Depending on your settings you should have observed a slight gain in accuracy after one epoch of training. Ofcourse, this was just an example. Please try this against the model of your choice and play with the hyper-parameters to get the best results.So we have an improved model after compression using Channel Pruning. Optionally, this model now can be saved like a regular PyTorch model. ###Code os.makedirs('./output/', exist_ok=True) torch.save(compressed_model, './output/finetuned_model') ###Output _____no_output_____
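A sketch of a fuller fine-tuning run along the lines suggested above (15–20 epochs, learning rate dropped roughly every 5 epochs). It reuses only the notebook's own `ImageNetDataPipeline` helpers and the `compressed_model` / `use_cuda` variables already in scope; the epoch count, starting learning rate, and milestone epochs are illustrative assumptions, not values prescribed by AIMET.

```python
# Hypothetical longer fine-tuning schedule (the numbers are assumptions, not AIMET defaults):
# start near the original model's final learning rate and drop it at epochs 5 and 10.
ImageNetDataPipeline.finetune(compressed_model,
                              epochs=15,
                              learning_rate=1e-3,
                              learning_rate_schedule=[5, 10],
                              use_cuda=use_cuda)

# Re-evaluate on the full validation set after the longer schedule.
finetuned_accuracy = ImageNetDataPipeline.evaluate(compressed_model, iterations=None, use_cuda=use_cuda)
print(f"Accuracy after extended fine-tuning: {finetuned_accuracy:.4f}")
```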
LD00.3/LoadsDev.ipynb
###Markdown --- Loads FunctionsUsing the basic principles above to extract dynamic pressures, load cards can be written for specific Nastran elements. ###Code def getEID(cardType, file): ''' This function reads a Nastran fem file, and returns a list of all element ids, for the selected type. The cardType must be a string of the appropriate Nastran element type (eg. 'cquad4', 'CTRIA6'). Case does not matter. The function assumes (and only works with) short-field format for Nastran. Returns: "EID" a list of EID's (integers). ''' # Open the source file f = open(file, 'r') # Initate the output EID = [] # Read the lines into a list lines = f.readlines() # Loop over each line for L in lines: # If the first Nastran field is the card we want: if L[:8].upper() == cardType.upper().ljust(8): # append the text in the ID field as an integer to the output EID.append(int(L[8:16].rstrip())) return EID def makePressures(S, q, EID, vec=None, filename='loads.out'): ''' This function writes out applied pressures to Nastran elements. The specific bulk card used is a PLOAD4. The global coordinate system is used, and the orientation vector can be defined. The pressure is applied as a constant over each entire element (no gradient over a single element). The function assumes (and only works with) short-field format for Nastran. Input is the set ID, a pressure, list of element ids, orientation vector components, and an output filename (defaulted to 'loads.out'). Output is a text file with all the PLOAD4 cards. If vec is not provided, pressure will be applied normal to the element face. q should be number (int or float), not string! And disregard formatting and length (function with correct). Returns: nothing, file is written. ''' # Create the output file f = open(filename, 'w+') # Loop over element id for e in EID: # Write both lines of the PLOAD card. # See MSC Nastran quick-reference guide on PLOAD4 card for details on fields if vec == None: f.write('PLOAD4 {:<8d}{:<8d}{:<6.2E}\n'.format(S,e,q) ) else: f.write('PLOAD4 {:<8d}{:<8d}{:<6.2E}\n {:<8d}{:<8d}{:<8d}\n'.format(S,e,q,vec[0], vec[1], vec[2]) ) f.close() def patran2List(raw): ''' This function reads the clipboard and converts a string of patran selections to a list of object id's. This function assumes only 1 object type is selected at a time. Returns: object type (string), ids (list of ints) ''' raw = raw.split() ids = [] # The first thing in the list will always be element type typ = raw[0] # Loop through each string in the patran selection for v in raw[1:]: if ':' in v: xx = v.split(':') if len(xx) > 2: step = int(xx[2]) else: step = 1 for i in range(int(xx[0]), int(xx[1])+1, step): ids.append(i) return typ, ids def makeACCEL(i, csys, grav, v, EID, filename='load.out'): ''' This function writes out static accelerations to Nastran grids. The specific bulk card used is a ACCEL1. The coordinate system and the orientation vector can be defined. The function assumes (and only works with) short-field format for Nastran. Input is the set ID, coordinate sytems, acceleration constant, orientation vector components, list of grid ids, and an output filename (defaulted to 'loads.out'). Output is a text file with all the PLOAD4 cards. Returns: nothing, file is written. ''' # Create the output file f = open(filename, 'w+') # Loop over element id for e in EID: # Write both lines of the ACCEL1 card. # See MSC Nastran quick-reference guide on ACCEL! 
card for details on fields f.write('ACCEL1 {:<8d}{:<8d}{:<8.3f}{:<8.1f}{:<8.1f}{:<8.1f}\n {:<8d}\n'.format(i, csys, grav, v[0], v[1], v[2], e)) f.close() # This cell won't work unless you have the proper thing copied on your clipboard! t, EID = patran2List(pyperclip.paste()) #makePressures(1,-344737864, EID) v = [1, 0, 0] makeACCEL(1, 0, 9.81, v, EID, filename='a.1gX.dat') ###Output _____no_output_____
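A small end-to-end sketch showing how the helpers defined above fit together. The input deck and output file names are placeholders, and note that the clipboard cell above assumes `pyperclip` is installed and imported.

```python
# Usage sketch for the Nastran load-card helpers above; file names are placeholders.
import pyperclip  # required by patran2List(pyperclip.paste()) in the cell above

# 1) Pull all CQUAD4 element IDs out of a short-field Nastran deck
eids = getEID('cquad4', 'model.fem')                     # hypothetical input deck

# 2) Write a uniform unit pressure (load set 1) normal to each element face
makePressures(S=1, q=1.0, EID=eids, filename='pload4_cards.dat')

# 3) Or write a 1 g lateral acceleration on nodes selected in Patran
typ, ids = patran2List(pyperclip.paste())                # expects a Patran pick list on the clipboard
makeACCEL(i=2, csys=0, grav=9.81, v=[1, 0, 0], EID=ids, filename='accel_cards.dat')
```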
spatial_joins.ipynb
###Markdown Spatial Joins Testing For allowing the kernal for your conda environment to be found by Jupyterhttps://stackoverflow.com/questions/39604271/conda-environments-not-showing-up-in-jupyter-notebook `python -m ipykernel install --user --name myenv --display-name "Python (myenv)"` ###Code import pandas as pd import geopandas as gpd import requests, json from shapely.geometry import Point zip_url = 'https://raw.githubusercontent.com/OpenDataDE/State-zip-code-GeoJSON/master/oh_ohio_zip_codes_geo.min.json' # Return the data print('Getting Zip Code Shapefile Data...') response = requests.get(zip_url) zips = response.json() jobs = pd.read_csv('./data/jobs.csv') jobs.head() ###Output _____no_output_____ ###Markdown Convert Pandas Dataframe to a GeoDataFrame ###Code # Attempt #1 gdf = gpd.GeoDataFrame(jobs, geometry=gpd.points_from_xy(jobs.lon, jobs.lat)) gdf.crs = {'init': 'epsg:4326'} # Attempt #2 geom = jobs.apply(lambda x : Point([x['lon'],x['lat']]), axis=1) gdf = gpd.GeoDataFrame(jobs, geometry=geom) gdf.crs = {'init': 'epsg:4326'} gdf.head() jobdf = gdf.dropna() jobdf.head() ###Output _____no_output_____ ###Markdown Convert GeoJSON to GeoDataFrame ###Code dfzips = gpd.read_file(zip_url) #Easy enough to just read from URL! dfzips.crs = {'init': 'epsg:4326'} dfzips.head() merged = gpd.sjoin(jobdf, dfzips, how="inner", op="within") merged.head() f'Percent Match in Zip Codes: {"{:0.1%}".format(sum(merged.zip == merged.ZCTA5CE10)/len(merged))}' merged[merged.zip != merged.ZCTA5CE10] ###Output _____no_output_____
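On newer GeoPandas releases (0.10+) the `op=` keyword used above is deprecated in favour of `predicate=`, and the CRS can be attached when the GeoDataFrame is built. A hedged sketch of the same join, written as a left join so postings that fall outside every ZCTA polygon can be inspected (it assumes the same `jobs` dataframe and `zip_url` as above):

```python
# Same point-in-polygon join with current GeoPandas syntax (assumes geopandas >= 0.10).
jobs_gdf = gpd.GeoDataFrame(
    jobs,
    geometry=gpd.points_from_xy(jobs.lon, jobs.lat),
    crs="EPSG:4326",
).dropna()

zips_gdf = gpd.read_file(zip_url)   # the CRS should be picked up from the GeoJSON itself

# Left join keeps unmatched job postings so they can be inspected rather than silently dropped.
joined = gpd.sjoin(jobs_gdf, zips_gdf, how="left", predicate="within")
unmatched = joined[joined["ZCTA5CE10"].isna()]
print(f"{len(unmatched)} job postings fall outside every Ohio ZCTA polygon")
```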
notebooks/DOG1_model.ipynb
###Markdown It's then necessary to check if Acromine produced the correct results. We must fix errors manually ###Code overlap = set(refs.values()) & set(ANO1_entrez_refs.values()) len(overlap) shortform_texts = [universal_extract_text(text, contains=[shortform]) for ref, text in content.items() if text and ref not in overlap and len(text) >= 5] miner = AdeftMiner(shortform) miner.process_texts(shortform_texts) top = miner.top() top miner.get_longforms() longforms = miner.get_longforms() longforms, scores = zip(*longforms) grounding_map = {} for longform in longforms: grounding = gilda_ground(longform) if grounding[0]: grounding_map[longform] = f'{grounding[0]}:{grounding[1]}' grounding_map grounding_map, names, pos_labels = ground_with_gui(longforms, scores, grounding_map=grounding_map) names['HGNC:21625'] = 'ANO1' pos_labels.extend(['HGNC:21625']) result = (grounding_map, names, pos_labels) result grounding_map, names, pos_labels = ({'delay of germination 1': 'UP:A0SVK0', 'delay of germination1': 'UP:A0SVK0'}, {'UP:A0SVK0': 'Protein DELAY OF GERMINATION 1', 'HGNC:21625': 'ANO1'}, ['UP:A0SVK0', 'HGNC:21625']) grounding_dict = {'DOG1': grounding_map} classifier = AdeftClassifier('DOG1', pos_labels=pos_labels) param_grid = {'C': [10.0, 100.0], 'max_features': [100, 1000, 10000]} labeler = AdeftLabeler(grounding_dict) corpus = labeler.build_from_texts(shortform_texts) corpus.extend([(text, 'HGNC:21625') for text in ANO1_entrez_texts]) texts, labels = zip(*corpus) classifier.cv(texts, labels, param_grid, cv=5, n_jobs=8) classifier.stats disamb = AdeftDisambiguator(classifier, grounding_dict, names) disamb.dump('DOG1', '../results') disambs = disamb.disambiguate(shortform_texts) a = [text for text, d in zip(shortform_texts, disambs) if d[0] != 'HGNC:21625'] len(a) a[1] from adeft.disambiguate import load_disambiguator disamb = load_disambiguator('DOG1', path='../results') from adeft_indra.s3 import model_to_s3 model_to_s3(disamb) ###Output _____no_output_____
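A quick smoke test of the stored model on a fresh snippet. The sentence is invented for illustration, and the result indexing follows the same `d[0]`-is-the-grounding convention used in the cells above.

```python
# Load the dumped DOG1 disambiguator and ground one made-up sentence.
from adeft.disambiguate import load_disambiguator

d = load_disambiguator('DOG1', path='../results')
example = ("Seed dormancy in Arabidopsis is controlled by DOG1, "
           "whose transcript levels rise during seed maturation.")
result = d.disambiguate([example])[0]
print(result[0])   # expected to be the plant protein grounding rather than HGNC:21625
```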
lezione1/soluzione-esercizi/ipynb/ES11-CreazioneDizionario.ipynb
###Markdown EXERCISE 11 - Creating, accessing and updating a dictionary
Create a dictionary to store the ages of the 4 members of the Rossi family, where the keys are the members' names and the values are their ages. Specifically, the Rossi family is made up of:
- Silvia, who is 45
- Roberto, who is 47
- Arianna, who is 18
- Tommaso, who is 13
You are then asked to:
- produce the size of the dictionary
- produce Arianna's age and then update it to 19
- add to the dictionary the age of Aurora, who is one year old
- finally, check whether Roberto is present in the dictionary
Solution
The members' names are the keys of the dictionary and the ages are the values, i.e. the elements to be stored in the dictionary, which we want to access through the member's name. The keys (names) are strings and the values (ages) are integers. The 4 key-value (name-age) pairs are assigned to the variable `diz` using the curly-brace syntax `{}` ###Code diz = {'Silvia' : 45, 'Roberto': 47, 'Arianna' : 18, 'Tommaso' : 13} ###Output _____no_output_____ ###Markdown The size of the dictionary is obtained by calling the `len()` function and passing as its argument the variable `diz` that holds the dictionary. The value returned by the function is assigned to the variable `dimensione` ###Code dimensione = len(diz) dimensione ###Output _____no_output_____ ###Markdown Arianna's age is the value in the dictionary associated with the string **Arianna** as the key ###Code diz['Arianna'] ###Output _____no_output_____ ###Markdown With the same dictionary-access syntax, Arianna's age can then be updated to 19 ###Code diz['Arianna'] = 19 ###Output _____no_output_____ ###Markdown Arianna's new age is now ###Code diz['Arianna'] ###Output _____no_output_____ ###Markdown At this point we can also add the age of Aurora, who is not yet present in the dictionary ###Code diz['Aurora'] = 1 ###Output _____no_output_____ ###Markdown The contents of the dictionary are now ###Code diz ###Output _____no_output_____ ###Markdown To check whether a key is present in the dictionary, simply use the `in` operator with the key to test on the left and the variable `diz` holding the dictionary on the right. The result of the evaluation is a boolean value (`True` or `False`) ###Code 'Roberto' in diz ###Output _____no_output_____
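As a short follow-up to the exercise (not part of the original notebook), a few more common dictionary operations on the same `diz`; the name 'Marco' is only there to demonstrate a missing key.

```python
# Illustrative extra operations on the exercise dictionary.
for name, age in diz.items():          # iterate over name/age pairs
    print(f"{name} is {age} years old")

diz.get('Marco', 'not present')        # .get() avoids a KeyError for a missing key
del diz['Aurora']                      # remove an entry
len(diz)                               # back to the 4 original members
```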
notebooks/not-used/1.10c-sfb-OBSOLETE-nested-subcats-python-and-SQL.ipynb
###Markdown Predict edits to articles in the Tropical Cyclones category Goals & Plans Predict daily edits to pages in jawiki category: Tropical Cyclones (熱帯低気圧) Steps ETL to get processed-input dataframes: - tcyc_page_ids_and_names (data dump or https://ja.wikipedia.org/wiki/Category:%E7%86%B1%E5%B8%AF%E4%BD%8E%E6%B0%97%E5%9C%A7)- target_dayseries (edit counts) (data dump or https://wikimedia.org/api/rest_v1//) (timezone?)- target_dayseries (pageviews) https://wikimedia.org/api/rest_v1//Pageviews_data/get_metrics_pageviews- jp_cyc (landfall data etc) Quant Analysis - overall lags - Estimate mean/median/mode lags of edits around a tcyc versus landfall - T-test (?) of mean lag for each of m/m/m lag to check if it's different from zero- proportions of edits by time category - time categories - reactive edits - in-season edits - off-season edits - cross reference time-categories with user-categories - weather-specialist editors - frequent editors - infrequent registered editors - IP editors (by location?) - cross-reference time-categories with different subcategories - tcyc in Japan versus storms in other particular continents - tcyc-science articles versus tcyc-storm articles - compare edit counts b/o damages, storm strength Predict - baseline preds by day - get residuals from raw values - values could be: - editcounts - residuals - reverted-edits- predict residual pageviews & edits based on landfall, landfall-severity, lagged vars (including self) - plot edit-spread around landfall and predictions- predict non-tcyc edits - with similar features- interesting questions: - Are non-tcyc edits "diverted" to tcyc's? - Are edits during crises less/more likely to get quickly reverted? - Are users whose first edit occurs during a crisis more or less likely to become long-term contributors? 
Details of processed-input dataframes - Table **tcyc_page_ids_and_names:** - Purpose: Help generate the target_dayseries - Includes: One row for each page_id / page name combo in wikiproject - Primary key (2-col pk): page_id, page name - Other columns: none - Sorting: By page_id, then by page name - Table **target_dayseries:** - Purpose: hold raw targets, baseline predictions, and deltas for-analysis - Includes: One row for every day during period - Primary key: edit_day (datetime) - Other columns: ```editcount_all_raw, editcount_cat_raw, editcount_all_basepreds, editcount_cat_basepreds, delta_all_actual_basepreds, delta_cat_actual_basepreds,``` Sources - Articles in the Tropical Cyclones category: ([jawiki categories](https://ja.wikipedia.org/wiki/Category:%E7%86%B1%E5%B8%AF%E4%BD%8E%E6%B0%97%E5%9C%A7)) - Articles to exclude: - People who died in tcyc's ([jawiki categories](https://ja.wikipedia.org/wiki/Category:%E6%B4%9E%E7%88%BA%E4%B8%B8%E4%BA%8B%E6%95%85%E3%81%AE%E7%8A%A0%E7%89%B2%E8%80%85)) - English-name-articles for names that got assigned to tcyc's: ([jawiki categories](https://ja.wikipedia.org/wiki/Category:%E5%8F%B0%E9%A2%A8%E3%81%AE%E8%8B%B1%E5%90%8D))- Pageviews: - 熱帯低気圧 ([jawiki page](https://ja.wikipedia.org/wiki/%E7%86%B1%E5%B8%AF%E4%BD%8E%E6%B0%97%E5%9C%A7)) ([data source](https://pageviews.toolforge.org/?project=ja.wikipedia.org&platform=all-access&agent=user&redirects=0&range=all-time&pages=%E7%86%B1%E5%B8%AF%E4%BD%8E%E6%B0%97%E5%9C%A7)) Get data data prep imports ###Code import pandas as pd, numpy as np, seaborn as sns, mysql.connector as mysql import os, re, sqlalchemy, datetime # from pathlib import Path import pickle ###Output _____no_output_____ ###Markdown read prepped data ###Code damage = pd.read_csv('../data/raw/weather/japan_nii/damage.tsv', sep='\t') landfall = pd.read_csv('../data/raw/weather/japan_nii/landfall.tsv', sep='\t') ###Output _____no_output_____ ###Markdown Import jawiki into sql table list - unified tables: - category - categorylinks - page- revisions tables: - (each year) import script Import these tables into a single MySQL database: "jawiki" ```bashmysql --user=root --password=XXXXXXXX``````SQLCREATE DATABASE jawiki CHARACTER SET utf8 COLLATE utf8_bin;USE jawiki;GRANT ALL PRIVILEGES ON jawiki TO bhrdwj@localhost IDENTIFIED BY XXXXXXX;GRANT ALL PRIVILEGES ON jawiki.* TO bhrdwj@localhost IDENTIFIED BY XXXXXXX;EXIT``````bashmysql --user=bhrdwj --password=XXXXXXX jawiki output.tabmysql --user=bhrdwj --password=XXXXXXX jawiki output2.tabmysql --user=bhrdwj --password=XXXXXXX jawiki output3.tab```*See notebook 1.15 for importing history tables* Get sql data turn on sql in bash ```bashsudo service mysqld startnetstat -lnp | grep mysql``` ###Code mysql_superuser = 'root' # mysql_su_pass = input(f'Enter the MySQL password for user {mysql_superuser}: ') ###Output _____no_output_____ ###Markdown Initialize a connection and cursor ###Code host='localhost'; user=mysql_superuser; passwd=mysql_su_pass; dbname='jawiki'; cxn = mysql.connect(host=host,user=user,passwd=passwd, database=dbname) # not the first time around cur = cxn.cursor() ###Output _____no_output_____ ###Markdown Check what's in mysql, pick a database (SILENCED) ###Code cur.execute('select user();'); print(cur.fetchall()) cur.execute('show databases;'); print(cur.fetchall()) dbname = 'jawiki' cur.execute(f'use {dbname};') cur.execute('select database();'); print(cur.fetchall()) cur.execute('show tables;'); print(cur.fetchall()) ###Output _____no_output_____ ###Markdown Initialize an engine 
###Code # Finally, let's instantiate a SQL alchemy engine, so we can pass results sets into pandas and evaluate them here connection_str = 'mysql+mysqlconnector://'+user+':'+passwd+'@'+host+'/'+dbname # removed this after host +':'+dbport try: engine1 = sqlalchemy.create_engine(connection_str) conn1 = engine1.connect() except: print('Database connection error - check creds') metadata = sqlalchemy.MetaData(conn1) metadata.reflect() metadata.tables.keys() ###Output _____no_output_____ ###Markdown Count rows in a table (SILENCED) ###Code sql_eda2 = f'select count(*) from {tblname};' print(datetime.datetime.now()) sql_rows = pd.read_sql(sql_eda2,engine1) print(sql_rows) print(datetime.datetime.now()) ###Output _____no_output_____ ###Markdown Show table head (SILENCED) ###Code tblname = 'categorylinks' cur.execute(f'SELECT * FROM {tblname} LIMIT 10;'); firstfew = cur.fetchall(); firstfew; ###Output _____no_output_____ ###Markdown Get tables' schemas ###Code tblnames = list(metadata.tables.keys()) schemas = {} for tn in tblnames: schemas[tn] = pd.read_sql(f'DESCRIBE {tn};', engine1) print(tn) display(schemas[tn]) ###Output category ###Markdown get all nested subcats and pages in category: 熱帯低気圧 define functions, gcloud credentials simple sql queries: ###Code def mw_query(colval_dict, tblname, engine=engine1): """ Simple query for a SQL table like pandas "loc", for mediawiki data dumps. Accepts: dict of filtering pairs: {colname:val, ...} Returns tuple: (df, query) Presumes all 'object' cols are bytearrays. (MAKE THIS OPTIONAL LATER.) """ d = colval_dict query = ( f'SELECT * FROM {tblname} WHERE ' + ' AND '.join([ f'{col} = (_BINARY "{d[col]}")' if type(d[col]) == str else f'{col} = {d[col]}' for col in d ]) + ';' ) selected_rows = pd.read_sql(query, engine) selected_rows = decode_df(selected_rows) return (selected_rows, query) def decode_df(df, encoding='utf-8'): """presume all 'object'-type cols of a pandas df are cols of bytearrays, and decode them.""" str_df = df.select_dtypes(['object']) # get list of columns that need decoding str_df = str_df.stack().str.decode('utf-8').unstack() # decode those columms for col in str_df: df[col] = str_df[col] # replace in original df return df ###Output _____no_output_____ ###Markdown translate: ja2en_txt, ja2en_ser Get gcloud credentials ###Code import os, six from google.cloud import translate_v2 as translate os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = '/home/bhrdwj/git/.my-translation-sa_keys.json' bq_client = translate.Client() ###Output _____no_output_____ ###Markdown functions for translation ###Code translate_client = translate.Client() def ja2en_txt(el): """Translates text into the target language. Target must be an ISO 639-1 language code. 
See https://g.co/cloud/translate/v2/translate-reference#supported_languages """ # import six # from google.cloud import translate_v2 as translate translate_client = translate.Client() if isinstance(el, six.binary_type): el = el.decode("utf-8") if not isinstance(el, str): return el text = el return translate_client.translate( text, target_language='EN', source_language='JA' )['translatedText'] def ja2en_ser(ser): """ Maps each element of a series with ja2en_txt, only if the type is string """ assert isinstance(ser,pd.Series) if pd.api.types.is_object_dtype(ser): return ser.map(lambda x: translate_client.translate(x, target_language='EN', source_language='JA')['translatedText']) else: return ser ###Output _____no_output_____ ###Markdown get all subcategories of category 熱帯低気圧 Initialize stack and output-collection ###Code topcattitle = '熱帯低気圧' slxn, query = mw_query({'cat_title':'熱帯低気圧'}, tblname='category') # stack of categories with subcats that haven't been queried yet catstack = [slxn.cat_id.values[0]] # storing the found categories' ids and titles all_subcats = {slxn.cat_id.values[0]: topcattitle} # {cat_id, cat_title} display(catstack) ###Output _____no_output_____ ###Markdown Add subcats to stack of category page_id's, and pop thru them, until all are found ###Code while len(catstack) > 0: cattitle = all_subcats[catstack.pop()] slxn, query = mw_query({'cl_to':cattitle}, tblname='categorylinks') cids = slxn.loc[slxn.cl_type=='subcat'].cl_from.tolist() catstack = catstack + cids cttls = [mw_query({'page_id':cid}, tblname='page')[0].page_title[0] for cid in cids] all_subcats.update({cids[i]:cttls[i] for i in range(len(cids))}) print(query) display(pd.Series(all_subcats)) ###Output SELECT * FROM categorylinks WHERE cl_to = (_BINARY "2013年の台風"); ###Markdown get all member-pages of category 熱帯低気圧 ###Code all_pages = {} for i in all_subcats: slxn, query = mw_query({'cl_to':all_subcats[i]}, tblname='categorylinks') pids = slxn.loc[slxn.cl_type=='page'].cl_from.tolist() pttls = [mw_query({'page_id':pid}, tblname='page')[0].page_title[0] for pid in pids] all_pages.update({pids[i]:pttls[i] for i in range(len(pids))}) print(query) display(pd.Series(all_pages)) ###Output SELECT * FROM categorylinks WHERE cl_to = (_BINARY "1856年の台風"); ###Markdown get time series of revisions to these pages ```bash``` Failed methods Get parent-categories. (I need children.) DON'T NEED THIS. ###Code # rows_en = rows.apply(ja2en_ser) # with open('../data/processed/jawiki_tcyclone_topcat31168_membpages.pickle', 'wb+') as file: # pickle.dump(rows_en, file) with open('../data/processed/jawiki_tcyclone_topcat31168_membpages.pickle', 'rb') as file: rows_en = pickle.load(file) rows_en ###Output _____no_output_____ ###Markdown Scrape page information. (Hard b/c lazy loading.) DATA DUMPS ARE BETTER Failed because wikipedia's [Extension: CategoryTree](https://www.mediawiki.org/wiki/Extension:CategoryTree) uses AJAX and might require selenium to scrape carefully. 
scrape page identifiers ###Code import requests from bs4 import BeautifulSoup req = requests.get('https://ja.wikipedia.org/wiki/Category:%E7%86%B1%E5%B8%AF%E4%BD%8E%E6%B0%97%E5%9C%A7') bs = BeautifulSoup(req.content, features='html.parser') #### get pages pages_in_TropicalCyclones_htmlchunk = bs.find('body').find('div',{'id':'mw-pages'}).find('div',{'class':'mw-category'}).find_all('li') pages_in_TropicalCyclones = {} for li in pages_in_TropicalCyclones_htmlchunk: pages_in_TropicalCyclones[ li.find('a').attrs['href'] ] = li.find('a').attrs['title'] pages_in_TropicalCyclones ### get categories subcats_in_TropicalCyclones_htmlchunk = bs.find('body').find('div',{'id':'mw-subcategories'}).find_all('div',{'class':'CategoryTreeItem'}) subcats_in_TropicalCyclones = {} for li in subcats_in_TropicalCyclones_htmlchunk: subcats_in_TropicalCyclones[ li.find('a').attrs['href'] ] = li.find('a').attrs['title'] bs.find('body').find('div',{'id':'mw-subcategories'}) subcats_in_TropicalCyclones_htmlchunk ###Output _____no_output_____
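The subcategory walk coded cell-by-cell above can be wrapped into one reusable function. This sketch only re-packages the existing `mw_query` calls, so it assumes the same engine and dump tables, and (like the original loop) that the category graph has no cycles.

```python
# Reusable wrapper around the stack-based subcategory traversal above.
def walk_category(top_title):
    """Return {cat_id: cat_title} for top_title and every nested subcategory."""
    top, _ = mw_query({'cat_title': top_title}, tblname='category')
    found = {top.cat_id.values[0]: top_title}
    stack = [top.cat_id.values[0]]
    while stack:
        title = found[stack.pop()]
        links, _ = mw_query({'cl_to': title}, tblname='categorylinks')
        child_ids = links.loc[links.cl_type == 'subcat'].cl_from.tolist()
        for cid in child_ids:
            page, _ = mw_query({'page_id': cid}, tblname='page')
            found[cid] = page.page_title[0]
        stack.extend(child_ids)
    return found

subcats = walk_category('熱帯低気圧')
```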
.ipynb_checkpoints/CarNDAdvLaneLines-checkpoint.ipynb
###Markdown Advanced Lane Finding ProjectThe goals / steps of this project are the following:* Compute the camera calibration matrix and distortion coefficients given a set of chessboard images.* Apply a distortion correction to raw images.* Use color transforms, gradients, etc., to create a thresholded binary image.* Apply a perspective transform to rectify binary image ("birds-eye view").* Detect lane pixels and fit to find the lane boundary.* Determine the curvature of the lane and vehicle position with respect to center.* Warp the detected lane boundaries back onto the original image.* Output visual display of the lane boundaries and numerical estimation of lane curvature and vehicle position.--- First, I'll compute the camera calibration using chessboard images ###Code import numpy as np import cv2 import glob import matplotlib.pyplot as plt %matplotlib qt # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0) objp = np.zeros((6*9,3), np.float32) objp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2) # Arrays to store object points and image points from all the images. objpoints = [] # 3d points in real world space imgpoints = [] # 2d points in image plane. # Make a list of calibration images images = glob.glob('camera_cal/calibration*.jpg') # Step through the list and search for chessboard corners for fname in images: img = cv2.imread(fname) gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) # Find the chessboard corners ret, corners = cv2.findChessboardCorners(gray, (9,6),None) # If found, add object points, image points if ret == True: objpoints.append(objp) imgpoints.append(corners) # Draw and display the corners img = cv2.drawChessboardCorners(img, (9,6), corners, ret) cv2.imshow('img',img) cv2.waitKey(500) cv2.destroyAllWindows() ###Output 20 camera_cal\calibration1.jpg camera_cal\calibration10.jpg camera_cal\calibration11.jpg camera_cal\calibration12.jpg camera_cal\calibration13.jpg camera_cal\calibration14.jpg camera_cal\calibration15.jpg camera_cal\calibration16.jpg camera_cal\calibration17.jpg camera_cal\calibration18.jpg camera_cal\calibration19.jpg camera_cal\calibration2.jpg camera_cal\calibration20.jpg camera_cal\calibration3.jpg camera_cal\calibration4.jpg camera_cal\calibration5.jpg camera_cal\calibration6.jpg camera_cal\calibration7.jpg camera_cal\calibration8.jpg camera_cal\calibration9.jpg
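The next project step listed at the top ("compute the camera calibration matrix and distortion coefficients", then "apply a distortion correction") could be sketched as below using the `objpoints` / `imgpoints` collected above; the test image name is just a placeholder.

```python
# Sketch of the next calibration step from the project goals.
img = cv2.imread('camera_cal/calibration1.jpg')        # any calibration image works here
img_size = (img.shape[1], img.shape[0])

ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size, None, None)
undistorted = cv2.undistort(img, mtx, dist, None, mtx)
print('camera matrix:\n', mtx)
```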
examples/Field Sweep Example.ipynb
###Markdown Field Sweep Example Author: Colin Jermain, Minh-Hai NguyenSimulating the equilibrium positions of a macrospin as a function of applied field is demonstrated. A `FieldSweep` object is used to preform the simulation. ###Code import sys, importlib sys.path.append("../") %matplotlib inline from matplotlib import pyplot as plt import numpy as np import pymacrospin.parameters as params import pymacrospin.kernels as kernels # import pymacrospin.numba.kernels as kernels # Run this if want to use numba kernels import pymacrospin.simulations as sims ###Output _____no_output_____ ###Markdown Setting up the kernel and Field Sweep control Unlike the [basic example](Basic Macrospin Example.ipynb), the external field does not need to be defined in the parameters. The values of the externally applied field will be passed to the specify sweeping method of `FieldSweep`. ###Code parameters = { 'Ms': 140, # Saturation Magnetization (emu/cc) 'dt': 1e-12, # Timestep (sec) 'damping': 0.01, # Gilbert damping 'm0': [1, 0., 0.], # Initial moment (normalized) } kernel = kernels.BasicKernel(step_method='RK23', **parameters) simulation = sims.FieldSweep(kernel) ###Output _____no_output_____ ###Markdown Linear field sweepThe units of the start and end fields are defined based on the Parameters object that was chosen initially for the kernel. ###Code fields, moments, times = simulation.sweep_linear([1e3,100,0],[-1e3,100,0],10,return_time=True) plt.plot(fields[:,0], moments[:,0], 'o-') # plt.ylim(-1.1, 1.1) plt.ylabel("Mx/Ms") plt.xlabel("x Applied Field (Oe)") plt.show() ###Output _____no_output_____ ###Markdown Rotational field sweep Rotate the field in-plane. ###Code phis = np.linspace(0,180,19) fields, moments, times = simulation.sweep_rotation(1e3,90,phis,return_time=True) plt.plot(phis, moments[:,0], 'o-',label="Mx") plt.plot(phis, moments[:,1], 's-',label="My") plt.ylim(-1.1, 1.1) plt.ylabel("M/Ms") plt.xlabel("Applied field angle Phi [deg]") plt.legend() plt.show() ###Output _____no_output_____
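A rough way to pull a switching-field estimate out of the linear sweep is to rerun it with more steps and look for the sign change in Mx. This assumes, as in the linear-sweep cell above, that the third positional argument of `sweep_linear` is the number of field steps (10 there, 100 here).

```python
# Coarse switching-field estimate from a denser linear sweep (numpy is already imported as np).
import numpy as np

fields, moments, times = simulation.sweep_linear([1e3, 100, 0], [-1e3, 100, 0], 100, return_time=True)
flip = np.where(np.diff(np.sign(moments[:, 0])) != 0)[0]
if flip.size:
    i = flip[0]
    print(f"Mx reverses between {fields[i, 0]:.0f} Oe and {fields[i + 1, 0]:.0f} Oe")
```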
deep-learning-with-python-notebooks/2.1-a-first-look-at-a-neural-network.ipynb
###Markdown 原文代码作者:François Cholletgithub:https://github.com/fchollet/deep-learning-with-python-notebooks中文注释制作:黄海广github:https://github.com/fengdu78代码全部测试通过。配置环境:keras 2.2.1(原文是2.0.8,运行结果一致),tensorflow 1.8,python 3.6,主机:显卡:一块1080ti;内存:32g(注:绝大部分代码不需要GPU)![公众号](data/gongzhong.jpg) ###Code import keras keras.__version__ ###Output _____no_output_____ ###Markdown A first look at a neural network 初识神经网络This notebook contains the code samples found in Chapter 2, Section 1 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python?a_aid=keras&a_bid=76564dff). Note that the original text features far more content, in particular further explanations and figures: in this notebook, you will only find source code and related comments.----We will now take a look at a first concrete example of a neural network, which makes use of the Python library Keras to learn to classify hand-written digits. Unless you already have experience with Keras or similar libraries, you will not understand everything about this first example right away. You probably haven't even installed Keras yet. Don't worry, that is perfectly fine. In the next chapter, we will review each element in our example and explain them in detail. So don't worry if some steps seem arbitrary or look like magic to you! We've got to start somewhere.The problem we are trying to solve here is to classify grayscale images of handwritten digits (28 pixels by 28 pixels), into their 10 categories (0 to 9). The dataset we will use is the MNIST dataset, a classic dataset in the machine learning community, which has been around for almost as long as the field itself and has been very intensively studied. It's a set of 60,000 training images, plus 10,000 test images, assembled by the National Institute of Standards and Technology (the NIST in MNIST) in the 1980s. You can think of "solving" MNIST as the "Hello World" of deep learning -- it's what you do to verify that your algorithms are working as expected. As you become a machine learning practitioner, you will see MNIST come up over and over again, in scientific papers, blog posts, and so on.我们来看一个具体的神经网络示例,使用 Python 的 Keras 库来学习手写数字分类。如果你 没用过 Keras 或类似的库,可能无法立刻搞懂这个例子中的全部内容。甚至你可能还没有安装 Keras。没关系,下一章会详细解释这个例子中的每个步骤。因此,如果其中某些步骤看起来有 些随意,或者像魔法一样,也请你不要担心。下面我们要开始了。我们这里要解决的问题是,将手写数字的灰度图像(28 像素×28 像素)划分到 10 个类别 中(0~9)。我们将使用 MNIST 数据集,它是机器学习领域的一个经典数据集,其历史几乎和这 个领域一样长,而且已被人们深入研究。这个数据集包含 60 000 张训练图像和 10 000 张测试图 像,由美国国家标准与技术研究院(National Institute of Standards and Technology,即 MNIST 中 的 NIST)在 20 世纪 80 年代收集得到。你可以将“解决”MNIST 问题看作深度学习的“Hello World”,正是用它来验证你的算法是否按预期运行。当你成为机器学习从业者后,会发现 MNIST 一次又一次地出现在科学论文、博客文章等中。 The MNIST dataset comes pre-loaded in Keras, in the form of a set of four Numpy arrays: ###Code from keras.datasets import mnist (train_images, train_labels), (test_images, test_labels) = mnist.load_data() #这里下载要多试几次,不翻墙能下。train_images 和 train_labels 组成了训练集(training set),模型将从这些数据中进行学习。 #然后在测试集(test set,即 test_images 和 test_labels)上对模型进行测试。 ###Output _____no_output_____ ###Markdown `train_images` and `train_labels` form the "training set", the data that the model will learn from. The model will then be tested on the "test set", `test_images` and `test_labels`. Our images are encoded as Numpy arrays, and the labels are simply an array of digits, ranging from 0 to 9. 
There is a one-to-one correspondence between the images and the labels.Let's have a look at the training data:train_images 和 train_labels 组成了训练集(training set),模型将从这些数据中进行学习。然后在测试集(test set,即 test_images 和 test_labels)上对模型进行测试。图像被编码为 Numpy 数组,而标签是数字数组,取值范围为 0~9。图像和标签一一对应。 我们来看一下训练数据: ###Code train_images.shape#看下数据的维度 len(train_labels) train_labels ###Output _____no_output_____ ###Markdown Let's have a look at the test data:看下测试数据: ###Code test_images.shape len(test_labels) test_labels ###Output _____no_output_____ ###Markdown Our workflow will be as follow: first we will present our neural network with the training data, `train_images` and `train_labels`. The network will then learn to associate images and labels. Finally, we will ask the network to produce predictions for `test_images`, and we will verify if these predictions match the labels from `test_labels`.Let's build our network -- again, remember that you aren't supposed to understand everything about this example just yet.接下来的工作流程如下:首先,将训练数据(train_images 和 train_labels)输入神经网络;其次,网络学习将图像和标签关联在一起;最后,网络对 test_images 生成预测, 而我们将验证这些预测与 test_labels 中的标签是否匹配。下面我们来构建网络。再说一遍,你现在不需要理解这个例子的全部内容。 ###Code from keras import models from keras import layers network = models.Sequential() network.add(layers.Dense(512, activation='relu', input_shape=(28 * 28,))) network.add(layers.Dense(10, activation='softmax')) ###Output _____no_output_____ ###Markdown The core building block of neural networks is the "layer", a data-processing module which you can conceive as a "filter" for data. Some data comes in, and comes out in a more useful form. Precisely, layers extract _representations_ out of the data fed into them -- hopefully representations that are more meaningful for the problem at hand. Most of deep learning really consists of chaining together simple layers which will implement a form of progressive "data distillation". A deep learning model is like a sieve for data processing, made of a succession of increasingly refined data filters -- the "layers".Here our network consists of a sequence of two `Dense` layers, which are densely-connected (also called "fully-connected") neural layers. The second (and last) layer is a 10-way "softmax" layer, which means it will return an array of 10 probability scores (summing to 1). Each score will be the probability that the current digit image belongs to one of our 10 digit classes.To make our network ready for training, we need to pick three more things, as part of "compilation" step:* A loss function: the is how the network will be able to measure how good a job it is doing on its training data, and thus how it will be able to steer itself in the right direction.* An optimizer: this is the mechanism through which the network will update itself based on the data it sees and its loss function.* Metrics to monitor during training and testing. 
Here we will only care about accuracy (the fraction of the images that were correctly classified).The exact purpose of the loss function and the optimizer will be made clear throughout the next two chapters.神经网络的核心组件是层(layer),它是一种数据处理模块,你可以将它看成数据过滤器。 进去一些数据,出来的数据变得更加有用。具体来说,层从输入数据中提取表示——我们期望这种表示有助于解决手头的问题。大多数深度学习都是将简单的层链接起来,从而实现渐进式 的数据蒸馏(data distillation)。深度学习模型就像是数据处理的筛子,包含一系列越来越精细的数据过滤器(即层)。本例中的网络包含 2 个 Dense 层,它们是密集连接(也叫全连接)的神经层。第二层(也 是最后一层)是一个 10 路 softmax 层,它将返回一个由 10 个概率值(总和为 1)组成的数组。每个概率值表示当前数字图像属于 10 个数字类别中某一个的概率。要想训练网络,我们还需要选择编译(compile)步骤的三个参数。* 损失函数(loss function):网络如何衡量在训练数据上的性能,即网络如何朝着正确的方向前进。* 优化器(optimizer):基于训练数据和损失函数来更新网络的机制。* 在训练和测试过程中需要监控的指标(metric):本例只关心精度,即正确分类的图像所占的比例。后续两章会详细解释损失函数和优化器的确切用途。 ###Code network.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy']) ###Output _____no_output_____ ###Markdown Before training, we will preprocess our data by reshaping it into the shape that the network expects, and scaling it so that all values are in the `[0, 1]` interval. Previously, our training images for instance were stored in an array of shape `(60000, 28, 28)` of type `uint8` with values in the `[0, 255]` interval. We transform it into a `float32` array of shape `(60000, 28 * 28)` with values between 0 and 1.在开始训练之前,我们将对数据进行预处理,将其变换为网络要求的形状,并缩放到所 有值都在 [0, 1] 区间。比如,之前训练图像保存在一个 uint8 类型的数组中,其形状为 (60000, 28, 28),取值区间为 [0, 255]。我们需要将其变换为一个 float32 数组,其形 状为 (60000, 28 * 28),取值范围为 0~1。 ###Code train_images = train_images.reshape((60000, 28 * 28)) train_images = train_images.astype('float32') / 255 test_images = test_images.reshape((10000, 28 * 28)) test_images = test_images.astype('float32') / 255 ###Output _____no_output_____ ###Markdown We also need to categorically encode the labels, a step which we explain in chapter 3:我们还需要对标签进行分类编码,第 3 章将会对这一步骤进行解释。 ###Code from keras.utils import to_categorical train_labels = to_categorical(train_labels) test_labels = to_categorical(test_labels) ###Output _____no_output_____ ###Markdown We are now ready to train our network, which in Keras is done via a call to the `fit` method of the network: we "fit" the model to its training data.现在我们准备开始训练网络,在 Keras 中这一步是通过调用网络的 fit 方法来完成的—— 2我们在训练数据上拟合(fit)模型。 ###Code network.fit(train_images, train_labels, epochs=5, batch_size=128) ###Output Epoch 1/5 60000/60000 [==============================] - 28s 463us/step - loss: 0.0287 - acc: 0.9913 Epoch 2/5 60000/60000 [==============================] - 3s 45us/step - loss: 0.0216 - acc: 0.9938 Epoch 3/5 60000/60000 [==============================] - 3s 45us/step - loss: 0.0165 - acc: 0.9953 Epoch 4/5 60000/60000 [==============================] - 3s 45us/step - loss: 0.0123 - acc: 0.9966 Epoch 5/5 60000/60000 [==============================] - 3s 46us/step - loss: 0.0104 - acc: 0.9972 ###Markdown Two quantities are being displayed during training: the "loss" of the network over the training data, and the accuracy of the network over the training data.We quickly reach an accuracy of 0.989 (i.e. 98.9%) on the training data. Now let's check that our model performs well on the test set too:训练过程中显示了两个数字:一个是网络在训练数据上的损失(loss),另一个是网络在 训练数据上的精度(acc)。我们很快就在训练数据上达到了 0.989(98.9%)的精度。现在我们来检查一下模型在测试 集上的性能。 ###Code test_loss, test_acc = network.evaluate(test_images, test_labels) print('test_acc:', test_acc) ###Output test_acc: 0.979
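As a final sanity check (not in the original chapter text), the trained network can be asked for a prediction on a single test digit; `predict` returns the ten softmax probabilities described above, and `argmax` on the one-hot label recovers the true digit.

```python
# Predict one test digit with the trained network (index 0 is an arbitrary choice).
import numpy as np

probabilities = network.predict(test_images[:1])     # shape (1, 10)
print('predicted digit:', np.argmax(probabilities[0]))
print('true digit:', np.argmax(test_labels[0]))      # labels were one-hot encoded above
```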
02_train_experiments/02_train_04_xseresnext_appian_fromscratch.ipynb
###Markdown Load Model ###Code name = 'baseline_any_xse_resnext50_appian_fromscratch' import pretrainedmodels m = pretrainedmodels.se_resnext50_32x4d(num_classes=1000, pretrained='imagenet') # m.avg_pool = nn.AdaptiveAvgPool2d(1) # m.last_linear = nn.Linear(in_features=2048, out_features=6) m.avg_pool = AdaptiveConcatPool2d(1) m.last_linear = nn.Linear(in_features=2048*2, out_features=6) def appianish_cycle_train(learn, get_data, no_pretrain=False, schedule=None, lr=3e-4): do_slice=freeze= not no_pretrain if not schedule: schedule = [(64,192), (64,352), (64,512), (64,None)] """ learn.loss_func = get_loss() bs,sz = schedule[3] learn.dls = get_data(bs, None, use_wgt=False) do_fit(learn, 1, lr*2/3*2/3, freeze=False, **no_1cycle) """ if no_pretrain: learn.loss_func = get_loss() learn.dls = get_data(64, 160, use_wgt=False) do_fit(learn, 4, lr, freeze=False, **no_1cycle) #if not schedule: schedule = [(64,192), (64,352), (64,512), (64,None)] #if not schedule: schedule = [(16,192),(16,352),(16,512),(16,None)] learn.loss_func = get_loss() bs,sz = schedule[0] learn.dls = get_data(bs, 192, use_wgt=True) do_fit(learn, 2, lr, do_slice=do_slice, freeze=freeze , **no_1cycle) bs,sz = schedule[1] learn.dls = get_data(bs, 352, use_wgt=True) do_fit(learn, 2, lr*2/3, do_slice=do_slice, freeze=freeze, **no_1cycle) bs,sz = schedule[2] learn.dls = get_data(bs, 512, use_wgt=True) do_fit(learn, 1, lr*2/3*2/3, do_slice=False, freeze=False, **no_1cycle) learn.loss_func = get_loss() bs,sz = schedule[3] learn.dls = get_data(bs, None, use_wgt=False) do_fit(learn, 1, lr*2/3*2/3, freeze=False, **no_1cycle) do_fit(learn, 1, lr*2/3*2/3, freeze=False, div=1, pct_start=0.01) dls = get_data(384, 192, splits=Meta.splits, img_dir=path_jpg256) learn = get_learner(dls, m) lr = 4e-3 ###Output _____no_output_____ ###Markdown Training ###Code do_fit(learn, 2, lr, **no_1cycle) learn.save(f'runs/{name}-1') learn.dls = get_data(196, 352, splits=Meta.splits, img_dir=path_jpg) do_fit(learn, 2, lr*1/3, freeze=False, **no_1cycle) learn.save(f'runs/{name}-2') learn.dls = get_data(128, 512, splits=Meta.splits, img_dir=path_jpg) do_fit(learn, 1, lr*1/9, freeze=False, **no_1cycle) learn.save(f'runs/{name}-3') learn.dls = get_data(128, None, splits=Meta.splits, img_dir=path_jpg) do_fit(learn, 1, lr*1/9, freeze=False, **no_1cycle) learn.save(f'runs/{name}-4') ###Output _____no_output_____ ###Markdown Submission ###Code learn.load(f'runs/{name}-1') sub_fn = f'subm/{name}' learn.dls = get_test_data(Meta.df_tst, bs=128, sz=None, tst_dir='tst_jpg') ob = learn.dls.one_batch() preds,targs = learn.get_preds() pred_csv = submission(Meta.df_tst, preds, fn=sub_fn) api.competition_submit(f'{sub_fn}.csv', f'{name} appian se_resnext50', 'rsna-intracranial-hemorrhage-detection') api.competitions_submissions_list('rsna-intracranial-hemorrhage-detection')[0] ###Output _____no_output_____ ###Markdown Save Predictions ###Code class FeatureModel(nn.Module): def __init__(self, m): super().__init__() self.m = m def forward(self, x): return self.m.avg_pool(self.m.features(x)) #export class DummyLoss: def __call__(self, p, *t, **kwargs): return torch.tensor(0, device=p.device).float() #export def save_features(learn, feat_path): preds,targs = learn.get_preds(dl=learn.dls.valid) val_ids = dls.valid.dataset.items feat_path.mkdir(exist_ok=True) for idx,pred in progress_bar(zip(val_ids, preds), total=len(val_ids)): fn = feat_path/f'{idx}' if fn.exists(): continue np.save(str(fn), pred.squeeze().numpy()) #export path_feat512 = path/'features_512' path_feat512_tst = 
path/'tst_features_512' len(dls.valid) dls = get_test_data(Meta.df_comb, bs=128, sz=None, tst_dir='nocrop_jpg', sl=slice(50000, 200000)) # dls = get_data(48, 512, splits=Meta.splits_sample, img_dir=path_jpg) learn = get_learner(dls, m, lf=DummyLoss(), metrics=[]) learn.load(f'runs/{name}-1') learn.model = FeatureModel(learn.model) save_features(learn, path_feat512) import gc gc.collect() learn.dls = get_test_data(Meta.df_tst, bs=128, sz=None, tst_dir='tst_jpg') save_features(learn, path_feat512_tst) ###Output _____no_output_____
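A sketch of reading the dumped features back for a downstream model. It assumes `save_features` above has populated `path_feat512` with one `.npy` file per image id, and that the feature width is 4096 because of the `AdaptiveConcatPool2d` head (2048*2) used when the model was built.

```python
# Load a batch of the saved per-image feature vectors back into memory.
import numpy as np

feat_files = sorted(path_feat512.glob('*.npy'))
feats = np.stack([np.load(f) for f in feat_files[:256]])   # first 256 files only
print(feats.shape)                                         # expected (256, 4096) with the concat-pool head
```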
arbolCovid19.ipynb
###Markdown Árbol de decisión COVID-19 Utilizando reglas del tipo if se generan las directrices para proponer al usuario si tiene o no COVID-19 o bien puede proponérsele que sea otra enfermedad muy parecida en sintomas. ###Code print("Por favor escribe el número de oración corresponda a tu realidad:") print("1. Estuve en contacto con algún confirmado de COVID-19") print("2. No estoy seguro pero presento cierta sintomatología") respuesta=input() if(respuesta=="1"): print("1. Presento sintomatología grave y además soy persona de riesgo") print("2. Presento sintomatología nula o muy leve y además no soy persona de riesgo") respuesta=input() if(respuesta=="1"): print("Resultado: Estas en gran riesgo (COVID-19 POSITIVO)") else: print("Resultado: No estás en gran riesgo, manten cuarentena total (COVID-19 NEGATIVO)") else: print("1. Presento fiebre mayor a 37.5") print("2. No presento fiebre") respuesta=input() if(respuesta=="1"): print("1. Tengo dolor de pecho") print("2. Solo tengo dolor de garganta") respuesta=input() if(respuesta=="1"): print("1. Presento tos seca") print("2. Presento tos productiva") respuesta=input() if(respuesta=="1"): print("1. Tengo dificultad para respirar") print("2. No tengo dificultad para respirar") respuesta=input() if(respuesta=="1"): print("1. Puedo moverme y hablar") print("2. Ya no puedo moverme o hablar") respuesta=input() if(respuesta=="1"): print("Resultado: Estas en gran riesgo (COVID-19 POSITIVO)") else: print("Resultado: Estás en extremo riesgo. Llama a emergencias. (COVID-19 POSITIVO)") else: print("Resultado: Podría sólo ser gripe común. (COVID-19 NEGATIVO)") else: print("Resultado: Podría sólo ser resfriado común. (COVID-19 NEGATIVO)") else: print("Resultado: Podría sólo ser resfriado común. (COVID-19 NEGATIVO)") else: print("1. Tengo lo ojos irritados") print("2. No tengo lo ojos irritados") respuesta=input() if(respuesta=="1"): print("Resultado: Podría sólo ser alergia. (COVID-19 NEGATIVO)") else: print("Resultado: Podría sólo ser resfriado común. (COVID-19 NEGATIVO)") ###Output Por favor escribe el número de oración corresponda a tu realidad: 1. Estuve en contacto con algún confirmado de COVID-19 2. No estoy seguro pero presento cierta sintomatología 2 1. Presento fiebre mayor a 37.5 2. No presento fiebre 1 1. Tengo dolor de pecho 2. Solo tengo dolor de garganta 1 1. Presento tos seca 2. Presento tos productiva 1 1. Tengo dificultad para respirar 2. No tengo dificultad para respirar 1 1. Puedo moverme y hablar 2. Ya no puedo moverme o hablar 2 Resultado: Estás en extremo riesgo. Llama a emergencias. (COVID-19 POSITIVO)
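An optional refactor, not in the original notebook: the same questionnaire can be expressed as a data-driven tree, which keeps questions and outcomes in one structure instead of deeply nested if/else blocks. Only the first branch is sketched, with the question text translated/paraphrased into English.

```python
# Minimal data-driven version of the first branch of the decision tree above.
tree = {
    "question": "Did you have contact with a confirmed COVID-19 case? (1=yes, 2=no)",
    "1": {"question": "Severe symptoms and/or in a risk group? (1=yes, 2=no)",
          "1": "You are at high risk (COVID-19 POSITIVE)",
          "2": "You are not at high risk, keep full quarantine (COVID-19 NEGATIVE)"},
    "2": "...continue with the fever/symptom branch of the notebook...",
}

node = tree
while isinstance(node, dict):
    answer = input(node["question"] + " ")
    node = node.get(answer, node)        # unknown input: ask the same question again
print("Result:", node)
```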
3.Databases-and-SQL-for-Data-Science/.ipynb_checkpoints/a.DB2-checkpoint.ipynb
###Markdown Lab: Access DB2 on Cloud using Python IntroductionThis notebook illustrates how to access your database instance using Python by following the steps below:1. Import the `ibm_db` Python library1. Identify and enter the database connection credentials1. Create the database connection1. Create a table1. Insert data into the table1. Query data from the table1. Retrieve the result set into a pandas dataframe1. Close the database connection__Notice:__ Please follow the instructions given in the first Lab of this course to Create a database service instance of Db2 on Cloud. Task 1: Import the `ibm_db` Python libraryThe `ibm_db` [API ](https://pypi.python.org/pypi/ibm_db/) provides a variety of useful Python functions for accessing and manipulating data in an IBM® data server database, including functions for connecting to a database, preparing and issuing SQL statements, fetching rows from result sets, calling stored procedures, committing and rolling back transactions, handling errors, and retrieving metadata.We import the ibm_db library into our Python Application ###Code import ibm_db ###Output _____no_output_____ ###Markdown When the command above completes, the `ibm_db` library is loaded in your notebook. Task 2: Identify the database connection credentialsConnecting to dashDB or DB2 database requires the following information:* Driver Name* Database name * Host DNS name or IP address * Host port* Connection protocol* User ID* User Password__Notice:__ To obtain credentials please refer to the instructions given in the first Lab of this courseNow enter your database credentials belowReplace the placeholder values in angular brackets below with your actual database credentials e.g. replace "database" with "BLUDB" ###Code #Replace the placeholder values with the actuals for your Db2 Service Credentials dsn_driver = "{IBM DB2 ODBC DRIVER}" dsn_database = "database" # e.g. "BLUDB" dsn_hostname = "hostname" # e.g.: "dashdb-txn-sbox-yp-dal09-04.services.dal.bluemix.net" dsn_port = "port" # e.g. "50000" dsn_protocol = "protocol" # i.e. "TCPIP" dsn_uid = "username" # e.g. "abc12345" dsn_pwd = "password" # e.g. "7dBZ3wWt9XN6$o0J" ###Output _____no_output_____ ###Markdown Task 3: Create the database connectionIbm_db API uses the IBM Data Server Driver for ODBC and CLI APIs to connect to IBM DB2 and Informix.Create the database connection ###Code #Create database connection #DO NOT MODIFY THIS CELL. Just RUN it with Shift + Enter dsn = ( "DRIVER={0};" "DATABASE={1};" "HOSTNAME={2};" "PORT={3};" "PROTOCOL={4};" "UID={5};" "PWD={6};").format(dsn_driver, dsn_database, dsn_hostname, dsn_port, dsn_protocol, dsn_uid, dsn_pwd) try: conn = ibm_db.connect(dsn, "", "") print ("Connected to database: ", dsn_database, "as user: ", dsn_uid, "on host: ", dsn_hostname) except: print ("Unable to connect: ", ibm_db.conn_errormsg() ) ###Output _____no_output_____ ###Markdown Task 4: Create a table in the databaseIn this step we will create a table in the database with following details: ###Code #Lets first drop the table INSTRUCTOR in case it exists from a previous attempt dropQuery = "drop table INSTRUCTOR" #Now execute the drop statment dropStmt = ibm_db.exec_immediate(conn, dropQuery) ###Output _____no_output_____ ###Markdown Dont worry if you get this error:If you see an exception/error similar to the following, indicating that INSTRUCTOR is an undefined name, that's okay. 
It just implies that the INSTRUCTOR table does not exist in the table - which would be the case if you had not created it previously.Exception: [IBM][CLI Driver][DB2/LINUXX8664] SQL0204N "ABC12345.INSTRUCTOR" is an undefined name. SQLSTATE=42704 SQLCODE=-204 ###Code #Construct the Create Table DDL statement - replace the ... with rest of the statement createQuery = "create table INSTRUCTOR(id INTEGER PRIMARY KEY NOT NULL, fname ...)" #Now fill in the name of the method and execute the statement createStmt = ibm_db.replace_with_name_of_execution_method(conn, createQuery) ###Output _____no_output_____ ###Markdown Double-click __here__ for the solution.<!-- Hint:createQuery = "create table INSTRUCTOR(ID INTEGER PRIMARY KEY NOT NULL, FNAME VARCHAR(20), LNAME VARCHAR(20), CITY VARCHAR(20), CCODE CHAR(2))"createStmt = ibm_db.exec_immediate(conn,createQuery)--> Task 5: Insert data into the tableIn this step we will insert some rows of data into the table. The INSTRUCTOR table we created in the previous step contains 3 rows of data:We will start by inserting just the first row of data, i.e. for instructor Rav Ahuja ###Code #Construct the query - replace ... with the insert statement insertQuery = "..." #execute the insert statement insertStmt = ibm_db.exec_immediate(conn, insertQuery) ###Output _____no_output_____ ###Markdown Double-click __here__ for the solution.<!-- Hint:insertQuery = "insert into INSTRUCTOR values (1, 'Rav', 'Ahuja', 'TORONTO', 'CA')"insertStmt = ibm_db.exec_immediate(conn, insertQuery)--> Now use a single query to insert the remaining two rows of data ###Code #replace ... with the insert statement that inerts the remaining two rows of data insertQuery2 = "..." #execute the statement insertStmt2 = ibm_db.exec_immediate(conn, insertQuery2) ###Output _____no_output_____ ###Markdown Double-click __here__ for the solution.<!-- Hint:insertQuery2 = "insert into INSTRUCTOR values (2, 'Raul', 'Chong', 'Markham', 'CA'), (3, 'Hima', 'Vasudevan', 'Chicago', 'US')"insertStmt2 = ibm_db.exec_immediate(conn, insertQuery2)--> Task 6: Query data in the tableIn this step we will retrieve data we inserted into the INSTRUCTOR table. ###Code #Construct the query that retrieves all rows from the INSTRUCTOR table selectQuery = "select * from INSTRUCTOR" #Execute the statement selectStmt = ibm_db.exec_immediate(conn, selectQuery) #Fetch the Dictionary (for the first row only) - replace ... with your code ... 
###Output _____no_output_____ ###Markdown Double-click __here__ for the solution.<!-- Hint:Construct the query that retrieves all rows from the INSTRUCTOR tableselectQuery = "select * from INSTRUCTOR"Execute the statementselectStmt = ibm_db.exec_immediate(conn, selectQuery)Fetch the Dictionary (for the first row only)ibm_db.fetch_both(selectStmt)--> ###Code #Fetch the rest of the rows and print the ID and FNAME for those rows while ibm_db.fetch_row(selectStmt) != False: print (" ID:", ibm_db.result(selectStmt, 0), " FNAME:", ibm_db.result(selectStmt, "FNAME")) ###Output _____no_output_____ ###Markdown Double-click __here__ for the solution.<!-- Hint:Fetch the rest of the rows and print the ID and FNAME for those rowswhile ibm_db.fetch_row(selectStmt) != False: print (" ID:", ibm_db.result(selectStmt, 0), " FNAME:", ibm_db.result(selectStmt, "FNAME"))--> Bonus: now write and execute an update statement that changes the Rav's CITY to MOOSETOWN ###Code #Enter your code below ###Output _____no_output_____ ###Markdown Double-click __here__ for the solution.<!-- Hint:updateQuery = "update INSTRUCTOR set CITY='MOOSETOWN' where FNAME='Rav'"updateStmt = ibm_db.exec_immediate(conn, updateQuery))--> Task 7: Retrieve data into Pandas In this step we will retrieve the contents of the INSTRUCTOR table into a Pandas dataframe ###Code import pandas import ibm_db_dbi #connection for pandas pconn = ibm_db_dbi.Connection(conn) #query statement to retrieve all rows in INSTRUCTOR table selectQuery = "select * from INSTRUCTOR" #retrieve the query results into a pandas dataframe pdf = pandas.read_sql(selectQuery, pconn) #print just the LNAME for first row in the pandas data frame pdf.LNAME[0] #print the entire data frame pdf ###Output _____no_output_____ ###Markdown Once the data is in a Pandas dataframe, you can do the typical pandas operations on it. For example you can use the shape method to see how many rows and columns are in the dataframe ###Code pdf.shape ###Output _____no_output_____ ###Markdown Task 8: Close the ConnectionWe free all resources by closing the connection. Remember that it is always important to close connections so that we can avoid unused connections taking up resources. ###Code ibm_db.close(conn) ###Output _____no_output_____
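###Markdown Side note: the inserts above build the SQL strings by hand and run them with `exec_immediate`. When the values come from user input, prepared statements with parameter markers are the safer pattern. A minimal sketch follows; it assumes `conn` is still open (so it would be run before Task 8), and the sample rows are made up purely for illustration: ###Code
# Sketch: parameterized insert using a prepared statement and bound values
insertSQL = "insert into INSTRUCTOR values (?, ?, ?, ?, ?)"
insertStmt3 = ibm_db.prepare(conn, insertSQL)

sample_rows = [
    (4, 'Ana', 'Lopez', 'Vancouver', 'CA'),
    (5, 'John', 'Smith', 'Chicago', 'US'),
]
for row in sample_rows:
    ibm_db.execute(insertStmt3, row)
###Output _____no_output_____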
TeaRecommendations/TeaReviewsNLP2.ipynb
###Markdown More Data WranglingNow that I have the dataframe imported, I am going to use NLP to work with the different reviews on certain teas to create more insights and set it up for unsupervised learning. Tasting Considerations* Aroma: The odor of the tea liquor, also called the nose or fragrance. A complex aroma is often described as a bouquet. * Astringency: A lively and mouth-drying effect on the tongue. Not bitter, but a clean and refreshing quality. The sensation of astringency is caused by a reaction between polyphenols (tannins) and the protein in saliva. * Body: The tactile aspect of tea’s weight and substance in the mouth, variously subcategorized as light, medium, or full; also known as fullness. * Bright: A lively, clean style that refreshes the palate. * Character: A tea’s signature attributes depending upon origin, whether of its country, region or type. * Clean: Indicates purity of flavor and an absence of any off-tastes. * Finish: The lasting taste on your tongue after swallowing the tea. * Flowery: A floral nose or flavor associated with high grade teas. * Full: References a positive sensation of body and good heft; indicates a well-made tea, possessing color, strength, substance and roundness. * Malty: A sweet malt flavor that is characteristic of Assam black teas. * Muscatel: A flavor reminiscent of grapes, most often used to describe an exceptional characteristic found in the liquors of the finest Darjeelings. * Smooth: Round-bodied, fine-drinking teas. * Soft: Smooth, lush, and subsequently often (but not necessarily) timid in flavor; not a negative term. * Thick: Describes liquor having substance, but not necessarily strength. * Vegetal: A characteristic of green teas that might include grassy, herby or marine flavors. ###Code import nltk import re from nltk.corpus import stopwords from nltk.tokenize import word_tokenize teareview_dict['Green Tea'][0].keys() ###Output _____no_output_____ ###Markdown Polarity ScoreUsing TextBlob, I will be creating a polarity score for each review. This is to help weight the reviews if needed. ###Code polarityscore = [] for i in teareview_dict: for j in teareview_dict[i]: for review in j['Tea Reviews']: q = TextBlob(review) polarityscore.append(q.sentiment.polarity) j['Polarity']=polarityscore polarityscore=[] teareview_dict['Black Tea'][0]['Polarity'] ###Output _____no_output_____ ###Markdown Proportion Flavor Wheel and MouthfeelUsing the flavor wheel found online, I will create a flavor profile for each tea. I will need to use 1 and 2 n-grams and the tastingflavors dictionary.I also want to get any 'mouthfeel' data to see what added bonus it could give to the tea. 
###Code #creating flavor profile based on the tastingflavors dict, stored as proportions def getProp(text1): text1=TextBlob(text1) count=0 tot_count=0 proportion_dict = {} for i in tastingflavors: count=0 adjlist=[] for j in text1.tags: if j[1]=='JJ' or j[1]=='JJR'or j[1]=='JJS' or j[1]=='NN'or j[1]=='NNP'or j[1]=='NNS': abb = re.sub("y","", i[0]) if(i[0] !=abb): adjlist.append(abb.lower()) adjlist.append(j[0].lower()) else: adjlist.append(j[0].lower()) for k in adjlist: if k in tastingflavors[i]: count+=1 for k in text1.ngrams(n=2): if ' '.join(k.lower()) in tastingflavors[i]: count+=1 tot_count+=count proportion_dict[i]=count if tot_count!=0: for i in proportion_dict: proportion_dict[i] = proportion_dict[i]/tot_count return proportion_dict, adjlist #function pulling out mouthfeel data and filtering out unnecessary data, input is the revlist def getReviews(revlist): supertext="" mouthfeel = '' for i in revlist: supertext += (' '+ i) if re.findall(r"([^[.!]]*?mouthfeel[^.]*\.)",i): mouthfeel=' '.join((re.findall(r"([^.!,]*?mouthfeel[^.!,]*\.)",i))) supertext = re.sub("[’,;:–…]","", supertext).replace("(", '').replace(".", ' ').replace("!", ' ').replace(")", '') supertext= re.sub("(-)"," ", supertext) supertext= re.sub("(chocolate)","cocoa", supertext) word_tokens = word_tokenize(supertext) sentence = '' mouthblob = TextBlob(mouthfeel) mouthadj = [] for i in mouthblob.tags: if i[1]=='JJ' or i[1]=='JJR'or i[1]=='JJS': mouthadj.append(i[0]) filtered_sentence = [w for w in word_tokens if not w in stop_words] for i in filtered_sentence: sentence += (' '+i) return supertext, mouthadj #creating a flavor profile from both reviews and flavors filled in by customers, also makign a mouthfeel profile if found newdictlist = [] for i in teareview_dict: newdictlist = [] for j in range(len(teareview_dict[i])): flavtext = '' reviewtext, mouthfeel= getReviews(teareview_dict[i][j]['Tea Reviews']) proportions = getProp(reviewtext) if teareview_dict[i][j]['Tea Flavors']: flavtext = teareview_dict[i][j]['Tea Flavors'] custproportions= getProp(flavtext) dict2 = teareview_dict[i][j].copy() dict2['Mouthfeel']=mouthfeel dict2['Flavor Profile Cust']=custproportions dict2['Flavor Profile Reviews']=proportions dict2['Reviews Supertext']=reviewtext newdictlist.append(dict2) teareview_dict[i] = newdictlist """with open('totstea_data.pkl', 'wb') as picklefile: pickle.dump(teareview_dict, picklefile)""" from pprint import pprint for i in teareview_dict['Green Tea']: pprint(i) ###Output _____no_output_____ ###Markdown KMeansUsing Kmeans to cluster my data to create recommendations based on an input vector. 
###Code #Importing important tools for clustering with scikit-learn from sklearn.feature_extraction.text import CountVectorizer from sklearn.preprocessing import Normalizer from sklearn.cluster import KMeans, MiniBatchKMeans from sklearn.cluster import KMeans from sklearn.utils import shuffle from sklearn import preprocessing from sklearn import metrics from sklearn.metrics.pairwise import euclidean_distances from sklearn.metrics import pairwise_distances from sklearn.metrics import silhouette_score from surprise import SVD from surprise import Dataset from surprise.model_selection import cross_validate vect = CountVectorizer(max_df=.95, min_df=2) tsvd = SVD() #reading a pickle file reading to pick up where i left off in case something fails or i start over with open("/Users/deven/Documents/pickleddata/projectfletcher/totstea_data.pkl", 'rb') as picklefile: teareview_dict = pickle.load(picklefile) #stacking dictionaries into a dataframe teadf = pd.DataFrame() for i in teareview_dict: newdf = pd.DataFrame.from_dict(teareview_dict[i]) teadf=pd.concat([teadf,newdf],ignore_index=True) #creating falvor profile df teaflavdf = pd.DataFrame(list(teadf['Flavor Profile Cust'])) #combining dataframes teadf.reset_index(drop=True,inplace=True) teadf = pd.concat([teadf,teaflavdf], axis=1) teadf.drop('Flavor Profile Reviews', axis=1, inplace=True) #copying df to experiment with playset = teaflavdf.copy() #teaflavdf=pd.concat([teaflavdf,teaflavdf2], axis=1) #initializing KMeans km = KMeans(n_clusters = 14) km.fit(playset) #initializing important variables mu_digits = km.cluster_centers_ kmlabels = km.labels_ custpref = [ 0, 1.17647059e-02, 0, 0, 0, 0, -4.33680869e-19, 6.93889390e-18, 1.35525272e-20,\ 4.33680869e-19, 5.98930481e-02, 3.46944695e-18, 7.76470588e-02, 6.93889390e-18, 0, 8.50695187e-01, 8.67361738e-19] #defining a function to find the closest teas to a specific flavor profile based on euclidean distance, returns (index, dist) pair def Rec(labels, clstr,cust): clustlist = [] tearecs=[] teaind=[] for ind, i in enumerate(labels): if i ==clstr: clustlist.append(ind) newdf= playset.iloc[clustlist,:] for i in range(len(newdf)): tearecs.append((newdf.index[i],sum(euclidean_distances([newdf.iloc[i,:]], [cust]))/len(euclidean_distances([newdf.iloc[i,:]], [cust])))) mindist = sorted(tearecs) return tearecs tearecs = Rec(kmlabels,km.predict([custpref])[0],custpref) #Defining a function that takes tea rec (index,dist), then pulls the tea names based on smallest dist values def getTeaNames(tearec): teanames = [] mindist = sorted(tearec, key=lambda x:x[1]) teanames = [w[0] for w in mindist[:3]] teanames = teadf.iloc[teanames,:]['Tea Name'] return teanames teanames = getTeaNames(tearecs) teanames ###Output _____no_output_____ ###Markdown Determining amount of ClustersUsing the silhoutte score to find the optimal cluster amount. Also, testing out which clustering method is the best for my dataset. 
###Code #importing functions from sklearn.cluster import SpectralClustering, AgglomerativeClustering #initializing functions sc = SpectralClustering() ac = AgglomerativeClustering() db.labels_ #using Spectral clustering, find the best silhouette score based on increasing k values Sil_coefs = [] for k in range(2,20): sc = SpectralClustering(n_clusters = k) sc.fit(teaflavdf) labels = sc.labels_ Sil_coefs.append(metrics.silhouette_score(teaflavdf, labels, metric='euclidean')) fig, ax1 = plt.subplots(1,1, figsize=(15,5), sharex=True) k_clusters = range(2,20) ax1.plot(k_clusters, Sil_coefs) ax1.plot(k_clusters, sc.inertia_) ax1.set_title('Spectral Cluster') ax1.set_xlabel('number of clusters') ax1.set_ylabel('silhouette coefficient') plt.xticks(np.arange(2, 20, step=2)) #using agglomerative clustering, find the best silhouette score based on increasing k values Sil_coefs = [] for k in range(2,20): ac = AgglomerativeClustering(n_clusters = k) ac.fit(teaflavdf) labels = ac.labels_ Sil_coefs.append(metrics.silhouette_score(teaflavdf, labels, metric='euclidean')) fig, ax1 = plt.subplots(1,1, figsize=(15,5), sharex=True) k_clusters = range(2,20) ax1.plot(k_clusters, Sil_coefs) ax1.set_xlabel('number of clusters') ax1.set_ylabel('silhouette coefficient') ax1.set_title('Agg Cluster') plt.xticks(np.arange(2, 20, step=2)) #using KMeans clustering, find the best silhouette score based on increasing k values Sil_coefs = [] for k in range(2,20): km = KMeans(n_clusters=k, random_state=1) km.fit(teaflavdf) labels = km.labels_ Sil_coefs.append(metrics.silhouette_score(teaflavdf, labels, metric='euclidean')) fig, ax1 = plt.subplots(1,1, figsize=(15,5), sharex=True) k_clusters = range(2,20) ax1.plot(k_clusters, Sil_coefs) ax1.set_xlabel('number of clusters') ax1.set_title('KMeans Cluster') ax1.set_ylabel('silhouette coefficient') plt.xticks(np.arange(2, 20, step=2)) ###Output _____no_output_____ ###Markdown Flavor Profile PCALooking at the distribution of flavor profile data if fitted to 2 features ###Code teadf = teadf.set_index('Tea Name') reviewvect = vect.fit_transform(teadf[['Reviews Supertext','Tea Flavors']]) #flavvect = vect.fit_transform(teadf['Tea Flavors']) pd.DataFrame(reviewvect.toarray(), index=example, columns=vectorizer.get_feature_names()).head(10) dtm = dtm.asfptype() from sklearn.decomposition import PCA pca = PCA(n_components=2) principalComponents = pca.fit_transform(teaflavdf) X = pd.DataFrame(data = principalComponents, columns = ['principal component 1', 'principal component 2']) plt.scatter(X['principal component 1'], X['principal component 2'], c=kmlabels, s=50, cmap='viridis') centers = km.cluster_centers_ plt.xlabel('PC1') plt.ylabel('PC2') principalDf.columns ###Output _____no_output_____ ###Markdown SVD with Surprise ###Code import pandas as pd from surprise import NormalPredictor from surprise import SVDpp,SVD from surprise import Dataset from surprise import Reader from surprise.model_selection import cross_validate import nltk import re from nltk.corpus import stopwords from nltk.tokenize import word_tokenize userdf.head() #creating a df for surprise analysis from userdf teascore_list=[] teauser_list=[] teaname_list=[] for user,i in zip(userdf['Tea Names'], userdf.index): for ind,j in enumerate(user): teascore= userdf.loc[i,'Score'][ind] if teascore=='/span': teascore=0 teascore_list.append(teascore) teauser_list.append(i) teaname_list.append(re.sub('[!@#$\'\",]', '', j)) newdf=pd.DataFrame({'Tea Name': teaname_list, 'Score': teascore_list, 'User Name': teauser_list }) """with 
open('surprise_data.pkl', 'wb') as picklefile: pickle.dump(newdf, picklefile)""" #adding names and classes from survey names = ['maya','THE Jonathan', 'Kelly', 'Amy', 'Sakura', 'Dan','Anonymous','Travis', 'Chad', 'the_og_jonathan','Vicky', 'Cyrus', 'Deven'] teas = ['Irish Breakfast','Earl Grey', 'Pre Rain Organic Dragon Well Supreme (Long Jing)', 'supreme pu-erh', 'Loose leaf white teas', 'Gyokuro', 'Chai',\ 'Peppermint Tea', 'chamomile','rishi tropical hibiscus', 'organic english breakfast','jasmine dragon pearls'] teas #adding classmate scores, all of this needed to be hard coded classrate=[] classrate=[[55,95,25,45,0,25,85,90,0,0,0,0],[5,75,95,20,80,25,85,25,25,0,80,0], [75,95,85,85,55,85,65,0,95,0,0,0], \ [95,35,65,0,0,35,5,0,0,85,0,0],[95,75,65,55,45,55,15,0,0,0,0,0],[45,55,55,15,15,55,65,75,0,0,0,0],\ [95,95,0,15,0,0,95,0,0,0,0,0], [95,95,25,35,0,25,75,0,0,0,95,0], [55,65,85,0,95,75,45,0,0,0,0,0],\ [55,55,75,45,75,75,95,95,0,0,0,0],[35,35,95,95,45,95,95,0,0,0,0,90],[65,65,55,0,15,85,95,0,0,0,0,0],\ [35,75,86,55,70,85,75,85,85,65,40,90]] for index,i in enumerate(names): for ind, k in enumerate(classrate[index]): newdf = pd.concat([newdf,pd.DataFrame([[k,teas[ind], i]], columns = ['Score', 'Tea Name', 'User Name'])], ignore_index=True) # A reader is still needed but only the rating_scale param is requiered. reader = Reader(rating_scale=(0, 100)) algo=SVD() # The columns must correspond to user id, item id and ratings (in that order). data = Dataset.load_from_df(newdf[['User Name', 'Tea Name', 'Score']], reader) # We can now use this dataset as we please, e.g. calling cross_validate cross_validate(NormalPredictor(), data, cv=4) #SVD is a better predictor, albeit still a bit off cross_validate(algo, data, cv=4) from collections import defaultdict def get_top_n(predictions, n=3): '''Return the top-N recommendation for each user from a set of predictions. Args: predictions(list of Prediction objects): The list of predictions, as returned by the test method of an algorithm. n(int): The number of recommendation to output for each user. Default is 10. Returns: A dict where keys are user (raw) ids and values are lists of tuples: [(raw item id, rating estimation), ...] of size n. ''' # First map the predictions to each user. top_n = defaultdict(list) for uid, iid, true_r, est, _ in predictions: top_n[uid].append((iid, est)) # Then sort the predictions for each user and retrieve the k highest ones. 
for uid, user_ratings in top_n.items(): user_ratings.sort(key=lambda x: x[1], reverse=True) top_n[uid] = user_ratings[:n] return top_n trainset = data.build_full_trainset() algo.fit(trainset) #generating predictions for unrated teas based on what users have rated testset = trainset.build_anti_testset() predictions = algo.test(testset) want= [] for i in predictions: if i[0] in names: want.append(i) top_n = get_top_n(want, n=3) recsdf = pd.DataFrame(top_n) recsdf from nltk.corpus import stopwords from nltk.tokenize import word_tokenize stop_words = stopwords.words('english') stop_words = stop_words + ['the','i','I','a','of',')','\'', 'to', 'it','and','is','this','for', 'but', 'that', 'in', 'my', 'not','husband',\ 'be', 'we', 'are', 'm', 'as', 'just', 'there', 'you','all','with','me', 'few', 'will', 'on','has', 'was','many','last'\ '''()''', "'",'!','.','It',',', '-',':','Thanksgiving','tea','Im','youll','Ive','Its','Also','A','As','This','cant','anybody',\ 'go','one','everybody','dont', 'We', 'us', 'got', 'And'] #adding flavor profiles to allow for a hybrid approach newdictlist = [] dict2={} totsteareviews = [] for j in tea_list: flavtext = '' reviewtext='' dict2={} adjlist=[] flavs = [] reviewtext, mouthfeel= getReviews(j['Tea Reviews']) proportions, adjlist = getProp(reviewtext) if j['Tea Flavors']!='<dd class="empty">Not available': flavtext = j['Tea Flavors'] custproportions, flavs= getProp(flavtext) dict2['Flavor Profile Cust']=custproportions else: dict2['Flavor Profile Cust']=0 dict2['Review Adj'] = adjlist+flavs dict2['Tea Name'] = j['Tea Name'] dict2['Mouthfeel']=mouthfeel dict2['Flavor Profile Reviews']=proportions dict2['Reviews Supertext']=reviewtext totsteareviews.append(reviewtext) newdictlist.append(dict2) """#saving list as it takes forever for it to run with open("newdatalist.pkl", 'wb') as picklefile: pickle.dump(newdictlist,picklefile)""" #reading in the list of users with open('/Users/deven/Documents/pickleddata/projectfletcher/newdatalist.pkl', 'rb') as picklefile: newdictlist = pickle.load(picklefile) newteaprofiledf = pd.DataFrame(newdictlist) #flavor profile Cust is the most accurate newteaprofiledf.head() ###Output _____no_output_____ ###Markdown Hybrid ModelCreating a linear regresssion model to predict the 'actual' predicted rating of teas to counter the 'cold start up' problem in collaborative recommendation systems. 
###Code import statsmodels.api as sm import statsmodels.formula.api as smf import patsy from sklearn.linear_model import LinearRegression from sklearn import metrics from sklearn.linear_model import LogisticRegression from sklearn.cross_validation import train_test_split from sklearn.cross_validation import KFold from sklearn.linear_model import ElasticNetCV from sklearn.linear_model import ElasticNet from sklearn.linear_model import Ridge from sklearn.cross_validation import cross_val_score from sklearn.naive_bayes import MultinomialNB from sklearn.naive_bayes import GaussianNB #initializing the get functions to find nearest points def Rec(labels, clstr,cust): clustlist = [] tearecs=[] teaind=[] for ind, i in enumerate(labels): if i ==clstr: clustlist.append(ind) newdf= playset.iloc[clustlist,:] for i in range(len(newdf)): tearecs.append((newdf.index[i],sum(euclidean_distances([newdf.iloc[i,:]], [cust]))/len(euclidean_distances([newdf.iloc[i,:]], [cust])))) mindist = sorted(tearecs) return tearecs def getTeaNames(tearec): teanames = [] mindist = sorted(tearec, key=lambda x:x[1]) teanames = [w[0] for w in mindist[:3]] teanames = teadf.iloc[teanames,:]['Tea Name'] return teanames top_n[] #trying to find which of the top 10 are closest to the other teas the user has tried, based on average, may need to rethink this avetearate = [] tearate = 0 count=0 flag=0 for i in top_n: for k in top_n[i]: userrecs = newdf[newdf['User Name']==i]['Tea Name'] avetearate=[] if len(userrecs) <5: if flag==1: break count=0 for i in userrecs['Tea Name']: eudis=(euclidean_distances(newteaprofiledf[newteaprofiledf['Tea Name']==i]['Flavor Profile Reviews'], newteaprofiledf[newteaprofiledf['Tea Name']==k[0]]['Flavor Profile Reviews'])) tearate +=eudis count+=1 avetearate.append((tearate/count)) tearate=0 flag=1 df1 = newteaprofiledf[['Tea Name', 'Flavor Profile Cust']] hybrid1 = pd.merge(newdf,df1,how='inner') newcols=[] for i in hybrid1['Flavor Profile Cust']: newcols.append(i) inter = pd.DataFrame(newcols) print(len(inter)) print(len(hybrid1)) hybrid1 = pd.concat([hybrid1, inter], axis=1) hybrid1.drop('Flavor Profile Cust', inplace=True, axis=1) hybrid1.head() algopredicts = [] for i,k in zip(hybrid1['Tea Name'],hybrid1['User Name']): algopredicts.append(round(algo.predict(k, i).est)) hybrid1['Algo']=algopredicts hybrid1.sample(10) hybrid1['Algo'] = preprocessing.scale(hybrid1['Algo']) hybrid1['Score'] = preprocessing.scale(hybrid1['Score']) y = hybrid1['Score'] X = hybrid1.drop(['Tea Name','User Name','Score'], axis=1) lr = LinearRegression() lg = LogisticRegression() #hybriddf = pd.DataFeame(predictions) xtrain, xtest, ytrain, ytest = train_test_split(X,y, test_size=.3, random_state=8) lr.fit(xtrain,ytrain) #lg.fit(xtrain,ytrain) lr.score(xtest, ytest) est = ElasticNetCV(l1_ratio = .15, cv=20, n_alphas= 200) est.fit(xtrain,ytrain) est.score(xtest,ytest) #Getting multiplicative error to see my models absolute fit rms = math.sqrt(mean_squared_error(ytest, est.predict(xtest))) math.exp(rms) def diagnostic_plot(x, y): plt.figure(figsize=(20,5)) pred = lr.predict(x) plt.subplot(1, 3, 2) res = y - pred plt.scatter(pred, res) plt.title("Residual plot") plt.xlabel("prediction") plt.ylabel("residuals") plt.subplot(1, 3, 3) #Generates a probability plot of sample data against the quantiles of a # specified theoretical distribution stats.probplot(res, dist="norm", plot=plt) plt.title("Normal Q-Q plot") #Checking residuals and quantile plots #diagnostic_plot(xtest, ytest) ###Output _____no_output_____ ###Markdown 
Doc2VecExperimenting with Doc2vec to see if there is any relation between tea reviews based on tea type. ###Code import gensim import os import collections import smart_open import random itemdf.head() train=[] test=[] words='' def read_corpus(fname, tokens_only=False): for i in fname: for k,line in enumerate(i): if tokens_only: yield gensim.utils.simple_preprocess(line) else: # For training data, add tags if len(gensim.utils.simple_preprocess(line))<50: yield gensim.models.doc2vec.TaggedDocument(gensim.utils.simple_preprocess(line), [k]) len(gensim.utils.simple_preprocess(itemdf['Tea Reviews'][0][0])) train = itemdf['Tea Reviews'][:round(len(itemdf)*.8)] test = itemdf['Tea Reviews'][-round((len(itemdf)*.2)):] train[0] model = gensim.models.doc2vec.Doc2Vec(vector_size=50, min_count=2, epochs=55) train_corpus = list(read_corpus(train)) test_corpus = list(read_corpus(test, tokens_only=True)) model.build_vocab(train_corpus) # Pick a random document from the test corpus and infer a vector from the model doc_id = random.randint(0, len(test_corpus) - 1) inferred_vector = model.infer_vector(test_corpus[doc_id]) sims = model.docvecs.most_similar([inferred_vector], topn=len(model.docvecs)) # Compare and print the most/median/least similar documents from the train corpus print('Test Document ({}): «{}»\n'.format(doc_id, ' '.join(test_corpus[doc_id]))) print(u'SIMILAR/DISSIMILAR DOCS PER MODEL %s:\n' % model) for label, index in [('MOST', 0), ('MEDIAN', len(sims)//2), ('LEAST', len(sims) - 1)]: print(u'%s %s: «%s»\n' % (label, sims[index], ' '.join(train_corpus[sims[index][0]].words))) #model.save('teadocmodel.bin') model = gensim.models.doc2vec.Doc2Vec.load('teadocmodel.bin') ###Output _____no_output_____ ###Markdown Doc2Vec with BooksUsing doc to vec to see if I can recommend books based on tea flavor profiles. ###Code books = nltk.corpus.gutenberg.fileids() bookt = ['Emma by Jane Austen', 'Persuassion by Jane Austen', 'Sense and Sensibility by Jane Austen',\ 'Poems by William Blake', 'The Little People of the Snow by William Bryant', 'The Adventures of Buster Bear by Thornton Burgress'\ 'Alice in Wonderland by Lewis Carroll','The Ball and the Cross by G.K. Chesterton','The Wisdom of Father Brown by G.K. Chesterton'\ 'The Ball and the Cross by G.K. 
Chesterton', 'The Parents Assistant by Maria Edgeworth','Moby Dick by Herman Melville',\ 'Paradise Lost by John Milton', 'Shakespeares Works','Shakespeares Works','Shakespeares Works', 'Leaves of Grass by Walt Whitman'] beat = {} for i,k in zip(books, bookt): beat[i]=k doclen = [] train = [] def read_corpus1(fname, tokens_only=False): for i in fname: for k,line in enumerate(i): if tokens_only: train.append(gensim.utils.simple_preprocess(line)) else: # For training data, add tags if len(gensim.utils.simple_preprocess(line))<50: train.append(gensim.models.doc2vec.TaggedDocument(gensim.utils.simple_preprocess(line), [k])) doclen.append(k) return train, doclen bookrec = gensim.models.doc2vec.Doc2Vec(vector_size=50, min_count=2, epochs=55) bookwords=[] for i in books: book = ' '.join(nltk.corpus.gutenberg.words(i)) bookwords.append(book) btrain,doclen = list(read_corpus1(bookwords)) test_corpus = newteaprofiledf['Review Adj'].values '''with open("doclen.pkl", 'wb') as picklefile: pickle.dump(doclen,picklefile)''' with open('/Users/deven/Documents/pickleddata/projectfletcher/btrain.pkl', 'rb') as picklefile: btrain = pickle.load(picklefile) bookrec.build_vocab(btrain) bookrec = gensim.models.doc2vec.Doc2Vec.load('/Users/deven/Documents/pickleddata/projectfletcher/bookrec.bin') # Pick a random document from the test corpus and infer a vector from the model doc_id = random.randint(0, len(test_corpus) - 1) inferred_vector = bookrec.infer_vector(test_corpus[doc_id]) sims = bookrec.docvecs.most_similar([inferred_vector]) # Compare and print the most/median/least similar documents from the train corpus print('Test Document ({}): «{}»\n'.format(doc_id, ' '.join(test_corpus[doc_id]))) print(u'SIMILAR/DISSIMILAR DOCS PER MODEL %s:\n' % bookrec) for label, index in [('MOST', 0), ('MEDIAN', len(sims)//2)]: print(u'%s %s: \n' % (label, sims[index])) tot=0 for ind, i in enumerate(doclen): tot+=i if sims[0][0]==btrain[ind][1]: rec = nltk.corpus.gutenberg.fileids()[ind-1] break print(rec) def getBookrec(iid): test_corpus = newteaprofiledf[newteaprofiledf['Tea Name']==iid]['Review Adj'].values[0] inferred_vector = bookrec.infer_vector(test_corpus) sims = bookrec.docvecs.most_similar([inferred_vector]) rec='' tot=0 for ind, i in enumerate(doclen): tot+=i if sims[0][0]<tot: rec = bookt[ind-1] break return rec, bookreclist = [] for i in names: teaid= top_n[i][0][0] bookreclist.append(getBookrec(teaid)) print(bookreclist) print(names) import plotly.plotly as py import plotly.graph_objs as go frec=[] srec=[] trec=[] for i in top_n: frec.append(top_n[i][0][0]) srec.append(top_n[i][1][0]) trec.append(top_n[i][2][0]) sims #bookrec.save('bookrec.bin') import plotly.plotly as py import plotly.graph_objs as go import plotly plotly.tools.set_credentials_file(username='djmorcode', api_key='g4D9PR85TaaUkKlH8CWZ') trace = go.Table( header=dict(values=['Name', 'Tea Rec 1','Tea Rec 2','Tea Rec 3','Book Recommendation'], line = dict(color='#7D7F80'), fill = dict(color='#a1c3d1'), align = ['left'] * 5), cells=dict(values=[names,frec,srec,trec,bookreclist], line = dict(color='#7D7F80'), fill = dict(color='#EDFAFF'), align = ['left'] * 5)) layout = dict(width=1000, height=800) data = [trace] fig = dict(data=data, layout=layout) py.iplot(fig, filename = 'styled_table') top_n print(bookreclist) print(names) ###Output _____no_output_____ ###Markdown LDA BooksTrying the recommendation system with LDA for recommending books. 
***Work in progress** ###Code from gensim import corpora, models, similarities, matutils cv = CountVectorizer(ngram_range=(1, 2), stop_words=stop_words) cv.fit(bookwords) teawords = cv.transform(newteaprofiledf['Review Adj']) # Creating matrix, then transposing it so the terms are the rows counts = cv.transform(bookwords).transpose() # Convert sparse matrix of counts to a gensim corpus corpus = matutils.Sparse2Corpus(counts) compareset = matutils.Sparse2Corpus(teawords) #saving mapping for later use id2word = dict((v, k) for k, v in count_vectorizer.vocabulary_.items()) id2word = dict((v, k) for k, v in count_vectorizer.vocabulary_.items()) lda = models.LdaModel(corpus=corpus, num_topics=5, minimum_probability=0.03, id2word=id2word, passes=10) lda = models.LdaModel(corpus=corpus, num_topics=5, minimum_probability=0.03, id2word=id2word, passes=10) lda.print_topics() # Transform the docs from the word space to the topic space (like "transform" in sklearn) lda_corpus = lda[corpus] lda_corpus # Store the documents' topic vectors in a list so we can take a peak lda_docs = [doc for doc in lda_corpus] # Check out the document vectors in the topic space for the first 5 documents lda_docs[:] ###Output _____no_output_____ ###Markdown Word2VecTryign my hand at word to vec to see if it will work for a NLP analysis of reviews. ###Code allwords = nltk.corpus.gutenberg.words() len(allwords) def read_corpus(fname): for i in fname: for line in i: if len(gensim.utils.simple_preprocess(line))<50: yield [x for x in gensim.utils.simple_preprocess(line) if len(x)>2] train_corpus = list(read_corpus(train)) test_corpus = list(read_corpus(test)) len(train_corpus) model1 = gensim.models.Word2Vec(train_corpus, size=100, window=5, min_count=1, workers=4,sg=1) #model1.save('teawordmodel.bin') model1 = gensim.models.word2vec.Word2Vec.load('teawordmodel.bin') list(model.wv.vocab.items())[:7] print(model['bright']) # Similarity model.most_similar('mouthfeel' ,topn=8) model.similarity('green','tea') model.n_similarity(['bread', 'dog'], ['cat', 'dog']) model.doesnt_match("rabbit cow raven turtle".split()) ###Output _____no_output_____ ###Markdown Creating wrappersCreating a list of wrappers to copy into a JS file for flask app. ###Code #Giving wrappers a try def p_decorate(func): def func_wrapper(name): return "{"+func(name)+"}," return func_wrapper @p_decorate def getval(string): return 'value: +{0}+,'.format(string)+'\n'+' text: +{0}+'.format(string) convert_text = p_decorate(getval) def loopit(list1): new_list = [] for i in list1: new_list.append(getval(i)) return new_list uniteas = loopit(itemdf['Tea Name'].unique()) from pprint import pprint pprint(uniteas) ###Output _____no_output_____
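###Markdown An alternative to hand-building the JavaScript object strings with decorators: since the goal is a list of `{value, text}` entries for the Flask front end, `json.dumps` produces valid syntax directly and handles quoting for free. A small sketch, assuming `itemdf` from above is still loaded: ###Code
import json

# Build the dropdown options as plain dicts, then serialize once.
options = [{"value": str(name), "text": str(name)} for name in itemdf['Tea Name'].unique()]
print(json.dumps(options, indent=2))
###Output _____no_output_____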
westeros_gas_ppl.ipynb
###Markdown Add a new technology “gas power plant”- Assume realistic cost parameters and lifetimes for this power plant type (include references your sources in the notebook)- Is there a “sweet spot” of prices on carbon such that coal, wind & gas are used at the same time? IntroductionSince gas power plant is usually supplied with gas we include this within the O&M costs, because we do not want to introduce a whole new fuel type. ###Code import pandas as pd import ixmp import message_ix from message_ix.util import make_df %matplotlib inline mp = ixmp.Platform() ###Output _____no_output_____ ###Markdown Making a clone of the existing scenario 'baseline'Loading the existing scenario 'baseline' and cloning to a new scenario 'gas_ppl' to add the gas power plant ###Code model = 'Westeros Electrified' base = message_ix.Scenario(mp, model=model, scenario='baseline') scen = base.clone(model, 'gas_ppl', 'introduce new gas power plant', keep_solution=False) scen.check_out() country = 'Westeros' model_horizon = scen.set('year') year_df = scen.vintage_and_active_years() vintage_years, act_years = year_df['year_vtg'], year_df['year_act'] ###Output _____no_output_____ ###Markdown Create gas power plant ###Code scen.add_set("technology", 'gas_ppl') base = { 'node_loc': country, 'year_vtg': vintage_years, 'year_act': act_years, 'mode': 'standard', 'time': 'year', 'unit': '-', } base_output = make_df(base, node_dest=country, time_dest='year') gas_out = make_df(base_output, technology='gas_ppl', commodity='electricity', level='secondary', value=1., unit="GWa") scen.add_par('output', gas_out) ###Output _____no_output_____ ###Markdown Operational Constraints and Parameters Include Capacity factor of 1.0 ###Code base_capacity_factor = { 'node_loc': country, 'year_vtg': vintage_years, 'year_act': act_years, 'time': 'year', 'unit': '-', } gas_capacity = 1.0 df = make_df(base_capacity_factor, technology='gas_ppl', value=gas_capacity) scen.add_par('capacity_factor', df) ###Output _____no_output_____ ###Markdown Include lifetime same as coal = 20 ###Code base_technical_lifetime = { 'node_loc': country, 'year_vtg': model_horizon, 'unit': 'y', } df = make_df(base_technical_lifetime, technology='gas_ppl', value=20) scen.add_par('technical_lifetime', df) ###Output _____no_output_____ ###Markdown Technological Diffusion and Contraction ###Code base_growth = { 'node_loc': country, 'year_act': model_horizon, 'time': 'year', 'unit': '-', } df = make_df(base_growth, technology='gas_ppl', value=0.1) scen.add_par('growth_activity_up', df) ###Output _____no_output_____ ###Markdown Defining an Energy MixGas has the same fraction as coal ###Code history = [690] base_activity = { 'node_loc': country, 'year_act': history, 'mode': 'standard', 'time': 'year', 'unit': 'GWa', } demand_per_year = 40 * 12 * 1000 / 8760 historic_demand = 0.85 * demand_per_year grid_efficiency = 0.9 historic_generation = historic_demand / grid_efficiency gas_fraction = 0.6 gas_activity = (1 - gas_fraction) * historic_generation df = make_df(base_activity, technology='gas_ppl', value=gas_activity) scen.add_par('historical_activity', df) base_capacity = { 'node_loc': country, 'year_vtg': history, 'unit': 'GWa', } gas_cap = 1 / 10 / gas_capacity / 2 value = gas_activity * gas_cap df = make_df(base_capacity, technology='gas_ppl', value=value) scen.add_par('historical_new_capacity', df) ###Output _____no_output_____ ###Markdown Investment costs| technology | USD/kw ||---------------|--------------|| Coal | 500 || Wind | 1500 || Bulb | 5 || Gas | 750 | ###Code 
# in $ / kW (specific investment cost) base_inv_cost = { 'node_loc': country, 'year_vtg': model_horizon, 'unit': 'USD/kW', } df = make_df(base_inv_cost, technology='gas_ppl', value=750) scen.add_par('inv_cost', df) ###Output _____no_output_____ ###Markdown Fixed O&M Costs| technology | USD/kw ||---------------|--------------|| Coal | 30 || Wind | 10 || Gas | 25 | ###Code base_fix_cost = { 'node_loc': country, 'year_vtg': vintage_years, 'year_act': act_years, 'unit': 'USD/kWa', } df = make_df(base_fix_cost, technology='gas_ppl', value=25) scen.add_par('fix_cost', df) ###Output _____no_output_____ ###Markdown Variable O&M Costs| technology | USD/kw ||---------------|--------------|| Coal | 30 || Gas | 40 | ###Code base_var_cost = { 'node_loc': country, 'year_vtg': vintage_years, 'year_act': act_years, 'mode': 'standard', 'time': 'year', 'unit': 'USD/kWa', } df = make_df(base_var_cost, technology='gas_ppl', value=40) scen.add_par('var_cost', df) ###Output _____no_output_____ ###Markdown Run model ###Code scen.commit(comment='introducing gas power plant') scen.solve() scen.var('OBJ')['lvl'] scen.var_list() ###Output _____no_output_____ ###Markdown Results ###Code # Create a Reporter object to describe and carry out reporting # calculations and operations (like plotting) based on `scenario` from message_ix.reporting import Reporter rep = Reporter.from_scenario(scen) # Add keys like "plot activity" to describe reporting operations. # See tutorial/utils/plotting.py from message_ix.util.tutorial import prepare_plots prepare_plots(rep) ###Output _____no_output_____ ###Markdown ActivityHow much energy is generated in each time period from the different potential sources? ###Code # Only show a subset of technologies in the follow plots; # e.g. exclude "bulb" and "grid" rep.set_filters(t=["coal_ppl", "wind_ppl", "gas_ppl"]) # Trigger the calculation and plotting rep.get("plot activity") activity = scen.var('ACT') activity print('Coal:', activity.loc[activity['technology'] == 'coal_ppl']['lvl'].sum()) print('Wind:', activity.loc[activity['technology'] == 'wind_ppl']['lvl'].sum()) print('Gas:', activity.loc[activity['technology'] == 'gas_ppl']['lvl'].sum()) ###Output Coal: 448.439410003166 Wind: 10.350076103500761 Gas: 30.09940278222215 ###Markdown CapacityHow much capacity of each plant is installed in each period? ###Code # Create a different plot. The same filters are still active. rep.get("plot capacity") capacity = scen.var('CAP') capacity print('Coal:', capacity.loc[capacity['technology'] == 'coal_ppl']['lvl'].sum()) print('Wind:', capacity.loc[capacity['technology'] == 'wind_ppl']['lvl'].sum()) print('Gas:', capacity.loc[capacity['technology'] == 'gas_ppl']['lvl'].sum()) ###Output Coal: 448.439410003166 Wind: 28.750211398613228 Gas: 30.09940278222215 ###Markdown New CapacityHow much new capacity of each plant is installed overall? ###Code new_capacity = scen.var('CAP_NEW') new_capacity print('Coal:', new_capacity.loc[new_capacity['technology'] == 'coal_ppl']['lvl'].sum()) print('Wind:', new_capacity.loc[new_capacity['technology'] == 'wind_ppl']['lvl'].sum()) print('Gas:', new_capacity.loc[new_capacity['technology'] == 'gas_ppl']['lvl'].sum()) ###Output Coal: 27.61222925206089 Wind: 0.0 Gas: 0.9874663339360694 ###Markdown ConclusionsThe total costs are decreasing, since with the additional gas power plant there is more freedom and possibility to meet the energy demand. 
Furthermore, all of the technologies are used to at least some extent. However, most of the energy is still generated from coal, which is problematic, especially since no new wind capacity is built over the modelled decades. We will tackle this issue in the next notebook by introducing an invention of Eddard Stark, namely carbon prices. Close connection to database ###Code mp.close_db() ###Output _____no_output_____
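###Markdown To back the coal observation above with numbers, the generation mix can be computed directly from the `activity` dataframe that was extracted before the connection was closed (a small sketch using only pandas): ###Code
# Share of total electricity generation by technology, using the ACT variable from above.
gen = activity[activity['technology'].isin(['coal_ppl', 'wind_ppl', 'gas_ppl'])]
totals = gen.groupby('technology')['lvl'].sum()
print((totals / totals.sum() * 100).round(1))
###Output _____no_output_____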
Toby/.ipynb_checkpoints/Fixed_Input-checkpoint.ipynb
###Markdown Load data ###Code scenario = "moons" n_instance = 1000 # number of generated points n_features = 2 if scenario in ("3d", "helix") : X_train, y_train, X_test, y_test, X_valid, y_valid = dataset.get_dataset(n_instance, scenario) else: X_train, y_train, X_test, y_test, X_valid, y_valid = dataset.get_dataset(n_instance, scenario) os.system('mkdir Dataset') os.system('mkdir GANS') os.system('mkdir GANS/Models') os.system('mkdir GANS/Losses') os.system('mkdir GANS/Random_test') export_excel(X_train, 'Dataset/X_train') export_excel(y_train, 'Dataset/y_train') # print(X_train.shape,y_train.shape) X_train = import_excel('Dataset/X_train') y_train = import_excel('Dataset/y_train') print('made dataset') # Preprocessing vars = np.zeros((6,864)) j = 0 for i in range(6): for i2 in range(4): for i3 in range(3): for i4 in range(2): for i5 in range(3): for i6 in range(2): vars[0,j]=i+2 vars[1,j]=i2 vars[2,j]=i3 vars[3,j]=i4 vars[4,j]=i5 vars[5,j]=i6 j = j +1 j = 0#int(sys.argv[1])-1 print(vars[:,j]) n_features = 2 n_var =int(vars[0,j]) latent_spaces = [3,10,50,100] latent_space = 3#int(latent_spaces[int(vars[1,j])]) batchs = [10,100,1000] BATCH_SIZE = 100#int(batchs[int(vars[2,j])]) scales = ['-1-1','0-1'] scaled = '-1-1'#scales[int(vars[3,j])] epochs = 1001 #[1000,10000,10000] # epoch = int(epochs[int(vars[4,j])]) bias = [True,False] use_bias = True#(bias[int(vars[5,j])]) ###Output made dataset [2. 0. 0. 0. 0. 0.] ###Markdown WGAN Preprocessing ###Code wgan = WGAN_Model.WGAN(n_features,latent_space,BATCH_SIZE,n_var,use_bias) train_dataset, scaler, X_train_scaled = wgan.preproc(X_train, y_train, scaled) hist = wgan.train(train_dataset, epochs, scaler, scaled, X_train, y_train) wgan.generator.save('GANS/Models/GAN_'+str(j)) # plot loss print('Loss: ') fig, ax = plt.subplots(1,1, figsize=[10,5]) ax.plot(hist) ax.legend(['loss_gen', 'loss_disc']) #ax.set_yscale('log') ax.grid() plt.tight_layout() plt.savefig('GANS/Losses/GANS_loss'+str(j)+'.png') generator = keras.models.load_model('GANS/Models/GAN_'+str(j)) plt.close() ###Output every time the data shape (100, 2) every time the data shape (100, 2) every time the data shape (100, 2) every time the data shape (100, 2) every time the data shape (100, 2) every time the data shape (100, 2) every time the data shape (100, 2) every time the data shape (100, 2) every time the data shape (100, 2) every time the data shape (100, 2) Epoch 0/1001 critic: 25.568979 - generator: 0.070247 - 3s Epoch 1/1001 critic: 2.688954 - generator: 0.000613 - 0s Epoch 2/1001 critic: 0.176120 - generator: -0.001025 - 1s Epoch 3/1001 critic: 0.034078 - generator: -0.007843 - 0s Epoch 4/1001 critic: 0.023872 - generator: -0.019241 - 0s Epoch 5/1001 critic: 0.030154 - generator: -0.024663 - 0s Epoch 6/1001 critic: 0.019505 - generator: -0.033050 - 1s Epoch 7/1001 critic: 0.025594 - generator: -0.040037 - 0s Epoch 8/1001 critic: 0.018164 - generator: -0.052804 - 0s Epoch 9/1001 critic: 0.022046 - generator: -0.061853 - 0s Epoch 10/1001 critic: 0.023408 - generator: -0.069584 - 0s Epoch 11/1001 critic: 0.019457 - generator: -0.076791 - 0s Epoch 12/1001 critic: 0.015165 - generator: -0.082658 - 0s Epoch 13/1001 critic: 0.027290 - generator: -0.095384 - 0s Epoch 14/1001 critic: 0.037055 - generator: -0.102816 - 0s Epoch 15/1001 critic: 0.022575 - generator: -0.108251 - 0s Epoch 16/1001 critic: 0.020684 - generator: -0.115601 - 0s Epoch 17/1001 critic: 0.017669 - generator: -0.118765 - 0s Epoch 18/1001 critic: 0.017717 - generator: -0.123137 - 0s Epoch 19/1001 critic: 0.024663 - 
generator: -0.123612 - 0s Epoch 20/1001 critic: 0.034461 - generator: -0.124099 - 0s Epoch 21/1001 critic: 0.027552 - generator: -0.127057 - 0s Epoch 22/1001 critic: 0.032897 - generator: -0.127540 - 0s Epoch 23/1001 critic: 0.021437 - generator: -0.128156 - 0s Epoch 24/1001 critic: 0.022266 - generator: -0.123292 - 0s Epoch 25/1001 critic: 0.022047 - generator: -0.127166 - 0s Epoch 26/1001 critic: 0.029026 - generator: -0.130562 - 0s Epoch 27/1001 critic: 0.026986 - generator: -0.124924 - 0s Epoch 28/1001 critic: 0.019485 - generator: -0.130389 - 0s Epoch 29/1001 critic: 0.025529 - generator: -0.129109 - 0s Epoch 30/1001 critic: 0.030013 - generator: -0.127556 - 0s Epoch 31/1001 critic: 0.029520 - generator: -0.129072 - 0s Epoch 32/1001 critic: 0.025325 - generator: -0.128980 - 0s Epoch 33/1001 critic: 0.029414 - generator: -0.122372 - 0s Epoch 34/1001 critic: 0.029364 - generator: -0.122721 - 0s Epoch 35/1001 critic: 0.025976 - generator: -0.118179 - 0s Epoch 36/1001 critic: 0.030323 - generator: -0.112393 - 0s Epoch 37/1001 critic: 0.027385 - generator: -0.116549 - 0s Epoch 38/1001 critic: 0.027154 - generator: -0.123225 - 0s Epoch 39/1001 critic: 0.029747 - generator: -0.123592 - 0s Epoch 40/1001 critic: 0.031739 - generator: -0.122516 - 0s Epoch 41/1001 critic: 0.025656 - generator: -0.116855 - 0s Epoch 42/1001 critic: 0.024843 - generator: -0.115419 - 0s Epoch 43/1001 critic: 0.028486 - generator: -0.104431 - 0s Epoch 44/1001 critic: 0.024818 - generator: -0.113454 - 0s Epoch 45/1001 critic: 0.026805 - generator: -0.111395 - 0s Epoch 46/1001 critic: 0.022583 - generator: -0.106167 - 0s Epoch 47/1001 critic: 0.027529 - generator: -0.106675 - 0s Epoch 48/1001 critic: 0.033895 - generator: -0.109275 - 0s Epoch 49/1001 critic: 0.033593 - generator: -0.112387 - 0s Epoch 50/1001 critic: 0.025051 - generator: -0.115361 - 0s Epoch 51/1001 critic: 0.027259 - generator: -0.114793 - 0s Epoch 52/1001 critic: 0.025210 - generator: -0.119652 - 0s Epoch 53/1001 critic: 0.027581 - generator: -0.112108 - 0s Epoch 54/1001 critic: 0.026654 - generator: -0.115012 - 0s Epoch 55/1001 critic: 0.022277 - generator: -0.113917 - 1s Epoch 56/1001 critic: 0.037606 - generator: -0.118175 - 0s Epoch 57/1001 critic: 0.031411 - generator: -0.117958 - 0s Epoch 58/1001 critic: 0.026676 - generator: -0.123886 - 0s Epoch 59/1001 critic: 0.026181 - generator: -0.130423 - 0s Epoch 60/1001 critic: 0.028910 - generator: -0.135358 - 0s Epoch 61/1001 critic: 0.032483 - generator: -0.126818 - 0s Epoch 62/1001 critic: 0.022315 - generator: -0.132630 - 0s Epoch 63/1001 critic: 0.027036 - generator: -0.132911 - 0s Epoch 64/1001 critic: 0.030786 - generator: -0.125757 - 0s Epoch 65/1001 critic: 0.028733 - generator: -0.125349 - 0s Epoch 66/1001 critic: 0.029621 - generator: -0.122990 - 0s Epoch 67/1001 critic: 0.031012 - generator: -0.121220 - 1s Epoch 68/1001 critic: 0.028020 - generator: -0.132084 - 0s Epoch 69/1001 critic: 0.024225 - generator: -0.115723 - 0s Epoch 70/1001 critic: 0.030966 - generator: -0.129223 - 0s Epoch 71/1001 critic: 0.023838 - generator: -0.126994 - 0s Epoch 72/1001 critic: 0.027896 - generator: -0.130682 - 0s Epoch 73/1001 critic: 0.027139 - generator: -0.129798 - 0s Epoch 74/1001 critic: 0.029060 - generator: -0.131594 - 0s Epoch 75/1001 critic: 0.034973 - generator: -0.131477 - 0s Epoch 76/1001 critic: 0.023300 - generator: -0.135917 - 0s Epoch 77/1001 critic: 0.026047 - generator: -0.134290 - 0s Epoch 78/1001 critic: 0.029338 - generator: -0.125773 - 0s Epoch 79/1001 critic: 0.028433 - generator: 
-0.131305 - 0s Epoch 80/1001 critic: 0.019193 - generator: -0.124576 - 0s Epoch 81/1001 critic: 0.018784 - generator: -0.119864 - 0s Epoch 82/1001 critic: 0.023357 - generator: -0.119690 - 0s Epoch 83/1001 critic: 0.027060 - generator: -0.124522 - 0s Epoch 84/1001 critic: 0.035080 - generator: -0.122195 - 0s Epoch 85/1001 critic: 0.016614 - generator: -0.127656 - 0s Epoch 86/1001 critic: 0.021245 - generator: -0.125087 - 0s Epoch 87/1001 critic: 0.028238 - generator: -0.134848 - 0s Epoch 88/1001 critic: 0.027495 - generator: -0.140637 - 0s Epoch 89/1001 critic: 0.036221 - generator: -0.139536 - 0s Epoch 90/1001 critic: 0.035780 - generator: -0.142084 - 1s Epoch 91/1001 critic: 0.024387 - generator: -0.137261 - 0s Epoch 92/1001 critic: 0.033898 - generator: -0.139775 - 0s Epoch 93/1001 critic: 0.024793 - generator: -0.141957 - 0s Epoch 94/1001 critic: 0.031182 - generator: -0.140327 - 0s Epoch 95/1001 critic: 0.021342 - generator: -0.145852 - 0s Epoch 96/1001 critic: 0.019208 - generator: -0.151455 - 0s Epoch 97/1001 critic: 0.031263 - generator: -0.157938 - 1s Epoch 98/1001 critic: 0.031839 - generator: -0.163076 - 0s Epoch 99/1001 critic: 0.020708 - generator: -0.149761 - 0s Epoch 100/1001 critic: 0.029246 - generator: -0.157890 - 0s Epoch 101/1001 critic: 0.033402 - generator: -0.152588 - 0s Epoch 102/1001 critic: 0.024386 - generator: -0.147543 - 0s Epoch 103/1001 critic: 0.032790 - generator: -0.146951 - 0s Epoch 104/1001 critic: 0.027668 - generator: -0.141939 - 0s Epoch 105/1001 critic: 0.023909 - generator: -0.140643 - 0s Epoch 106/1001 critic: 0.029838 - generator: -0.143357 - 0s Epoch 107/1001 critic: 0.026188 - generator: -0.141938 - 0s Epoch 108/1001 critic: 0.037015 - generator: -0.149648 - 0s Epoch 109/1001 critic: 0.039801 - generator: -0.142260 - 0s Epoch 110/1001 critic: 0.030545 - generator: -0.140615 - 0s Epoch 111/1001 critic: 0.029042 - generator: -0.131273 - 0s Epoch 112/1001 critic: 0.024143 - generator: -0.136179 - 0s Epoch 113/1001 critic: 0.042132 - generator: -0.137109 - 0s Epoch 114/1001 critic: 0.028055 - generator: -0.132523 - 0s Epoch 115/1001 critic: 0.025088 - generator: -0.131276 - 0s Epoch 116/1001 critic: 0.024487 - generator: -0.127451 - 0s Epoch 117/1001 critic: 0.025621 - generator: -0.122053 - 0s Epoch 118/1001 critic: 0.031852 - generator: -0.125088 - 0s Epoch 119/1001 critic: 0.031111 - generator: -0.133959 - 0s Epoch 120/1001 critic: 0.024644 - generator: -0.133664 - 0s Epoch 121/1001 critic: 0.028217 - generator: -0.126505 - 0s Epoch 122/1001 critic: 0.026509 - generator: -0.124533 - 0s Epoch 123/1001 critic: 0.021606 - generator: -0.113439 - 0s Epoch 124/1001 critic: 0.022321 - generator: -0.112446 - 0s Epoch 125/1001 critic: 0.026713 - generator: -0.106153 - 0s Epoch 126/1001 critic: 0.030266 - generator: -0.109716 - 0s Epoch 127/1001 critic: 0.029438 - generator: -0.107248 - 0s Epoch 128/1001 critic: 0.031368 - generator: -0.109164 - 1s Epoch 129/1001 critic: 0.022176 - generator: -0.109392 - 1s Epoch 130/1001 critic: 0.030306 - generator: -0.107703 - 1s Epoch 131/1001 critic: 0.030922 - generator: -0.112434 - 0s Epoch 132/1001 critic: 0.029925 - generator: -0.112389 - 1s Epoch 133/1001 ###Markdown Prediction ###Code latent_values = tf.random.normal([1000, latent_space], mean=0.0, stddev=0.1) predicted_values = wgan.generator.predict(latent_values) if scaled == '-1-1': predicted_values[:,:]=(predicted_values[:,:]) predicted_values = scaler.inverse_transform(predicted_values) elif scaled =='0-1': predicted_values = 
scaler.inverse_transform(predicted_values) plt.plot(X_train,y_train,'o') plt.plot(predicted_values[:,0],predicted_values[:,1],'o') plt.show() x_input = [-1, 0, 0.5, 1.5] n_points = 20 y_min = -0.75 y_max = 1 # produces an input of fixed x coordinates with random y values predict1 = np.full((n_points//4, 2), x_input[0]) predict2 = np.full((n_points//4, 2), x_input[1]) predict3 = np.full((n_points//4, 2), x_input[2]) predict4 = np.full((n_points//4, 2), x_input[3]) predictthis = np.concatenate((predict1, predict2, predict3, predict4)) for n in range(n_points): predictthis[n,1] = random.uniform(y_min, y_max) # np.random.shuffle(predictthis) X_generated = wgan.predict(predictthis, scaler, scaled) plt.title("Prediction at x = -1, 0, 1.5") plt.scatter(X_train, y_train, label="Training data") #plt.scatter(predictthis[:,0], predictthis[:,1], label="Sample data", c="pink") plt.scatter(X_generated[:,0], X_generated[:,1], label="Fixed Input Prediction") plt.legend(loc='upper right') plt.tight_layout() plt.xlabel("x") plt.ylabel("y") mse = tf.keras.losses.MeanSquaredError() optimizer = tf.keras.optimizers.Adam(1e-2) def mse_loss(inp, outp): """ Calculates the MSE loss between the x-coordinates """ inp = tf.reshape(inp, [-1, n_features]) outp = tf.reshape(outp, [-1, n_features]) return mse(inp[:,0], outp[:,0]) def opt_step(latent_values, real_coding): """ Minimizes the loss between generated point and inputted point """ with tf.GradientTape() as tape: tape.watch(latent_values) gen_output = wgan.generator(latent_values, training=True) loss = mse_loss(real_coding, gen_output) gradient = tape.gradient(loss, latent_values) optimizer.apply_gradients(zip([gradient], [latent_values])) return loss def optimize_coding(real_coding): """ Optimizes the latent space values """ latent_values = tf.random.normal([20, latent_space], mean=0.0, stddev=0.1) latent_values = tf.Variable(latent_values) loss = [] for epoch in range(1000): loss.append(opt_step(latent_values, real_coding).numpy()) return latent_values def predict(input_data_unscaled, scaler, scaled): """ Optimizes the latent space of the input then produces a prediction from the generator. """ input_data = scaler.fit_transform(input_data_unscaled) predicted_vals = np.zeros((1, n_features)) for n in range(len(input_data)): print("Optimizing latent space for point ", n, " / ", len(input_data)) real_coding = input_data[n] real_coding = tf.constant(real_coding) real_coding = tf.cast(real_coding, dtype=tf.float32) latent_values = optimize_coding(real_coding) predicted_vals_1 = scaler.inverse_transform((wgan.generator.predict(tf.convert_to_tensor(latent_values)).reshape(-1, n_features))) # predicted_vals_1 = predicted_vals_1.reshape(1, n_features) predicted_vals = np.concatenate((predicted_vals, predicted_vals_1), axis=0) predicted_vals = predicted_vals[1:,:] return predicted_vals X_generated = predict(predictthis, scaler, scaled) plt.title("Prediction at x = -1, 0, 1.5") plt.scatter(X_train, y_train, label="Training data") plt.scatter(predictthis[:,0], predictthis[:,1], label="Sample data", c="pink") plt.scatter(X_generated[:,0], X_generated[:,1], label="Fixed Input Prediction") plt.legend(loc='upper right') plt.tight_layout() plt.xlabel("x") plt.ylabel("y") ###Output _____no_output_____
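###Markdown A quick sanity check on the latent-space optimization above: how far do the generated x-coordinates drift from the requested fixed inputs? The sketch below assumes the generated points are returned in the same order as `predictthis`, with the same number of samples per requested point: ###Code
import numpy as np

# Rough conditioning error between requested and generated x positions.
reps = len(X_generated) // len(predictthis)          # generated samples per requested point
x_requested = np.repeat(predictthis[:, 0], reps)
x_error = np.abs(X_generated[:len(x_requested), 0] - x_requested)
print("mean |x_generated - x_requested|:", x_error.mean())
print("max  |x_generated - x_requested|:", x_error.max())
###Output _____no_output_____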
Library_design/CTP-04/CTP-04_Check-size_Merge.ipynb
###Markdown CTP-04 Check and Merge Fasta Filesby Pu Zheng ###Code # minimal imports for biopython from Bio import SeqIO from Bio.Seq import Seq from Bio.Alphabet import IUPAC from Bio.SeqRecord import SeqRecord import os,glob,time import numpy as np # Information copied by hand, as a reference probe_len = 142; sub_lib_size = {'chr21': 231435, 'chr21_7by21': 30036, 'chr21_9by36': 25879 } print "- Total expected probes in this library:\n", sum(sub_lib_size.values()) # library directory total_library_folder = r'/n/boslfs/LABS/zhuang_lab/User/pzheng/Libraries/CTP-04/'; # probe subdir pb_subfolder = 'final_probes' #pb_filename = 'filtered_probes.fasta' # after normal check #pb_filename = 'blast_centered_probes.fasta' # after blast pb_filename = 'filtered_blast_centered_probes.fasta' # after blast and extra check # sub folders sub_lib_folders = glob.glob(total_library_folder + os.sep + '*') print "- All folders in library directory:\n", sub_lib_folders # all probe records all_records = []; for fd in sub_lib_folders: if os.path.basename(fd) in sub_lib_size.keys(): _filename = fd + os.sep + pb_subfolder + os.sep + pb_filename; print "-- reading file:",_filename; with open(_filename, 'rU') as handle: for record in SeqIO.parse(handle, "fasta"): all_records.append(record); if len(record.seq) != probe_len: print 'Wrong length!' print "- Acquired probe in this library:", len(all_records) if len(all_records) == sum(sub_lib_size.values()): print "- Library size matches"; output_filename = 'merged_library.fasta'; with open(total_library_folder+os.sep+output_filename, 'w') as output_handle: print "- Writing all probes into", output_filename; SeqIO.write(all_records, output_handle, 'fasta'); ###Output - Writing all probes into merged_library.fasta
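###Markdown As an extra check after writing the merged file, it can be re-read and verified for probe count, probe length and duplicate sequences (a small sketch, assuming the variables defined above are still in scope): ###Code
# Sketch: re-read the merged fasta and double-check count, length and duplicates
merged_file = total_library_folder + os.sep + output_filename
merged_records = list(SeqIO.parse(merged_file, 'fasta'))
unique_seqs = set(str(r.seq) for r in merged_records)
print("- probes in merged file: %d" % len(merged_records))
print("- probes with unexpected length: %d" % sum(len(r.seq) != probe_len for r in merged_records))
print("- duplicated sequences: %d" % (len(merged_records) - len(unique_seqs)))
###Output _____no_output_____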
AIC/K-NN_with_iris_dataset.ipynb
###Markdown the distance is calculated as$d_{total} = \sqrt{\sum_{n=1}^Nd_n^2}$where N is the dimension of each data element and $d_n$ is the distance on each dimension.In this case, take the square root or not has no impact on the result. Therefore the algorithm could be simplified into$d_{total} = \sum_{n=1}^Nd_n^2$so the function is more efficient ###Code def calculate_distance(input, dataSet, label): dist = [] for i in range(len(dataSet)): d = 0 for j in range(len(dataSet[i])): # dn is dataSet[i][j] - input[j] d += (dataSet[i][j] - input[j]) ** 2 dist.append(d) return np.array(dist) # dist is the array storing 150 points' distance dist = calculate_distance(input, dataSet, labels) print(dist[:5]) # a sorted index list for 150 data points according to the distance to the input point # cut the list and keep the first k elements (smallest k elements) k_dist_sorted_index = dist.argsort()[:k] print(k_dist_sorted_index) # list used to store the labels votes of the input labelsVote = [0 for i in range(len(labelsTypes))] # get the index of first k elements according to k_dist_sorted_index for i in k_dist_sorted_index: # labels[i] is the label stored in index i # since labelsTypes and labelsVote are matched pairs(label index is same) # the nth element in labelsVote is labeled as labelsTypes[n] # therefore labelsVote[i] is the vote of label labelsTypes[n] labelsVote[labelsTypes.index(labels[i])] += 1 print(labelsVote) # find the index of input label in labelsVote inputLabelIndex = labelsVote.index(max(labelsVote)) print(inputLabelIndex) # since labelsVote is matched with labelsTypes, the index could be directly used inputLabel = labelsTypes[inputLabelIndex] inputName = labelsNames[inputLabelIndex] print("the input is labeled as ", inputLabel, "which is", inputName) # set up color scheme for the plot graph # 3 classes -> 3 colors colors = ['b', 'g', 'r'] # set the size of the matplotlib figure plt.rcParams['figure.figsize'] = [15, 15] # plot the data set points for pointIndex in range(len(dataSet)): colorIndex = labelsTypes.index(labels[pointIndex]) plt.subplot(221) plt.scatter(dataSet[pointIndex][1], dataSet[pointIndex][0], s=50, c=colors[colorIndex], alpha=0.5) plt.scatter(input[1], input[0],s=50, c=colors[inputLabelIndex], marker='v', alpha=0.5) plt.xlabel("SepalWidth") plt.ylabel("SepalLength") plt.subplot(222) plt.scatter(dataSet[pointIndex][1], dataSet[pointIndex][2], s=50, c=colors[colorIndex], alpha=0.5) plt.scatter(input[1], input[2],s=50, c=colors[inputLabelIndex], marker='v', alpha=0.5) plt.xlabel("SepalWidth") plt.ylabel("PetalLength") plt.subplot(223) plt.scatter(dataSet[pointIndex][3], dataSet[pointIndex][2], s=50, c=colors[colorIndex], alpha=0.5) plt.scatter(input[3], input[2],s=50, c=colors[inputLabelIndex], marker='v', alpha=0.5) plt.xlabel("PetalWidth") plt.ylabel("PetalLength") plt.subplot(224) plt.scatter(dataSet[pointIndex][3], dataSet[pointIndex][0], s=50, c=colors[colorIndex], alpha=0.5) plt.scatter(input[3], input[0],s=50, c=colors[inputLabelIndex], marker='v', alpha=0.5) plt.xlabel("PetalWidth") plt.ylabel("SepalLength") import seaborn as sns sns.set(style="ticks") df = sns.load_dataset("iris") print(df.head(5)) sns.pairplot(df, hue="species",size=3) ###Output sepal_length sepal_width petal_length petal_width species 0 5.1 3.5 1.4 0.2 setosa 1 4.9 3.0 1.4 0.2 setosa 2 4.7 3.2 1.3 0.2 setosa 3 4.6 3.1 1.5 0.2 setosa 4 5.0 3.6 1.4 0.2 setosa
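###Markdown As a cross-check of the hand-written k-NN above, scikit-learn's `KNeighborsClassifier` should assign the same label to the same input point. A small sketch, re-using `dataSet`, `labels`, `input`, `k` and `inputLabel` from the cells above: ###Code
from sklearn.neighbors import KNeighborsClassifier

# Fit on the same 150 iris points and classify the same input point.
knn = KNeighborsClassifier(n_neighbors=k)
knn.fit(dataSet, labels)
print("sklearn prediction:     ", knn.predict([input])[0])
print("hand-written prediction:", inputLabel)
###Output _____no_output_____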
10_pipeline/stepfunctions/01_Create_Pipeline_Train_and_Deploy_Reviews_BERT_TensorFlow.ipynb
###Markdown NOTE: THIS NOTEBOOK WILL TAKE A 30 MINUTES TO COMPLETE. PLEASE BE PATIENT. Create a Training Pipeline with the Step Functions Data Science SDK ![Step Functions SageMaker Pipeline](img/stepfunctions_graph.png) ###Code from botocore.exceptions import ClientError import os import sagemaker import logging import boto3 import sagemaker import pandas as pd sess = sagemaker.Session() bucket = sess.default_bucket() role = sagemaker.get_execution_role() region = boto3.Session().region_name sm = boto3.Session().client(service_name='sagemaker', region_name=region) import stepfunctions import logging from stepfunctions.template.pipeline import TrainingPipeline stepfunctions.set_stream_logger(level=logging.INFO) ###Output _____no_output_____ ###Markdown Create an IAM Execution Role for Step FunctionsWe need a StepFunctionsWorkflowExecutionRole so that you can create and execute workflows in Step Functions. ###Code iam = boto3.Session().client(service_name='iam', region_name=region) sts = boto3.Session().client(service_name='sts', region_name=region) sfn = boto3.Session().client(service_name='stepfunctions', region_name=region) stepfunction_role_name = 'DSOAWS_StepFunctionsExecutionRole' ###Output _____no_output_____ ###Markdown Create an AssumeRolePolicyDocument ###Code assume_role_policy_doc = { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Principal": { "Service": "states.amazonaws.com" }, "Action": "sts:AssumeRole" } ] } ###Output _____no_output_____ ###Markdown Create `DSOAWS_StepFunctionsExecutionRole` ###Code import json import time try: iam.create_role( RoleName=stepfunction_role_name, AssumeRolePolicyDocument=json.dumps(assume_role_policy_doc), Description='DSOAWS Step Function Workflow Execution Role' ) time.sleep(10) print("Role created.") except ClientError as e: if e.response['Error']['Code'] == 'EntityAlreadyExists': print("Role already exists. 
This is OK.") else: print("Unexpected error: %s" % e) ###Output _____no_output_____ ###Markdown Get the Role ARN ###Code stepfunction_role = iam.get_role(RoleName=stepfunction_role_name) stepfunction_role_arn = stepfunction_role['Role']['Arn'] print(stepfunction_role_arn) ###Output _____no_output_____ ###Markdown Add a Policy to the Role Define permissions ###Code stepfunction_permissions = { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "sagemaker:CreateTransformJob", "sagemaker:DescribeTransformJob", "sagemaker:StopTransformJob", "sagemaker:CreateTrainingJob", "sagemaker:DescribeTrainingJob", "sagemaker:StopTrainingJob", "sagemaker:CreateHyperParameterTuningJob", "sagemaker:DescribeHyperParameterTuningJob", "sagemaker:StopHyperParameterTuningJob", "sagemaker:CreateModel", "sagemaker:CreateEndpointConfig", "sagemaker:CreateEndpoint", "sagemaker:DeleteEndpointConfig", "sagemaker:DeleteEndpoint", "sagemaker:UpdateEndpoint", "sagemaker:CreateProcessingJob", "sagemaker:DescribeProcessingJob", "sagemaker:ListProcessingJobs", "sagemaker:StopProcessingJob", "sagemaker:ListTags", "lambda:InvokeFunction", "sqs:SendMessage", "sns:Publish", "ecs:RunTask", "ecs:StopTask", "ecs:DescribeTasks", "dynamodb:GetItem", "dynamodb:PutItem", "dynamodb:UpdateItem", "dynamodb:DeleteItem", "batch:SubmitJob", "batch:DescribeJobs", "batch:TerminateJob", "glue:StartJobRun", "glue:GetJobRun", "glue:GetJobRuns", "glue:BatchStopJobRun" ], "Resource": "*" }, { "Effect": "Allow", "Action": [ "iam:PassRole" ], "Resource": "*", "Condition": { "StringEquals": { "iam:PassedToService": "sagemaker.amazonaws.com" } } }, { "Effect": "Allow", "Action": [ "events:PutTargets", "events:PutRule", "events:DescribeRule" ], "Resource": [ "arn:aws:events:*:*:rule/StepFunctionsGetEventsForSageMakerTrainingJobsRule", "arn:aws:events:*:*:rule/StepFunctionsGetEventsForSageMakerTransformJobsRule", "arn:aws:events:*:*:rule/StepFunctionsGetEventsForSageMakerTuningJobsRule", "arn:aws:events:*:*:rule/StepFunctionsGetEventsForECSTaskRule", "arn:aws:events:*:*:rule/StepFunctionsGetEventsForBatchJobsRule", ] } ] } ###Output _____no_output_____ ###Markdown Turn into Policy Object ###Code stepfunction_policy_name = 'DSOAWS_StepFunctionsWorkflowExecutionPolicy' account_id = sts.get_caller_identity()['Account'] import time try: stepfunction_policy = iam.create_policy( PolicyName=stepfunction_policy_name, PolicyDocument=json.dumps(stepfunction_permissions) ) stepfunction_policy_arn = f'arn:aws:iam::{account_id}:policy/{stepfunction_policy_name}' print("Policy created.") except ClientError as e: if e.response['Error']['Code'] == 'EntityAlreadyExists': print("Policy already exists. Updating policy...") stepfunction_policy_arn = f'arn:aws:iam::{account_id}:policy/{stepfunction_policy_name}' try: stepfunction_policy = iam.create_policy_version( PolicyArn=stepfunction_policy_arn, PolicyDocument=json.dumps(stepfunction_permissions), SetAsDefault=True) print('Policy updated.') except: print('** Policy cannot have more than 5 versions. This is likely OK.') else: print("Unexpected error: %s" % e) print(stepfunction_policy_arn) ###Output _____no_output_____ ###Markdown Attach Policy To Step Function Workflow Execution Role ###Code import time try: response = iam.attach_role_policy( PolicyArn=stepfunction_policy_arn, RoleName=stepfunction_role_name ) print("Done.") except ClientError as e: if e.response['Error']['Code'] == 'EntityAlreadyExists': print("Policy is already attached. 
This is OK.") else: print("Unexpected error: %s" % e) import time try: response = iam.attach_role_policy( PolicyArn='arn:aws:iam::aws:policy/service-role/AWSLambdaRole', RoleName=stepfunction_role_name ) print("Done.") except ClientError as e: if e.response['Error']['Code'] == 'EntityAlreadyExists': print("Policy is already attached. This is OK.") else: print("Unexpected error: %s" % e) import time try: response = iam.attach_role_policy( PolicyArn='arn:aws:iam::aws:policy/CloudWatchEventsFullAccess', RoleName=stepfunction_role_name ) print("Done.") except ClientError as e: if e.response['Error']['Code'] == 'EntityAlreadyExists': print("Policy is already attached. This is OK.") else: print("Unexpected error: %s" % e) ###Output _____no_output_____ ###Markdown Setup Processing Step![](img/prepare_dataset_bert.png)![](img/processing.jpg) Upload the Processing Script to S3 for the Pipeline to Consume ###Code !pygmentize ./preprocess-scikit-text-to-bert.py import time processing_code_s3_prefix = 'pipeline_sklearn_processing/{}/code'.format(int(time.time())) input_code = sess.upload_data( './preprocess-scikit-text-to-bert.py', bucket=bucket, key_prefix=processing_code_s3_prefix, ) %store processing_code_s3_prefix print(processing_code_s3_prefix) ###Output _____no_output_____ ###Markdown Set the Processing Hyper-Parameters ###Code max_seq_length=64 train_split_percentage=0.90 validation_split_percentage=0.05 test_split_percentage=0.05 balance_dataset=True processing_instance_count=1 processing_instance_type='ml.c5.2xlarge' ###Output _____no_output_____ ###Markdown Specify the Raw Inputs S3 Location ###Code raw_input_data_s3_uri = 's3://{}/amazon-reviews-pds/tsv/'.format(bucket) print(raw_input_data_s3_uri) !aws s3 ls $raw_input_data_s3_uri from sagemaker.sklearn.processing import SKLearnProcessor processor = SKLearnProcessor(framework_version='0.20.0', role=role, instance_type=processing_instance_type, instance_count=processing_instance_count, max_runtime_in_seconds=7200) ###Output _____no_output_____ ###Markdown Setup Training Step![](img/bert_training.png) Show Training Script ###Code !pygmentize src/tf_bert_reviews.py ###Output _____no_output_____ ###Markdown Setup Training Hyper-ParametersNote that `max_seq_length` is re-used from the processing hyper-parameters above ###Code epochs=3 learning_rate=0.00001 epsilon=0.00000001 train_batch_size=128 validation_batch_size=128 test_batch_size=128 train_steps_per_epoch=100 validation_steps=100 test_steps=100 train_instance_count=1 train_instance_type='ml.c5.9xlarge' train_volume_size=1024 use_xla=True use_amp=True freeze_bert_layer=False enable_sagemaker_debugger=False enable_checkpointing=False enable_tensorboard=False input_mode='File' run_validation=True run_test=True run_sample_predictions=True deploy_instance_count=1 # deploy_instance_type='ml.m5.4xlarge' deploy_instance_type='ml.m5.large' # bur ###Output _____no_output_____ ###Markdown Setup Metrics To Track Model Performance ###Code metrics_definitions = [ {'Name': 'train:loss', 'Regex': 'loss: ([0-9\\.]+)'}, {'Name': 'train:accuracy', 'Regex': 'accuracy: ([0-9\\.]+)'}, {'Name': 'validation:loss', 'Regex': 'val_loss: ([0-9\\.]+)'}, {'Name': 'validation:accuracy', 'Regex': 'val_accuracy: ([0-9\\.]+)'}, ] ###Output _____no_output_____ ###Markdown Setup Estimator ###Code from sagemaker.tensorflow import TensorFlow estimator = TensorFlow(entry_point='tf_bert_reviews.py', source_dir='src', role=role, instance_count=train_instance_count, # Make sure you have at least this number of input files or 
the ShardedByS3Key distibution strategy will fail the job due to no data available instance_type=train_instance_type, volume_size=train_volume_size, py_version='py3', framework_version='2.1.0', hyperparameters={'epochs': epochs, 'learning_rate': learning_rate, 'epsilon': epsilon, 'train_batch_size': train_batch_size, 'validation_batch_size': validation_batch_size, 'test_batch_size': test_batch_size, 'train_steps_per_epoch': train_steps_per_epoch, 'validation_steps': validation_steps, 'test_steps': test_steps, 'use_xla': use_xla, 'use_amp': use_amp, 'max_seq_length': max_seq_length, 'freeze_bert_layer': freeze_bert_layer, 'enable_sagemaker_debugger': enable_sagemaker_debugger, 'enable_checkpointing': enable_checkpointing, 'enable_tensorboard': enable_tensorboard, 'run_validation': run_validation, 'run_test': run_test, 'run_sample_predictions': run_sample_predictions}, input_mode=input_mode, metric_definitions=metrics_definitions, # max_run=7200 # max 2 hours * 60 minutes seconds per hour * 60 seconds per minute ) ###Output _____no_output_____ ###Markdown Setup the Deploy![](img/realtime_inference.png) Setup Pipeline with the Step Functions SDKA typical task for a data scientist is to train a model and deploy that model to an endpoint. Without the Step Functions SDK, this is a four step process on SageMaker that includes the following.1. Training the model2. Creating the model on SageMaker3. Creating an endpoint configuration4. Deploying the trained model to the configured endpointThe Step Functions SDK provides the [TrainingPipeline](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/pipelines.htmlstepfunctions.template.pipeline.train.TrainingPipeline) API to simplify this procedure. The following configures `pipeline` with the necessary parameters to define a training pipeline. ###Code import time timestamp = int(time.time()) pipeline_name = 'bert-pipeline-{}'.format(timestamp) print('Pipeline name {}'.format(pipeline_name)) from __future__ import absolute_import from sagemaker.utils import base_name_from_image from sagemaker.sklearn.estimator import SKLearn from sagemaker.model import Model from sagemaker.pipeline import PipelineModel from sagemaker.processing import ProcessingInput, ProcessingOutput from sagemaker.inputs import TrainingInput from stepfunctions.steps import ( TrainingStep, TransformStep, ModelStep, EndpointConfigStep, EndpointStep, Chain, Fail, Catch, ProcessingStep ) from stepfunctions.workflow import Workflow from stepfunctions.template.pipeline.common import WorkflowTemplate from stepfunctions.template.pipeline.common import StepId class TrainingPipelineWithDifferentDeployInstanceTypeAndProcessingJob(WorkflowTemplate): """ Creates a standard training pipeline with the following steps in order: 1. Train estimator 2. Create estimator model 3. Endpoint configuration 4. Deploy model """ __allowed_kwargs = ('pipeline_name',) def __init__(self, processor, raw_input_data_s3_uri, train_split_percentage, validation_split_percentage, test_split_percentage, max_seq_length, balance_dataset, estimator, role, bucket, client, deploy_instance_count, deploy_instance_type, **kwargs): """ Args: estimator (sagemaker.estimator.EstimatorBase): The estimator to use for training. Can be a BYO estimator, Framework estimator or Amazon algorithm estimator. role (str): An AWS IAM role (either name or full Amazon Resource Name (ARN)). This role is used to create, manage, and execute the Step Functions workflows. inputs: Information about the training data. 
Please refer to the `fit()` method of the associated estimator, as this can take any of the following forms: * (str) - The S3 location where training data is saved. * (dict[str, str] or dict[str, `sagemaker.session.s3_input`]) - If using multiple channels for training data, you can specify a dict mapping channel names to strings or `sagemaker.session.s3_input` objects. * (`sagemaker.session.s3_input`) - Channel configuration for S3 data sources that can provide additional information about the training dataset. See `sagemaker.session.s3_input` for full details. * (`sagemaker.amazon.amazon_estimator.RecordSet`) - A collection of Amazon `Record` objects serialized and stored in S3. For use with an estimator for an Amazon algorithm. * (list[`sagemaker.amazon.amazon_estimator.RecordSet`]) - A list of `sagemaker.amazon.amazon_estimator.RecordSet` objects, where each instance is a different channel of training data. bucket (str): S3 bucket under which the output artifacts from the training job will be stored. The parent path used is built using the format: ``s3://{bucket}/{pipeline_name}/models/{job_name}/``. In this format, `pipeline_name` refers to the keyword argument provided for TrainingPipeline. If a `pipeline_name` argument was not provided, one is auto-generated by the pipeline as `training-pipeline-<timestamp>`. Also, in the format, `job_name` refers to the job name provided when calling the :meth:`TrainingPipeline.run()` method. client (SFN.Client, optional): boto3 client to use for creating and interacting with the training pipeline in Step Functions. (default: None) Keyword Args: pipeline_name (str, optional): Name of the pipeline. This name will be used to name jobs (if not provided when calling execute()), models, endpoints, and S3 objects created by the pipeline. If a `pipeline_name` argument was not provided, one is auto-generated by the pipeline as `training-pipeline-<timestamp>`. (default:None) """ self.processor = processor self.raw_input_data_s3_uri = raw_input_data_s3_uri self.train_split_percentage = train_split_percentage self.validation_split_percentage = validation_split_percentage self.test_split_percentage = test_split_percentage self.max_seq_length = max_seq_length self.balance_dataset = balance_dataset self.estimator = estimator self.role = role self.bucket = bucket self.deploy_instance_count = deploy_instance_count self.deploy_instance_type = deploy_instance_type for key in self.__class__.__allowed_kwargs: setattr(self, key, kwargs.pop(key, None)) if not self.pipeline_name: self.__pipeline_name_unique = True self.pipeline_name = 'training-pipeline-{date}'.format(date=self._generate_timestamp()) self.definition = self.build_workflow_definition() self.input_template = self._extract_input_template(self.definition) workflow = Workflow(name=self.pipeline_name, definition=self.definition, role=role, format_json=True, client=client) super(TrainingPipelineWithDifferentDeployInstanceTypeAndProcessingJob, self).__init__(s3_bucket=bucket, workflow=workflow, role=role, client=client) def build_workflow_definition(self): """ Build the workflow definition for the training pipeline with all the states involved. Returns: :class:`~stepfunctions.steps.states.Chain`: Workflow definition as a chain of states involved in the the training pipeline. 
""" processing_inputs=[ ProcessingInput( input_name='raw_input', source=raw_input_data_s3_uri, destination='/opt/ml/processing/input/data/', s3_data_distribution_type='ShardedByS3Key' ), ProcessingInput( input_name='code', source=input_code, destination='/opt/ml/processing/input/code', ) ] processed_train_data_s3_uri = 's3://{}/{}/processing/output/bert-train'.format(self.bucket, self.pipeline_name) processed_validation_data_s3_uri = 's3://{}/{}/processing/output/bert-validation'.format(self.bucket, self.pipeline_name) processed_test_data_s3_uri = 's3://{}/{}/processing/output/bert-test'.format(self.bucket, self.pipeline_name) processing_outputs=[ ProcessingOutput(s3_upload_mode='EndOfJob', output_name='bert-train', source='/opt/ml/processing/output/bert/train', destination=processed_train_data_s3_uri ), ProcessingOutput(s3_upload_mode='EndOfJob', output_name='bert-validation', source='/opt/ml/processing/output/bert/validation', destination=processed_validation_data_s3_uri ), ProcessingOutput(s3_upload_mode='EndOfJob', output_name='bert-test', source='/opt/ml/processing/output/bert/test', destination=processed_test_data_s3_uri ), ] processing_step = ProcessingStep( 'Processing Job', # StepId.ProcessingJob.value? processor=self.processor, job_name=self.pipeline_name, inputs=processing_inputs, outputs=processing_outputs, # experiment_config=experiment_config, container_arguments=['--train-split-percentage', str(self.train_split_percentage), '--validation-split-percentage', str(self.validation_split_percentage), '--test-split-percentage', str(self.test_split_percentage), '--max-seq-length', str(self.max_seq_length), '--balance-dataset', str(self.balance_dataset)], container_entrypoint=['python3', '/opt/ml/processing/input/code/preprocess-scikit-text-to-bert.py'], ) s3_input_train_data = TrainingInput(s3_data=processed_train_data_s3_uri, distribution='ShardedByS3Key') s3_input_validation_data = TrainingInput(s3_data=processed_validation_data_s3_uri, distribution='ShardedByS3Key') s3_input_test_data = TrainingInput(s3_data=processed_test_data_s3_uri, distribution='ShardedByS3Key') training_step = TrainingStep( StepId.Train.value, estimator=self.estimator, job_name=self.pipeline_name + '/estimator-source', data={ 'train': s3_input_train_data, 'validation': s3_input_validation_data, 'test': s3_input_test_data }, ) model = self.estimator.create_model() model_step = ModelStep( StepId.CreateModel.value, instance_type=deploy_instance_type, model=model, model_name=self.pipeline_name ) endpoint_config_step = EndpointConfigStep( StepId.ConfigureEndpoint.value, endpoint_config_name=self.pipeline_name, model_name=self.pipeline_name, initial_instance_count=self.deploy_instance_count, instance_type=self.deploy_instance_type ) deploy_step = EndpointStep( StepId.Deploy.value, endpoint_name=self.pipeline_name, endpoint_config_name=self.pipeline_name, ) return Chain([ processing_step, training_step, model_step, endpoint_config_step, deploy_step ]) def execute(self, job_name=None, hyperparameters=None): """ Run the training pipeline. Args: job_name (str, optional): Name for the training job. If one is not provided, a job name will be auto-generated. (default: None) hyperparameters (dict, optional): Hyperparameters for the estimator training. (default: None) Returns: :py:class:`~stepfunctions.workflow.Execution`: Running instance of the training pipeline. 
""" inputs = self.input_template.copy() if hyperparameters is not None: inputs[StepId.Train.value]['HyperParameters'] = { k: str(v) for k, v in hyperparameters.items() } if job_name is None: job_name = '{base_name}-{timestamp}'.format(base_name='training-pipeline', timestamp=self._generate_timestamp()) print(inputs) # Configure training and model inputs[StepId.Train.value]['TrainingJobName'] = 'estimator-' + job_name inputs[StepId.Train.value]['OutputDataConfig']['S3OutputPath'] = 's3://{s3_bucket}/{pipeline_name}/models'.format( s3_bucket=self.s3_bucket, pipeline_name=self.workflow.name ) inputs[StepId.CreateModel.value]['ModelName'] = job_name # Configure endpoint inputs[StepId.ConfigureEndpoint.value]['EndpointConfigName'] = job_name for variant in inputs[StepId.ConfigureEndpoint.value]['ProductionVariants']: variant['ModelName'] = job_name inputs[StepId.Deploy.value]['EndpointConfigName'] = job_name inputs[StepId.Deploy.value]['EndpointName'] = job_name # Configure the path to model artifact inputs[StepId.CreateModel.value]['PrimaryContainer']['ModelDataUrl'] = '{s3_uri}/{job}/output/model.tar.gz'.format( s3_uri=inputs[StepId.Train.value]['OutputDataConfig']['S3OutputPath'], job=inputs[StepId.Train.value]['TrainingJobName'] ) return self.workflow.execute(inputs=inputs, name=job_name) # Note: If you see an error about 'TensorFlowModel' object has no attribute 'image', you are using SageMaker SDK 1.x # The Data Science SDK only supports 1.x at this time. # Waiting on this: https://github.com/aws/aws-step-functions-data-science-sdk-python/issues/69 pipeline = TrainingPipelineWithDifferentDeployInstanceTypeAndProcessingJob( processor=processor, raw_input_data_s3_uri=raw_input_data_s3_uri, train_split_percentage=train_split_percentage, validation_split_percentage=validation_split_percentage, test_split_percentage=test_split_percentage, max_seq_length=max_seq_length, balance_dataset=balance_dataset, estimator=estimator, role=stepfunction_role_arn, bucket=bucket, client=sfn, deploy_instance_count=deploy_instance_count, deploy_instance_type=deploy_instance_type, ) ###Output _____no_output_____ ###Markdown Visualize the pipeline You can now view the workflow definition, and also visualize it as a graph. This workflow and graph represent your training pipeline. View the workflow definition ###Code print(pipeline.workflow.definition.to_json(pretty=True)) ###Output _____no_output_____ ###Markdown Visualize the workflow graph *Note: This only renders in Jupyter. NOT in JupyterLab.* ###Code pipeline.render_graph() ###Output _____no_output_____ ###Markdown You should see a graph like this: Create and execute the pipeline on AWS Step FunctionsCreate the pipeline in AWS Step Functions with [create](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/workflow.htmlstepfunctions.workflow.Workflow.create). ###Code # Sleeping to wait for role and policy creations import time time.sleep(10) pipeline.create() ###Output _____no_output_____ ###Markdown Run the workflow with [execute](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/workflow.htmlstepfunctions.workflow.Workflow.execute). A link will be provided after the following cell is executed. Following this link, you can monitor your pipeline execution on Step Functions' console. 
###Code execution = pipeline.execute(job_name=None, hyperparameters=None) stepfunction_arn = 'arn:aws:states:{}:{}:stateMachine:{}'.format(region, account_id, pipeline.pipeline_name) print(stepfunction_arn) %store stepfunction_arn stepfunction_name = pipeline.pipeline_name print(stepfunction_name) %store stepfunction_name ###Output _____no_output_____ ###Markdown Check Pipeline Progress_Note: This only renders in Jupyter at the moment - not in JupyterLab. This is changing soon._ ###Code execution.render_progress() ###Output _____no_output_____ ###Markdown You should see a graph like this: ###Code %%time import time events = execution.list_events() while len(events) <= 5: print('Number of events: {}'.format(len(events))) time.sleep(30) events = execution.list_events() print('Number of events: {}'.format(len(events))) execution.render_progress() ###Output _____no_output_____ ###Markdown _Wait for ^^ Number of Events ^^ to Reach At Least 6_ ###Code import json processing_job_name = json.loads(events[5]['taskSucceededEventDetails']['output'])['ProcessingJobName'] print('Processing Job Name: {}'.format(processing_job_name)) print('') processing_job_outputs = json.loads(events[5]['taskSucceededEventDetails']['output'])['ProcessingOutputConfig']['Outputs'] for output in processing_job_outputs: if output['OutputName'] == 'bert-train': train_data_s3_uri = output['S3Output']['S3Uri'] if output['OutputName'] == 'bert-validation': validation_data_s3_uri = output['S3Output']['S3Uri'] if output['OutputName'] == 'bert-test': test_data_s3_uri = output['S3Output']['S3Uri'] print('Processed Data Bert Train S3 URI: {}'.format(train_data_s3_uri)) print('Processed Data Bert Validation S3 URI: {}'.format(validation_data_s3_uri)) print('Processed Data Bert Test S3 URI: {}'.format(test_data_s3_uri)) from sagemaker.s3 import S3Downloader print(S3Downloader.list(train_data_s3_uri)) from sagemaker.s3 import S3Downloader print(S3Downloader.list(validation_data_s3_uri)) from sagemaker.s3 import S3Downloader print(S3Downloader.list(test_data_s3_uri)) %%time import time events = execution.list_events() while len(events) <= 11: print('Number of events: {}'.format(len(events))) time.sleep(30) events = execution.list_events() print('Number of events: {}'.format(len(events))) execution.render_progress() import json training_job_name = json.loads(events[11]['taskSucceededEventDetails']['output'])['TrainingJobName'] print('Training Job Name: {}'.format(training_job_name)) print('') trained_model_s3_uri = json.loads(events[11]['taskSucceededEventDetails']['output'])['ModelArtifacts']['S3ModelArtifacts'] print('Trained Model S3 URI: {}'.format(trained_model_s3_uri)) from IPython.core.display import display, HTML display(HTML('<b>Review <a target="blank" href="https://console.aws.amazon.com/sagemaker/home?region={}#/jobs/{}">Training Job</a></b>'.format(region, training_job_name))) ###Output _____no_output_____ ###Markdown Copy the Model from S3 ###Code !aws s3 cp $trained_model_s3_uri ./model.tar.gz !mkdir -p ./model/ !tar -xvzf ./model.tar.gz -C ./model/ ###Output _____no_output_____ ###Markdown Show the Model Prediction Signature ###Code !saved_model_cli show --all --dir ./model/tensorflow/saved_model/0/ %%time import time events = execution.list_events() while len(events) <= 24: print('Number of events: {}'.format(len(events))) time.sleep(30) events = execution.list_events() print('Number of events: {}'.format(len(events))) execution.render_progress() ###Output _____no_output_____ ###Markdown _Wait for ^^ Number of Events ^^ 
to Reach At Least 19_ ###Code import json step_functions_pipeline_endpoint_name = json.loads(events[24]['taskScheduledEventDetails']['parameters'])['EndpointName'] print('Endpoint Name: {}'.format(step_functions_pipeline_endpoint_name)) %%time import time events = execution.list_events() while len(events) <= 27: print('Number of events: {}'.format(len(events))) time.sleep(30) events = execution.list_events() print('Number of events: {}'.format(len(events))) execution.render_progress() ###Output _____no_output_____ ###Markdown _Wait for ^^ Number of Events ^^ to Reach At Least 22_ ###Code step_functions_pipeline_endpoint_arn = json.loads(events[27]['stateExitedEventDetails']['output'])['EndpointArn'] print('Endpoint ARN: {}'.format(step_functions_pipeline_endpoint_arn)) from IPython.core.display import display, HTML display(HTML('<b>Review <a target="blank" href="https://console.aws.amazon.com/sagemaker/home?region={}#/endpoints/{}">SageMaker REST Endpoint</a></b>'.format(region, step_functions_pipeline_endpoint_name))) ###Output _____no_output_____ ###Markdown Pass Variables to the Next Notebooks(s) ###Code print(step_functions_pipeline_endpoint_name) %store step_functions_pipeline_endpoint_name %store %%javascript Jupyter.notebook.save_checkpoint(); Jupyter.notebook.session.delete(); ###Output _____no_output_____
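###Markdown Outside of the Step Functions Data Science SDK, the same pipeline can also be inspected with the plain boto3 Step Functions client. Below is a minimal sketch of such a status check, assuming the `sfn` client created near the top of this notebook and the `stepfunction_arn` stored above (the `maxResults` value is arbitrary): ###Code
# list recent executions of the pipeline's state machine and describe the newest one
response = sfn.list_executions(
    stateMachineArn=stepfunction_arn,
    maxResults=5
)

for e in response['executions']:
    print(e['name'], e['status'], e['startDate'])

if response['executions']:
    newest = max(response['executions'], key=lambda e: e['startDate'])
    details = sfn.describe_execution(executionArn=newest['executionArn'])
    print('Newest execution status: {}'.format(details['status']))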
tutorials/W1D3_ModelFitting/hyo_W1D3_Tutorial7.ipynb
###Markdown Neuromatch Academy: Week 1, Day 3, Tutorial 7 Model Selection: AIC & cross validation Tutorial ObjectivesThis is Tutorial 7 of a series on fitting models to data. We start with simple linear regression, using least squares optimization (Tutorial 1) and Maximum Likelihood Estimation (Tutorial 2). We will use bootstrapping to build confidence intervals around the inferred linear model parameters (Tutorial 3). We'll finish our exploration of linear models by generalizing to multiple linear regression (Tutorial 4). We then move on to polynomial regression (Tutorial 5). We end by learning how to choose between these various models. We discuss the bias-variance trade-off (Tutorial 6) and two common methods for model selection, AIC and Cross Validation (Tutorial 7).In this tutorial, we will learn about model selection and two methods to accomplish model selection (AIC and cross-validation).Tutorial objectives:* Implement AIC and use it to compare polynomial regression models* Implement cross-validation and use it to compare polynomial regression model ###Code #@title Video Cross-Validation from IPython.display import YouTubeVideo video = YouTubeVideo(id="EZAiR2frE7Y", width=854, height=480, fs=1) print("Video available at https://youtube.com/watch?v=" + video.id) video ###Output _____no_output_____ ###Markdown Setup ###Code #@title Imports import numpy as np import matplotlib.pyplot as plt from sklearn.model_selection import KFold #@title Figure Settings %matplotlib inline fig_w, fig_h = (8, 6) plt.rcParams.update({'figure.figsize': (fig_w, fig_h)}) %config InlineBackend.figure_format = 'retina' #@title Helper functions def ordinary_least_squares(x, y): """Ordinary least squares estimator for linear regression. Args: x (ndarray): design matrix of shape (n_samples, n_regressors) y (ndarray): vecto"r of measurements of shape (n_samples) Returns: ndarray: estimated parameter values of shape (n_regressors) """ return np.linalg.inv(x.T @ x) @ x.T @ y def make_design_matrix(x, order): """Create the design matrix of inputs for use in polynomial regression Args: x (ndarray): An array of shape (samples,) that contains the input values. max_order (scalar): The order of the polynomial we want to fit Returns: numpy array: The design matrix containing x raised to different powers """ # Broadcast to shape (n x 1) if shape (n, ) so this function generalizes to multiple inputs if x.ndim == 1: x = x[:,None] #if x has more than one feature, we don't want multiple columns of ones so we assign # x^0 here design_matrix = np.ones((x.shape[0],1)) # Loop through rest of degrees and stack columns for degree in range(1, order+1): design_matrix = np.hstack((design_matrix, x**degree)) return design_matrix def solve_poly_reg(x, y, max_order): """Fit a polynomial regression model for each order 0 through max_order. 
Args: x (ndarray): An array of shape (samples, ) that contains the input values y (ndarray): An array of shape (samples, ) that contains the output values max_order (scalar): The order of the polynomial we want to fit Returns: numpy array: (input_features, max_order+1) Each column contains the fitted weights for that order of polynomial regression """ # Create a dictionary with polynomial order as keys, and np array of theta # (weights) as the values theta_hat = {} # Loop over polynomial orders from 0 through max_order for order in range(max_order+1): X = make_design_matrix(x, order) this_theta = ordinary_least_squares(X, y) theta_hat[order] = this_theta return theta_hat ###Output _____no_output_____ ###Markdown Model SelectionWe now have multiple choices for which model to use for a given problem: we could use linear regression, order 2 polynomial regression, order 3 polynomial regression, etc. As we saw in Tutorial 6, different models will have different quality of predictions, both on the training data and on the test data. We need to be able to choose between models without looking at any test data. In fact, we should never look at or use the test data in any way when modeling. If we do, we would then be unable to report a true indication of how well the model generalizes to new data. We will cover two different methods for model selection in the next two sections. We will explore both methods using the same train/test data and polynomial regression models as in Tutorial 6 (generated below). ###Code # You've seen this code before! ### Generate training data np.random.seed(0) n_samples = 50 x_train = np.random.uniform(-2, 2.5, n_samples) # sample from a uniform distribution over [-2, 2.5) noise = np.random.randn(n_samples) # sample from a standard normal distribution y_train = x_train**2 - x_train - 2 + noise ### Generate testing data n_samples = 20 x_test = np.random.uniform(-3, 3, n_samples) # sample from a uniform distribution over [-2, 2.5) noise = np.random.randn(n_samples) # sample from a standard normal distribution y_test = x_test**2 - x_test - 2 + noise ### Fit polynomial regression models max_order = 5 theta_hat = solve_poly_reg(x_train, y_train, max_order) ###Output _____no_output_____ ###Markdown Akaike's Information Criterion (AIC)In order to choose the best model for a given problem, we can ask how likely the data is under a given model. We want to choose a model that assigns high probability to the data. A commonly used method for model selection that uses this approach is **Akaike’s Information Criterion (AIC)**.Essentially, AIC estimates how much information would be lost if the model predictions were used instead of the true data (the relative information value of the model). We compute the AIC for each model and choose the model with the lowest AIC. Note that AIC only tells us relative qualities, not absolute - we do not know from AIC how good our model is independent of others.AIC strives for a good tradeoff between overfitting and underfitting by taking into account the complexity of the model and the information lost. AIC is calculated as:$$ AIC = 2K - 2 log(L)$$where K is the number of parameters in your model and L is the likelihood that the model could have produced the output data. AIC for Polynomial Regression Now we know what AIC is, we want to use it to pick between our polynomial regression models. We haven't been thinking in terms of likelihoods though - so how will we calculate L? 
As we saw in Tutorial 2, there is a link between mean squared error and the likelihood estimates for linear regression models that we can take advantage of. *Derivation time!*We start with our formula for AIC from above:$$ AIC = 2k - 2 log L $$For a model with normal errors, we can use the log likelihood of the normal distribution:$$ \log L = -\frac{n}{2} \log(2 \pi) -\frac{n}{2}log(\sigma^2) - \sum_i^n \frac{1}{2 \sigma^2} (y_i - \tilde y_i)^2$$We can drop the first and last terms as both are constants and we're only assessing relative information with AIC. Once we drop those terms and incorporate into the AIC formula we get:$$AIC = 2k + nlog(\sigma^2)$$We can replace $\sigma^2$ with the computation for variance (the sum of squared errors divided by number of samples). Thus, we end up with the following formula for AIC for linear and polynomial regression:$$ AIC = 2K + n log(\frac{SSE}{n})$$where k is the number of parameters, n is the number of samples, and SSE is the summed squared error. Exercise: Compute and compare AIC ###Code AIC = np.zeros((max_order+1)) for order in range(0, max_order+1): # Compute predictions for this model X_design = make_design_matrix(x_train, order) y_hat = np.dot(X_design, theta_hat[order]) ##################################################################################################### ## TODO for students: Compute AIC for this order polynomial regression model # 1) Compute sum of squared errors given prediction y_hat and y_train (SSE in formula above) # 2) Identify number of parameters in this model (K in formula above) # 3) Compute AIC (call this_AIC) according to formula above ##################################################################################################### # Compute SSE residuals = y_hat - y_train sse = np.sum(residuals ** 2) # Get K K = len(theta_hat[order]) # Compute AIC AIC[order] = 2*K + len(residuals)*np.log(sse / len(residuals)) with plt.xkcd(): plt.bar(range(max_order+1), AIC); plt.ylabel('AIC') plt.xlabel('polynomial order') plt.title('comparing polynomial fits') plt.show() print(sse / len(residuals), np.mean(residuals ** 2)) # to_remove solution AIC = np.zeros((max_order+1)) for order in range(0, max_order+1): # Compute predictions for this model X_design = make_design_matrix(x_train, order) y_hat = np.dot(X_design, theta_hat[order]) # Compute SSE residuals = y_train - y_hat sse = np.sum(residuals ** 2) # Get K K = len(theta_hat[order]) # Compute AIC AIC[order] = 2*K + n_samples * np.log(sse/n_samples) with plt.xkcd(): plt.bar(range(max_order+1), AIC); plt.ylabel('AIC') plt.xlabel('polynomial order') plt.title('comparing polynomial fits') plt.show() ###Output _____no_output_____ ###Markdown Which model would we choose based on AIC? Cross ValidationAIC is just one method for model selection - another commonly used method is **cross validation**. AIC gives a measure of how likely the training data is given the model. Cross-validation takes a different approach to the model selection problem and asks how well the model predicts new data that it hasn't seen yet. Instead of looking at test data, we want to use held-out data set that will not be used for the final evaluation, **validation data**. We often have a limited amount of data though (especially in neuroscience), so we do not want to further reduce our potential training data by reassigning some as validation. Luckily, we can use **k-fold cross-validation**! 
In k-fold cross validation, we divide up the data into k subsets, train a model on k-1 subsets, and compute error on the held-out subset (our validation data). In total, we train k instances of each model. Each of these k instances has a different subset excluded from fitting and labeled as validation. We then average the error of each of the k trained models on its validation subset - this is the validation error of this model type. To make this explicit, let's say we have 1000 samples of training data and choose 4-fold cross-validation. Samples 0 - 250 would be subset 1, samples 250 - 500 subset 2, samples 500 - 750 subset 3, and samples 750-1000 subset 4. First, we train an order 3 polynomial regression on subsets 1,2,3 and evaluate on subset 4. Next, we train an order 3 polynomial model on subsets 1,2,4 and evalute on subset 3. We continue until we have 4 instances of a trained order 3 polynomial regression model, each with a different subset as validation data, and average the validation error from each instance.We can now compare the validation error of different models to pick a model that generalizes well to held-out data. We can choose the measure of prediction quality to report error on the validation subsets to suit our purposes. We will use MSE here but we could also use log likelihood of the data and so on. As a final step, we retrain this model on all of the training data (without subset divisions) to get our final model that we will evaluate on test data. This approach allows us to evaluate the quality of predictions on new data without sacrificing any of our precious training data. These steps are summarized in this diagram from SkLearn (https://scikit-learn.org/stable/modules/cross_validation.html) ![Diagram from Sklearn](https://scikit-learn.org/stable/_images/grid_search_workflow.png) Importantly, we need to be very careful when dividing the data into subsets. The validation subset should not be used in any way to fit the model. We should not do any preprocessing (e.g. normalization) before we divide into subsets or the validation subset could influence the training subsets. A lot of false-positives in crossvalidation come from wrongly dividing. An important consideration in the choice of model selection method are the relevant biases. If we just fit using MSE on training data, we will generally find that fits get better as we add more parameters because the model will overfit the data, as we saw in Tutorial 6. When using cross-validation, the bias is the other way round. Models with more parameters are more affected by noise so cross-validation will generally prefer models with fewer parameters. Exercise: implement cross-validationGiven our set of models to evaluate (polynomial regression models with orders 0 through 5), we will use cross-validation to determine which model has the best predictions on new data according to MSE. In this code, we split the data into 10 subsets using `Kfold` (from `sklearn.model_selection`). `KFold` handles cross-validation subset splitting and train/val assignments. In particular, the `Kfold.split` method returns an iterator which we can loop through. On each loop, this iterator assigns a different subset as validation and returns new training and validation indices with which to split the data. We will loop through the 10 train/validation splits and fit several different polynomial regression models (with different orders) for each split. 
You will need to use the `solve_poly_reg` method from Tutorial 5 (already implemented in this notebook). ###Code def cross_validate(x_train,y_train,max_order,n_splits): # Initialize the split method kfold_iterator = KFold(n_splits) # Initialize np array mse values for all models for each split mse_all = np.zeros((n_splits, max_order+1)) for i_split, (train_indices, val_indices) in enumerate(kfold_iterator.split(x_train)): # Split up the overall training data into cross-validation training and validation sets x_cv_train = x_train[train_indices] y_cv_train = y_train[train_indices] x_cv_val = x_train[val_indices] y_cv_val = y_train[val_indices] ############################################################################# ## TODO for students: Fill in missing ... in code below to choose which data ## to fit to and compute MSE for ############################################################################# # Fit models theta_hat = solve_poly_reg(x_cv_train, y_cv_train, max_order) # Compute MSE mse_this_split = np.zeros((max_order+1)) for order in range(0, max_order+1): X_design = make_design_matrix(x_cv_val, order) y_hat = X_design @ theta_hat[order] mse_this_split[order] = np.mean((y_hat - y_cv_val)**2) mse_all[i_split] = mse_this_split #comment this line once you've filled in the function #raise NotImplementedError("Student excercise: implement cross-validation") return mse_all # to_remove solution def cross_validate(x_train,y_train,max_order,n_splits): # Initialize the split method kfold_iterator = KFold(n_splits) # Initialize np array mse values for all models for each split mse_all = np.zeros((n_splits, max_order+1)) for i_split, (train_indices, val_indices) in enumerate(kfold_iterator.split(x_train)): # Split up the overall training data into cross-validation training and validation sets x_cv_train = x_train[train_indices] y_cv_train = y_train[train_indices] x_cv_val = x_train[val_indices] y_cv_val = y_train[val_indices] # Fit models theta_hat = solve_poly_reg(x_cv_train, y_cv_train, max_order) # Compute MSE mse_this_split = np.zeros((max_order+1)) for order in range(0, max_order+1): X_design= make_design_matrix(x_cv_val, order) y_hat = np.dot(X_design, theta_hat[order]) mse_this_split[order] = np.mean((y_cv_val - y_hat) ** 2) mse_all[i_split] = mse_this_split #comment this line once you've filled in the function #raise NotImplementedError("Student excercise: implement cross-validation") return mse_all ###Output _____no_output_____ ###Markdown Use the following code to visualize the cross-validated MSE. Which polynomial order do you think is a better model of the data? ###Code max_order = 5 n_splits = 10 mse_all = cross_validate(x_train,y_train,max_order,n_splits) plt.figure() plt.boxplot(mse_all, labels=np.arange(0,max_order+1)) plt.xlabel('Polynomial Order') plt.ylabel('Validation MSE') plt.title(f'Validation MSE over {n_splits} splits of the data'); ###Output _____no_output_____
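###Markdown As a sanity check on the hand-written `cross_validate` above, the same model-selection experiment can be run with scikit-learn's `PolynomialFeatures`/`LinearRegression` pipeline and `cross_val_score`. The sketch below is self-contained: it regenerates training data with the same recipe used earlier in this tutorial rather than reusing `x_train`/`y_train`, and it reports the mean 10-fold validation MSE for polynomial orders 1 through 5 (order 0, the constant model, is skipped here): ###Code
import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold, cross_val_score

# same data-generating recipe as the training set above
np.random.seed(0)
n_samples = 50
x = np.random.uniform(-2, 2.5, n_samples)
y = x ** 2 - x - 2 + np.random.randn(n_samples)

max_order = 5
cv = KFold(n_splits=10)

for order in range(1, max_order + 1):
    model = make_pipeline(PolynomialFeatures(degree=order, include_bias=False),
                          LinearRegression())
    # cross_val_score returns negative MSE under this scoring name, so flip the sign
    scores = cross_val_score(model, x[:, None], y,
                             scoring='neg_mean_squared_error', cv=cv)
    print(f"order {order}: mean validation MSE = {-scores.mean():.3f}")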
notebooks/pic/.ipynb_checkpoints/rollover_plan-Copy1-checkpoint.ipynb
###Markdown Rollover Planner ###Code %matplotlib inline %load_ext autoreload %autoreload 2 import os import inspect currentdir = os.path.dirname(os.path.abspath( inspect.getfile(inspect.currentframe()))) parentdir = os.path.dirname(currentdir) os.sys.path.insert(1, parentdir+'/src') savepath = parentdir + '/data/' from visualizer import VisualModel from simulator import HumanoidSimulator from simple_planner import SimpleKneePlanner import pinocchio print(pinocchio.__version__) import numpy as np pinocchio.switchToNumpyMatrix() ###Output 2.5.6 ###Markdown 1. Load model $x_0$, $n_q$, $n_v$, $n_a$ ###Code # Define simulation steps horizon_length = 5000 time_step = 1e-3 # Define control trajectory steps ctrl_time_step = 8e-3 # Load pinocchio model m = VisualModel(display=False) x0, nq, nv, na = m.x0, m.nq, m.nv, m.na lower, upper = m.getLimit() torqueLower, torqueUpper = m.getTorqueLimit() # print(m.q0) # [-0.02388] [-0. ] [-0.15591] # [ 0.00252] [-0. ] [ 0.35367] # print(0.00252-(-0.02388)) # print(0.35367-(-0.15591)) # model_state = m.model.defaultState # from visualizer import ReferenceState # state = ReferenceState(model_state) # state.value['l_hip_y'] += np.pi/2 # state.weight['l_hip_y'] = 100. # state.update() # state1 = ReferenceState(model_state) # state1.value = state.value # state1.update() ###Output _____no_output_____ ###Markdown 2. Simple knee trajectory to initiate robot's movement ###Code simpleKneePlanner = SimpleKneePlanner(x0, nq, nv, na, horizon_length) kneeTraj = simpleKneePlanner.forward() ###Output _____no_output_____ ###Markdown 3. Simulate static trajectory to obtain handLength and initial pose unplanned trajecotry ###Code s = HumanoidSimulator(horizon_length, display=False, timeStep=time_step, view='left') s.initPose(x0, nq, nv, na) _, _, _, _,_, _, x1, handLength, timeLength, initCoM, initCoMv,initKneeAngle, peArr, keArr = s.simulate(m, kneeTraj, kneeTraj) ctrl_horizon_length = timeLength//int(ctrl_time_step/time_step) # print('ctrl_length:%d'%ctrl_horizon_length) # print(ctrl_time_step, time_step) # print(initCoM) # print(initCoMv) # print(initKneeAngle) # print(handLength) #ctrl_horizon_length-=9 from simulator import HumanoidSimulator from simple_planner import SimpleHipPlanner simpleHipPlanner = SimpleHipPlanner(x1, nq, nv, na, 100) tauTraj = simpleHipPlanner.forward() ss3 = HumanoidSimulator(horizon_length, display=False,timeStep=time_step, view='side') ss3.initPose(x0, nq, nv, na) forceArr, comArr, posArr, torqueArr, qArr, forcePose, tauArr, _, _, _, _ = ss3.simulateOptTraj(m, kneeTraj, tauTraj,ctrlTimeStep=ctrl_time_step) rollover_hardware_simple_traj = ss3.toHardwareTraj(posArr) np.savetxt(savepath+"rollover_value_simple.csv", rollover_hardware_simple_traj, delimiter=",") # ss3.downloadTraj(savepath+"rollover_value_simple.csv") ###Output (20, 5000) (20, 1667) ###Markdown 4. Optimize rollover trajectory show handLength: posture difference between initial posture and final posture ###Code # ctrl_horizon_length -= 10 print(handLength) # handLength[1] = 0. 
print(handLength) from simple_planner import SimpleKneePlanner from rollover_planner import RolloverPlanner rolloverPlanner = RolloverPlanner(x1, nq, nv, na, ctrl_horizon_length, contact_index=0, timeStep=ctrl_time_step, display=False) tauRolloverTraj = rolloverPlanner.forward(m, handLength) tauRolloverTraj_index = rolloverPlanner.contact_index # rolloverPlanner.saveTraj(np.matrix(tauRolloverTraj).T, savepath+'value.csv') # rolloverPlanner.saveSwConfig(savepath+'equations.txt') print(ctrl_horizon_length) print(tauRolloverTraj.shape) ###Output /home/jack/repos/rollover/src/humanoid.py:445: UserWarning: Deprecated. Use id xref = crocoddyl.FrameTranslation(i.frame, i.oMf.translation) /home/jack/repos/rollover/src/humanoid.py:445: UserWarning: Deprecated. Use placement. xref = crocoddyl.FrameTranslation(i.frame, i.oMf.translation) /home/jack/repos/rollover/src/humanoid.py:447: UserWarning: Deprecated. Use id costModel.addCost(self.rmodel.frames[i.frame].name + "_footTrack", footTrack, 1e6) /home/jack/repos/rollover/src/humanoid.py:585: UserWarning: Deprecated. Use id xref = crocoddyl.FrameTranslation(i.frame, i.oMf.translation) /home/jack/repos/rollover/src/humanoid.py:585: UserWarning: Deprecated. Use placement. xref = crocoddyl.FrameTranslation(i.frame, i.oMf.translation) /home/jack/repos/rollover/src/humanoid.py:587: UserWarning: Deprecated. Use id costModel.addCost(self.rmodel.frames[i.frame].name + "_footTrack", footTrack, 1e6) /home/jack/repos/rollover/src/humanoid.py:765: UserWarning: Deprecated. Use id xref = crocoddyl.FrameTranslation(i.frame, i.oMf.translation) /home/jack/repos/rollover/src/humanoid.py:765: UserWarning: Deprecated. Use placement. xref = crocoddyl.FrameTranslation(i.frame, i.oMf.translation) /home/jack/repos/rollover/src/humanoid.py:767: UserWarning: Deprecated. Use id costModel.addCost(self.rmodel.frames[i.frame].name + "_footTrack", footTrack, 1e6) /home/jack/repos/rollover/src/humanoid.py:451: UserWarning: Deprecated. Use id costModel.addCost(self.rmodel.frames[torsoRotationFrame.frame].name+'_torsoTrack',torsoTrack,1e5) /home/jack/repos/rollover/src/humanoid.py:463: UserWarning: Deprecated. Use id xref = crocoddyl.FrameTranslation(fp.frame, zero(3)) /home/jack/repos/rollover/src/humanoid.py:465: UserWarning: Deprecated. Use id costModel.addCost(self.rmodel.frames[fp.frame].name + "_collision", collisionAvoid, 1e6) ###Markdown Animate solution in Pinocchio ###Code # rolloverPlanner.displayTraj(m) ###Output _____no_output_____ ###Markdown 5. Simulate optimal trajectory ###Code from simulator import HumanoidSimulator from simple_planner import SimpleRolloverTrajWrapper finish_ctrl_horizon_length = 150 simpleRolloverTrajWrapper = SimpleRolloverTrajWrapper(x0, nq, nv, na, finish_ctrl_horizon_length) newtauRolloverTraj = simpleRolloverTrajWrapper.forward(tauRolloverTraj) # Simulate optimal rollover trajectory ss = HumanoidSimulator(horizon_length, display=True,timeStep=time_step, view='side') ss.initPose(x0, nq, nv, na) forceArr, comArr, posArr, torqueArr, qArr, forcePose, tauArr, peArr, keArr, contactTime, forceKnot = ss.simulateOptTraj(m, kneeTraj, newtauRolloverTraj, ctrlTimeStep=ctrl_time_step, speed=2.) 
# Plot potential energy and kinematic energy from utils import plotter meArr = peArr + keArr eArr = np.vstack([peArr, keArr]) eArr = np.vstack([eArr, meArr]) eArr = eArr[:,2000:2500] contactTime = 2400 drawer1 = plotter(eArr, v_line=[contactTime-2000], timeStep = 1e-3, label=['potential energy', 'kinetic energy', 'mechanical energy'], xlabel='t (s)', ylabel='energy (J)', fileName=savepath+'energy.png') drawer1.multiTimePlot() ###Output plot plot plot ###Markdown Compare control and simulation posture delay ###Code # Save trajectory to hardware print(np.shape(posArr)) rollover_hardware_traj = ss.toHardwareTraj(posArr) # bimanual_hardware_traj = posArr print(np.shape(rollover_hardware_traj)) np.savetxt(savepath+"rollover_value.csv", rollover_hardware_traj, delimiter=",") np.savetxt(savepath+"rollover_value_verify.csv", rollover_hardware_traj.T, delimiter=",") # ss.downloadTraj(savepath+"rollover_value.csv") # Calculate summit torque np.savetxt(savepath+"rollover_torque.csv", torqueArr, delimiter=",") len = np.shape(torqueArr)[1] currentArr = np.zeros([20, len]) def t2c28(x): # return math.sqrt(x) return 0.9286*x-0.05 def t2c64(x): return 0.875*x+0.1375 def t2c106(x): return 0.833*x-0.75 t2c28_v = np.vectorize(t2c28) t2c64_v = np.vectorize(t2c64) t2c106_v = np.vectorize(t2c106) # l_hip_y currentArr[0,:] = 4.1* np.ones([len]) currentArr[10,:] = 4.1* np.ones([len]) for i in range(1, 6): currentArr[i,:] = 5.2* np.ones([len]) if i+10 == 11: currentArr[i+10,:] = 5.2* np.ones([len]) else: currentArr[i+10,:] = np.absolute(t2c64_v(torqueArr[i+10,:])) for i in range(6, 8): currentArr[i,:] = np.absolute(t2c64_v(torqueArr[i,:])) currentArr[i+10,:] = np.absolute(t2c64_v(torqueArr[i+10,:])) for i in range(8, 10): currentArr[i,:] = np.absolute(t2c28_v(torqueArr[i,:])) currentArr[i+10,:] = np.absolute(t2c28_v(torqueArr[i+10,:])) currentSum = np.sum(currentArr, axis=0) currentSumArr = np.zeros([1,len]) currentSumArr[0,:] = currentSum print(np.max(currentSum),np.min(currentSum)) from utils import plotter drawer1 = plotter(currentSumArr, timeStep = 1e-3, label=['current'], xlabel='t (s)', ylabel='current (A)', fileName=savepath+'current.png') drawer1.timePlot() ###Output _____no_output_____ ###Markdown visualize contact pose ###Code m.visualizeConfig(forcePose) ###Output _____no_output_____ ###Markdown Plot simulated resultdo following command before screen capture`sudo update-alternatives --config python3` ###Code posInfo = [] posInfo.append(posArr) posInfo.append(lower) posInfo.append(upper) torqueInfo = [] torqueInfo.append(torqueArr) torqueInfo.append(torqueLower) torqueInfo.append(torqueUpper) cmpInfo = [] cmpInfo.append(posArr) cmpInfo.append(tauArr) cmpInfo.append(lower) cmpInfo.append(upper) ss.plot(forceArr,comArr,posInfo, torqueInfo,savepath, time_step, cmpInfo=cmpInfo) from simulator import HumanoidSimulator from simple_planner import SimpleRolloverTrajWrapper ###Output _____no_output_____ ###Markdown rollover trajectory replay ###Code # replay encoder trajectory ENCODER_DIR=savepath + '09_25_1/desired_trajectory.csv' sss = HumanoidSimulator(horizon_length, display=True,timeStep=time_step, view='side') sss.initPose(x0, nq, nv, na) forceArr, comArr, posArr, torqueArr, qArr, forcePose, tauArr, peArr, keArr, contactTime, motorEArr, velArr= sss.replayEncoder(m,ENCODER_DIR, DIR=savepath) meArr = peArr + keArr np.savetxt(savepath+"09_25_1/force_array_rollover.csv", forceArr.T, delimiter=",") np.savetxt(savepath+"09_25_1/com_array_rollover.csv", comArr.T, delimiter=",") 
np.savetxt(savepath+"09_25_1/motor_energy_array_rollover.csv", motorEArr.T, delimiter=",") np.savetxt(savepath+"09_25_1/mechanical_energy_array_rollover.csv", meArr.T, delimiter=",") ###Output _____no_output_____ ###Markdown bimanual trajectory replay ###Code # replay encoder trajectory ENCODER_DIR=savepath + '09_25_2/desired_trajectory.csv' sss = HumanoidSimulator(horizon_length, display=False,timeStep=time_step, view='side') sss.initPose(x0, nq, nv, na) forceArr, comArr, posArr, torqueArr, qArr, forcePose, tauArr, peArr, keArr, contactTime, motorEArr, velArr = sss.replayEncoder(m,ENCODER_DIR, DIR=savepath) meArr = peArr + keArr np.savetxt(savepath+"09_25_2/force_array_bimanual.csv", forceArr.T, delimiter=",") np.savetxt(savepath+"09_25_2/com_array_bimanual.csv", comArr.T, delimiter=",") np.savetxt(savepath+"09_25_2/motor_energy_array_bimanual.csv", motorEArr.T, delimiter=",") np.savetxt(savepath+"09_25_2/mechanical_energy_array_bimanual.csv", meArr.T, delimiter=",") # Plot potential energy and kinematic energy from utils import plotter eArr = np.vstack([peArr, keArr]) eArr = np.vstack([eArr, meArr]) eArr = np.vstack([eArr, motorEArr]) # print(peArr[0,forceKnot], keArr[0,forceKnot]) # print(peArr[0,contactTime-1], keArr[0,contactTime-1]) # eArr = eArr[:,2000:2500] drawer1 = plotter(eArr, v_line=[contactTime], timeStep = 1e-3, label=['potential energy', 'kinetic energy', 'mechanical energy', 'motor injection energy'], xlabel='t (s)', ylabel='energy (J)', fileName=savepath+'energy.png') drawer1.multiTimePlot() posInfo = [] posInfo.append(velArr) posInfo.append(lower) posInfo.append(upper) torqueInfo = [] torqueInfo.append(torqueArr) torqueInfo.append(torqueLower) torqueInfo.append(torqueUpper) cmpInfo = [] cmpInfo.append(posArr) cmpInfo.append(tauArr) cmpInfo.append(lower) cmpInfo.append(upper) sss.plot(forceArr,comArr,posInfo, torqueInfo,savepath, time_step, cmpInfo=cmpInfo) ###Output _____no_output_____ ###Markdown Generate Solidwork config files ###Code # q = qArr[:,1800] # m.visualizeConfig(q) # np.savetxt(savepath+"config_rollover.csv", q, delimiter=",") # from simple_planner import SimpleKneePlanner # from rollover_planner import RolloverPlanner # print(rolloverPlanner.x[:,-1]) # print(x0.T.tolist()[0]) # rolloverPlanner.saveConfig(x0.T.tolist()[0],savepath+'kneeling_config.xlsx') print(x0.shape) # m.visualizeConfig(x0[:nq]) # rolloverPlanner.readConfig(savepath+'config.xlsx') # from pinocchio.rpy import matrixToRpy, rpyToMatrix, rotate # m = rotate('z', np.pi /3).dot(rotate('y', np.pi / 5)).dot(rotate('x', np.pi / 7)) # print(np.pi/3,np.pi/5,np.pi/7) # print(m) # print(matrixToRpy(m)) # rpy = np.array(list(range(3))) * np.pi / 2 # rpyToMatrix(rpy) # matrixToRpy(rpyToMatrix(rpy)) # matrixToRpy(m) # rpyToMatrix(matrixToRpy(m)) # from visualizer import VisualModel # pinocchio.switchToNumpyMatrix() # v = VisualModel(display=True) # x = v.q0.T.tolist()[0] # print(rolloverPlanner.x[:,-1]) # rolloverPlanner.saveConfig(x,savepath+'kneeling_config.xlsx') # v.q0 = np.matrix(rolloverPlanner.x[:,-1]).T # v.q0[0] = 0. 
# v.q0[1] = 0 # v.q0[2] = 0 # v.show() # x = v.q0.T.tolist()[0] # rolloverPlanner.saveConfig(x,savepath+'kneeling_config.xlsx') # quat = np.asarray(v.q0[3:7].copy()).tolist() # print(quat) # # Convert quaternion to rpy # vector = np.matrix([0, 0, 0, quat[0][0], quat[1][0], quat[2][0], quat[3][0]]).T # print(vector) # se3 = pinocchio.XYZQUATToSE3(vector) # rpy = matrixToRpy(se3.rotation) # print(rpy) # m = rotate('z', np.pi /2).dot(rotate('y', np.pi / 100)).dot(rotate('x', np.pi /3)) # print(np.pi/3,np.pi/5,np.pi/7) # print(m) # print(matrixToRpy(m)) # n = pinocchio.SE3.Identity() # n.rotation = m # o = pinocchio.SE3ToXYZQUAT(n) # print(o) # # v.q0[3:7]=o[3:7] # # v.q0[0] -= 0.02639 # # v.q0[1] -= 0. # # v.q0[2] -= 0.50958 # v.q0[0] = 0.0 # v.q0[1] = 0. # v.q0[2] = 0.0 # v.show() # v.show() # quat = np.asarray(v.q0[3:7].copy()).tolist() # print(quat) # # Convert quaternion to rpy # vector = np.matrix([0, 0, 0, quat[0][0], quat[1][0], quat[2][0], quat[3][0]]).T # print(vector) # se3 = pinocchio.XYZQUATToSE3(vector) # rpy = matrixToRpy(se3.rotation) # print(rpy) # import crocoddyl # print(crocoddyl.__file__) ###Output _____no_output_____
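###Markdown The torque-to-current conversion above builds three `np.vectorize` wrappers and fills `currentArr` joint by joint. The same per-joint linear maps can be applied in a single broadcasted step by storing one gain and one offset per joint. This is only a sketch under stated assumptions: the coefficients and the per-joint motor assignment are copied by hand from the conversion cell above, and the random `torqueArr` here is a stand-in for the simulated torque trajectory: ###Code
import numpy as np

n_joints, n_steps = 20, 100
torqueArr = np.random.randn(n_joints, n_steps)      # stand-in for the simulated torques

# per-joint linear map current = |gain * torque + offset|, rows ordered as in torqueArr
gain = np.zeros((n_joints, 1))
offset = np.zeros((n_joints, 1))
const_current = np.full((n_joints, 1), np.nan)      # NaN means "use the linear map"

# joints mapped to the two motor coefficient sets used in the cell above
type28 = [8, 9, 18, 19]
type64 = [6, 7, 12, 13, 14, 15, 16, 17]
gain[type28], offset[type28] = 0.9286, -0.05
gain[type64], offset[type64] = 0.875, 0.1375

# joints held at a fixed current in the cell above
const_current[[0, 10]] = 4.1
const_current[[1, 2, 3, 4, 5, 11]] = 5.2

linear = np.abs(gain * torqueArr + offset)
currentArr = np.where(np.isnan(const_current), linear, const_current)
currentSum = currentArr.sum(axis=0)
print(currentSum.max(), currentSum.min())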
code/reproduce_Fig6+S15.ipynb
###Markdown Table of Contents1&nbsp;&nbsp;Import libraries2&nbsp;&nbsp;Load data3&nbsp;&nbsp;Process data3.1&nbsp;&nbsp;Spatial average4&nbsp;&nbsp;Visualize Import libraries ###Code import xarray as xr import xarray.ufuncs as xrf from averaging import xarray_average import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import xclim as xc %reload_ext watermark %watermark --iversions -v -m ###Output xclim 0.15.2 numpy 1.17.3 xarray 0.15.1 seaborn 0.9.0 pandas 1.0.5 CPython 3.6.9 IPython 7.1.1 compiler : GCC 8.4.0 system : Linux release : 4.15.0-112-generic machine : x86_64 processor : x86_64 CPU cores : 8 interpreter: 64bit ###Markdown Load data ###Code inpath = '/mnt/4data/CMAM/0A.daily/' ds = xr.open_dataset(f'{inpath}accelogw/accelogw_daily_CMAM_CMAM30-SD_r1i1p1_19790101-20101231_70hPa.nc') ds ###Output _____no_output_____ ###Markdown Process data ###Code time_mask = (ds.time.dt.month == 12) | (ds.time.dt.month == 1) | (ds.time.dt.month == 2) ds_sel = ds['accelogw'].sel(time = time_mask) sel_dict_ea = dict(lon =slice(110,145), lat=slice(30,48)) sel_dict_hi = dict(lon =slice(70,102.5), lat=slice(20,40)) sel_dict_wa = dict(lon =slice(-125+360,-102.5+360), lat=slice(27.5,52)) sel_dict_ls = {'Himalayas': sel_dict_hi, \ 'West America': sel_dict_wa, \ 'East Asia': sel_dict_ea} weights = xrf.cos(xrf.deg2rad(ds_sel.lat)) ts_ls = [] g_ls = [] for hs_name in sel_dict_ls.keys(): print(hs_name) sel_dict = sel_dict_ls[hs_name] ds_sel_hs = ds_sel.sel(**sel_dict) #ds_sel_hs = xarray_average(ds_sel_hs, dim = ['lat', 'lon'], weights = weights)#.load() #ds_sel_hs.name = 'accelogw' ts = ds_sel_hs.dropna('time').to_series().reset_index() #sys.exit() g_ls += [hs_name]*ts.shape[0] ts_ls.append(ts) df = pd.concat(ts_ls) df['g'] = g_ls df['accelogw'] = df['accelogw']*24*3600 df.head() ###Output Himalayas West America East Asia ###Markdown Spatial average ###Code ts_ls = [] g_ls = [] for hs_name in sel_dict_ls.keys(): sel_dict = sel_dict_ls[hs_name] ds_sel_hs = ds_sel.sel(**sel_dict) ds_sel_hs = xarray_average(ds_sel_hs, dim = ['lat', 'lon'], weights = weights)#.load() ds_sel_hs.name = 'accelogw' ts = ds_sel_hs.dropna('time').to_series().reset_index() g_ls += [hs_name]*ts.shape[0] ts_ls.append(ts) df2 = pd.concat(ts_ls) df2['g'] = g_ls df2['accelogw'] = df2['accelogw']*24*3600 df2.head() ###Output _____no_output_____ ###Markdown Visualize ###Code plt.rcParams.update({'font.size': 18}) # Initialize the FacetGrid object bins = np.arange(-90,5) bins_fit = np.arange(-90,1) #bins = np.arange(-90,10,5) xticks = np.arange(-90,10,10) min_xticks = np.arange(-85,5,10) nh = len(sel_dict_ls) pal = sns.cubehelix_palette(nh, light=.7) # rot=-.25, colors = ["amber", "green", "purple"] # , "greyish", pal = sns.xkcd_palette(colors) sns.set_style("ticks", {"xtick.major.size": 10, "ytick.major.size": 10}) g = sns.FacetGrid(df, row="g", hue="g", aspect=5, palette=pal) g.map(sns.distplot, "accelogw", norm_hist = True, kde=False, rug=False, bins = bins) g.axes[0][0].axvline(x=-6.66, lw=3, clip_on=False, color = 'k')#pal[0]) g.axes[1][0].axvline(x=-5.07, lw=3, clip_on=False, color = 'k')#pal[1]) g.axes[2][0].axvline(x=-7.13, lw=3, clip_on=False, color = 'k')#pal[2]) quantiles = df.groupby('g').quantile([0.01,0.1])['accelogw'] dist_name = 'exponweib' #dist_name = 'weibull_min' # dist_name = 'lognorm' # # 'gamma' dist = xc.indices.generic.get_dist(dist_name) m_const = -1 for i, hs_name in enumerate(sel_dict_ls.keys()): ax = g.axes[i][0] arr = df2["accelogw"][df2['g'] == hs_name] arr.index.name = 
'time' arr = arr.to_xarray() arr = arr.where(arr < 1).dropna('time') sns.distplot(arr, norm_hist = True, kde=False, rug=False, \ bins = bins, ax = ax) ind = xc.indices.generic.fit(arr*m_const, dist = dist_name).compute() mean, var, skew, kurt = dist.stats(*ind.values, moments='mvsk') textstr = '\n'.join(( r'$\mu=%.2f$' % (np.log(ind.loc['loc']), ), r'$\sigma=%.2f$' % (ind.loc['s'], ))) textstr = '\n'.join(( 'Mean=%.2f' % (mean, ), 'Variance=%.2f' % (var, ), 'Skewness=%.2f' % (skew, ), 'Kurtosis=%.2f' % (kurt, ))) ax.text(0.45, 0.8, textstr, fontsize=14, verticalalignment='top', \ color = 'C0', transform=ax.transAxes) ax.plot(bins_fit,dist.pdf(bins_fit*m_const, *ind.values), color = 'C0') arr = df["accelogw"][df['g'] == hs_name] arr.index.name = 'time' arr = arr.to_xarray() arr = arr.where(arr < 1).dropna('time') ind = xc.indices.generic.fit(arr*-1, dist = dist_name).compute() dist = xc.indices.generic.get_dist(dist_name) ax.plot(bins_fit, dist.pdf(bins_fit*-1, *ind.values), color = pal[i]) mean, var, skew, kurt = dist.stats(*ind.values, moments='mvsk') textstr = '\n'.join(( r'$\mu=%.2f$' % (np.log(ind.loc['loc']), ), r'$\sigma=%.2f$' % (ind.loc['s'], ))) textstr = '\n'.join(( 'Mean=%.2f' % (mean, ), 'Variance=%.2f' % (var, ), 'Skewness=%.2f' % (skew, ), 'Kurtosis=%.2f' % (kurt, ))) ax.text(0.275, 0.8, textstr, fontsize=14, verticalalignment='top', \ color = pal[i], transform=ax.transAxes) ax.axvline(x=quantiles[hs_name][0.01], lw=3, clip_on=False, \ color = 'k', linestyle = 'dashed') ax.axvline(x=quantiles[hs_name][0.1], lw=3, clip_on=False, \ color = 'k', linestyle = 'dashed') ax.tick_params(axis="y",direction="out", pad=10, labelsize = 15) ax.tick_params(axis="x",direction="out", pad=10, labelsize = 15) ax.xaxis.set_minor_locator(plt.MultipleLocator(5)) ax.set_ylim(0,0.9) ax.set_yscale('log') def label(x, color, label): ax = plt.gca() ax.text(0.1, .35, label, fontweight="bold", color=color, ha="left", va="center", transform=ax.transAxes) g.map(label, "accelogw") g.set(xticks=xticks) # Set the subplots to overlap #g.fig.subplots_adjust(hspace=-.25) # Remove axes details that don't play well with overlap g.set_titles("") #g.set(yscale="log") g.axes[nh-1][0].set_xlabel('OGWD [m/s/day]', fontsize = 20) g.axes[nh-2][0].set_ylabel('Occurrence density', fontsize = 20) #plt.savefig(f'OGWD_distribution_hotspots_DJFonly_wfits_{dist_name}.pdf', bbox_inches='tight') ###Output _____no_output_____
projects/mslearn-aml-labs/02-Training_Models.ipynb
###Markdown Training ModelsThe central goal of machine learning is to train predictive models that can be used by applications. In Azure Machine Learning, you can use scripts to train models leveraging common machine learning frameworks like Scikit-Learn, Tensorflow, PyTorch, SparkML, and others. You can run these training scripts as experiments in order to track metrics and outputs - in particular, the trained models. Before You StartBefore you start this lab, ensure that you have completed the *Create an Azure Machine Learning Workspace* and *Create a Compute Instance* tasks in [Lab 1: Getting Started with Azure Machine Learning](./labdocs/Lab01.md). Then open this notebook in Jupyter on your Compute Instance. Connect to Your WorkspaceThe first thing you need to do is to connect to your workspace using the Azure ML SDK.> **Note**: If you do not have a current authenticated session with your Azure subscription, you'll be prompted to authenticate. Follow the instructions to authenticate using the code provided. ###Code import azureml.core from azureml.core import Workspace # Load the workspace from the saved config file ws = Workspace.from_config() print('Ready to use Azure ML {} to work with {}'.format(azureml.core.VERSION, ws.name)) ###Output Ready to use Azure ML 1.6.0 to work with wfml ###Markdown Create a Training ScriptYou're going to use a Python script to train a machine learning model based on the diabetes data, so let's start by creating a folder for the script and data files. ###Code import os, shutil # Create a folder for the experiment files training_folder = 'diabetes-training' os.makedirs(training_folder, exist_ok=True) # Copy the data file into the experiment folder shutil.copy('data/diabetes.csv', os.path.join(training_folder, "diabetes.csv")) ###Output _____no_output_____ ###Markdown Now you're ready to create the training script and save it in the folder. 
###Code %%writefile $training_folder/diabetes_training.py # Import libraries from azureml.core import Run import pandas as pd import numpy as np import joblib from sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegression from sklearn.metrics import roc_auc_score from sklearn.metrics import roc_curve # Get the experiment run context run = Run.get_context() # load the diabetes dataset print("Loading Data...") diabetes = pd.read_csv('diabetes.csv') # Separate features and labels X, y = diabetes[['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree','Age']].values, diabetes['Diabetic'].values # Split data into training set and test set X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0) # Set regularization hyperparameter reg = 0.01 # Train a logistic regression model print('Training a logistic regression model with regularization rate of', reg) run.log('Regularization Rate', np.float(reg)) model = LogisticRegression(C=1/reg, solver="liblinear").fit(X_train, y_train) # calculate accuracy y_hat = model.predict(X_test) acc = np.average(y_hat == y_test) print('Accuracy:', acc) run.log('Accuracy', np.float(acc)) # calculate AUC y_scores = model.predict_proba(X_test) auc = roc_auc_score(y_test,y_scores[:,1]) print('AUC: ' + str(auc)) run.log('AUC', np.float(auc)) # Save the trained model in the outputs folder os.makedirs('outputs', exist_ok=True) joblib.dump(value=model, filename='outputs/diabetes_model.pkl') run.complete() ###Output Writing diabetes-training/diabetes_training.py ###Markdown Use an Estimator to Run the Script as an ExperimentYou can run experiment scripts using a **RunConfiguration** and a **ScriptRunConfig**, or you can use an **Estimator**, which abstracts both of these configurations in a single object.In this case, we'll use a generic **Estimator** object to run the training experiment. Note that the default environment for this estimator does not include the **scikit-learn** package, so you need to explicitly add that to the configuration. The conda environment is built on-demand the first time the estimator is used, and cached for future runs that use the same configuration; so the first run will take a little longer. On subsequent runs, the cached environment can be re-used so they'll complete more quickly. 
###Code from azureml.train.estimator import Estimator from azureml.core import Experiment # Create an estimator estimator = Estimator(source_directory=training_folder, entry_script='diabetes_training.py', compute_target='local', conda_packages=['scikit-learn'] ) # Create an experiment experiment_name = 'diabetes-training' experiment = Experiment(workspace = ws, name = experiment_name) # Run the experiment based on the estimator run = experiment.submit(config=estimator) run.wait_for_completion(show_output=True) ###Output RunId: diabetes-training_1591406467_640e431f Web View: https://ml.azure.com/experiments/diabetes-training/runs/diabetes-training_1591406467_640e431f?wsid=/subscriptions/6119d7ed-3cde-4ffe-90b1-fa2f5a2b80b1/resourcegroups/learn-25063b15-6014-43e8-9e49-2bb2d3a3c984/workspaces/wfml Streaming azureml-logs/60_control_log.txt ========================================= Streaming log file azureml-logs/60_control_log.txt Starting the daemon thread to refresh tokens in background for process with pid = 29377 Running: ['/bin/bash', '/tmp/azureml_runs/diabetes-training_1591406467_640e431f/azureml-environment-setup/docker_env_checker.sh'] Materialized image not found on target: azureml/azureml_c86bf9d3c3b717eea982d145df7cbfc0 Logging experiment preparation status in history service. Running: ['/bin/bash', '/tmp/azureml_runs/diabetes-training_1591406467_640e431f/azureml-environment-setup/docker_env_builder.sh'] Running: ['docker', 'build', '-f', 'azureml-environment-setup/Dockerfile', '-t', 'azureml/azureml_c86bf9d3c3b717eea982d145df7cbfc0', '.'] Sending build context to Docker daemon 955.4kB Step 1/14 : FROM mcr.microsoft.com/azureml/base:intelmpi2018.3-ubuntu16.04@sha256:a1b514f3ba884b9a7695cbba5638933ddaf222e8ce3e8c81e8cdf861679abb05 sha256:a1b514f3ba884b9a7695cbba5638933ddaf222e8ce3e8c81e8cdf861679abb05: Pulling from azureml/base a1298f4ce990: Pulling fs layer 04a3282d9c4b: Pulling fs layer 9b0d3db6dc03: Pulling fs layer 8269c605f3f1: Pulling fs layer 6504d449e70c: Pulling fs layer 4e38f320d0d4: Pulling fs layer b0a763e8ee03: Pulling fs layer 11917a028ca4: Pulling fs layer a6c378d11cbf: Pulling fs layer 6cc007ad9140: Pulling fs layer 6c1698a608f3: Pulling fs layer 6504d449e70c: Waiting 8269c605f3f1: Waiting b0a763e8ee03: Waiting 6cc007ad9140: Waiting 11917a028ca4: Waiting 9b0d3db6dc03: Verifying Checksum 9b0d3db6dc03: Download complete 04a3282d9c4b: Download complete 8269c605f3f1: Download complete a1298f4ce990: Download complete 4e38f320d0d4: Verifying Checksum 4e38f320d0d4: Download complete b0a763e8ee03: Verifying Checksum b0a763e8ee03: Download complete 6504d449e70c: Verifying Checksum 6504d449e70c: Download complete 6cc007ad9140: Verifying Checksum 6cc007ad9140: Download complete 6c1698a608f3: Download complete 11917a028ca4: Verifying Checksum 11917a028ca4: Download complete a1298f4ce990: Pull complete 04a3282d9c4b: Pull complete 9b0d3db6dc03: Pull complete 8269c605f3f1: Pull complete a6c378d11cbf: Verifying Checksum a6c378d11cbf: Download complete 6504d449e70c: Pull complete 4e38f320d0d4: Pull complete b0a763e8ee03: Pull complete 11917a028ca4: Pull complete a6c378d11cbf: Pull complete 6cc007ad9140: Pull complete 6c1698a608f3: Pull complete Digest: sha256:a1b514f3ba884b9a7695cbba5638933ddaf222e8ce3e8c81e8cdf861679abb05 Status: Downloaded newer image for mcr.microsoft.com/azureml/base:intelmpi2018.3-ubuntu16.04@sha256:a1b514f3ba884b9a7695cbba5638933ddaf222e8ce3e8c81e8cdf861679abb05 ---> 93a72e6bd1ce Step 2/14 : USER root ---> Running in 76c31127942d Removing intermediate container 
76c31127942d ---> 2428e0caac04 Step 3/14 : RUN mkdir -p $HOME/.cache ---> Running in 27bd934e8590 Removing intermediate container 27bd934e8590 ---> b2943df10ba1 Step 4/14 : WORKDIR / ---> Running in 402d961dc2d1 Removing intermediate container 402d961dc2d1 ---> a33e617b8158 Step 5/14 : COPY azureml-environment-setup/99brokenproxy /etc/apt/apt.conf.d/ ---> d0c78bae9fbe Step 6/14 : RUN if dpkg --compare-versions `conda --version | grep -oE '[^ ]+$'` lt 4.4.11; then conda install conda==4.4.11; fi ---> Running in 0267c3d80d0e Removing intermediate container 0267c3d80d0e ---> be590e44c068 Step 7/14 : COPY azureml-environment-setup/mutated_conda_dependencies.yml azureml-environment-setup/mutated_conda_dependencies.yml ---> 2b8af5917af1 Step 8/14 : RUN ldconfig /usr/local/cuda/lib64/stubs && conda env create -p /azureml-envs/azureml_4b824bcb98517d791c41923f24d65461 -f azureml-environment-setup/mutated_conda_dependencies.yml && rm -rf "$HOME/.cache/pip" && conda clean -aqy && CONDA_ROOT_DIR=$(conda info --root) && rm -rf "$CONDA_ROOT_DIR/pkgs" && find "$CONDA_ROOT_DIR" -type d -name __pycache__ -exec rm -rf {} + && ldconfig ---> Running in 72b0e22ab36f Solving environment: ...working... done  ==> WARNING: A newer version of conda exists. <== current version: 4.5.11 latest version: 4.8.3 Please update conda by running $ conda update -n base -c defaults conda pip-20.0.2 | 1.9 MB | ########## | 100%  six-1.14.0 | 27 KB | ########## | 100%  mkl-service-2.3.0 | 208 KB | ########## | 100%  certifi-2020.4.5.1 | 159 KB | ########## | 100%  numpy-base-1.18.1 | 5.2 MB | ########## | 100%  blas-1.0 | 6 KB | ########## | 100%  libgfortran-ng-7.3.0 | 1.3 MB | ########## | 100%  numpy-1.18.1 | 5 KB | ########## | 100%  libgcc-ng-9.1.0 | 8.1 MB | ########## | 100%  scipy-1.4.1 | 18.9 MB | ########## | 100%  mkl_fft-1.0.15 | 173 KB | ########## | 100%  zlib-1.2.11 | 120 KB | ########## | 100%  tk-8.6.8 | 3.1 MB | ########## | 100%  sqlite-3.23.1 | 1.5 MB | ########## | 100%  libedit-3.1 | 171 KB | ########## | 100%  intel-openmp-2020.1 | 940 KB | ########## | 100%  openssl-1.0.2u | 3.1 MB | ########## | 100%  mkl_random-1.1.0 | 369 KB | ########## | 100%  xz-5.2.5 | 438 KB | ########## | 100%  wheel-0.34.2 | 49 KB | ########## | 100%  ncurses-6.0 | 907 KB | ########## | 100%  python-3.6.2 | 27.0 MB | ########## | 100%  libstdcxx-ng-9.1.0 | 4.0 MB | ########## | 100%  setuptools-46.4.0 | 646 KB | ########## | 100%  joblib-0.15.1 | 207 KB | ########## | 100%  scikit-learn-0.22.1 | 7.1 MB | ########## | 100%  mkl-2019.4 | 204.1 MB | ########## | 100%  readline-7.0 | 387 KB | ########## | 100%  libffi-3.2.1 | 43 KB | ########## | 100%  ca-certificates-2020 | 132 KB | ########## | 100%  Downloading and Extracting Packages Preparing transaction: ...working... done Verifying transaction: ...working... done Executing transaction: ...working... 
done Collecting azureml-defaults Downloading azureml_defaults-1.6.0-py3-none-any.whl (3.0 kB) Collecting applicationinsights>=0.11.7 Downloading applicationinsights-0.11.9-py2.py3-none-any.whl (58 kB) Collecting werkzeug==0.16.1 Downloading Werkzeug-0.16.1-py2.py3-none-any.whl (327 kB) Collecting flask==1.0.3 Downloading Flask-1.0.3-py2.py3-none-any.whl (92 kB) Collecting azureml-model-management-sdk==1.0.1b6.post1 Downloading azureml_model_management_sdk-1.0.1b6.post1-py2.py3-none-any.whl (130 kB) Collecting azureml-dataprep[fuse]<1.7.0a,>=1.6.2a Downloading azureml_dataprep-1.6.3-py3-none-any.whl (27.8 MB) Collecting gunicorn==19.9.0 Downloading gunicorn-19.9.0-py2.py3-none-any.whl (112 kB) Collecting json-logging-py==0.2 Downloading json-logging-py-0.2.tar.gz (3.6 kB) Collecting configparser==3.7.4 Downloading configparser-3.7.4-py2.py3-none-any.whl (22 kB) Collecting azureml-core~=1.6.0 Downloading azureml_core-1.6.0.post1-py3-none-any.whl (1.3 MB) Collecting click>=5.1 Downloading click-7.1.2-py2.py3-none-any.whl (82 kB) Collecting itsdangerous>=0.24 Downloading itsdangerous-1.1.0-py2.py3-none-any.whl (16 kB) Collecting Jinja2>=2.10 Downloading Jinja2-2.11.2-py2.py3-none-any.whl (125 kB) Requirement already satisfied: numpy>=1.13.0 in /azureml-envs/azureml_4b824bcb98517d791c41923f24d65461/lib/python3.6/site-packages (from azureml-model-management-sdk==1.0.1b6.post1->azureml-defaults->-r /azureml-environment-setup/condaenv.6a6y_tqq.requirements.txt (line 1)) (1.18.1) Collecting python-dateutil>=2.5.3 Downloading python_dateutil-2.8.1-py2.py3-none-any.whl (227 kB) Collecting pandas>=0.20.2 Downloading pandas-1.0.4-cp36-cp36m-manylinux1_x86_64.whl (10.1 MB) Requirement already satisfied: six>=1.10 in /azureml-envs/azureml_4b824bcb98517d791c41923f24d65461/lib/python3.6/site-packages (from azureml-model-management-sdk==1.0.1b6.post1->azureml-defaults->-r /azureml-environment-setup/condaenv.6a6y_tqq.requirements.txt (line 1)) (1.14.0) Collecting adal>=0.4.5 Downloading adal-1.2.4-py2.py3-none-any.whl (55 kB) Collecting dill>=0.2.7.1 Downloading dill-0.3.1.1.tar.gz (151 kB) Collecting requests>=2.17.3 Downloading requests-2.23.0-py2.py3-none-any.whl (58 kB) Collecting pytz>=2017.2 Downloading pytz-2020.1-py2.py3-none-any.whl (510 kB) Collecting liac-arff>=2.1.1 Downloading liac-arff-2.4.0.tar.gz (15 kB) Collecting azure-identity<1.3.0,>=1.2.0 Downloading azure_identity-1.2.0-py2.py3-none-any.whl (58 kB) Collecting cloudpickle>=1.1.0 Downloading cloudpickle-1.4.1-py3-none-any.whl (26 kB) Collecting azureml-dataprep-native<15.0.0,>=14.1.0 Downloading azureml_dataprep_native-14.2.0-cp36-cp36m-manylinux1_x86_64.whl (1.3 MB) Collecting dotnetcore2>=2.1.14 Downloading dotnetcore2-2.1.14-py3-none-manylinux1_x86_64.whl (29.3 MB) Collecting fusepy>=3.0.1; extra == "fuse" Downloading fusepy-3.0.1.tar.gz (11 kB) Collecting msrestazure>=0.4.33 Downloading msrestazure-0.6.3-py2.py3-none-any.whl (40 kB) Collecting ndg-httpsclient Downloading ndg_httpsclient-0.5.1-py3-none-any.whl (34 kB) Collecting azure-mgmt-keyvault>=0.40.0 Downloading azure_mgmt_keyvault-2.2.0-py2.py3-none-any.whl (89 kB) Collecting cryptography!=1.9,!=2.0.*,!=2.1.*,!=2.2.* Downloading cryptography-2.9.2-cp35-abi3-manylinux2010_x86_64.whl (2.7 MB) Collecting backports.tempfile Downloading backports.tempfile-1.0-py2.py3-none-any.whl (4.4 kB) Collecting contextlib2 Downloading contextlib2-0.6.0.post1-py2.py3-none-any.whl (9.8 kB) Collecting pathspec Downloading pathspec-0.8.0-py2.py3-none-any.whl (28 kB) Collecting PyJWT Downloading 
PyJWT-1.7.1-py2.py3-none-any.whl (18 kB) Collecting azure-mgmt-storage>=1.5.0 Downloading azure_mgmt_storage-10.0.0-py2.py3-none-any.whl (532 kB) Collecting azure-mgmt-authorization>=0.40.0 Downloading azure_mgmt_authorization-0.60.0-py2.py3-none-any.whl (82 kB) Collecting pyopenssl Downloading pyOpenSSL-19.1.0-py2.py3-none-any.whl (53 kB) Collecting jmespath Downloading jmespath-0.10.0-py2.py3-none-any.whl (24 kB) Collecting azure-mgmt-network~=10.0 Downloading azure_mgmt_network-10.2.0-py2.py3-none-any.whl (8.6 MB) Collecting azure-graphrbac>=0.40.0 Downloading azure_graphrbac-0.61.1-py2.py3-none-any.whl (141 kB) Collecting SecretStorage Downloading SecretStorage-3.1.2-py3-none-any.whl (14 kB) Collecting azure-common>=1.1.12 Downloading azure_common-1.1.25-py2.py3-none-any.whl (12 kB) Collecting azure-mgmt-resource>=1.2.1 Downloading azure_mgmt_resource-10.0.0-py2.py3-none-any.whl (809 kB) Collecting azure-mgmt-containerregistry>=2.0.0 Downloading azure_mgmt_containerregistry-2.8.0-py2.py3-none-any.whl (718 kB) Collecting ruamel.yaml>0.16.7 Downloading ruamel.yaml-0.16.10-py2.py3-none-any.whl (111 kB) Collecting docker Downloading docker-4.2.1-py2.py3-none-any.whl (143 kB) Collecting jsonpickle Downloading jsonpickle-1.4.1-py2.py3-none-any.whl (36 kB) Collecting urllib3>=1.23 Downloading urllib3-1.25.9-py2.py3-none-any.whl (126 kB) Collecting msrest>=0.5.1 Downloading msrest-0.6.15-py2.py3-none-any.whl (84 kB) Collecting MarkupSafe>=0.23 Downloading MarkupSafe-1.1.1-cp36-cp36m-manylinux1_x86_64.whl (27 kB) Collecting idna<3,>=2.5 Downloading idna-2.9-py2.py3-none-any.whl (58 kB) Requirement already satisfied: certifi>=2017.4.17 in /azureml-envs/azureml_4b824bcb98517d791c41923f24d65461/lib/python3.6/site-packages (from requests>=2.17.3->azureml-model-management-sdk==1.0.1b6.post1->azureml-defaults->-r /azureml-environment-setup/condaenv.6a6y_tqq.requirements.txt (line 1)) (2020.4.5.1) Collecting chardet<4,>=3.0.2 Downloading chardet-3.0.4-py2.py3-none-any.whl (133 kB) Collecting msal-extensions~=0.1.3 Downloading msal_extensions-0.1.3-py2.py3-none-any.whl (9.0 kB) Collecting azure-core<2.0.0,>=1.0.0 Downloading azure_core-1.6.0-py2.py3-none-any.whl (120 kB) Collecting msal<2.0.0,>=1.0.0 Downloading msal-1.3.0-py2.py3-none-any.whl (48 kB) Collecting distro>=1.2.0 Downloading distro-1.5.0-py2.py3-none-any.whl (18 kB) Collecting pyasn1>=0.1.1 Downloading pyasn1-0.4.8-py2.py3-none-any.whl (77 kB) Collecting cffi!=1.11.3,>=1.8 Downloading cffi-1.14.0-cp36-cp36m-manylinux1_x86_64.whl (399 kB) Collecting backports.weakref Downloading backports.weakref-1.0.post1-py2.py3-none-any.whl (5.2 kB) Collecting jeepney>=0.4.2 Downloading jeepney-0.4.3-py3-none-any.whl (21 kB) Collecting ruamel.yaml.clib>=0.1.2; platform_python_implementation == "CPython" and python_version < "3.9" Downloading ruamel.yaml.clib-0.2.0-cp36-cp36m-manylinux1_x86_64.whl (548 kB) Collecting websocket-client>=0.32.0 Downloading websocket_client-0.57.0-py2.py3-none-any.whl (200 kB) Collecting importlib-metadata Downloading importlib_metadata-1.6.1-py2.py3-none-any.whl (31 kB) Collecting requests-oauthlib>=0.5.0 Downloading requests_oauthlib-1.3.0-py2.py3-none-any.whl (23 kB) Collecting isodate>=0.6.0 Downloading isodate-0.6.0-py2.py3-none-any.whl (45 kB) Collecting portalocker~=1.0 Downloading portalocker-1.7.0-py2.py3-none-any.whl (14 kB) Collecting pycparser Downloading pycparser-2.20-py2.py3-none-any.whl (112 kB) Collecting zipp>=0.5 Downloading zipp-3.1.0-py3-none-any.whl (4.9 kB) Collecting oauthlib>=3.0.0 Downloading 
oauthlib-3.1.0-py2.py3-none-any.whl (147 kB) Building wheels for collected packages: json-logging-py, dill, liac-arff, fusepy Building wheel for json-logging-py (setup.py): started Building wheel for json-logging-py (setup.py): finished with status 'done' Created wheel for json-logging-py: filename=json_logging_py-0.2-py3-none-any.whl size=3923 sha256=c94a2b78e2899116662c100f277295e4a753ddae0e8f7b02e11d4de59d359a60 Stored in directory: /root/.cache/pip/wheels/e2/1d/52/535a274b9c2ce7d4064838f2bdb62013801281ef7d7f21e2ee Building wheel for dill (setup.py): started Building wheel for dill (setup.py): finished with status 'done' Created wheel for dill: filename=dill-0.3.1.1-py3-none-any.whl size=78530 sha256=1e9aae5698ee2f689d7dfa567d7dd9e863a4d8d7976b2fa810b82b01104dacb4 Stored in directory: /root/.cache/pip/wheels/09/84/74/d2b4feb9ac9488bc83c475cb2cbe8e8b7d9cea8320d32f3787 Building wheel for liac-arff (setup.py): started Building wheel for liac-arff (setup.py): finished with status 'done' Created wheel for liac-arff: filename=liac_arff-2.4.0-py3-none-any.whl size=13333 sha256=aae20e04a0454503968fb1561e2df77f81ca9f9f530c85db11eb8de154525aa4 Stored in directory: /root/.cache/pip/wheels/ba/2a/e1/6f7be2e2ea150e2486bff64fd6f0670f4f35f4c8f31c819fb8 Building wheel for fusepy (setup.py): started Building wheel for fusepy (setup.py): finished with status 'done' Created wheel for fusepy: filename=fusepy-3.0.1-py3-none-any.whl size=10503 sha256=cea7b7c03b4f73f994453951a1f79b08393685be2f4ddfb790517f525469045b Stored in directory: /root/.cache/pip/wheels/21/5c/83/1dd7e8a232d12227e5410120f4374b33adeb4037473105b079 Successfully built json-logging-py dill liac-arff fusepy Installing collected packages: applicationinsights, werkzeug, click, itsdangerous, MarkupSafe, Jinja2, flask, python-dateutil, pytz, pandas, pycparser, cffi, cryptography, PyJWT, idna, chardet, urllib3, requests, adal, dill, liac-arff, azureml-model-management-sdk, portalocker, msal, msal-extensions, azure-core, azure-identity, cloudpickle, azureml-dataprep-native, distro, dotnetcore2, fusepy, azureml-dataprep, gunicorn, json-logging-py, configparser, oauthlib, requests-oauthlib, isodate, msrest, msrestazure, pyasn1, pyopenssl, ndg-httpsclient, azure-common, azure-mgmt-keyvault, backports.weakref, backports.tempfile, contextlib2, pathspec, azure-mgmt-storage, azure-mgmt-authorization, jmespath, azure-mgmt-network, azure-graphrbac, jeepney, SecretStorage, azure-mgmt-resource, azure-mgmt-containerregistry, ruamel.yaml.clib, ruamel.yaml, websocket-client, docker, zipp, importlib-metadata, jsonpickle, azureml-core, azureml-defaults Successfully installed Jinja2-2.11.2 MarkupSafe-1.1.1 PyJWT-1.7.1 SecretStorage-3.1.2 adal-1.2.4 applicationinsights-0.11.9 azure-common-1.1.25 azure-core-1.6.0 azure-graphrbac-0.61.1 azure-identity-1.2.0 azure-mgmt-authorization-0.60.0 azure-mgmt-containerregistry-2.8.0 azure-mgmt-keyvault-2.2.0 azure-mgmt-network-10.2.0 azure-mgmt-resource-10.0.0 azure-mgmt-storage-10.0.0 azureml-core-1.6.0.post1 azureml-dataprep-1.6.3 azureml-dataprep-native-14.2.0 azureml-defaults-1.6.0 azureml-model-management-sdk-1.0.1b6.post1 backports.tempfile-1.0 backports.weakref-1.0.post1 cffi-1.14.0 chardet-3.0.4 click-7.1.2 cloudpickle-1.4.1 configparser-3.7.4 contextlib2-0.6.0.post1 cryptography-2.9.2 dill-0.3.1.1 distro-1.5.0 docker-4.2.1 dotnetcore2-2.1.14 flask-1.0.3 fusepy-3.0.1 gunicorn-19.9.0 idna-2.9 importlib-metadata-1.6.1 isodate-0.6.0 itsdangerous-1.1.0 jeepney-0.4.3 jmespath-0.10.0 json-logging-py-0.2 jsonpickle-1.4.1 
liac-arff-2.4.0 msal-1.3.0 msal-extensions-0.1.3 msrest-0.6.15 msrestazure-0.6.3 ndg-httpsclient-0.5.1 oauthlib-3.1.0 pandas-1.0.4 pathspec-0.8.0 portalocker-1.7.0 pyasn1-0.4.8 pycparser-2.20 pyopenssl-19.1.0 python-dateutil-2.8.1 pytz-2020.1 requests-2.23.0 requests-oauthlib-1.3.0 ruamel.yaml-0.16.10 ruamel.yaml.clib-0.2.0 urllib3-1.25.9 websocket-client-0.57.0 werkzeug-0.16.1 zipp-3.1.0  # # To activate this environment, use: # > source activate /azureml-envs/azureml_4b824bcb98517d791c41923f24d65461 # # To deactivate an active environment, use: # > source deactivate # Removing intermediate container 72b0e22ab36f ---> a910ed9d615f Step 9/14 : ENV PATH /azureml-envs/azureml_4b824bcb98517d791c41923f24d65461/bin:$PATH ---> Running in 5b536dd75fbf Removing intermediate container 5b536dd75fbf ---> 6f49f6d92ef8 Step 10/14 : ENV AZUREML_CONDA_ENVIRONMENT_PATH /azureml-envs/azureml_4b824bcb98517d791c41923f24d65461 ---> Running in c59c9ff0acd1 Removing intermediate container c59c9ff0acd1 ---> e28897fb5cb4 Step 11/14 : ENV LD_LIBRARY_PATH /azureml-envs/azureml_4b824bcb98517d791c41923f24d65461/lib:$LD_LIBRARY_PATH ---> Running in 662aeddb78e4 Removing intermediate container 662aeddb78e4 ---> 2fcddf377796 Step 12/14 : COPY azureml-environment-setup/spark_cache.py azureml-environment-setup/log4j.properties /azureml-environment-setup/ ---> 509fe9e3c997 Step 13/14 : ENV AZUREML_ENVIRONMENT_IMAGE True ---> Running in b22030bbe313 Removing intermediate container b22030bbe313 ---> da555f7041ca Step 14/14 : CMD ["bash"] ---> Running in 1f15bfc12609 Removing intermediate container 1f15bfc12609 ---> 8c8954e27791 Successfully built 8c8954e27791 Successfully tagged azureml/azureml_c86bf9d3c3b717eea982d145df7cbfc0:latest Logging experiment running status in history service. 
Running: ['docker', 'run', '--name', 'diabetes-training_1591406467_640e431f', '--rm', '-v', '/tmp/azureml_runs/diabetes-training_1591406467_640e431f:/azureml-run', '--shm-size', '2g', '-e', 'EXAMPLE_ENV_VAR=EXAMPLE_VALUE', '-e', 'AZUREML_CONTEXT_MANAGER_TRACKUSERERROR=eyJTa2lwSGlzdG9yeUltcG9ydENoZWNrIjoiRmFsc2UifQ==', '-e', 'AZUREML_CONTEXT_MANAGER_RUNHISTORY=eyJPdXRwdXRDb2xsZWN0aW9uIjp0cnVlLCJEaXJlY3Rvcmllc1RvV2F0Y2giOlsibG9ncyJdLCJzbmFwc2hvdFByb2plY3QiOnRydWV9', '-e', 'AZUREML_CONTEXT_MANAGER_PROJECTPYTHONPATH=bnVsbA==', '-e', 'AZUREML_RUN_TOKEN_EXPIRY=1593220869', '-e', 'AZUREML_RUN_TOKEN=eyJhbGciOiJSUzI1NiIsImtpZCI6IkZDMUYyMjE5MzQ4MTA3MDcyQkE5N0M2MTUzNjlENTc0QkFDQjAzMEYiLCJ0eXAiOiJKV1QifQ.eyJyb2xlIjoiQ29udHJpYnV0b3IiLCJzY29wZSI6Ii9zdWJzY3JpcHRpb25zLzYxMTlkN2VkLTNjZGUtNGZmZS05MGIxLWZhMmY1YTJiODBiMS9yZXNvdXJjZUdyb3Vwcy9sZWFybi0yNTA2M2IxNS02MDE0LTQzZTgtOWU0OS0yYmIyZDNhM2M5ODQvcHJvdmlkZXJzL01pY3Jvc29mdC5NYWNoaW5lTGVhcm5pbmdTZXJ2aWNlcy93b3Jrc3BhY2VzL3dmbWwiLCJhY2NvdW50aWQiOiIwMDAwMDAwMC0wMDAwLTAwMDAtMDAwMC0wMDAwMDAwMDAwMDAiLCJ3b3Jrc3BhY2VJZCI6IjZlZDM0MzE1LWYzN2MtNDA1Ni1hZGJmLWEwY2Q1YmQyNGEwOCIsInByb2plY3RpZCI6IjAwMDAwMDAwLTAwMDAtMDAwMC0wMDAwLTAwMDAwMDAwMDAwMCIsImRpc2NvdmVyeSI6InVyaTovL2Rpc2NvdmVyeXVyaS8iLCJ0aWQiOiI2MDRjMTUwNC1jNmEzLTQwODAtODFhYS1iMzMwOTExMDQxODciLCJvaWQiOiIxY2RhY2MwNi1lMTIyLTQwZTUtODYxYi04YjU1MDg4NjRhZjgiLCJwdWlkIjoiMTAwMzIwMDBDNUNEMzFGNSIsImlzcyI6ImF6dXJlbWwiLCJpZHAiOiJsaXZlLmNvbSIsImFwcGlkIjoiV2lsbCBpYW0iLCJhbHRzZWNpZCI6IjE6bGl2ZS5jb206MDAwMzQwMDEwNjI2MEQ5NyIsImV4cCI6MTU5MzIyMDg2OSwiYXVkIjoiYXp1cmVtbCJ9.cv3tjxMUkSRP3Ra-1NecawLxgXfAOkiRBxrbrYyvfk1loHyp6foe-StuhkySSPLBhVL5-o1QMDEdgSTIwWBTadvuIhBqwMHq1Eqqzb7uTMfPW1O9tN8FI-lCfGoFSGqgpKYHlOxnHYy1-G1UX0MFxL0oWm3dRbbJXFAsJujys2xRWGe-xITurgmLS7HE7XPdIDtLNPKu_MsnsRsATe9fnUFcUQcyP5w3nLIh8rlKBlzFL_3VzD6LiX0vqRQwHCfnbDKRMXZhQJPVzRseDJVkaPjfeKLH0sorJaF2as3sJOwDs8BtVQSjsWiVXzT3gXAkXBhIDkD1Zm9y-v8TKyrY-g', '-e', 'HBI_WORKSPACE_JOB=false', '-e', 'AZUREML_RUN_TOKEN_RAND=af21852a-c595-4cdf-86ae-d3b5b42ad17e', '-e', 'AZUREML_RUN_TOKEN_PASS=08a1d570-d844-49b5-9254-96cbef5f5746', '-e', 'PYTHONUNBUFFERED=True', '-e', 'AZUREML_COMMUNICATOR=None', '-e', 'AZUREML_FRAMEWORK=Python', '-e', 'AZUREML_ARM_PROJECT_NAME=diabetes-training', '-e', 'AZUREML_ARM_WORKSPACE_NAME=wfml', '-e', 'AZUREML_ARM_SUBSCRIPTION=6119d7ed-3cde-4ffe-90b1-fa2f5a2b80b1', '-e', 'AZUREML_ARM_RESOURCEGROUP=learn-25063b15-6014-43e8-9e49-2bb2d3a3c984', '-e', 'AZUREML_EXPERIMENT_SCOPE=/subscriptions/6119d7ed-3cde-4ffe-90b1-fa2f5a2b80b1/resourceGroups/learn-25063b15-6014-43e8-9e49-2bb2d3a3c984/providers/Microsoft.MachineLearningServices/workspaces/wfml/experiments/diabetes-training', '-e', 'AZUREML_WORKSPACE_ID=6ed34315-f37c-4056-adbf-a0cd5bd24a08', '-e', 'AZUREML_WORKSPACE_SCOPE=/subscriptions/6119d7ed-3cde-4ffe-90b1-fa2f5a2b80b1/resourceGroups/learn-25063b15-6014-43e8-9e49-2bb2d3a3c984/providers/Microsoft.MachineLearningServices/workspaces/wfml', '-e', 'AZUREML_DATA_CONTAINER_ID=dcid.diabetes-training_1591406467_640e431f', '-e', 'AZUREML_DISCOVERY_SERVICE_ENDPOINT=https://eastus.experiments.azureml.net/discovery', '-e', 'AZUREML_RUN_HISTORY_SERVICE_ENDPOINT=https://eastus.experiments.azureml.net', '-e', 'AZUREML_SERVICE_ENDPOINT=https://eastus.experiments.azureml.net', '-e', 'AZUREML_RUN_CONFIGURATION=azureml-setup/mutated_run_configuration.json', '-e', 'AZUREML_INSTRUMENTATION_KEY=2d586587-4df8-4336-9af2-277fe3c5d9cd', '-e', 'AZUREML_DRIVERLOG_PATH=azureml-logs/driver_log.txt', '-e', 'TELEMETRY_LOGS=azureml-logs/telemetry_logs/', '-e', 
'FAIRLEARN_LOGS=azureml-logs/telemetry_logs/fairlearn_log.txt', '-e', 'INTERPRET_TEXT_LOGS=azureml-logs/telemetry_logs/interpret_text_log.txt', '-e', 'INTERPRET_C_LOGS=azureml-logs/telemetry_logs/interpret_community_log.txt', '-e', 'AZUREML_JOBRELEASELOG_PATH=azureml-logs/job_release_log.txt', '-e', 'AZUREML_JOBPREPLOG_PATH=azureml-logs/job_prep_log.txt', '-e', 'AZUREML_CONTROLLOG_PATH=azureml-logs/control_log.txt', '-e', 'AZUREML_LOGDIRECTORY_PATH=azureml-logs/', '-e', 'AZUREML_PIDFILE_PATH=azureml-setup/pid.txt', '-e', 'AZUREML_RUN_ID=diabetes-training_1591406467_640e431f', 'azureml/azureml_c86bf9d3c3b717eea982d145df7cbfc0', '/bin/bash', '-c', 'cd /azureml-run && "/azureml-envs/azureml_4b824bcb98517d791c41923f24d65461/bin/python" "azureml-setup/run_script.py" "/azureml-envs/azureml_4b824bcb98517d791c41923f24d65461/bin/python" "azureml-setup/context_manager_injector.py" "-i" "ProjectPythonPath:context_managers.ProjectPythonPath" "-i" "RunHistory:context_managers.RunHistory" "-i" "TrackUserError:context_managers.TrackUserError" "diabetes_training.py"'] Streaming log file azureml-logs/70_driver_log.txt Streaming azureml-logs/70_driver_log.txt ======================================== Entering context manager injector. Current time:2020-06-06T01:25:34.994882 Starting the daemon thread to refresh tokens in background for process with pid = 8 Entering Run History Context Manager. Preparing to call script [ diabetes_training.py ] with arguments: [] After variable expansion, calling script [ diabetes_training.py ] with arguments: [] Loading Data... Training a logistic regression model with regularization rate of 0.01 Accuracy: 0.774 AUC: 0.8484929598487486 Starting the daemon thread to refresh tokens in background for process with pid = 8 The experiment completed successfully. Finalizing run... Logging experiment finalizing status in history service. Cleaning up all outstanding Run operations, waiting 300.0 seconds 2 items cleaning up... Cleanup took 0.21400070190429688 seconds Execution Summary ================= RunId: diabetes-training_1591406467_640e431f Web View: https://ml.azure.com/experiments/diabetes-training/runs/diabetes-training_1591406467_640e431f?wsid=/subscriptions/6119d7ed-3cde-4ffe-90b1-fa2f5a2b80b1/resourcegroups/learn-25063b15-6014-43e8-9e49-2bb2d3a3c984/workspaces/wfml ###Markdown As with any experiment run, you can use the **RunDetails** widget to view information about the run and get a link to it in Azure Machine Learning studio. ###Code from azureml.widgets import RunDetails RunDetails(run).show() ###Output _____no_output_____ ###Markdown You can also retrieve the metrics and outputs from the **Run** object. ###Code # Get logged metrics metrics = run.get_metrics() for key in metrics.keys(): print(key, metrics.get(key)) print('\n') for file in run.get_file_names(): print(file) ###Output Regularization Rate 0.01 Accuracy 0.774 AUC 0.8484929598487486 azureml-logs/60_control_log.txt azureml-logs/70_driver_log.txt logs/azureml/8_azureml.log outputs/diabetes_model.pkl ###Markdown Register the Trained ModelNote that the outputs of the experiment include the trained model file (**diabetes_model.pkl**). You can register this model in your Azure Machine Learning workspace, making it possible to track model versions and retrieve them later. 
###Code from azureml.core import Model # Register the model run.register_model(model_path='outputs/diabetes_model.pkl', model_name='diabetes_model', tags={'Training context':'Estimator'}, properties={'AUC': run.get_metrics()['AUC'], 'Accuracy': run.get_metrics()['Accuracy']}) # List registered models for model in Model.list(ws): print(model.name, 'version:', model.version) for tag_name in model.tags: tag = model.tags[tag_name] print ('\t',tag_name, ':', tag) for prop_name in model.properties: prop = model.properties[prop_name] print ('\t',prop_name, ':', prop) print('\n') ###Output diabetes_model version: 1 Training context : Estimator AUC : 0.8484929598487486 Accuracy : 0.774 ###Markdown Create a Parameterized Training ScriptYou can increase the flexibility of your training experiment by adding parameters to your script, enabling you to repeat the same training experiment with different settings. In this case, you'll add a parameter for the regularization rate used by the Logistic Regression algorithm when training the model.Again, lets start by creating a folder for the parameterized script and the training data. ###Code import os, shutil # Create a folder for the experiment files training_folder = 'diabetes-training-params' os.makedirs(training_folder, exist_ok=True) # Copy the data file into the experiment folder shutil.copy('data/diabetes.csv', os.path.join(training_folder, "diabetes.csv")) ###Output _____no_output_____ ###Markdown Now let's create a script containing a parameter for the regularization rate hyperparameter. ###Code %%writefile $training_folder/diabetes_training.py # Import libraries from azureml.core import Run import pandas as pd import numpy as np import joblib import argparse from sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegression from sklearn.metrics import roc_auc_score from sklearn.metrics import roc_curve # Get the experiment run context run = Run.get_context() # Set regularization hyperparameter parser = argparse.ArgumentParser() parser.add_argument('--reg_rate', type=float, dest='reg', default=0.01) args = parser.parse_args() reg = args.reg # load the diabetes dataset print("Loading Data...") # load the diabetes dataset diabetes = pd.read_csv('diabetes.csv') # Separate features and labels X, y = diabetes[['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree','Age']].values, diabetes['Diabetic'].values # Split data into training set and test set X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0) # Train a logistic regression model print('Training a logistic regression model with regularization rate of', reg) run.log('Regularization Rate', np.float(reg)) model = LogisticRegression(C=1/reg, solver="liblinear").fit(X_train, y_train) # calculate accuracy y_hat = model.predict(X_test) acc = np.average(y_hat == y_test) print('Accuracy:', acc) run.log('Accuracy', np.float(acc)) # calculate AUC y_scores = model.predict_proba(X_test) auc = roc_auc_score(y_test,y_scores[:,1]) print('AUC: ' + str(auc)) run.log('AUC', np.float(auc)) os.makedirs('outputs', exist_ok=True) joblib.dump(value=model, filename='outputs/diabetes_model.pkl') run.complete() ###Output Writing diabetes-training-params/diabetes_training.py ###Markdown Use a Framework-Specific EstimatorYou used a generic **Estimator** class to run the training script, but you can also take advantage of framework-specific estimators that include environment definitions for 
common machine learning frameworks. In this case, you're using Scikit-Learn, so you can use the **SKLearn** estimator. This means that you don't need to specify the **scikit-learn** package in the configuration.> **Note**: Once again, the training experiment uses a new environment; which must be created the first time it is run. ###Code from azureml.train.sklearn import SKLearn from azureml.widgets import RunDetails # Create an estimator estimator = SKLearn(source_directory=training_folder, entry_script='diabetes_training.py', script_params = {'--reg_rate': 0.1}, compute_target='local' ) # Create an experiment experiment_name = 'diabetes-training' experiment = Experiment(workspace = ws, name = experiment_name) # Run the experiment run = experiment.submit(config=estimator) # Show the run details while running RunDetails(run).show() run.wait_for_completion() ###Output _____no_output_____ ###Markdown Once again, you can get the metrics and outputs from the run. ###Code # Get logged metrics metrics = run.get_metrics() for key in metrics.keys(): print(key, metrics.get(key)) print('\n') for file in run.get_file_names(): print(file) ###Output Regularization Rate 0.1 Accuracy 0.7736666666666666 AUC 0.8483904671874223 azureml-logs/60_control_log.txt azureml-logs/70_driver_log.txt logs/azureml/8_azureml.log outputs/diabetes_model.pkl ###Markdown Register A New Version of the ModelNow that you've trained a new model, you can register it as a new version in the workspace. ###Code from azureml.core import Model # Register the model run.register_model(model_path='outputs/diabetes_model.pkl', model_name='diabetes_model', tags={'Training context':'Parameterized SKLearn Estimator'}, properties={'AUC': run.get_metrics()['AUC'], 'Accuracy': run.get_metrics()['Accuracy']}) # List registered models for model in Model.list(ws): print(model.name, 'version:', model.version) for tag_name in model.tags: tag = model.tags[tag_name] print ('\t',tag_name, ':', tag) for prop_name in model.properties: prop = model.properties[prop_name] print ('\t',prop_name, ':', prop) print('\n') ###Output diabetes_model version: 2 Training context : Parameterized SKLearn Estimator AUC : 0.8483904671874223 Accuracy : 0.7736666666666666 diabetes_model version: 1 Training context : Estimator AUC : 0.8484929598487486 Accuracy : 0.774
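###Markdown The registration steps above note that registered models can be retrieved later. As a minimal sketch (assuming the same `ws` workspace object and the `diabetes_model` name registered above), you can pull the latest registered version back out of the workspace like this; when no version is given, `Model(ws, name=...)` returns the most recent one. ###Code
from azureml.core import Model

# Retrieve the latest registered version of the model from the workspace
model = Model(ws, name='diabetes_model')
print(model.name, 'version:', model.version)

# Optionally download the serialized model file for local inspection or reuse
# model.download(target_dir='.', exist_ok=True)
###Output _____no_output_____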
.ipynb_checkpoints/modern_3_indexes-checkpoint.ipynb
###Markdown IndexesToday we're going to be talking about pandas' [`Index`es](http://pandas.pydata.org/pandas-docs/version/0.18.0/api.html#index).They're essential to pandas, but can be a difficult concept to grasp at first.I suspect this is partly because they're unlike what you'll find in SQL or R.`Index`es offer- a metadata container- easy label-based row selection and assignment- easy label-based alignment in operationsOne of my first tasks when analyzing a new dataset is to identify a unique identifier for each observation, and set that as the index. It could be a simple integer, or like in our first chapter, it could be several columns (`carrier`, `origin`, `dest`, `tail_num`, `date`).To demonstrate the benefits of proper `Index` use, we'll first fetch some weather data from sensors at a bunch of airports across the US.See [here](https://github.com/akrherz/iem/blob/master/scripts/asos/iem_scraper_example.py) for the example scraper I based this off of.Those uninterested in the details of fetching and prepping the data can [skip past it](#set-operations).At a high level, here's how we'll fetch the data: the sensors are broken up by "network" (states).We'll make one API call per state to get the list of airport IDs per network (using `get_ids` below).Once we have the IDs, we'll again make one call per state getting the actual observations (in `get_weather`).Feel free to skim the code below, I'll highlight the interesting bits. ###Code %matplotlib inline import os import json import glob import datetime from io import StringIO import requests import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import prep sns.set_style('ticks') pd.options.display.max_rows = 10 # States are broken into networks. The networks have a list of ids, each representing a station. # We will take that list of ids and pass them as query parameters to the URL we built up earlier. states = """AK AL AR AZ CA CO CT DE FL GA HI IA ID IL IN KS KY LA MA MD ME MI MN MO MS MT NC ND NE NH NJ NM NV NY OH OK OR PA RI SC SD TN TX UT VA VT WA WI WV WY""".split() # IEM has Iowa AWOS sites in its own labeled network networks = ['AWOS'] + ['{}_ASOS'.format(state) for state in states] def get_weather(stations, start=pd.Timestamp('2017-01-01'), end=pd.Timestamp('2017-01-31')): ''' Fetch weather data from MESONet between ``start`` and ``stop``. ''' url = ("http://mesonet.agron.iastate.edu/cgi-bin/request/asos.py?" 
"&data=tmpf&data=relh&data=sped&data=mslp&data=p01i&data=v" "sby&data=gust_mph&data=skyc1&data=skyc2&data=skyc3" "&tz=Etc/UTC&format=comma&latlon=no" "&{start:year1=%Y&month1=%m&day1=%d}" "&{end:year2=%Y&month2=%m&day2=%d}&{stations}") stations = "&".join("station=%s" % s for s in stations) weather = (pd.read_csv(url.format(start=start, end=end, stations=stations), comment="#") .rename(columns={"valid": "date"}) .rename(columns=str.strip) .assign(date=lambda df: pd.to_datetime(df['date'])) .set_index(["station", "date"]) .sort_index()) float_cols = ['tmpf', 'relh', 'sped', 'mslp', 'p01i', 'vsby', "gust_mph"] weather[float_cols] = weather[float_cols].apply(pd.to_numeric, errors="coerce") return weather def get_ids(network): url = "http://mesonet.agron.iastate.edu/geojson/network.php?network={}" r = requests.get(url.format(network)) md = pd.io.json.json_normalize(r.json()['features']) md['network'] = network return md ###Output _____no_output_____ ###Markdown There isn't too much in `get_weather` worth mentioning, just grabbing some CSV files from various URLs.They put metadata in the "CSV"s at the top of the file as lines prefixed by a `#`.Pandas will ignore these with the `comment='#'` parameter.I do want to talk briefly about the gem of a method that is [`json_normalize`](http://pandas.pydata.org/pandas-docs/version/0.18.0/generated/pandas.io.json.json_normalize.html).The weather API returns some slightly-nested data. ###Code url = "http://mesonet.agron.iastate.edu/geojson/network.php?network={}" r = requests.get(url.format("AWOS")) js = r.json() js['features'][:2] ###Output _____no_output_____ ###Markdown If we just pass that list off to the `DataFrame` constructor, we get this. ###Code pd.DataFrame(js['features']).head() ###Output _____no_output_____ ###Markdown In general, DataFrames don't handle nested data that well.It's often better to normalize it somehow.In this case, we can "lift" the nested items (`geometry.coordinates`, `properties.sid`, and `properties.sname`) up to the top level. ###Code pd.io.json.json_normalize(js['features']) ###Output _____no_output_____ ###Markdown Sure, it's not *that* difficult to write a quick for loop or list comprehension to extract those, but that gets tedious.If we were using the latitude and longitude data, we would want to split the `geometry.coordinates` column into two. But we aren't so we won't.Going back to the task, we get the airport IDs for every network (state) with `get_ids`. Then we pass those IDs into `get_weather` to fetch the actual weather data. ###Code import os ids = pd.concat([get_ids(network) for network in networks], ignore_index=True) gr = ids.groupby('network') store = 'data/weather.h5' if not os.path.exists(store): os.makedirs("data/weather", exist_ok=True) for k, v in gr: weather = get_weather(v['id']) weather.to_csv("data/weather/{}.csv".format(k)) weather = pd.concat([ pd.read_csv(f, parse_dates=['date'], index_col=['station', 'date']) for f in glob.glob('data/weather/*.csv') ]).sort_index() weather.to_hdf("data/weather.h5", "weather") else: weather = pd.read_hdf("data/weather.h5", "weather") weather.head() ###Output _____no_output_____ ###Markdown OK, that was a bit of work. Here's a plot to reward ourselves. 
###Code airports = ['W43', 'AFO', '82V', 'DUB'] g = sns.FacetGrid(weather.loc[airports].reset_index(), col='station', hue='station', col_wrap=2, size=4) g.map(sns.regplot, 'sped', 'gust_mph'); ###Output _____no_output_____ ###Markdown Set OperationsIndexes are set-like (technically *multi*sets, since you can have duplicates), so they support most python `set` operations. Since indexes are immutable you won't find any of the inplace `set` operations.One other difference is that since `Index`es are also array-like, you can't use some infix operators like `-` for `difference`. If you have a numeric index it is unclear whether you intend to perform math operations or set operations.You can use `&` for intersection, `|` for union, and `^` for symmetric difference though, since there's no ambiguity.For example, lets find the set of airports that we have both weather and flight information on. Since `weather` had a MultiIndex of `airport, datetime`, we'll use the `levels` attribute to get at the airport data, separate from the date data. ###Code # Bring in the flights data flights = pd.read_hdf('data/flights.h5', 'flights') weather_locs = weather.index.levels[0] # The `categories` attribute of a Categorical is an Index origin_locs = flights.origin.cat.categories dest_locs = flights.dest.cat.categories airports = weather_locs & origin_locs & dest_locs airports (origin_locs | dest_locs) ^ weather_locs 266 + 2101 print("Weather, no flights:\n\t", weather_locs.difference(origin_locs | dest_locs), end='\n\n') print("Flights, no weather:\n\t", (origin_locs | dest_locs).difference(weather_locs), end='\n\n') print("Dropped Stations:\n\t", (origin_locs | dest_locs) ^ weather_locs) # ^ Returns set of all elements in either (origin_locs | dest_locs) or weather_locs, but not both - i.e. items that are only in one or the other ###Output Weather, no flights: Index(['04V', '04W', '05U', '06D', '08D', '0A9', '0CO', '0E0', '0F2', '0J4', ... 'Y50', 'Y51', 'Y63', 'Y70', 'YIP', 'YKM', 'YKN', 'YNG', 'ZPH', 'ZZV'], dtype='object', length=2069) Flights, no weather: Index(['ADK', 'ADQ', 'ANC', 'BET', 'BQN', 'BRW', 'CDV', 'FAI', 'FCA', 'GUM', 'HNL', 'ITO', 'JNU', 'KOA', 'KTN', 'LIH', 'MQT', 'OGG', 'OME', 'OTZ', 'PPG', 'PSE', 'PSG', 'SCC', 'SCE', 'SIT', 'SJU', 'STT', 'STX', 'WRG', 'YAK', 'YUM'], dtype='object') Dropped Stations: Index(['04V', '04W', '05U', '06D', '08D', '0A9', '0CO', '0E0', '0F2', '0J4', ... 'Y63', 'Y70', 'YAK', 'YIP', 'YKM', 'YKN', 'YNG', 'YUM', 'ZPH', 'ZZV'], dtype='object', length=2101) ###Markdown FlavorsPandas has many subclasses of the regular `Index`, each tailored to a specific kind of data.Most of the time these will be created for you automatically, so you don't have to worry about which one to choose.1. [`Index`](http://pandas.pydata.org/pandas-docs/version/0.18.0/generated/pandas.Index.htmlpandas.Index)2. `Int64Index`3. `RangeIndex`: Memory-saving special case of `Int64Index`4. `FloatIndex`5. `DatetimeIndex`: Datetime64[ns] precision data6. `PeriodIndex`: Regularly-spaced, arbitrary precision datetime data.7. `TimedeltaIndex`8. `CategoricalIndex`9. 
`MultiIndex`You will sometimes create a `DatetimeIndex` with [`pd.date_range`](http://pandas.pydata.org/pandas-docs/version/0.18.0/generated/pandas.date_range.html) ([`pd.period_range`](http://pandas.pydata.org/pandas-docs/version/0.18.0/generated/pandas.period_range.html) for `PeriodIndex`).And you'll sometimes make a `MultiIndex` directly too (I'll have an example of this in my post on performance).Some of these specialized index types are purely optimizations; others use information about the data to provide additional methods.And while you might occasionally work with indexes directly (like the set operations above), most of the time you'll be operating on a Series or DataFrame, which in turn makes use of its Index. Row SlicingWe saw in part one that they're great for making *row* subsetting as easy as column subsetting. ###Code weather.loc['DSM'].head() ###Output _____no_output_____ ###Markdown Without indexes we'd probably resort to boolean masks. ###Code weather2 = weather.reset_index() weather2[weather2['station'] == 'DSM'].head() ###Output _____no_output_____ ###Markdown Slightly less convenient, but still doable. Indexes for Easier Arithmetic, Analysis It's nice to have your metadata (labels on each observation) next to your actual values. But if you store them in an array, they'll get in the way of your operations.Say we wanted to translate the Fahrenheit temperature to Celsius. ###Code # With indices temp = weather['tmpf'] c = (temp - 32) * 5 / 9 c.to_frame() # without temp2 = weather.reset_index()[['station', 'date', 'tmpf']] temp2['tmpf'] = (temp2['tmpf'] - 32) * 5 / 9 temp2.head() ###Output _____no_output_____ ###Markdown Again, not terrible, but not as good.And, what if you had wanted to keep Fahrenheit around as well, instead of overwriting it like we did?Then you'd need to make a copy of everything, including the `station` and `date` columns.We don't have that problem, since indexes are immutable and safely shared between DataFrames / Series. ###Code temp.index is c.index ###Output _____no_output_____ ###Markdown Indexes for AlignmentI've saved the best for last.Automatic alignment, or reindexing, is fundamental to pandas.All binary operations (add, multiply, etc.) between Series/DataFrames first *align* and then proceed.Let's suppose we have hourly observations on temperature and windspeed.And suppose some of the observations were invalid, and not reported (simulated below by sampling from the full dataset). We'll assume the missing windspeed observations were potentially different from the missing temperature observations. ###Code dsm = weather.loc['DSM'] hourly = dsm.resample('H').mean() temp = hourly['tmpf'].sample(frac=.5, random_state=99).sort_index() sped = hourly['sped'].sample(frac=.5, random_state=2).sort_index() temp.head().to_frame() sped.head() ###Output _____no_output_____ ###Markdown Notice that the two indexes aren't identical.Suppose that the `windspeed : temperature` ratio is meaningful.When we go to compute that, pandas will automatically align the two by index label. 
###Code sped / temp ###Output _____no_output_____ ###Markdown This lets you focus on doing the operation, rather than manually aligning things, ensuring that the arrays are the same length and in the same order.By default, missing values are inserted where the two don't align.You can use the method version of any binary operation to specify a `fill_value` ###Code # This is the same as sped / temp sped.div(temp) sped.div(temp, fill_value=1) ###Output _____no_output_____ ###Markdown And since I couldn't find anywhere else to put it, you can control the axis the operation is aligned along as well. ###Code hourly.div(sped, axis='index') ###Output _____no_output_____ ###Markdown The non row-labeled version of this is messy. ###Code temp2 = temp.reset_index() sped2 = sped.reset_index() # Find rows where the operation is defined common_dates = pd.Index(temp2.date) & sped2.date pd.concat([ # concat to not lose date information sped2.loc[sped2['date'].isin(common_dates), 'date'], (sped2.loc[sped2.date.isin(common_dates), 'sped'] / temp2.loc[temp2.date.isin(common_dates), 'tmpf'])], axis=1).dropna(how='all') ###Output _____no_output_____ ###Markdown And we have a bug in there. Can you spot it?I only grabbed the dates from `sped2` in the line `sped2.loc[sped2['date'].isin(common_dates), 'date']`.Really that should be `sped2.loc[sped2.date.isin(common_dates)] | temp2.loc[temp2.date.isin(common_dates)]`.But I think leaving the buggy version states my case even more strongly. The `temp / sped` version where pandas aligns everything is better. MergingThere are two ways of merging DataFrames / Series in pandas.1. Relational Database style with `pd.merge`2. Array style with `pd.concat`Personally, I think in terms of the `concat` style.I learned pandas before I ever really used SQL, so it comes more naturally to me I suppose. Concat Version ###Code pd.concat([temp, sped], axis=1) ###Output _____no_output_____ ###Markdown The `axis` parameter controls how the data should be stacked, `0` for vertically, `1` for horizontally.The `join` parameter controls the merge behavior on the shared axis (the Index for `axis=1`). By default it's like a union of the two indexes, or an outer join. ###Code pd.concat([temp, sped], axis=1, join='inner') ###Output _____no_output_____ ###Markdown Merge VersionSince we're joining by index here the merge version is quite similar.We'll see an example later of a one-to-many join where the two differ. ###Code pd.merge(temp.to_frame(), sped.to_frame(), left_index=True, right_index=True).head() pd.merge(temp.to_frame(), sped.to_frame(), left_index=True, right_index=True, how='outer').head() ###Output _____no_output_____ ###Markdown Like I said, I typically prefer `concat` to `merge`.The exception here is one-to-many type joins. Let's walk through one of those, where we join the flight data to the weather data.To focus just on the merge, we'll aggregate the hourly weather data to be daily, rather than trying to find the closest recorded weather observation to each departure (you could do that, but it's not the focus right now). We'll then join the one `(airport, date)` record to the many `(airport, date, flight)` records.Quick tangent, to get the weather data to daily frequency, we'll need to resample (more on that in the timeseries section). The resample essentially splits the recorded values into daily buckets and computes the aggregation function on each bucket. The only wrinkle is that we have to resample *by station*, so we'll use the `pd.TimeGrouper` helper. 
My suggestion for doing it: - Drop irrelevant stations from weather data- Resample weather data so that it is daily by station (use TimeGrouper helper)- Merge weather data with flight data ###Code (origin_locs | dest_locs) weather idx_cols = ['unique_carrier', 'origin', 'dest', 'tail_num', 'fl_num', 'fl_date'] data_cols = ['crs_dep_time', 'dep_delay', 'crs_arr_time', 'arr_delay', 'taxi_out', 'taxi_in', 'wheels_off', 'wheels_on'] df = flights.set_index(idx_cols)[data_cols].sort_index() def mode(x): ''' Arbitrarily break ties. ''' return x.value_counts().index[0] aggfuncs = {'tmpf': 'mean', 'relh': 'mean', 'sped': 'mean', 'mslp': 'mean', 'p01i': 'mean', 'vsby': 'mean', 'gust_mph': 'mean', 'skyc1': mode, 'skyc2': mode, 'skyc3': mode} # TimeGrouper works on a DatetimeIndex, so we move `station` to the # columns and then groupby it as well. daily = (weather.reset_index(level="station") .groupby([pd.TimeGrouper('1d'), "station"]) .agg(aggfuncs)) daily.head() ###Output _____no_output_____ ###Markdown Now that we have daily flight and weather data, we can merge.We'll use the `on` keyword to indicate the columns we'll merge on (this is like a `USING (...)` SQL statement), we just have to make sure the names align. ###Code flights.fl_date.unique() flights.origin.unique() daily.reset_index().date.unique() daily.reset_index().station.unique() ###Output _____no_output_____ ###Markdown The merge version ###Code daily_ = ( daily .reset_index() .rename(columns={'date': 'fl_date', 'station': 'origin'}) .assign(origin=lambda x: pd.Categorical(x.origin, categories=flights.origin.cat.categories)) ) flights.fl_date.unique() daily.reset_index().date.unique() m = pd.merge(flights, daily_, on=['fl_date', 'origin']).set_index(idx_cols).sort_index() m.head() ###Output _____no_output_____ ###Markdown Since data-wrangling on its own is never the goal, let's do some quick analysis.Seaborn makes it easy to explore bivariate relationships. Looking at the various [sky coverage states](https://en.wikipedia.org/wiki/METARCloud_reporting): ###Code m.groupby('skyc1').dep_delay.agg(['mean', 'count']).sort_values(by='mean') import statsmodels.api as sm ###Output /Users/taugspurger/miniconda3/envs/modern-pandas/lib/python3.6/site-packages/statsmodels/compat/pandas.py:56: FutureWarning: The pandas.core.datetools module is deprecated and will be removed in a future version. Please use the pandas.tseries module instead. from pandas.core import datetools ###Markdown Statsmodels (via [patsy](http://patsy.readthedocs.org/) can automatically convert dummy data to dummy variables in a formula with the `C` function). ###Code mod = sm.OLS.from_formula('dep_delay ~ C(skyc1) + tmpf + relh + sped + mslp', data=m) res = mod.fit() res.summary() fig, ax = plt.subplots() ax.scatter(res.fittedvalues, res.resid, color='k', marker='.', alpha=.25) ax.set(xlabel='Predicted', ylabel='Residual') sns.despine() ###Output _____no_output_____
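###Markdown One loose end from the merge discussion: the post sets aside matching each departure to the *closest* weather observation. A rough sketch of how that could look with `pd.merge_asof` is below; the two toy frames are invented stand-ins for the real `flights` and `weather` tables, so only the column names mirror the originals: ###Code
import pandas as pd

flights_toy = pd.DataFrame({
    "origin": ["DSM", "ORD", "DSM"],
    "dep_time": pd.to_datetime(["2014-01-01 09:05",
                                "2014-01-01 09:10",
                                "2014-01-01 17:40"]),
}).sort_values("dep_time")

weather_toy = pd.DataFrame({
    "station": ["DSM", "ORD", "DSM"],
    "date": pd.to_datetime(["2014-01-01 09:00",
                            "2014-01-01 09:00",
                            "2014-01-01 17:00"]),
    "tmpf": [21.0, 18.0, 28.0],
}).sort_values("date")

# For each flight, pick the most recent weather reading at its origin station.
# Both frames must be sorted on their time keys for merge_asof to work.
pd.merge_asof(flights_toy, weather_toy,
              left_on="dep_time", right_on="date",
              left_by="origin", right_by="station",
              direction="backward")
###Output _____no_output_____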
Basics/Python_For_SDC/Basics.ipynb
###Markdown Quick Sort ###Code def quicksort(arr): if len(arr) <= 1: return arr pivot = arr[len(arr) // 2] left = [x for x in arr if x < pivot] middle = [x for x in arr if x == pivot] right = [x for x in arr if x > pivot] return quicksort(left) + middle + quicksort(right) quicksort([5,4,3,2,1]) s = "hello" print(s.capitalize()) # Capitalize a string; prints "Hello" print(s.upper()) # Convert a string to uppercase; prints "HELLO" print(s.rjust(7)) # Right-justify a string, padding with spaces; prints " hello" print(s.center(7)) # Center a string, padding with spaces; prints " hello " print(s.replace('l', '(ell)')) # Replace all instances of one substring with another; # prints "he(ell)(ell)o" print(' world '.strip()) # Strip leading and trailing whitespace; prints "world" import numpy as np a = np.array([1, 2, 3]) # Create a rank 1 array print(type(a)) # Prints "<class 'numpy.ndarray'>" print(a.shape) # Prints "(3,)" print(a[0], a[1], a[2]) # Prints "1 2 3" a[0] = 5 # Change an element of the array print(a) # Prints "[5, 2, 3]" b = np.array([[1,2,3],[4,5,6]]) # Create a rank 2 array print(b.shape) # Prints "(2, 3)" print(b[0, 0], b[0, 1], b[1, 0]) # Prints "1 2 4" import numpy as np a = np.zeros((2,2)) # Create an array of all zeros print(a) # Prints "[[ 0. 0.] # [ 0. 0.]]" b = np.ones((1,2)) # Create an array of all ones print(b) # Prints "[[ 1. 1.]]" c = np.full((2,2), 7) # Create a constant array print(c) # Prints "[[ 7. 7.] # [ 7. 7.]]" d = np.eye(2) # Create a 2x2 identity matrix print(d) # Prints "[[ 1. 0.] # [ 0. 1.]]" e = np.random.random((2,2)) # Create an array filled with random values print(e) # Might print "[[ 0.91940167 0.08143941] # [ 0.68744134 0.87236687]]" import numpy as np # Create the following rank 2 array with shape (3, 4) # [[ 1 2 3 4] # [ 5 6 7 8] # [ 9 10 11 12]] a = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]]) # Use slicing to pull out the subarray consisting of the first 2 rows # and columns 1 and 2; b is the following array of shape (2, 2): # [[2 3] # [6 7]] b = a[:2, 1:3] # A slice of an array is a view into the same data, so modifying it # will modify the original array. print(a[0, 1]) # Prints "2" b[0, 0] = 77 # b[0, 0] is the same piece of data as a[0, 1] print(a[0, 1]) # Prints "77" import numpy as np # Create the following rank 2 array with shape (3, 4) # [[ 1 2 3 4] # [ 5 6 7 8] # [ 9 10 11 12]] a = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]]) # Two ways of accessing the data in the middle row of the array. # Mixing integer indexing with slices yields an array of lower rank, # while using only slices yields an array of the same rank as the # original array: row_r1 = a[1, :] # Rank 1 view of the second row of a row_r2 = a[1:2, :] # Rank 2 view of the second row of a print(row_r1, row_r1.shape) # Prints "[5 6 7 8] (4,)" print(row_r2, row_r2.shape) # Prints "[[5 6 7 8]] (1, 4)" # We can make the same distinction when accessing columns of an array: col_r1 = a[:, 1] col_r2 = a[:, 1:2] print(col_r1, col_r1.shape) # Prints "[ 2 6 10] (3,)" print(col_r2, col_r2.shape) # Prints "[[ 2] # [ 6] # [10]] (3, 1)" ###Output [5 6 7 8] (4,) [[5 6 7 8]] (1, 4) [ 2 6 10] (3,) [[ 2] [ 6] [10]] (3, 1) ###Markdown Integer array indexing: When you index into numpy arrays using slicing, the resulting array view will always be a subarray of the original array. In contrast, integer array indexing allows you to construct arbitrary arrays using the data from another array. 
Here is an example: ###Code import numpy as np a = np.array([[1,2], [3, 4], [5, 6]]) # An example of integer array indexing. # The returned array will have shape (3,) and print(a[[0, 1, 2], [0, 1, 0]]) # Prints "[1 4 5]" # The above example of integer array indexing is equivalent to this: print(np.array([a[0, 0], a[1, 1], a[2, 0]])) # Prints "[1 4 5]" # When using integer array indexing, you can reuse the same # element from the source array: print(a[[0, 0], [1, 1]]) # Prints "[2 2]" # Equivalent to the previous integer array indexing example print(np.array([a[0, 1], a[0, 1]])) # Prints "[2 2]" ###Output [1 4 5] [1 4 5] [2 2] [2 2] ###Markdown Tricks using Integer Indexing ###Code import numpy as np # Create a new array from which we will select elements a = np.array([[1,2,3], [4,5,6], [7,8,9], [10, 11, 12]]) print(a) # prints "array([[ 1, 2, 3], # [ 4, 5, 6], # [ 7, 8, 9], # [10, 11, 12]])" # Create an array of indices b = np.array([0, 2, 0, 1]) # Select one element from each row of a using the indices in b print(a[np.arange(4), b]) # Prints "[ 1 6 7 11]" # Mutate one element from each row of a using the indices in b a[np.arange(4), b] += 10 print(a) # prints "array([[11, 2, 3], # [ 4, 5, 16], # [17, 8, 9], # [10, 21, 12]]) ###Output [[ 1 2 3] [ 4 5 6] [ 7 8 9] [10 11 12]] [ 1 6 7 11] [[11 2 3] [ 4 5 16] [17 8 9] [10 21 12]] ###Markdown Boolean array indexing: Boolean array indexing lets you pick out arbitrary elements of an array. Frequently this type of indexing is used to select the elements of an array that satisfy some condition. Here is an example: ###Code import numpy as np a = np.array([[1,2], [3, 4], [5, 6]]) bool_idx = (a > 2) # Find the elements of a that are bigger than 2; # this returns a numpy array of Booleans of the same # shape as a, where each slot of bool_idx tells # whether that element of a is > 2. print(bool_idx) # Prints "[[False False] # [ True True] # [ True True]]" # We use boolean array indexing to construct a rank 1 array # consisting of the elements of a corresponding to the True values # of bool_idx print(a[bool_idx]) # Prints "[3 4 5 6]" # We can do all of the above in a single concise statement: print(a[a > 2]) # Prints "[3 4 5 6]" print(a[(a > 2)]) ###Output [[False False] [ True True] [ True True]] [3 4 5 6] [3 4 5 6] [3 4 5 6] ###Markdown DatatypesEvery numpy array is a grid of elements of the same type. Numpy provides a large set of numeric datatypes that you can use to construct arrays. Numpy tries to guess a datatype when you create an array, but functions that construct arrays usually also include an optional argument to explicitly specify the datatype. 
Here is an example: ###Code import numpy as np x = np.array([1, 2]) # Let numpy choose the datatype print(x.dtype) # Prints "int64" x = np.array([1.0, 2.0]) # Let numpy choose the datatype print(x.dtype) # Prints "float64" x = np.array([1, 2], dtype=np.int64) # Force a particular datatype print(x.dtype) # Prints "int64" ###Output int64 float64 int64 ###Markdown Array mathBasic mathematical functions operate elementwise on arrays, and are available both as operator overloads and as functions in the numpy module: ###Code import numpy as np x = np.array([[1,2],[3,4]], dtype=np.float64) y = np.array([[5,6],[7,8]], dtype=np.float64) # Elementwise sum; both produce the array # [[ 6.0 8.0] # [10.0 12.0]] print(x + y) print(np.add(x, y)) print("="*50) # Elementwise difference; both produce the array # [[-4.0 -4.0] # [-4.0 -4.0]] print(x - y) print(np.subtract(x, y)) print("="*50) # Elementwise product; both produce the array # [[ 5.0 12.0] # [21.0 32.0]] print(x * y) print(np.multiply(x, y)) print("="*50) # Elementwise division; both produce the array # [[ 0.2 0.33333333] # [ 0.42857143 0.5 ]] print(x / y) print(np.divide(x, y)) print("="*50) # Elementwise square root; produces the array # [[ 1. 1.41421356] # [ 1.73205081 2. ]] print(np.sqrt(x)) print("="*50) ###Output [[ 6. 8.] [10. 12.]] [[ 6. 8.] [10. 12.]] ================================================== [[-4. -4.] [-4. -4.]] [[-4. -4.] [-4. -4.]] ================================================== [[ 5. 12.] [21. 32.]] [[ 5. 12.] [21. 32.]] ================================================== [[0.2 0.33333333] [0.42857143 0.5 ]] [[0.2 0.33333333] [0.42857143 0.5 ]] ================================================== [[1. 1.41421356] [1.73205081 2. ]] ================================================== ###Markdown Note that unlike MATLAB, * is elementwise multiplication, not matrix multiplication. We instead use the dot function to compute inner products of vectors, to multiply a vector by a matrix, and to multiply matrices. 
dot is available both as a function in the numpy module and as an instance method of array objects: ###Code import numpy as np x = np.array([[1,2],[3,4]]) y = np.array([[5,6],[7,8]]) v = np.array([9,10]) w = np.array([11, 12]) print("Shape : x {0}, y {1}, v {2}, w {3}".format(x.shape, y.shape, v.shape, w.shape)) # Inner product of vectors; both produce 219 print(v.dot(w)) print(np.dot(v, w)) # Matrix / vector product; both produce the rank 1 array [29 67] print(x.dot(v)) print(np.dot(x, v)) # Matrix / matrix product; both produce the rank 2 array # [[19 22] # [43 50]] print(x.dot(y)) print(np.dot(x, y)) A = np.array([[1,2]]) B = np.array([[3],[4]]) C = np.multiply(A, B) D = np.dot(A,B) E = A*B print(A.shape, B.shape, C.shape, D.shape, E.shape) print(A) print(B) print(C) print(D) print(E) ###Output (1, 2) (2, 1) (2, 2) (1, 1) (2, 2) [[1 2]] [[3] [4]] [[3 6] [4 8]] [[11]] [[3 6] [4 8]] ###Markdown Numpy provides many useful functions for performing computations on arrays; one of the most useful is sum: ###Code import numpy as np x = np.array([[1,2],[3,4]]) print(np.sum(x)) # Compute sum of all elements; prints "10" print(np.sum(x, axis=0)) # Compute sum of each column; prints "[4 6]" print(np.sum(x, axis=1)) # Compute sum of each row; prints "[3 7]" import numpy as np x = np.array([[1,2], [3,4]]) print(x) # Prints "[[1 2] # [3 4]]" print(x.T) # Prints "[[1 3] # [2 4]]" # Note that taking the transpose of a rank 1 array does nothing: v = np.array([1,2,3]) print(v) # Prints "[1 2 3]" print(v.T) # Prints "[1 2 3]" ###Output [[1 2] [3 4]] [[1 3] [2 4]] [1 2 3] [1 2 3] ###Markdown BroadcastingBroadcasting is a powerful mechanism that allows numpy to work with arrays of different shapes when performing arithmetic operations. Frequently we have a smaller array and a larger array, and we want to use the smaller array multiple times to perform some operation on the larger array.For example, suppose that we want to add a constant vector to each row of a matrix. We could do it like this: ###Code import numpy as np # We will add the vector v to each row of the matrix x, # storing the result in the matrix y x = np.array([[1,2,3], [4,5,6], [7,8,9], [10, 11, 12]]) v = np.array([1, 0, 1]) y = np.empty_like(x) # Create an empty matrix with the same shape as x # Add the vector v to each row of the matrix x with an explicit loop for i in range(4): y[i, :] = x[i, :] + v # Now y is the following # [[ 2 2 4] # [ 5 5 7] # [ 8 8 10] # [11 11 13]] print(y) ###Output _____no_output_____ ###Markdown Loops above is slow, how abt stacking vectors ###Code import numpy as np # We will add the vector v to each row of the matrix x, # storing the result in the matrix y x = np.array([[1,2,3], [4,5,6], [7,8,9], [10, 11, 12]]) v = np.array([1, 0, 1]) vv = np.tile(v, (4, 1)) # Stack 4 copies of v on top of each other print(vv) # Prints "[[1 0 1] # [1 0 1] # [1 0 1] # [1 0 1]]" y = x + vv # Add x and vv elementwise print(y) # Prints "[[ 2 2 4 # [ 5 5 7] # [ 8 8 10] # [11 11 13]]" #Even better #Numpy broadcasting allows us to perform this computation without actually creating multiple copies of v. 
Consider this version, using broadcasting: import numpy as np # We will add the vector v to each row of the matrix x, # storing the result in the matrix y x = np.array([[1,2,3], [4,5,6], [7,8,9], [10, 11, 12]]) v = np.array([1, 0, 1]) y = x + v # Add v to each row of x using broadcasting print(y) # Prints "[[ 2 2 4] # [ 5 5 7] # [ 8 8 10] # [11 11 13]]" ###Output [[1 0 1] [1 0 1] [1 0 1] [1 0 1]] [[ 2 2 4] [ 5 5 7] [ 8 8 10] [11 11 13]] [[ 2 2 4] [ 5 5 7] [ 8 8 10] [11 11 13]] ###Markdown Broadcasting two arrays together follows these rules: If the arrays do not have the same rank, prepend the shape of the lower rank array with 1s until both shapes have the same length. The two arrays are said to be compatible in a dimension if they have the same size in the dimension, or if one of the arrays has size 1 in that dimension. The arrays can be broadcast together if they are compatible in all dimensions. After broadcasting, each array behaves as if it had shape equal to the elementwise maximum of shapes of the two input arrays. In any dimension where one array had size 1 and the other array had size greater than 1, the first array behaves as if it were copied along that dimension ###Code import numpy as np print(np.zeros((5,5)).size) np.zeros((5,5)).dtype x = 50 def hello(): x = 20 print(x) hello() print(x) np.random.random_sample((2)) + np.random.random_sample((5,3)) ###Output _____no_output_____
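###Markdown To tie the broadcasting rules together, here is a small self-contained sketch (not part of the original notes) of the most common pattern, centering each column of a matrix, plus the reshape needed to broadcast along the other axis. It also explains the cell above: adding shapes `(2,)` and `(5, 3)` raises a `ValueError` because the trailing dimensions 2 and 3 are not compatible. ###Code
import numpy as np

X = np.array([[1.0, 2.0, 3.0],
              [4.0, 5.0, 6.0],
              [7.0, 8.0, 9.0],
              [10.0, 11.0, 12.0]])

# (4, 3) minus (3,): the column means broadcast across all four rows.
col_means = X.mean(axis=0)
X_col_centered = X - col_means
print(X_col_centered.mean(axis=0))   # approximately [0. 0. 0.]

# (4,) does NOT broadcast against (4, 3); add a trailing axis to make it (4, 1).
row_means = X.mean(axis=1).reshape(4, 1)
X_row_centered = X - row_means
print(X_row_centered.mean(axis=1))   # approximately [0. 0. 0. 0.]
###Output _____no_output_____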
Dimensionality Reduction/PCA/SparsePCA_MaxAbsScaler.ipynb
###Markdown Sparse PCA with MaxAbsScaler This code template is for Sparse Principal Component Analysis(SparsePCA) along with MaxAbsScaler in python for dimensionality reduction technique and Data Rescaling. It is used to decompose a multivariate dataset into a set of successive orthogonal components that explain a maximum amount of the variance, keeping only the most significant singular vectors to project the data to a lower dimensional space. Required Packages ###Code import warnings import itertools import numpy as np import pandas as pd import seaborn as se import matplotlib.pyplot as plt import matplotlib.pyplot as plt from mpl_toolkits import mplot3d from sklearn.preprocessing import LabelEncoder,MaxAbsScaler from sklearn.decomposition import SparsePCA from numpy.linalg import eigh warnings.filterwarnings('ignore') ###Output _____no_output_____ ###Markdown InitializationFilepath of CSV file ###Code #filepath file_path= " " ###Output _____no_output_____ ###Markdown List of features which are required for model training . ###Code #x_values features=[] ###Output _____no_output_____ ###Markdown Target feature for prediction. ###Code #y_value target='' ###Output _____no_output_____ ###Markdown Data FetchingPandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools.We will use panda's library to read the CSV file using its storage path.And we use the head function to display the initial row or entry. ###Code df=pd.read_csv(file_path) df=df.drop(["id"],axis=1) df.head() ###Output _____no_output_____ ###Markdown Feature SelectionsIt is the process of reducing the number of input variables when developing a predictive model. Used to reduce the number of input variables to both reduce the computational cost of modelling and, in some cases, to improve the performance of the model.We will assign all the required input features to X and target/outcome to Y. ###Code X = df[features] Y = df[target] ###Output _____no_output_____ ###Markdown Data PreprocessingSince the majority of the machine learning models in the Sklearn library doesn't handle string category data and Null value, we have to explicitly remove or replace null values. The below snippet have functions, which removes the null value if any exists. And convert the string classes data in the datasets by encoding them to integer classes. ###Code def NullClearner(df): if(isinstance(df, pd.Series) and (df.dtype in ["float64","int64"])): df.fillna(df.mean(),inplace=True) return df elif(isinstance(df, pd.Series)): df.fillna(df.mode()[0],inplace=True) return df else:return df def EncodeX(df): return pd.get_dummies(df) x=X.columns.to_list() for i in x: X[i]=NullClearner(X[i]) X=EncodeX(X) Y=NullClearner(Y) X.head() ###Output _____no_output_____ ###Markdown Correlation MapIn order to check the correlation between the features, we will plot a correlation matrix. It is effective in summarizing a large amount of data where the goal is to see patterns. ###Code f,ax = plt.subplots(figsize=(18, 18)) matrix = np.triu(X.corr()) se.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix) plt.show() ###Output _____no_output_____ ###Markdown Data Rescaling MaxAbsScalerScale each feature by its maximum absolute value. This estimator scales and translates each feature individually such that the maximal absolute value of each feature in the training set will be 1.0. 
It does not shift/center the data, and thus does not destroy any sparsity. Reference URL to the MaxAbsScaler API: https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MaxAbsScaler.html ###Code
X_Scaled=MaxAbsScaler().fit_transform(X)
X=pd.DataFrame(X_Scaled,columns=X.columns)
X.head()
###Output _____no_output_____
###Markdown Choosing the number of components A vital part of using Sparse PCA in practice is the ability to estimate how many components are needed to describe the data. This can be determined by looking at the cumulative explained variance ratio as a function of the number of components. This curve quantifies how much of the total variance is contained within the first N components. Explained Variance Explained variance refers to the variance explained by each of the principal components (eigenvectors). It can be represented as the ratio of the related eigenvalue to the sum of the eigenvalues of all eigenvectors. The function below returns a list with the explained variance values and also plots the cumulative explained variance. ###Code
def explained_variance_plot(X):
    cov_matrix = np.cov(X, rowvar=False) # returns the covariance matrix for the features
    egnvalues, egnvectors = eigh(cov_matrix) # eigen decomposition fetches eigenvalues and eigenvectors
    total_egnvalues = sum(egnvalues)
    var_exp = [(i/total_egnvalues) for i in sorted(egnvalues, reverse=True)]
    plt.plot(np.cumsum(var_exp))
    plt.xlabel('number of components')
    plt.ylabel('cumulative explained variance');
    return var_exp

var_exp=explained_variance_plot(X)
###Output _____no_output_____
###Markdown Scree plot The scree plot helps you to determine the optimal number of components. The eigenvalue of each component in the initial solution is plotted. Generally, you want to extract the components on the steep slope; the components on the shallow slope contribute little to the solution. ###Code
plt.plot(var_exp, 'ro-', linewidth=2)
plt.title('Scree Plot')
plt.xlabel('Principal Component')
plt.ylabel('Proportion of Variance Explained')
plt.show()
###Output _____no_output_____
###Markdown Model Sparse PCA is used to decompose a multivariate dataset into a set of successive orthogonal components that explain a maximum amount of the variance. In scikit-learn, Sparse PCA finds the set of sparse components that can optimally reconstruct the data. The amount of sparseness is controllable by the coefficient of the L1 penalty, given by the parameter alpha. Tuning parameters reference: [API](https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.SparsePCA.html) ###Code
spca = SparsePCA(n_components=15)
spcaX = pd.DataFrame(data = spca.fit_transform(X))
###Output _____no_output_____
###Markdown Output Dataframe ###Code
finalDf = pd.concat([spcaX, Y], axis = 1)
finalDf.head()
###Output _____no_output_____
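###Markdown As a follow-up that is not part of the original template: `SparsePCA` (unlike `PCA`) does not provide an `explained_variance_ratio_` attribute, which is why the eigenvalue-based plots above are used to choose `n_components`. The sketch below, run on randomly generated stand-in data rather than the dataset loaded above, simply illustrates how the `alpha` penalty drives component loadings to exactly zero: ###Code
import numpy as np
from sklearn.decomposition import SparsePCA

rng = np.random.RandomState(0)
X_demo = rng.normal(size=(100, 10))   # arbitrary stand-in data

for alpha in [0.1, 1.0, 5.0]:
    spca_demo = SparsePCA(n_components=3, alpha=alpha, random_state=0)
    spca_demo.fit(X_demo)
    zero_fraction = np.mean(spca_demo.components_ == 0)
    print("alpha =", alpha, "-> fraction of zero loadings:", round(zero_fraction, 2))
###Output _____no_output_____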
PSTryGetVariable.ipynb
###Markdown Only works running locallyIt doesn't work yet in Binder because it relies on HTTP communication between the kernel and the Jupyter frontend.Using `interactive.powershell.getVariable` we fetch the variable a value. Multi-languageRun PowerShell and HTML together, and pull data from the PowerShell runspace. ###Code $a = get-process | select -first 5 company, name, handle*,vm, pm | ConvertTo-Html | out-string #!html <p id="p1"/> <script> interactive.powershell.getVariable("a") .then(data => { document.getElementById("p1").innerHTML = data }); </script> ###Output _____no_output_____
notebooks/16.03-GNU-Parallel-Via-Slurm.ipynb
###Markdown *This notebook contains material from [PyRosetta](https://RosettaCommons.github.io/PyRosetta.notebooks);content is available [on Github](https://github.com/RosettaCommons/PyRosetta.notebooks.git).* Example of Using PyRosetta with GNU Parallel *Note:* In this tutorial, we will write a simple PyRosetta script to disk, and demonstrate how to run it in parallel using GNU parallel. This Jupyter notebook uses parallelization and is **not** meant to be executed within a Google Colab environment.**Please see setup instructions in Chapter 16.00** ###Code import logging logging.basicConfig(level=logging.INFO) import os import sys if 'google.colab' in sys.modules: print("This Jupyter notebook uses parallelization and is therefore not set up for the Google Colab environment.") sys.exit(0) %%file outputs/mutate_residue_from_command_line.py import argparse import os import pyrosetta import uuid __author__ = "My Name" __email__ = "[email protected]" def main(target=None, new_res=None): """Example function to run my custom PyRosetta script.""" # Initialize PyRosetta with custom flags pyrosetta.init("-ignore_unrecognized_res 1 -renumber_pdb 1 -mute all") # Setup pose pose = pyrosetta.pose_from_file("inputs/5JG9.clean.pdb") # Setup directory structure main_dir = os.getcwd() unique_dir = os.path.join("outputs", "testing_" + uuid.uuid4().hex) if not os.path.isdir(unique_dir): os.mkdir(unique_dir) os.chdir(unique_dir) # Create scorefunction scorefxn = pyrosetta.create_score_function("ref2015_cart") # PyRosetta design protocol keep_chA = pyrosetta.rosetta.protocols.grafting.simple_movers.KeepRegionMover( res_start=str(pose.chain_begin(1)), res_end=str(pose.chain_end(1)) ) keep_chA.apply(pose) mutate = pyrosetta.rosetta.protocols.simple_moves.MutateResidue( target=target, new_res=new_res ) mutate.apply(pose) mm = pyrosetta.rosetta.core.kinematics.MoveMap() mm.set_bb(True) mm.set_chi(True) min_mover = pyrosetta.rosetta.protocols.minimization_packing.MinMover() min_mover.set_movemap(mm) min_mover.score_function(scorefxn) min_mover.min_type("lbfgs_armijo_nonmonotone") min_mover.cartesian(True) min_mover.tolerance(0.01) min_mover.max_iter(200) min_mover.apply(pose) total_score = scorefxn(pose) # Setup outputs pdb_output_filename = "_".join(["5JG9.clean", str(target), str(new_res), ".pdb"]) pyrosetta.dump_pdb(pose, pdb_output_filename) # Append scores to scorefile pyrosetta.toolbox.py_jobdistributor.output_scorefile( pose=pose, pdb_name="5JG9.clean", current_name=pdb_output_filename, scorefilepath="score.fasc", scorefxn=scorefxn, nstruct=1, native_pose=None, additional_decoy_info=None, json_format=True ) os.chdir(main_dir) if __name__ == "__main__": # Declare parser object for managing input options parser = argparse.ArgumentParser() parser.add_argument("-t", "--target", type=int, help="Target residue to mutate as integer.") parser.add_argument("-r", "--new_res", type=str, help="Three letter amino acid code to which to mutate target.") args = parser.parse_args() # Run protocol main(target=args.target, new_res=args.new_res) ###Output Writing outputs/mutate_residue_from_command_line.py ###Markdown Now we will run this script in parallel several different ways to demonstrate different types of job submission styles. 1. 
Parallelize script in an interactive session: On your laptop, you have access to as many cores as are on your machine.On a high-performance computing cluster with SLURM scheduling, you first have to request as many cores as you want to run on in an interactive login session:>qlogin -c 8 --mem=16gwill reserve 8 CPU cores and 16 GB of RAM for you and start your session on a node that has available resources.Then, we need to write a run file to disc specifying our input parameters ###Code with open("outputs/run_file_parallel_interactive.txt", "w") as f: for target in [2, 4, 6, 8, 10]: for new_res in ["ALA", "TRP"]: f.write("{0} outputs/mutate_residue_from_command_line.py -t {1} -r {2}\n".format(sys.executable, target, new_res)) ###Output _____no_output_____ ###Markdown **Note**: it's always a good idea to run just one command first to make sure there aren't any errors in your script!**Note**: if you don't specify the correct python executable, activate the correct conda environment in your interactive session first.Now submit `outputs/run_file_parallel_interactive.txt` to GNU parallel from the command line in your interactive session:>cat outputs/run_file_parallel_interactive.txt | parallel -j 8 --no-notice &**Note**: The `parallel` exectuable is usually located at `/usr/bin/parallel` but the full path may differ on your computer. For installation info, visit: https://www.gnu.org/software/parallel/ 2. Parallelize script on a high-performance computing cluster with Slurm scheduling (non-interactive submission): We use GNU parallel again, but this time there is no need to pre-request server allocations. We can submit jobs to the Slurm scheduler from directly within this Jupyter Notebook or from command line!Useful background information: - "Slurm is an open source, fault-tolerant, and highly scalable cluster management and job scheduling system for large and small Linux clusters. ... As a cluster workload manager, Slurm has three key functions. First, it allocates exclusive and/or non-exclusive access to resources (compute nodes) to users for some duration of time so they can perform work. Second, it provides a framework for starting, executing, and monitoring work (normally a parallel job) on the set of allocated nodes. Finally, it arbitrates contention for resources by managing a queue of pending work." Read further: https://slurm.schedmd.com/overview.html - With the Slurm scheduler we will use the `sbatch` command, therefore it may be useful to review the available options:https://slurm.schedmd.com/sbatch.htmlFirst, write a SLURM submission script to disk specifying the job requirements. 
In this example, our conda environment is called `pyrosetta-bootcamp`: ###Code with open("outputs/sbatch_parallel.sh", "w") as f: f.write("#!/bin/bash \n") # Bash script f.write("#SBATCH -p short \n") # Specify "short" partition/queue f.write("#SBATCH -n 8 \n") # Specify eight cores f.write("#SBATCH -N 1 \n") # Specify one node f.write("#SBATCH --mem=16g \n") # Specify 16GB RAM over eight cores f.write("#SBATCH -o sbatch_parallel.log \n") # Specify output log filename f.write("conda activate pyrosetta-bootcamp \n") # Activate conda environment f.write("cat outputs/run_file_parallel_interactive.txt | /usr/bin/parallel -j 8 --no-notice \n") # Submit jobs to GNU parallel ###Output _____no_output_____ ###Markdown Then, submit `outputs/sbatch_parallel.sh` to the SLURM scheduler with the `sbatch` command: ###Code if not os.getenv("DEBUG"): !sbatch outputs/sbatch_parallel.sh ###Output Submitted batch job 12942806 ###Markdown We can then periodically check on the status of our jobs: ###Code !squeue -u $USER ###Output JOBID PARTITION NAME USER ST TIME NODES NODELIST(REASON) 12942806 short sbatch.s klimaj PD 0:00 1 (Priority) 12931193 interacti zsh klimaj R 2:17:02 1 dig1 12931183 interacti jupyter- klimaj R 2:17:37 1 dig5 12935447 interacti zsh klimaj R 58:46 1 dig1 ###Markdown 3. Submit jobs individually to the SLURM scheduler: This time, we submit each job individually using the `sbatch` command directly on our PyRosetta script.**Warning**: do not submit more than ~1000 jobs with this method or risk clogging the SLURM scheduler!First, copy your python executable and paste it on the first line of the PyRosetta script after `!`, followed by `SBATCH` commands for each job: ###Code sys.executable %%file outputs/mutate_residue_from_sbatch.py #!/home/klimaj/anaconda3/envs/pyrosetta-bootcamp/bin/python #SBATCH -p short #SBATCH -n 1 #SBATCH --mem=2g #SBATCH -o sbatch.log import argparse import os import pyrosetta import uuid __author__ = "My Name" __email__ = "[email protected]" def main(target=None, new_res=None): """Example function to run my custom PyRosetta script. 
""" # Initialize PyRosetta with custom flags pyrosetta.init("-ignore_unrecognized_res 1 -renumber_pdb 1 -mute all") # Setup pose pose = pyrosetta.pose_from_file("inputs/5JG9.clean.pdb") # Setup directory structure main_dir = os.getcwd() unique_dir = os.path.join("outputs", "testing_" + uuid.uuid4().hex) if not os.path.isdir(unique_dir): os.mkdir(unique_dir) os.chdir(unique_dir) # Create scorefunction scorefxn = pyrosetta.create_score_function("ref2015_cart") # PyRosetta design protocol keep_chA = pyrosetta.rosetta.protocols.grafting.simple_movers.KeepRegionMover( res_start=str(pose.chain_begin(1)), res_end=str(pose.chain_end(1)) ) keep_chA.apply(pose) mutate = pyrosetta.rosetta.protocols.simple_moves.MutateResidue( target=target, new_res=new_res ) mutate.apply(pose) mm = pyrosetta.rosetta.core.kinematics.MoveMap() mm.set_bb(True) mm.set_chi(True) min_mover = pyrosetta.rosetta.protocols.minimization_packing.MinMover() min_mover.set_movemap(mm) min_mover.score_function(scorefxn) min_mover.min_type("lbfgs_armijo_nonmonotone") min_mover.cartesian(True) min_mover.tolerance(0.01) min_mover.max_iter(200) min_mover.apply(pose) total_score = scorefxn(pose) # Setup outputs pdb_output_filename = "_".join(["5JG9.clean", str(target), str(new_res), ".pdb"]) pyrosetta.dump_pdb(pose, pdb_output_filename) # Append scores to scorefile pyrosetta.toolbox.py_jobdistributor.output_scorefile( pose=pose, pdb_name="5JG9.clean", current_name=pdb_output_filename, scorefilepath="score.fasc", scorefxn=scorefxn, nstruct=1, native_pose=None, additional_decoy_info=None, json_format=True ) os.chdir(main_dir) if __name__ == "__main__": # Declare parser object for managing input options parser = argparse.ArgumentParser() parser.add_argument("-t", "--target", type=int, help="Target residue to mutate as integer.") parser.add_argument("-r", "--new_res", type=str, help="Three letter amino acid code to which to mutate target.") args = parser.parse_args() # Run protocol main(target=args.target, new_res=args.new_res) ###Output Writing outputs/mutate_residue_from_sbatch.py ###Markdown Then, loop over your input parameters submitting the PyRosetta scripts to the scheduler using the `sbatch` command: ###Code if not os.getenv("DEBUG"): for target in [2, 4, 6, 8, 10]: for new_res in ["ALA", "TRP"]: !sbatch ./outputs/mutate_residue_from_sbatch.py -t $target -r $new_res ###Output Submitted batch job 12946283 Submitted batch job 12946284 Submitted batch job 12946285 Submitted batch job 12946286 Submitted batch job 12946287 Submitted batch job 12946288 Submitted batch job 12946289 Submitted batch job 12946290 Submitted batch job 12946291 Submitted batch job 12946292 ###Markdown We can then periodically check on the status of our jobs: ###Code !squeue -u $USER ###Output JOBID PARTITION NAME USER ST TIME NODES NODELIST(REASON) 12946286 short mutate_r klimaj PD 0:00 1 (Resources) 12946287 short mutate_r klimaj PD 0:00 1 (Priority) 12946288 short mutate_r klimaj PD 0:00 1 (Priority) 12946289 short mutate_r klimaj PD 0:00 1 (Priority) 12946290 short mutate_r klimaj PD 0:00 1 (Priority) 12946291 short mutate_r klimaj PD 0:00 1 (Priority) 12946292 short mutate_r klimaj PD 0:00 1 (Priority) 12931193 interacti zsh klimaj R 2:19:28 1 dig1 12931183 interacti jupyter- klimaj R 2:20:03 1 dig5 12935447 interacti zsh klimaj R 1:01:12 1 dig1 12946283 short mutate_r klimaj R 0:00 1 dig73 12946284 short mutate_r klimaj R 0:00 1 dig87 12946285 short mutate_r klimaj R 0:00 1 dig100
mytest/MachineLearn/7_prac.ipynb
###Markdown 8. Voting Classifier ###Code
from sklearn.model_selection import train_test_split
from sklearn.datasets import fetch_openml

mnist = fetch_openml('mnist_784', version=1)

X_train_val, X_test, y_train_val, y_test = train_test_split(
    mnist.data, mnist.target, test_size=10000, random_state=42)
X_train, X_val, y_train, y_val = train_test_split(
    X_train_val, y_train_val, test_size=10000, random_state=42)
###Output _____no_output_____
###Markdown Exercise: _Then train various classifiers, such as a Random Forest classifier, an Extra-Trees classifier, and an SVM._ ###Code
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.svm import LinearSVC
from sklearn.neural_network import MLPClassifier

random_forest_clf = RandomForestClassifier(n_estimators=100, random_state=42)
extra_trees_clf = ExtraTreesClassifier(n_estimators=100, random_state=42)
svm_clf = LinearSVC(max_iter=100, tol=20, random_state=42)
mlp_clf = MLPClassifier(random_state=42)

estimators = [random_forest_clf, extra_trees_clf, svm_clf, mlp_clf]
for estimator in estimators:
    print("Training the", estimator)
    estimator.fit(X_train, y_train)

[estimator.score(X_val, y_val) for estimator in estimators]
###Output _____no_output_____
###Markdown The linear SVM performs much worse than the other classifiers. However, it might still improve the voting classifier's performance, so we will keep it for now. Exercise: _Next, try to combine them into an ensemble that outperforms each individual classifier on the validation set, using soft or hard voting._ ###Code
from sklearn.ensemble import VotingClassifier

named_estimators = [
    ("random_forest_clf", random_forest_clf),
    ("extra_trees_clf", extra_trees_clf),
    ("svm_clf", svm_clf),
    ("mlp_clf", mlp_clf),
]

voting_clf = VotingClassifier(named_estimators)
voting_clf.fit(X_train, y_train)
voting_clf.score(X_val, y_val)
[estimator.score(X_val, y_val) for estimator in voting_clf.estimators_]
###Output _____no_output_____
###Markdown Let's see whether removing the SVM model improves performance. You can exclude a particular estimator by setting it to `None` with `set_params()`, like this: ###Code
voting_clf.set_params(svm_clf=None)
voting_clf.estimators
voting_clf.estimators_
###Output _____no_output_____
###Markdown You can either retrain the `VotingClassifier`, or simply remove the SVM model from the list of trained estimators: ###Code
del voting_clf.estimators_[2]
###Output _____no_output_____
###Markdown Let's evaluate the `VotingClassifier` again: ###Code
voting_clf.score(X_val, y_val)
###Output _____no_output_____
###Markdown Much better! The SVM model was hurting performance. Now let's try soft voting. There is no need to retrain the classifiers; we can simply set `voting` to `"soft"`: ###Code
voting_clf.voting = "soft"
voting_clf.score(X_val, y_val)
###Output _____no_output_____
###Markdown In this case hard voting works better._Once you have found an ensemble, try it on the test set. How much better does it perform compared to the individual classifiers?_ ###Code
voting_clf.voting = "hard"
voting_clf.score(X_test, y_test)
[estimator.score(X_test, y_test) for estimator in voting_clf.estimators_]
###Output _____no_output_____
###Markdown Here the voting classifier only very slightly reduces the error rate of the best model. 9. Stacking Ensemble Exercise: _Run the individual classifiers from the previous exercise to make predictions on the validation set, and create a new training set with the resulting predictions: each training sample is a vector containing the predictions of all your classifiers for one image, and the target is the image's class. Train a classifier on this new training set._ ###Code
import numpy as np

X_val_predictions = np.empty((len(X_val), len(estimators)), dtype=np.float32)

for index, estimator in enumerate(estimators):
    X_val_predictions[:, index] = estimator.predict(X_val)

X_val_predictions
rnd_forest_blender = RandomForestClassifier(n_estimators=200, oob_score=True, random_state=42)
rnd_forest_blender.fit(X_val_predictions, y_val)
rnd_forest_blender.oob_score_
###Output _____no_output_____
###Markdown You could fine-tune this blender or try other kinds of blenders (an `MLPClassifier`, for example), then select the best one using cross-validation, as always. Exercise: _Congratulations, you have just trained a blender, and together with the classifiers it forms a stacking ensemble!
Now evaluate the ensemble on the test set. For each image in the test set, make predictions with all your classifiers, then feed the predictions to the blender to get the ensemble's predictions. How does it compare to the voting classifier you trained earlier?_ ###Code
X_test_predictions = np.empty((len(X_test), len(estimators)), dtype=np.float32)

for index, estimator in enumerate(estimators):
    X_test_predictions[:, index] = estimator.predict(X_test)

y_pred = rnd_forest_blender.predict(X_test_predictions)
from sklearn.metrics import accuracy_score
accuracy_score(y_test, y_pred)
###Output _____no_output_____
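###Markdown Side note, not part of the exercise: scikit-learn 0.22 added a built-in `StackingClassifier` that builds the blender from out-of-fold predictions automatically, so the manual validation-set split above can be replaced by cross-validation. A rough sketch reusing the estimators defined earlier (and leaving out the weak linear SVM) is shown below; be aware that fitting it on the full MNIST training set is slow. ###Code
from sklearn.ensemble import StackingClassifier

stack_clf = StackingClassifier(
    estimators=[
        ("random_forest_clf", random_forest_clf),
        ("extra_trees_clf", extra_trees_clf),
        ("mlp_clf", mlp_clf),
    ],
    final_estimator=RandomForestClassifier(n_estimators=200, random_state=42),
    cv=3,
)
stack_clf.fit(X_train, y_train)
stack_clf.score(X_test, y_test)
###Output _____no_output_____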
src/phenotype_train.ipynb
###Markdown STEP 1: Load Beacon, MAF, Reference and other cached variables ###Code features = [ 'EyeColor','HairType','HairColor','TanAbility','Asthma','LactoseIntolerance',#'BloodType', 'EarWax','Freckling','TongueRoller','RingFinger','Intolerance','WidowPeak','ADHD','Acrophobia', 'FingerHair','Myopia','IrritableBowel','IndexLongerBig','Photoptarmis','Migraine','RhProtein'] with open(join(opensnpPath, "OpenSNP_Phenotype.pickle"), 'rb') as handle: pheno = pickle.load(handle) pheno = pheno[features] pheno[pheno=="Auburn"] = "Blonde" pheno[pheno=="Black"] = "Brown" with open(join(opensnpPath, "MAF.pickle"), 'rb') as handle: maf = pickle.load(handle) with open(join(opensnpPath, "Reference.pickle"), 'rb') as handle: reference = pickle.load(handle) reference = reference.values with open(join(opensnpPath, "Beacon.pickle"), 'rb') as handle: beacon = pickle.load(handle) with open(join(opensnpPath, "BinaryBeacon.pickle"), 'rb') as handle: binary = pickle.load(handle) with open(join(opensnpPath, "TernaryBeacon.pickle"), 'rb') as handle: ternary = pickle.load(handle) ###Output _____no_output_____ ###Markdown Constrainted Indices ###Code pheno5People = pheno.iloc[np.where(np.sum(pheno != "-", axis = 1) >=10)[0]].index pheno5People = pheno5People.map(str) pheno5People = np.where(beacon.columns.isin(pheno5People))[0] pheno1People = pheno.iloc[np.where(np.sum(pheno != "-", axis = 1) >= 1)[0]].index pheno1People = pheno1People.map(str) pheno1People = np.where(beacon.columns.isin(pheno1People))[0] phenoAllPeople = np.arange(beacon.shape[1]) ###Output _____no_output_____ ###Markdown STEP 1.2: Function Definitions ###Code # Beacon operations def queryBeacon(beacon_people): return binary[:, beacon_people].any(axis=1) def getMutationAt(index): temp = maf.iloc[index] if temp["minor_freq"] == temp["maf"]: return temp["minor"] + temp["minor"] else: return temp["major"] + temp["major"] def div(n, d): return n / d if d else 0 def rpaCalculate(tp,fp,tn,fn): recall = div(tp,(tp+fn)) precision = div(tp,(tp+fp)) accuracy = div((tp+tn),(tp+fp+tn+fn)) return recall, precision, accuracy def getTrainingData(phenotype, pos, test_people): # Find indices of people who has the specified feature feature_label = pheno[pheno[phenotype] != "-"][phenotype] existing = beacon.columns.isin(feature_label.index.values) existing[test_people] = False # Get training data X = binary[pos][:, existing].T Y = feature_label[beacon.columns[existing]].values return X, Y # Performance method def performance(person, reconstruction, eval_pos, reference): ind = np.logical_and(person[eval_pos] != np.squeeze(reference)[eval_pos], person[eval_pos] != "NN") tp = np.sum(reconstruction[eval_pos][ind] != np.squeeze(reference)[eval_pos][ind]) fn = np.sum(ind) - tp fp = np.sum(reconstruction[eval_pos][~ind] != np.squeeze(reference)[eval_pos][~ind]) tn = np.sum(~ind) - fp return tp, fp, tn, fn def performance_f(test_people, reconstructed, add_count, cluster_count, eval_pos): total_values = np.zeros((4)) best_matches = [] # For all people in victim set for i in range(add_count): all_combinations = np.zeros((4, cluster_count)) rpa = np.zeros((3, cluster_count)) # For each cluster obtained for j in range(cluster_count): all_combinations[:, j] = performance(test_people[i], reconstructed[j], eval_pos, reference) rpa[:, j] = rpaCalculate(*all_combinations[:, j]) ind = np.argmax(rpa[0,:]*rpa[1,:]) #Best-match index best_matches.append(ind) total_values += all_combinations[:, ind] #Add total tp-fp-tn-fn recall, precision, accuracy = rpaCalculate(*total_values) 
print("Recall_Micro_Avg =", round(recall, 2),"\nPrecision_Micro_Avg =", round(precision, 2)) return (precision,recall,accuracy), total_values, best_matches ###Output _____no_output_____ ###Markdown STEP 2: Choose random people and send query to Beacon to obtain No-Yes answers ###Code def getNoYes(add_count, beacon_size): # Take people for added group added_people = pheno5People.copy() random.shuffle(added_people) added_people = added_people[:add_count] # Take people for beacon beacon_people = np.setdiff1d(phenoAllPeople, added_people) random.shuffle(beacon_people) beacon_people = beacon_people[:beacon_size] # Query Beacon initially before = queryBeacon(beacon_people) # Add people updated_beacon = np.concatenate([added_people,beacon_people]) # Query Beacon again after = queryBeacon(updated_beacon) # Find No-Yes SNPs' indices no_yes_indices = np.where(np.logical_and(before==False, after==True))[0] yes_yes_indices = np.where(np.logical_and(before==True, after==True))[0] print("Number of No-Yes SNP's : ", len(no_yes_indices)) return yes_yes_indices, no_yes_indices, added_people ###Output _____no_output_____ ###Markdown STEP 3: Correlation Model ###Code def builtSNPNetwork(no_yes_indices, model_ind, reference): model = ternary[no_yes_ind][:, model_ind].astype(float) model[model==-1] = np.nan x = pairwise_distances(model, metric = "sokalmichener", n_jobs=-1) x = 1-np.nan_to_num(x) return x ###Output _____no_output_____ ###Markdown Spectral Clustering ###Code def spectralClustering(no_yes_indices, add_count, x, reference, cluster_count=None): if not cluster_count: cluster_count = add_count sc = SpectralClustering(cluster_count, affinity='precomputed', n_init=100, n_jobs=-1) sc.fit(np.array(x)) bins = [] for i in range(cluster_count): temp = [] for element in np.where(sc.labels_==i)[0]: temp.append(no_yes_indices[element]) #print("Bin " + str(i) + " has " + str(len(temp)) + " SNP's") bins.append(temp) reconstructed = np.array([reference.T[0] for i in range(cluster_count)]) for i in range(cluster_count): for j in bins[i]: reconstructed[i][j] = getMutationAt(j) return reconstructed ###Output _____no_output_____ ###Markdown Genome Reconstruction Brute Force Test-Set Search ###Code experiments = [(2, 20, 0.9), (3, 30, 0.8), (5, 50, 0.8), (10, 100, 0.8), (20, 100, 0.65), (30, 100, 0.6), (40, 100, 0.55)] for e in experiments: add_count = e[0] cluster_count = add_count beacon_size = e[1] target = e[2] test_sets = [] for i in range(10): precision, recall = 0, 0 while precision + recall < target * 2: yes_yes_ind, no_yes_ind, added_people = getNoYes(add_count, beacon_size) correlations = builtSNPNetwork(no_yes_ind, pheno5People, reference) reconstructed_spectral = spectralClustering(no_yes_ind, add_count, correlations, reference) (precision,recall,accuracy), _, matches = performance_f(beacon.iloc[:, added_people].values.T,reconstructed_spectral,add_count,cluster_count,no_yes_ind) gs = [yes_yes_ind, no_yes_ind, added_people] test_sets.append(gs) filename = str(e[0]) + "_testset2.pkl" with open(join(beacons, filename), 'wb') as f: pickle.dump(test_sets, f) ###Output _____no_output_____ ###Markdown Phenotype Prediction ###Code def evaluate_ensemble(models, x_test, y_test, add_count, cluster_count): # Predict results = [] for i in models: results.append(i[1].predict_proba(x_test)) labels = [i[0] for i in models] top3, top1 = 0, 0 # For each person for i in range(add_count): test_person = y_test[labels].iloc[i] available_phenotypes = np.where(test_person != "-")[0] # For each reconstructed genome probs = 
np.zeros((cluster_count)) for j in range(cluster_count): # For each available phenotype for k in available_phenotypes: target_label_ind = np.where(models[k][1].classes_ == test_person[k])[0] probs[j] += results[k][j][target_label_ind]# * models[k][2] # Top k matched_ind = np.argsort(probs)[-3:] print(probs, "\n", matched_ind, "--", matches[i], "\n") if matches[i] in matched_ind: top3 += 1 if matches[i] == matched_ind[-1]: top1 += 1 print("Top-1 Accuracy= ", top1 / add_count, "\tTop-3 Accuracy= ", top3 / add_count) return top1 / add_count, top3 / add_count def train_models(train_snps, test_people): models = [] count = 1 for feature in features: X, Y = getTrainingData(phenotype=feature, pos=train_snps, test_people=test_people) print("\n",count, ".", feature, "\tlabels=", np.unique(Y)) # Upsampling X, Y = SMOTE().fit_sample(X, Y) # Train the model rf = RandomForestClassifier(class_weight='balanced_subsample',oob_score=True,n_jobs=-1) cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=1) model = GridSearchCV(cv=cv, estimator=rf, scoring='f1_macro', param_grid=parameters,verbose=0,n_jobs=-1) result = model.fit(X, Y) print("Best: %f using %s" % (result.best_score_, result.best_params_)) best_model = result.best_estimator_ best_score = (result.best_score_ + best_model.oob_score_) / 2 #best_model.fit(X, Y) if best_score > 1.2 / len(np.unique(Y)): count += 1 print("Train:", round(best_model.score(X, Y), 2), " | Validation:", round(best_score,2)) models.append((feature, model, best_score)) return models Estimators = [100] # n_estimators Depths = [3] # max_depth (None olabilir) MinSample = [0.05, 0.075] # min_samples_leaf MaxFeatures = [0.75] # min_samples_leaf Criterion = ["gini"] # criterion parameters = {"max_depth": Depths, "min_samples_leaf": MinSample, "criterion": Criterion, "n_estimators": Estimators, "max_features": MaxFeatures} ###Output _____no_output_____ ###Markdown Single Experiment ###Code experiments = [(2,20, 0.9),(3,30, 0.8),(5,50, 0.8),(10,100, 0.8),(20,100, 0.65)] e = experiments[1] add_count = e[0] with open(join(testSets, str(add_count) + "_testset2.pkl"), 'rb') as f: test_sets = pickle.load(f) top1s, top3s = [], [] for i in range(10): yes_yes_ind, no_yes_ind, added_people = test_sets[i] model_ind = np.setdiff1d(pheno1People, added_people) # Genome Reconstruction correlations = builtSNPNetwork(no_yes_ind, model_ind, reference) reconstructed_spectral = spectralClustering(no_yes_ind, add_count, correlations, reference) (precision,recall,accuracy), _, matches = performance_f(beacon.iloc[:, added_people].values.T,reconstructed_spectral,add_count,add_count,no_yes_ind) # Phenotype Prediction models = train_models(train_snps=no_yes_ind, test_people=added_people) # Test Data x_test = (reconstructed_spectral[:, no_yes_ind] != reference[no_yes_ind].T).astype(np.int8) y_test = pheno.loc[beacon.columns[added_people]] # Performance top1, top3 = evaluate_ensemble(models, x_test, y_test, add_count, add_count) top1s.append(top1) top3s.append(top3) print("Top-1= ", np.mean(top1s), "\tTop-3= ", np.mean(top3s)) ###Output Recall_Micro_Avg = 0.93 Precision_Micro_Avg = 0.87 1 . EyeColor labels= ['Blue' 'Brown'] Best: 0.523848 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.05, 'n_estimators': 100} 1 . HairType labels= ['Curly' 'Straight'] Best: 0.602299 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.05, 'n_estimators': 100} Train: 0.69 | Validation: 0.62 2 . 
HairColor labels= ['Blonde' 'Brown'] Best: 0.630348 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.05, 'n_estimators': 100} Train: 0.69 | Validation: 0.63 3 . TanAbility labels= ['No' 'Yes'] Best: 0.569997 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.05, 'n_estimators': 100} 3 . Asthma labels= ['No' 'Yes'] Best: 0.593814 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.05, 'n_estimators': 100} Train: 0.7 | Validation: 0.6 4 . LactoseIntolerance labels= ['Intolerant' 'Tolerant'] Best: 0.641129 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.05, 'n_estimators': 100} Train: 0.71 | Validation: 0.65 5 . EarWax labels= ['Dry' 'Wet'] Best: 0.665579 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.05, 'n_estimators': 100} Train: 0.72 | Validation: 0.67 6 . Freckling labels= ['No' 'Yes'] Best: 0.632225 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.05, 'n_estimators': 100} Train: 0.71 | Validation: 0.62 7 . TongueRoller labels= ['No' 'Yes'] Best: 0.720374 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.05, 'n_estimators': 100} Train: 0.75 | Validation: 0.72 8 . RingFinger labels= ['No' 'Yes'] Best: 0.646986 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.05, 'n_estimators': 100} Train: 0.73 | Validation: 0.64 9 . Intolerance labels= ['Intolerant' 'NoIntolerance'] Best: 0.672959 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.05, 'n_estimators': 100} Train: 0.82 | Validation: 0.7 10 . WidowPeak labels= ['No' 'Yes'] Best: 0.642736 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.05, 'n_estimators': 100} Train: 0.76 | Validation: 0.64 11 . ADHD labels= ['No' 'Yes'] Best: 0.582331 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.075, 'n_estimators': 100} 11 . Acrophobia labels= ['No' 'Yes'] Best: 0.602562 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.075, 'n_estimators': 100} Train: 0.7 | Validation: 0.61 12 . FingerHair labels= ['No' 'Yes'] Best: 0.599276 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.05, 'n_estimators': 100} Train: 0.78 | Validation: 0.61 13 . Myopia labels= ['High' 'Low'] Best: 0.512397 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.075, 'n_estimators': 100} 13 . IrritableBowel labels= ['No' 'Yes'] Best: 0.493295 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.075, 'n_estimators': 100} 13 . IndexLongerBig labels= ['BigLonger' 'IndexLonger'] Best: 0.557567 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.075, 'n_estimators': 100} 13 . Photoptarmis labels= ['No' 'Yes'] Best: 0.498778 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.05, 'n_estimators': 100} 13 . Migraine labels= ['No' 'Yes'] Best: 0.521679 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.075, 'n_estimators': 100} 13 . 
RhProtein labels= ['Negative' 'Positive'] Best: 0.699246 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.05, 'n_estimators': 100} Train: 0.73 | Validation: 0.7 [6.20 5.78 4.69] [2 1 0] -- 2 [7.52 7.12 6.49] [2 1 0] -- 0 [3.98 3.80 4.28] [1 0 2] -- 2 Top-1 Accuracy= 0.6666666666666666 Top-3 Accuracy= 1.0 Recall_Micro_Avg = 0.92 Precision_Micro_Avg = 0.81 1 . EyeColor labels= ['Blue' 'Brown'] Best: 0.532938 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.05, 'n_estimators': 100} 1 . HairType labels= ['Curly' 'Straight'] Best: 0.568505 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.05, 'n_estimators': 100} 1 . HairColor labels= ['Blonde' 'Brown'] Best: 0.624564 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.05, 'n_estimators': 100} Train: 0.68 | Validation: 0.63 2 . TanAbility labels= ['No' 'Yes'] Best: 0.566718 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.05, 'n_estimators': 100} 2 . Asthma labels= ['No' 'Yes'] Best: 0.637837 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.05, 'n_estimators': 100} Train: 0.72 | Validation: 0.64 3 . LactoseIntolerance labels= ['Intolerant' 'Tolerant'] Best: 0.670402 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.05, 'n_estimators': 100} Train: 0.7 | Validation: 0.67 4 . EarWax labels= ['Dry' 'Wet'] Best: 0.684877 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.05, 'n_estimators': 100} Train: 0.75 | Validation: 0.69 5 . Freckling labels= ['No' 'Yes'] Best: 0.574104 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.05, 'n_estimators': 100} 5 . TongueRoller labels= ['No' 'Yes'] Best: 0.686963 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.05, 'n_estimators': 100} Train: 0.74 | Validation: 0.7 6 . RingFinger labels= ['No' 'Yes'] Best: 0.577171 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.075, 'n_estimators': 100} 6 . Intolerance labels= ['Intolerant' 'NoIntolerance'] Best: 0.669801 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.05, 'n_estimators': 100} Train: 0.8 | Validation: 0.67 7 . WidowPeak labels= ['No' 'Yes'] Best: 0.647682 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.05, 'n_estimators': 100} Train: 0.71 | Validation: 0.64 8 . ADHD labels= ['No' 'Yes'] Best: 0.473754 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.075, 'n_estimators': 100} 8 . Acrophobia labels= ['No' 'Yes'] Best: 0.587911 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.05, 'n_estimators': 100} 8 . FingerHair labels= ['No' 'Yes'] Best: 0.625873 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.05, 'n_estimators': 100} Train: 0.74 | Validation: 0.62 9 . Myopia labels= ['High' 'Low'] Best: 0.582262 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.05, 'n_estimators': 100} 9 . IrritableBowel labels= ['No' 'Yes'] Best: 0.539358 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.05, 'n_estimators': 100} 9 . 
IndexLongerBig labels= ['BigLonger' 'IndexLonger'] Best: 0.480920 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.05, 'n_estimators': 100} 9 . Photoptarmis labels= ['No' 'Yes'] Best: 0.497614 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.05, 'n_estimators': 100} 9 . Migraine labels= ['No' 'Yes'] Best: 0.453636 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.075, 'n_estimators': 100} 9 . RhProtein labels= ['Negative' 'Positive'] Best: 0.702196 using {'criterion': 'gini', 'max_depth': 3, 'max_features': 0.75, 'min_samples_leaf': 0.05, 'n_estimators': 100} Train: 0.73 | Validation: 0.71 [4.81 5.15 5.66] [0 1 2] -- 2 [3.61 4.10 4.58] [0 1 2] -- 0 [3.36 5.05 5.00] [0 2 1] -- 2 Top-1 Accuracy= 0.3333333333333333 Top-3 Accuracy= 1.0 ###Markdown All ###Code experiments = [(2,20, 0.9),(3,30, 0.8),(5,50, 0.8),(10,100, 0.8)]#,(20,100, 0.65)] res = [] for e in experiments: add_count = e[0] beacon_size = e[1] with open(join(testSets, str(add_count) + "_testset2.pkl"), 'rb') as f: test_sets = pickle.load(f) top1s = [] top3s = [] for i in range(10): yes_yes_ind, no_yes_ind, added_people = test_sets[i] model_ind = np.setdiff1d(pheno1People, added_people) # Genome Reconstruction correlations = builtSNPNetwork(no_yes_ind, model_ind, reference) reconstructed_spectral = spectralClustering(no_yes_ind, add_count, correlations, reference) (precision,recall,accuracy), _, matches = performance_f(beacon.iloc[:, added_people].values.T,reconstructed_spectral,add_count,add_count,no_yes_ind) # Phenotype Prediction models = train_models(train_snps=no_yes_ind, test_people=added_people) # Test Data x_test = (reconstructed_spectral[:, no_yes_ind] != reference[no_yes_ind].T).astype(np.int8) y_test = pheno.loc[beacon.columns[added_people]] # Performance top1, top3 = evaluate_ensemble(models, x_test, y_test, add_count, add_count) top1s.append(top1) top3s.append(top3) print("Top-1= ", np.mean(top1s), "\tTop-3= ", np.mean(top3s)) res.append((top1s,top3s)) with open(join(beacons, str(add_count) + ".pkl"), 'wb') as f: pickle.dump((top1s,top3s), f) ''' def train_models(train_snps, test_people): models = [] count = 1 for feature in features: X, Y = getTrainingData(phenotype=feature, pos=train_snps, test_people=test_people) print("\n",count, ".", feature, "\tlabels=", np.unique(Y), "\t",end="", flush=True) X, Y = SMOTE().fit_sample(X, Y) model = RandomForestClassifier(n_estimators=100, max_depth=4,criterion='entropy',class_weight='balanced_subsample',max_features=X.shape[1]//4, min_samples_leaf=4,bootstrap=True,verbose=0,n_jobs=-1,oob_score=True) #model = GridSearchCV(cv=10, estimator=rf, scoring='f1_macro', param_grid=parameters,verbose=2,n_jobs=-1) result = model.fit(X, Y) #print("Best: %f using %s" % (result.best_score_, result.best_params_)) #best_estimator = result.best_estimator_ #best_estimator.fit(X, Y) print(model.oob_score_) if model.oob_score_ > 1.2 / len(np.unique(Y)): count += 1 print("\nTrain:", round(model.score(X, Y), 2), " | Out-of-Bag:", round(model.oob_score_,2)) models.append((feature, model, model.oob_score_)) return models ''' ''' def train_models(train_snps, test_people): models = [] count = 1 for feature in features: X, Y = getTrainingData(phenotype=feature, pos=train_snps, test_people=test_people) print("\n",count, ".", feature, "\tlabels=", np.unique(Y), "\t",end="", flush=True) X, Y = SMOTE().fit_sample(X, Y) model = RandomForestClassifier(n_estimators=100, 
max_depth=16,criterion='entropy',class_weight='balanced_subsample',max_features=X.shape[1]//2, min_samples_leaf=2,bootstrap=True,verbose=0,n_jobs=-1,oob_score=True) '''model = BalancedRandomForestClassifier(n_estimators=100, max_depth=16, min_samples_split=2, min_samples_leaf=2, min_weight_fraction_leaf=0, max_features=None, max_leaf_nodes=None, bootstrap=True, oob_score=True, replacement=False, n_jobs=-1, warm_start=False, criterion='entropy', class_weight="balanced_subsample")''' model.fit(X, Y) if model.oob_score_ > 1.2 / len(np.unique(Y)): count += 1 print("\nTrain:", round(model.score(X, Y), 2), " | Out-of-Bag:", round(model.oob_score_,2)) models.append((feature, model, model.oob_score_)) return models ''' ''' def train_models(train_snps, test_people): models = [] count = 1 for feature in features: X, Y = getTrainingData(phenotype=feature, pos=train_snps, test_people=test_people) print("\n",count, ".", feature, "\tlabels=", np.unique(Y), "\t",end="", flush=True) for epoch in range(25): # Train/Val Split x_train, x_val, y_train, y_val = train_test_split(X, Y, test_size=0.2, shuffle=True, stratify=Y) # Upsampling x_train, y_train = SMOTE().fit_sample(x_train, y_train) # Train the model model = RandomForestClassifier(n_estimators=100,max_depth=4,min_samples_leaf=8,criterion="entropy",class_weight='balanced_subsample',bootstrap=True,verbose=0,n_jobs=-1) model.fit(x_train, y_train) y_pred = model.predict(x_val) # Performance result = classification_report(y_val, y_pred, output_dict=True) isBetter = result["macro avg"]["f1-score"] > 1.2 / len(np.unique(y_train)) if isBetter: count += 1 print("\nTrain:", round(model.score(x_train, y_train), 2), " | Test:", round(model.score(x_val, y_val),2)) print(classification_report(y_val, y_pred, output_dict=False)) #model.fit(np.concatenate([x_train, x_val], axis=0), np.concatenate([y_train, y_val], axis=0)) models.append((feature, model, result["macro avg"]["f1-score"])) break print("|", end="", flush=True) return models ''' ''' 1. Reconstruction average performans iyi değil 2. Bilmemiz gereken fenotip sayısı > 10-15 3. Performans nasıl report edeceğiz ? 4. 
Top-1 olmazsa nasıl Membership kısmına bağlayacağız ''' ###Output _____no_output_____ ###Markdown Model Stash ###Code ''' # SINGLE MODELS model = XGBClassifier(objective="multi:softprob",eval_metric="auc",num_class=len(np.unique(y_train)),n_jobs=-1,learning_rate=0.001,tree_method="hist", gamma=3,reg_lambda=10,max_depth=10,max_delta_step=1,colsample_bytree=0.95,scale_pos_weight=10000,num_parallel_tree=8,booster="dart") model = BalancedRandomForestClassifier(n_estimators=150, max_depth=None, min_samples_split=5, min_samples_leaf=2, min_weight_fraction_leaf=0, max_features='auto', max_leaf_nodes=None, bootstrap=True, oob_score=False, replacement=False, n_jobs=-1, warm_start=True, class_weight="balanced") model = LogisticRegression(penalty='l1',random_state=0,solver='saga',multi_class='multinomial',n_jobs=-1,C=10,max_iter=100) model = RandomForestClassifier(n_estimators=128, max_depth=8,class_weight='balanced_subsample',verbose=0,n_jobs=-1) model = BalancedBaggingClassifier() # PIPELINE selecter = SelectKBest(chi2, k=20000) xgb = XGBClassifier(objective="multi:softprob",eval_metric="error",num_class=len(np.unique(y_train)),n_jobs=-1, learning_rate=0.05, gamma=1, max_depth=20,subsample=1, colsample_bytree=1, scale_pos_weight=10000, num_parallel_tree=32) estimators = [('selection', selecter), ('brc', xgb)] model = Pipeline(estimators) # SAMPLING METHODS smote = SMOTE() x_train, y_train = smote.fit_sample(x_train, y_train) rus = RandomUnderSampler() x_train, y_train = rus.fit_resample(x_train, y_train) tom = TomekLinks(ratio="majority") x_train, y_train = tom.fit_sample(x_train, y_train) cc = ClusterCentroids() x_train, y_train = cc.fit_sample(x_train, y_train) # GRID SEARCH cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=1) model = RandomizedSearchCV(cv=5, estimator=xgboost, param_distributions=parameters,n_iter=100,verbose=10,n_jobs=-1) # SAVE MODELS with open(join(models, 'Model_' + feature + '.pkl'), 'wb') as f: pickle.dump(model, f) Gammas = np.linspace(0, 10, num=11) # gamma Depths = np.linspace(4, 10, num=7, dtype=int) # max_depth Deltas = np.logspace(0, 4, num=5, base=2) # max_delta_step Learning = np.logspace(-3, 0, num=4) # learning_rate Lambdas = np.logspace(-3, 0, num=4) # lambda MinChild = np.logspace(0, 5, num=6, base=2) # min_child_weight Scale = np.logspace(0, 6, num=7) # scale_pos_weight Subsample = [1, 0.75] # subsample ColSample = [1, 0.75] # colsample_bytree Forest = [100] # num_parallel_tree parameters = {"learning_rate":Learning, "gamma":Gammas, "max_depth":Depths, "max_delta_step":Deltas, "lambda":Lambdas, "min_child_weight":MinChild, "subsample":Subsample, "colsample_bytree":ColSample, "scale_pos_weight":Scale, "num_parallel_tree":Forest} Estimators= np.logspace(2, 4, num=3, dtype=int) # n_estimators Depths = np.linspace(4, 10, num=7, dtype=int) # max_depth (None olabilir) MinSplit = np.linspace(2, 8, num=7, dtype=int) # min_samples_split MinSample = np.linspace(1, 5, num=6, dtype=int) # min_samples_leaf Impurity = np.logspace(0, 6, num=7, dtype=int) # min_impurity_decrease Criterion = ["gini", "entropy"] # criterion parameters = {"max_depth":Depths, "min_samples_split":MinSplit, "min_samples_leaf":MinSample, "min_impurity_decrease":Impurity, "criterion":Criterion, "n_estimators":Estimators} print("Best: %f using %s" % (result.best_score_, result.best_params_)) means = result.cv_results_['mean_test_score'] stds = result.cv_results_['std_test_score'] params = result.cv_results_['params'] for mean, stdev, param in zip(means, stds, params): print("%f (%f) with: %r" % 
(mean, stdev, param)) ''' ''' with open(join(opensnpPath, "QuadBeacon.pickle"), 'rb') as handle: quad = pickle.load(handle) # 0: one minor 1: double minor 2: double major 3: NN gs = [yes_yes_ind, no_yes_ind, added_people] with open(join(beacons, "goodsetup.pkl"), 'wb') as f: pickle.dump(gs, f) with open(join(beacons, "goodsetup.pkl"), 'rb') as f: yes_yes_ind, no_yes_ind, added_people = pickle.load(f) yes_yes_ind, no_yes_ind, added_people #original_x = binary[no_yes_ind][:,added_people] #test_x = original_x.T #matches2 = np.arange(10) #from imblearn.under_sampling import RandomUnderSampler, ClusterCentroids, TomekLinks #from tensorflow.keras.models import Sequential #from tensorflow.keras.layers import Dense, LeakyReLU, Dropout #from tensorflow.keras.layers import BatchNormalization def train_models2(train_snps, test_people, parameters): models = [] for feature in features: X, Y = getTrainingData(phenotype=feature, pos=train_snps, test_people=test_people) print("\n", feature, "\tlabels=", np.unique(Y), "\t",end="", flush=True) for epoch in range(1): # Upsampling smote = SMOTE() X, Y = smote.fit_sample(X, Y) # Train the model rf = RandomForestClassifier(n_estimators=100, max_depth=16,class_weight='balanced_subsample',verbose=0,n_jobs=-1) model = RandomizedSearchCV(cv=10, estimator=rf, param_distributions=parameters,n_iter=100,verbose=10,n_jobs=-1) result = model.fit(X, Y) print("Best: %f using %s" % (result.best_score_, result.best_params_)) best_model = model.best_estimator_ models.append((feature, best_model, np.mean(result.cv_results_['mean_test_score']), result.cv_results_)) break return models ''' ''' plot_confusion_matrix(cm=np.array([[tp_total,fp_total], [fn_total,tn_total]]), target_names=['mutation', 'normal'], title="Confusion Matrix") # Use rare indices or not ? 
threshold = 0.01 condition = np.logical_and(maf['maf'] < threshold, maf['maf'] > 0) rare_percent = maf[condition].shape[0] / len(giant) * 100 rare_indices = np.where(condition==True)[0] rare_names = maf[condition].index.values print(len(rare_indices)) r = small.columns[np.random.choice(len(small.columns), size=45, replace=False)] %%time # Set NN to MAF values for i in range(mutation_beacon.shape[0]): mutation_beacon[i][ny_beacon[ind].values[i] == "NN"] = maf.iloc[no_yes_ind]["maf"][i] mutation_beacon beacon = pd.read_csv(join(opensnpPath, "Beacon.csv"),sep=',',dtype="category",header=None) le = LabelEncoder() beacon.apply(le.fit_transform) # Confusion matrix plotter method def plot_confusion_matrix(cm,target_names,title='Confusion matrix',cmap=None): accuracy = np.trace(cm) / float(np.sum(cm)) misclass = 1 - accuracy if cmap is None: cmap = plt.get_cmap('Blues') plt.figure(figsize=(8, 6)) plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() if target_names is not None: tick_marks = np.arange(len(target_names)) plt.xticks(tick_marks, target_names, rotation=45) plt.yticks(tick_marks, target_names) thresh = cm.max() / 2 for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, "{:,}".format(cm[i, j]), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass)) plt.show() def onehot_initialization_v3(a): ncols = a.max() + 1 labels_one_hot = (a.ravel()[np.newaxis] == np.arange(ncols)[:, np.newaxis]).T labels_one_hot.shape = a.shape + (ncols,) return labels_one_hot x = onehot_initialization_v3(quad.values.T) x = x.astype(np.int8) # Smoothen for t in range(len(results)): idx = np.argmax(results[t], axis=-1) results[t] = np.zeros(results[t].shape ) results[t][np.arange(results[t].shape[0]), idx] = 1 ny_snps = binary[no_yes_ind][:,added_people].T matches = np.arange(add_count) ''' ''' ## 3. 
Mutual Information def get_pvalues(X, tempy): t11 = np.sum(X.T * tempy, axis=1) t10 = np.sum(X.T * (1-tempy), axis=1) t01 = np.sum((1-X.T) * tempy, axis=1) t00 = np.sum((1-X.T) * (1-tempy), axis=1) t = np.array([np.array([t00[i], t01[i], t10[i], t11[i]]).reshape(2,2) for i in range(X.shape[1])]) values = np.array([stats.fisher_exact(i)[0] for i in t]) probs = np.nan_to_num(values / (1+values), nan=1) probs[probs == 0] = 1e-8 return probs def train_mi(train_snps, test_people): m_infos = [] for feature in features: X, Y = getTrainingData(phenotype=feature, pos=train_snps, test_people=test_people) print(feature, end="", flush=True) labels = np.unique(Y) mis = np.zeros((len(labels), X.shape[1])) for i in range(len(labels)): tempy = Y.copy() tempy[Y != labels[i]] = 0 tempy[Y == labels[i]] = 1 mis[i] = get_pvalues(X, tempy) #tempy[tempy != labels[i]] = "Other" #mis[i] = mutual_info_classif(X, tempy, discrete_features='auto', n_neighbors=3, copy=True) m_infos.append((feature,mis)) return m_infos def test_mi(mis, x_test, y_test): correct = 0 # For each person for i in range(len(y_test)): test_person = y_test.iloc[i] scores = np.ones((len(y_test)), dtype=float) # For each reconstructed genome for j in range(len(y_test)): available_phenotypes = np.where(test_person != "-")[0] for k in available_phenotypes: label = test_person[k] available_labels = np.setdiff1d(pheno.iloc[:, k], "-") pos = np.where(available_labels == label)[0] scores[j] += np.mean(mis[k][1][:, x_test[j]][pos]) #scores[j] += np.log(np.mean(1+1e-8-mis[k][1][:, 1-x_test[j]][pos])) print(scores) matched_ind = np.argsort(scores)[-3:] print(matched_ind, "--", matches[i]) print() if matches[i] in matched_ind: correct += 1 return correct / len(y_test) # Phenotype Prediction x_test = (reconstructed_spectral != reference.T[0])[:, no_yes_ind] y_test = pheno.loc[beacon.columns[added_people]] print("Set: ", i+1) mis = train_mi(train_snps=no_yes_ind, test_people=added_people) accuracy = test_mi(mis, x_test, y_test) print("Accuracy = ", accuracy) overall_accuracy.append(accuracy) ''' ''' # Deep Learning models = [] count = 0 random.shuffle(features) for feature in features: if feature == "Sex": continue # Find indices of people who has the specified feature feature_label = pheno[pheno[feature] != "-"][feature] existing = beacon.columns.isin(feature_label.index.values) existing[added_people] = False X = binary[no_yes_ind][:, existing].T Y = feature_label[beacon.columns[existing]].values from sklearn.preprocessing import OneHotEncoder enc = OneHotEncoder(handle_unknown='ignore') y = enc.fit_transform(Y.reshape(-1, 1)).toarray() x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, shuffle=True, stratify=y) # Train / Test model = Sequential() model.add(Dense(1000, input_dim=X.shape[1], activation=LeakyReLU(alpha=0.1))) model.add(BatchNormalization()) model.add(Dropout(0.5)) model.add(Dense(len(np.unique(Y)), activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy', 'AUC']) model.fit(x_train, y_train, epochs=10, batch_size=len(x_train)) y_pred = model.predict_classes(x_test, verbose=0) # Performance result = classification_report(np.where(y_test)[1], y_pred, output_dict=True) isBetter = result["macro avg"]["f1-score"] > 1.0 / len(np.unique(y_train)) if isBetter: count += 1 print(count, ".", feature, " --> ", np.unique(Y)) #print("Train:", round(model.score(x_train, y_train), 2), " | Test:", round(model.score(x_test, y_test),2)) print(round(result["macro avg"]["f1-score"], 2), ">" , 
1.0 / len(np.unique(y_train)), "\n") models.append((feature, model, result["macro avg"]["f1-score"])) ''' ''' # 2. Including Yes-Yes SNP's isRandom = False percentage = 2 if isRandom: yy_count = len(yes_yes_ind) * percentage // 100 yy_pos = np.random.choice(yes_yes_ind, yy_count, replace=False) train_ind = np.sort(np.concatenate([yy_pos, no_yes_ind])) else: yy_count = len(yes_yes_ind) * percentage // 100 yy_pos = yes_yes_ind[np.argsort(np.var(ternary[yes_yes_ind], axis=1))[::-1]][:yy_count] train_ind = np.sort(np.concatenate([yy_pos, no_yes_ind])) # TRAIN # Get no-yes reconstructed snps ny_pos = np.where(np.in1d(train_ind, no_yes_ind))[0] ny_snps = reconstructed_spectral[:, no_yes_ind] ny_snps = np.logical_and(ny_snps == reference[no_yes_ind].T, ny_snps != "NN") ny_snps = ny_snps.astype(np.int8) correct = 0 labels = [i[0] for i in models] test_y = pheno.loc[beacon.columns[added_people]] # For each person for i in range(len(test_y)): test_person = test_y[labels].iloc[i] # Predict each cluster results = [] test_x = binary[train_ind][:, added_people[i]] test_x = np.expand_dims(test_x, axis=0) test_x = np.repeat(test_x,add_count,axis=0) test_x[:, ny_pos] = ny_snps # For each model for m in models: results.append(m[1].predict_proba(test_x)) # For each reconstructed genome probs = np.zeros((len(test_y))) for j in range(len(test_y)): available_phenotypes = np.where(test_person != "-")[0] # For each available phenotype for k in available_phenotypes: target_label_ind = np.where(models[k][1].classes_ == test_person[k])[0] probs[j] += results[k][j][target_label_ind] print(probs) # Top k matched_ind = np.argsort(probs)[-3:] print(matched_ind, "--", matches[i]) print() if matches[i] in matched_ind: correct += 1 acc = correct / len(test_y) acc ''' ###Output _____no_output_____
notebooks/PART2_03_The_NumPy_library.ipynb
###Markdown The NumPy library [NumPy](http://www.numpy.org/) is the fundamental package for scientific computing with Python. It contains among other things:* a powerful N-dimensional array object* sophisticated (broadcasting) functions* tools for integrating C/C++ and Fortran code* useful linear algebra, Fourier transform, and random number capabilities_Pleae note that this walkthrough is heavily inspired by a [tutorial](http://cs231n.github.io/python-numpy-tutorial/) by [Justin Johnson](https://cs.stanford.edu/people/jcjohns/)._ ###Code import numpy as np ###Output _____no_output_____ ###Markdown The array object A numpy array is a grid of values, all of the same type, and is indexed by a tuple of nonnegative integers. We can initialize numpy arrays from nested Python lists, and access elements using square brackets: ###Code x = np.array([1,3,5,7,9,11,13]) x ###Output _____no_output_____ ###Markdown The number of dimensions is the rank of the array; the shape of an array is a tuple of integers giving the size of the array along each dimension. ###Code x.shape ###Output _____no_output_____ ###Markdown Numpy also provides many functions to create arrays ###Code # Create an array of all zeros a = np.zeros((2,2)) print(a.shape) a # Create an array of all ones b = np.ones((4,2)) print(b.shape) b # Create a constant array c = np.full((2,2), 12) c # Create a 2x2 identity matrix d = np.eye(4) d # Create an array filled with random values # np.random.seed(111) # uncomment for reproducible results e = np.random.random((3,3)) e ###Output _____no_output_____ ###Markdown There are many more ways to create an array (see [the documentation](https://docs.scipy.org/doc/numpy/reference/routines.array-creation.htmlroutines-array-creation) for further details). Array indexingNumpy offers several ways to index into arrays.* __Integer array indexing__: Integer array indexing allows you to construct arbitrary arrays using the data from another array.* __Slicing__: Similar to Python lists, numpy arrays can be sliced. Since arrays may be multidimensional, you must specify a slice for each dimension of the array* __Boolean array indexing__: Boolean array indexing lets you pick out arbitrary elements of an array. Frequently this type of indexing is used to select the elements of an array that satisfy some condition. ###Code aa = np.array([[1,2], [3, 4], [5, 6]]) print(aa.shape) aa ###Output (3, 2) ###Markdown __Integer array indexing__ ###Code aa[0] aa[0,1] aa[[0,1], [1,1]] aa aa[[0, 1, 2], [0, 1, 0]] ###Output _____no_output_____ ###Markdown Slicing ###Code bb = np.array(range(12)).reshape(3,4) print(bb.shape) bb ###Output (3, 4) ###Markdown Pull out the subarray consisting of the first 2 rows of the 2$^\text{nd}$ and 3$^\text{rd}$ columns ###Code bb[:2, 1:3] ###Output _____no_output_____ ###Markdown We can also mix integer indexing with slice indexing. However, doing so will yield an array of lower rank than the original array. 
###Code # Rank 1 view of the second row of bb bb_ = bb[1, :] print(bb_.shape) bb_ # Rank 2 view of the second row of bb _bb = bb[1:2, :] print(_bb.shape) _bb ###Output (1, 4) ###Markdown We can make the same distinction when accessing columns of an array ###Code bb _bb = bb[:, 1] print(_bb.shape) _bb _bb = bb[:, 1:2] print(_bb.shape) _bb ###Output (3, 1) ###Markdown Boolean array indexing ###Code cc = np.linspace(start=5, stop=25, num=16).reshape((4,4)) print(cc.shape) cc cc > 10 cc[cc > 10] cc[(cc > 10) & (cc < 22)] ###Output _____no_output_____ ###Markdown One useful trick with array indexing is selecting or mutating: ###Code cc[(cc > 10) & (cc < 22)] = -999 cc ###Output _____no_output_____ ###Markdown Array math Basic mathematical functions operate elementwise on arrays, and are available both as operator overloads and as functions in the numpy module. ###Code x = np.array([[1,2],[3,4]], dtype=np.float64) y = np.array([[5,6],[7,8]], dtype=np.float64) print("x:\n", x) print("--------------") print("y:\n", y) # Elementwise sum; both produce the array print(x + y) print("--------------") print(np.add(x, y)) # Elementwise difference; both produce the array print(x - y) print("--------------") print(np.subtract(x, y)) # Elementwise product; both produce the array print(x * y) print("--------------") print(np.multiply(x, y)) # Elementwise division; both produce the array print(x / y) print("--------------") print(np.divide(x, y)) # Elementwise square root; produces the array np.sqrt(x) ###Output _____no_output_____ ###Markdown In numpy `*` is elementwise multiplication, not matrix multiplication. We instead use the `dot` function to compute inner products of vectors, to multiply a vector by a matrix, and to multiply matrices. `dot` is available both as a function in the numpy module and as an instance method of array objects. ###Code # vector v and w v = np.array([9,10]) w = np.array([11, 12]) # 2x2 matrix bb = y print("v:\n", v) print("dim:\n", v.shape) print("--------------") print("w:\n", w) print("dim:\n", w.shape) print("--------------") print("--------------") print("aa:\n", aa) print("dim:\n", aa.shape) print("--------------") print("bb:\n", bb) print("dim:\n", bb.shape) print("--------------") ###Output v: [ 9 10] dim: (2,) -------------- w: [11 12] dim: (2,) -------------- -------------- aa: [[1 2] [3 4] [5 6]] dim: (3, 2) -------------- bb: [[5. 6.] [7. 8.]] dim: (2, 2) -------------- ###Markdown $$M_{p\times q} = A_{p\times n} \times B_{n \times q}$$ ###Code # Inner product of vectors print(v.dot(w)) print("--------------") print(np.dot(v, w)) # Matrix / vector product print(aa.dot(v)) print("--------------") print(np.dot(aa, v)) # Matrix / matrix product print(aa.dot(bb)) print("--------------") print(np.dot(aa, bb)) ###Output [[19. 22.] [43. 50.] [67. 78.]] -------------- [[19. 22.] [43. 50.] [67. 78.]] ###Markdown Numpy provides many useful functions for performing computations on arrays; such as `sum`, `mean`, `max`, `min` and others. You can find the full list of mathematical functions provided by numpy in [the documentation](https://docs.scipy.org/doc/numpy/reference/routines.math.html). 
###Code x print(np.sum(x)) # Compute sum of all elements print(np.sum(x, axis=0)) # Compute sum of each column print(np.sum(x, axis=1)) # Compute sum of each row print(np.mean(x)) # Compute mean of all elements print(np.mean(x, axis=0)) # Compute mean of each column print(np.mean(x, axis=1)) # Compute mean of each row print(np.min(x)) # Compute minimum of all elements print(np.min(x, axis=0)) # Compute minimum of each column print(np.min(x, axis=1)) # Compute minimum of each row print(np.max(x)) # Compute maximum of all elements print(np.max(x, axis=0)) # Compute maximum of each column print(np.max(x, axis=1)) # Compute maximum of each row ###Output 4.0 [3. 4.] [2. 4.] ###Markdown Some more useful basic numpy array methodsApart from computing mathematical functions using arrays, we frequently need to reshape or otherwise manipulate data in arrays. ###Code dd = np.arange(start=1, stop=2100, step=100).reshape((7,3)) dd ###Output _____no_output_____ ###Markdown **Transpose an array** ###Code print("Dimensions: ", dd.shape) dd_transposed = dd.T print("Dimensions after transpose ", dd_transposed.shape) dd_transposed ###Output Dimensions: (7, 3) Dimensions after transpose (3, 7) ###Markdown **Reshape an array** ###Code print(dd.shape) dd_reshaped = dd.reshape(-1,1) print(dd_reshaped.shape) dd_reshaped # returns the array, flattened print(dd.shape) dd_flat = dd.ravel() print(dd_flat.shape) dd_flat ###Output (7, 3) (21,) ###Markdown **Stacking together different arrays**Several arrays can be stacked together along different axes. ###Code ee = np.floor(10*np.random.random((2,2))) print("ee:\n", ee) ff = np.floor(10*np.random.random((2,2))) print("ff:\n", ff) # vertical stack np.vstack((ee,ff)) # horizontal stack np.hstack((ee,ff)) ###Output _____no_output_____ ###Markdown > __Final note:__ Please be aware that we only scratched the surface of the functionalities of the numpy library. Check out the official [numpy tutorial](https://docs.scipy.org/doc/numpy-dev/user/quickstart.html) for a dive into numpy.Further you may easily explore numpy's modules and submodules by typing np.into the cell below and press the TAB key for tab completion. ###Code import numpy as np # np. ###Output _____no_output_____
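 ###Markdown One feature named in the introduction, broadcasting, was not demonstrated above. As a small parting sketch (the arrays below are new examples, not ones defined earlier), broadcasting lets elementwise operations combine arrays of different shapes by virtually repeating the smaller array along the missing dimensions. ###Code
import numpy as np

m = np.arange(9).reshape(3, 3)      # shape (3, 3)
row = np.array([10, 20, 30])        # shape (3,)  -> broadcast across each row
col = np.array([[1], [2], [3]])     # shape (3, 1) -> broadcast across each column

print(m + row)
print(m * col)
###Output _____no_output_____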
Classificationn_ANN.ipynb
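###Markdown The EDA cells below use `data`, `dataset`, and the seaborn/matplotlib imports, which are set up before this section. A minimal setup sketch is given here, assuming the scikit-learn breast cancer dataset (the feature names such as 'mean radius' and the binary `target` column match it); the exact loading code in the original notebook may differ. ###Code
# Hypothetical setup cell, assuming the sklearn breast cancer dataset
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.datasets import load_breast_cancer

cancer = load_breast_cancer()
data = pd.DataFrame(cancer.data, columns=cancer.feature_names)   # 30 numeric features
dataset = data.copy()
dataset['target'] = cancer.target                                # binary target (0/1)
###Output _____no_output_____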
###Markdown EDA( Exploratory Data Analysis) ###Code sns.countplot('target',data=dataset) sns.kdeplot(data['worst symmetry']) sns.distplot(data['mean radius'],kde=False,bins=30) sns.distplot(data['mean texture'],kde=True,bins=45,color='green') #Checking for any missing value or null value fig=plt.subplots(figsize=(16,10)) sns.heatmap(data.isnull(),cbar=False,yticklabels=False,cmap='viridis') #Diving the data into x and y for training the model #X-Contains the features on which the model will classify the type of breast cancer #y-contains the original classifications X=data y=dataset['target'] from sklearn.model_selection import train_test_split #Spliting the data into train and test X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33) #feature Scaling from sklearn.preprocessing import StandardScaler scaler=StandardScaler() X_train=scaler.fit_transform(X_train) X_test=scaler.transform(X_test) from keras.models import Sequential from keras.layers import Dense from keras.layers import Dropout #Intialising the ANN Model model=Sequential() #Adding the first hidden layer of our ANN Model #units-Number of nodes we want to add in our hidden layer. #kernel_intializer-The function is used to intialise the weights. #input_dim=Number of nodes in the input layer #activation=The Activation Function #Dropout function is used to avoid overfitting model.add(Dense(units=15,kernel_initializer='uniform',activation='relu',input_dim=30)) model.add(Dropout(0.2)) #Adding the Second hidden layer of our ANN Model model.add(Dense(units=15,kernel_initializer='uniform',activation='relu')) model.add(Dropout(0.2)) #Adding the output layer of our ANN Model model.add(Dense(units=1,kernel_initializer='uniform',activation='sigmoid')) #Compiling the model model.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy']) #fitting the model to our dataset and specifying the number of batch_size and epochs model.fit(X_train,y_train,epochs=100,batch_size=10) #making prediction of our model prediction=model.predict(X_test) #Evaluating the performance of our model from sklearn.metrics import classification_report,confusion_matrix,accuracy_score prediction=(prediction>0.5) #Confusion Matrix confusion_matrix(y_test,prediction) #Classifiaction Report print(classification_report(y_test,prediction)) #Accuracy of our Model accuracy_score(y_test,prediction) ###Output _____no_output_____
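 ###Markdown A final, optional sketch (not part of the original workflow above): any new observations scored with this model must pass through the same fitted `StandardScaler` before calling `predict`, otherwise they arrive on a different scale than the data the network was trained on. ###Code
# Minimal sketch: score a few rows of raw features with the trained model
new_samples = X[:5]                                   # stand-in for unseen records
new_samples_scaled = scaler.transform(new_samples)    # reuse the scaler fitted above
new_probs = model.predict(new_samples_scaled)
new_classes = (new_probs > 0.5).astype(int)
print(new_classes.ravel())
###Output _____no_output_____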
TL_MOF.ipynb
###Markdown Import required modules ###Code # help function from transfer_learning import NeuralNet from dataset_loader import data_loader, all_filter, get_descriptors, one_filter, data_scaler # modules import torch import torch.nn as nn import torch.optim as optim from torch.autograd import Variable import os, sys import numpy as np import pandas as pd from sklearn.model_selection import train_test_split from sklearn.metrics import r2_score, mean_squared_error from tqdm import tqdm from scipy.stats import pearsonr import matplotlib.pyplot as plt %matplotlib inline # file name and data path device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') base_path = os.getcwd() file_name = 'data/CrystGrowthDesign_SI.csv' ###Output _____no_output_____ ###Markdown Load the dataset ###Code """ Data description. Descriptors: 'void fraction', 'Vol. S.A.', 'Grav. S.A.', 'Pore diameter Limiting', 'Pore diameter Largest' Source task: 'H2@100 bar/243K (wt%)' Target tasks: 'H2@100 bar/130K (wt%)' 'CH4@100 bar/298 K (mg/g)' '5 bar Xe mol/kg' '5 bar Kr mol/kg' """ descriptor_columns = ['void fraction', 'Vol. S.A.', 'Grav. S.A.', 'Pore diameter Limiting', 'Pore diameter Largest'] one_filter_columns = ['H2@100 bar/243K (wt%)'] another_filter_columns = ['H2@100 bar/130K (wt%)'] # load data data = data_loader(base_path, file_name) # extract descriptors and gas adsorptions one_property = one_filter(data, another_filter_columns) descriptors = get_descriptors(data, descriptor_columns) # prepare training inputs and outputs X = np.array(descriptors.values, dtype=np.float32) y = np.array(one_property.values, dtype=np.float32).reshape(len(X), ) print(X.shape) print(y.shape) X = data_scaler(X) y = data_scaler(y.reshape(-1, 1)).reshape(len(X),) ###Output (13506, 5) (13506,) ###Markdown Source task training ###Code ## hyper-parameters input_size = 5 hidden_size_1 = 128 hidden_size_2 = 64 output_size = 1 learning_rate = 0.00002 ## model, loss, and optimizer model = NeuralNet(input_size, hidden_size_1, hidden_size_2, output_size).to(device) criterion = nn.MSELoss() optimizer = optim.Adam(model.parameters(), lr=learning_rate) ## train, val, test data split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=1) X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=1) # Training scores_epochs = list() num_epochs = 5000 for epoch in tqdm(range(num_epochs)): inputs = torch.from_numpy(X_train) labels = torch.from_numpy(y_train) outputs = model(inputs).view(-1,) loss = criterion(outputs, labels) optimizer.zero_grad() loss.backward() optimizer.step() if epoch % 5 == 0: inputs_val = torch.from_numpy(X_val) labels_val = torch.from_numpy(y_val) outputs_val = model(inputs_val).view(-1,) score = r2_score(labels_val.data.numpy(), outputs_val.data.numpy()) # print('Predictive accuracy on validation set at epoch {}/{} is {}'.format(epoch, num_epochs, score)) scores_epochs.append(score) # if len(scores_epochs) >= 2: # if score < scores_epochs[-2]: # break # torch.save(model.state_dict(), 'model_H2.ckpt') plt.plot(np.arange(0, num_epochs, 5), scores_epochs, color='red') plt.xlabel('Epochs', size=15) plt.ylabel('Pre Acc on Val set', size=15) plt.savefig('Manuscript/H2_val.png', bbox_inches='tight', dpi=500) print('The predictive accuracy on test set is {}'.format( r2_score(torch.from_numpy(y_test).data.numpy(), model(torch.from_numpy(X_test)).view(-1,).data.numpy()))) ###Output 100%|██████████| 5000/5000 [00:50<00:00, 98.68it/s] ###Markdown Direct learning 
and transfer learning on target tasks ###Code def transfer_learning(transfer=False, nsamples=None, nbatches=None, name=None):
    seeds = np.arange(nbatches)
    Ns = list()
    scores_epochs = list()
    scores_test = list()
    scores_train = list()
    pred_tests = list()
    grt_train_X = list()
    grt_test_X = list()
    grt_tests = list()
    for seed in tqdm(seeds):
        data_small = data.sample(n=nsamples, random_state=seed)
        another_property = one_filter(data_small, another_filter_columns)
        descriptors_small = get_descriptors(data_small, descriptor_columns)
        X_small = np.array(descriptors_small.values, dtype=np.float32)
        y_small = np.array(another_property.values, dtype=np.float32).reshape(len(X_small), )
        X_small = data_scaler(X_small)
        y_small = data_scaler(y_small.reshape(-1, 1)).reshape(len(X_small),)

        ## hyper-parameters
        input_size = 5
        hidden_size_1 = 128
        hidden_size_2 = 64
        output_size = 1
        learning_rate = 0.00002

        ## model, loss, and optimizer
        if transfer:
            model = NeuralNet(input_size, hidden_size_1, hidden_size_2, output_size).to(device)
            model.load_state_dict(torch.load('model_H2.ckpt'))
            model.fc1.weight.requires_grad = False
            model.fc1.bias.requires_grad = False
            model.fc2.weight.requires_grad = False
            model.fc2.bias.requires_grad = False
            criterion = nn.MSELoss()
            optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=learning_rate)
        else:
            model = NeuralNet(input_size, hidden_size_1, hidden_size_2, output_size).to(device)
            criterion = nn.MSELoss()
            optimizer = optim.Adam(model.parameters(), lr=learning_rate)

        ## train, val, test data split
        X_train, X_test, y_train, y_test = train_test_split(X_small, y_small, test_size=0.1, random_state=1)
        X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=1)

        scores_epoch = list()
        num_epochs = 10000
        N = 0
        for epoch in range(num_epochs):
            inputs = torch.from_numpy(X_train)
            labels = torch.from_numpy(y_train)
            outputs = model(inputs).view(-1,)
            loss = criterion(outputs, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            N += 1
            if epoch % 5 == 0:
                inputs_val = torch.from_numpy(X_val)
                labels_val = torch.from_numpy(y_val)
                outputs_val = model(inputs_val).view(-1,)
                score = r2_score(labels_val.data.numpy(), outputs_val.data.numpy())
                # print('Predictive accuracy on validation set at epoch {}/{} is {}'.format(epoch, num_epochs, score))
                scores_epoch.append(score)
                if len(scores_epoch) >= 2:
                    if score < scores_epoch[-2]:
                        break
        scores_epochs.append(scores_epoch)
        Ns.append(N)
        score_train = r2_score(torch.from_numpy(y_train).data.numpy(), model(torch.from_numpy(X_train)).view(-1,).data.numpy())
        # score_train = mean_squared_error(torch.from_numpy(y_train).data.numpy(), model(torch.from_numpy(X_train)).view(-1,).data.numpy())
        scores_train.append(score_train)
        pred_tests.append(model(torch.from_numpy(X_test)).view(-1,).data.numpy())
        grt_train_X.append(torch.from_numpy(X_train).data.numpy())
        grt_test_X.append(torch.from_numpy(X_test).data.numpy())
        grt_tests.append(torch.from_numpy(y_test).data.numpy())
        score_test = r2_score(torch.from_numpy(y_test).data.numpy(), model(torch.from_numpy(X_test)).view(-1,).data.numpy())
        # score_test = mean_squared_error(torch.from_numpy(y_test).data.numpy(), model(torch.from_numpy(X_test)).view(-1,).data.numpy())
        scores_test.append(score_test)

    torch.save(model, f'{name}.pt')
    return scores_train, scores_test, grt_train_X, grt_test_X ###Output _____no_output_____ ###Markdown Direct learning ###Code scores_train_H2_130K_wo_transfer, scores_test_H2_130K_wo_transfer, grt_train_X_wo_transfer, grt_test_X_wo_transfer =
transfer_learning(transfer=False, nsamples=100, nbatches=1000, name="One") ###Output 100%|██████████| 1000/1000 [1:09:00<00:00, 4.14s/it] ###Markdown Transfer learning ###Code scores_train_H2_130K_w_transfer, scores_test_H2_130K_w_transfer, grt_train_X_w_transfer, grt_test_X_w_transfer = transfer_learning(transfer=True, nsamples=100, nbatches=1000, name="Two") ###Output 100%|██████████| 1000/1000 [35:26<00:00, 2.13s/it]
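 ###Markdown A quick, hedged comparison (assuming both runs above completed and returned their score lists): averaging the per-batch test R² values gives one summary number for direct learning versus transfer learning on this target task. ###Code
# Compare the mean test R2 of the two runs above
print('Direct learning   mean test R2: {:.3f}'.format(np.mean(scores_test_H2_130K_wo_transfer)))
print('Transfer learning mean test R2: {:.3f}'.format(np.mean(scores_test_H2_130K_w_transfer)))
###Output _____no_output_____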
_notebooks/2021-08-17-NLP_10_Aivivn_Product_Review_Sentiment_Analysis.ipynb
###Markdown AIVIVN Product Review Sentiment Analysis [Pytorch Lightning Sample]> Training the sentiment classifier (TextCNN) for AIVIVN product review dataset using Pytorch Lightning.- toc: true - badges: false- comments: true- categories: [implementation]- image: images/TextCNN.png ###Code from google.colab import drive drive.mount('/content/drive') ###Output Mounted at /content/drive ###Markdown Install required packages ###Code %%capture !pip install pytorch-lightning !pip install torchmetrics !pip install pyvi !pip install torch-summary ###Output _____no_output_____ ###Markdown Import required packages ###Code import re import copy from pathlib import Path from typing import Any, Dict, List, Optional, Tuple, Union from os.path import abspath import torchmetrics import pandas as pd ###Output _____no_output_____ ###Markdown Have a closer look at the datasetThe data contains user's reviews following two categories: "positive" and "negative". There are 27068 sentences in total.* **Train**: 16087 sentences * **Test**: 10981 sentences (public: 5454 sentences, private: 5527 sentences)* **Labels**: 0 (positive), 1 (negative)You can download the dataset [here](https://drive.google.com/drive/folders/10LAyFnAuhnAoq0pshmDV3E4KVV5tCiOu?usp=sharing!). ###Code train_path = "/content/drive/MyDrive/SLSOPS/dataset/Aivivn_vietnamese_dataset/train.crash" test_path = "/content/drive/MyDrive/SLSOPS/dataset/Aivivn_vietnamese_dataset/test.crash" def split_array(arr, condition): if len(arr) == 0: return [] result = [] accumulated = [arr[0]] for ele in arr[1:]: if condition(ele): result.append(copy.deepcopy(accumulated)) accumulated = [copy.deepcopy(ele)] else: accumulated.append(copy.deepcopy(ele)) result.append(copy.deepcopy(accumulated)) return result def read_file(file_path, is_train=True): file_path = abspath(file_path) data_lines = list( filter(lambda x: x != '', open(file_path).read().split('\n'))) pattern = ('train' if is_train else 'test') + '_[0-9]{5}' datas = split_array(data_lines, lambda x: bool(re.match(pattern, x))) if is_train: result_array = list(map( lambda x: [x[0], ' '.join(x[1:-1]), int(x[-1])], datas)) else: result_array = list(map(lambda x: [x[0], ' '.join(x[1:])], datas)) columns = ['name', 'text', 'label'] if is_train else ['name', 'text'] return pd.DataFrame(result_array, columns=columns) train_df = read_file(train_path) test_df = read_file(test_path, is_train=False) # Having a look at the dataset 0: Postitive, 1: Negative train_df.head() # Having a look at the test set test_df.head() ###Output _____no_output_____ ###Markdown Define dataset and dataloader classes ###Code from typing import List, Tuple import torchtext from collections import Counter, OrderedDict from torch.nn.utils.rnn import pad_sequence from torchtext.vocab import Vectors, Vocab class Tokenizer(): def __init__(self, tokenizer: Any): self.counter = Counter(['<pad>', '<unk>']) self.tokenizer = tokenizer self.vocab = None self.update_vocab() def update_vocab(self): # sorted_by_freq_tuples = sorted(self.counter.items()[2:], key=lambda x: x[1], reverse=True) ordered_dict = OrderedDict(self.counter.items()) self.vocab = torchtext.vocab.vocab(ordered_dict, min_freq=1) def fit_on_texts(self, texts: List[str]): """ Updates internal vocabulary based on a list of texts. 
""" for text in texts: tokens = [t.text for t in self.tokenizer(text)] self.counter.update(tokens) self.update_vocab() def texts_to_sequences(self, texts: List[str], tensor: bool=True) -> List[int]: word2idx = self.vocab.get_stoi() sequences = [] for text in texts: seq = [word2idx.get(token.text, word2idx['<unk>']) for token in self.tokenizer(text)] if tensor: seq = torch.tensor(seq) sequences.append(seq) return sequences def _load_data_from(data_path: Union[str, Path]): df = read_file(data_path) sents = list(df['text'].str.strip().str.lower()) sentiments = list(df['label']) return sents, sentiments def _save_to_csv(file_path: Union[str, Path], data): sents, sentiments = data df = pd.DataFrame({ "text": sents, "label": sentiments, }) df.to_csv(file_path, index=False) return file_path def _preprocess_data(data: Tuple[List[str], List[str]], tokenizer: Tokenizer): sentences, sentiments = data sequences = tokenizer.texts_to_sequences(sentences) sentiment_tensor = torch.tensor(sentiments) # pad sequences sequences = pad_sequence(sequences, batch_first=True) assert len(sequences) == len(sentiments) all_data = [] for i in range(len(sentiments)): sample = { 'sequence': sequences[i], 'sentiment': sentiment_tensor[i] } all_data.append(sample) return all_data def build_vocab(tokenizer, data): sentences = data[0] tokenizer.fit_on_texts(sentences) from gensim.models import KeyedVectors from gensim.test.utils import datapath import numpy as np def load_pretrained_word_embeddings(w2v_path: str): return KeyedVectors.load_word2vec_format(datapath(w2v_path), binary=False) def create_embedding_matrix(w2v_model, vocab: Vocab, path: Union[str, Path]): if os.path.exists(path): print(f'loading embedding matrix from {path}') embedding_matrix = pickle.load(open(path, 'rb')) else: # Calculate vector for OOV token OOV_vec = torch.from_numpy(np.mean(w2v_model.vectors, axis=0)) embedding_matrix = torch.zeros((len(vocab), w2v_model.vector_size), dtype=torch.float) # words that are not availabel in the pretrained word embeddings will be zeros for word, index in vocab.get_stoi().items(): if word in w2v_model.vocab: embedding_matrix[index] = torch.from_numpy(w2v_model[word]) else: if word == "<pad>": continue embedding_matrix[index] = OOV_vec # save embedding matrix pickle.dump(embedding_matrix, open(path, 'wb')) return embedding_matrix ###Output _____no_output_____ ###Markdown Now we create the datamodule class for the dataset using Pytorch-Lightning Framework. You can read more [here](https://pytorch-lightning.readthedocs.io/en/stable/extensions/datamodules.html). 
###Code import torch from torch.utils.data import DataLoader, Dataset, random_split import pytorch_lightning as pl class AIVIVNDataset(Dataset): def __init__(self, data): self.data = data def __len__(self): return len(self.data) def __getitem__(self, idx): return self.data[idx] class AIVIVN(pl.LightningDataModule): def __init__(self, tokenizer, opts: Dict[str, Any]): super().__init__() self.tokenizer = tokenizer self.batch_size = opts['batch_size'] self.num_workers = opts['num_workers'] self.on_gpu = opts['on_gpu'] self.train_ds = None self.val_ds = None self.mapping = {"negative": 1, "positive": 0} self.inverse_mapping = {v: k for k, v in enumerate(self.mapping)} def prepare_data(self, *args, **kwargs) -> None: self.train_path = '/content/drive/MyDrive/SLSOPS/dataset/Aivivn_vietnamese_dataset/train.crash' def setup(self, stage: str = None) -> None: if stage == "fit" or stage is None: # Load data from files train_data = _load_data_from(self.train_path) preprocessed_data = _preprocess_data(train_data, self.tokenizer) dataset = AIVIVNDataset(preprocessed_data) lengths = [int(len(dataset)*0.85), len(dataset) - int(len(dataset)*0.85)] self.train_ds, self.val_ds = random_split(dataset, lengths) def train_dataloader(self): return DataLoader( self.train_ds, shuffle=True, batch_size=self.batch_size, num_workers=self.num_workers, pin_memory=self.on_gpu ) def val_dataloader(self): return DataLoader( self.val_ds, shuffle=False, batch_size=self.batch_size, num_workers=self.num_workers, pin_memory=self.on_gpu, ) def __repr__(self): basic = f"AIVIVN Product Review Dataset\nNum classes: {len(self.mapping)}\nMapping: {self.mapping}\n" if self.train_ds is None and self.val_ds is None: return basic batch = next(iter(self.train_dataloader())) sequences, sentiments = batch['sequence'], batch['sentiment'] data = ( f"Train/val sizes: {len(self.train_ds)}, {len(self.val_ds)}\n" f"Batch sequences stats: {(sequences.shape, sequences.dtype)}\n" f"Batch sentiments stats: {(sentiments.shape, sentiments.dtype)}\n" ) return basic + data ###Output _____no_output_____ ###Markdown Implementation (TextCNN) ###Code import torch.nn as nn import torch.nn.functional as F class ConvPool(nn.Module): def __init__(self, in_channels, out_channels, conv_kernel_sz, pool_kernel_sz): super(ConvPool, self).__init__() self.conv = nn.Conv1d(in_channels=in_channels, out_channels=out_channels, kernel_size=conv_kernel_sz) self.pool = nn.MaxPool1d(pool_kernel_sz) def forward(self, x): out = self.conv(x) out = F.relu(out) out = self.pool(out) return out class TextCNN(pl.LightningModule): def __init__(self, embeddings, num_classes=2, batch_first=True, lr=1e-3, dropout=0, l2reg=0.01): super().__init__() embedding_dim = embeddings.shape[1] self.embedding = nn.Embedding.from_pretrained(embeddings) kernel_sizes = [3,4,5] self.filters = nn.ModuleList([ConvPool(embedding_dim, 128, conv_kernel_sz=conv_kernel_size, pool_kernel_sz=5) for conv_kernel_size in kernel_sizes]) self.conv_pool1 = ConvPool(128, 128, 5, 5) self.conv_pool2 = ConvPool(128, 128, 5, 30) self.flatten = nn.Flatten(start_dim=1) self.linear1 = nn.Linear(256, 128) self.linear2 = nn.Linear(128, num_classes) self.lr = lr self.l2reg = l2reg self.train_acc = torchmetrics.Accuracy() self.val_acc = torchmetrics.Accuracy() self.val_f1 = torchmetrics.F1(num_classes=2, average='macro') self.test_acc = torchmetrics.Accuracy() self.test_f1 = torchmetrics.F1(num_classes=2, average='macro') def configure_optimizers(self): optim = torch.optim.Adam(self.parameters(), lr=self.lr) return optim def 
forward(self, input):
        sequences = input['sequence'] # BxS
        embeds = self.embedding(sequences).permute(0, 2, 1) # BxHxS after permute
        out_1 = self.filters[0](embeds)
        out_2 = self.filters[1](embeds)
        out_3 = self.filters[2](embeds)
        out = torch.cat((out_1, out_2, out_3), dim=2)
        out = self.conv_pool1(out)
        out = self.conv_pool2(out)
        out = self.flatten(out)
        out = self.linear1(out)
        out = F.relu(out)
        logit = self.linear2(out)
        return logit

    def training_step(self, batch, batch_idx):
        sentiments = batch['sentiment']
        logits = self.forward(batch)
        loss = F.cross_entropy(logits, sentiments)
        scores = F.softmax(logits, dim=-1)
        self.train_acc(scores, sentiments)
        self.log('train_loss', loss, on_step=True, on_epoch=True, prog_bar=True)
        self.log('train_acc', self.train_acc, on_step=False, on_epoch=True, prog_bar=True, logger=True)
        return loss

    def validation_step(self, batch, batch_idx):  # pylint: disable=unused-argument
        sentiments = batch['sentiment']
        logits = self.forward(batch)
        loss = F.cross_entropy(logits, sentiments)
        scores = F.softmax(logits, dim=-1)
        self.val_acc(scores, sentiments)
        self.val_f1(scores, sentiments)
        self.log('val_loss', loss, on_step=True, on_epoch=True, prog_bar=True)
        self.log('val_acc', self.val_acc, on_step=False, on_epoch=True, prog_bar=True, logger=True)
        self.log('val_f1', self.val_f1, on_step=False, on_epoch=True, prog_bar=True, logger=True)

    def test_step(self, batch, batch_idx):  # pylint: disable=unused-argument
        sentiments = batch['sentiment']
        logits = self.forward(batch)
        scores = F.softmax(logits, dim=-1)
        self.test_acc(scores, sentiments)
        self.test_f1(scores, sentiments)
        self.log('test_acc', self.test_acc, on_step=False, on_epoch=True, logger=True)
        self.log('test_f1', self.test_f1, on_step=False, on_epoch=True, logger=True)
 ###Output _____no_output_____ ###Markdown nn.ModuleList does not have a forward() method because it does not define any neural network, that is, there is no connection between each of the nn.Module's that it stores. You may use it to store nn.Module's, just like you use Python lists to store other types of objects (integers, strings, etc). **The advantage of using nn.ModuleList** instead of using conventional Python lists to store nn.Module's is that PyTorch is aware of the existence of the nn.Module's inside an nn.ModuleList, which is not the case for Python lists. When a plain Python list is used instead of an nn.ModuleList, the optimizer will raise an error saying that the model has no parameters, because PyTorch does not see the parameters of the layers stored in a Python list. If you use an nn.ModuleList instead, you'll get no error.
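As a tiny standalone sketch of that point (the toy classes below are new examples, not part of the TextCNN above): layers held in an nn.ModuleList are registered with the module, while the same layers kept in a plain Python list are invisible to `parameters()`. ###Code
import torch.nn as nn

class WithModuleList(nn.Module):
    def __init__(self):
        super().__init__()
        # registered: parameters() sees 3 x (weight, bias) = 6 tensors
        self.layers = nn.ModuleList([nn.Linear(4, 4) for _ in range(3)])

class WithPythonList(nn.Module):
    def __init__(self):
        super().__init__()
        # NOT registered: parameters() sees nothing here
        self.layers = [nn.Linear(4, 4) for _ in range(3)]

print(len(list(WithModuleList().parameters())))   # 6
print(len(list(WithPythonList().parameters())))   # 0 -> an optimizer would get an empty parameter list
###Output _____no_output_____ ###Markdown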
Training ###Code # Load pretrained w2v model w2v_path = "/content/drive/MyDrive/SLSOPS/pretrained_w2v/word2vec_vi_words_100dims.txt" w2v_model = load_pretrained_word_embeddings(w2v_path) # Load dataset train_path = '/content/drive/MyDrive/SLSOPS/dataset/Aivivn_vietnamese_dataset/train.crash' train_data = _load_data_from(train_path) # Create Tokenizer from spacy.lang.vi import Vietnamese nlp = Vietnamese() tokenizer = Tokenizer(nlp) # Build vocabulary build_vocab(tokenizer, [train_data[0]]) import os import pickle # Create embedding matrix from pretrained w2v embedding_matrix = create_embedding_matrix(w2v_model, tokenizer.vocab, "embedding_matrix.dat") options = { "on_gpu": True, "batch_size": 16, "num_workers": 2 } # Create DataModule datamodule = AIVIVN(tokenizer, options) from pytorch_lightning.callbacks import ModelCheckpoint checkpoint_callback = ModelCheckpoint( monitor='val_acc', # save the model with the best validation accuracy dirpath='checkpoints', mode='max', ) # Set hyper-parameters lr = 1e-3 num_epochs = 20 l2reg = 1e-5 dropout = 0.0 trainer = pl.Trainer(gpus=1, max_epochs=num_epochs, callbacks=[checkpoint_callback], deterministic=True) # trainer = pl.Trainer(fast_dev_run=True, gpus=1) #Debug # trainer = pl.Trainer(overfit_batches=0.1, max_epochs=num_epochs, gpus=1) #Debug model = TextCNN(embedding_matrix, lr=lr, l2reg=l2reg, dropout=dropout) trainer.fit(model, datamodule) ###Output GPU available: True, used: True TPU available: False, using: 0 TPU cores IPU available: False, using: 0 IPUs LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0] | Name | Type | Params ------------------------------------------ 0 | embedding | Embedding | 1.2 M 1 | filters | ModuleList | 153 K 2 | conv_pool1 | ConvPool | 82.0 K 3 | conv_pool2 | ConvPool | 82.0 K 4 | flatten | Flatten | 0 5 | linear1 | Linear | 32.9 K 6 | linear2 | Linear | 258 7 | train_acc | Accuracy | 0 8 | val_acc | Accuracy | 0 9 | val_f1 | F1 | 0 ------------------------------------------ 351 K Trainable params 1.2 M Non-trainable params 1.6 M Total params 6.283 Total estimated model params size (MB) ###Markdown Save model and tokenizer for inference ###Code # Load best model from training new_model = TextCNN.load_from_checkpoint('/content/checkpoints/epoch=1-step=1709.ckpt', embeddings=embedding_matrix) # Test the loaded model with the validation set to double check trainer.test(my_model, datamodule.val_dataloader()) # Save tokenizer import pickle with open('tokenizer.pkl', 'wb') as outp: pickle.dump(tokenizer, outp, pickle.HIGHEST_PROTOCOL) # Save entire model torch.save(new_model, "model") ###Output _____no_output_____ ###Markdown InferenceTo do the inference, we have to do 2 steps:1. Loading model and the tokenizer.2. Define the preprocessing function to preprocess the input before feeding into the model.3. (Optional) Convert the predictions to labels. ###Code import torch.nn.functional as F inputs = [":(( Mình hơi thất vọng 1 chút vì mình đã kỳ vọng cuốn sách khá nhiều hi vọng nó sẽ nói về việc học tập của cách sinh viên trường Harvard ra sao những nỗ lực của họ như thế nào 4h sáng? tại sao họ lại phải thức dậy vào thời khắc đấy? sau đó là cả một câu chuyện ra sao. Cái mình thực sự cần ở đây là câu chuyện ẩn dấu trong đó để tự bản thân mỗi người cảm nhận và đi sâu vào lòng người hơn. Còn cuốn sách này chỉ đơn thuần là cuốn sách dạy kĩ năng mà hầu như sách nào cũng đã có. 
BUồn...", "Chất lượng sản phẩm tuyệt vời nhưng k có hộp k có dây giày đen k có tất"] # preprocess input def _preprocess_data_for_inference(sentences: List[str], tokenizer: Tokenizer): sequences = tokenizer.texts_to_sequences(sentences, tensor=True) # pad sequences sequences = torch.stack([F.pad(seq, (0, 557 - len(seq)), 'constant', 0) for seq in sequences]) return {"sequence": sequences} input_data = _preprocess_data_for_inference(inputs, tokenizer) new_model.eval() predictions = new_model(input_data) torch.argmax(predictions, axis=-1) train_df.iloc[3]['text'] ###Output _____no_output_____ ###Markdown Debug ###Code # Random check the pretrain word embeddings A = embedding_matrix[tokenizer.vocab.get_stoi()['ăn_nằm']] B = w2v_model["ăn_nằm"] np.array_equal(A,B) ###Output _____no_output_____
ipython/2. simulation/fragment_ode_simulation_with_reattachment.ipynb
###Markdown Fragment Mech Simulation with Reattachment ###Code import os import numpy import matplotlib.pyplot as plt %matplotlib inline from rmgpy.chemkin import * from rmgpy.species import Species from afm.simulator_original import OdeSimulator import afm.utils import afm.simulator_original ###Output /home/yentingw/RMG/RMG-Py/rmgpy/tools/plot.py:31: UserWarning: This call to matplotlib.use() has no effect because the backend has already been chosen; matplotlib.use() must be called *before* pylab, matplotlib.pyplot, or matplotlib.backends is imported for the first time.
###Markdown 1. specify condition and simulate ###Code temperature = 673.15 # K pressure = 350*3 # bar initial_mol_fraction = { "ArCCCCR":1.0, "LCCCCR":1.0, "LCCCC":1.0 } hr = 14 termination_time = 3600*hr # hrs model = 'two-sided_newcut1' working_dir = os.path.join('../', 'data', 'pdd_chemistry', model) chemkin_path = os.path.join(working_dir, 'chem_annotated.inp') species_dict_path = os.path.join(working_dir, 'species_dictionary.txt') smiles_dict_path = os.path.join(working_dir, 'fragment_smiles.txt') ode_simulator = OdeSimulator(chemkin_path, species_dict_path, smiles_dict_path, temperature, pressure) alldata = ode_simulator.simulate(initial_mol_fraction, termination_time) results_path = os.path.join(working_dir, 'results') if not os.path.exists(results_path): os.mkdir(results_path) ###Output _____no_output_____
###Markdown 2. reactant conversion ###Code # prepare moles data time, dataList, _ = alldata[0] TData = dataList[0] PData = dataList[1] VData = dataList[2] total_moles = PData.data*VData.data/8.314/TData.data moles_dict = {} for data in dataList[3:]: spe_label = data.label moles_dict[spe_label] = max(data.data[-1]*total_moles[-1],0) ArCCCCR_mf = dataList[3].data print dataList[3].label ArCCCCR_moles = ArCCCCR_mf*total_moles ArCCCCR_conv = (ArCCCCR_moles[0]-ArCCCCR_moles)/ArCCCCR_moles[0] plt.plot(time.data, ArCCCCR_conv) numpy.savetxt(os.path.join(results_path, 'reactant_conv.csv'), (time.data, ArCCCCR_conv)) ###Output _____no_output_____
###Markdown 3. molecular weight distribution ###Code from afm.simulator_original import categorize_fragments fragmental_weight_distri = ode_simulator.get_molecular_weight_distribution(alldata) mws = [tup[0]*1000 for tup in fragmental_weight_distri] moles = [tup[1] for tup in fragmental_weight_distri] molefracs = numpy.array(moles)/sum(moles) numpy.savetxt(os.path.join(results_path, 'mwd_{0}hr.csv'.format(hr)), (mws, molefracs)) ###Output _____no_output_____
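###Markdown The cell above only saves the distribution to disk. The short cell below is an optional sketch, not part of the original workflow, that plots the `mws` and `molefracs` arrays computed above; it assumes both are one-dimensional sequences of equal length and that the ×1000 conversion puts the weights in g/mol. ###Code
# optional quick look at the fragmental molecular weight distribution
plt.figure()
plt.plot(mws, molefracs, 'o')          # one marker per fragment/molecule
plt.xlabel('molecular weight (g/mol)') # unit assumes the *1000 conversion above
plt.ylabel('mole fraction')
plt.title('Molecular weight distribution after {0} hr'.format(hr))
plt.show()
###Output _____no_output_____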
_notebooks/2021-03-29-Naive-Bees-Deep-Learning-with-Images.ipynb
###Markdown "Naive Bees Deep Learning with Images"> "DataCamp Project: Naive Bees Deep Learning with Images"- toc: true- branch: master- badges: true- comments: true- categories: [datacamp, projects, python]- hide: false 1. Import Python librariesA honey bee (Apis).Can a machine identify a bee as a honey bee or a bumble bee? These bees have different behaviors and appearances, but given the variety of backgrounds, positions, and image resolutions, it can be a challenge for machines to tell them apart.Being able to identify bee species from images is a task that ultimately would allow researchers to more quickly and effectively collect field data. Pollinating bees have critical roles in both ecology and agriculture, and diseases like colony collapse disorder threaten these species. Identifying different species of bees in the wild means that we can better understand the prevalence and growth of these important insects.A bumble bee (Bombus).This notebook walks through building a simple deep learning model that can automatically detect honey bees and bumble bees and then loads a pre-trained model for evaluation. ###Code import pickle from pathlib import Path from skimage import io import pandas as pd import numpy as np import matplotlib.pyplot as plt %matplotlib inline from sklearn.preprocessing import StandardScaler from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report # import keras library # ... YOUR CODE FOR TASK 1 ... import keras # import Sequential from the keras models module # ... YOUR CODE FOR TASK 1 ... from keras.models import Sequential # import Dense, Dropout, Flatten, Conv2D, MaxPooling2D from the keras layers module from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D ###Output _____no_output_____ ###Markdown 2. Load image labelsNow that we have all of our imports ready, it is time to look at the labels for our data. We will load our labels.csv file into a DataFrame called labels, where the index is the image name (e.g. an index of 1036 refers to an image named 1036.jpg) and the genus column tells us the bee type. genus takes the value of either 0.0 (Apis or honey bee) or 1.0 (Bombus or bumble bee). ###Code # load labels.csv from datasets folder using pandas labels = pd.read_csv('datasets/labels.csv', index_col=0) # print value counts for genus print(labels['genus'].value_counts()) # assign the genus label values to y y = labels['genus'].values ###Output 0.0 827 1.0 827 Name: genus, dtype: int64 ###Markdown 3. Examine RGB values in an image matrixImage data can be represented as a matrix. The width of the matrix is the width of the image, the height of the matrix is the height of the image, and the depth of the matrix is the number of channels. Most image formats have three color channels: red, green, and blue.For each pixel in an image, there is a value for every channel. The combination of the three values corresponds to the color, as per the RGB color model. Values for each color can range from 0 to 255, so a purely blue pixel would show up as (0, 0, 255).Let's explore the data for a sample image. ###Code # load an image and explore example_image = io.imread('datasets/{}.jpg'.format(labels.index[0])) # show image # ... YOUR CODE FOR TASK 3 ... 
plt.imshow(example_image) # print shape print('Example image has shape: ', example_image.shape) # print color channel values for top left pixel print('RGB values for the top left pixel are:', example_image[0, 0, :]) ###Output Example image has shape: (50, 50, 3) RGB values for the top left pixel are: [127 108 95] ###Markdown 4. Normalize image dataNow we need to normalize our image data. Normalization is a general term that means changing the scale of our data so it is consistent.In this case, we want each feature to have a similar range so our neural network can learn effectively across all the features. As explained in the sklearn docs, "If a feature has a variance that is orders of magnitude larger than others, it might dominate the objective function and make the estimator unable to learn from other features correctly as expected."We will scale our data so that it has a mean of 0 and standard deviation of 1. We'll use sklearn's StandardScaler to do the math for us, which entails taking each value, subtracting the mean, and then dividing by the standard deviation. We need to do this for each color channel (i.e. each feature) individually. ###Code # initialize standard scaler ss = StandardScaler() image_list = [] for i in labels.index: # load image img = io.imread('datasets/{}.jpg'.format(i)).astype(np.float64) # for each channel, apply standard scaler's fit_transform method for channel in range(img.shape[2]): img[:, :, channel] = ss.fit_transform(img[:, :, channel]) # append to list of all images image_list.append(img) # convert image list to single array X = np.array(image_list) # print shape of X print(X.shape) ###Output (1654, 50, 50, 3) ###Markdown 5. Split into train, test, and evaluation setsNow that we have our big image data matrix, X, as well as our labels, y, we can split our data into train, test, and evaluation sets. To do this, we'll first allocate 20% of the data into our evaluation, or holdout, set. This is data that the model never sees during training and will be used to score our trained model.We will then split the remaining data, 60/40, into train and test sets just like in supervised machine learning models. We will pass both the train and test sets into the neural network. ###Code # split out evaluation sets (x_eval and y_eval) x_interim, x_eval, y_interim, y_eval = train_test_split(X, y, test_size=0.2, random_state=52) # split remaining data into train and test sets # ... YOUR CODE FOR TASK 5 ... x_train, x_test, y_train, y_test = train_test_split(x_interim, y_interim, test_size=0.4, random_state=52) # examine number of samples in train, test, and validation sets print('x_train shape:', x_train.shape) print(len(y_train), 'train samples') print(x_test.shape[0], 'test samples') print(x_eval.shape[0], 'eval samples') ###Output x_train shape: (793, 50, 50, 3) 793 train samples 530 test samples 331 eval samples ###Markdown 6. Model building (part i)It's time to start building our deep learning model, a convolutional neural network (CNN). CNNs are a specific kind of artificial neural network that is very effective for image classification because they are able to take into account the spatial coherence of the image, i.e., that pixels close to each other are often related.Building a CNN begins with specifying the model type. In our case, we'll use a Sequential model, which is a linear stack of layers. We'll then add two convolutional layers. 
To understand convolutional layers, imagine a flashlight being shown over the top left corner of the image and slowly sliding across all the areas of the image, moving across the image in the same way your eyes move across words on a page. Convolutional layers pass a kernel (a sliding window) over the image and perform element-wise matrix multiplication between the kernel values and the pixel values in the image. ###Code # set model constants num_classes = 1 # define model as Sequential model = Sequential() # first convolutional layer with 32 filters model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(50, 50, 3))) # add a second 2D convolutional layer with 64 filters # ... YOUR CODE FOR TASK 6 ... model.add(Conv2D(64, kernel_size=(3, 3), activation='relu')) ###Output _____no_output_____ ###Markdown 7. Model building (part ii)Let's continue building our model. So far our model has two convolutional layers. However, those are not the only layers that we need to perform our task. A complete neural network architecture will have a number of other layers that are designed to play a specific role in the overall functioning of the network. Much deep learning research is about how to structure these layers into coherent systems.We'll add the following layers:MaxPooling. This passes a (2, 2) moving window over the image and downscales the image by outputting the maximum value within the window.Conv2D. This adds a third convolutional layer since deeper models, i.e. models with more convolutional layers, are better able to learn features from images.Dropout. This prevents the model from overfitting, i.e. perfectly remembering each image, by randomly setting 25% of the input units to 0 at each update during training.Flatten. As its name suggests, this flattens the output from the convolutional part of the CNN into a one-dimensional feature vector which can be passed into the following fully connected layers.Dense. Fully connected layer where every input is connected to every output (see image below).Dropout. Another dropout layer to safeguard against overfitting, this time with a rate of 50%.Dense. Final layer which calculates the probability the image is either a bumble bee or honey bee.To take a look at how it all stacks up, we'll print the model summary. Notice that our model has a whopping 3,669,249 paramaters. These are the different weights that the model learns through training and what are used to generate predictions on a new image. ###Code # reduce dimensionality through max pooling model.add(MaxPooling2D(pool_size=(2, 2))) # third convolutional layer with 64 filters model.add(Conv2D(64, kernel_size=(3, 3), activation='relu')) # add dropout to prevent over fitting model.add(Dropout(0.25)) # necessary flatten step preceeding dense layer model.add(Flatten()) # fully connected layer model.add(Dense(128, activation='relu')) # add additional dropout to prevent overfitting # ... YOUR CODE FOR TASK 7 ... model.add(Dropout(0.5)) # prediction layers model.add(Dense(num_classes, activation='sigmoid', name='preds')) # show model summary # ... YOUR CODE FOR TASK 7 ... 
model.summary() ###Output Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 48, 48, 32) 896 _________________________________________________________________ conv2d_1 (Conv2D) (None, 46, 46, 64) 18496 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 23, 23, 64) 0 _________________________________________________________________ conv2d_2 (Conv2D) (None, 21, 21, 64) 36928 _________________________________________________________________ dropout (Dropout) (None, 21, 21, 64) 0 _________________________________________________________________ flatten (Flatten) (None, 28224) 0 _________________________________________________________________ dense (Dense) (None, 128) 3612800 _________________________________________________________________ dropout_1 (Dropout) (None, 128) 0 _________________________________________________________________ preds (Dense) (None, 1) 129 ================================================================= Total params: 3,669,249 Trainable params: 3,669,249 Non-trainable params: 0 _________________________________________________________________ ###Markdown 8. Compile and train modelNow that we've specified the model architecture, we will compile the model for training. For this we need to specify the loss function (what we're trying to minimize), the optimizer (how we want to go about minimizing the loss), and the metric (how we'll judge the performance of the model).Then, we'll call .fit to begin the trainig the process. "Neural networks are trained iteratively using optimization techniques like gradient descent. After each cycle of training, an error metric is calculated based on the difference between prediction and target…Each neuron’s coefficients (weights) are then adjusted relative to how much they contributed to the total error. This process is repeated iteratively." ML CheatsheetSince training is computationally intensive, we'll do a 'mock' training to get the feel for it, using just the first 10 images in the train and test sets and training for just 5 epochs. Epochs refer to the number of iterations over the data. Typically, neural networks will train for hundreds if not thousands of epochs.Take a look at the printout for each epoch and note the loss on the train set (loss), the accuracy on the train set (acc), and loss on the test set (val_loss) and the accuracy on the test set (val_acc). We'll explore this more in a later step. 
###Code model.compile( # set the loss as binary_crossentropy loss=keras.losses.binary_crossentropy, # set the optimizer as stochastic gradient descent optimizer=keras.optimizers.SGD(lr=0.001), # set the metric as accuracy metrics=['accuracy'] ) # mock-train the model using the first ten observations of the train and test sets model.fit( x_train[:10, :, :, :], y_train[:10], epochs=5, verbose=1, validation_data=(x_test[:10, :, :, :], y_test[:10]) ) ###Output Epoch 1/5 1/1 [==============================] - 2s 2s/step - loss: 0.6402 - accuracy: 0.7000 - val_loss: 0.6825 - val_accuracy: 0.5000 Epoch 2/5 1/1 [==============================] - 0s 99ms/step - loss: 0.7146 - accuracy: 0.7000 - val_loss: 0.6835 - val_accuracy: 0.5000 Epoch 3/5 1/1 [==============================] - 0s 98ms/step - loss: 0.6180 - accuracy: 0.8000 - val_loss: 0.6820 - val_accuracy: 0.5000 Epoch 4/5 1/1 [==============================] - 0s 128ms/step - loss: 0.6396 - accuracy: 0.8000 - val_loss: 0.6816 - val_accuracy: 0.5000 Epoch 5/5 1/1 [==============================] - 0s 138ms/step - loss: 0.5663 - accuracy: 0.8000 - val_loss: 0.6820 - val_accuracy: 0.5000 ###Markdown 9. Load pre-trained model and scoreNow we'll load a pre-trained model that has the architecture we specified above and was trained for 200 epochs on the full train and test sets we created above.Let's use the evaluate method to see how well the model did at classifying bumble bees and honey bees for the test and validation sets. Recall that accuracy is the number of correct predictions divided by the total number of predictions. Given that our classes are balanced, a model that predicts 1.0 for every image would get an accuracy around 0.5.Note: it may take a few seconds to load the model. Recall that our model has over 3 million parameters (weights), which are what's being loaded. ###Code # load pre-trained model pretrained_cnn = keras.models.load_model('datasets/pretrained_model.h5') # evaluate model on test set score = pretrained_cnn.evaluate(x_test, y_test, verbose=0) print('Test loss:', score[0]) print('Test accuracy:', score[1]) print("") # evaluate model on holdout set eval_score = pretrained_cnn.evaluate(x_eval, y_eval, verbose=0) # print loss score print('Eval loss:', eval_score[0]) # print accuracy score print('Eval accuracy:', eval_score[1]) ###Output WARNING:tensorflow:Error in loading the saved optimizer state. As a result, your model is starting with a freshly initialized optimizer. Test loss: 0.6423928737640381 Test accuracy: 0.6641509532928467 Eval loss: 0.654895007610321 Eval accuracy: 0.6495468020439148 ###Markdown 10. Visualize model training historyIn addition to scoring the final iteration of the pre-trained model as we just did, we can also see the evolution of scores throughout training thanks to the History object. We'll use the pickle library to load the model history and then plot it.Notice how the accuracy improves over time, eventually leveling off. Correspondingly, the loss decreases over time. Plots like these can help diagnose overfitting. If we had seen an upward curve in the validation loss as times goes on (a U shape in the plot), we'd suspect that the model was starting to memorize the test set and would not generalize well to new data. 
###Code # load history with open('datasets/model_history.pkl', 'rb') as f: pretrained_cnn_history = pickle.load(f) # print keys for pretrained_cnn_history dict print(pretrained_cnn_history) fig = plt.figure(1) plt.subplot(211) # plot the validation accuracy plt.plot(pretrained_cnn_history['val_acc']) plt.title('Validation accuracy and loss') plt.ylabel('Accuracy') plt.subplot(212) # plot the validation loss plt.plot(pretrained_cnn_history['val_loss'], 'r') plt.xlabel('Epoch') plt.ylabel('Loss value'); ###Output {'val_loss': [0.6973548315606027, 0.6949519305858972, 0.6938502622100542, 0.6938419162102465, 0.6921774981156835, 0.691008841316655, 0.6904466395108205, 0.6908388506691411, 0.690298109234504, 0.6896439885193447, 0.6890573636540827, 0.6878997674528158, 0.6877660085570138, 0.687450434126944, 0.6864860181538563, 0.6857865929603577, 0.6854122814142479, 0.6852205658858677, 0.6844912373794699, 0.6839377567453204, 0.6835917376122385, 0.6830667650924539, 0.68289057038865, 0.6822199160197996, 0.681749959036989, 0.6813096154410884, 0.6808326183624988, 0.680350206258162, 0.6798245346771097, 0.6794466407793873, 0.6790301604090997, 0.6786370871202001, 0.6780787004614776, 0.6774498498664712, 0.6773286668759472, 0.6768853522696585, 0.6764395120008937, 0.6764187572137365, 0.6755043072520562, 0.6755023447972424, 0.6747962701995418, 0.6748723560909056, 0.6741743521870307, 0.6736826048707062, 0.6730284241010558, 0.6731890658162675, 0.6724795757599596, 0.6720847644895878, 0.671594364013312, 0.6708701347405056, 0.6702886750113289, 0.6700042987769504, 0.6692965521002715, 0.6689484994366484, 0.6692490910584072, 0.6684379280738111, 0.6680986305452743, 0.6672339358419742, 0.666900885105133, 0.6669266691747702, 0.666627643468245, 0.6668524751123392, 0.666532779639622, 0.6660595893859863, 0.6663234141637694, 0.6640820010653082, 0.6647802271932925, 0.6644656390514014, 0.6646909866692885, 0.6635346293449402, 0.6620154612469223, 0.661884997475822, 0.6612221713336008, 0.6607731328820282, 0.6604519014088612, 0.6599598081606739, 0.6594767844901894, 0.6595204045187752, 0.6589159349225602, 0.6580378401954219, 0.657763165347981, 0.6577120792190984, 0.6575329078818267, 0.6575919214284645, 0.6567454367313745, 0.6560180893484152, 0.6556380136957708, 0.6553035230006812, 0.6548657367814262, 0.6558716839214541, 0.6542430731485475, 0.6548173274634019, 0.6558191562598606, 0.6545853187453072, 0.6537650699885387, 0.6536643808742739, 0.6533196827150741, 0.6523577777844555, 0.6521409934421756, 0.6530955406854737, 0.651645087296108, 0.6512778952436628, 0.6513771554209151, 0.6514387459125159, 0.6538986905565801, 0.6531333945832163, 0.650129293495754, 0.6499695532726791, 0.6503407127452346, 0.650624522398103, 0.65003444653637, 0.6515556463655435, 0.6493381648693445, 0.6494280959075351, 0.6493060352667323, 0.6492863889010447, 0.6502830435644905, 0.6489436887345224, 0.6491816480204744, 0.6502597858320992, 0.6487753449745898, 0.6473832699487794, 0.647148665617097, 0.646953526532875, 0.6469740089380516, 0.6467888254039692, 0.6467267315342741, 0.6470954229246896, 0.6465090409764703, 0.6459426726935045, 0.6457727976565091, 0.645441040227998, 0.6455394717882265, 0.6455985195231888, 0.6491911600220878, 0.646003349322193, 0.6484261967101187, 0.645657229423523, 0.6448964737496286, 0.6450504766320283, 0.6448395852772695, 0.6452005343617133, 0.64511269938271, 0.6451621759612606, 0.6450724945878082, 0.6446082796690599, 0.6447435415016031, 0.6443353270584682, 0.6451325706715854, 0.6462242702268205, 0.6450359981015044, 0.6447009635421465, 
0.6459125586275785, 0.6466515871713746, 0.6458257216327595, 0.6439035474129443, 0.6433645165191506, 0.6434951532561825, 0.645443630443429, 0.643107929769552, 0.6426359475783582, 0.642248886036423, 0.6439068164465562, 0.642824750351456, 0.6428220254070354, 0.6424781943267246, 0.6425938871671568, 0.642436413719969, 0.6431071866233394, 0.6423645194971337, 0.642575939646307, 0.6423488088373868, 0.6437250643406274, 0.6419217568523479, 0.6417660033927773, 0.6420647160062251, 0.6419619839146452, 0.647363669242499, 0.643349750986639, 0.643465511753874, 0.643357885333727, 0.6419335916357221, 0.641562087805766, 0.6432529512441383, 0.642104346347305, 0.641738914993574, 0.6426482614481224, 0.6429501128646563, 0.6422279301679359, 0.6441797362183624, 0.6428858093495639, 0.643737822883534, 0.6434570618395535, 0.6422331533342037, 0.6422527115300016, 0.6465478762140814, 0.642058812447314, 0.6425080216155862, 0.6423455717428675, 0.6423929070526699], 'val_acc': [0.4811320752467749, 0.49056603751092587, 0.49056603751092587, 0.48679245372988145, 0.5150943405223343, 0.5358490575034663, 0.5471698122204475, 0.5037735846807372, 0.5169811327502413, 0.5245283016618693, 0.53207547147319, 0.5603773582656428, 0.5339622639260202, 0.5377358488316806, 0.5660377365238262, 0.5679245289766564, 0.5584905667125054, 0.5566037733599825, 0.5584905667125054, 0.5584905667125054, 0.5622641516181658, 0.5660377365238262, 0.5679245289766564, 0.5698113214294865, 0.5754716987879771, 0.5811320761464677, 0.5886792459577884, 0.5792452839185607, 0.5773584903411145, 0.5792452839185607, 0.5867924535049582, 0.5830188685992979, 0.5792452827939447, 0.5811320752467749, 0.5905660384106186, 0.584905661052128, 0.5867924526052655, 0.5924528297388328, 0.5962264146444932, 0.584905661052128, 0.5981132082219394, 0.594339623316279, 0.6000000006747696, 0.592452829963756, 0.5886792452830188, 0.6037735855804299, 0.5943396235412022, 0.5867924528301887, 0.5924528301886792, 0.5981132073222466, 0.5962264148694164, 0.5962264148694164, 0.5962264148694164, 0.5943396235412022, 0.6113207553917507, 0.5962264148694164, 0.6018867933525229, 0.5962264148694164, 0.6075471695863975, 0.6150943393977183, 0.6132075469448881, 0.6037735855804299, 0.6094339620392277, 0.6113207544920579, 0.6075471695863975, 0.6150943393977183, 0.6094339629389205, 0.6113207553917507, 0.6075471695863975, 0.6094339629389205, 0.6169811318505485, 0.622641509209039, 0.6207547167562089, 0.6207547178808248, 0.6283018865675296, 0.6132075469448881, 0.6339622639260202, 0.6264150950143922, 0.6264150941146994, 0.622641510333655, 0.6283018876921456, 0.6358490563788504, 0.6339622639260202, 0.6207547176559016, 0.6301886790203598, 0.6377358499562965, 0.6377358499562965, 0.6377358488316806, 0.6396226412845107, 0.6169811327502412, 0.6339622639260202, 0.6320754723728828, 0.6226415101087318, 0.6301886799200526, 0.6377358497313733, 0.6320754723728828, 0.6358490572785431, 0.6396226412845107, 0.6415094348619569, 0.6301886799200526, 0.6433962270898639, 0.6452830195426941, 0.6415094346370337, 0.6339622648257129, 0.6264150950143922, 0.6264150950143922, 0.6452830195426941, 0.6415094346370337, 0.6377358497313733, 0.6396226421842035, 0.6433962270898639, 0.6339622648257129, 0.6396226421842035, 0.6415094346370337, 0.6415094346370337, 0.6433962270898639, 0.6377358497313733, 0.6377358497313733, 0.6415094346370337, 0.6377358497313733, 0.6377358497313733, 0.6396226412845107, 0.6396226412845107, 0.6396226412845107, 0.6396226421842035, 0.6396226421842035, 0.6377358497313733, 0.6415094346370337, 0.6339622648257129, 0.6433962261901711, 
0.6415094337373409, 0.6433962261901711, 0.6528301893540148, 0.6415094346370337, 0.6377358497313733, 0.6358490572785431, 0.6320754723728828, 0.6396226421842035, 0.6471698119955243, 0.6415094346370337, 0.6452830195426941, 0.6471698110958315, 0.6415094346370337, 0.6415094346370337, 0.6433962270898639, 0.6490566035486617, 0.6415094346370337, 0.6471698119955243, 0.6452830195426941, 0.6396226421842035, 0.6433962270898639, 0.6415094346370337, 0.6339622648257129, 0.6396226421842035, 0.6396226421842035, 0.6528301884543221, 0.6509433969011846, 0.6471698119955243, 0.6433962270898639, 0.6452830195426941, 0.6566037742596752, 0.6528301893540148, 0.6433962270898639, 0.6509433969011846, 0.6528301893540148, 0.6566037742596752, 0.6471698119955243, 0.6509433969011846, 0.6433962270898639, 0.6433962270898639, 0.6452830195426941, 0.6452830195426941, 0.6415094346370337, 0.6452830195426941, 0.6433962270898639, 0.6528301893540148, 0.6622641516181658, 0.6358490572785431, 0.6415094346370337, 0.6415094346370337, 0.6433962270898639, 0.6433962270898639, 0.6566037742596752, 0.6415094346370337, 0.6471698119955243, 0.6584905667125054, 0.6471698119955243, 0.654716981806845, 0.6566037742596752, 0.6377358497313733, 0.6528301893540148, 0.6471698119955243, 0.6490566044483544, 0.6660377365238261, 0.664150944070996, 0.6415094346370337, 0.6584905667125054, 0.6603773591653356, 0.664150944070996, 0.664150944070996], 'loss': [0.69763381393981, 0.7105913674846424, 0.6981844531482586, 0.6908804896049211, 0.7002310421397677, 0.6890532414970302, 0.69824444360721, 0.6925303032539532, 0.6882943342643006, 0.6847970917877303, 0.6892719542393919, 0.6811439971454808, 0.6804057132701705, 0.6962115321153349, 0.690611312022576, 0.6841962133049213, 0.6866795372542036, 0.6801165358115264, 0.6866726247690064, 0.6759685026892798, 0.6863591304192176, 0.6773401813729564, 0.6787760719224755, 0.6762892521946007, 0.6749107879768554, 0.6734781117186721, 0.6806472305391655, 0.6796143139145263, 0.6746330553616431, 0.672908148654322, 0.6722468303100759, 0.6770635078112686, 0.6666129841028817, 0.6671468047295963, 0.6698146642832858, 0.6700134639631755, 0.6683179240058379, 0.6779207236228645, 0.6643883649067199, 0.6650336900394772, 0.6696022650482977, 0.6614861129812455, 0.6600286917006984, 0.658476460175436, 0.651583011935067, 0.6634568578809086, 0.6586981167709091, 0.6611106627224974, 0.6577388405198558, 0.6600017563391753, 0.6549731925937898, 0.6607008828190807, 0.648250998494604, 0.6523348943131869, 0.6567776120475682, 0.6565243668694514, 0.6454335395505119, 0.6487699701235845, 0.6457342886533893, 0.648123957244926, 0.6571452222285324, 0.6447995707937628, 0.6410667152903871, 0.648511490034125, 0.6413717778754264, 0.6411334361885715, 0.6383996775742588, 0.6369847764283381, 0.6404116564861312, 0.6337847475897319, 0.6443416303373676, 0.6443707319105709, 0.63334813597524, 0.6325182960583613, 0.644397125117866, 0.6382584153177759, 0.6337508021808061, 0.6429568907201365, 0.6289279573652212, 0.6258862121559181, 0.6261933187368087, 0.6220835613422706, 0.6342826009848981, 0.6247802379002035, 0.6279644746648108, 0.6283307004846811, 0.6256605759544902, 0.6217865353123687, 0.6224836437278257, 0.6191687452387239, 0.6243988964777125, 0.6270894629206664, 0.6257247989445286, 0.6237024566414678, 0.6189798515050439, 0.6149748739649038, 0.6257139176769184, 0.6176760356032382, 0.6276540018120149, 0.6174791526674172, 0.619012543788323, 0.6106568153238237, 0.6193357128062735, 0.6225488496307767, 0.6149553010923715, 0.604280179039978, 0.6226016235682282, 0.60585598874964, 
0.615926036167265, 0.5998287342235089, 0.6156509514865178, 0.6121839409668238, 0.6092188401901707, 0.6055421651837805, 0.5941866580655265, 0.6019582847628888, 0.6077927731325167, 0.6002118931742665, 0.5953195139408712, 0.5982312272205329, 0.6059641718563787, 0.5947810216779817, 0.5969902821928075, 0.5942135366034538, 0.5936596707768579, 0.5954421294141387, 0.6033372079288825, 0.5962651069047021, 0.5853695799543759, 0.5837327086309166, 0.5885515291934356, 0.5971618210532776, 0.5978993003034652, 0.5922351438190417, 0.5812147997817656, 0.5956043443944385, 0.5938164893946462, 0.5986057903246501, 0.5866343139249169, 0.5980406838738151, 0.582888429657959, 0.5848387156579112, 0.5950872894343633, 0.5880918234473971, 0.6022420950224541, 0.5841102724869044, 0.5885366174341902, 0.5770780989832115, 0.581971282014931, 0.586966152524888, 0.5831243059072939, 0.5824071075696813, 0.5852913161244098, 0.5744164795773327, 0.5830465313916249, 0.5751700151011743, 0.5795485738848076, 0.5775529757273603, 0.5733420762408427, 0.5730965090128693, 0.5781392470885345, 0.5767807905640909, 0.5743001880591634, 0.5669449113807341, 0.5652631724918024, 0.5751946515123, 0.5611410435632078, 0.5710295546250265, 0.5677784168705212, 0.5701494404827662, 0.5722405681994944, 0.5636668739673619, 0.575879023550137, 0.5608540414861893, 0.5681426379298803, 0.5614112029310132, 0.569335764060255, 0.5594246730978822, 0.5643792042666095, 0.560409062672383, 0.5510438877577138, 0.5599546730894158, 0.5587624121283523, 0.5680901284927378, 0.5534953542645612, 0.5614012778932781, 0.5437359361901108, 0.5572455290512359, 0.5504018927432249, 0.5644353377112524, 0.5447469376600705, 0.5559871595864939, 0.5486648840381098, 0.5518640301943126, 0.5540478557987141, 0.5455840836181617, 0.5499462104234539, 0.52293714486335, 0.5501674566262907, 0.5585674545052374], 'acc': [0.5233291300744156, 0.4854981078476202, 0.4930643128116074, 0.5321563691239038, 0.504413619242881, 0.5321563682971054, 0.5119798228539254, 0.5245901638592627, 0.5359394704408633, 0.5510718786024957, 0.5561160147941728, 0.5674653223153171, 0.5624211861236399, 0.5145018914759084, 0.5485498115965277, 0.5435056740519078, 0.5346784358667996, 0.5548549818361241, 0.5548549804831814, 0.5800756615918943, 0.5498108445545764, 0.566204286914455, 0.5687263551606206, 0.5737704927052405, 0.5926860027099768, 0.5926860025972316, 0.5725094579808498, 0.5498108444418311, 0.5813366957525588, 0.5775535933457286, 0.581336696015631, 0.5510718791662219, 0.5977301396157071, 0.5977301389392358, 0.5813366957525588, 0.5977301389392358, 0.5813366968424293, 0.5624211847706971, 0.5964691054174608, 0.6103404792681016, 0.5674653211127013, 0.6166456501841695, 0.6116015127898766, 0.6242118534193964, 0.645649432609842, 0.5838587649382679, 0.6368221945750608, 0.595208070279671, 0.6040353086526878, 0.6153846156101058, 0.6343001258027509, 0.6279949567657701, 0.635561161053286, 0.6166456488312267, 0.6065573778008152, 0.6040353083520339, 0.641866330616411, 0.6557377043167248, 0.617906683029473, 0.6292559906633625, 0.6103404785916303, 0.6242118534193964, 0.6355611603768145, 0.6229508197472946, 0.6481715008184258, 0.6380832283223261, 0.6380832277961818, 0.6506935696283175, 0.6582597739158332, 0.6406052964181648, 0.6418663312928824, 0.6544766712459309, 0.6544766716217484, 0.6532156368973577, 0.6380832289987975, 0.6431273641006041, 0.6481715015324788, 0.6292559890849293, 0.6658259775268776, 0.6721311482926184, 0.6645649441554297, 0.6708701142446992, 0.6595208066108097, 0.6595208074376081, 0.6456494327225871, 0.6519546023232939, 
0.6595208071369542, 0.6708701137185548, 0.6519546023232939, 0.6582597737655063, 0.6557377058199946, 0.6696090803471069, 0.6746532158623126, 0.6544766705694596, 0.6746532158623126, 0.6746532165387841, 0.6633039087545677, 0.6645649428024869, 0.6481715013821518, 0.6645649436292853, 0.6645649436292853, 0.675914249910232, 0.6494325343777824, 0.6519546024736209, 0.6834804535212763, 0.6948297613054928, 0.6733921823405378, 0.6973518292510044, 0.6771752846346226, 0.6860025229700577, 0.6645649433286314, 0.6746532158623126, 0.6607818414855274, 0.6872635558153611, 0.6872635561911786, 0.6633039089048947, 0.675914249910232, 0.6948297604786944, 0.7061790660080062, 0.6683480447959178, 0.6696090801967799, 0.6759142490834336, 0.6796973513775184, 0.6973518280483886, 0.6885245897129535, 0.6860025217298602, 0.6796973527304612, 0.6834804540474207, 0.6960907940004692, 0.7074401005820699, 0.6935687259046307, 0.691046657959119, 0.6746532158623126, 0.6986128627727792, 0.7049180336888471, 0.682219419623684, 0.6960907948272677, 0.6847414875691956, 0.7061790666844776, 0.6872635564918326, 0.7074401004317429, 0.693568726581102, 0.6935687257543037, 0.6872635563415056, 0.6847414875691956, 0.6998738974971699, 0.696090795203085, 0.7150063053957301, 0.7099621686779085, 0.7074401016343587, 0.6986128634492507, 0.69482975995255, 0.708701135832605, 0.7225725096832458, 0.6986128627727792, 0.7301387139707616, 0.7087011351561335, 0.7023959638642483, 0.7112232027258277, 0.7162673389175049, 0.7250945779294113, 0.7187894082159593, 0.7263556118270037, 0.7061790665341506, 0.7200504415874073, 0.6973518280483886, 0.7276166465513944, 0.7175283734915686, 0.6973518280483886, 0.7124842371495645, 0.708701135832605, 0.7225725095329188, 0.6973518288751869, 0.7200504409109358, 0.7099621697301973, 0.7250945779294113, 0.7087011353064605, 0.7238335433553477, 0.7200504415874073, 0.706179067210622, 0.7213114753346725, 0.7200504414370803, 0.73518284911015, 0.7124842366234201, 0.7200504415874073, 0.7137452713478107, 0.7250945779294113, 0.7288776795470249, 0.7326607820666001, 0.7049180330123757, 0.7238335426788763, 0.7225725103597171, 0.738965952982668, 0.7162673389175049, 0.7288776805993137, 0.75283732578102, 0.7351828508389101, 0.7717528382285697, 0.7313997481690079, 0.7250945784555558]} ###Markdown 11. Generate predictionsPreviously, we calculated an overall score for our pre-trained model on the validation set. To end this notebook, let's access probabilities and class predictions for individual images using the .predict() method and np.round().We now have a deep learning model that can be used to identify honey bees and bumble bees in images! The next step is to explore transfer learning, which harnesses the prediction power of models that have been trained on far more images than the mere 1600 in our dataset. ###Code # predicted probabilities for x_eval # ... YOUR CODE FOR TASK 11 ... y_proba = pretrained_cnn.predict(x_eval) print("First five probabilities:") print(y_proba[:5]) print("") # predicted classes for x_eval # ... YOUR CODE FOR TASK 11 ... y_pred = np.round(y_proba).astype('int') print("First five class predictions:") print(y_pred[:5]) print("") ###Output First five probabilities: [[0.4059349 ] [0.39441615] [0.3882965 ] [0.39129484] [0.42224088]] First five class predictions: [[0] [0] [0] [0] [0]]
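###Markdown As an optional extra step (not part of the original project), the `classification_report` function imported in the first cell can summarize per-class precision, recall, and F1 on the holdout set. This is a minimal sketch assuming `y_eval` and `y_pred` as defined above. ###Code
# optional: per-class precision/recall/F1 on the holdout set
# y_pred is a column vector, so flatten it before scoring
print(classification_report(y_eval, y_pred.ravel()))
###Output _____no_output_____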
notebooks/Stackoverflow_Survey_2017.ipynb
###Markdown Analysis of the Stackoverflow Survey of 2017In this notebook, some analysis on the stackoverflow survey of 2017 will be done.The analysis is done by following the steps in the table of contents. Table of ContentsI. [Data gathering](#Gathering)II. [Data assess - Which questions should be answered?](#Assess)III. [Data cleaning](#Clean)VI. [Data Modelling and Analysis](#Modelling) ###Code import pandas as pd import numpy as np import matplotlib.pyplot as plt %matplotlib inline from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import StandardScaler from sklearn.metrics import f1_score from sklearn.model_selection import GridSearchCV, train_test_split from sklearn.linear_model import LogisticRegression from sklearn.pipeline import Pipeline import seaborn as sns from collections import defaultdict ###Output _____no_output_____ ###Markdown Part I : Data gatheringStackoverflow performs an annual survey; this data is available [here](https://insights.stackoverflow.com/survey/). The survey covers a wide range of topics. In order to find interesting questions for the analysis of the data, the existing statements were skimmed.From the previous work with this data set in the udacity classes, no big exploratory analysis has been done. ###Code df = pd.read_csv('../data/survey_results_public.csv') df_schema = pd.read_csv('../data/survey_results_schema.csv') # Show df to get an idea of the data df.head() df_schema.head() assert df.shape[1] == df_schema.shape[0] print('The survey contained {} questions. In total, there are {} survey responses.'.format(df.shape[1], df.shape[0])) ###Output The survey contained 154 questions. In total, there are 19102 survey responses. ###Markdown Part II : Data assess - Which questions should be answered?The schema and the data are used to find interesting questions which should be answered in the following analysis. ###Code df_schema.head(n=10) ###Output _____no_output_____ ###Markdown **Questions which will be answered in the following parts:**|| Question | Helpful columns || ---| :--- | :---||1| Exists a correlation between "Overpaid" and the programming experience? | YearsProgram, Overpaid ||2| Is there a programming language specific correlation between "OtherPeoplesCode - Maintaining other people's code is a form of torture" and "EnjoyDebugging -I enjoy debugging code"?| HaveWorkedLanguage, OtherPeoplesCode, EnjoyDebugging||3|How many people, who program in Python, follow the PEP8 guidelines and use spaces instead of tabs?|HaveWorkedLanguage, TabsSpaces|The questions have been found by looking at the df_schema in detail to find interesting questions. Part III: Data cleaningIn [Part II](#Assess) the needed columns are defined. In the following, the data is prepared for each question. Only the needed columns are modified and cleaned.The column *HaveWorkedLanguage* is split into separate columns, one for each programming language.
This information is needed for question 2 and 3, so it is done once at the beginning on the original data.Here are short cuts for the different questions:- [Question 1](Clean-q1)- [Question 2](Clean-q2)- [Question 3](Clean-q3) ###Code # HAVE WORKED LANGUAGE - Extract available values # convert all 'HaveWorkedLanguage' entries into lists: df_q3.HaveWorkedLanguage.str.split(';') df['HaveWorkedLanguageList'] = df.HaveWorkedLanguage.str.split(';') #flatten all lists to one prog_languages = set() for entry in df.HaveWorkedLanguageList: if isinstance(entry, float): # list(entry) is empty, so NaN is returned continue else: [prog_languages.add(elem.strip()) for elem in entry] # convert available programming languages in columns print(df.columns) df[list(prog_languages)] = pd.DataFrame(data = np.zeros((df.shape[0],len(prog_languages))), columns = list(prog_languages)) print(df.columns) # Convert HaveWorkedList into binary values for the different programming languages for idx, prog_lang_list in df.HaveWorkedLanguageList.iteritems(): if isinstance(prog_lang_list, float): # list(entry) is empty, so NaN is returned continue else: for entry in prog_lang_list: entry = entry.strip() df.loc[df.index[idx], entry] = 1 # Pick the needed columns for each question df_q1 = df[['YearsProgram', 'Overpaid']] df_q2 = df[list(prog_languages)+['OtherPeoplesCode', 'EnjoyDebugging']] df_q3 = df[['Python', 'TabsSpaces']] ###Output _____no_output_____ ###Markdown Data cleaning for question 1: Exists a correlation between "Overpaid" and the programming experience? ###Code # Calculation makes only sense if a value for *Overpaid* is available df_q1 = df_q1.dropna(subset = ['Overpaid']) print('The number of survey responsdents reduced from {} to {}.'.format(df.shape[0], df_q1.shape[0])) ## YEARS PROGRAM # Create a nested dictionary which matches the years of experience with the overpaid rate q1_dict_experience = defaultdict(dict) for key, data in df_q1.groupby(by=['YearsProgram', 'Overpaid']): experience = key[0] overpaid_target = key[1] value = data.YearsProgram.count() q1_dict_experience[experience][overpaid_target] = value print(q1_dict_experience) ###Output defaultdict(<class 'dict'>, {'1 to 2 years': {'Greatly underpaid': 17, 'Neither underpaid nor overpaid': 31, 'Somewhat overpaid': 4, 'Somewhat underpaid': 31}, '10 to 11 years': {'Greatly overpaid': 2, 'Greatly underpaid': 34, 'Neither underpaid nor overpaid': 113, 'Somewhat overpaid': 20, 'Somewhat underpaid': 135}, '11 to 12 years': {'Greatly overpaid': 1, 'Greatly underpaid': 21, 'Neither underpaid nor overpaid': 72, 'Somewhat overpaid': 9, 'Somewhat underpaid': 95}, '12 to 13 years': {'Greatly overpaid': 1, 'Greatly underpaid': 17, 'Neither underpaid nor overpaid': 69, 'Somewhat overpaid': 13, 'Somewhat underpaid': 92}, '13 to 14 years': {'Greatly underpaid': 14, 'Neither underpaid nor overpaid': 68, 'Somewhat overpaid': 14, 'Somewhat underpaid': 60}, '14 to 15 years': {'Greatly overpaid': 3, 'Greatly underpaid': 21, 'Neither underpaid nor overpaid': 96, 'Somewhat overpaid': 13, 'Somewhat underpaid': 128}, '15 to 16 years': {'Greatly overpaid': 1, 'Greatly underpaid': 23, 'Neither underpaid nor overpaid': 76, 'Somewhat overpaid': 14, 'Somewhat underpaid': 110}, '16 to 17 years': {'Greatly overpaid': 1, 'Greatly underpaid': 12, 'Neither underpaid nor overpaid': 40, 'Somewhat overpaid': 13, 'Somewhat underpaid': 59}, '17 to 18 years': {'Greatly overpaid': 1, 'Greatly underpaid': 17, 'Neither underpaid nor overpaid': 58, 'Somewhat overpaid': 13, 'Somewhat underpaid': 
68}, '18 to 19 years': {'Neither underpaid nor overpaid': 30, 'Somewhat overpaid': 6, 'Somewhat underpaid': 31}, '19 to 20 years': {'Greatly overpaid': 1, 'Greatly underpaid': 10, 'Neither underpaid nor overpaid': 59, 'Somewhat overpaid': 10, 'Somewhat underpaid': 57}, '2 to 3 years': {'Greatly overpaid': 3, 'Greatly underpaid': 32, 'Neither underpaid nor overpaid': 64, 'Somewhat overpaid': 13, 'Somewhat underpaid': 76}, '20 or more years': {'Greatly overpaid': 2, 'Greatly underpaid': 82, 'Neither underpaid nor overpaid': 457, 'Somewhat overpaid': 83, 'Somewhat underpaid': 478}, '3 to 4 years': {'Greatly overpaid': 3, 'Greatly underpaid': 39, 'Neither underpaid nor overpaid': 68, 'Somewhat overpaid': 17, 'Somewhat underpaid': 88}, '4 to 5 years': {'Greatly overpaid': 2, 'Greatly underpaid': 30, 'Neither underpaid nor overpaid': 92, 'Somewhat overpaid': 21, 'Somewhat underpaid': 143}, '5 to 6 years': {'Greatly overpaid': 1, 'Greatly underpaid': 33, 'Neither underpaid nor overpaid': 91, 'Somewhat overpaid': 19, 'Somewhat underpaid': 157}, '6 to 7 years': {'Greatly underpaid': 41, 'Neither underpaid nor overpaid': 98, 'Somewhat overpaid': 16, 'Somewhat underpaid': 120}, '7 to 8 years': {'Greatly overpaid': 4, 'Greatly underpaid': 46, 'Neither underpaid nor overpaid': 83, 'Somewhat overpaid': 14, 'Somewhat underpaid': 130}, '8 to 9 years': {'Greatly underpaid': 30, 'Neither underpaid nor overpaid': 86, 'Somewhat overpaid': 11, 'Somewhat underpaid': 99}, '9 to 10 years': {'Greatly overpaid': 1, 'Greatly underpaid': 37, 'Neither underpaid nor overpaid': 141, 'Somewhat overpaid': 24, 'Somewhat underpaid': 178}, 'Less than a year': {'Greatly overpaid': 1, 'Greatly underpaid': 3, 'Neither underpaid nor overpaid': 7, 'Somewhat underpaid': 10}}) ###Markdown Cleaning for question 2: Is there a programing language specific correlation between "OtherPeoplesCode - Maintaining other people's code is a form of torture" and "EnjoyDebugging -I enjoy debugging code"? ###Code # Calculation makes only sense if both values 'OtherPeoplesCode' and 'EnjoyDebugging' are available df_q2 = df_q2.dropna(subset = ['EnjoyDebugging', 'OtherPeoplesCode'], how = 'any') #df_q2 = df_q2.reset_index(drop = True) print('The number of survey responsdents reduced from {} to {}.'.format(df.shape[0], df_q2.shape[0])) # OTHER PEOPLE CODE tmp = pd.get_dummies(df_q2.OtherPeoplesCode, prefix='OtherPeoplesCode', prefix_sep='_') df_q2[tmp.columns] = tmp df_q2 = df_q2.drop(labels=['OtherPeoplesCode'], axis = 1) # ENJOY DEBUGGING tmp = pd.get_dummies(df_q2.EnjoyDebugging, prefix='EnjoyDebugging', prefix_sep='_') df_q2[tmp.columns] = tmp df_q2 = df_q2.drop(labels=['EnjoyDebugging'], axis = 1) df_q2.head() ###Output _____no_output_____ ###Markdown Cleaning for question 3: How many people, who program in Python, follow the PEP8 guidelines and use spaces instead of tabs? ###Code # Extract only the people who programm in Python print(df_q3.head()) df_q3 = df_q3[df_q3.Python == 1] print(df_q3.head()) # TABS SPACES tmp = pd.get_dummies(df_q3.TabsSpaces, prefix='TabsSpaces', prefix_sep='_') #print(tmp.shape) df_q3[tmp.columns] = tmp #print(df_q1.shape) df_q3 = df_q3.drop(labels=['TabsSpaces'], axis = 1) ###Output _____no_output_____ ###Markdown Part VI: Data Modelling and AnalysisHere are short cuts for the different questions:- [Question 1](Model-q1)- [Question 2](Model-q2)- [Question 3](Model-q3) Analysis for question 1: Exists a correlation between *Overpaid* and the programming experience? 
###Code df_q1.head() # To get comparable values, the values have to be set into relation to the total entries in this YearsProgram value for key1, elem in df_q1.groupby(by=['YearsProgram'])['Overpaid']: for key2 in q1_dict_experience[key1]: q1_dict_experience[key1][key2] = (q1_dict_experience[key1][key2]/elem.count())*100 key2 = 'Greatly underpaid' greatly_underpaid = [q1_dict_experience[key1][key2] if key2 in q1_dict_experience[key1].keys() else 0 for key1 in q1_dict_experience.keys()] key2 = 'Somewhat underpaid' underpaid = [q1_dict_experience[key1][key2] if key2 in q1_dict_experience[key1].keys() else 0 for key1 in q1_dict_experience.keys()] key2 = 'Neither underpaid nor overpaid' neither = [q1_dict_experience[key1][key2] if key2 in q1_dict_experience[key1].keys() else 0 for key1 in q1_dict_experience.keys()] key2 = 'Somewhat overpaid' overpaid = [q1_dict_experience[key1][key2] if key2 in q1_dict_experience[key1].keys() else 0 for key1 in q1_dict_experience.keys()] key2 = 'Greatly overpaid' greatly_overpaid = [q1_dict_experience[key1][key2] if key2 in q1_dict_experience[key1].keys() else 0 for key1 in q1_dict_experience.keys()] bottom_p3 = [x + y for x, y in zip(underpaid, greatly_underpaid)] bottom_p4 = [x + y for x, y in zip(bottom_p3, neither)] bottom_p5 = [x + y for x, y in zip(bottom_p4, overpaid)] N = len(greatly_overpaid) ind = np.arange(N) # the x locations for the groups width = 0.55 # the width of the bars: can also be len(x) sequence p1 = plt.bar(ind, greatly_underpaid, width) p2 = plt.bar(ind, underpaid, width, bottom=greatly_underpaid) p3 = plt.bar(ind, neither, width, bottom=bottom_p3) p4 = plt.bar(ind, overpaid, width, bottom=bottom_p4) p5 = plt.bar(ind, greatly_overpaid, width, bottom=bottom_p5) plot_xticks = [elem.split(' years')[0] for elem in list(q1_dict_experience.keys())] plt.ylabel('Percentage') plt.title('Scores by group and gender') plt.xticks(ind, plot_xticks, rotation='vertical')#('0-5', '5-10', '10-15', '15+')) plt.xlabel('Years of experience') #plt.yticks(np.arange(0, 81, 10)) plt.legend((p1[0], p2[0], p3[0], p4[0], p5[0]), ('Greatly underpaid', 'Somewhat underpaid', 'Neither underpaid nor overpaid', 'Somewhat overpaid', 'Greatly overpaid'),loc='center left', bbox_to_anchor=(1, 0.5)) plt.show() ###Output _____no_output_____ ###Markdown Analysis for question 2: Is there a programing language specific correlation between "OtherPeoplesCode - Maintaining other people's code is a form of torture" and "EnjoyDebugging -I enjoy debugging code"? 
###Code df_q2.head() target_columns = [elem for elem in df_q2.columns if elem not in list(prog_languages)] print(target_columns) corr_df_q2 = df_q2[target_columns] #print(corr_df_q2) corr = corr_df_q2.corr() #print(corr) plt.title('Independent of programming language') sns.heatmap(corr, xticklabels=corr.columns.values, yticklabels=corr.columns.values) # Organizing plots fig, axs = plt.subplots(7,5, figsize=(20, 20), facecolor='w', edgecolor='k') #axs = axs.ravel() #print(target_columns) # for idx, language in enumerate(prog_languages): # Only users who use this language are regarded tmp = df_q2[df_q2[language] == 1] #print(tmp) corr_df_q2 = tmp[target_columns] corr = corr_df_q2.corr() plt.subplot(7,5,idx+1) # https://jakevdp.github.io/PythonDataScienceHandbook/04.08-multiple-subplots.html#plt.subplot:-Simple-Grids-of-Subplots plt.title(language + ' (Responses: '+str(tmp.shape[0])+')') sns.heatmap(corr, xticklabels=[], yticklabels=[]) ###Output _____no_output_____ ###Markdown Analysis for question 3: How many people, who program in Python, follow the PEP8 guidelines and use spaces instead of tabs? ###Code df_q3.head() ###Output _____no_output_____ ###Markdown This question aims at a percentage, not a prediction. Therefore, no attempt to apply a machine learning model was made. ###Code sum_spaces = df_q3.TabsSpaces_Spaces.sum() sum_tabs = df_q3.TabsSpaces_Tabs.sum() sum_both = df_q3.TabsSpaces_Both.sum() print('{} Stackoverflow users who program in Python use Spaces, ' 'as it is suggested by the PEP8 guidelines. That is {:.2f}% of all Python users in this survey.' .format(sum_spaces, (sum_spaces/df_q3.shape[0])*100)) print('{} of the survey participants who program in Python have not ' 'heard about PEP8 yet and use tabs for indentation. That is {:.2f}%. Hopefully this number will decrease.' .format(sum_tabs,(sum_tabs/df_q3.shape[0])*100)) print('Only {:.2f}% of the Python programmers write a mixture of tabs and spaces. If they are only programming ' 'Python, this means that they use Python 2, because Python 3 does not allow ' 'a mixing of tabs and spaces anymore.' .format((sum_both/df_q3.shape[0])*100)) ###Output 2119 Stackoverflow users who program in Python use Spaces, as it is suggested by the PEP8 guidelines. That is 50.46% of all Python users in this survey. 1457 of the survey participants who program in Python have not heard about PEP8 yet and use tabs for indentation. That is 34.70%. Hopefully this number will decrease. Only 14.57% of the Python programmers write a mixture of tabs and spaces. If they are only programming Python, this means that they use Python 2, because Python 3 does not allow a mixing of tabs and spaces anymore.
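###Markdown As an optional cross-check of the percentages printed above (not part of the original notebook), the share of each indentation style can also be read directly from the dummy columns created in the cleaning step, since the mean of a 0/1 indicator column is the fraction of rows where it equals 1. This sketch assumes the three `TabsSpaces_*` columns exist as created above. ###Code
# optional cross-check: shares of each indentation style among Python users (in %)
shares = df_q3[['TabsSpaces_Spaces', 'TabsSpaces_Tabs', 'TabsSpaces_Both']].mean() * 100
print(shares.round(2))
###Output _____no_output_____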
notebooks/pods/datasets/mauna-loa.ipynb
###Markdown Datasets: Downloading Data from the Mauna Loa Observatory Open Data Science Initiative 28th May 2014 Neil D. LawrenceThis data set collection is from the Mauna Loa observatory, which records atmospheric carbon dioxide levels. The data was used by [Rasmussen and Williams (2006)](http://www.gaussianprocess.org/gpml/chapters/RW5.pdf) to demonstrate hyperparameter setting in Gaussian processes. When first called, or if called with `refresh_data=True`, the latest version of the data set is downloaded. Otherwise, the cached version of the data set is loaded from disk. ###Code import pods import pylab as plt %matplotlib inline data = pods.datasets.mauna_loa() ###Output _____no_output_____ ###Markdown Here, because I've downloaded the data before, I have a cached version. To download a fresh version of the data I can set `refresh_data=True`. ###Code data = pods.datasets.mauna_loa(refresh_data=True) ###Output _____no_output_____ ###Markdown The data dictionary contains the standard keys 'X' and 'Y' which give a unidimensional regression problem. ###Code plt.plot(data['X'], data['Y'], 'rx') plt.xlabel('year') plt.ylabel('CO$_2$ concentration in ppm') ###Output _____no_output_____ ###Markdown Additionally there are keys `Xtest` and `Ytest` which provide test data. The number of points considered to be *training data* is controlled by the `num_train` argument, which defaults to 545. This number is chosen as it matches that used in the [Gaussian Processes for Machine Learning](http://www.gaussianprocess.org/gpml/chapters/RW5.pdf) book. Below we plot the test and training data. ###Code plt.plot(data['X'], data['Y'], 'rx') plt.plot(data['Xtest'], data['Ytest'], 'go') plt.xlabel('year') plt.ylabel('CO$_2$ concentration in ppm') ###Output _____no_output_____ ###Markdown Of course we have included the citation information for the data. ###Code print(data['citation']) ###Output _____no_output_____ ###Markdown And extra information about the data is included, as standard, under the keys `info` and `details`. ###Code print(data['info']) print() print(data['details']) ###Output _____no_output_____ ###Markdown And, importantly, for reference you can also check the license for the data: ###Code print(data['license']) ###Output _____no_output_____
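###Markdown As a rough illustration (not part of the original notebook, and no substitute for the Gaussian process treatment referenced above), a straight-line trend can be fitted to the training data with `numpy.polyfit` and drawn over the train/test plot. This sketch assumes `data['X']` and `data['Y']` are numeric column vectors, as in the plots above. ###Code
import numpy as np
# fit a crude linear trend to the training data (this ignores the seasonal cycle)
x, y = data['X'].flatten(), data['Y'].flatten()
coeffs = np.polyfit(x, y, deg=1)
plt.plot(data['X'], data['Y'], 'rx')
plt.plot(data['Xtest'], data['Ytest'], 'go')
plt.plot(x, np.polyval(coeffs, x), 'b-')
plt.xlabel('year')
plt.ylabel('CO$_2$ concentration in ppm')
###Output _____no_output_____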
Quassifier_OSU.ipynb
###Markdown This notebook is an example of typical classification codes for the OSU meeting QSO data-challenge. To use the KNN and BDT classifiers you will need the `sklearn`. For the last example (MLP) you will need the `keras` package. ###Code %pylab inline from astropy.table import Table from astropy.table import join import astropy.io.fits as fits import speclite #To downsample import os ###Output _____no_output_____ ###Markdown The data from DESI will be delivered in `brick` files. This function reads them. ###Code def readBricks(path_in,brick_name): hdus = [] for channel in 'brz': filename = 'brick-{}-{}.fits'.format(channel,brick_name) hdulist = fits.open(os.path.join(path_in,filename)) hdus.append(hdulist) return hdus hdus = readBricks(os.environ['FAKE_QSO_PATH'],'all-osu') print hdus[0][0].shape, hdus[1][0].shape, hdus[2][0].shape ###Output (3000, 4760) (3000, 4232) (3000, 4798) ###Markdown In this example we decided to downsample to have a more tractable input signal ###Code def downsample(camera,nspec,ndownsample, si=0): data = np.ones((nspec,len(hdus[camera][2].data[si:])), dtype=[('flux', float), ('ivar',float)]) data['flux']=hdus[camera][0].data[:,si:] data['ivar']=hdus[camera][1].data[:,si:] return speclite.downsample(data,ndownsample,axis=1,weight='ivar') flux_b = downsample(0,3000,20) flux_r = downsample(1,3000,23) flux_z = downsample(2,3000,35,si=3) print flux_b.shape, flux_r.shape, flux_z.shape qso_flux = np.hstack([flux_b,flux_r,flux_z]) print qso_flux.shape table_train = Table.read(os.path.join(os.environ['FAKE_QSO_PATH'],'training_table_all.fits.gz')) from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import StandardScaler #We use LabelEncoder that translates a label into an integer encoder = LabelEncoder() encoder.fit(table_train['TRUE_OBJTYPE']) encoded_Y = encoder.transform(table_train['TRUE_OBJTYPE']) #Now we have our data prepared #We use 750 samples to train our algorithms and the rest for testing ntrain=750 qso_class_train = encoded_Y[:ntrain] qso_class_test = encoded_Y[ntrain:len(encoded_Y)] qso_flux_train = qso_flux[:ntrain] qso_flux_test = qso_flux[ntrain:len(encoded_Y)] ###Output _____no_output_____ ###Markdown KNN classifier ###Code def quassifier_knn(kn): from sklearn.neighbors import KNeighborsClassifier efficiency = np.zeros(len(kn)) purity = np.zeros(len(kn)) i_best = 0 qso_fit_best = None for i, knn in enumerate(kn): knc = KNeighborsClassifier(n_neighbors=knn) knc.fit(qso_flux_train, qso_class_train) qso_fit_train = knc.predict(qso_flux_train) qso_fit = knc.predict(qso_flux_test) good = np.count_nonzero(qso_fit==qso_class_test)/(1.0*len(qso_class_test)) efficiency[i]=good if(good>qso_fit_best): qso_fit_best = good i_best = i plt.scatter(kn,efficiency) plt.xlabel('Number of neighbors') plt.ylabel('Efficiency') knc = KNeighborsClassifier(n_neighbors=kn[i_best]) knc.fit(qso_flux_train,qso_class_train) results = knc.predict(qso_flux) return results results = quassifier_knn(np.arange(2,10)) ###Output _____no_output_____ ###Markdown BDT classifier (adaboost) https://en.wikipedia.org/wiki/AdaBoost ###Code def quassify_adaboost(number_estimators): from sklearn.ensemble import AdaBoostClassifier from sklearn.tree import DecisionTreeClassifier efficiency = np.zeros(len(number_estimators)) purity = np.zeros(len(number_estimators)) i_best =0 qso_fit_best = None for i, ne in enumerate(number_estimators): bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1), algorithm="SAMME",n_estimators=ne) bdt.fit(qso_flux_train,qso_class_train) 
qso_fit = bdt.predict(qso_flux_test) good = np.count_nonzero(qso_fit==qso_class_test)/(1.0*len(qso_class_test)) efficiency[i]=good if(good>qso_fit_best): qso_fit_best = good i_best = i plt.scatter(number_estimators,efficiency) plt.xlabel('Number of estimators') plt.ylabel('Efficiency') bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1), algorithm="SAMME",n_estimators=number_estimators[i_best]) bdt.fit(qso_flux_train,qso_class_train) results = bdt.predict(qso_flux) return results predicted_adaboost = quassify_adaboost(np.arange(50,200,25)) plt.hist(predicted_adaboost); ###Output _____no_output_____ ###Markdown Dense ANN ###Code from keras.models import Sequential from keras.layers import Dense, Dropout from keras.wrappers.scikit_learn import KerasClassifier from sklearn.model_selection import cross_val_score from sklearn.model_selection import StratifiedKFold, KFold from sklearn.pipeline import Pipeline from keras.layers.recurrent import LSTM from keras.utils import np_utils dummy_y = np_utils.to_categorical(encoded_Y) seed = 7 numpy.random.seed(seed) def create_baseline(): model = Sequential() model.add(Dense(128, input_dim=qso_flux.shape[1], init='normal', activation='relu')) model.add(Dense(32, init='normal', activation='relu')) model.add(Dropout(0.25)) model.add(Dense(16, init='normal', activation='relu')) model.add(Dense(8, init='normal', activation='relu')) model.add(Dense(4, init='normal', activation='sigmoid')) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) return model estimator = KerasClassifier(build_fn=create_baseline, nb_epoch=100, batch_size=100, verbose=0) kfold = KFold(n_splits=10, shuffle=True, random_state=seed) results = cross_val_score(estimator, qso_flux[0:len(encoded_Y)], dummy_y, cv=kfold) print("Results: %.2f%% (%.2f%%)" % (results.mean()*100, results.std()*100)) ###Output _____no_output_____
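###Markdown As a small added sketch (not part of the original data-challenge code): the illustrative helper below summarises any of the full-sample predictions above on the held-out portion only, using a confusion matrix and an overall efficiency number. It assumes `ntrain`, `encoded_Y`, `qso_class_test` and e.g. `predicted_adaboost` as defined in the cells above. ###Code
from sklearn.metrics import confusion_matrix

def summarise_predictions(predicted_all):
    # keep only the held-out part of the predictions (same slicing used for qso_class_test)
    predicted_test = predicted_all[ntrain:len(encoded_Y)]
    cm = confusion_matrix(qso_class_test, predicted_test)
    print(cm)
    # overall efficiency = fraction of correctly classified test objects
    print(1.0 * cm.trace() / cm.sum())
    return cm

cm_adaboost = summarise_predictions(predicted_adaboost)
###Output _____no_output_____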
assets/EMSE6574/Week1_Assignment.ipynb
###Markdown Practice Assignment for week 08/31 Question 1 : "Palindrome" script -- take any string, and find out if it is a palindrome ###Code import pandas x = 'test' y = '' for i in x: y = i + y print(y) if (x == y): print("Yes, it is a palindrome") else: print("No, it is not a palindrome") ###Output Yes, it is a palindrome ###Markdown Question 2: Create a huge dataset of fake data (or real data) using a list of dicts as a data structure. Iterate through that list; if a record matches some condition, print it Example 1: ###Code import random import string word = string.ascii_lowercase final_word = ''.join(random.choice(word) for i in range(3)) print(final_word) my_dict = {"A"+str(i):final_word for i in range(100,999)} #my_dict for i in list(my_dict.keys()): if i == "A100": print(i, my_dict[i]) ###Output A100 elh ###Markdown Example 2: ###Code new_dict = {} for i in range(200): new_dict['EMSE'+str(i)] = random.randrange(200) print(new_dict) for key, value in new_dict.items(): if value >= 4 and value < 9: print(key, value) ###Output _____no_output_____
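###Markdown An added sketch for completeness: Question 2 asks for a *list of dicts*, while the examples above use plain dicts, so here is one hedged variant that literally builds a list of dict records (the field names are made up for illustration) and prints the records matching a condition. ###Code
import random

# build a list of dicts with fake records
records = [{"id": i, "score": random.randrange(100)} for i in range(1000)]

# print every record that matches some condition
for record in records:
    if record["score"] > 95:
        print(record)
###Output _____no_output_____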
EdX/Microsoft Courses/Microsoft: DAT203.2x - Principles of Machine Learning/Module 1/Demo/ClassificationDemo-R.ipynb
###Markdown Understanding Classification and Logistic Regression with R IntroductionThis notebook contains a short introduction to the basic principles of classification and logistic regression. A simple R simulation is used to illustrate these principles. Specifically, the following steps are performed:- A data set is created. The label has binary `TRUE` and `FALSE` labels. Values for two features are generated from two bivariate Normal distribion, one for each label class.- A plot is made of the data set, using color to show the two label classes. - A plot of a logistic function is computed. - For each of three data sets a logistic regression model is computed, scored and a plot created using color to show class and shape to show correct and incorrect scoring. Create the data setThe code in the cell below computes the two class data set. The feature values for each label level are computed from a bivariate Normal distribution. Run this code and examine the first few rows of the data frame. ###Code sim.logt.data = function(x1, y1, n1, sd1, x2, y2, n2, sd2){ wx1 = rnorm(n1, x1, sd1) wy1 = rnorm(n1, y1, sd1) z1 = rep(TRUE, n1) wx2 = rnorm(n2, x2, sd2) wy2 = rnorm(n2, y2, sd2) z2 = rep(FALSE, n2) df1 = data.frame(x = wx1, y = wy1, z = z1) df2 = data.frame(x = wx2, y = wy2, z = z2) rbind(df1, df2) } sim.data = sim.logt.data(1.0, 1.0, 50, 1, -1.0, -1.0, 50, 1) head(sim.data) ###Output _____no_output_____ ###Markdown Plot the data setThe code in the cell below plots the data set using color to show the two classes of the labels. Execute this code and examine the results. Notice that the posion of the points from each class overlap with each other. ###Code require(repr) options(repr.plot.width=7, repr.plot.height=5) plot.class = function(df){ require(ggplot2) df$z = as.factor(df$z) ggplot(df, aes(x, y)) + geom_point(aes(color = z, size = 3)) + ggtitle('X vs. Y for two classes') } plot.class(sim.data) ###Output _____no_output_____ ###Markdown Plot the logistic functionLogistic regression computes a binary {0,1} score using a logistic function. A value of the logistic function above the cutoff (typically 0.5) are scored as a 1 or true, and values less than the cutoff are scored as a 0 or false. Execute the code and examine the resulting logistic function. ###Code plot.logistic = function(upper = 6, lower = -6){ require(ggplot2) x = seq(lower, upper, length = 100) y = exp(x)/(1 + exp(x)) df = data.frame(x = x, y = y) ggplot(df, aes(x,y)) + geom_line(color = 'red', size = 3) + geom_vline(xintercept = 0, color = 'blue', size = 1.5) + geom_hline(yintercept = 0.5, color = 'blue', size = 1.5)+ ggtitle('Logistic function for two-class classification') + ylab('Probabiltiy of positive response') + theme_grey(base_size = 14) } plot.logistic() ###Output _____no_output_____ ###Markdown Compute and score a logistic regression modelThere is a considerable anount of code in the cell below. The fist function uses computes and scores a logsitic regression model.The second function computes the evaluation of the logistic regression model in the following steps:- Plot the correctly and incorrectly scored cases, using color and shape to identify class and classification correctness.- Compute the elements of theh confusion matrix.- Commonly used performance statistics are computed.Execute this code and examine the results. Notice that most of the cases have been correctly classified. Classification errors appear along a boundary between those two classes. 
###Code options(repr.plot.width=6, repr.plot.height=5) logistic.mod <- function(df){ glm(z ~ 0 + x + y, data = df, family = binomial) } logistic.pred = function(mod, df, prob = 0.5){ df$score = ifelse(predict(mod, type = 'response') > prob, TRUE, FALSE) df } logistic.eval <- function(mod, df, prob = 0.5){ require(ggplot2) df = logistic.pred(mod, df, prob = prob) df$conf = ifelse(df$z == TRUE & df$score == TRUE, 'TP', ifelse(df$z == FALSE & df$score == TRUE, 'FP', ifelse(df$z == FALSE & df$score == FALSE, 'TN', 'FN'))) df$error = ifelse((df$conf == 'FP' | df$conf == 'FN'), 'error', 'correct') cols = c('z', 'error') df[, cols] = lapply(df[, cols], as.factor) p1 = ggplot(df, aes(x, y)) + geom_point(aes(color = z, size = 3, shape = error)) + ggtitle('X vs. Y for two classes') print(p1) TP = length(df[df$conf == 'TP', 'conf']) FP = length(df[df$conf == 'FP', 'conf']) TN = length(df[df$conf == 'TN', 'conf']) FN = length(df[df$conf == 'FN', 'conf']) print(paste('accuracy =', as.character((TP + TN)/(TP + TN + FP + FN)))) print(paste('precision =', as.character(signif(TP/(TP + FP)), digits = 2))) print(paste('recall =', as.character(TP/(TP + FN)))) out = data.frame(Positive = c(TP, FP), Negative = c(FN, TN)) row.names(out) = c('TruePos', 'TrueNeg') print(out) } mod = logistic.mod(sim.data) logistic.eval(mod, sim.data) ###Output [1] "accuracy = 0.9" [1] "precision = 0.884615" [1] "recall = 0.92" Positive Negative TruePos 46 4 TrueNeg 6 44 ###Markdown Moving the decision boundaryThe example above uses a cutoff at the midpoint of the logistic function. However, you can change the trade-off between correctly classifying the positive cases and correctly classifing the negative cases. The code in the cell below computes and scores a logistic regressiion model for three different cutoff points. Run the code in the cell and carefully compare the results for the three cases. Notice, that as the logistic cutoff changes the decision boundary moves on the plot, with progressively more positive cases are correctly classified. In addition, accuracy and precision decrease and recall increases. ###Code run.demo.prob <- function(){ logt <- sim.logt.data(0.5, 0.5, 50, 1, -0.5, -0.5, 50, 1) probs = c(0.5, 0.25, 0.125) for(p in probs){ logMod <- logistic.mod(logt) logistic.eval(logMod, logt, p) } } run.demo.prob() ###Output [1] "accuracy = 0.74" [1] "precision = 0.75" [1] "recall = 0.72" Positive Negative TruePos 36 14 TrueNeg 12 38
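###Markdown A short added recap (not part of the original demo): the logistic curve computed in `plot.logistic` above is $$p(x) = \frac{e^{x}}{1 + e^{x}} = \frac{1}{1 + e^{-x}},$$ and a case is scored as `TRUE` whenever $p(x)$ exceeds the chosen cutoff (0.5, 0.25 or 0.125 in `run.demo.prob`), which is why lowering the cutoff moves the decision boundary and trades accuracy and precision for recall in the plots above.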
LOD Cloud metadata experiments.ipynb
###Markdown Identify the different Media Types used within the LOD Cloud dataset metadata ###Code import rdflib from rdflib import Graph from rdflib.plugin import register, Serializer, Parser register('rdfa', Parser,'rdflib.plugins.parsers.rdfa', 'RDFaParser') rdfaCounter = 0 counter = 0 # Different Media Types Used def __extractMediaType(item): global rdfaCounter global counter mtype = item["media_type"] if (";" in mtype): mtype = mtype[:mtype.find(';')] mtype = str(mtype) if mtype == "text/html": if "access_url" in item: try: h = urllib2.urlopen(str(item["access_url"]).strip(), timeout=10) if (h.getcode() > 399): g = Graph() g.parse(str(item["access_url"]).strip(), 'rdfa') if (len(g) > 1): rdfaCounter = rdfaCounter + 1 except : pass if (len(mtype) == 0): mtype = "None" if (mtype in mediaTypes): mediaTypes[mtype] = mediaTypes[mtype] + 1 else: mediaTypes[mtype] = 1 mediaTypes = dict({}) # Get Different Media Types for key in lod_data: full_download = lod_data[key]["full_download"] other_download = lod_data[key]["other_download"] if (len(full_download) > 0): for item in full_download: if ("media_type" in item): __extractMediaType(item) if (len(other_download) > 0): for item in other_download: if ("media_type" in item): __extractMediaType(item) # Create Chart toPlot = dict({}) toPlot["Others"] = 0 for (k,v) in mediaTypes.items(): if k == "text/html": # skip text/html as it would be large to print continue if (v > 25): toPlot[k] = v else: toPlot["Others"] = toPlot["Others"] + 1 # Display Table print("\033[4mTabular View\033[0m") print("") print("") toPlot["text/html"] = mediaTypes["text/html"] sorted_toPlot = sorted(toPlot.items(), key=lambda kv: kv[1]) print "{:<50} {:<10}".format('\033[1m' +'Media Type','Frequency'+'\033[0m') for k, v in sorted_toPlot: print "{:<50} {:<10}".format(k, v) print "Total text/html in RDFa: "+ str(rdfaCounter) ###Output _____no_output_____ ###Markdown Identify the Dataset's Accessibility based on the LOD Cloud available metadata ###Code acceptable_media_types = set() acceptable_media_types.add("application/x-ntriples") acceptable_media_types.add("application/rdf+xml") acceptable_media_types.add("text/turtle") acceptable_media_types.add("application/x-nquads") acceptable_media_types.add("application/trig") acceptable_media_types.add("application/n-triples") acceptable_media_types.add("gzip:ntriples") acceptable_media_types.add("application/x-gzip") acceptable_media_types.add("application/octet-stream") acceptable_media_types.add("application/x-ntriples") acceptable_media_types.add("RDF") acceptable_media_types.add("plain/text") from SPARQLWrapper import SPARQLWrapper, JSON, XML from SPARQLWrapper.SPARQLExceptions import EndPointInternalError from rdflib.plugin import register, Serializer, Parser def __query_endpoint(uri): try: sparql = SPARQLWrapper(uri) sparql.setQuery('ASK {?s ?p ?o}') sparql.setReturnFormat(XML) sparql.setTimeout(3) results = sparql.query().convert() for result in results.getElementsByTagName('boolean'): return True return False except (EndPointInternalError, AttributeError) as epex: try: params = urllib.urlencode({'query': 'ASK {?s ?p ?o}'}) opener = urllib2.build_opener(urllib2.HTTPHandler) request = urllib2.Request(uri+'?'+params) request.get_method = lambda: 'GET' request.add_header('Accept', 'application/sparql-results+json') url = opener.open(request, timeout=3) data = url.read() results = json.loads(data) if(results['boolean'] is not None): return True else: return False except Exception as noContentNego: try: params = 
urllib.urlencode({'query': 'ASK {?s ?p ?o}'}) opener = urllib2.build_opener(urllib2.HTTPHandler) request = urllib2.Request(uri+'?'+params) request.get_method = lambda: 'GET' url = opener.open(request, timeout=3) data = url.read() if (data): return True else: return False except: e = sys.exc_info()[0] return False except: e = sys.exc_info()[0] return False def __query_void(url, voidurl): if (__checkstatus(voidurl)): try: graph = rdflib.Graph() graph.parse(voidurl) accessible = False result = graph.query('ASK { ?s a <http://rdfs.org/ns/void#Dataset> . }') for row in result: accessible = bool(row) return accessible except: e = sys.exc_info()[0] return False else: return False def __checkstatus(host): try: h = urllib2.urlopen(host, timeout=10) if (h.getcode() > 399): return False except Exception as e: return False return True def availableSPARQLEntryPoint(record): sparqlEndpoint = None if ("sparql" in record): if (len(record["sparql"]) >= 1): sparqlEndpoint = record["sparql"][0]["access_url"] if (not("FAIL" in record["sparql"][0]["status"])): if (__checkstatus(sparqlEndpoint)): return __query_endpoint(sparqlEndpoint) else: return False else: return False else: return False else: return False def availableDatadumpEntryPoint(record): datadumpLocation = [] full_download = record["full_download"] other_download = record["other_download"] if (len(full_download) > 0): for item in full_download: if ("media_type" in item): mtype = item["media_type"] if (";" in mtype): mtype = mtype[:mtype.find(';')] if (item["media_type"] in acceptable_media_types): if (not(".well-known/" in item["download_url"])): if (__checkstatus(item["download_url"])): datadumpLocation.append(item["download_url"]) elif (len(other_download) > 0): for item in other_download: if (not(".well-known/" in item["access_url"])): if (item["media_type"] in acceptable_media_types): if (__checkstatus(item["access_url"])): datadumpLocation.append(item["access_url"]) return len(datadumpLocation) > 0 def availableVoidEntryPoint(record): voidLocation = [] full_download = record["full_download"] other_download = record["other_download"] if (len(full_download) > 0): for item in full_download: if ("media_type" in item): mtype = item["media_type"] if (";" in mtype): mtype = mtype[:mtype.find(';')] if (item["media_type"] == "meta/void"): if (__checkstatus(item["download_url"])): voidLocation.append(item["download_url"]) elif (".well-known/" in item["download_url"]): if (__checkstatus(item["download_url"])): voidLocation.append(item["download_url"]) elif (len(other_download) > 0): for item in other_download: if (item["media_type"] == "meta/void"): if (__checkstatus(item["access_url"])): voidLocation.append(item["access_url"]) elif (".well-known/" in item["access_url"]): voidLocation.append(item["access_url"]) return len(voidLocation) > 0 dataSources = dict({}) # key, (dd,sparql,void) 1 = available 0 = not available for key in lod_data: dataSources[key] = (availableDatadumpEntryPoint(lod_data[key]),availableSPARQLEntryPoint(lod_data[key]), availableVoidEntryPoint(lod_data[key])) ###Output _____no_output_____ ###Markdown The next snippet checks the number of datasets that have no access point ###Code noAccessPoint = 0 for key in lod_data: full_download = lod_data[key]["full_download"] other_download = lod_data[key]["other_download"] sparql = lod_data[key]["sparql"] if ((len(full_download) == 0) and (len(other_download) == 0) and (len(sparql) == 0)): noAccessPoint = noAccessPoint + 1 print "Number of datasets without an access point: "+str(noAccessPoint) 
###Output _____no_output_____ ###Markdown This snippet will create a JSON file that can be used to recreate the LOD cloud visualisation. The LOD cloud diagram code can be found here: https://github.com/lod-cloud/lod-cloud-draw ###Code available_lod_data = loadLODData() modified_dataSources = dict(dataSources) for (k,v) in dataSources.items(): if v == (0,0,0): del modified_dataSources[k] del available_lod_data[k] print json.dumps(available_lod_data) ###Output _____no_output_____ ###Markdown The following code snippet will identify the different access points of datasets ###Code dd = 0 sparql = 0 void = 0 ddSparql = 0 ddVoid = 0 SparqlVoid = 0 allthree = 0 nothing = 0 for (k,v) in dataSources.items(): if v == (0,0,0): nothing+=1 if v == (1,0,0): dd+=1 if v == (0,1,0): sparql+=1 if v == (0,0,1): void+=1 if v == (1,1,0): ddSparql+=1 if v == (1,0,1): ddVoid+=1 if v == (0,1,1): SparqlVoid+=1 if v == (1,1,1): allthree+=1 print("Only Datadump: "+ str(dd)) print("Only SPARQL: "+ str(sparql)) print("Only VOID: "+ str(void)) print("Only Datadump and SPARQL: "+ str(ddSparql)) print("Only Datadump and VOID: "+ str(ddVoid)) print("Only SPARQL and VOID: "+ str(SparqlVoid)) print("All three entry points: "+ str(allthree)) print("No entry points: "+ str(nothing)) labels = 'Datadump', 'SPARQL', 'voID', 'More than 1\nDiscoverability\nEntry', 'None' sizes = [dd, sparql, void, (ddSparql + ddVoid + SparqlVoid + allthree), nothing] fig, ax = plt.subplots(figsize = (7,7)) patches, texts, autotexts = ax.pie(sizes, labels=labels, autopct='%1.1f%%', shadow=False, startangle=90, textprops={'fontsize': 12}) texts[0].set_fontsize(15) texts[1].set_fontsize(15) texts[2].set_fontsize(15) texts[3].set_fontsize(15) texts[4].set_fontsize(15) ax.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle. plt.show() ###Output _____no_output_____ ###Markdown Identifying LicensesLicences are the heart of Open Data. They define whether third parties can re-use data or otherwise, and to what extent. For this experiment we parsed through each dataset in the data file and looked for the value attributed to the `license` key. The next code snippet parses through the LOD Cloud data and checks the license for each dataset. The variable `licensesUsed` stores all licenses used and their frequency. 
###Code licensesUsed = dict({}) for key in lod_data: if ("license" in lod_data[key]): theLicence = lod_data[key]["license"] if theLicence == "": continue if not(theLicence in licensesUsed): licensesUsed[theLicence] = 1 else: licensesUsed[theLicence] = licensesUsed[theLicence] + 1 ###Output _____no_output_____ ###Markdown Visualising the licenses used ###Code # Class used to visualse the data def visualiseLicenseData(licensesUsed): fig, ax = plt.subplots(figsize = (10,8)) ax.barh(licensesUsed.keys(), licensesUsed.values(), height=0.5) ax.set_yticks(np.arange(len(licensesUsed.keys()))) ax.set_yticklabels(licensesUsed.keys(), fontsize=12, fontweight='bold') ax.invert_yaxis() plt.xlabel("Frequency", fontsize=15, fontweight='bold') for i, v in enumerate(licensesUsed.values()): ax.text(v + 2, i + 0.08 , str(v), color='black', fontweight='bold', fontsize=10) # Display Table print("\033[4mTabular View\033[0m") print("") print("") sorted_licensesUsed = sorted(licensesUsed.items(), key=lambda kv: kv[1]) print "{:<60} {:<40} {:<10}".format('\033[1m' +'License','Frequency','Percentage'+'\033[0m') totalItems = len(lod_data) for k, v in sorted_licensesUsed: print "{:<60} {:<40} {:<10}".format(k, v, str((v * 100.0)/totalItems)+"%") # Display Plot print("") print("") print("\033[4mBar View\033[0m") plt.show() # Visualise all data visualiseLicenseData(licensesUsed) # Visualise Summerised Data licenseLabels = dict({}) licenseLabels['https://creativecommons.org/licenses/by/3.0/'] = "CC-BY-3.0" licenseLabels['http://reference.data.gov.uk/id/open-government-licence'] = "OGL-UK" licenseLabels['http://www.opendefinition.org/licenses/odc-by'] = "ODC-BY" licenseLabels['http://www.opendefinition.org/licenses/odc-pddl'] = "ODC-PDDL" licenseLabels['http://www.opendefinition.org/licenses/odc-odbl'] = "ODC-ODBL" licenseLabels['http://creativecommons.org/licenses/by-nc/2.0/'] = "CC-BY-NC-2.0" licenseLabels['http://www.opendefinition.org/licenses/cc-zero'] = "CC0" licenseLabels['http://www.opendefinition.org/licenses/cc-by-sa'] = "CC-BY-SA" licenseLabels['http://www.opendefinition.org/licenses/cc-by'] = "CC-BY" summLicensesUsed = dict({}) summLicensesUsed['Other'] = 0 for k,v in licensesUsed.items(): if (v < 10): summLicensesUsed['Other'] = summLicensesUsed['Other'] + v else: if k in licenseLabels: summLicensesUsed[licenseLabels[k]] = v visualiseLicenseData(summLicensesUsed) ###Output _____no_output_____ ###Markdown In the next experiment, we use a regular expression to identify the dataset which potentially have a license assigned to its description ###Code import re def __tryDecoding(text): try: text = unicode(text, 'utf-8') return text except TypeError: return text def __licenseStringExtractor(text): potentialText = __tryDecoding(text) if (potentialText): str_list = potentialText.splitlines() str_list = filter(None, str_list) new_desc = ' '.join([__tryDecoding(x) for x in str_list]) p = re.compile(r'.*(licensed?|copyrighte?d?).*(under|grante?d?|rights?).*',re.IGNORECASE | re.MULTILINE) m = p.match(new_desc) return not(m == None) else: return False potentialLicence = [] for key in lod_data: if ("description" in lod_data[key]): text = lod_data[key]["description"]['en'] if (__licenseStringExtractor(text)): potentialLicence.append(str(key)) print(potentialLicence) ###Output _____no_output_____
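###Markdown A small follow-up sketch (an addition, not in the original experiment): cross-check how many of the datasets whose description appears to mention a licence are nevertheless missing a `license` value in their metadata. It simply reuses `lod_data` and `potentialLicence` from the cells above. ###Code
missing_license_key = []
for key in potentialLicence:
    if lod_data[key].get("license", "") == "":
        missing_license_key.append(key)

print(len(potentialLicence))
print(len(missing_license_key))
###Output _____no_output_____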
Assignments/Assignment_2/Q1/q1_Arch2_Line.ipynb
###Markdown Decreasing number of filters to 16 ###Code import numpy as np import keras from keras.models import Sequential from matplotlib import pyplot as plt from keras.layers import Dense,Flatten from keras.layers import Conv2D, MaxPooling2D,BatchNormalization from keras.utils import np_utils from sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score, classification_report class AccuracyHistory(keras.callbacks.Callback): def on_train_begin(self, logs={}): self.acc = [] self.loss = [] self.val_f1s = [] self.val_recalls = [] self.val_precisions = [] def on_epoch_end(self, batch, logs={}): self.acc.append(logs.get('acc')) self.loss.append(logs.get('loss')) X_val, y_val = self.validation_data[0], self.validation_data[1] y_predict = np.asarray(model.predict(X_val)) y_val = np.argmax(y_val, axis=1) y_predict = np.argmax(y_predict, axis=1) self.val_recalls.append(recall_score(y_val, y_predict, average=None)) self.val_precisions.append(precision_score(y_val, y_predict, average=None)) self.val_f1s.append(f1_score(y_val,y_predict, average=None)) data = np.load('/home/aj/assignments/assign2/outfile.npz') X_train=data["X_train.npy"] X_test=data["X_test.npy"] y_train=data["y_train.npy"] y_test=data["y_test.npy"] # reshape to be [samples][pixels][width][height] X_train = X_train.reshape(X_train.shape[0],28, 28,3).astype('float32') X_test = X_test.reshape(X_test.shape[0],28, 28,3).astype('float32') # normalize inputs from 0-255 to 0-1 X_train = X_train / 255 X_test = X_test / 255 num_classes = y_test.shape[1] input_shape=(28,28,3) history = AccuracyHistory() def create_model(filters,filt1_size,conv_stride,pool_size,pool_stride,opt,loss): model=Sequential() model.add(Conv2D(filters, kernel_size=(filt1_size, filt1_size), strides=(conv_stride, conv_stride),activation='relu',input_shape=input_shape)) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(pool_size, pool_size), strides=(pool_stride,pool_stride), padding='valid')) model.add(Flatten()) model.add(Dense(1024,activation='relu')) model.add(Dense(num_classes, activation='softmax')) model.compile(optimizer=opt,loss=loss,metrics=['accuracy']) return model model = create_model(16,7,1,2,2,'adam','categorical_crossentropy') print(model.summary()) def fit_model(epochs,batch_size): model.fit(X_train, y_train,batch_size=batch_size,epochs=epochs,validation_split=0.05,callbacks=[history]) score = model.evaluate(X_test, y_test, verbose=0) print('Test loss:', score[0]) print('Test accuracy:', score[1]) y_pred = model.predict_classes(X_test) cnf_mat = confusion_matrix(np.argmax(y_test,axis=1), y_pred) return cnf_mat,score,y_pred epochs=10 batch_size = 512 cnf_mat,score,y_pred = fit_model(epochs,batch_size) from keras.models import load_model model.save('dec_filter_Line.h5') fscore=f1_score(np.argmax(y_test,axis=1), y_pred,average=None) recall=recall_score(np.argmax(y_test,axis=1), y_pred,average=None) prec=precision_score(np.argmax(y_test,axis=1), y_pred,average=None) def plot(r1,r2,data,Info): plt.plot(range(r1,r2),data) plt.xlabel('Epochs') plt.ylabel(Info) plt.show() plot(1,epochs+1,history.acc,'Accuracy') plot(1,epochs+1,history.loss,'Loss') plt.plot(recall,label='Recall') plt.plot(prec,label='Precision') plt.xlabel('Class') plt.ylabel('F-score vs Recall vs Precision') plt.plot(fscore,label='F-score') plt.legend() avg_fscore=np.mean(fscore) print(avg_fscore) avg_precision=np.mean(prec) print(avg_precision) avg_recall=np.mean(recall) print(avg_recall) cnf_mat = confusion_matrix(np.argmax(y_test,axis=1), y_pred) import 
numpy as np import matplotlib import matplotlib.pyplot as plt conf = cnf_mat fig, ax = plt.subplots(figsize=(30,30)) im = ax.imshow(conf,alpha=0.5) # plt.show() # We want to show all ticks... ax.set_xticks(np.arange(cnf_mat.shape[0])) ax.set_yticks(np.arange(cnf_mat.shape[1])) # ... and label them with the respective list entries ax.set_xticklabels(np.arange(0,96)) ax.set_yticklabels(np.arange(0,96)) # Rotate the tick labels and set their alignment. plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor") # Loop over data dimensions and create text annotations. for i in range(cnf_mat.shape[0]): for j in range(cnf_mat.shape[1]): text = ax.text(j, i, conf[i, j], ha="center", va="center",color="black",fontsize=10) ax.set_title("Confusion matrix",fontsize=20) fig.tight_layout() # fig.savefig('plot1_cnf.png') plt.show() del model ###Output _____no_output_____
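###Markdown Optional sanity check (added sketch): `load_model` is imported above but never used, so if desired the saved `dec_filter_Line.h5` file can be reloaded and re-scored on the test set to confirm the checkpoint reproduces the in-memory results. ###Code
from keras.models import load_model

# reload the model saved earlier in this notebook
restored = load_model('dec_filter_Line.h5')
restored_score = restored.evaluate(X_test, y_test, verbose=0)
print('Restored test loss:', restored_score[0])
print('Restored test accuracy:', restored_score[1])
###Output _____no_output_____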
Generative/wall-of-letters.ipynb
###Markdown Wall of Letters
---
- Author: Diego Inácio
- GitHub: [github.com/diegoinacio]
- Notebook: [wall-of-letters.ipynb](https://github.com/diegoinacio/creative-coding-notebooks/blob/master/Generative/wall-of-letters.ipynb)
---
A responsive experiment with character styling and layout.
###Code
from IPython.display import HTML, display
import random
###Output _____no_output_____
###Markdown
The purpose of this experiment is to reproduce a nice visual of text blocks, imitating bricks in a wall. The main idea is to use a CSS-styled layout for the blocks and make them responsive by displaying the generated HTML. To make the process less verbose, *Python* is used as the scripting language that produces the HTML code.

Set of letters
---
For this project, the following set of characters will be used:
- **Uppercase**: _ABCDEFGHIJKLMNOPQRSTUVWXYZ_
- **Lowercase**: _abcdefghijklmnopqrstuvwxyz_
- **Symbols**: _!#$%&*?@^~_
- **Numbers**: _0123456789_

This set will be used to generate random characters for the blocks.
###Code
# Init set of characters
SET = ""
# Include uppercase
SET += "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# Include lowercase
SET += "abcdefghijklmnopqrstuvwxyz"
# Include symbols
SET += "!#$%&*?@^~"
# Include numbers
SET += "0123456789"

SET = list(SET)
###Output _____no_output_____
###Markdown
Useful functions
---
Here some useful functions are declared to make it easier to generate the HTML code.
###Code
def containerClass(count):
    """ Generate the text-container class """
    return f'''
    .text-container {{
        -webkit-column-count: {count};
        -moz-column-count: {count};
        column-count: {count};
    }}
    '''

def mediaQuery(maxWidth, content):
    """ Generate the media query for responsive design """
    return f'''
    @media screen and (max-width: {maxWidth}px) {{{content}}}
    '''
###Output _____no_output_____
###Markdown
Style code
---
Here the style part of the HTML is created. Two classes are used to model the structure of the code:
- **text-container**: the container div which parents the blocks and is responsible for the layout and responsiveness
- **text-block**: the div which holds each group of letters

As a result, the _style_ element is created and aggregated to the output code.
###Code
STYLE = f'''
<style>
.text-block {{
    overflow-wrap: break-word;
    font-family: "Courier New", Courier, monospace;
    font-weight: normal;
    line-height: 50%;
    text-align: justify;
    vertical-align: middle;
    margin-bottom: 1em;
}}

{containerClass(6)}
{mediaQuery(1680, containerClass(5))}
{mediaQuery(1280, containerClass(4))}
{mediaQuery(880, containerClass(3))}
{mediaQuery(480, containerClass(2))}
</style>
'''
###Output _____no_output_____
###Markdown
Container code
---
Here we generate the container code. This div will contain all blocks of letters, and each letter will be styled randomly by changing its color, size, or any other attribute that may make the visual a little more appealing.
###Code # Parameters N_BLOCKS = 64 # Deterministic random function random.seed(1234) # Build blocks BLOCKS = "" for _ in range(64): BG_HUE = random.randint(0, 360) N_LETTERS = random.randint(100, 500) LETTERS = "" for _ in range(N_LETTERS): letter_index = random.randint(0, len(SET) - 1) letter_hue = BG_HUE + random.randint(-100, 100) letter_size = random.randint(5, 20) letter_style = f'color: hsl({letter_hue},50%,50%); font-size: {letter_size}px' LETTERS += f'<font style="{letter_style}">{SET[letter_index]}</font>' block_style = f'background-color: hsla({BG_HUE}, 50%, 50%, 0.05)' BLOCKS += f'<div class="text-block" style="{block_style}">{LETTERS}</div>' CONTAINER = f'<div class="text-container">{BLOCKS}</div>' OUTPUT = STYLE + CONTAINER ###Output _____no_output_____ ###Markdown Output---Display the output HTML generated code. ###Code display(HTML(OUTPUT)) ###Output _____no_output_____ ###Markdown Wall of Emojis---As a fun experiment, let's use emojis instead of letters and see how it looks like 😅Let's first declare the set of emojis.*p.s.: The list of emojis was get from the (getemoji)[https://getemoji.com/] website* ###Code SET = "🧳🌂☂️🧵🧶👓🕶🥽🥼🦺👔👕👖🧣🧤🧥🧦👗👘🥻🩱🩲🩳👙👚👛" SET += "👜👝🎒👞👟🥾🥿👠👡🩰👢👑👒🎩🎓🧢😀😃😄😁😆😅😂🤣" SET = list(SET) ###Output _____no_output_____ ###Markdown Having the emoji set, let's execute the same piece of code but discarding the random color since emoji has its own color aspect. ###Code # Parameters N_BLOCKS = 64 # Deterministic random function random.seed(1234) # Build blocks BLOCKS = "" for _ in range(64): BG_HUE = random.randint(0, 360) N_LETTERS = random.randint(50, 200) LETTERS = "" for _ in range(N_LETTERS): letter_index = random.randint(0, len(SET) - 1) letter_hue = BG_HUE + random.randint(-100, 100) letter_size = random.randint(10, 20) letter_style = f'font-size: {letter_size}px' LETTERS += f'<font style="{letter_style}">{SET[letter_index]}</font>' block_style = f'background-color: hsla({BG_HUE}, 50%, 50%, 0.1)' BLOCKS += f'<div class="text-block" style="{block_style}">{LETTERS}</div>' CONTAINER = f'<div class="text-container">{BLOCKS}</div>' OUTPUT = STYLE + CONTAINER ###Output _____no_output_____ ###Markdown Finally, let's display the result. ###Code display(HTML(OUTPUT)) ###Output _____no_output_____
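###Markdown One possible extra step (not in the original notebook): write the last generated `OUTPUT` string (currently the emoji wall) to an HTML file so it can be opened directly in a browser; the file name is arbitrary. ###Code
# save the generated markup to disk
with open("wall_of_letters.html", "w", encoding="utf-8") as handle:
    handle.write(OUTPUT)
###Output _____no_output_____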
Methanotroph_biofilm_model/GSMMS/P. denitrificans/Manual curation/Adjusting Paracoccus Model-create cobra model-redifined respiration.ipynb
###Markdown Load model and modules ###Code from __future__ import print_function import cobra.test import os from os.path import join from __future__ import absolute_import import re from math import isinf, isnan from os.path import isfile from warnings import warn from six import iteritems from cobra.core import Metabolite, Model, Reaction from cobra.util.solver import set_objective try: import libsbml except ImportError: libsbml = None # Source: https://cobrapy.readthedocs.io/en/latest/_modules/cobra/io/sbml.html#write_cobra_model_to_sbml_file def parse_legacy_sbml_notes(note_string, note_delimiter=':'): """Deal with various legacy SBML format issues. """ note_dict = {} start_tag = '<p>' end_tag = '</p>' if '<html:p>' in note_string: start_tag = '<html:p>' end_tag = '</html:p>' while start_tag in note_string and end_tag in note_string: note_start = note_string.index(start_tag) note_end = note_string.index(end_tag) the_note = note_string[ (note_start + len(start_tag)):note_end].lstrip(' ').rstrip( ' ') if note_delimiter in the_note: note_delimiter_index = the_note.index(note_delimiter) note_field = the_note[:note_delimiter_index].lstrip( ' ').rstrip(' ').replace('_', ' ').upper() note_value = the_note[ (note_delimiter_index + 1):].lstrip(' ').rstrip(' ') if note_field in note_dict: note_dict[note_field].append(note_value) else: note_dict[note_field] = [note_value] note_string = note_string[(note_end + len(end_tag)):] if ('CHARGE' in note_dict and note_dict['CHARGE'][0].lower() in ['none', 'na', 'nan']): note_dict.pop('CHARGE') # Remove non-numeric charges if 'CHARGE' in note_dict and note_dict['CHARGE'][0].lower() in ['none', 'na', 'nan']: note_dict.pop('CHARGE') # Remove non-numeric charges return note_dict def parse_legacy_id(the_id, the_compartment=None, the_type='metabolite', use_hyphens=False): """Deals with a bunch of problems due to bigg.ucsd.edu not following SBML standards Parameters ---------- the_id: String. the_compartment: String the_type: String Currently only 'metabolite' is supported use_hyphens: Boolean If True, double underscores (__) in an SBML ID will be converted to hyphens Returns ------- string: the identifier """ if use_hyphens: the_id = the_id.replace('__', '-') if the_type == 'metabolite': if the_id.split('_')[-1] == the_compartment: # Reformat Ids to match convention in Palsson Lab. the_id = the_id[:-len(the_compartment) - 1] the_id += '[%s]' % the_compartment return the_id def create_cobra_model_from_sbml_file(sbml_filename, old_sbml=False, legacy_metabolite=False, print_time=False, use_hyphens=False): """convert an SBML XML file into a cobra.Model object. Supports SBML Level 2 Versions 1 and 4. The function will detect if the SBML fbc package is used in the file and run the converter if the fbc package is used. Parameters ---------- sbml_filename: string old_sbml: bool Set to True if the XML file has metabolite formula appended to metabolite names. This was a poorly designed artifact that persists in some models. legacy_metabolite: bool If True then assume that the metabolite id has the compartment id appended after an underscore (e.g. _c for cytosol). This has not been implemented but will be soon. 
print_time: bool deprecated use_hyphens: bool If True, double underscores (__) in an SBML ID will be converted to hyphens Returns ------- Model : The parsed cobra model """ if not libsbml: raise ImportError('create_cobra_model_from_sbml_file ' 'requires python-libsbml') __default_lower_bound = -1000 __default_upper_bound = 1000 __default_objective_coefficient = 0 # Ensure that the file exists if not isfile(sbml_filename): raise IOError('Your SBML file is not found: %s' % sbml_filename) # Expressions to change SBML Ids to Palsson Lab Ids metabolite_re = re.compile('^M_') reaction_re = re.compile('^R_') compartment_re = re.compile('^C_') if print_time: warn("print_time is deprecated", DeprecationWarning) model_doc = libsbml.readSBML(sbml_filename) if model_doc.getPlugin("fbc") is not None: from libsbml import ConversionProperties, LIBSBML_OPERATION_SUCCESS conversion_properties = ConversionProperties() conversion_properties.addOption( "convert fbc to cobra", True, "Convert FBC model to Cobra model") result = model_doc.convert(conversion_properties) if result != LIBSBML_OPERATION_SUCCESS: raise Exception("Conversion of SBML+fbc to COBRA failed") sbml_model = model_doc.getModel() sbml_model_id = sbml_model.getId() sbml_species = sbml_model.getListOfSpecies() sbml_reactions = sbml_model.getListOfReactions() sbml_compartments = sbml_model.getListOfCompartments() compartment_dict = dict([(compartment_re.split(x.getId())[-1], x.getName()) for x in sbml_compartments]) if legacy_metabolite: # Deal with the palsson lab appending the compartment id to the # metabolite id new_dict = {} for the_id, the_name in compartment_dict.items(): if the_name == '': new_dict[the_id[0].lower()] = the_id else: new_dict[the_id] = the_name compartment_dict = new_dict legacy_compartment_converter = dict( [(v, k) for k, v in iteritems(compartment_dict)]) cobra_model = Model(sbml_model_id) metabolites = [] metabolite_dict = {} # Convert sbml_metabolites to cobra.Metabolites for sbml_metabolite in sbml_species: # Skip sbml boundary species if sbml_metabolite.getBoundaryCondition(): continue if (old_sbml or legacy_metabolite) and \ sbml_metabolite.getId().endswith('_b'): # Deal with incorrect sbml from bigg.ucsd.edu continue tmp_metabolite = Metabolite() metabolite_id = tmp_metabolite.id = sbml_metabolite.getId() tmp_metabolite.compartment = compartment_re.split( sbml_metabolite.getCompartment())[-1] if legacy_metabolite: if tmp_metabolite.compartment not in compartment_dict: tmp_metabolite.compartment = legacy_compartment_converter[ tmp_metabolite.compartment] tmp_metabolite.id = parse_legacy_id( tmp_metabolite.id, tmp_metabolite.compartment, use_hyphens=use_hyphens) if use_hyphens: tmp_metabolite.id = metabolite_re.split( tmp_metabolite.id)[-1].replace('__', '-') else: # Just in case the SBML ids are ill-formed and use - tmp_metabolite.id = metabolite_re.split( tmp_metabolite.id)[-1].replace('-', '__') tmp_metabolite.name = sbml_metabolite.getName() tmp_formula = '' tmp_metabolite.notes = parse_legacy_sbml_notes( sbml_metabolite.getNotesString()) if sbml_metabolite.isSetCharge(): tmp_metabolite.charge = sbml_metabolite.getCharge() if "CHARGE" in tmp_metabolite.notes: note_charge = tmp_metabolite.notes["CHARGE"][0] try: note_charge = float(note_charge) if note_charge == int(note_charge): note_charge = int(note_charge) except: warn("charge of %s is not a number (%s)" % (tmp_metabolite.id, str(note_charge))) else: if ((tmp_metabolite.charge is None) or (tmp_metabolite.charge == note_charge)): 
tmp_metabolite.notes.pop("CHARGE") # set charge to the one from notes if not assigend before # the same tmp_metabolite.charge = note_charge else: # tmp_metabolite.charge != note_charge msg = "different charges specified for %s (%d and %d)" msg = msg % (tmp_metabolite.id, tmp_metabolite.charge, note_charge) warn(msg) # Chances are a 0 note charge was written by mistake. We # will default to the note_charge in this case. if tmp_metabolite.charge == 0: tmp_metabolite.charge = note_charge for the_key in tmp_metabolite.notes.keys(): if the_key.lower() == 'formula': tmp_formula = tmp_metabolite.notes.pop(the_key)[0] break if tmp_formula == '' and old_sbml: tmp_formula = tmp_metabolite.name.split('_')[-1] tmp_metabolite.name = tmp_metabolite.name[:-len(tmp_formula) - 1] tmp_metabolite.formula = tmp_formula metabolite_dict.update({metabolite_id: tmp_metabolite}) metabolites.append(tmp_metabolite) cobra_model.add_metabolites(metabolites) # Construct the vectors and matrices for holding connectivity and numerical # info to feed to the cobra toolbox. # Always assume steady state simulations so b is set to 0 cobra_reaction_list = [] coefficients = {} for sbml_reaction in sbml_reactions: if use_hyphens: # Change the ids to match conventions used by the Palsson lab. reaction = Reaction(reaction_re.split( sbml_reaction.getId())[-1].replace('__', '-')) else: # Just in case the SBML ids are ill-formed and use - reaction = Reaction(reaction_re.split( sbml_reaction.getId())[-1].replace('-', '__')) cobra_reaction_list.append(reaction) # reaction.exchange_reaction = 0 reaction.name = sbml_reaction.getName() cobra_metabolites = {} # Use the cobra.Metabolite class here for sbml_metabolite in sbml_reaction.getListOfReactants(): tmp_metabolite_id = sbml_metabolite.getSpecies() # This deals with boundary metabolites if tmp_metabolite_id in metabolite_dict: tmp_metabolite = metabolite_dict[tmp_metabolite_id] cobra_metabolites[tmp_metabolite] = - \ sbml_metabolite.getStoichiometry() for sbml_metabolite in sbml_reaction.getListOfProducts(): tmp_metabolite_id = sbml_metabolite.getSpecies() # This deals with boundary metabolites if tmp_metabolite_id in metabolite_dict: tmp_metabolite = metabolite_dict[tmp_metabolite_id] # Handle the case where the metabolite was specified both # as a reactant and as a product. if tmp_metabolite in cobra_metabolites: warn("%s appears as a reactant and product %s" % (tmp_metabolite_id, reaction.id)) cobra_metabolites[ tmp_metabolite] += sbml_metabolite.getStoichiometry() # if the combined stoichiometry is 0, remove the metabolite if cobra_metabolites[tmp_metabolite] == 0: cobra_metabolites.pop(tmp_metabolite) else: cobra_metabolites[ tmp_metabolite] = sbml_metabolite.getStoichiometry() # check for nan for met, v in iteritems(cobra_metabolites): if isnan(v) or isinf(v): warn("invalid value %s for metabolite '%s' in reaction '%s'" % (str(v), met.id, reaction.id)) reaction.add_metabolites(cobra_metabolites) # Parse the kinetic law info here. parameter_dict = {} # If lower and upper bounds are specified in the Kinetic Law then # they override the sbml reversible attribute. If they are not # specified then the bounds are determined by getReversible. if not sbml_reaction.getKineticLaw(): if sbml_reaction.getReversible(): parameter_dict['lower_bound'] = __default_lower_bound parameter_dict['upper_bound'] = __default_upper_bound else: # Assume that irreversible reactions only proceed from left to # right. 
parameter_dict['lower_bound'] = 0 parameter_dict['upper_bound'] = __default_upper_bound parameter_dict[ 'objective_coefficient'] = __default_objective_coefficient else: for sbml_parameter in \ sbml_reaction.getKineticLaw().getListOfParameters(): parameter_dict[ sbml_parameter.getId().lower()] = sbml_parameter.getValue() if 'lower_bound' in parameter_dict: reaction.lower_bound = parameter_dict['lower_bound'] elif 'lower bound' in parameter_dict: reaction.lower_bound = parameter_dict['lower bound'] elif sbml_reaction.getReversible(): reaction.lower_bound = __default_lower_bound else: reaction.lower_bound = 0 if 'upper_bound' in parameter_dict: reaction.upper_bound = parameter_dict['upper_bound'] elif 'upper bound' in parameter_dict: reaction.upper_bound = parameter_dict['upper bound'] else: reaction.upper_bound = __default_upper_bound objective_coefficient = parameter_dict.get( 'objective_coefficient', parameter_dict.get( 'objective_coefficient', __default_objective_coefficient)) if objective_coefficient != 0: coefficients[reaction] = objective_coefficient # ensure values are not set to nan or inf if isnan(reaction.lower_bound) or isinf(reaction.lower_bound): reaction.lower_bound = __default_lower_bound if isnan(reaction.upper_bound) or isinf(reaction.upper_bound): reaction.upper_bound = __default_upper_bound reaction_note_dict = parse_legacy_sbml_notes( sbml_reaction.getNotesString()) # Parse the reaction notes. # POTENTIAL BUG: DEALING WITH LEGACY 'SBML' THAT IS NOT IN A # STANDARD FORMAT # TODO: READ IN OTHER NOTES AND GIVE THEM A reaction_ prefix. # TODO: Make sure genes get added as objects if 'GENE ASSOCIATION' in reaction_note_dict: rule = reaction_note_dict['GENE ASSOCIATION'][0] try: rule.encode('ascii') except (UnicodeEncodeError, UnicodeDecodeError): warn("gene_reaction_rule '%s' is not ascii compliant" % rule) if rule.startswith("&quot;") and rule.endswith("&quot;"): rule = rule[6:-6] reaction.gene_reaction_rule = rule if 'GENE LIST' in reaction_note_dict: reaction.systematic_names = reaction_note_dict['GENE LIST'][0] elif ('GENES' in reaction_note_dict and reaction_note_dict['GENES'] != ['']): reaction.systematic_names = reaction_note_dict['GENES'][0] elif 'LOCUS' in reaction_note_dict: gene_id_to_object = dict([(x.id, x) for x in reaction._genes]) for the_row in reaction_note_dict['LOCUS']: tmp_row_dict = {} the_row = 'LOCUS:' + the_row.lstrip('_').rstrip('#') for the_item in the_row.split('#'): k, v = the_item.split(':') tmp_row_dict[k] = v tmp_locus_id = tmp_row_dict['LOCUS'] if 'TRANSCRIPT' in tmp_row_dict: tmp_locus_id = tmp_locus_id + \ '.' + tmp_row_dict['TRANSCRIPT'] if 'ABBREVIATION' in tmp_row_dict: gene_id_to_object[tmp_locus_id].name = tmp_row_dict[ 'ABBREVIATION'] if 'SUBSYSTEM' in reaction_note_dict: reaction.subsystem = reaction_note_dict.pop('SUBSYSTEM')[0] reaction.notes = reaction_note_dict # Now, add all of the reactions to the model. cobra_model.id = sbml_model.getId() # Populate the compartment list - This will be done based on # cobra.Metabolites in cobra.Reactions in the future. 
cobra_model.compartments = compartment_dict cobra_model.add_reactions(cobra_reaction_list) set_objective(cobra_model, coefficients) return cobra_model # Loading the original model MODEL = create_cobra_model_from_sbml_file('Paracoccus_autoTemplate_completeMedia.sbml', old_sbml=False, legacy_metabolite=False, print_time=False, use_hyphens=False) ###Output _____no_output_____ ###Markdown Add general (de)nitrification reactions ###Code # Add missing transport and exchange reactions nitrogen metabolism # (denitrification and heterotrophic nitrification) # Nitrogen diffusion: N2_c0 <=> N2_e0 # cpd00528[1] <=> cpd00528 rxn10577_c0 = Reaction('rxn10577_c0') rxn10577_c0.name = 'Nitrogen exchange, diffusion' rxn10577_c0.subsystem = 'Transport' rxn10577_c0.lower_bound = -1000.0 rxn10577_c0.upper_bound = 1000.0 cpd00528_c0 = MODEL.metabolites.get_by_id('cpd00528_c0') cpd00528_e0 = Metabolite( 'cpd00528_e0', formula = 'N2', name = 'N2_e', compartment = 'e0') rxn10577_c0.add_metabolites({ cpd00528_c0: -1.0, cpd00528_e0: 1.0 }) # Nitrite transport :NO2_e0 <=> NO2_c0 # cpd00075 <=> cpd00075 rxn08999_c0 = Reaction('rxn08999_c0') rxn08999_c0.name = 'nitrite transport via diffusion (extracellular to periplasm)' rxn08999_c0.subsystem = 'Transport' rxn08999_c0.lower_bound = -1000.0 rxn08999_c0.upper_bound = 1000.0 cpd00075_c0 = MODEL.metabolites.get_by_id('cpd00075_c0') cpd00075_e0 = Metabolite( 'cpd00075_e0', formula = 'NO2', name = 'NO2_e', compartment = 'e0') rxn08999_c0.add_metabolites({ cpd00075_c0: 1.0, cpd00075_e0: -1.0 }) # Nitrate transport :NO3_e0 <=> NO3_c0 # cpd00209 <=> cpd00209 rxn09005_c0 = Reaction('rxn09005_c0') rxn09005_c0.name = 'nitrate transport via diffusion (extracellular to periplasm)' rxn09005_c0.subsystem = 'Transport' rxn09005_c0.lower_bound = -1000.0 rxn09005_c0.upper_bound = 1000.0 cpd00209_c0 = MODEL.metabolites.get_by_id('cpd00209_c0') cpd00209_e0 = Metabolite( 'cpd00209_e0', formula = 'NO3', name = 'NO3_e', compartment = 'e0') rxn09005_c0.add_metabolites({ cpd00209_c0: 1.0, cpd00209_e0: -1.0 }) #add boundary reactions for dinitrogen and nitrite MODEL.add_boundary(cpd00528_e0, type="exchange", reaction_id="EX_cpd00528_e0", ub=1000.) MODEL.add_boundary(cpd00075_e0, type="exchange", reaction_id="EX_cpd00075_e0", ub=1000.) MODEL.add_boundary(cpd00209_e0, type="exchange", reaction_id="EX_cpd00209_e0", ub=1000.) MODEL.add_reactions([rxn10577_c0, rxn08999_c0, rxn09005_c0]) # Fix oxidative phosphorylation # (2) Cytochrome c3+[1] + Ubiquinol-8[1] <=> (2) H+ + (2) Cytochrome c2+[1] + Ubiquinone-8[1] # (2) cpd00109 + cpd15561 <=> (2) cpd00067 + (2) cpd00110 + cpd15560 rxn12750_c0 = Reaction('rxn12750_c0') rxn12750_c0.name = 'ubiquinol---cytochrome-c reductase' rxn12750_c0.subsystem = 'Oxidative phosphorylation' rxn12750_c0.lower_bound = -1000.0 rxn12750_c0.upper_bound = 1000.0 cpd00109_c0 = MODEL.metabolites.get_by_id('cpd00109_c0') cpd15561_c0 = MODEL.metabolites.get_by_id('cpd15561_c0') cpd00067_c0 = MODEL.metabolites.get_by_id('cpd00067_c0') cpd00110_c0 = MODEL.metabolites.get_by_id('cpd00110_c0') cpd15560_c0 = MODEL.metabolites.get_by_id('cpd15560_c0') rxn12750_c0.add_metabolites({ cpd00109_c0: -2.0, cpd15561_c0: -1.0, cpd00067_c0: 2.0, cpd00110_c0: 2.0, cpd15560_c0: 1.0 }) MODEL.add_reactions([rxn12750_c0]) # Fix denitrification pathway # knock-out menaquinol dependent nitrate reductase MODEL. 
reactions.rxn10121_c0.knock_out() # H2O + Cytochrome c3+ + NO <= H+ + Nitrite + Cytochrome c2+ # cpd00001 + cpd00109 + cpd00418 <= cpd00067 + cpd00075 + cpd00110 rxn00567_c0 = Reaction('rxn00567_c0') rxn00567_c0.name = 'nitric-oxide:ferricytochrome-c oxidoreductase' rxn00567_c0.subsystem = 'Denitrification' rxn00567_c0.lower_bound = -1000.0 rxn00567_c0.upper_bound = 0.0 cpd00001_c0 = MODEL.metabolites.get_by_id('cpd00001_c0') cpd00109_c0 = MODEL.metabolites.get_by_id('cpd00109_c0') cpd00418_c0 = MODEL.metabolites.get_by_id('cpd00418_c0') cpd00067_c0 = MODEL.metabolites.get_by_id('cpd00067_c0') cpd00075_c0 = MODEL.metabolites.get_by_id('cpd00075_c0') cpd00110_c0 = MODEL.metabolites.get_by_id('cpd00110_c0') rxn00567_c0.add_metabolites({ cpd00001_c0: -1.0, cpd00109_c0: -1.0, cpd00418_c0: -1.0, cpd00067_c0: 1.0, cpd00075_c0: 1.0, cpd00110_c0: 1.0 }) # knock-out nitrous_oxide_NAD_plus__oxidoreductase MODEL. reactions.rxn01806_c0.knock_out() # (2) NO + Cytochrome_c2_plus <=> H2O + Nitrous oxide + Cytochrome_c3_plus # (2) cpd00418 + cpd00110 <=> cpd00001 + cpd00659 + cpd00109 rxn05801mod_c0 = Reaction('rxn05801mod_c0') rxn05801mod_c0.name = 'nitrous-oxide:ferricytochrome-c oxidoreductase' rxn05801mod_c0.subsystem = 'Denitrification' rxn05801mod_c0.lower_bound = -1000.0 rxn05801mod_c0.upper_bound = 1000.0 cpd00110_c0 = MODEL.metabolites.get_by_id('cpd00110_c0') cpd00659_c0 = MODEL.metabolites.get_by_id('cpd00659_c0') cpd00109_c0 = MODEL.metabolites.get_by_id('cpd00109_c0') rxn05801mod_c0.add_metabolites({ cpd00418_c0: -2.0, cpd00110_c0: -1.0, cpd00001_c0: 1.0, cpd00001_c0: 1.0, cpd00109_c0: 1.0 }) # knock-out Nitrogen_NAD_oxidoreductase_N2O_forming MODEL. reactions.rxn11937_c0.knock_out() # H2O + (2) Cytochrome c3+ + N2 <=> (2) H+ + (2) Cytochrome c2+ + Nitrous oxide # cpd00001 + (2) cpd00109 + cpd00528 <=> (2) cpd00067 + (2) cpd00110 + cpd00659 rxn39362_c0 = Reaction('rxn39362_c0') rxn39362_c0.name = 'nitrogen:ferricytochrome-c oxidoreductase (N2O-forming)' rxn39362_c0.subsystem = 'Denitrification' rxn39362_c0.lower_bound = -1000.0 rxn39362_c0.upper_bound = 0.0 cpd00528_c0 = MODEL.metabolites.get_by_id('cpd00528_c0') rxn39362_c0.add_metabolites({ cpd00001_c0: -1.0, cpd00109_c0: -2.0, cpd00528_c0: -1.0, cpd00067_c0: 1.0, cpd00110_c0: 2.0, cpd00659_c0: 1.0 }) MODEL.add_reactions([rxn00567_c0, rxn05801mod_c0, rxn39362_c0]) ###Output _____no_output_____ ###Markdown Add heterotrophic nitrification-aerobic denitrification in seperate reactions ###Code # Fix/add heterotrophic nitrification pathway >> adds seperate reactions (lumped below) seperate = False if seperate == True: # O2 + NH3 + ubiquinol-8 => H2O + H+ + Hydroxylamine + ubiquinone-8 # cpd00007 + cpd00013 + cpd15561 => cpd00001 + cpd00067 + cpd00165 + cpd15560 rxn14010mod_c0 = Reaction('rxn14010_c0') rxn14010mod_c0.name = 'ammonia,ubiquinol:oxygen oxidoreductase' rxn14010mod_c0.subsystem = 'heterotrophic nitrification' rxn14010mod_c0.lower_bound = 0.0 rxn14010mod_c0.upper_bound = 1000.0 cpd00007_c0 = MODEL.metabolites.get_by_id('cpd00007_c0') cpd00013_c0 = MODEL.metabolites.get_by_id('cpd00013_c0') cpd15561_c0 = MODEL.metabolites.get_by_id('cpd15561_c0') cpd00001_c0 = MODEL.metabolites.get_by_id('cpd00001_c0') cpd00067_c0 = MODEL.metabolites.get_by_id('cpd00067_c0') cpd00165_c0 = Metabolite( 'cpd00165_c0', formula='H3NO', name='Hydroxylamine_c0', compartment='c0') cpd15560_c0 = MODEL.metabolites.get_by_id('cpd15560_c0') rxn14010mod_c0.add_metabolites({ cpd00007_c0: -1.0, cpd00013_c0: -1.0, cpd15561_c0: -1.0, cpd00001_c0: 1.0, cpd00067_c0: 1.0, 
cpd00165_c0: 1.0, cpd15560_c0: 1.0 }) # Cytochrome c551+ as cpd18072_c0 (Cytochrome C-ox) & cytochrome c551 as cpd18074_c0 (Cytochrome C-rex) # H2O + (4) Cytochrome c551+ + Hydroxylamine <=> Nitrite + (4) Cytochrome c551 # cpd00001 + (4) cpd18072 + cpd00165 <=> cpd00075 + (4) cpd18074 rxn40053mod_c0 = Reaction('rxn40053mod_c0') rxn40053mod_c0.name = 'hydroxylamine:ferricytochrome-c oxidoreductase' rxn40053mod_c0.subsystem = 'heterotrophic nitrification' rxn40053mod_c0.lower_bound = -1000.0 rxn40053mod_c0.upper_bound = 1000.0 cpd18072_c0 = Metabolite( 'cpd18072_c0', formula='Unkown', name='Cytochrome 551+ or C-ox', compartment='c0') cpd00075_c0 = MODEL.metabolites.get_by_id('cpd00075_c0') cpd18074_c0 = Metabolite( 'cpd18074_c0', formula='Unkown', name='Cytochrome 551 or C-red', compartment='c0') rxn40053mod_c0.add_metabolites({ cpd00001_c0: -1.0, cpd18072_c0: -4.0, cpd00165_c0: -1.0, cpd00075_c0: 1.0, cpd18074_c0: 4.0 }) # H2O + NO + Cytochrome 551+ <=> H+ + Nitrite + Cytochrome 551 # cpd00001 + cpd00418 + cpd18072 <=> cpd00067 + cpd00075 + cpd18074 rxn14428mod_c0 = Reaction('rxn14428mod_c0') rxn14428mod_c0.name = 'Nitrite reductase cytochrome-c type' rxn14428mod_c0.subsystem = 'heterotrophic nitrification' rxn14428mod_c0.lower_bound = -1000.0 rxn14428mod_c0.upper_bound = 1000.0 cpd00418_c0 = MODEL.metabolites.get_by_id('cpd00418_c0') rxn14428mod_c0.add_metabolites({ cpd00001_c0: -1.0, cpd00418_c0: -1.0, cpd18072_c0: -1.0, cpd00067_c0: 1.0, cpd00075_c0: 1.0, cpd18074_c0: 1.0 }) # H2O + Nitrous oxide + (2) Cytochromes c551+ <=> (2) H+ + (2) NO + (2) Cytochrome c551 # cpd00001 + cpd00659 + (2) cpd18072 <=> (2) cpd00067 + (2) cpd00418 + (2) cpd18074 rxn20282mod_c0 = Reaction('rxn20282mod_c0') rxn20282mod_c0.name = 'nitrogen oxide reductase' rxn20282mod_c0.subsystem = 'heterotrophic nitrification' rxn20282mod_c0.lower_bound = -1000.0 rxn20282mod_c0.upper_bound = 1000.0 cpd00659_c0 = MODEL.metabolites.get_by_id('cpd00659_c0') rxn20282mod_c0.add_metabolites({ cpd00001_c0: -1.0, cpd00659_c0: -1.0, cpd18072_c0: -2.0, cpd00067_c0: 2.0, cpd00418_c0: 2.0, cpd18074_c0: 2.0 }) # H2O + N2 + (2) Cytochromes c551+ <= (2) H+ + Nitrous oxide + (2) Cytochromes c551 # cpd00001 + cpd00528 + (2) cpd18072 <= (2) cpd00067 + cpd00659 + (2) cpd18074 rxn22446mod_c0 = Reaction('rxn22446mod_c0') rxn22446mod_c0.name = 'nitrous-oxide reductase' rxn22446mod_c0.subsystem = 'heterotrophic nitrification' rxn22446mod_c0.lower_bound = -1000.0 rxn22446mod_c0.upper_bound = 0.0 cpd00528_c0 = MODEL.metabolites.get_by_id('cpd00528_c0') rxn22446mod_c0.add_metabolites({ cpd00001_c0: -1.0, cpd00528_c0: -1.0, cpd18072_c0: -2.0, cpd00067_c0: 2.0, cpd00659_c0: 1.0, cpd18074_c0: 2.0 }) # (0.5) O2 + (4) H+ + Cytochrome c551 <=> H2O + (2) H+[1] + Cytochrome c551+ # (0.5) cpd00007 + (4) cpd00067 + cpd18074 <=> cpd00001 + (4) cpd00067[1] + cpd18072 rxn10043mod_c0 = Reaction('rxn10043mod_c0') rxn10043mod_c0.name = 'cytochrome-c oxidase' rxn10043mod_c0.subsystem = 'heterotrophic nitrification' rxn10043mod_c0.lower_bound = -1000.0 rxn10043mod_c0.upper_bound = 1000.0 cpd00067_e0 = MODEL.metabolites.get_by_id('cpd00067_e0') rxn10043mod_c0.add_metabolites({ cpd00007_c0: -0.5, cpd00067_c0: -4.0, cpd18074_c0: -1.0, cpd00001_c0: 1.0, cpd00067_e0: 1.0, cpd18072_c0: 1.0 }) MODEL.add_reactions([rxn14010mod_c0, rxn40053mod_c0, rxn14428mod_c0, rxn20282mod_c0, rxn22446mod_c0, rxn10043mod_c0]) MODEL.repair() # Save the model with seperated (de)nitrification reactions seperateSave = False if seperateSave == True: cobra.io.write_sbml_model(MODEL, 
"Paracoccus_seperateNitrification_completeMedia.sbml") ###Output _____no_output_____ ###Markdown Add heterotrophic nitrification-aerobic denitrification in lumped reactions ###Code # Fix/add heterotrophic nitrification pathway >> adds lumped reactions lumped = False if lumped == True: # O2 + NH3 + ubiquinol-8 => H2O + H+ + Hydroxylamine + ubiquinone-8 # cpd00007 + cpd00013 + cpd15561 => cpd00001 + cpd00067 + cpd00165 + cpd15560 rxn14010mod_c0 = Reaction('rxn14010_c0') rxn14010mod_c0.name = 'ammonia,ubiquinol:oxygen oxidoreductase' rxn14010mod_c0.subsystem = 'heterotrophic nitrification' rxn14010mod_c0.lower_bound = 0.0 rxn14010mod_c0.upper_bound = 1000.0 cpd00007_c0 = MODEL.metabolites.get_by_id('cpd00007_c0') cpd00013_c0 = MODEL.metabolites.get_by_id('cpd00013_c0') cpd15561_c0 = MODEL.metabolites.get_by_id('cpd15561_c0') cpd00001_c0 = MODEL.metabolites.get_by_id('cpd00001_c0') cpd00067_c0 = MODEL.metabolites.get_by_id('cpd00067_c0') cpd00165_c0 = Metabolite( 'cpd00165_c0', formula='H3NO', name='Hydroxylamine_c0', compartment='c0') cpd15560_c0 = MODEL.metabolites.get_by_id('cpd15560_c0') rxn14010mod_c0.add_metabolites({ cpd00007_c0: -1.0, cpd00013_c0: -1.0, cpd15561_c0: -1.0, cpd00001_c0: 1.0, cpd00067_c0: 1.0, cpd00165_c0: 1.0, cpd15560_c0: 1.0 }) # Cytochrome c551+ as cpd18072_c0 (Cytochrome C-ox) & cytochrome c551 as cpd18074_c0 (Cytochrome C-rex) # H2O + (4) Cytochrome c551+ + Hydroxylamine <=> Nitrite + (4) Cytochrome c551 # cpd00001 + (4) cpd18072 + cpd00165 <=> cpd00075 + (4) cpd18074 rxn40053mod_c0 = Reaction('rxn40053mod_c0') rxn40053mod_c0.name = 'hydroxylamine:ferricytochrome-c oxidoreductase' rxn40053mod_c0.subsystem = 'heterotrophic nitrification' rxn40053mod_c0.lower_bound = -1000.0 rxn40053mod_c0.upper_bound = 1000.0 cpd18072_c0 = Metabolite( 'cpd18072_c0', formula='Unkown', name='Cytochrome 551+ or C-ox', compartment='c0') cpd00075_c0 = MODEL.metabolites.get_by_id('cpd00075_c0') cpd18074_c0 = Metabolite( 'cpd18074_c0', formula='Unkown', name='Cytochrome 551 or C-red', compartment='c0') rxn40053mod_c0.add_metabolites({ cpd00001_c0: -1.0, cpd18072_c0: -4.0, cpd00165_c0: -1.0, cpd00075_c0: 1.0, cpd18074_c0: 4.0 }) # (7) H+ + Nitrite + (0.5) O2 + (4) Cytochrome c551 => (3) H2O + (0.5) N2 + (2) H+[e] + (4) Cytochrome c551+ # (7) cpd00067 + cpd00075 + (0.5) cpd00007 + (4) cpd18074 => (3) cpd00001 + (0.5) cpd00528 + (2) cpd00067_e0 + (4) cpd18072 rxn10043lump_c0 = Reaction('rxn10043lump_c0') rxn10043lump_c0.name = 'cytochrome-c oxidase' rxn10043lump_c0.subsystem = 'heterotrophic nitrification' rxn10043lump_c0.lower_bound = 0.0 rxn10043lump_c0.upper_bound = 1000.0 cpd00528_c0 = MODEL.metabolites.get_by_id('cpd00528_c0') cpd00067_e0 = MODEL.metabolites.get_by_id('cpd00067_e0') rxn10043lump_c0.add_metabolites({ cpd00067_c0: -7, cpd00075_c0: -1.0, cpd00007_c0: -0.5, cpd18074_c0: -4.0, cpd00001_c0: 3.0, cpd00528_c0: 0.5, cpd00067_e0: 2.0, cpd18072_c0: 4.0 }) MODEL.add_reactions([rxn14010mod_c0, rxn40053mod_c0, rxn10043lump_c0]) MODEL.repair() # Save the adjusted model lumped = False if lumped == True: cobra.io.write_sbml_model(MODEL, "Paracoccus_lumpedNitrification_completeMedia.sbml") ###Output _____no_output_____
P(s)_curve_derivative.ipynb
###Markdown Plot the smoothed P(s) curve and its derivative (slope) ###Code # import core packages import warnings warnings.filterwarnings("ignore") from itertools import combinations # import semi-core packages import matplotlib.pyplot as plt from matplotlib import colors %matplotlib inline plt.style.use('seaborn-poster') import numpy as np import pandas as pd # import open2c libraries import bioframe import cooler import cooltools import cooltools.expected import seaborn as sns clr = cooler.Cooler('PATH/COOL_FILE.cool') # Load chromosome arm sizes arms = pd.read_table('PATH/ARMS.tab') arms.head() regs = bioframe.parse_regions(arms) regs.head() # cvd == contacts-vs-distance cvd = cooltools.expected.diagsum( clr=clr, regions=regs, transforms={'balanced': lambda p: p['count']*p['weight1']*p['weight2']} ) #cvd # Aggregate diagonals from different genomic regions together. # Since all three calculated statistics are additive, they can be aggregated # easily via the sum() function. cvd_agg = ( cvd .groupby('diag') .agg( {'n_valid':'sum', 'count.sum':'sum', 'balanced.sum':'sum', }) .reset_index() ) # Convert indices of diagonals into genomic separation, expressed in basepairs. cvd_agg['s_bp'] = ( cvd_agg['diag'] * clr.binsize) # Now we can calculate the average raw interaction counts and normalized contact frequencies. cvd_agg['count.avg'] = ( cvd_agg['count.sum'] / cvd_agg['n_valid'] ) cvd_agg['balanced.avg'] = ( cvd_agg['balanced.sum'] / cvd_agg['n_valid'] ) # Plot the P(s) curve f, ax = plt.subplots(1,1) ax.loglog( cvd_agg['s_bp'], cvd_agg['balanced.avg'], ) ax.set( xlabel='separation, bp', ylabel='IC contact frequency') ax.set_aspect(1.0) ax.grid(lw=0.5) binsize=clr.binsize # save to pdf #plt.savefig('{}kb_Ps_curve_arms.pdf'.format(binsize//1000), bbox_inches='tight') #Smooth the P(s) curve with logarithmic binning. # Logbin-expected aggregates P(s) curves per region over exponentially increasing distance bins. lb_cvd, lb_slopes, lb_distbins = cooltools.expected.logbin_expected(cvd) # The resulting table contains P(s) curves for each individual region. # Aggregating these curves into a single genome-wide curve is more involved, # so we created a separate function for this too. lb_cvd_agg, lb_slopes_agg = cooltools.expected.combine_binned_expected( lb_cvd, binned_exp_slope=lb_slopes ) lb_cvd_agg['s_bp'] = lb_cvd_agg['diag.avg'] * clr.binsize lb_slopes_agg['s_bp'] = lb_slopes_agg['diag.avg'] * clr.binsize # Plot the smoothed P(s) curve and its derivative f, axs = plt.subplots( figsize=(6.5,13), nrows=2, gridspec_kw={'height_ratios':[6,2]}, sharex=True) ax = axs[0] ax.loglog( lb_cvd_agg['s_bp'], lb_cvd_agg['balanced.avg'], 'o-', markersize=5, ) ax.set( ylabel='IC contact frequency', xlim=(1e3,1e8) ) ax.set_aspect(1.0) ax.grid(lw=0.5) ax = axs[1] ax.semilogx( lb_slopes_agg['s_bp'], lb_slopes_agg['slope'], alpha=0.5 ) ax.set( xlabel='separation, bp', ylabel='slope') ax.grid(lw=0.5) #plt.savefig('{}kb_Ps_smooth_curve_derivative_arms.pdf'.format(binsize//1000), bbox_inches='tight') ###Output _____no_output_____
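###Markdown Added cross-check (a sketch, not part of the original analysis): estimate the slope d log P(s) / d log s directly from the unsmoothed cvd_agg table with a finite-difference gradient and overlay it on the logbinned slope computed above. The two curves should roughly agree, with the raw estimate being much noisier. ###Code # assumes cvd_agg and lb_slopes_agg from the cells above
mask = (cvd_agg['s_bp'] > 0) & (cvd_agg['balanced.avg'] > 0)
log_s = np.log10(cvd_agg.loc[mask, 's_bp'].values)
log_p = np.log10(cvd_agg.loc[mask, 'balanced.avg'].values)
raw_slope = np.gradient(log_p, log_s)  # finite-difference derivative in log-log space

f, ax = plt.subplots(1, 1)
ax.semilogx(10**log_s, raw_slope, lw=0.5, label='finite-difference slope')
ax.semilogx(lb_slopes_agg['s_bp'], lb_slopes_agg['slope'], label='logbinned slope')
ax.set(xlabel='separation, bp', ylabel='slope')
ax.legend()
ax.grid(lw=0.5)
###Output _____no_output_____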
Presentation - Optimization with Python P1.ipynb
###Markdown Today's Agenda: 1. Introduction and Python Basics 2. 3D Plots and Sets for Optimization 3. SciPy (Linear Programming) and Application (Drones) 4. Graph Theory (NetworkX) and Wrap-up Part 1: Introduction and Python Basics What is Python and Who cares?> Python is an interpreted, object-oriented, high-level programming language with dynamic semantics. *[Definition from Python.org](https://www.python.org/doc/essays/blurb/)It is a widely used tool employed to solve, visualize problems in many fields that include but not limited to:1. **Mathematics**2. **Engineering**3. Economics4. Medicine5. All other fields of ScienceMany also use it to create software tools, or run web applications.Relevant to today's presentation, we can use it in the following areas:1. Optimization2. Linear Algebra3. Graph Theory4. Data Processing and Visualization (of all the above areas)--- What is a Jupyter Notebook: It is an interactive computing environment that enables users to author notebook documents that include:1. Live code2. Interactive widgets3. Plots4. Narrative text5. Equations (LaTeX)6. Images7. Video See Reference for LaTeX: https://towardsdatascience.com/write-markdown-latex-in-the-jupyter-notebook-10985edb91fd Python Basics - Data Types and Numpy and Plots ###Code # This is a comment # This is Addition 1.0 + 2 # This is a variable a = 5 # This is how to view/print/output the variable print(a) # This is a string print("Hellow World!!") # This is a fancier way to print the variable print("A is: ", a, a+5) # Or simply print(a+5) a ###Output 10 ###Markdown ###Code # This is a tuple b = (3.0,4,5) print(b) z,y,x = b print(z, y, x) a, b = (1, 2) print(a, b) b, a = (a, b) print(a, b) # This is a list c = [5, 6, 90] d = ["apple", "hmm", "yup", "hmm"] print("\nList:\n", c) print(d) # This is a set e = {9, 1, 3, 4, 4, 5} f = {"qwerty", "hello", "a", "b", "qwerty", 9} print("\nSet: ") print(e) print(f) # This is a bool(boolean); TRUE/FALSE print(3 == 4, "\n") a = 5 b = 7 print(a < b, "\n") print(a > b, "\n") print(a-b >= -2, "\n") # Lets try doing some math. How to add vectors ?? A = [2, 3, 4, 5] B = [0, 1, 2, 3] print(A) A + B ###Output [2, 3, 4, 5] ###Markdown NumPyThe NumPy library is a popular Python library used for scientific computing applications, and is an acronym for "Numerical Python". NumPy's operations are divided into three main categories: Fourier Transform and Shape Manipulation, Mathematical and Logical Operations, and Linear Algebra and Random Number Generation. To make it as fast as possible, NumPy is written in C and Python. https://numpy.org/ ###Code # This is an import statement import numpy as np C = np.array([2, 3, 4, 5]) D = np.array([0, 1, 2, 3]) print(C) C # so lets add C+D # Other ops print(C - D) print(C * D) print(D / C) # Using numpy functions https://numpy.org/ # Add E = np.add(C, D) print(E) # inverse F = np.array([[1, 2],[3, 4]]) print("\nF:") print(F) np.linalg.inv(F) # Array creation (For more functions, see: https://numpy.org/doc/stable/reference/routines.html) #Identity matrix I = np.identity(7) print(I) ###Output [[1. 0. 0. 0. 0. 0. 0.] [0. 1. 0. 0. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0.] [0. 0. 0. 1. 0. 0. 0.] [0. 0. 0. 0. 1. 0. 0.] [0. 0. 0. 0. 0. 1. 0.] [0. 0. 0. 0. 0. 0. 1.]] ###Markdown Plots (Matplotlib)Matplotlib graphs your data on Figures (i.e., windows, Jupyter widgets, etc.), each of which can contain one or more Axes (i.e., an area where points can be specified in terms of x-y coordinates (or theta-r in a polar plot, or x-y-z in a 3D plot, etc.). 
Reference: https://matplotlib.org/tutorials/introductory/usage.htmlsphx-glr-tutorials-introductory-usage-py Gallery: https://matplotlib.org/gallery/index.html?highlight=gallery ###Code #Array of numbers https://numpy.org/doc/stable/reference/generated/numpy.arange.html#numpy.arange Nums = np.arange(-50, 51) print(Nums) # Y = X^2: Use either numpy.square(X) or X**2 Y = Nums**2 print("\n\nY:") print(Y) import matplotlib.pyplot as plt #Library for plots plt.plot(Nums, Y) ###Output _____no_output_____ ###Markdown 3D Plots ###Code # Creating a 3D surface plot Z^2 = X^2 + Y^2 X = np.arange(0, 3) # X = [0 1 2] Y = np.arange(0, 3) # Y = [0 1 2] # Generate intersections of X, Y: See Output Grid = np.meshgrid(X, Y) Grid # Lets seperate those arrays XX, YY = Grid print(XX) print() print(YY) # Compute output ZZ = (XX)^2 + (YY)^2: Use either numpy.square or ** ZZ = XX**2 + YY**2 ZZ %matplotlib notebook from mpl_toolkits.mplot3d import Axes3D fig = plt.figure() ax = Axes3D(fig) ax.plot_surface(XX, YY, ZZ) # More points X = np.arange(-10, 10, 0.1) # This is Input XX, YY = np.meshgrid(X, X) # ZZ = (XX)^2 + (YY)^2: Use either numpy.square or ** ZZ = XX**2 + YY**2 fig = plt.figure() ax = Axes3D(fig) ax.plot_surface(XX, YY, ZZ) # This Gives Output ###Output _____no_output_____ ###Markdown Function From Wikipedia **Function Syntax**From learnbyexample.org ###Code # Function Definition in Python def myAdder(X, Y): Z = X + Y return Z # Calling the function Sum = myAdder(np.array([2,3,4]), np.array([1,2,3])) Sum # Function Definition in Python def myFunPlotsin3D(X, Y): XX, YY = np.meshgrid(X, Y) # ZZ = (XX)^2 + (YY)^2: Use either numpy.square or ** ZZ = XX**2 + YY**2 fig = plt.figure() ax = Axes3D(fig) ax.plot_surface(XX, YY, ZZ) # This Gives Output return None # Calling the Function A = np.arange(-20, 30) B = np.arange(-100, 200) myFunPlotsin3D(A, B) ###Output _____no_output_____ ###Markdown ->>> Examples in HW3_3- -->>> [Link for HW3 (Examples)](https://mybinder.org/v2/gh/Sridhar701Pitt/OptimizationPresentationMATH/master?filepath=HW3_Updated.ipynb) 2. 3D Plots and SciPy (Linear Programming) IpyvolumeIPyvolume is a Python library to visualize 3d volumes and glyphs (e.g. 3d scatter plots), in the Jupyter notebook, with minimal configuration and effort. In contrast to matplotlib, it also supports widgets. 
Reference: https://ipyvolume.readthedocs.io/en/latest/ Examples: https://ipyvolume.readthedocs.io/en/latest/examples.html ###Code import ipyvolume as ipv #Imports a library called ipyvolume to help with plotting # More points - Now with ipyvolume X = np.arange(-10, 10, 0.1) # This is Input XX, YY = np.meshgrid(X, X) # ZZ = (XX)^2 + (YY)^2: Use either numpy.square or ** ZZ = XX**2 + YY**2 fig = ipv.figure() ipv.plot_surface(XX, YY, ZZ) # This Gives Output ipv.show() ###Output _____no_output_____ ###Markdown Sets for OptimizationLets take 4 points in 3D space: P1 = ( 1.0, 1.0, 1.0) P2 = ( 2.0,-1.0, 0.0) P3 = (-2.0,-1.5, 0.5) P4 = (-1.0, 0.0,-0.5)Heres a Plot: ###Code import ipyvolume as ipv #Imports a library called ipyvolume to help with plotting import numpy as np #Library import for array creation, and manipulation # 2D array with 4 rows (P1, P2, P3, P4) and 3 columns (X, Y, Z) coordinates Points = np.array([[ 1.0, 1.0, 1.0], [ 2.0,-1.0, 0.0], [-2.0,-1.5, 0.5], [-1.0, 0.0,-0.5]]) # Set = np.array([[ 1.0, 1.0, 1.0], [ 0.5, 1.0, 2.0], [ 2.0, 1.5, 0.5], [ 1.0, 2.0, 0.5]]) # All points in 1st quadrant Points XVals, YVals, ZVals = np.transpose(Points) fig = ipv.figure() # Plot the points ipv.scatter(XVals,YVals,ZVals, size=3, marker='sphere') # Add limits to axes ipv.xyzlim(-3, 3) # Plot Axes Planes - Run the function declaration first plotAxesPlanes() ipv.show() ###Output _____no_output_____ ###Markdown Affine, Conical, and Convex SetsTake the **Linear Combination**: $$ \sum_{i=1}^{4} a_{i} P_{i} $$For **Affine Combination**, we have: $$ \sum_{i=1}^{4} a_{i} = 1 $$ ###Code # Get 1000 random Values for each of a1, a2 and a3 between [-1 to 1) a1, a2, a3 = (10) * np.random.random_sample((3, 1000)) - 5 # Calculate a4 to satisfy the **affine** condition a4 = 1.0 - (a1 + a2 + a3) # Get A, B, C, D values from Points array P1, P2, P3, P4 = Points # Multiply to get the Linear combination with the affine constraints Term1 = np.multiply(a1[:,None],P1[None]) Term2 = np.multiply(a2[:,None],P2[None]) Term3 = np.multiply(a3[:,None],P3[None]) Term4 = np.multiply(a4[:,None],P4[None]) LC = Term1 + Term2 + Term3 + Term4 Ax, Ay, Az = np.transpose(LC) fig = ipv.figure() # Plot the points ipv.scatter(Ax,Ay,Az, size=2, color="#AD33FF", marker='sphere') ipv.scatter(XVals,YVals,ZVals, size=3, marker='sphere') # Add limits to axes ipv.xyzlim(-3, 3) # Plot axes surfaces #plotAxesPlanes() # Plot bounding box #plotBoundingBox(XVals,YVals,ZVals) ipv.show() ###Output _____no_output_____ ###Markdown For **Conical Combination**, we have: $$ Each\,\, a_{i} \geq 0 $$ ###Code # Get 1000 random Values for each of a1, a2, a3, and a4 between [0 to 1) # Also satisfies the concical condiditon a1, a2, a3, a4 = 1 * np.random.random_sample((4, 1000)) # Get A, B , C, D values from set array P1, P2, P3, P4 = Points # Multiply to get the Linear combination with the affine constraints Term1 = np.multiply(a1[:,None],P1[None]) Term2 = np.multiply(a2[:,None],P2[None]) Term3 = np.multiply(a3[:,None],P3[None]) Term4 = np.multiply(a4[:,None],P4[None]) LC = Term1 + Term2 + Term3 + Term4 Ax, Ay, Az = np.transpose(LC) fig = ipv.figure() # Plot the points ipv.scatter(Ax,Ay,Az, size=2, color="#AD33FF", marker='sphere') # Plot the Conical Linear Combination ipv.scatter(XVals,YVals,ZVals, size=3, marker='sphere') # Plot the set S # Add limits to axes ipv.xyzlim(-3, 3) # Plot axes surfaces # plotAxesPlanes() # Plot bounding box plotBoundingBox(XVals,YVals,ZVals) ipv.show() ###Output _____no_output_____ ###Markdown And for the **Convex Combination** we 
have both the constraints: $$ \sum_{i=1}^{4} a_{i} = 1 \, and \, a_{i} \geq 0 $$We will use[Dirichlet Distribution](https://numpy.org/doc/stable/reference/random/generated/numpy.random.dirichlet.html "Optional Title") to generate random numbers. The Dirichlet distribution is a distribution over vectors x that fulfill the conditions: $$ \sum_{i=1}^k x_i = 1\, and \, x_i>0$$ [Wikipedia Page](https://en.wikipedia.org/wiki/Dirichlet_distribution "Optional Title") ###Code # Get 1000 random dirichlet Values for each of a1, a2, a3, and a4 between [0 to 1) # Also satisfies the convex condiditon # Dirichlet Distribution: a1, a2, a3, a4 = 1 * np.random.dirichlet(np.ones(4), 1000).transpose() # Get A, B , C, D values from set array P1, P2, P3, P4 = Points # Multiply to get the Linear combination with the convex constraints Term1 = np.multiply(a1[:,None],P1[None]) Term2 = np.multiply(a2[:,None],P2[None]) Term3 = np.multiply(a3[:,None],P3[None]) Term4 = np.multiply(a4[:,None],P4[None]) LC = Term1 + Term2 + Term3 + Term4 Ax, Ay, Az = np.transpose(LC) fig = ipv.figure() # Plot the points ipv.scatter(Ax,Ay,Az, size=1, color="#AD33FF", marker='sphere') # Plot the Conical Linear Combination ipv.scatter(XVals,YVals,ZVals, size=2, marker='sphere') # Plot the set S # Add limits to axes ipv.xyzlim(-3, 3) # Plot axes surfaces # plotAxesPlanes() # Plot bounding box plotBoundingBox(XVals,YVals,ZVals) ipv.show() np.random.random_sample((4, 3)) np.random.dirichlet(np.ones(4), 5) # Some helper functions (functions that help!!!) def plotAxesPlanes(): #Surfaces of XY, YZ, ZX planes YZVertices = np.array([[ 0.0, 3.0, 3.0],[ 0.0,-3.0, 3.0],[ 0.0, 3.0,-3.0],[ 0.0,-3.0,-3.0]]) XZVertices = np.array([[ 3.0, 0.0, 3.0],[-3.0, 0.0, 3.0],[ 3.0, 0.0,-3.0],[-3.0, 0.0,-3.0]]) XYVertices = np.array([[ 3.0, 3.0, 0.0],[ 3.0,-3.0, 0.0],[-3.0, 3.0, 0.0],[-3.0,-3.0, 0.0]]) triangles = [(0,1,2),(1,2,3)] YZx, YZy, YZz = np.transpose(YZVertices) obj1 = ipv.plot_trisurf(YZx, YZy, YZz, triangles=triangles) obj1.color = [0., 0., 1., 0.5] obj1.material.transparent = True XZx, XZy, XZz = np.transpose(XZVertices) obj2 = ipv.plot_trisurf(XZx, XZy, XZz, triangles=triangles) obj2.color = [0., 1., 0., 0.5] obj2.material.transparent = True XYx, XYy, XYz = np.transpose(XYVertices) obj2 = ipv.plot_trisurf(XYx, XYy, XYz, triangles=triangles) obj2.color = [1., 0., 0., 0.5] obj2.material.transparent = True return None def plotBoundingBox(XVals, YVals, ZVals): # Bounding box triangles = [(0, 1, 2),(1, 2, 3),(0, 1, 3),(0, 2, 3)] obj0 = ipv.plot_trisurf(XVals, YVals, ZVals, triangles=triangles) obj0.color = [0., 1., 1., 0.7] obj0.material.transparent = True return None ###Output _____no_output_____ ###Markdown ->>> Examples in HW3_4+ -->>> [Link for HW3 Questions4+ (Examples)](https://mybinder.org/v2/gh/Sridhar701Pitt/OptimizationPresentationMATH/master?filepath=HW3_4%2B_Updated.ipynb) Visualizing Fundamental Theorem of Linear Programming in 3DLet's take the same 4 points to be the vertices of the convex region:P1 = ( 1.0, 1.0, 1.0) P2 = ( 2.0,-1.0, 0.0) P3 = (-2.0,-1.5, 0.5) P4 = (-1.0, 0.0,-0.5)Consider the function F(p) = 2*X + 5*Y - 3*Z : we wish to maximizeLets put that into a plot ###Code import numpy as np import ipyvolume as ipv import bqplot.scales # Get 1000 random dirichlet Values for each of a1, a2, a3, and a4 between [0 to 1) # Also satisfies the convex condiditon # Dirichlet Distribution: a1, a2, a3, a4 = 1 * np.random.dirichlet(np.ones(4), 10000).transpose() # Get A, B , C, D values from set array P1, P2, P3, P4 = Points # Multiply to get the 
Linear combination with the convex constraints Term1 = np.multiply(a1[:,None],P1[None]) Term2 = np.multiply(a2[:,None],P2[None]) Term3 = np.multiply(a3[:,None],P3[None]) Term4 = np.multiply(a4[:,None],P4[None]) LC = Term1 + Term2 + Term3 + Term4 Ax, Ay, Az = np.transpose(LC) # Compute the function F = 2*Ax + 5*Ay - 3*Az fig = ipv.figure() # Plot the points color_scale = bqplot.scales.ColorScale(min=min(F), max=max(F), colors=["#ff3", "#0f0", "#00f"]) ipv.scatter(Ax,Ay,Az, size=1,color = F, color_scale=color_scale, marker = 'sphere') ipv.scatter(XVals,YVals,ZVals, size=2, marker='sphere') # Plot the set S # Add limits to axes ipv.xyzlim(-3, 3) # Plot axes surfaces # plotAxesPlanes() # Plot bounding box #plotBoundingBox(XVals,YVals,ZVals) ipv.show() ###Output _____no_output_____
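###Markdown The color gradient above suggests that the linear function F attains its maximum at a corner of the convex region, exactly as the Fundamental Theorem of Linear Programming predicts. As an added check (a sketch; scipy.optimize.linprog is an extra import not used elsewhere in this notebook), we can solve the LP explicitly: parameterize points of the hull by convex weights a1..a4 and maximize the resulting linear objective. The solver should place essentially all weight on the single vertex P1, where F = 2(1) + 5(1) - 3(1) = 4. ###Code # assumes the Points array defined earlier in this notebook
from scipy.optimize import linprog

c_obj = np.array([2.0, 5.0, -3.0])   # coefficients of F(p) = 2x + 5y - 3z
f_vertices = Points @ c_obj          # F evaluated at each of P1..P4

# maximize sum_i a_i * F(P_i)  subject to  sum_i a_i = 1,  a_i >= 0
res = linprog(c=-f_vertices,
              A_eq=np.ones((1, 4)), b_eq=[1.0],
              bounds=[(0, None)] * 4)
print("optimal weights a:", res.x)
print("max of F over the hull:", -res.fun)
print("F at the vertices:", f_vertices)
###Output _____no_output_____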
Transfer Learning/Transfer Learning_Bees vs Ants.ipynb
###Markdown **Description:** Transfer learning to classify bees and ants images by using AlexNet.---- ###Code import torch import torch.nn as nn import torch.optim as optim import numpy as np import torchvision from torchvision import datasets, models, transforms import matplotlib.pyplot as plt import time import os ###Output _____no_output_____ ###Markdown Data Acquistion ###Code #Get ants and bees data data_dir = 'hymenoptera_data' # download data from torch website, then unzip !if [ ! -d $data_dir ]; then wget https://download.pytorch.org/tutorial/hymenoptera_data.zip && unzip hymenoptera_data.zip; fi #Transform the data: resize and crop it, normalize it with given mean, sd #and convert it to a tensor data_transform = transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) #Initialize image datasets image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transform) for x in ['train', 'val']} print('Image datasets are:', image_datasets) #Combine the dataset. 4 images per batch to load and shuffle images at every epoch dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4, shuffle=True) for x in ['train', 'val']} #What is the size of dataset dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']} print('Size of the dataset is:', dataset_sizes) #Classes of the dataset class_names = image_datasets['train'].classes print('Class names are:', class_names) device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") print("Running on device: {}".format(device)) #Let's visualize some training images #Get a batch of training data inputs, classes = next(iter(dataloaders['train'])) #Make a grid from batch out = torchvision.utils.make_grid(inputs) #Normalize the batch out = out.numpy().transpose((1, 2, 0)) mean = np.array([0.485, 0.456, 0.406]) std = np.array([0.229, 0.224, 0.225]) out = std * out + mean out = np.clip(out, 0, 1) #Plot fig, ax = plt.subplots(1, 1, figsize = (16,6)) ax.imshow(out) ###Output _____no_output_____ ###Markdown Use of Pre-trained AlexNet ###Code #Get AlexNet model model_conv = torchvision.models.alexnet(pretrained=True) print(model_conv) #Freeze all layers except the last one for param in model_conv.parameters(): param.requires_grad = False ###Output _____no_output_____ ###Markdown Now we will swap last layer with a new layer to use it for training of bees vs ants. ###Code # Number of features in the bottleneck layer num_ftrs = model_conv.classifier[6].in_features #Substitute it with a new fully connected layer with the same number of inputs but only 2 outputs #Since this bees and ants dataset is small and different from the ImageNet, we are using a linear classifier model_conv.classifier[6] = nn.Linear(num_ftrs, 2) # define cross entropy loss criterion = nn.CrossEntropyLoss() # define the optimizer. Note that we tell the optimizer to only update the # parameters in the last layer. Also, remember that we have set requires_grad=False # on all other parameters of the model, so the optimizer wouldn't have gradient # information on them if we asked it to update all the parameters. optimizer_conv = optim.SGD(model_conv.classifier[6].parameters(), lr=1e-4, momentum=0.9) ###Output _____no_output_____ ###Markdown We will try a larger learning rate of 1e-4 to prevent overfitting. Previously tried parameters was: 0.01, 0.1, 1e-5, 1e-6.This one looks like the one which gives the best. 
We also tried different momentums: 0.5, 0.6, 0.98. The best one is 0.9 ###Code # generic training function def train_model(model, criterion, optimizer, num_epochs=25): since = time.time() for epoch in range(num_epochs): print('Epoch {}/{}'.format(epoch, num_epochs - 1)) print('-' * 10) # Each epoch has a training and validation phase for phase in ['train', 'val']: if phase == 'train': model.train() # Set model to training mode else: model.eval() # Set model to evaluate mode running_loss = 0.0 running_corrects = 0 # Iterate over data. for inputs, labels in dataloaders[phase]: inputs = inputs.to(device) labels = labels.to(device) # zero the parameter gradients optimizer.zero_grad() # forward # track history only if in train with torch.set_grad_enabled(phase == 'train'): outputs = model(inputs) _, preds = torch.max(outputs, 1) loss = criterion(outputs, labels) # backward + optimize only if in training phase if phase == 'train': loss.backward() optimizer.step() # statistics running_loss += loss.item() * inputs.size(0) running_corrects += torch.sum(preds == labels.data) epoch_loss = running_loss / dataset_sizes[phase] epoch_acc = running_corrects.double() / dataset_sizes[phase] print('{} Loss: {:.4f} Acc: {:.4f}'.format( phase, epoch_loss, epoch_acc)) print() time_elapsed = time.time() - since print('Training complete in {:.0f}m {:.0f}s'.format( time_elapsed // 60, time_elapsed % 60)) train_model(model_conv, criterion, optimizer_conv, num_epochs=25) ###Output Epoch 0/24 ---------- train Loss: 0.0066 Acc: 1.0000 val Loss: 0.2998 Acc: 0.9281 Epoch 1/24 ---------- train Loss: 0.0024 Acc: 1.0000 val Loss: 0.3000 Acc: 0.9346 Epoch 2/24 ---------- train Loss: 0.0028 Acc: 1.0000 val Loss: 0.3001 Acc: 0.9346 Epoch 3/24 ---------- train Loss: 0.0082 Acc: 0.9959 val Loss: 0.3007 Acc: 0.9281 Epoch 4/24 ---------- train Loss: 0.0038 Acc: 1.0000 val Loss: 0.3000 Acc: 0.9281 Epoch 5/24 ---------- train Loss: 0.0021 Acc: 1.0000 val Loss: 0.3003 Acc: 0.9281 Epoch 6/24 ---------- train Loss: 0.0057 Acc: 1.0000 val Loss: 0.3018 Acc: 0.9281 Epoch 7/24 ---------- train Loss: 0.0067 Acc: 0.9959 val Loss: 0.3000 Acc: 0.9281 Epoch 8/24 ---------- train Loss: 0.0054 Acc: 1.0000 val Loss: 0.3013 Acc: 0.9281 Epoch 9/24 ---------- train Loss: 0.0026 Acc: 1.0000 val Loss: 0.3019 Acc: 0.9281 Epoch 10/24 ---------- train Loss: 0.0022 Acc: 1.0000 val Loss: 0.3024 Acc: 0.9346 Epoch 11/24 ---------- train Loss: 0.0029 Acc: 1.0000 val Loss: 0.3014 Acc: 0.9346 Epoch 12/24 ---------- train Loss: 0.0018 Acc: 1.0000 val Loss: 0.3014 Acc: 0.9281 Epoch 13/24 ---------- train Loss: 0.0029 Acc: 1.0000 val Loss: 0.3019 Acc: 0.9281 Epoch 14/24 ---------- train Loss: 0.0050 Acc: 1.0000 val Loss: 0.2977 Acc: 0.9281 Epoch 15/24 ---------- train Loss: 0.0017 Acc: 1.0000 val Loss: 0.2975 Acc: 0.9281 Epoch 16/24 ---------- train Loss: 0.0041 Acc: 1.0000 val Loss: 0.2989 Acc: 0.9281 Epoch 17/24 ---------- train Loss: 0.0052 Acc: 1.0000 val Loss: 0.2983 Acc: 0.9281 Epoch 18/24 ---------- train Loss: 0.0032 Acc: 1.0000 val Loss: 0.2981 Acc: 0.9281 Epoch 19/24 ---------- train Loss: 0.0029 Acc: 1.0000 val Loss: 0.2984 Acc: 0.9281 Epoch 20/24 ---------- train Loss: 0.0033 Acc: 1.0000 val Loss: 0.2973 Acc: 0.9281 Epoch 21/24 ---------- train Loss: 0.0035 Acc: 1.0000 val Loss: 0.2976 Acc: 0.9281 Epoch 22/24 ---------- train Loss: 0.0032 Acc: 1.0000 val Loss: 0.2982 Acc: 0.9281 Epoch 23/24 ---------- train Loss: 0.0022 Acc: 1.0000 val Loss: 0.2986 Acc: 0.9281 Epoch 24/24 ---------- train Loss: 0.0023 Acc: 1.0000 val Loss: 0.2987 Acc: 0.9281 Training 
complete in 8m 11s
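###Markdown Hedged follow-up sketch (not executed as part of the original run): save the fine-tuned weights and classify a single image with the trained model. The file names below ('alexnet_bees_vs_ants.pth', 'example_bee_or_ant.jpg') are placeholders, not files produced above. ###Code from PIL import Image

# persist the fine-tuned classifier
torch.save(model_conv.state_dict(), 'alexnet_bees_vs_ants.pth')

# single-image inference, reusing the same preprocessing as the dataset
model_conv = model_conv.to(device)
model_conv.eval()
img = Image.open('example_bee_or_ant.jpg').convert('RGB')
batch = data_transform(img).unsqueeze(0).to(device)
with torch.no_grad():
    pred = model_conv(batch).argmax(dim=1).item()
print('Predicted class:', class_names[pred])
###Output _____no_output_____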