docs/source/tutorials/echo_data.ipynb
###Markdown echo_data echo_data is a data plugin that echoes the data passed into it. It is useful for debugging grouped tasks. Example ###Code from nornir import InitNornir from nornir.core.filter import F from nornir_utils.plugins.tasks.data import echo_data from nornir_utils.plugins.functions import print_result nr = InitNornir( inventory={ "plugin": "SimpleInventory", "options": {"host_file": "data/hosts.yaml", "group_file": "data/groups.yaml"}, } ) nr = nr.filter(~F(name="dev5.no_group")) def grouped_task(task): task.run(task=echo_data, name=task.host.name, role=task.host["role"]) r = nr.run(task=grouped_task) print_result(r) ###Output grouped_task******************************************************************** * dev1.group_1 ** changed : False ********************************************** vvvv grouped_task ** changed : False vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv INFO ---- dev1.group_1 ** changed : False ------------------------------------------- INFO {'role': 'www'} ^^^^ END grouped_task ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ * dev2.group_1 ** changed : False ********************************************** vvvv grouped_task ** changed : False vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv INFO ---- dev2.group_1 ** changed : False ------------------------------------------- INFO {'role': 'db'} ^^^^ END grouped_task ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ * dev3.group_2 ** changed : False ********************************************** vvvv grouped_task ** changed : False vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv INFO ---- dev3.group_2 ** changed : False ------------------------------------------- INFO {'role': 'www'} ^^^^ END grouped_task ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ * dev4.group_2 ** changed : False ********************************************** vvvv grouped_task ** changed : False vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv INFO ---- dev4.group_2 ** changed : False ------------------------------------------- INFO {'role': 'db'} ^^^^ END grouped_task ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
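###Markdown Because echo_data simply returns whatever keyword arguments it receives, it can also be run at the top level, outside a grouped task. A minimal sketch reusing the `nr` object from above (the keyword arguments here are arbitrary illustrations, not required parameters): ###Code
# echo_data echoes back its keyword arguments, so each host reports {'role': 'debug', 'site': 'lab'}
r = nr.run(task=echo_data, role="debug", site="lab")
print_result(r)
###Output _____no_output_____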
3 - Identify Opinions.ipynb
###Markdown Identifying types of opinions in Spanish Wikipedia discussions In this analysis, we are going to automatically identify the kinds of opinions authors express in the discussions on the talk pages of the Spanish Wikipedia. ###Code from sklearn.model_selection import train_test_split from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfTransformer from sklearn.svm import LinearSVC from sklearn.naive_bayes import MultinomialNB from sklearn.dummy import DummyClassifier from sklearn.pipeline import Pipeline from sklearn.model_selection import StratifiedShuffleSplit from wdds_tokenizer import tokenize import wdds_tokenizer import pandas as pd import numpy as np import os import gc import matplotlib import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline #matplotlib.style.use('seaborn-ticks') matplotlib.rcParams['font.size'] = 14 matplotlib.rcParams['ytick.labelsize'] = 14 matplotlib.rcParams['xtick.labelsize'] = 14 matplotlib.rcParams['axes.labelsize'] = 14 matplotlib.rcParams['axes.titlesize'] = 18 sns.set_style('ticks') ###Output _____no_output_____ ###Markdown Load our labelled dataset, containing the opinions on the talk pages of Wikipedia, segmented into sentences assuming normal punctuation. This dataset includes the initial 1000 edits of talk pages of political leaders in America. ###Code ds = pd.read_csv('data/wdds.csv') ds.shape ds = ds[~ds.type.isnull()] ds = ds[ds.subtype!='INVALID'] ds = ds[ds.subtype!='SIGN'] ds = ds[ds.subtype!='OLAN'] ds['target'] = ds['type'] ds['opinion'] = ds['clean_opinion'] ds.shape sss = StratifiedShuffleSplit(n_splits=1, test_size=0.35, random_state=0) sss.get_n_splits(ds.opinion, ds.target) for train_index, test_index in sss.split(ds.opinion, ds.target): X_train, X_test = ds.iloc[train_index].opinion, ds.iloc[test_index].opinion y_train, y_test = ds.iloc[train_index].target, ds.iloc[test_index].target print(f'train size: {X_train.shape[0]}') print(f'test size: {X_test.shape[0]}') text_vectorizer = [ ('vect', CountVectorizer(strip_accents='ascii', min_df=3, max_df=0.8, stop_words=wdds_tokenizer.stopset, tokenizer=tokenize)), ('tfidf', TfidfTransformer(use_idf=True, sublinear_tf=True)) ] multinb_clf = Pipeline(text_vectorizer+[('clf', MultinomialNB())]) lsvc_clf = Pipeline(text_vectorizer+[('clf', LinearSVC())]) mf_clf = Pipeline(text_vectorizer+[('clf', DummyClassifier(strategy='most_frequent', random_state=0))]) uniform_clf = Pipeline(text_vectorizer+[('clf', DummyClassifier(strategy='uniform', random_state=0))]) strat_clf = Pipeline(text_vectorizer+[('clf', DummyClassifier(strategy='stratified', random_state=0))]) use_stemmer = True ###Output _____no_output_____ ###Markdown Evaluation of the performance on the test set ###Code from sklearn.model_selection import cross_val_score from sklearn.model_selection import GridSearchCV summary_scores = ['CI (95%)', ''] uniform_clf.fit(X_train, y_train) scores = cross_val_score(uniform_clf, X_train, y_train) confidence_interval = scores.std() * 2 print("RND Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), confidence_interval)) summary_scores.extend([confidence_interval, '', '']) #parameters = {'clf__C':[0.1, 1, 10]} parameters = {'clf__C':[1]} parameters['clf__loss']=('hinge','squared_hinge') parameters['clf__multi_class']= ('ovr', 'crammer_singer') parameters['clf__class_weight'] = (None, 'balanced') parameters['clf__tol'] = [1e-3, 1e-4] lsvc_cv = GridSearchCV(lsvc_clf, parameters) lsvc_cv.fit(X_train, y_train) scores = cross_val_score(lsvc_cv,
X_train, y_train) confidence_interval = scores.std() * 2 print(f"LSVC best score: {lsvc_cv.best_score_}") print(f"LSVC best params: {lsvc_cv.best_params_}") print("LSVC Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), confidence_interval)) summary_scores.extend([confidence_interval, '','']) parameters = {'clf__alpha':[ 0.1, 1.0, 10.0]} parameters['clf__fit_prior']= [True, False] multinb_cv = GridSearchCV(multinb_clf, parameters) multinb_cv.fit(X_train, y_train) scores = cross_val_score(multinb_cv, X_train, y_train) confidence_interval = scores.std() * 2 print(f"MNB best score: {multinb_cv.best_score_}") print(f"MNB best params: {multinb_cv.best_params_}") print("MNB Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), confidence_interval)) summary_scores.extend([confidence_interval, '','']) from sklearn.metrics import precision_recall_fscore_support docs_test = X_test labels = y_test.unique() labels.sort() rds = pd.DataFrame({'Label': labels}) macro_results = ['Macro', len(y_test)] predicted = uniform_clf.predict(docs_test) results = precision_recall_fscore_support(y_test, predicted) macro_results.extend(precision_recall_fscore_support(y_test, predicted, average='macro')[:3]) rds['Support'] = results[3] rds['BL-P'] = results[0] rds['BL-R'] = results[1] rds['BL-F1'] = results[2] predicted = lsvc_cv.predict(docs_test) results = precision_recall_fscore_support(y_test, predicted) macro_results.extend(precision_recall_fscore_support(y_test, predicted, average='macro')[:3]) rds['LSVC-P'] = results[0] rds['LSVC-R'] = results[1] rds['LSVC-F1'] = results[2] predicted = multinb_cv.predict(docs_test) results = precision_recall_fscore_support(y_test, predicted) macro_results.extend(precision_recall_fscore_support(y_test, predicted, average='macro')[:3]) rds['MNB-P'] = results[0] rds['MNB-R'] = results[1] rds['MNB-F1'] = results[2] rds.loc[len(rds)]=macro_results rds.loc[len(rds)]=summary_scores rds.to_csv('output/classif_report.csv', index=False) rds from sklearn.metrics import confusion_matrix predicted = lsvc_cv.predict(docs_test) confusion_matrix(y_test, predicted) ###Output _____no_output_____ ###Markdown model usage ###Code sample = X_test[:2] labels = y_test[:2] docs_new = sample predicted = lsvc_cv.predict(docs_new) for doc, label, pred in zip(docs_new, labels, predicted): print('%r => %s, %s' % (doc, label, pred)) ###Output 'y existen páginas específicas, foros por ejemplo, para exponer las opiniones sin más.' => ARGUMENTATIVE, PERFORMATIVE 'realmente no coincido contigo en tu argumento, pues creo que en aquel caso los motivos eran también evidentes, pero no vamos a retomar esa discusión.' => ARGUMENTATIVE, ARGUMENTATIVE
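###Markdown To reuse the fitted pipeline later without re-training, it can be persisted to disk with joblib. A minimal sketch (the file name is arbitrary; `best_estimator_` is the refitted pipeline selected by the grid search, and the custom `wdds_tokenizer` module must be importable again when the file is loaded): ###Code
import joblib
# save the tuned LinearSVC pipeline, then reload it and classify a new sentence
joblib.dump(lsvc_cv.best_estimator_, 'output/lsvc_pipeline.joblib')
clf = joblib.load('output/lsvc_pipeline.joblib')
clf.predict(['no estoy de acuerdo con ese argumento'])
###Output _____no_output_____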
mlt-materials-editions-2.0/05-digging-deeper-into-turi/projects/starter/notebook/.ipynb_checkpoints/DiggingDeeper-checkpoint.ipynb
###Markdown Transfer learning with SqueezeNetThese cells do the essential tasks from the previous chapter, to load training and testing data, add the labels, then train and evaluate the model. The only difference is using SqueezeNet instead of VisionFeaturePrint_Screen as the base model for transfer learning.There's an option to load the pre-trained model. ###Code import turicreate as tc import matplotlib.pyplot as plt train_data = tc.image_analysis.load_images("snacks/train", with_path=True) len(train_data) test_data = tc.image_analysis.load_images("snacks/test", with_path=True) len(test_data) import os train_data["label"] = train_data["path"].apply(lambda path: os.path.basename(os.path.split(path)[0])) test_data["label"] = test_data["path"].apply(lambda path: os.path.basename(os.path.split(path)[0])) train_data["label"].value_counts().print_rows(num_rows=20) test_data["label"].value_counts().print_rows(num_rows=20) ###Output +------------+-------+ | value | count | +------------+-------+ | pineapple | 260 | | hot dog | 250 | | grape | 250 | | apple | 250 | | juice | 250 | | ice cream | 250 | | banana | 250 | | watermelon | 250 | | salad | 250 | | muffin | 250 | | doughnut | 250 | | waffle | 250 | | cookie | 249 | | carrot | 249 | | cake | 249 | | strawberry | 249 | | candy | 249 | | orange | 249 | | popcorn | 180 | | pretzel | 154 | +------------+-------+ [20 rows x 2 columns] +------------+-------+ | value | count | +------------+-------+ | doughnut | 50 | | hot dog | 50 | | grape | 50 | | apple | 50 | | juice | 50 | | ice cream | 50 | | banana | 50 | | watermelon | 50 | | salad | 50 | | candy | 50 | | orange | 50 | | cake | 50 | | cookie | 50 | | carrot | 50 | | waffle | 50 | | strawberry | 49 | | muffin | 48 | | popcorn | 40 | | pineapple | 40 | | pretzel | 25 | +------------+-------+ [20 rows x 2 columns] ###Markdown Run the next cell to train the model.**OR**Run the cell **after** the next cell, to load the model from the current folder. ###Code # Train the image classifier model = tc.image_classifier.create(train_data, target="label", model="squeezenet_v1.1", verbose=True, max_iterations=100) # OR load HealthySnacks.model from current folder model = tc.load_model("HealthySnacks.model") model.classifier ###Output _____no_output_____ ###Markdown Continue with trained or loaded model. ###Code metrics = model.evaluate(test_data) print("Accuracy: ", metrics["accuracy"]) print("Precision: ", metrics["precision"]) print("Recall: ", metrics["recall"]) ###Output _____no_output_____ ###Markdown Getting individual predictions ###Code model.predict(test_data) plt.imshow(test_data[1]["image"].pixel_data) output = model.classify(test_data) output imgs_with_pred = test_data.add_columns(output) imgs_with_pred.explore() imgs_filtered = imgs_with_pred[(imgs_with_pred["probability"] > 0.9) & (imgs_with_pred["label"] != imgs_with_pred["class"] )] imgs_filtered.explore() ###Output _____no_output_____ ###Markdown Sorting the prediction probabilities ###Code predictions = model.predict(test_data, output_type='probability_vector') print("Probabilities for 2nd image", predictions[1]) labels = test_data["label"].unique().sort() preds = tc.SArray(predictions[1]) tc.SFrame({'preds': preds, 'labels': labels}).sort([('preds', False)]) ###Output _____no_output_____ ###Markdown Increasing max iterations Run the next cell to load the model from the current folder.**OR**Run the two cells **after** the next cell, to train and save the model. 
###Code # Load HealthySnacks_200.model from the current folder model200 = tc.load_model("HealthySnacks_200.model") # Train the image classifier model200 = tc.image_classifier.create(train_data, target="label", model="squeezenet_v1.1", verbose=True, max_iterations=200) model200.save("HealthySnacks_200.model") ###Output _____no_output_____ ###Markdown Continue with trained or loaded model. ###Code metrics200 = model200.evaluate(test_data) print("Accuracy: ", metrics200["accuracy"]) print("Precision: ", metrics200["precision"]) print("Recall: ", metrics200["recall"]) ###Output _____no_output_____ ###Markdown Confusing apples with oranges? ###Code import numpy as np import seaborn as sns def compute_confusion_matrix(metrics, labels): num_labels = len(labels) label_to_index = {l:i for i,l in enumerate(labels)} conf = np.zeros((num_labels, num_labels), dtype=np.int) for row in metrics["confusion_matrix"]: true_label = label_to_index[row["target_label"]] pred_label = label_to_index[row["predicted_label"]] conf[true_label, pred_label] = row["count"] return conf def plot_confusion_matrix(conf, labels, figsize=(8, 8)): fig = plt.figure(figsize=figsize) heatmap = sns.heatmap(conf, annot=True, fmt="d") heatmap.xaxis.set_ticklabels(labels, rotation=45, ha="right", fontsize=12) heatmap.yaxis.set_ticklabels(labels, rotation=0, ha="right", fontsize=12) plt.xlabel("Predicted label", fontsize=12) plt.ylabel("True label", fontsize=12) plt.show() conf = compute_confusion_matrix(metrics200, labels) plot_confusion_matrix(conf, labels, figsize=(16, 16)) ###Output _____no_output_____ ###Markdown Computing recall for each class ###Code for i, label in enumerate(labels): correct = conf[i, i] images_per_class = conf[i].sum() print("%10s %.1f%%" % (label, 100. * correct/images_per_class)) ###Output apple 64.0% banana 68.0% cake 54.0% candy 58.0% carrot 66.0% cookie 56.0% doughnut 62.0% grape 84.0% hot dog 76.0% ice cream 44.0% juice 74.0% muffin 50.0% orange 74.0% pineapple 67.5% popcorn 62.5% pretzel 56.0% salad 72.0% strawberry 67.3% waffle 62.0% watermelon 64.0% ###Markdown Wrangling Turi Create code Using a fixed validation dataset ###Code val_data = tc.image_analysis.load_images("snacks/val", with_path=True) val_data["label"] = val_data["path"].apply(lambda path: os.path.basename(os.path.split(path)[0])) len(val_data) ###Output _____no_output_____ ###Markdown Saving the extracted features ###Code from turicreate.toolkits import _pre_trained_models from turicreate.toolkits import _image_feature_extractor ptModel = _pre_trained_models.MODELS["squeezenet_v1.1"]() feature_extractor = _image_feature_extractor.MXFeatureExtractor(ptModel) train_features = feature_extractor.extract_features(train_data, "image", verbose=True) extracted_train_features = tc.SFrame({ "label": train_data["label"], '__image_features__': train_features, }) extracted_train_features.save("extracted_train_features.sframe") ###Output _____no_output_____ ###Markdown When you open this notebook again, run the next cell to reload saved features. ###Code # Run this the next time you open this notebook extracted_train_features = tc.SFrame("extracted_train_features.sframe") extracted_val_features = tc.SFrame("extracted_val_features.sframe") ###Output _____no_output_____ ###Markdown Continue working with extracted features. 
###Code extracted_train_features.head() extracted_train_features[0]["__image_features__"] val_features = feature_extractor.extract_features(val_data, "image", verbose=True) extracted_val_features = tc.SFrame({ "label": val_data["label"], '__image_features__': val_features, }) extracted_val_features.save("extracted_val_features.sframe") ###Output _____no_output_____ ###Markdown Training the classifier with regularization ###Code lr_model = tc.logistic_classifier.create(extracted_train_features, features=['__image_features__'], target="label", validation_set=extracted_val_features, max_iterations=200, seed=None, verbose=True, l2_penalty=10.0, l1_penalty=0.0, convergence_threshold=1e-8) ###Output _____no_output_____ ###Markdown Saving the model ###Code from turicreate.toolkits.image_classifier import ImageClassifier state = { 'classifier': lr_model, 'model': ptModel.name, 'max_iterations': lr_model.max_iterations, 'feature_extractor': feature_extractor, 'input_image_shape': ptModel.input_image_shape, 'target': lr_model.target, 'feature': "image", 'num_features': 1, 'num_classes': lr_model.num_classes, 'classes': lr_model.classes, 'num_examples': lr_model.num_examples, 'training_time': lr_model.training_time, 'training_loss': lr_model.training_loss, } model = ImageClassifier(state) model.save("HealthySnacks_regularized.model") model.export_coreml("HealthySnacks_regularized.mlmodel") model model.classifier ###Output _____no_output_____ ###Markdown Reload the no-regularization model to compare the coefficients. ###Code no_reg_model = tc.load_model("HealthySnacks.model") no_reg_model.classifier ###Output _____no_output_____
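###Markdown One way to see the effect of the L2 penalty numerically, rather than eyeballing the two coefficient tables, is to compare the average coefficient magnitude of both classifiers. A minimal sketch, assuming each classifier exposes its weights through the usual `coefficients` SFrame with a `value` column: ###Code
import numpy as np
for name, clf in [("regularized", model.classifier), ("no regularization", no_reg_model.classifier)]:
    # regularization should shrink the weights, so the regularized mean should be smaller
    values = np.array([v for v in clf.coefficients["value"] if v is not None], dtype=float)
    print(name, "mean |coefficient|:", np.abs(values).mean())
###Output _____no_output_____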
English/2_mass_balance.ipynb
###Markdown Mass balance We have already used a mass balance model to add ice to our glacier in [notebook 1](1_glacier_bed_slope.ipynb). In this notebook, we will study mass balance in more detail. We will learn how to describe the climate that the glacier experiences in the model, and we will see how the climate determines how much water is stored as glacier ice. First, we import all the necessary modules: ###Code # The commands below are just importing the necessary modules and functions # Plot defaults %matplotlib inline import matplotlib.pyplot as plt plt.rcParams['figure.figsize'] = (9, 6)  # Default plot size # Scientific packages import numpy as np import pandas as pd # Constants from oggm import cfg cfg.initialize() # OGGM models from oggm.core.massbalance import LinearMassBalance from oggm.core.flowline import FluxBasedModel, RectangularBedFlowline # There are several solvers in OGGM core. We use the default one for this experiment from functools import partial FlowlineModel = partial(FluxBasedModel, min_dt=0, cfl_number=0.01) # OGGM Edu helper functions import oggm_edu as edu import CdeC as cdec ###Output _____no_output_____ ###Markdown We follow similar steps to what we did in [glacier_bed_slope](1_glacier_bed_slope.ipynb) to set up our simple model. ###Code # define horizontal resolution of the model: # nx: number of grid points # map_dx: grid point spacing in meters nx = 200 map_dx = 100 # define glacier top and bottom altitudes in meters (instead of slope in the earlier notebook) top = 3400 bottom = 1400 # This is the bed rock, linearly decreasing from top altitude to bottom altitude, in nx steps bed_h = np.linspace(top, bottom, nx) # At the beginning, there is no glacier so our glacier surface is at the bed altitude surface_h = bed_h # calculate the corresponding distance along the glacier (from the top) distance_along_glacier = np.linspace(0,nx, nx) *0.1 # in km # Define the glacier width as we did in flowline_model initial_width = 300 #width in meters # Now describe the widths in "grid points" for the model, based on grid point spacing map_dx widths = np.zeros(nx) + initial_width/map_dx # Define our bed init_flowline = RectangularBedFlowline(surface_h=surface_h, bed_h=bed_h, widths=widths, map_dx=map_dx) ###Output _____no_output_____ ###Markdown Changing the equilibrium line altitude Over most of the glacier's surface, mass is being added or lost. We define the equilibrium line where the processes of accumulation (e.g. snow) and ablation (e.g. melting) are balanced, so that the surface is in "equilibrium." This is the same diagram we saw earlier, now with the Equilibrium Line Altitude ("ELA") marked in red.![ELA-cartoon](https://raw.githubusercontent.com/OGGM/glacier-graphics/master/glacier_intro/thumbnails/glacier_07.png "ELA diagram, Anne Maussion, Atelier les Gros Yeux") Discussion: How does the climate influence the equilibrium line altitude?- When it's hotter, the ELA will ...- When it's cold, the ELA will ...- When there is more snow, the ELA will ...- When there is a drought, the ELA will ...**First, think about these alone. Afterwards, discuss with a partner.** Now we are going to test three different ELAs to see how our glacier changes. Based on our discussion, which ELA do you think will correspond to the largest glacier? ###Code # Define the ELAs we want to compare: (we worked with ELA=3000 m in the previous two notebooks) # We will calculate models with the ELAs: 2700 m, 3000 m and 3200 m. These numbers can be found for real glaciers.
ELAs = [2700, 3000, 3200] # Define the time period for which the glacier should be calculated: years = 600 # In the following lists intermediate steps of the models will be saved mb_models = [] annual_mbs = [] # here the "final" models will be saved models_ELA = [] # Colors and styles for the graphs colors = ['C1', 'C3', 'C5'] linestyles = ['--', '-.', ':'] # mass balance gradient with altitude (mm/m) altgrad = 4 for ELA in ELAs: # Calculation of the mass balance models for each ELA mb_model = LinearMassBalance(ELA, grad=altgrad) mb_models.append(mb_model) # Calculation of the annual mass balance along the glacier profile annual_mbs.append(mb_model.get_annual_mb(surface_h) * cfg.SEC_IN_YEAR) # The models require the initial glacier bed, a mass balance model, and an initial time (the year y0) model = FlowlineModel(init_flowline, mb_model=mb_model, y0=0.) # First, look at the state of the glacier models after the chosen number of years model.run_until(years) # Store for later analyses models_ELA.append(model) # Plot the results plt.figure(figsize=[14,6]) plt.subplot(121) # Annual mass balance for color, ELA, annual_mb in zip(colors, ELAs, annual_mbs): plt.plot(annual_mb, bed_h, color=color, label='Mass balance, ELA='+ str(ELA)) # Add ELA, where mass balance = 0: # plt.axhline(y=ELA, color='k', linestyle='--', linewidth=0.8, label='Equilibrium line altitude') plt.axvline(x=0, color='k', linestyle='--', linewidth=0.8) plt.xlabel('Annual mass balance (m yr-1)') plt.ylabel('Altitude (m)') plt.legend(loc='best'); plt.subplot(122) # Plot the initial conditions first: plt.plot(distance_along_glacier, init_flowline.surface_h, label='Initial glacier') # Get the modelled flowline (model.fls[-1]) and plot its new surface for ELA, color, linestyle, model in zip(ELAs, colors, linestyles, models_ELA): plt.plot(distance_along_glacier, model.fls[-1].surface_h, label='Glacier after {} years, ELA={}'.format(model.yr, ELA), color=color) # Add ELA: plt.axhline(y=ELA, color='k', linestyle=linestyle, linewidth=0.8, label='Equilibrium line altitude') # Add the bedrock: edu.plot_xz_bed(distance_along_glacier, bed_h) ###Output _____no_output_____ ###Markdown **First, think for yourself. Afterwards, discuss with a partner and in class:**- **What do these two charts teach us? What is on the x axis? y axis?**- **In the right graph, the three glaciers are very different. What does the position of the ELA for each glacier teach us? What characteristic is the same for each glacier?** Changing the mass balance gradient (MBG) The mass balance gradient (MBG, or `grad` in the cell below) is what it says: the change in the mass balance of the glacier with altitude. Let's see an example: ###Code grad = 4 # mm/m mb_ex = LinearMassBalance(3000, grad=grad) annual_mb = mb_ex.get_annual_mb(surface_h) * cfg.SEC_IN_YEAR # Plot the mass balance with altitude fig, ax = plt.subplots(1) ax.plot(annual_mb, bed_h, color='b', label='Mass balance, grad='+ str(grad)) ax.axhline(y=3000, color='k', linestyle='--', linewidth=0.8, label='Equilibrium line altitude') ax.axvline(x=0, color='k', linestyle='--', linewidth=0.8) ax.set(xlim=(-7.5, 2.5), xlabel='Annual mass balance (m/yr)', ylabel = 'Altitude (m)') ax.legend(loc='best'); ###Output _____no_output_____ ###Markdown What happens if we change the value of `grad`? Try it in the cell above. The MBG is not as easy to interpret as the ELA.
Here are several examples of MBG measured in glaciers around the world:![MBG](https://slideplayer.com/slide/14020519/86/images/11/Glaciology+%E2%80%93+Mass+Balance+Profiles.jpg "https://slideplayer.com/slide/14020519/") Discussion: How does climate influence the mass balance gradient with altitude?- When it's hotter, the MBG will ...- When it's cold, the MBG will ...- When there is more snow, the MBG will ...- When there is a drought, the MBG will ... Let's compare some glaciers with different MBGs: ###Code # Define the MBGs we want to compare: (we worked with grad=4mm/m in the # glacier flowline modelling notebook) # We will calculate models with the MBGs: 0.3, 4 and 15 mm/m. These numbers can be found for real glaciers. grads = [0.3, 4, 15] # Define the time period for which the glacier should be calculated: years = 300 # In the following lists intermediate steps of the models will be saved mb_models = [] annual_mb = [] # here the "final" models will be saved models_MBG = [] # Colors for the graphs colors = ['C1', 'C3', 'C5'] # equilibrium line altitude (ELA) ELA = 3000 for grad in grads: # Calculation of the mass balance models for each gradient a = LinearMassBalance(ELA, grad=grad) mb_models.append(a) # Calculation of the annual mass balance along the glacier profile annual_mb.append(a.get_annual_mb(surface_h) * cfg.SEC_IN_YEAR) # The models require the initial glacier bed, a mass balance model, and an initial time (the year y0) model = FlowlineModel(init_flowline, mb_model=a, y0=0.) # First, look at the state of the glacier models after the chosen amount of years model.run_until(years) # Store the models for later analyses models_MBG.append(model) # Plot the results plt.figure(figsize=[14,6]) plt.subplot(121) # Annual mass balance for color, grad, ann_mb in zip(colors, grads, annual_mb): plt.plot(ann_mb, bed_h, color=color, label='Mass balance, grad='+ str(grad)) # Add ELA, where mass balance = 0: plt.axhline(y=ELA, color='k', linestyle='--', linewidth=0.8, label='Equilibrium line altitude') plt.axvline(x=0, color='k', linestyle='--', linewidth=0.8) plt.xlabel('Annual mass balance (m yr-1)') plt.ylabel('Altitude (m)') plt.legend(loc='best'); plt.subplot(122) # Plot the initial conditions first: plt.plot(distance_along_glacier, init_flowline.surface_h, label='Initial glacier') # Get the modelled flowline (model.fls[-1]) and plot its new surface for color, grad, model in zip(colors, grads, models_MBG): plt.plot(distance_along_glacier, model.fls[-1].surface_h, label='Glacier after {} years, '.format(model.yr) + 'grad=' + str(grad), color=color) # Add ELA: plt.axhline(y=ELA, color='k', linestyle='--', linewidth=0.8, label='Equilibrium line altitude') # Add the bedrock: edu.plot_xz_bed(distance_along_glacier, bed_h) ###Output _____no_output_____ ###Markdown Compare the graphs on the left and right.The model with the highest gradient (purple line) experiences the greatest amount of ablation *and* the greatest accumulation. Therefore, it shows the longest and thickest glacier after the given number of model years. With all models calculated for the same time interval, we can conclude that a high mass balance gradient leads to faster glacier growth.**What do you think: what kind of climate produces high mass balance gradients?**You can read more about the connection between MBG and climate in the references below. Water storage So, we have seen that the climate influences the shape of our glacier. In turn, the shape of the glacier determines the amount of water it can store. 
How much water is stored in the glaciers we made with different ELAs? We make a table. ###Code # Create lists with different properties of the glacier models length = [] vol_ice = [] vol_water = [] # Add a value to the lists for each model for model in models_ELA: length.append(model.length_m) vol_ice.append(model.volume_km3) vol_water.append(cdec.ice_to_freshwater(model.volume_km3)) # Create a table with the properties of the glacier models dic = {'ELA': ELAs, 'length': length, 'volume': vol_ice, 'equivalent water': vol_water} table = pd.DataFrame.from_dict(dic) table # ask Jupyter to show us the result ###Output _____no_output_____
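###Markdown The helper `cdec.ice_to_freshwater` comes with the course materials; physically it is just a density conversion. A back-of-the-envelope equivalent, assuming an ice density of about 917 kg/m³ and a water density of 1000 kg/m³ (the exact values used by the helper may differ slightly): ###Code
def ice_to_water_volume(vol_ice_km3, rho_ice=917.0, rho_water=1000.0):
    # a volume of ice weighs less than the same volume of water,
    # so melting it yields proportionally less liquid water
    return vol_ice_km3 * rho_ice / rho_water

ice_to_water_volume(1.0)  # 1 km³ of ice melts into roughly 0.917 km³ of water
###Output _____no_output_____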
Final (3).ipynb
###Markdown Analyzing Global COVID-19 Data in Python COVID-19 Background COVID-19 is an infectious disease that is believed to have started in Wuhan City, China in late 2019 and spread throughout the world, creating a worldwide pandemic in early 2020. COVID-19 symptoms can range from mild (or no symptoms) to severe illness. You can become infected by coming into close contact (about 6 feet or two arm lengths) with a person who has COVID-19. COVID-19 is primarily spread from person to person. Data Source: The data is downloaded from the European Centre for Disease Prevention and Control. ###Code %matplotlib inline import pandas ###Output _____no_output_____ ###Markdown Importing Data from S3 ###Code df = pandas.read_excel('s3://mays-ia241/COVID-19-geographic-disbtribution-worldwide_April_10_2020.xlsx') df[:10] #the top 10 rows ###Output _____no_output_____ ###Markdown Analysis 1. Sum Of Deaths Per Day Globally ###Code sum_death_by_date = df.groupby('dateRep').sum()['deaths'] sum_death_by_date.plot() ###Output _____no_output_____ ###Markdown This chart allows us to see that over the months, death rates have ultimately risen. You can also see that from January to March, death rates were consistent until they sharply rose in mid-March and into April. 2. The Top 15 Countries with the Highest Amount of Cases ###Code sum_cases_by_country = df.groupby('countriesAndTerritories').sum()['cases'] sum_cases_by_country.nlargest(15).plot.pie() ###Output _____no_output_____ ###Markdown This pie chart represents the top 15 countries with the most COVID-19 cases. It is clear that the USA has the most cases, followed by Spain and Italy. 3. Looking at China's Data ###Code china_data = df.loc[df['countriesAndTerritories'] == 'China'] china_data[:10] #the top 10 rows ###Output _____no_output_____ ###Markdown 4. Number of Cases in China Related to Their Deaths ###Code china_data.plot.scatter(x='cases',y='deaths') ###Output _____no_output_____ ###Markdown This graph shows the relationship between the number of cases and deaths in China. However, I think it better highlights the confusion and possible deception surrounding the true number of cases and deaths in China. 5. The Sum of Cases in China per Day ###Code china_data = df.loc[df['countriesAndTerritories'] == 'China'].groupby('dateRep').sum()['cases'] china_data.plot() ###Output _____no_output_____
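###Markdown Daily counts are noisy because reporting varies from day to day, so a rolling mean often shows the underlying trend more clearly. A minimal sketch smoothing the daily case series for China built above with a 7-day window: ###Code
# rolling(7) averages each day with the previous six, smoothing reporting spikes
china_data.rolling(window=7).mean().plot()
###Output _____no_output_____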
tutorial/11-ensdf-processing.ipynb
###Markdown ENSDF Processing Tutorial ensdf_processing is a PyNE module that contains ENSDF (Evaluated Nuclear Structure Data File) evaluation tools. It includes ALPHAD, BRICC, DELTA, GABS, GTOL, BLDHST, HSICC, HSMRG, SEQHST, LOGFT, RADLIST, RADD and RULER. Full documentation for the internal structure of each can be found at: http://www.nndc.bnl.gov/nndcscr/ensdf_pgm/analysis/ Examples of the Python interface for these evaluation tools follow. ###Code import pyne from pyne import ensdf_processing ###Output _____no_output_____ ###Markdown All of the evaluation tools have a single dictionary parameter. ###Code input_dict = {} ###Output _____no_output_____ ###Markdown This input dictionary must have all of the key/value pairs specified in the documentation for the specific evaluation tool being run. In this example, alphad will be run, which requires the following keys: * 'input_file'* 'report_file'* 'rewrite_input_with_hinderance_factor'* 'output_file'. ###Code input_dict['input_file'] = 'ensdf_processing/alphad/ref_a228.ens' input_dict['report_file'] = '/alphad.rpt' input_dict['rewrite_input_with_hinderance_factor'] = 1 input_dict['output_file'] = '/alphad.out' output_dict = ensdf_processing.alphad(input_dict) ###Output _____no_output_____ ###Markdown To run an evaluation tool, call the corresponding function with the dictionary as its argument, e.g. `ensdf_processing.alphad(input_dict)`. ###Code output_dict = ensdf_processing.alphad(input_dict) ###Output _____no_output_____ ###Markdown A dictionary is returned, with all of the input key/value pairs, as well as any other information the evaluation tool returns. In this case, no additional key/value pairs have been added to the returned dictionary. Alphad wrote the resulting output file to '/alphad.out' and a report file to '/alphad.rpt', both specified by the input dictionary. ###Code print(output_dict) ###Output {'report_file': '/alphad.rpt', 'rewrite_input_with_hinderance_factor': 1, 'output_file': '/alphad.out', 'input_file': 'ensdf_processing/alphad/ref_a228.ens'} ###Markdown Following are examples of each of the evaluation tools packaged with PyNE: ALPHAD (calculates alpha HF's and theoretical half-lives) ###Code input_dict = {} input_dict['input_file'] = 'ensdf_processing/alphad/ref_a228.ens' input_dict['report_file'] = '/tmp_alphad.rpt' input_dict['rewrite_input_with_hinderance_factor'] = 1 input_dict['output_file'] = '/tmp_alphad.out' output_dict = ensdf_processing.alphad(input_dict) ###Output _____no_output_____ ###Markdown BRICC (calculates the conversion electron, electron-positron pair conversion coefficients and the E0 electronic factors) ###Code input_dict = {} input_dict['input_type'] = 'evaluation' input_dict['input_file'] = 'ensdf_processing/bricc/ref_a228.ens' input_dict['BrIccNH'] = 0 output_dict = ensdf_processing.bricc(input_dict) ###Output _____no_output_____ ###Markdown BLDHST ###Code input_dict = {} input_dict['input_file'] = 'ensdf_processing/bldhst/ref_bldhst_iccseq.dat' input_dict['output_table_file'] = '/tmp_bldhst_icctbl.dat' input_dict['output_index_file'] = '/tmp_bldhst_iccndx.dat' output_dict = ensdf_processing.bldhst(input_dict) ###Output _____no_output_____ ###Markdown DELTA (analyzes gamma-gamma angular correlations from unaligned states) ###Code input_dict = {} input_dict['input_file'] = 'ensdf_processing/delta/ref_inp.dat' input_dict['output_file'] = '/tmp_delta.dat' output_dict = ensdf_processing.delta(input_dict) ###Output _____no_output_____ ###Markdown GABS (gamma-ray absolute intensity and normalization calculation) ###Code input_dict = {} input_dict['input_file'] =
'ensdf_processing/gabs/ref_gabs_80Br.in' input_dict['output_file'] = '/tmp_gabs_80Br.rpt' input_dict['dataset_file'] = '/tmp_gabs_80Br.new' output_dict = ensdf_processing.gabs(input_dict) ###Output _____no_output_____ ###Markdown GTOL (performs a least-squares fit to the gamma-energies to obtain level energies and calculates the net feedings to levels) ###Code input_dict = {} input_dict['input_file'] = 'ensdf_processing/gtol/ref_gtol.inp' input_dict['report_file'] = '/tmp_gtol.rpt' input_dict['new_ensdf_file_with_results'] = 0 input_dict['output_file'] = '/tmp_gtol.out' input_dict['supress_gamma_comparison'] = 1 input_dict['supress_intensity_comparison'] = 1 input_dict['dcc_theory_percent'] = 1.4 output_dict = ensdf_processing.gtol(input_dict) ###Output _____no_output_____ ###Markdown HSICC (interpolates Hager-Seltzer and Dragoun internal conversion coefficients) ###Code input_dict = {} input_dict['data_deck'] = 'ensdf_processing/hsicc/ref_hsicc_data.tst' input_dict['icc_index'] = 'ensdf_processing/hsicc/ref_hsicc_iccndx.dat' input_dict['icc_table'] = 'ensdf_processing/hsicc/ref_hsicc_icctbl.dat' input_dict['complete_report'] = '/tmp_out_hsicc_hscalc.lst' input_dict['new_card_deck'] = '/tmp_out_hsicc_cards.new' input_dict['comparison_report'] = '/tmp_out_hsicc_compar.lst' input_dict['is_multipol_known'] = 'Y' output_dict = ensdf_processing.hsicc(input_dict) ###Output _____no_output_____ ###Markdown HSMRG ###Code input_dict = {} input_dict['data_deck'] = 'ensdf_processing/hsmrg/ref_hsmrg_data.tst' input_dict['card_deck'] = 'ensdf_processing/hsmrg/ref_hsmrg_cards.new' input_dict['merged_data_deck'] = '/tmp_out_cards.mrg' output_dict = ensdf_processing.hsmrg(input_dict) ###Output _____no_output_____ ###Markdown SEQHST ###Code input_dict = {} input_dict['binary_table_input_file'] = 'ensdf_processing/seqhst/ref_seqhst_icctbl.dat' input_dict['sequential_output_file'] = '/tmp_out_iccseq.dat' output_dict = ensdf_processing.seqhst(input_dict) ###Output _____no_output_____ ###Markdown LOGFT (calculates log ft values for beta and electron-capture decay, average beta energies, and capture fractions) ###Code input_dict = {} input_dict['input_data_set'] = 'ensdf_processing/logft/ref_logft.inp' input_dict['output_report'] = '/tmp_logft.rpt' input_dict['data_table'] = 'ensdf_processing/logft/ref_logft.dat' input_dict['output_data_set'] = '/tmp_logft.new' output_dict = ensdf_processing.logft(input_dict) ###Output _____no_output_____ ###Markdown RADD ###Code input_dict = {} input_dict['atomic_number'] = '86' input_dict['neutron_number'] = '113' input_dict['output_file'] = 'tmp_output.out' radd_output = ensdf_processing.radd(input_dict) ###Output _____no_output_____ ###Markdown RADLIST (calculates atomic & nuclear radiations; checks energy balance) ###Code input_dict = {} input_dict['output_radiation_listing'] = 'Y' input_dict['output_ensdf_like_file'] = 'N' input_dict['output_file_for_nudat'] = 'N' input_dict['output_mird_listing'] = 'N' input_dict['calculate_continua'] = 'N' input_dict['input_file'] = 'ensdf_processing/radlst/ref_radlst.inp' input_dict['output_radlst_file'] = '/tmp_radlst.rpt' input_dict['input_radlst_data_table'] = 'ensdf_processing/radlst/ref_mednew.dat' input_dict['output_ensdf_file'] = '/tmp_ensdf.rpt' output_dict = ensdf_processing.radlist(input_dict) ###Output /home/josh/anaconda/lib/python2.7/site-packages/pyne-0.5.0_rc1-py2.7.egg/pyne/radlist ###Markdown RULER (calculates reduced transition probabilities) ###Code input_dict = {} input_dict['input_file'] = 
'ensdf_processing/ruler/ref_ruler.inp' input_dict['output_report_file'] = '/tmp_ruler.rpt' input_dict['mode_of_operation'] = 'R' input_dict['assumed_dcc_theory'] = '1.4' output_dict = ensdf_processing.ruler(input_dict) ###Output _____no_output_____
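###Markdown Because each tool expects its own set of dictionary keys and fails if one is missing, a small guard before each call can make longer scripts easier to debug. A minimal, hypothetical helper (the required-key lists come from each tool's documentation): ###Code
def check_required_keys(input_dict, required_keys):
    # fail early with a clear message instead of inside the wrapped tool
    missing = [key for key in required_keys if key not in input_dict]
    if missing:
        raise KeyError('missing required keys: {}'.format(missing))

check_required_keys(input_dict, ['input_file', 'output_report_file', 'mode_of_operation', 'assumed_dcc_theory'])
###Output _____no_output_____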
Final Notebooks/WeatherPy.ipynb
###Markdown WeatherPy---- Note* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps. ###Code # Dependencies and Setup import matplotlib.pyplot as plt import pandas as pd import numpy as np import requests import time from scipy.stats import linregress import datetime import random from pprint import pprint import os import csv # Import API key from api_keys import weather_api_key # Incorporated citipy to determine city based on latitude and longitude from citipy import citipy # Output File (CSV) output_data_file = "output_data/cities.csv" # Range of latitudes and longitudes lat_range = (-90, 90) lng_range = (-180, 180) # url + api key url = "http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=" + weather_api_key ###Output _____no_output_____ ###Markdown Generate Cities List ###Code # List for holding lat_lngs and cities lat_lngs = [] cities = [] # Create a set of random lat and lng combinations lats = np.random.uniform(low=-90.000, high=90.000, size=1500) lngs = np.random.uniform(low=-180.000, high=180.000, size=1500) lat_lngs = zip(lats, lngs) # Identify nearest city for each lat, lng combination for lat_lng in lat_lngs: city = citipy.nearest_city(lat_lng[0], lat_lng[1]).city_name # If the city is unique, then add it to our cities list if city not in cities: cities.append(city) # Print the city count to confirm sufficient count len(cities) ###Output _____no_output_____ ###Markdown Perform API Calls* Perform a weather check on each city using a series of successive API calls.* Include a print log of each city as it's being processed (with the city number and city name). ###Code # create empty lists to store the data new_cities = [] cloudiness = [] country = [] date = [] humidity = [] temp = [] lat = [] lng = [] wind = [] record_counter = 0 set_counter = 0 print('------------------------') print('Beginning Data Retrieval') print('------------------------') # use for loop to add city to base url for city in cities: query_url = url + "&q=" + city # Get weather data response = requests.get(query_url).json() if record_counter < 50: record_counter += 1 else: set_counter += 1 record_counter = 0 print('Processing record {} of set {} | {}'.format(record_counter, set_counter, city)) print(query_url) try: cloudiness.append(response['clouds']['all']) country.append(response['sys']['country']) date.append(response['dt']) humidity.append(response['main']['humidity']) temp.append(response['main']['temp_max']) lat.append(response['coord']['lat']) lng.append(response['coord']['lon']) wind.append(response['wind']['speed']) new_cities.append(city) except: print("City not found!") pass print('-------------------------') print('Data Retrieval Complete') print('-------------------------') ###Output ------------------------ Beginning Data Retrieval ------------------------ Processing record 1 of set 0 | rikitea http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=rikitea Processing record 2 of set 0 | hithadhoo http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=hithadhoo Processing record 3 of set 0 | saleaula http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=saleaula City not found!
Processing record 4 of set 0 | hobart http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=hobart Processing record 5 of set 0 | albany http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=albany Processing record 6 of set 0 | mujiayingzi http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=mujiayingzi Processing record 7 of set 0 | tiznit http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=tiznit Processing record 8 of set 0 | klaksvik http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=klaksvik Processing record 9 of set 0 | puerto ayora http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=puerto ayora Processing record 10 of set 0 | catamarca http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=catamarca Processing record 11 of set 0 | ushuaia http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=ushuaia Processing record 12 of set 0 | bonavista http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=bonavista Processing record 13 of set 0 | bargal http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=bargal City not found! Processing record 14 of set 0 | kamloops http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=kamloops Processing record 15 of set 0 | hermanus http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=hermanus Processing record 16 of set 0 | mataura http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=mataura Processing record 17 of set 0 | busselton http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=busselton Processing record 18 of set 0 | cape town http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=cape town Processing record 19 of set 0 | jamestown http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=jamestown Processing record 20 of set 0 | thompson http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=thompson Processing record 21 of set 0 | tommot http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=tommot Processing record 22 of set 0 | kruisfontein http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=kruisfontein Processing record 23 of set 0 | punta arenas http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=punta arenas Processing record 24 of set 0 | buala http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=buala Processing record 25 of set 0 | kodiak http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=kodiak Processing record 26 of set 0 | lebu http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=lebu Processing record 27 of set 
0 | kapaa http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=kapaa Processing record 28 of set 0 | catuday http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=catuday Processing record 29 of set 0 | ponta do sol http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=ponta do sol Processing record 30 of set 0 | tarakan http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=tarakan Processing record 31 of set 0 | hunza http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=hunza City not found! Processing record 32 of set 0 | marzuq http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=marzuq Processing record 33 of set 0 | vagur http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=vagur Processing record 34 of set 0 | vaini http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=vaini Processing record 35 of set 0 | alberton http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=alberton Processing record 36 of set 0 | crab hill http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=crab hill City not found! Processing record 37 of set 0 | barrow http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=barrow Processing record 38 of set 0 | jacobabad http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=jacobabad Processing record 39 of set 0 | tynda http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=tynda Processing record 40 of set 0 | laurel http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=laurel Processing record 41 of set 0 | belushya guba http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=belushya guba City not found! Processing record 42 of set 0 | nador http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=nador Processing record 43 of set 0 | nikolskoye http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=nikolskoye Processing record 44 of set 0 | urucara http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=urucara Processing record 45 of set 0 | ugoofaaru http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=ugoofaaru Processing record 46 of set 0 | san patricio http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=san patricio Processing record 47 of set 0 | fortuna http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=fortuna Processing record 48 of set 0 | praia http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=praia Processing record 49 of set 0 | barentsburg http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=barentsburg City not found! 
Processing record 50 of set 0 | mount gambier http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=mount gambier Processing record 0 of set 1 | cherskiy http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=cherskiy Processing record 1 of set 1 | castro http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=castro Processing record 2 of set 1 | saint-philippe http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=saint-philippe Processing record 3 of set 1 | shenjiamen http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=2ae1fca35e0586e04572f2c0ff31f738&q=shenjiamen ###Markdown Convert Raw Data to DataFrame* Export the city data into a .csv.* Display the DataFrame ###Code weather_dict = { "City": new_cities, "Cloudiness" : cloudiness, "Country" : country, "Date" : date, "Humidity" : humidity, "Temp": temp, "Lat" : lat, "Lng" : lng, "Wind Speed" : wind } weather_data = pd.DataFrame(weather_dict) weather_data.count() weather_data.head() ###Output _____no_output_____ ###Markdown Plotting the Data* Use proper labeling of the plots using plot titles (including date of analysis) and axes labels.* Save the plotted figures as .pngs. Latitude vs. Temperature Plot ###Code # Output to a csv cities_list = zip(new_cities,cloudiness,country,date,humidity,temp,lat,lng) # Set variable for output file output_file = os.path.join('..','output_data', 'cities.csv') # Open the output file with open(output_file, 'w') as datafile: weather_data.to_csv(output_file) # use datetime date =datetime.datetime.fromtimestamp( int(weather_data['Date'][0]) ).strftime('%m/%d/%Y') weather_data.plot(kind='scatter', x='Lat', y='Temp', c='DarkBlue') plt.title('City Latitude Vs Max Temperature ({})'.format(date) ) plt.xlabel('Latitude') plt.ylabel('Max temperature (F)') plt.grid() plt.savefig("Fig1.png") ###Output _____no_output_____ ###Markdown Latitude vs. Humidity Plot ###Code weather_data.plot(kind='scatter',x='Lat',y='Humidity', c='DarkBlue') plt.title('City Latitude Vs Max Humidity ({})'.format(date) ) plt.xlabel('Latitude') plt.ylabel('Humidity (%)') plt.grid() plt.savefig("Fig2.png") ###Output _____no_output_____ ###Markdown Latitude vs. Cloudiness Plot ###Code weather_data.plot(kind='scatter',x='Lat',y='Cloudiness', c='DarkBlue') plt.title('City Latitude Vs Cloudiness ({})'.format(date) ) plt.xlabel('Latitude') plt.ylabel('Cloudiness (%)') plt.grid() plt.savefig("Fig3.png") ###Output _____no_output_____ ###Markdown Latitude vs. 
Wind Speed Plot ###Code weather_data.plot(kind='scatter',x='Lat',y='Wind Speed', c='DarkBlue') plt.title('City Latitude Vs Wind Speed ({})'.format(date) ) plt.xlabel('Latitude') plt.ylabel('Wind Speed (mph)') plt.grid() plt.savefig("Fig4.png") ###Output _____no_output_____ ###Markdown Linear Regression ###Code # OPTIONAL: Create a function to create Linear Regression plots def linear_reg_plot(df, title_name, y_column_name, y_label, file_name, xy): x_values = df['Lat'] y_values = df[y_column_name] (slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values) regress_values = x_values * slope + intercept line_eq = "y = " + str(round(slope,2)) + "x +" + str(round(intercept,2)) plt.scatter(x_values,y_values) plt.plot(x_values,regress_values,"r-") plt.xlabel('Latitude') plt.ylabel(y_label) plt.annotate(line_eq,xy,fontsize=15,color="red") plt.title(title_name) print(f"The r-value is: {rvalue}") plt.savefig(file_name) # Create Northern and Southern Hemisphere DataFrames northern_hemisphere_df = weather_data.loc[weather_data['Lat'] > 0, :] southern_hemisphere_df = weather_data.loc[weather_data['Lat'] < 0, :] ###Output _____no_output_____ ###Markdown Northern Hemisphere - Max Temp vs. Latitude Linear Regression ###Code linear_reg_plot(northern_hemisphere_df, 'Northern Max Temp Vs Latitude Regression', 'Temp', 'Temp', '../output_data/NorthernMaxTempVsLatitudeReg.png', (10,0)) ###Output The r-value is: -0.8924563048884262 ###Markdown Southern Hemisphere - Max Temp vs. Latitude Linear Regression ###Code linear_reg_plot(southern_hemisphere_df, 'Southern Max Temp Vs Latitude Regression', 'Temp', 'Temp', '../output_data/SouthernMaxTempVsLatitudeReg.png', (-40,50)) ###Output The r-value is: 0.43714542640412146 ###Markdown Northern Hemisphere - Humidity (%) vs. Latitude Linear Regression ###Code linear_reg_plot(northern_hemisphere_df, 'Northern Humidity Vs. Latitude Regression', 'Humidity', 'Humidity', '../output_data/NorthernHumidityVsLatitudeReg.png', (40,20)) ###Output The r-value is: 0.3454955107722853 ###Markdown Southern Hemisphere - Humidity (%) vs. Latitude Linear Regression ###Code linear_reg_plot(southern_hemisphere_df, 'Southern Humidity Vs. Latitude Regression', 'Humidity', 'Humidity', '../output_data/SouthernHumidityVsLatitudeReg.png', (-50,20)) ###Output The r-value is: 0.24687259214394075 ###Markdown Northern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression ###Code linear_reg_plot(northern_hemisphere_df, 'Northern Cloudiness Vs. Latitude Regression', 'Cloudiness', 'Cloudiness', '../output_data/NorthernCloudinessVsLatitudeReg.png', (40,20)) ###Output The r-value is: 0.31497756482516004 ###Markdown Southern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression ###Code linear_reg_plot(southern_hemisphere_df, 'Southern Cloudiness Vs. Latitude Regression', 'Cloudiness', 'Cloudiness', '../output_data/SouthernCloudinessVsLatitudeReg.png', (-50,20)) ###Output The r-value is: 0.29461559053945563 ###Markdown Northern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression ###Code linear_reg_plot(northern_hemisphere_df, 'Northern Wind Speed Vs. Latitude Regression', 'Wind Speed', 'Wind Speed', '../output_data/NorthernWindSpeedVsLatitudeReg.png', (40,20)) ###Output The r-value is: 0.021595960038188117 ###Markdown Southern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression ###Code linear_reg_plot(southern_hemisphere_df, 'Southern Wind Speed Vs. Latitude Regression', 'Wind Speed', 'Wind Speed', '../output_data/SouthernWindSpeedVsLatitudeReg.png', (-30,20)) ###Output The r-value is: -0.2550635511455826
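###Markdown Note that `linregress` returns the correlation coefficient r, not r-squared; squaring it gives the share of variance explained, which makes the hemispheres easier to compare. A minimal sketch for the temperature fits, reusing the hemisphere DataFrames defined above: ###Code
for name, df_h in [('northern', northern_hemisphere_df), ('southern', southern_hemisphere_df)]:
    # linregress returns a result whose rvalue attribute is the correlation coefficient
    r = linregress(df_h['Lat'], df_h['Temp']).rvalue
    print(f"{name} temp fit: r = {r:.3f}, r^2 = {r**2:.3f}")
###Output _____no_output_____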
099_01_debug_stochastic_mcmc.ipynb
###Markdown Debugging Stochastic Models ###Code #hide #skip ! [ -e /content ] && pip install -Uqq pyndamics3 emcee # upgrade pyndamics3 on colab %pylab inline from pyndamics3 import Simulation,Stochastic_Simulation from pyndamics3.mcmc import * flut = array([0,1,2,3,4,5,6,7,8,9,10,11,12,13]) flui = array([3,8,26,76,225,298,258,233,189,128,68,29,14,4]) β=1.9732213241997467 γ=0.47521873806558335 β=.5 γ=1 So=763 Io=1 stoch_sim=Stochastic_Simulation() stoch_sim.add("-S+I",'β*S*I/N',S=So,I=Io) stoch_sim.add("-I +R",'γ*I',R=0) stoch_sim.add("N=S+I+R") stoch_sim.params(β=β,γ=γ) stoch_sim.add_data(t=flut,I=flui) stoch_sim.run(20,Nsims=100) stoch_sim['I'] I=stoch_sim.components[1] for i in range(100): plot(stoch_sim.t,stoch_sim.I[i],'ro',alpha=0.05) plot(flut,flui,'ko',ms=10,lw=3,) print(stoch_sim.func_str) dynamic_sim=sim=Simulation() sim.add("N=S+I+R") sim.add("S'=-β*S*I/N",So) sim.add("I'=+β*S*I/N-γ*I",Io) sim.add("R'=+γ*I",0) sim.params(β=β,γ=γ) sim.add_data(t=flut,I=flui) sim.run(20) plot(sim.t,sim.I) plot(flut,flui,'ko',ms=10,lw=3,) stoch_sim.I[i] stoch_model=MCMCModel(stoch_sim,β=Uniform(0,5), γ=Uniform(0,5)) number_of_iterations=500 stoch_model.run_mcmc(number_of_iterations,repeat=3) stoch_model.plot_chains() stoch_model.plot_distributions() stoch_sim._params['β'] stoch_sim.run(20,Nsims=100) for i in range(100): plot(stoch_sim.t,stoch_sim.I[i],'ro',alpha=0.05) plot(flut,flui,'ko',ms=10,lw=3,) ###Output _____no_output_____
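###Markdown With 100 stochastic trajectories, a percentile band summarizes the spread better than overplotting every run. A minimal sketch, assuming `stoch_sim.I` is array-like with shape (number of simulations, number of time points), as the indexing above suggests (`%pylab inline` puts `np`, `plot` and `fill_between` in the namespace): ###Code
I_runs = np.array(stoch_sim.I)
lo, med, hi = np.percentile(I_runs, [2.5, 50, 97.5], axis=0)
# median trajectory with a 95% band, plus the flu data for comparison
plot(stoch_sim.t, med, 'r-', lw=2)
fill_between(stoch_sim.t, lo, hi, color='r', alpha=0.2)
plot(flut, flui, 'ko', ms=10)
###Output _____no_output_____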
Software Engineering/Introduction to Software Engineering/Web Development/world_bank_api.ipynb
###Markdown Using APIs with Python This IPython notebook shows you how to use the World Bank API with the Python requests library. At the end there is a short exercise for you. To access APIs in Python, you can use the [requests library](http://docs.python-requests.org/en/master/user/install/). To install, you can open a terminal and type:```pip install pipenv
pipenv install requests```When you use the requests library, it's like putting an API url in a browser, except now Python does this behind the scenes programmatically and then stores the results in a variable. The classroom workspaces already have the requests library installed. Here is a demonstration of how the requests library works. ###Code import requests r = requests.get('http://api.worldbank.org/v2/countries/br/indicators/NY.GDP.MKTP.CD') r.text ###Output _____no_output_____ ###Markdown Explanation of Results ```import requests``` This line imports the requests library.```r = requests.get('http://api.worldbank.org/v2/countries/br/indicators/NY.GDP.MKTP.CD')``` The line `requests.get` sends out a "get" request to the url specified in parentheses. The HTML standard specifies different types of [request methods](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods) with the most common one being "get". A "get" request is generally reserved for obtaining data from a server. For the World Bank API, you'll only need to make "get" requests. An API's documentation will generally tell you what type of requests to make. The last line of code outputs the results.```r.text``` In this case, the output is XML data. [XML](https://www.w3schools.com/xml/xml_whatis.asp) is a language for storing and transmitting data. XML was standardized in the late 1990s. A few years later, the JSON format came along and serves a similar purpose. JSON tends to be much easier to work with and luckily, the World Bank API can return data in JSON format by specifying the format parameter. Next, you'll see how to send parameters with your get request. Sending Parameters with the Get Request This url, `http://api.worldbank.org/v2/countries/br/indicators/NY.GDP.MKTP.CD`, obtains GDP data for Brazil. You specify the country name with the [2-character ISO code](http://www.nationsonline.org/oneworld/country_code_list.htm) and then specify the economic indicator that you want, which in this case is `NY.GDP.MKTP.CD`. To see all of the possible indicators, visit the [world bank indicator dashboard](https://data.worldbank.org/indicator?tab=all) or use the API address `http://api.worldbank.org/v2/indicators`. The url syntax is specific to the World Bank API. Other APIs might be different. When making a get request, you can add parameters to the end of the url with the following syntax:`http://api.worldbank.org/v2/countries/br/indicators/NY.GDP.MKTP.CD?format=json&per_page=500&date=1990:2015`where a question mark is added at the end of the url followed by each parameter name and its value. The parameters are separated by the ampersand & sign. Adding parameters with the question mark ? and ampersand is the standard way to pass parameters in a get request. With the requests library, you can send this get request in one of two ways.
The first way specifies the entire url in a single string.```r = requests.get('http://api.worldbank.org/v2/countries/br/indicators/NY.GDP.MKTP.CD?format=json&per_page=500&date=1990:2015')```The second way separate the base url and the parameters.```payload = {'format': 'json', 'per_page': '500', 'date':'1990:2015'}r = requests.get('http://api.worldbank.org/v2/countries/br/indicators/NY.GDP.MKTP.CD', params=payload)```Why use the second version? It's somewhat easier to write programs with the second version because you can easily change the values in the payload dictionary whereas the first version involves manipulating the url string.Run the code cell below to show that both versions give the same results. ###Code r_url = requests.get('http://api.worldbank.org/v2/countries/br/indicators/NY.GDP.MKTP.CD?format=json&per_page=500&date=1990:2015') payload = {'format': 'json', 'per_page': '500', 'date':'1990:2015'} r_payload = requests.get('http://api.worldbank.org/v2/countries/br/indicators/NY.GDP.MKTP.CD', params=payload) # Test if both results are the same string r_url.text == r_payload.text ###Output _____no_output_____ ###Markdown Navigating the JSON responseOnce you have the data, what can you do with it? The requests library comes with a json decoder. In order to obtain the json response, all you have to do is use the .json() method like so:```payload = {'format': 'json', 'per_page': '500', 'date':'1990:2015'}r = requests.get('http://api.worldbank.org/v2/countries/br/indicators/NY.GDP.MKTP.CD', params=payload)r.json()```Run the cell below to see the results: ###Code payload = {'format': 'json', 'per_page': '500', 'date':'1990:2015'} r = requests.get('http://api.worldbank.org/v2/countries/br/indicators/NY.GDP.MKTP.CD', params=payload) r.json() ###Output _____no_output_____ ###Markdown The result is a list of json values, which you can iterate through to access all of the data. Every API will return data in a slightly different way, so you need to look at the data and figure out how to clean the data for your purposes. For example, in the json response, the first entry contains meta data about the results. The second entry is a list containing all of the data points. For the purposes of analyzing the data, you only need the second entry, which you can access with `r.json()[1]`. Run the code cells below to see an example of how to iterate through the data, clean the data, and then plot the results. ###Code # get the World Bank GDP data for Brazil, China and the United States payload = {'format': 'json', 'per_page': '500', 'date':'1990:2016'} r = requests.get('http://api.worldbank.org/v2/countries/br;cn;us/indicators/NY.GDP.MKTP.CD', params=payload) # put the results in a dictionary where each country contains a list of all the x values and all the y values # this will make it easier to plot the results from collections import defaultdict data = defaultdict(list) for entry in r.json()[1]: # check if country is already in dictionary. 
If so, append the new x and y values to the lists if data[entry['country']['value']]: data[entry['country']['value']][0].append(int(entry['date'])) data[entry['country']['value']][1].append(float(entry['value'])) else: # if country not in dictionary, then initialize the lists that will hold the x and y values data[entry['country']['value']] = [[],[]] # show the results contained in the data dictionary for country in data: print(country) print(data[country][0]) print(data[country][1]) print('\n') # visualize the results with matplotlib import matplotlib.pyplot as plt %matplotlib inline # create a plot for each country for country in data: plt.plot(data[country][0], data[country][1], label=country) # label the plot plt.title('GDP for Brazil, China, and USA 1990 to 2015') plt.legend() plt.xlabel('year') plt.ylabel('GDP') plt.show() ###Output _____no_output_____ ###Markdown PracticeHere is a practice exercise for you to try out. Go to the World Bank data website found [here](https://data.worldbank.org/indicator) and find the `Population growth (annual %)` indicator. If you click on the indicator link, you'll see the indicator symbol in the web URL (hint it starts with "SP").Use the requests library to find the indicator values for China and India in the years 2013, 2014 and 2015. Put the results in a dictionary in this format:{country_name: [[2013, 2014, 2015], [value_2013, value_2014, value_2015]]} ###Code # TODO: assign parameters to a dictionary called payload. # Use the format parameter so that the request # returns data in the json format. # Use the date parameter to filter for the necessary years payload = {} # TODO: write the request and put the results in the r variable r = '' # TODO: store the results in a dictionary like in the previous example. # where each the dictionary keys are a country and the dictionary # values are lists of lists with the dates in one list and the values # in another list. # possible solution for the API exercise payload = {'format': 'json', 'per_page': '500', 'date':'2013:2016'} r = requests.get('http://api.worldbank.org/v2/countries/in;cn/indicators/SP.POP.GROW', params=payload) # clean the data and put it in a dictionary data = defaultdict(list) for entry in r.json()[1]: # check if country is already in dictionary. If so, append the new x and y values to the lists if data[entry['country']['value']]: data[entry['country']['value']][0].append(int(entry['date'])) data[entry['country']['value']][1].append(float(entry['value'])) else: # if country not in dictionary, then initialize the lists that will hold the x and y values data[entry['country']['value']] = [[],[]] ###Output _____no_output_____
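###Markdown One way to sanity-check the solution above (a minimal sketch; it simply reuses the `data` dictionary built in the previous cell, mirroring the earlier "show the results" cell): ###Code # print each country's years and population growth values
for country in data:
    print(country)
    print(data[country][0])
    print(data[country][1]) ###Output _____no_output_____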
Project/Data 245 Project w Rest Days.ipynb
###Markdown Data 245: Project NBA Prediction Imports ###Code import numpy as np, scipy as sc, pandas as pd, requests
import xml.etree.ElementTree as ET
from nba_api.stats.static import players
from nba_api.stats.endpoints import playercareerstats, playergamelog, playergamelogs, fantasywidget
from nba_api.stats.endpoints import infographicfanduelplayer, playerdashboardbylastngames, playernextngames
from nba_api.stats.endpoints import commonplayerinfo, commonallplayers
import json
import time
from IPython.display import Audio ###Output _____no_output_____ ###Markdown Notes Scoring Rules:- point +1- 3pt made +0.5- rebound +1.25- assist +1.5- steal +2- turnover -0.5- 10x2 +1.5- 10x3 +3Gen restrictions:- 8 players- min 2 teams- 1x5 basic positions, 1G, 1F, 1 Util Sound (not necessary for function) ###Code ''' Audio Source: https://mixkit.co/free-sound-effects/beep/ Positive Interface Beep ''' sound_file = './sound/beep.wav' ###Output _____no_output_____ ###Markdown Injuries Dataframe ###Code ''' url = 'https://www.fantasybasketballnerd.com/service/injuries' res = requests.get(url) content = res.text tree = ET.ElementTree(ET.fromstring(content)) team = "" player = [] injuries = [] cols = ["name","injury","notes","updated"] for elem in tree.iter(): if elem.tag == "Team": team = elem.attrib['code'] elif elem.tag == "Player": player.append(team) elif elem.tag in cols: player.append(elem.text) if elem.tag == "updated": injuries.append(player) player = [] injuries cols.insert(0,"team") injuries_df = pd.DataFrame.from_records(injuries,columns=cols)''' ###Output _____no_output_____ ###Markdown Get Active Players ###Code x = commonallplayers.CommonAllPlayers().get_data_frames()[0]
active_players = x[x['TO_YEAR'] == '2020']
ap = list(active_players['PERSON_ID'])
len(ap) ###Output _____no_output_____ ###Markdown Season Stats Data Frame Building ###Code t_df = commonplayerinfo.CommonPlayerInfo(player_id=2544).get_data_frames()[0]
headers = list(t_df.columns.values)
df = pd.DataFrame(columns = headers)
e = 0
for player in ap:
    try:
        base = commonplayerinfo.CommonPlayerInfo(player_id=player, timeout=1000).get_data_frames()
        df = df.append(base[0])
        time.sleep(0.600)
    except:
        if e < 7:
            e+=1
            time.sleep(45)
            #print(e)
            base = commonplayerinfo.CommonPlayerInfo(player_id=player, timeout=1000).get_data_frames()
            df = df.append(base[0])
            continue
        else:
            print('break')
            break
Audio(sound_file, autoplay=True) f_df = fantasywidget.FantasyWidget().get_data_frames()[0] df.columns f_df.columns df = df.rename(columns={"PERSON_ID": "PLAYER_ID"})
df = df.drop(['TEAM_ID', 'TEAM_ABBREVIATION'], axis = 1)
df = df.join(f_df.set_index('PLAYER_ID'), on='PLAYER_ID') keep = ['PLAYER_ID', 'FIRST_NAME', 'LAST_NAME', 'BIRTHDATE', 'HEIGHT', 'WEIGHT', 'SEASON_EXP', 'ROSTERSTATUS', 'TEAM_ID', 'TEAM_NAME', 'TEAM_ABBREVIATION', 'TEAM_CODE', 'TEAM_CITY', 'DRAFT_YEAR', 'DRAFT_NUMBER', 'PLAYER_POSITION', 'GP', 'MIN', 'PTS', 'REB', 'AST', 'BLK', 'STL', 'TOV', 'FG3M', 'FGA', 'FG_PCT', 'FTA', 'FT_PCT'] df = df.drop([i for i in df.columns if i not in keep], axis = 1) df['DRAFT_NUMBER'].unique() dic = {}
for i in ['GP', 'MIN', 'PTS', 'REB', 'AST', 'BLK', 'STL', 'TOV', 'FG3M', 'FGA', 'FG_PCT','FTA', 'FT_PCT']:
    dic[i] = 'CURRENT_SEASON_'+i
df = df.rename(columns=dic) df['DRAFT_NUMBER'] = df['DRAFT_NUMBER'].replace({'Undrafted':61, None:61}) df.columns ###Output _____no_output_____ ###Markdown Gamelog Dictionary ###Code Game_Logs = {}
e = 0
for player in ap:
    try:
        base = playergamelog.PlayerGameLog(player_id=player, timeout=1000).get_data_frames()
        Game_Logs[player] = base[0]
        time.sleep(0.600)
    except:
        if e < 7:
            e+=1
            time.sleep(45)
            #print(e)
            base = playergamelog.PlayerGameLog(player_id=player, timeout=1000).get_data_frames()
            Game_Logs[player] = base[0]
            continue
        else:
            print('break')
            break
Audio(sound_file, autoplay=True) for player in ap:
    if len(Game_Logs[player]) == 0:
        Game_Logs.pop(player) def dkings_score(row):
    try:
        threes = row['FG3M']
        r = row['REB']
        a = row['AST']
        s = row['STL']
        b = row['BLK']
        t = row['TOV']
        p = row['PTS']
        score = 0.5*threes + 1.25*r + 1.5*a + 2*s + 2*b - 0.5*t + p
        # count the categories that reach 10; a double-double adds 1.5,
        # a triple-double (or better) adds the cumulative 4.5 instead
        dd = sum(1 for i in [p, r, a, s, b] if i>=10)
        if dd == 2:
            score += 1.5
        if dd > 2:
            score += 4.5
        return(score)
    except:
        print(row)
        pass to_be_averaged= ['MIN', 'FGM', 'FGA', 'FG3M', 'FG3A', 'FTM', 'FTA', 'OREB', 'DREB', 'AST', 'STL', 'BLK', 'TOV', 'PTS'] # Add rest days into dataframe
def resting(select_df):
    # Transform string of game_date to datetime format
    select_df['GAME_DATETIME'] = pd.to_datetime(select_df['GAME_DATE'], format='%b %d, %Y')
    select_df['GAME_DATENEW'] = select_df['GAME_DATETIME'].dt.date
    # Take diff between each date
    select_df['GAME_DATENEW']=pd.to_datetime(select_df['GAME_DATENEW'])
    select_df['rest_days'] = select_df['GAME_DATENEW'].diff()
    # Rest days column is added
    select_df['rest_days'] = select_df['rest_days'].abs()
    # drop the helper columns in place so they do not linger in the gamelog
    select_df.drop(columns=['GAME_DATETIME', 'GAME_DATENEW'], inplace=True) for frame in Game_Logs:
    x = Game_Logs[frame]
    x['DKING_SCORE'] = x.apply(lambda row: dkings_score(row), axis=1)
    x['TEAM'] = x.apply(lambda row: row['MATCHUP'][:3], axis=1)
    x['OPPOSING_TEAM'] = x.apply(lambda row: row['MATCHUP'][-3:], axis=1)
    x['HOME_AWAY'] = x.apply(lambda row: 1 if '@' in row['MATCHUP'] else 0, axis = 1)
    # assign the rename result so the win/loss flag is actually relabeled
    x = x.rename(columns={'WL': 'PREVIOUS_WL'})
    for column in to_be_averaged:
        x['SEASON_'+ column] = x[column].rolling(window = len(x), min_periods = 1).mean()
        x['5_GAME_AVG_'+ column] = x[column].rolling(window = 5, min_periods = 1).mean()
    Game_Logs[frame] = x.drop(['MIN', 'MATCHUP', 'FGM', 'FGA', 'FG_PCT', 'FG3M', 'FG3A', 'FG3_PCT', 'FTM', 'FTA', 'FT_PCT', 'OREB', 'DREB', 'REB', 'AST', 'STL', 'BLK', 'TOV', 'PF', 'PTS', 'PLUS_MINUS', 'VIDEO_AVAILABLE'], axis = 1) not_shift = ['SEASON_ID', 'Player_ID', 'Game_ID', 'GAME_DATE','DKING_SCORE', 'TEAM', 'OPPOSING_TEAM', 'HOME_AWAY'] testing = Game_Logs[ap[0]]
to_shift = [i for i in testing.columns if i not in not_shift] for frame in Game_Logs:
    for column in to_shift:
        Game_Logs[frame][column]= Game_Logs[frame][column].shift(1)
    Game_Logs[frame] = Game_Logs[frame][[c for c in Game_Logs[frame] if c != 'DKING_SCORE'] + ['DKING_SCORE']]
    resting(Game_Logs[frame]) testing.head() ###Output _____no_output_____
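###Markdown As a quick check of the scoring function defined above, here is a worked example on a hypothetical stat line (a triple-double: 30 PTS, 12 REB, 11 AST, 2 STL, 1 BLK, 3 TOV, 4 threes): the base is $30 + 0.5\cdot4 + 1.25\cdot12 + 1.5\cdot11 + 2\cdot2 + 2\cdot1 - 0.5\cdot3 = 68$, and because three categories reach 10, the function adds the cumulative 4.5 triple-double bonus for a total of 72.5. ###Code # hypothetical stat line using the same keys dkings_score reads from a gamelog row
example_row = {'FG3M': 4, 'REB': 12, 'AST': 11, 'STL': 2, 'BLK': 1, 'TOV': 3, 'PTS': 30}
dkings_score(example_row)  # expected: 72.5 ###Output _____no_output_____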
Datascience_With_Python/Machine Learning/Videos/House Prices Prediction/house_prices_prediction.ipynb
###Markdown Introduction:- Ask a home buyer to describe their dream house, and they probably won't begin with the height of the basement ceiling or the proximity to an east-west railroad. But this playground competition's dataset proves that much more influences price negotiations than the number of bedrooms or a white-picket fence.- With 79 explanatory variables describing (almost) every aspect of residential homes in Ames, Iowa, this competition challenges you to predict the final price of each home.- Link to the dataset: https://www.kaggle.com/c/neolen-house-price-prediction/data Importing the libraries ###Code import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn import metrics
from sklearn.metrics import accuracy_score
from sklearn.linear_model import Lasso df=pd.read_csv("train.csv")
df_test=pd.read_csv("test.csv") df.head(10) df.tail() df.shape df.columns sns.countplot(x='MSZoning',data=df) sns.countplot(x="Alley",data=df) sns.countplot(x='LotShape',data=df) sns.countplot(x='LandContour',data=df) sns.countplot(x='Utilities',data=df) df=pd.get_dummies(df) df.head() Y=(df.SalePrice)
X=df.copy()
X.drop(['SalePrice'], axis=1,inplace=True) X_train, X_test, y_train, y_test = train_test_split(X,Y, train_size=0.8, test_size=0.2,random_state=0) regressor = RandomForestRegressor(n_estimators=100)
regressor.fit(X_train,y_train) test_data_prediction = regressor.predict(X_test) error_score = metrics.r2_score(y_test,test_data_prediction)
print("R squared error : ", error_score) regressor= LinearRegression()
regressor.fit(X_train, y_train) y_pred= regressor.predict(X_test) print('Train Score: ', regressor.score(X_train, y_train)) model = Lasso(alpha=1.0)
model.fit(X_train,y_train) y_pred = model.predict(X_test) print('Train Score: ', model.score(X_train, y_train)) ###Output Train Score: 0.9019365719745986
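###Markdown The scores printed above are computed on the training split only; a minimal follow-up sketch (reusing the `X_test`/`y_test` split and the fitted Lasso `model` from the cells above) to check how the model generalizes: ###Code # R^2 on the held-out 20% split
print('Lasso Test Score: ', model.score(X_test, y_test)) ###Output _____no_output_____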
Activities Week 10 (SQLAlchemy)/SQLAlchemy_Day2/Day2/Day2.ipynb
###Markdown Activity 1 Instructor Turn ###Code from sqlalchemy import create_engine
from sqlalchemy import Column, Integer, String, Float from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base() class BaseballPlayer(Base):
    __tablename__ = "player"
    player_id = Column(String, primary_key=True)
    birth_year = Column(Integer)
    birth_month = Column(Integer)
    birth_day = Column(Integer)
    birth_country = Column(String)
    birth_state = Column(String)
    birth_city = Column(String)
    name_first = Column(String)
    name_last = Column(String)
    name_given = Column(String)
    weight = Column(Integer)
    height = Column(Integer)
    bats = Column(String)
    throws = Column(String)
    debut = Column(String)
    final_game = Column(String) # Create Database Connection
engine = create_engine('sqlite:///./Resources/database.sqlite')
Base.metadata.create_all(engine) from sqlalchemy.orm import Session
session = Session(bind=engine) # Print all of the player names in the database
players = session.query(BaseballPlayer)
for player in players:
    print(player.name_given) # Find the number of players from the USA
usa = session.query(BaseballPlayer).\
    filter(BaseballPlayer.birth_country == 'USA').count()
print("There are {} players from the USA".format(usa)) # Find those players who were born before 1990
born_before_1990 = session.query(BaseballPlayer).\
    filter(BaseballPlayer.birth_year < 1990).count()
print("{} players were born before 1990".format(born_before_1990)) # Find those players from the USA who were born after 1989
born_after_1989 = session.query(BaseballPlayer).\
    filter(BaseballPlayer.birth_year > 1989).filter(BaseballPlayer.birth_country == "USA").count()
print("{} USA players were born after 1989".format(born_after_1989)) ###Output 300 USA players were born after 1989 ###Markdown Students Turn Activity 2 Instructions* Use the starter file [sharks.sql](Resources/sharks.sql) and MySQL Workbench to create and populate a database called `SharkSearch`.* Within a Python script, create a "Sharks" class that will be able to read all of the columns in from the table you created* Using SQLAlchemy, perform the following queries...* Print all locations of shark attacks* Find the number of provoked attacks* Find the number of attacks in the USA* Find the number of attacks in 2017* Find the number of attacks while surfing* Find the number of fatal shark attacks in 2017 in Australia ###Code from sqlalchemy import create_engine
import pymysql
pymysql.install_as_MySQLdb() from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base() from sqlalchemy import Column, Integer, String, Float from config import dbuser, dbpasswd, dburi, dbport, dbname from sqlalchemy import create_engine
engine = create_engine(f"mysql://{dbuser}:{dbpasswd}@{dburi}:{dbport}/{dbname}") Base.metadata.create_all(engine) from sqlalchemy.orm import Session
session = Session(bind=engine) # create your shark class
# YOUR CODE HERE
class shark(Base): #can name this whatever you want
    __tablename__ = "sharks" #must match table name in database
    id = Column(Integer, primary_key=True)
    case_number = Column(String)
    date = Column(String)
    year = Column(Integer)
    type = Column(String)
    country = Column(String)
    area = Column(String)
    location = Column(String)
    activity = Column(String)
    name = Column(String)
    sex = Column(String)
    age = Column(Integer)
    injury = Column(String)
    fatal_y_n = Column(String)
    time = Column(String)
    species = Column(String)
    investigator_or_source = Column(String)
    pdf = Column(String)
    original_order = Column(Integer) # print all locations of shark attacks
# YOUR CODE HERE
locations = session.query(shark)
for place in locations:
    print(place.location) # find the number of provoked attacks
provoked = session.query(shark).filter(shark.type == "provoked").count()
print(f"Number of provoked attacks: {provoked}") # find the number of attacks in USA
usa = session.query(shark).filter(shark.country == "usa").count()
print(f"Number of attacks in USA: {usa}") # find the number of attacks in 2017
last_year = session.query(shark).filter(shark.year == "2017").count()
print(f"Number of attacks in 2017: {last_year}") # find the number of attacks while surfing
surf = session.query(shark).filter(shark.activity == "surfing").count()
print(f"Number of attacks while surfing: {surf}") # find the number of fatal attacks
fatal = session.query(shark).filter(shark.fatal_y_n == "y").count()
print(f"Number of fatal attacks: {fatal}") # find the number of fatal attacks while surfing
fatal_surf = session.query(shark).filter(shark.fatal_y_n == "y")\
    .filter(shark.activity == "surfing").count()
print(f"Number of fatal attacks while surfing: {fatal_surf}") # find the number of fatal attacks in 2017 in Australia
fatal_aussie = session.query(shark).filter(shark.fatal_y_n == "y")\
    .filter(shark.country == "Australia")\
    .filter(shark.year == "2017").count()
print(f"Number of fatal attacks in 2017 in Australia: {fatal_aussie}")
#instead of .filter(class.column) for 2nd and 3rd filter, use filter_by(column) ###Output Number of fatal attacks in 2017 in Australia: 0 ###Markdown Instructor Turn Activity 3 ###Code from sqlalchemy import create_engine, Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base() # Define our pet table
class Pet(Base):
    __tablename__ = 'pet'
    id = Column(Integer, primary_key=True)
    name = Column(String)
    type = Column(String)
    age = Column(Integer) # Right now, this table only exists in python and not in the actual database
Base.metadata.tables # Create our database engine & file
engine = create_engine('sqlite:///pets.sqlite') # This is where we create our tables in the database
Base.metadata.create_all(engine) # The ORM’s “handle” to the database is the Session.
from sqlalchemy.orm import Session
session = Session(engine) ###Output _____no_output_____ ###Markdown Create Data ###Code # Note that adding to the session does not update the table. It queues up those queries.
session.add(Pet(name='Justin Timbersnake', type='snek', age=2))
session.add(Pet(name='Pawtrick Stewart', type='good boy', age=10))
session.add(Pet(name='Godzilla', type='iguana', age=1))
session.add(Pet(name='Marshmallow', type='polar bear', age=4)) ###Output _____no_output_____ ###Markdown The data hasn't been added yet: `engine.execute('select * from pet').fetchall()` ###Code # We can use the new attribute to see the queue of data ready to go into the database
session.new # commit() flushes whatever remaining changes remain to the database, and commits the transaction.
session.commit() # Nothing new to add (checks the queue)
session.new # query the database
session.query(Pet.name, Pet.type, Pet.age).all() ###Output _____no_output_____ ###Markdown Update Data ###Code # Create a query and then run update on it
pet = session.query(Pet).filter_by(name="Marshmallow").first()
pet.age += 1 # For modifications, we can use the dirty attribute
session.dirty # persistent objects which currently have changes detected
# (this collection is now created on the fly each time the property is called) # Commit Transaction
session.commit() # Session is up-to-date because session.dirty is empty now
session.dirty session.query(Pet.id, Pet.name, Pet.type, Pet.age).all() ###Output _____no_output_____ ###Markdown Delete Data ###Code # Create a query and then delete the row collected
pet = session.query(Pet).filter_by(id=4).delete() session.commit() session.query(Pet.id, Pet.name, Pet.type, Pet.age).all() ###Output _____no_output_____ ###Markdown Students Turn Activity 4 Instructions * Within a Python file, create a new SQLAlchemy class called `Garbage` that holds the following values... * `__tablename__`: Should be "garbage_collection" * `id`: The primary key for the table that is an integer and automatically increments * `item`: A string that describes what kind of item was collected * `weight`: A double that explains how heavy the item is * `collector`: A string that lets users know which garbage man collected the item * Create a connection and a session before adding a few items into the SQLite database you crafted. * Update the values within at least two of the rows added to the table. * Delete the row with the lowest weight from the table. * Print out all of the data within the database. ###Code # Import SQL Alchemy
from sqlalchemy import create_engine # Import and establish Base for which classes will be constructed
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base() # Import modules to declare columns and column data types
from sqlalchemy import Column, Integer, String, Float # Create the Garbage class
class garbage(Base):
    __tablename__ = "garbage_collection"
    id = Column(Integer, primary_key=True)
    item = Column(String)
    weight = Column(Integer)
    collector = Column(String) # Create a connection to a SQLite database
engine = create_engine('sqlite:///garbage.sqlite') #Create the garbage_collection table within the database
Base.metadata.create_all(engine) # To push the objects made and query the server we use a Session object
from sqlalchemy.orm import Session
session = Session(bind=engine) # Create some instances of the Garbage class and add them to the session
session.add(garbage(item="couch", weight=100, collector="James"))
session.add(garbage(item="tv", weight=65, collector="Tony"))
session.add(garbage(item="lamp", weight=10, collector="James"))
session.add(garbage(item="poster", weight=1, collector="Paul")) # Check the table before committing (the adds above are only queued in the session)
engine.execute('select * from garbage_collection').fetchall() session.new session.commit() session.query(garbage.item, garbage.weight, garbage.collector).all() # Update two rows of data
update1 = session.query(garbage).filter(garbage.item == "couch").first() #.first retrieves object
update1.collector = "Paul"
update2 = session.query(garbage).filter(garbage.item == "lamp").first()
update2.weight = 5
session.commit() session.query(garbage.item, garbage.weight, garbage.collector).all() # Delete the row with the lowest weight
session.query(garbage).filter(garbage.weight == 1).delete()
session.commit() session.query(garbage.item,
garbage.weight, garbage.collector).all() # Collect all of the items and print their information
# YOUR CODE HERE ###Output _____no_output_____ ###Markdown Instructor Turn Activity 5 ###Code # Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine # Create engine using the `dow.sqlite` database file
engine = create_engine("sqlite:///./Resources/dow.sqlite") # Declare a Base using `automap_base()`
Base = automap_base() # Use the Base class to reflect the database tables
Base.prepare(engine, reflect=True) # Print all of the classes mapped to the Base
Base.classes.keys() # Assign the dow class to a variable called `Dow`
Dow = Base.classes.dow # Create a session
session = Session(engine) # Display the row's columns and data in dictionary format
first_row = session.query(Dow).first()
first_row.__dict__ #use .keys() here to show just the keys, not key-value pairs # Use the session to query Dow table and display the first 15 trade volumes
for row in session.query(Dow.stock, Dow.volume).limit(15).all():
    print(row) ###Output ('AA', 239655616) ('AA', 242963398) ('AA', 138428495) ('AA', 151379173) ('AA', 154387761) ('AA', 114691279) ('AA', 80023895) ('AA', 132981863) ('AA', 109493077) ('AA', 114332562) ('AA', 130374108) ('AA', 95550392) ('AXP', 45102042) ('AXP', 25913713) ('AXP', 38824728) ###Markdown Students Activity 6 Reflecting SQL: * Create engine using the `demographics.sqlite` database file * Declare a Base using `automap_base()` and use this new Base class to reflect the database's tables * Assign the demographics table/class to a variable called `Demographics` * Create a session and use this session to query the `Demographics` table and display the first five locations* **Bonus**: * Query and print the number of unique locations in the table.* **Hint**: * For the bonus, look into counting and grouping operations in SQLAlchemy ###Code # Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine # Create engine using the `demographics.sqlite` database file
engine = create_engine("sqlite:///./Resources/demographics.sqlite") # Declare a Base using `automap_base()`
Base = automap_base() # Use the Base class to reflect the database tables
Base.prepare(engine, reflect=True) # Print all of the classes mapped to the Base
Base.classes.keys() #name of the table in the class # Assign the demographics class to a variable called `Demographics`
Demographics = Base.classes.demographics #make table into your own variable # Create a session (needed before the queries below)
session = Session(engine) #Column rows
first_row = session.query(Demographics).first()
first_row.__dict__.keys() # Use the session to query Demographics table and display the first 5 locations
for row in session.query(Demographics.location).limit(5).all():
    print(row) # BONUS: Query and print the number of unique Locations
# Hints: Look into counting and grouping operations in SQLAlchemy
unique = session.query(Demographics).group_by(Demographics.location).count()
unique ###Output _____no_output_____ ###Markdown Instructor Turn Activity 7 ###Code # Import SQLAlchemy `automap` and other dependencies
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, inspect # Create the connection engine
engine =
create_engine("sqlite:///./Resources/dow.sqlite") # Create the inspector and connect it to the engine inspector = inspect(engine) # Collect the names of tables within the database inspector.get_table_names() # Using the inspector to print the column names within the 'dow' table and its types columns = inspector.get_columns('dow') for column in columns: print(column["name"], column["type"]) ###Output id INTEGER quarter INTEGER stock TEXT date TEXT open_price FLOAT high_price FLOAT low_price FLOAT close_price FLOAT volume INTEGER percent_change FLOAT ###Markdown Students Turn Activity 8 * Using the attached SQLite file, use an inspector to collect the following information... * The names of all of the tables within the database. * The column names and data types for the `Salaries` table. ###Code import sqlalchemy from sqlalchemy.ext.automap import automap_base from sqlalchemy.orm import Session from sqlalchemy import create_engine, inspect # Create the connection engine engine = create_engine("sqlite:///./Resources/salary.sqlite") # Create the inspector and connect it to the engine inspector = inspect(engine) # Collect the names of tables within the database inspector.get_table_names() # Using the inspector to print the column names within the 'Salaries' table and its types columns = inspector.get_columns('Salaries') for column in columns: print(column["name"], column["type"]) ###Output Id INTEGER EmployeeName TEXT JobTitle TEXT BasePay NUMERIC OvertimePay NUMERIC OtherPay NUMERIC Benefits NUMERIC TotalPay NUMERIC TotalPayBenefits NUMERIC Year INTEGER Notes TEXT Agency TEXT Status TEXT ###Markdown Students Turn Activity 9 Plotting Query Results Setup ###Code # Import Matplot lib import matplotlib from matplotlib import style style.use('seaborn') import matplotlib.pyplot as plt import pandas as pd # Import SQLAlchemy `automap` and other dependencies here import sqlalchemy from sqlalchemy.ext.automap import automap_base from sqlalchemy.orm import Session from sqlalchemy import create_engine, inspect # Create an engine for the `emoji.sqlite` database engine = create_engine("sqlite:///./Resources/emoji.sqlite", echo=False) ###Output _____no_output_____ ###Markdown Explore Database ###Code # Use the Inspector to explore the database and print the table names inspector = inspect(engine) inspector.get_table_names() # Use Inspector to print the column names and types columns = inspector.get_columns('emoji') for column in columns: print(column["name"], column["type"]) # Use `engine.execute` to select and display the first 10 rows from the emoji table engine.execute('select * from emoji where id < 11').fetchall() ###Output _____no_output_____ ###Markdown Reflect database and Query ###Code # Reflect Database into ORM class Base = automap_base() Base.prepare(engine, reflect=True) Emoji = Base.classes.emoji # Start a session to query the database session = Session(engine) ###Output _____no_output_____ ###Markdown Use Matplotlib to create a horizontal bar chart and plot the emoji score in descending order. Use emoji_char as the y-axis labels. 
Plot only the top 10 emojis ranked by score ###Code # Query Emojis for `emoji_char`, `emoji_id`, and `score` and save the query into results results = session.query(Emoji.emoji_char, Emoji.emoji_id, Emoji.score).order_by(Emoji.score.desc()).all() results ###Output _____no_output_____ ###Markdown Unpack tuples using list comprehensions ###Code # Unpack the `emoji_id` and `scores` from results and save into separate lists emoji_id = [result[1] for result in results[:10]] scores = [int(result[2]) for result in results[:10]] emoji_id ###Output _____no_output_____ ###Markdown Plot using Matplotlib ###Code # Create a horizontal bar chart and plot the `emoji_id` on the y-axis and the `score` on the x-axis # Challenge: Try to plot the scores in descending order on the graph (The largest score is at the top) plt.barh(emoji_id, scores) plt.show() ###Output _____no_output_____ ###Markdown Plot using Pandas Plotting ###Code # Load the results into a Pandas DataFrame results_df = pd.DataFrame(results) results_df # Load the results into a pandas dataframe. Set the index to the `emoji_id` df = results_df.set_index("emoji_id") df.head(10) ###Output _____no_output_____ ###Markdown Plot using Pandas ###Code # Plot the dataframe as a horizontal bar chart using pandas plotting # YOUR CODE HERE # BONUS: Use Pandas `read_sql_query` to load a query statement directly into the DataFrame # YOUR CODE HERE ###Output _____no_output_____
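###Markdown A possible solution sketch for the two unfinished cells above (one of many valid answers; it reuses the `df`, `engine`, `pd`, and `plt` objects already created in this activity): ###Code # pandas plotting: cast score to float (it may come back from SQLite as text),
# take the top 10 rows, and draw a horizontal bar chart labeled by emoji_id
df['score'].astype(float).head(10).plot.barh()
plt.xlabel('score')
plt.show()

# read_sql_query: load a SQL statement directly into a DataFrame
sql_df = pd.read_sql_query("SELECT emoji_char, emoji_id, score FROM emoji ORDER BY score DESC", engine)
sql_df.head(10) ###Output _____no_output_____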
2019-08-12_competition/regression_competition.ipynb
###Markdown Energy price forecasting competitionThis notebook has two purposes:1. Explain the data to be used in the energy price forecasting competition 2. Provide a template for importing the data and uploading results to the evaluation server Background Energy prices in two-day US energy markets are made up of two distinct components:1. A cost of energy; this is constant across all locations on the power grid within an hour, but varies hour by hour 2. A location-specific variable cost. The two-day structure of the market is such that participants bid each morning how much power they are willing to buy or sell at specific locations in each hour of the following day. The power grid operator aggregates these individual bids and produces a market clearing price at each hour and location such that total supply and demand can be met. Depending on where an individual market participant's bid prices were relative to the market clearing price, they will either be awarded bids or not. The next day when the bids are "active", unforeseen circumstances often arise and the actual price of electricity at the specific locations will vary from the price announced by the power grid operator. The price that was declared by the grid operator is referred to as the day ahead price, and the price that prevails when bids are active in the next day is called the real time price. Your task is to use fundamental data from a US power grid to formulate 1-day ahead hourly forecasts for the day ahead and real time cost of energy (the component of prices that is constant across the entire power grid). For each day you will report a projected day-ahead marginal cost of energy (acronym `damce`) and a real time marginal cost of energy (acronym `rtmce`) for each of the 24 hours in the next day ###Code import pandas as pd
import requests ###Output _____no_output_____ ###Markdown Let's now import the data and describe various properties of it ###Code kw = dict(parse_dates=["date"], index_col=["date", "hour"])
train_X = pd.read_csv("train_X.csv", **kw)
train_y = pd.read_csv("train_y.csv", **kw)
test_X = pd.read_csv("test_X.csv", **kw)
weather = pd.read_csv("weather_data.csv", **kw) train_X.head() ###Output _____no_output_____ ###Markdown Notice that the data is given at an hourly frequency. Let's get more info on all the columns: ###Code train_X.info() train_X.loc[train_X.isna().any(axis=1),:] ###Output _____no_output_____ ###Markdown The columns are:- `damce`: day ahead marginal cost of energy (units dollars)- `rtmce`: real time marginal cost of energy (units dollars)- `load`: total load (demand for energy) across the power grid (units MWh)- `zone_1_wind_production`: total production of energy from wind farms in zone 1 (units MWh)- `zone_2_wind_production`: total production of energy from wind farms in zone 2 (units MWh)- `zone_3_wind_production`: total production of energy from wind farms in zone 3 (units MWh)- `zone_4_wind_production`: total production of energy from wind farms in zone 4 (units MWh)- `zone_5_wind_production`: total production of energy from wind farms in zone 5 (units MWh)- `neighbor_region_1_load`: total demand for energy in region 1 of a neighboring electricity market (units MWh)- `neighbor_region_2_load`: total demand for energy in region 2 of a neighboring electricity market (units MWh)- `neighbor_region_3_load`: total demand for energy in region 3 of a neighboring electricity market (units MWh)- `wind`: total amount of energy produced from wind farms (units MWh)- `natural_gas`: total amount of energy produced from natural gas plants (units MWh)- `nuclear`: total amount of energy produced from nuclear power plants (units MWh)- `coal`: total production of energy from coal plants (units MWh)Note that there is some **missing data**. You WILL have to determine how to handle this. Let's look at the targets: ###Code train_y.head() ###Output _____no_output_____ ###Markdown The targets are stored in a two-column DataFrame: target1 is the `damce` and target2 is the `rtmce`. Note that the target data has been shifted forward by two full days to account for the availability of data each morning before the market participants submit their bids. The two-day time shift is necessary because if I were submitting bids on 2019-08-06, I would only have access to data through 2019-08-05, but would be submitting bids that are active in the real time market on 2019-08-07 There is also another set of data imported into the `weather` variable. Let's take a look at that ###Code weather.info() weather.head() ###Output <class 'pandas.core.frame.DataFrame'> MultiIndex: 9260 entries, (2018-01-02 00:00:00, 1) to (2019-08-07 00:00:00, 22) Data columns (total 18 columns): temp_KC 9260 non-null float64 temp_KS 9260 non-null float64 temp_MT 9260 non-null float64 temp_ND 9260 non-null float64 temp_OK 9260 non-null float64 temp_SD 9260 non-null float64 wind_east_KC 9260 non-null float64 wind_east_KS 9260 non-null float64 wind_east_MT 9260 non-null float64 wind_east_ND 9260 non-null float64 wind_east_OK 9260 non-null float64 wind_east_SD 9260 non-null float64 wind_north_KC 9260 non-null float64 wind_north_KS 9260 non-null float64 wind_north_MT 9260 non-null float64 wind_north_ND 9260 non-null float64 wind_north_OK 9260 non-null float64 wind_north_SD 9260 non-null float64 dtypes: float64(18) memory usage: 1.3 MB ###Markdown This DataFrame has hourly weather forecasts for locations in several of the states in the power grid we are studying. The columns are named `(variable)_(XX)` where `variable` is shorthand for the variable and `XX` is the two letter abbreviation of the state. The variables are:- `temp`: temperature in degrees Fahrenheit- `wind_east`: the magnitude of wind flow in the east direction in miles per hour- `wind_north`: the magnitude of wind flow in the north direction in miles per hourWe did not include the columns of this DataFrame in `train_X` or `test_X` because it is not available for all hours of the day: ###Code weather.reset_index()["hour"].value_counts().sort_index() ###Output _____no_output_____ ###Markdown There should be 579 hours for all days, but there is not for two reasons:1. The hourly forecasts turn to 3-hourly forecasts between 1 and 2 PM each day 2. The time shift that occurs due to daylight savings time causes some hours to appear only in winter months and some to appear only in summer months (e.g. hour 19 shows up in the winter whereas hour 18 appears in the summer)This data is likely helpful and informative for your task, but if you desire to use it you will have to come up with a strategy for handling the missing hours in this dataset relative to what is in `train_X` and `test_X`. Note that because these are weather forecasts, you are permitted to join them with the `train_X` and `test_X` (on the date, hour columns) DataFrame and use them without worrying about whether the data would be available at market participant bid deadline time Competition rules Your task is to use data included in `train_X` (and potentially `weather`) to construct a regression model that predicts the day-ahead and real-time marginal cost of energy one day forward. The targets are already computed for you in `train_y`, so you do not need to worry about shifting data yourself. This is inherently a time-series task, but you can apply non-time series methods without a problem (in fact, time series methods are more advanced/difficult, so we recommend starting with classic regression algorithms). Because of the time series nature of the problem, you could potentially look in `train_X` and find the corresponding values for `train_y`. If you figure out the pattern you could apply it to `test_X` and exactly produce some values for `test_y`. Please do not do this -- you won't learn. We will review all code used to make submissions and will disqualify any submissions that "cheat" in this way. You are permitted (encouraged) to work in teams. There is no limit on the number of responses you can submit. In order to submit responses we have created a function `upload_response` below. Please read the documentation for how this function works. As an example of usage, the code below would make a properly formatted submission:
```python
predictions = np.random.randn(test_X.shape[0], 2)
upload_response("Gryffindor", predictions)
```
The performance of all submitted responses will be evaluated using the MSE loss function. ###Code def upload_response(team_name, predictions):
    """
    Upload a response to evaluation server and return feedback

    Parameters
    ==========
    team_name: string
        A string representing your team name. This will appear on the
        leaderboard and will be used to identify the winning team

    predictions: pd.DataFrame or numpy array or list of lists
        A 2-dimensional numpy array, pandas DataFrame, or list of lists
        containing the predictions. The shape of this object MUST have
        two columns and the same number of rows as test_X.

    Returns
    =======
    rank: int
        The rank of the current submission, relative to all others
        that have been recieved

    leaderboard: pd.DataFrame
        A pandas DataFrame representing a leaderboard of the top 50
        responses recieved so far
    """
    import numpy as np
    import requests
    import pandas as pd
    url = "http://jupyter.valorumdata.com:5000/submit"
    payload = dict(name=team_name, prediction=np.asarray(predictions).tolist())
    res = requests.post(url, json=payload)
    if not res.ok:
        msg = res.content
        raise ValueError("Failed with message: {}".format(msg))
    print("Response successfully submitted")
    data = res.json()
    rank = data["rank"]
    print("Your current rank is {}".format(rank))
    leaderboard = pd.DataFrame(res.json()["leaders"])
    leaderboard["timestamp"] = pd.to_datetime(leaderboard["timestamp"])
    return rank, leaderboard ###Output _____no_output_____ ###Markdown Workspace Ok, that's it! Let's get to work. Do your best to build the winning model. Good luck!
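###Markdown For reference, the evaluation criterion mentioned above is the usual mean squared error; assuming (as scikit-learn's `metrics.mean_squared_error` does by default) an average over both target columns, it is $$\mathrm{MSE} = \frac{1}{2n}\sum_{i=1}^{n}\sum_{k=1}^{2}\left(y_{ik} - \hat{y}_{ik}\right)^2,$$ where $n$ is the number of (date, hour) rows, $k$ indexes the `damce` and `rtmce` targets, and $\hat{y}$ is your prediction. (The exact weighting used by the evaluation server is not stated in this notebook, so treat this as a reasonable reading rather than a guarantee.)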
###Code from sklearn import preprocessing, pipeline, linear_model, metrics, svm, multioutput, neural_network for _df in [train_X, test_X]: _df["nonwind"] = _df.eval("load - wind") train_X.describe().T model1 = linear_model.LinearRegression(fit_intercept=False) X1 = train_X[["nonwind"]].ffill(limit=4).bfill(limit=4) X1_test = test_X[["nonwind"]].ffill(limit=1) model1.fit(X1, train_y) metrics.mean_squared_error(model1.predict(X1), train_y) upload_response("sglyon-baseline", model1.predict(X1_test)) X2 = train_X.ffill(limit=4).bfill(limit=4) X2_test = test_X.ffill(limit=1) model2 = pipeline.make_pipeline( preprocessing.StandardScaler(), linear_model.MultiTaskElasticNetCV(cv=12) ) model2.fit(X2, train_y) pd.DataFrame(model2.steps[-1][-1].coef_, columns=list(X2)).T metrics.mean_squared_error(model2.predict(X2), train_y) upload_response("sglyon-enet_full", model2.predict(X2_test)) def transform3(_df): out = _df.copy() out["rtda_mce"] = out.eval("rtmce - damce") return out X3 = transform3(train_X).ffill(limit=4).bfill(limit=4) X3_test = transform3(test_X).ffill(limit=1) from copy import deepcopy model3 = pipeline.make_pipeline( preprocessing.StandardScaler(), linear_model.MultiTaskElasticNetCV(cv=10) ) model3.fit(X3, train_y) metrics.mean_squared_error(model3.predict(X3), train_y) upload_response("sglyon-enet-rtdamce", model3.predict(X3_test)) train_X.index.get_level_values("hour") >= 7 train_X.describe().T def transform4(_df): out = _df.copy() out["is_weekend"] = out.index.get_level_values("date").dayofweek >= 5 _hr = out.index.get_level_values("hour") out["is_peak"] = (_hr >= 7) & (_hr <= 21) return out.astype(float) X4 = transform4(train_X).ffill(limit=4).bfill(limit=4) X4_test = transform4(test_X).ffill(limit=1) model4 = pipeline.make_pipeline( preprocessing.StandardScaler(), linear_model.MultiTaskElasticNetCV(cv=10) ) model4.fit(X4, train_y) metrics.mean_squared_error(model4.predict(X4), train_y) upload_response("sglyon-enet-weekend-peak", model4.predict(X4_test)) ###Output /opt/anaconda3/lib/python3.7/site-packages/sklearn/pipeline.py:331: DataConversionWarning: Data with input dtype bool, float64 were all converted to float64 by StandardScaler. 
Xt = transform.transform(Xt) ###Markdown nonlinear-ml ###Code from sklearn import tree, ensemble, model_selection def transform5(_df):
    out = _df.copy()
    out["is_weekend"] = out.index.get_level_values("date").dayofweek >= 5
    _hr = out.index.get_level_values("hour")
    out["is_peak"] = (_hr >= 7) & (_hr <= 21)
    return out

# X5 = transform4(train_X).ffill(limit=4).bfill(limit=4)
# X5_test = transform4(test_X).ffill(limit=1) model5_base = pipeline.Pipeline([
    ("scale", preprocessing.StandardScaler()),
    ("tree", tree.DecisionTreeRegressor(max_depth=6, min_samples_leaf=0.01))
])
param_grid5 = dict(
    tree__max_depth=[2, 6, 10],
    tree__min_samples_leaf=[1, 0.01, 0.05, 0.1]
)
model5 = model_selection.GridSearchCV(model5_base, param_grid5, cv=10) model5.fit(X2, train_y) model5_base.fit(X2, train_y) print(metrics.mean_squared_error(model5.predict(X2), train_y))
print(metrics.mean_squared_error(model5_base.predict(X2), train_y)) upload_response("sglyon-dtree", model5_base.predict(X2_test)) def transform6(_df):
    out = _df.copy()
    out["is_weekend"] = out.index.get_level_values("date").dayofweek >= 5
    _hr = out.index.get_level_values("hour")
    out["is_peak"] = (_hr >= 7) & (_hr <= 21)
    return out.astype(float)

X6 = transform6(train_X).ffill(limit=4).bfill(limit=4)
X6_test = transform6(test_X).ffill(limit=1) model6 = pipeline.Pipeline([
    ("scale", preprocessing.StandardScaler()),
    ("tree", tree.DecisionTreeRegressor(max_depth=6, min_samples_leaf=0.01))
])
model6.fit(X6, train_y)
print(metrics.mean_squared_error(model6.predict(X6), train_y)) upload_response("sglyon-dtree-peak-weekend", model6.predict(X6_test)) def transform7(_df):
    out = _df.copy()
    out["is_weekend"] = out.index.get_level_values("date").dayofweek >= 5
    _hr = out.index.get_level_values("hour")
    out["is_peak"] = (_hr >= 7) & (_hr <= 21)
    out["rtda"] = out.eval("rtmce - damce")
    return out.astype(float)

X7 = transform7(train_X).ffill(limit=4).bfill(limit=4)
X7_test = transform7(test_X).ffill(limit=1) model7 = pipeline.make_pipeline(
    preprocessing.StandardScaler(),
    ensemble.RandomForestRegressor(max_depth=6, min_samples_leaf=0.01, max_features="sqrt", n_estimators=600)
)
model7.fit(X7, train_y)
print(metrics.mean_squared_error(model7.predict(X7), train_y)) upload_response("class-forest-rtda-peak-weekend", model7.predict(X7_test)) train_X.shape all_df = train_X.join(weather, how="left")
all_df_test = test_X.join(weather, how="left") X8_test = transform4(all_df_test).ffill(limit=24).bfill(limit=24) X8 = transform4(all_df).ffill(limit=30).bfill(limit=30) model8 = pipeline.make_pipeline(
    preprocessing.StandardScaler(),
    linear_model.MultiTaskElasticNetCV(cv=10)
)
model8.fit(X8, train_y) metrics.mean_squared_error(model8.predict(X8), train_y) upload_response("class-enet-weather-peak-weekend-real", model8.predict(X8_test)) def transform9(df_train, df_test):
    df = pd.concat([df_train, df_test]).reset_index()
    dt = df["date"] + pd.Timedelta(hours=1)*(df["hour"] - 1)
    df_with_dt = df.assign(dt=dt).set_index("dt").sort_index()
    date_hour = df_with_dt[["date", "hour"]]
    df_with_dt = df_with_dt.drop(["date", "hour"], axis=1)
    rolling_mean = (
        df_with_dt
        .rolling("14D")
        .mean()
    )
    # fill the first day's NaNs using the following day's rolling mean shifted back one day
    jan1_filler = rolling_mean.loc["2018-01-02", :].shift(-1, freq="D")
    rolling_mean_full = rolling_mean.fillna(jan1_filler)
    output = df_with_dt.fillna(rolling_mean_full)

    # add back in date and hour columns
    output["date"] = date_hour["date"]
    output["hour"] = date_hour["hour"]

    # split into train and test
    out_original_index = (
        output.reset_index(drop=True)
        .set_index(["date", "hour"])
    )
    train_X = out_original_index.loc[df_train.index, :]
    test_X = out_original_index.loc[df_test.index, :]

    return train_X, test_X df_9, df9_test = transform9(train_X, test_X) X9 = transform4(df_9)
X9_test = transform4(df9_test) model9 = pipeline.make_pipeline(
    preprocessing.StandardScaler(),
    linear_model.MultiTaskElasticNetCV(cv=10)
)
model9.fit(X9, train_y) metrics.mean_squared_error(model9.predict(X9), train_y) upload_response("class-enet-weather-peak-weekend-rolling", model9.predict(X9_test)) from pandas.tseries.offsets import MonthEnd  # needed before transform10 runs

def transform10(df_train, df_test):
    df = pd.concat([df_train, df_test]).reset_index()
    monthly_mean = (
        df
        .groupby(pd.Grouper(key="date", freq="M"))
        .mean()
    )
    # map each row's date to the month-end label used by the groupby above
    df["merge_month"] = df["date"] - pd.Timedelta(days=1) + MonthEnd(1)
    output = (
        df.merge(monthly_mean, left_on="merge_month", right_index=True)
        .set_index(["date", "hour"])
    )
    train_X = output.loc[df_train.index, :]
    test_X = output.loc[df_test.index, :]
    return train_X, test_X moms = transform10(train_X, test_X) from pandas.tseries.offsets import MonthEnd train_X.index.get_level_values("date") ###Output _____no_output_____
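###Markdown Because this is a time-series problem, the plain k-fold cross-validation used in the models above can leak future information into the fitting folds; below is a minimal alternative sketch (not part of the original analysis) using scikit-learn's `TimeSeriesSplit`, reusing the `X2` features built earlier: ###Code from sklearn.model_selection import TimeSeriesSplit

# expanding-window splits: each validation fold lies strictly after its training fold
tscv = TimeSeriesSplit(n_splits=5)
model_ts = pipeline.make_pipeline(
    preprocessing.StandardScaler(),
    linear_model.MultiTaskElasticNetCV(cv=tscv),
)
model_ts.fit(X2, train_y)
print(metrics.mean_squared_error(model_ts.predict(X2), train_y)) ###Output _____no_output_____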
8-Labs/Lab01/Lab01.ipynb
###Markdown **Download** (right-click, save target as ...) this page as a jupyterlab notebook from:[Laboratory 1](http://54.243.252.9/engr-1330-webroot/8-Labs/Lab01/Lab01.ipynb)___ Laboratory 1: A Notebook Like No Other!**LAST NAME, FIRST NAME****R00000000**ENGR 1330 Laboratory 1 - In-Lab Welcome to your first (or second) Jupyter Notebook. This is the medium that we will be using throughout the semester. ___**Why is this called a notebook?** Because you can write stuff in it!**Is that it?** Nope! You can **write** and **run** CODE in this notebook! Plus a bunch of other cool stuff such as making graphs, running tests and simulations, adding images, and building documents (such as this one!). The Environment - Let's have a look around this window!![](https://i.pinimg.com/originals/db/db/ba/dbdbbad5798bfc3ff27a499dc5ca2b30.gif) *Rami Malek in Mr. Robot* - The tabs: - File - Edit - View - Insert - Cell - Kernel - The Icons: - Save - Insert Cell Below - Cut - Copy - Paste Cells Below - Move Up - Move Down - Run - Interrupt Kernel - Restart Kernel - Cell Type Selector (Dropdown list)___The notebook consists of a sequence of cells. A cell is a multiline text input field, and its contents can be executed by using Shift-Enter, or by clicking Run in the menu bar. The execution behavior of a cell is determined by the cell’s type. There are three types of cells: code cells, markdown cells, and raw cells. Every cell starts off being a code cell, but its type can be changed by using a drop-down on the toolbar (which will be “Code”, initially). Code Cells:A code cell allows you to edit and write new code, with full syntax highlighting and tab completion. The programming language you use depends on the kernel. What we will use for this course, and the default that the IPython kernel runs, is Python code.When a code cell is executed, code that it contains is sent to the kernel associated with the notebook. The results that are returned from this computation are then displayed in the notebook as the cell’s output. The output is not limited to text; many other forms of output are also possible, including matplotlib figures and HTML tables. This is known as IPython’s rich display capability. Markdown Cells:You can document the computational process in a literate way, alternating descriptive text with code, using rich text. In IPython this is accomplished by marking up text with the Markdown language. The corresponding cells are called Markdown cells. The Markdown language provides a simple way to perform this text markup, that is, to specify which parts of the text should be emphasized (italics), bold, form lists, etc. In fact, markdown cells allow a variety of cool modifications to be applied: If you want to provide structure for your document, you can use markdown headings. Markdown headings consist of 1 to 5 hash signs followed by a space and the title of your section. (The markdown heading will be converted to a clickable link for a section of the notebook. It is also used as a hint when exporting to other document formats, like PDF.)
Here is how it looks: # title ## major headings ### subheadings #### 4th level subheadings ##### 5th level subheadings These codes are also quite useful: - Use triple " * " before and after a word (without spacing) to make the word bold and italic B&I: ***string*** - __ or ** before and after a word (without spacing) to make the word bold Bold: __string__ or **string** - _ or * before and after a word (without spacing) to make the word italic Italic: _string_ or *string* - Double ~ before and after a word (without spacing) to make the word scratched Scratched: ~~string~~ - For line breaks, use the HTML tag `<br>` in the middle of your text - For colors use this code: ###Code ### change this to a markdown cell and run it! <font color=blue>Text</font> <br> <font color=red>Text</font> <br> <font color=orange>Text</font> <br> ###Output _____no_output_____ ###Markdown - For indented quoting, use a greater than sign (>) and then a space, then type the text. The text is indented and has a gray horizontal line to the left of it until the next carriage return.> here is an example of how it works!- For bullets, use the dash sign (- ) with a space after it, or a space, a dash, and a space ( - ), to create a circular bullet. To create a sub bullet, use a tab followed by a dash and a space. You can also use an asterisk instead of a dash, and it works the same.- For numbered lists, start with 1. followed by a space, then it starts numbering for you. Start each line with some number and a period, then a space. Tab to indent to get subnumbering. 1. first 2. second 3. third 4. ...- For horizontal lines: Use three asterisks: ************- For graphics, you can attach image files directly to a notebook only in Markdown cells. Drag and drop your images to the Markdown cell to attach it to the notebook.![Anaconda.jpg](attachment:Anaconda.jpg)- You can also use images from online sources by using this format:![](put the image address here.image format) Raw Cells:Raw cells provide a place in which you can write output directly. Raw cells are not evaluated by the notebook. ###Code Thi$ is a raw ce11 ###Output _____no_output_____ ###Markdown Let's meet the world's most popular python!![](https://media2.giphy.com/media/KAq5w47R9rmTuvWOWa/giphy.gif) What is python?> "Python is an interpreted, high-level and general-purpose programming language. Python's design philosophy emphasizes code readability with its notable use of significant whitespace." - Wikipedia @ [https://en.wikipedia.org/wiki/Python_(programming_language)](https://en.wikipedia.org/wiki/Python_(programming_language)) How to have access to it?There are plenty of ways, from online compilers to our beloved Jupyter Notebook on your local machines. Here are a few examples of online compilers: a. https://www.programiz.com/python-programming/online-compiler/ b. https://www.onlinegdb.com/online_python_compiler c. https://www.w3schools.com/python/python_compiler.asp d. https://repl.it/languages/python3 We can do the exact same thing in this notebook. But we need a CODE cell. ###Code print("Hello World") ###Output Hello World ###Markdown This is the classic "first program" of many languages! The script input is quite simple: we instruct the computer to print the literal string "hello world" to the standard input/output device, which is the console. Let's change it and see what happens: ###Code print("This is my first notebook!") ###Output This is my first notebook!
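###Markdown Code cells can hold more than a single `print` call — here is a small illustrative example (not part of the original lab; the variable names are made up): ###Code # variables keep their values between cells once a cell has been run
course = "ENGR 1330"
lab_number = 1
print("Welcome to", course, "Lab", lab_number) ###Output Welcome to ENGR 1330 Lab 1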
###Markdown How to save a notebook?- __As a notebook file (.ipynb):__ Go to File > Download As > Notebook (.ipynb) - __As an HTML file (.html):__ Go to File > Download As > HTML (.html) - __As a PDF (.pdf):__ Go to File > Download As > PDF via LaTeX (.pdf) or Save it as an HTML file and then convert that to a pdf via a website such as https://html2pdf.com/ (A command-line alternative for exporting is sketched at the end of this notebook.) *Unless stated otherwise, we want you to submit your lab assignments in PDF and your exam and project deliverables in both PDF and .ipynb formats.*___ Readings*This notebook was inspired by several blogposts including:* - __"Markdown for Jupyter notebooks cheatsheet"__ by __Inge Halilovic__ available at *https://medium.com/@ingeh/markdown-for-jupyter-notebooks-cheatsheet-386c05aeebed - __"Jupyter Notebook: An Introduction"__ by __Mike Driscoll__ available at *https://realpython.com/jupyter-notebook-introduction/ *Here are some great reads on this topic:* - __"Jupyter Notebook Tutorial: The Definitive Guide"__ by __Karlijn Willems__ available at *https://www.datacamp.com/community/tutorials/tutorial-jupyter-notebook - __"Introduction to Jupyter Notebooks"__ by __Quinn Dombrowski, Tassie Gniady, and David Kloster__ available at *https://programminghistorian.org/en/lessons/jupyter-notebooks - __"12 Things to know about Jupyter Notebook Markdown"__ by __Dayal Chand Aichara__ available at *https://medium.com/game-of-data/12-things-to-know-about-jupyter-notebook-markdown-3f6cef811707 *Here are some great videos on these topics:* - __"Jupyter Notebook Tutorial: Introduction, Setup, and Walkthrough"__ by __Corey Schafer__ available at *https://www.youtube.com/watch?v=HW29067qVWk - __"Quick introduction to Jupyter Notebook"__ by __Michael Fudge__ available at *https://www.youtube.com/watch?v=jZ952vChhuI - __"What is Jupyter Notebook?"__ by __codebasics__ available at *https://www.youtube.com/watch?v=q_BzsPxwLOE ___ Exercise: Let's see who you are! Similar to the hello world example, use a code cell and print a paragraph about you. You can introduce yourself and write about interesting things to and about you! A few lines below can get you started; replace the ... parts and the other parts to make your paragraph. ###Code print('my name is ...') print('my favorite food is ...') print('I am currently studying to be an ... engineer') print('I speak 3 languages, they are: language 1, language 2, and of course profanity') ###Output my name is ... my favorite food is ... I am currently studying to be an ... engineer I speak 3 languages, they are: language 1, language 2, and of course profanity
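###Markdown As referenced in the saving section above, a command-line alternative for exporting this notebook (an added sketch; it assumes Jupyter is installed locally, and the PDF route also needs a LaTeX installation): ###Code # run from a notebook cell (the leading ! sends the command to the shell)
!jupyter nbconvert --to html Lab01.ipynb
!jupyter nbconvert --to pdf Lab01.ipynb ###Output _____no_output_____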
benchmarks/2samp_simulations.ipynb
###Markdown Simulations These are some useful functions to import. Since we are calculating the statistical power over all the tests for all the simulations, we can just use a wildcard import from the respective modules ###Code import numpy as np import matplotlib.pyplot as plt from tqdm.notebook import tqdm from hyppo.sims import * import seaborn as sns sns.color_palette('Set1') sns.set(color_codes=True, style='white', context='talk', font_scale=2) ###Output _____no_output_____
###Markdown These are some constants that are used in this notebook. If you are running this notebook, please change these constants only if you intend to run different tests. They define the sample sizes tested and the number of replications. The simulations and the independence tests under study are also defined here. ###Code NOISY = 100 NO_NOISE = 1000 indep_sims = [ (linear, "Linear"), (exponential, "Exponential"), (cubic, "Cubic"), (joint_normal, "Joint Normal"), (step, "Step"), (quadratic, "Quadratic"), (w_shaped, "W-Shaped"), (spiral, "Spiral"), (uncorrelated_bernoulli, "Bernoulli"), (logarithmic, "Logarithmic"), (fourth_root, "Fourth Root"), (sin_four_pi, "Sine 4\u03C0"), (sin_sixteen_pi, "Sine 16\u03C0"), (square, "Square"), (two_parabolas, "Two Parabolas"), (circle, "Circle"), (ellipse, "Ellipse"), (diamond, "Diamond"), (multiplicative_noise, "Multiplicative"), (multimodal_independence, "Independence") ] ###Output _____no_output_____
###Markdown The following code plots, for each simulation, the original (noise-free where applicable) sample overlaid with a rotated copy of it — the construction used to create the two-sample setting. For the specific equations for each simulation, please refer to the documentation about each simulation. ###Code N = 500 P = 1 DEGREE = 60 def _normalize(x, y): """Normalize input data matricies.""" x = x / np.max(np.abs(x)) y = y / np.max(np.abs(y)) return x, y def _2samp_rotate(sim, x, y, p, degree=90, pow_type="samp"): angle = np.radians(degree) data = np.hstack([x, y]) same_shape = [ "joint_normal", "logarithmic", "sin_four_pi", "sin_sixteen_pi", "two_parabolas", "square", "diamond", "circle", "ellipse", "multiplicative_noise", "multimodal_independence", ] if sim.__name__ in same_shape: rot_shape = 2 * p else: rot_shape = p + 1 rot_mat = np.identity(rot_shape) if pow_type == "dim": if sim.__name__ not in [ "exponential", "cubic", "spiral", "uncorrelated_bernoulli", "fourth_root", "circle", ]: for i in range(rot_shape): mat = np.random.normal(size=(rot_shape, 1)) mat = mat / np.sqrt(np.sum(mat ** 2)) if i == 0: rot = mat else: rot = np.hstack([rot, mat]) rot_mat, _ = np.linalg.qr(rot) if (p % 2) == 1: rot_mat[0] *= -1 else: rot_mat[np.ix_((0, -1), (0, -1))] = np.array( [[np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)]] ) elif pow_type == "samp": rot_mat[np.ix_((0, 1), (0, 1))] = np.array( [[np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)]] ) else: raise ValueError("pow_type not a valid flag ('dim', 'samp')") rot_data = (rot_mat @ data.T).T if sim.__name__ in [ "joint_normal", "logarithmic", "sin_four_pi", "sin_sixteen_pi", "two_parabolas", "square", "diamond", "circle", "ellipse", "multiplicative_noise", "multimodal_independence", ]: x_rot, y_rot = np.hsplit(rot_data, 2) else: x_rot, y_rot = np.hsplit(rot_data, [-1]) return x_rot, y_rot def plot_sims(): fig, ax = plt.subplots(nrows=4, ncols=5, figsize=(28,24)) count = 0 sim_markers = [ "+", "x" ] custom_color = [ "#969696", "#525252" ] for i, row in enumerate(ax): for j, col in enumerate(row): count = 5*i + j sim, sim_title = indep_sims[count] if sim.__name__ == "multimodal_independence": x1, y1 = sim(N, P) x2, y2 = sim(N, P) else: if sim.__name__ == "multiplicative_noise": x1, y1 = sim(N, P) else: x1, y1 = sim(N, P, noise=False) if sim.__name__ not in ["linear", "exponential", "cubic"]: pass else: x1, y1 = _normalize(x1, y1) x2, y2 = _2samp_rotate(sim, x1, y1, P, degree=DEGREE, pow_type="samp") col.scatter(x1, y1, marker="+", color="#969696", label="Original Sample") col.scatter(x2, y2, marker="x", color="#525252", label="Rotated Sample") col.set_title('{}'.format(sim_title), fontsize=35) col.set_xticks([]) col.set_yticks([]) if sim.__name__ == "multimodal_independence": col.set_ylim([-2, 2]) sns.despine(left=True, bottom=True, right=True) leg = plt.legend(bbox_to_anchor=(0.5, 0.1), bbox_transform=plt.gcf().transFigure, ncol=5, loc='upper center') leg.get_frame().set_linewidth(0.0) for legobj in leg.legendHandles: legobj.set_linewidth(3.0) plt.subplots_adjust(hspace=.5) plt.savefig('../benchmarks/figs/2samp_simulations.pdf', transparent=True, bbox_inches='tight') plot_sims() ###Output _____no_output_____
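###Markdown The `_2samp_rotate` helper above reduces, at its core, to multiplying the stacked samples by a standard 2-D rotation matrix. Here is a minimal sketch of just that operation on a few hypothetical points (not part of the benchmark): ###Code
import numpy as np

theta = np.radians(60)  # same angle as DEGREE above
rot = np.array([[np.cos(theta), -np.sin(theta)],
                [np.sin(theta),  np.cos(theta)]])

points = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])  # hypothetical sample
rotated = (rot @ points.T).T  # each row is the rotated version of the same row in points
print(rotated)
###Output _____no_output_____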
File 4a - Time evolution - TEBD.ipynb
###Markdown TIME EVOLUTION ###Code # file: mps/evolution.py import numpy as np import scipy.linalg from numbers import Number import mps.state import scipy.sparse as sp from mps.state import _truncate_vector, DEFAULT_TOLERANCE ###Output _____no_output_____
###Markdown Suzuki-Trotter Decomposition In the Suzuki-Trotter decomposition, the Hamiltonian of nearest-neighbor couplings can be decomposed into two non-commuting parts, $H_{\text{odd}} $ and $ H_{\text{even}} $, so that all additive 2-site operators in each part commute with each other.Let us consider a simple example of a tight-binding model with on-site potential and decompose the Hamiltonian into 2-site terms, so that $H=\sum_i h_{i,i+1}$. \begin{equation}h_{i,i+1} = \left(\frac{\omega}{2} a_i^\dagger a_i \right) + \left(\frac{\omega}{2} a_{i+1}^\dagger a_{i+1} \right) - \left( t a_{i}^\dagger a_{i+1} + \text{h.c.} \right).\end{equation}Since $[h_{i,i+1},h_{i+2,i+3}] = 0$, we can group these terms for even and odd $i$, so that $H = H_{\text{odd}} + H_{\text{even}} $. Note that the local term $ a_i^\dagger a_i$ of the boundary sites $i=1$ and $i=N$ appears in only one of the groups. Therefore we need to add the two on-site terms $h_1 = \left(\frac{\omega}{2} a_1^\dagger a_1 \right) $ and $h_N = \left(\frac{\omega}{2} a_N^\dagger a_N \right) $ to the corresponding two-site terms, so that $h_{1,2} \rightarrow h_{1,2} + h_1$, and $h_{N-1,N} \rightarrow h_{N-1,N} + h_N$.And for the first-order Suzuki-Trotter decomposition, the evolution operator becomes\begin{equation}e^{-i \hat{H} \Delta t} = e^{-i \hat{H}_{\text{odd}} \Delta t} e^{-i \hat{H}_{\text{even}} \Delta t} + O(\Delta t^2).\end{equation} `pairwise_unitaries` creates a list of Trotter unitaries corresponding to two-site operators, $U_{i,i+1} = e^{-i h_{i,i+1} \Delta t}$. The Trotter unitaries associated with $\hat{H}_{\text{odd}}$ and $\hat{H}_{\text{even}}$ are applied separately in consecutive sweeps, depending on the even/odd starting site passed to the TEBD sweep:$$ U = [U_{1,2}, U_{2,3}, U_{3,4}, \dots ]. $$ ###Code # file: mps/evolution.py def pairwise_unitaries(H, δt): return [scipy.linalg.expm((-1j * δt) * H.interaction_term(k)). reshape(H.dimension(k), H.dimension(k+1), H.dimension(k), H.dimension(k+1)) for k in range(H.size-1)] ###Output _____no_output_____
###Markdown We apply each $U_{i,i+1} = e^{-i h_{i,i+1} \Delta t}$ to two neighbouring tensors, $A_i$ and $A_{i+1}$ simultaneously, as shown below. The resulting tensor $B$ is a two-site tensor. We split this tensor using the canonical form algorithm defined in [this notebook](File%201c%20-%20Canonical%20form.ipynb). ###Code # file: mps/evolution.py def apply_pairwise_unitaries(U, ψ, start, direction, tol=DEFAULT_TOLERANCE): """Apply the list of pairwise unitaries U onto an MPS state ψ in canonical form. Unitaries are applied onto pairs of sites (i,i+1), (i+2,i+3), etc. We start at 'i=start' and move in increasing or decreasing order of sites depending on 'direction' Arguments: U -- List of pairwise unitaries ψ -- State in canonical form start -- First site for applying pairwise unitaries direction -- Direction of sweep. Returns: ψ -- MPS in canonical form""" #print("Apply pairwise unitaries in direction {} and at starting site {} with center {}".format(direction, start, ψ.center)) ψ.recenter(start) if direction > 0: newstart = ψ.size-2 for j in range(start, ψ.size-1, +2): #print('Updating sites ({}, {}), center={}, direction={}'.format(j, j+1, ψ.center, direction)) AA = np.einsum('ijk,klm,nrjl -> inrm', ψ[j], ψ[j+1], U[j]) ψ.update_2site(AA, j, +1, tolerance=tol) if j < newstart: ψ.update_canonical(ψ[j+1], +1, tolerance=tol) #print("New center= {}, new direction = {}".format(ψ.center, direction)) return newstart, -1 else: newstart = 0 for j in range(start, -1, -2): #print('Updating sites ({}, {}), center={}, direction={}'.format(j, j+1, ψ.center, direction)) AA = np.einsum('ijk,klm,nrjl -> inrm', ψ[j], ψ[j+1], U[j]) ψ.update_2site(AA, j, -1, tolerance=tol) if j > 0: ψ.update_canonical(ψ[j], -1, tolerance=tol) #print("New center= {}, new direction = {}".format(ψ.center, direction)) return newstart, +1 # file: mps/evolution.py class TEBD_evolution(object): """TEBD_evolution is a class that continuously updates a quantum state ψ evolving it with a Hamiltonian H over intervals of time dt.""" def __init__(self, ψ, H, dt, timesteps=1, order=1, tol=DEFAULT_TOLERANCE): """Create a TEBD algorithm to evolve a quantum state ψ with a fixed Hamiltonian H. Arguments: ψ -- Quantum state to be updated. The class keeps a copy. H -- NNHamiltonian for the evolution dt -- Size of each Trotter step timesteps -- How many Trotter steps in each call to evolve() order -- Order of the Trotter approximation (1 or 2) tol -- Tolerance in MPS truncation """ self.H = H self.dt = float(dt) self.timesteps = timesteps self.order = order self.tolerance = tol self.Udt = pairwise_unitaries(H, dt) if order == 2: self.Udt2 = pairwise_unitaries(H, dt/2) if not isinstance(ψ, mps.state.CanonicalMPS): ψ = mps.state.CanonicalMPS(ψ, center=0) else: ψ = ψ.copy() self.ψ = ψ if ψ.center <= 1: self.start = 0 self.direction = +1 else: self.start = ψ.size-2 self.direction = -1 def evolve(self, timesteps=None): """Update the quantum state with `timesteps` repetitions of the Trotter algorithms.""" #print("Apply TEBD for {} timesteps in the order {}".format(self.timesteps, self.order)) if timesteps is None: timesteps = self.timesteps for i in range(timesteps): #print(i) if self.order == 1: #print("Sweep in direction {} and at starting site {}".format(self.direction, self.start)) self.start, self.direction = apply_pairwise_unitaries(self.Udt, self.ψ, self.start, self.direction, tol=self.tolerance) #print("Sweep in direction {} and at starting site {}".format(self.direction, self.start)) self.start, self.direction = apply_pairwise_unitaries(self.Udt, self.ψ, self.start, self.direction, tol=self.tolerance) else: self.start, self.direction = apply_pairwise_unitaries(self.Udt2, self.ψ, self.start, self.direction, tol=self.tolerance) self.start, self.direction = apply_pairwise_unitaries(self.Udt, self.ψ, self.start, self.direction, tol=self.tolerance) self.start, self.direction = apply_pairwise_unitaries(self.Udt2, self.ψ, self.start, self.direction, tol=self.tolerance) #print("New direction = {} and new starting site = {}".format(self.direction, self.start)) return self.ψ def state(self): return self.ψ ###Output _____no_output_____
###Markdown Error in Suzuki-Trotter decomposition In the first-order Suzuki-Trotter decomposition, the evolution operator becomes\begin{equation}e^{-i \hat{H} \Delta t} = e^{-i \hat{H}_{\text{odd}} \Delta t} e^{-i \hat{H}_{\text{even}} \Delta t} + O(\Delta t^2).\end{equation}Note that after $T/\Delta t$ time steps, the accumulated error is on the order of $\Delta t$.Higher-order Suzuki-Trotter decompositions can be used to reduce this error (a small numerical check of the scaling appears after the tests below). Tests ###Code # file: mps/test/test_TEBD.py import unittest import scipy.sparse as sp import scipy.sparse.linalg from mps.state import CanonicalMPS from mps.tools import * from mps.test.tools import * from mps.evolution import * from mps.hamiltonians import make_ti_Hamiltonian, ConstantNNHamiltonian def random_wavefunction(n): ψ = np.random.rand(n) - 0.5 return ψ / np.linalg.norm(ψ) class TestTEBD_sweep(unittest.TestCase): @staticmethod def hopping_model(N, t, ω): a = annihilation(2) ad = creation(2) return make_ti_Hamiltonian(N, [t*a, t*ad], [ad, a], local_term = ω*(ad@a)) @staticmethod def hopping_model_Trotter_matrix(N, t, ω): # # Hamiltonian that generates the evolution of the odd hoppings # and local frequencies return sp.diags([[t,0]*(N//2), [ω]+[ω/2]*(N-2)+[ω], [t,0]*(N//2)], offsets=[-1,0,+1], shape=(N,N), dtype=np.float64) @staticmethod def hopping_model_matrix(N, t, ω): return sp.diags([[t]*(N), ω, [t]*(N)], offsets=[-1,0,+1], shape=(N,N)) def inactive_test_apply_pairwise_unitaries(self): N = 2 tt = -np.pi/2 ω = np.pi dt = 0.1 # # Numerically exact solution using Scipy's exponentiation routine ψwave = random_wavefunction(N) print(mps.state.wavepacket(ψwave).tovector()) HMat = self.hopping_model_Trotter_matrix(N, tt, ω) ψwave_final = sp.linalg.expm_multiply(+1j * dt * HMat, ψwave) print(mps.state.wavepacket(ψwave_final).tovector()) print(HMat.todense()) # # Evolution using Trotter H = self.hopping_model(N, tt, ω) U = pairwise_unitaries(H, dt) ψ = CanonicalMPS(mps.state.wavepacket(ψwave)) start = 0 direction = 1 apply_pairwise_unitaries(U, ψ, start, direction, tol=DEFAULT_TOLERANCE) print(ψ.tovector()) print(np.abs(mps.state.wavepacket(ψwave_final).tovector() - ψ.tovector())) self.assertTrue(similar(abs(mps.state.wavepacket(ψwave_final).tovector()), abs(ψ.tovector()))) def test_TEBD_evolution_first_order(self): # # # N = 19 t = - np.pi/2 ω = np.pi dt = 1e-6 Nt = int(1000) #ψwave = random_wavefunction(N) xx=np.arange(N) x0 = int(N//2) w0 = 5 k0 = np.pi/2 # # Approximate evolution of a wavepacket in a tight-binding model ψwave = np.exp(-(xx-x0)**2 / w0**2 + 1j * k0*xx) ψwave = ψwave / np.linalg.norm(ψwave) Hmat = self.hopping_model_matrix(N, t, ω) ψwave_final = sp.linalg.expm_multiply(-1j * dt* Nt * Hmat, ψwave) # # Trotter solution ψmps = CanonicalMPS(mps.state.wavepacket(ψwave)) H = self.hopping_model(N, t, ω) ψmps = TEBD_evolution(ψmps, H, dt, timesteps=Nt, order=1, tol=DEFAULT_TOLERANCE).evolve() self.assertTrue(similar(abs(mps.state.wavepacket(ψwave_final).tovector()), abs(ψmps.tovector()))) def test_TEBD_evolution_second_order(self): # # # N = 21 t = 0.1 ω = 0.5 dt = 1e-6 Nt = int(1000) #ψwave = random_wavefunction(N) xx=np.arange(N) x0 = int(N//2) w0 = 5 k0 = np.pi/2 # # Approximate evolution of a wavepacket in a tight-binding model ψwave = np.exp(-(xx-x0)**2 / w0**2 + 1j * k0*xx) ψwave = ψwave / np.linalg.norm(ψwave) Hmat = self.hopping_model_matrix(N, t, ω) ψwave_final = sp.linalg.expm_multiply(-1j * dt * Nt * Hmat, ψwave) # # Trotter evolution H = self.hopping_model(N, t, ω) ψmps = CanonicalMPS(mps.state.wavepacket(ψwave)) ψmps = TEBD_evolution(ψmps, H, dt, timesteps=Nt, order=2, tol=DEFAULT_TOLERANCE).evolve() self.assertTrue(similar(abs(mps.state.wavepacket(ψwave_final).tovector()), abs(ψmps.tovector()))) suite1 =
unittest.TestLoader().loadTestsFromNames(['__main__.TestTEBD_sweep']) unittest.TextTestRunner(verbosity=2).run(suite1); ###Output _____no_output_____
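###Markdown The numerical check of the Trotter error scaling mentioned above: a minimal sketch, not part of the test suite. The two Hermitian matrices below are arbitrary stand-ins for $\hat H_{\text{odd}}$ and $\hat H_{\text{even}}$, not the hopping model. ###Code
import numpy as np
import scipy.linalg

rng = np.random.default_rng(0)
A = rng.normal(size=(4, 4)); H_odd = A + A.T    # random real symmetric (Hermitian) matrices
B = rng.normal(size=(4, 4)); H_even = B + B.T

for dt in [0.1, 0.05, 0.025]:
    exact = scipy.linalg.expm(-1j * (H_odd + H_even) * dt)
    trotter = scipy.linalg.expm(-1j * H_odd * dt) @ scipy.linalg.expm(-1j * H_even * dt)
    err = np.linalg.norm(exact - trotter, 2)
    # err / dt**2 should stay roughly constant, consistent with an O(dt^2) error per step
    print(f"dt={dt}  error={err:.3e}  error/dt^2={err / dt**2:.3f}")
###Output _____no_output_____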
correlation/Correlation Analysis - Census Block Broadband Dataset/Correlation - Number of Providers/Wireline/Rural/Corr4bR.ipynb
###Markdown About:4/10/2021 This program calculates correlation coefficients: (1) between numbers of internet providers (2) between states (3) Population (4) between states & internet speeds??? -> Self Assigned NOTE: Make sure to unzip the xlsx files@author: Minh Nguyen @AIA@credit: Qasim, Andrei @AIA ###Code import numpy as np from numpy.random import randn from numpy.random import seed from numpy import array_split import pandas as pd from sklearn import metrics as mt from sklearn import model_selection as md from matplotlib import pyplot as plt import seaborn as sns import sklearn.datasets as ds import random ###Output _____no_output_____
###Markdown Correlation Calculation ###Code def correlation_cal(df): # Instead of just dropping the missing values, we will fill in N/A values if df.isna().values.any(): while True: missing_type = input("Please enter the type of missing value replacement: mean, median, mode, or drop") missing_type = missing_type.lower() if(missing_type in ['mean', 'median', 'mode', 'drop']): if(missing_type == 'mean'): df.fillna(df.mean(), inplace=True) elif(missing_type == 'median'): df.fillna(df.median(), inplace=True) elif(missing_type == 'mode'): df.fillna(df.mode(), inplace=True) else: df.dropna() break else: print("Please input the option from the list") # Calculate input data correlation while True: corr_type = input("Please enter type of correlation: Pearson, Spearman, or Kendall: ") corr_type = corr_type.lower() if corr_type in ['pearson', 'spearman', 'kendall']: break else: print("Please try again") # Plot correlation matrix corrMatrix = df.corr(method=corr_type) _, ax = plt.subplots(figsize=(12, 10)) sns.heatmap(corrMatrix, ax = ax, cmap="gray", linewidths = 0.1) # cmap can also be "YlGnBu" ###Output _____no_output_____
###Markdown (1) Correlation Calculation Between Number of Internet Providers Preprocess Data ###Code !ls def parse_data_bis(file): """ Load the Excel file into a pandas DataFrame by numbers of internet providers. Parse data by internet speed for all 50 states + nation (row 0) """ dataset = pd.read_excel(file, skiprows=[0], usecols=[4,5,6,7,8,9,10,11]) return dataset df_bis = parse_data_bis("preprocessed_data_w_rural.xlsx") df_bis.head() ###Output _____no_output_____ ###Markdown Run 1: Correlation Pearson - Mean ###Code # Run Correlation Pearson correlation_cal(df_bis) ###Output Please enter type of correlation: Pearson, Spearman, or Kendall: Pearson ###Markdown Run 2: Correlation Spearman - Mean ###Code # Run Correlation Spearman correlation_cal(df_bis) ###Output Please enter type of correlation: Pearson, Spearman, or Kendall: Spearman ###Markdown Run 3: Correlation Kendall - Mean ###Code # Run Correlation Kendall correlation_cal(df_bis) ###Output Please enter type of correlation: Pearson, Spearman, or Kendall: Kendall ###Markdown Analysis - Self Assigned1/ What are the two most significant correlation coefficients for Pearson, Spearman, Kendall?2/ What other analyses can we do with these graphs?3/ Any other metrics besides Pearson, Spearman, Kendall?
(2) Correlation Calculation Between States ###Code def parse_data_bs(file): """ Load the Excel file into a pandas DataFrame by internet speed. Parse data by states and nation for all different numbers of internet providers """ dataset = pd.read_excel(file, skiprows=[0,1], usecols=[0,4,5,6,7,8,9,10,11], drop=True) dataset.set_index("Nationwide", inplace=True) dataset = dataset.T return dataset df_bs = parse_data_bs("preprocessed_data_w_rural.xlsx") df_bs.head() ###Output _____no_output_____ ###Markdown Run 1: Correlation Pearson - Mean ###Code # Run Correlation Pearson correlation_cal(df_bs) ###Output Please enter type of correlation: Pearson, Spearman, or Kendall: Pearson ###Markdown Run 2: Correlation Spearman - Mean ###Code # Run Correlation Spearman correlation_cal(df_bs) ###Output Please enter type of correlation: Pearson, Spearman, or Kendall: Spearman ###Markdown Run 3: Correlation Kendall - Mean ###Code # Run Correlation Kendall correlation_cal(df_bs) ###Output Please enter type of correlation: Pearson, Spearman, or Kendall: Kendall ###Markdown (3) Correlation Calculation Between Population Counts ###Code def parse_data_bpop(file): """ Load the Excel file into a pandas DataFrame by internet speed. Parse data by population for all different numbers of internet providers """ dataset = pd.read_excel(file, skiprows=[0], usecols=[3,4,5,6,7,8,9,10,11], drop=True) dataset.set_index("Population", inplace=True) dataset = dataset.T return dataset df_bpop = parse_data_bpop("preprocessed_data_w_rural.xlsx") df_bpop.head() ###Output _____no_output_____ ###Markdown Run 1: Correlation Pearson - Mean ###Code # Run Correlation Pearson => Automatically drop the first col since it is a string correlation_cal(df_bpop) ###Output Please enter type of correlation: Pearson, Spearman, or Kendall: Pearson ###Markdown Run 2: Correlation Spearman - Mean ###Code # Run Correlation Spearman => Automatically drop the first col since it is a string correlation_cal(df_bpop) ###Output Please enter type of correlation: Pearson, Spearman, or Kendall: Spearman ###Markdown Run 3: Correlation Kendall - Mean ###Code # Run Correlation Kendall => Automatically drop the first col since it is a string correlation_cal(df_bpop) ###Output Please enter type of correlation: Pearson, Spearman, or Kendall: Kendall
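###Markdown For reference, here is a minimal sketch comparing the three coefficients used above on small hypothetical arrays, assuming `scipy.stats` is available: ###Code
import numpy as np
from scipy import stats

x = np.arange(10, dtype=float)
y = x ** 3 + np.random.normal(scale=5.0, size=10)  # monotone but nonlinear relationship

print("Pearson :", stats.pearsonr(x, y)[0])    # strength of linear association
print("Spearman:", stats.spearmanr(x, y)[0])   # rank-based (monotone) association
print("Kendall :", stats.kendalltau(x, y)[0])  # concordant vs. discordant pairs
###Output _____no_output_____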
tutorials/notebook/cx_site_chart_examples/heatmap_13.ipynb
###Markdown Example: CanvasXpress heatmap Chart No. 13This example page demonstrates how to, using the Python package, create a chart that matches the CanvasXpress online example located at:https://www.canvasxpress.org/examples/heatmap-13.htmlThis example is generated using the reproducible JSON obtained from the above page and the `canvasxpress.util.generator.generate_canvasxpress_code_from_json_file()` function.Everything required for the chart to render is included in the code below. Simply run the code block. ###Code from canvasxpress.canvas import CanvasXpress from canvasxpress.js.collection import CXEvents from canvasxpress.render.jupyter import CXNoteBook cx = CanvasXpress( render_to="heatmap13", data={ "y": { "vars": [ "Var1", "Var2", "Var3", "Var4", "Var5", "Var6", "Var7", "Var8", "Var9", "Var10", "Var11", "Var12", "Var13", "Var14", "Var15", "Var16", "Var17", "Var18", "Var19", "Var20" ], "smps": [ "Sample1", "Sample2", "Sample3", "Sample4", "Sample5", "Sample6", "Sample7", "Sample8", "Sample9", "Sample10" ], "data": [ [ 2.82, -0.06, 1.94, -2.35, 4.19, -0.13, 2.55, 1.49, 3.02, -0.7 ], [ 3.001, 0.3289, 2.55, -0.552, 3.766, -0.28, 4.153, -1.759, 3.363, 2.09 ], [ 2.82, -0.53, 2.87, -0.89, 4.84, -1.12, 3.11, -1.26, 1.58, 0.19 ], [ 2.5, 0.86, 4.89, 1.7, 2.54, 0.95, 2.65, 1.22, 1.82, 0.64 ], [ 2.81, 0.64, 3.47, -0.74, 3.17, 0.85, 1.66, 0.89, 4.18, -0.17 ], [ 2.23, 0.18, 3.27, -1.53, 4.3, -1.49, 2.16, -0.48, 2.93, 2.26 ], [ 2.98, -2.8, 3.1, -0.03, 1.89, -0.48, 3.53, 0.68, 3.71, 0.04 ], [ 3.49, -0.79, 2.36, 0.62, 5.25, -1.51, 2.65, -0.99, 2.21, 1.18 ], [ 3.35, 0.47, 3.58, -0.5, 2.15, -0.7, 1.74, -0.82, 2.76, 1.1 ], [ 3.72, 0.27, 2.91, -0.82, 1.42, 1.44, 4.32, -1.72, 2.8, -0.15 ], [ 2.53, 1.33, -1.41, 2.36, -1.2, 1.48, 0.56, 1.48, -0.81, 1.39 ], [ -2.23, 3.04, -1.05, 1.43, 0.53, 1.14, 1.09, 2.57, 0.37, 3.53 ], [ -1.3, 1.48, -1.1, 1.99, 0.25, 1.91, -1.11, 1.99, -0.62, 0.76 ], [ 0.4, 2.37, 0.49, 1.07, 1.03, 2.06, 0.11, 0.1, 1.08, 2.02 ], [ -0.42, 6.68, 0.19, 6.96, -0.01, 6.49, 0.33, 5.97, 0.24, 6.17 ], [ -0.24, 6.11, -0.69, 6.6, 0.2, 6.23, 0.13, 6.6, -0.48, 7.22 ], [ 0.8, 7.04, 0.46, 5.7, -1.68, 6.41, 0.48, 6.2, -0.36, 6.04 ], [ 0.02, 5.44, 0.65, 5.03, 2.4, 6.52, -1.02, 6.13, 1.16, 6.5 ], [ 0.01, 7.7, 1.17, 5.98, -2.31, 5.01, 0.6, 4.81, 0.25, 5.74 ], [ 1.28, 3.76, 0.08, 5.34, 1.1, 5, 0.46, 6.59, -1.98, 7.75 ] ] }, "x": { "CellType": [ "CT1", "CT2", "CT1", "CT2", "CT1", "CT2", "CT1", "CT2", "CT1", "CT2" ], "Time": [ "t1", "t2", "t3", "t4", "t5", "t1", "t2", "t3", "t4", "t5" ], "Dose": [ 40, 40, 20, 20, 15, 30, 50, 15, 30, 50 ], "Drug": [ "A", "B", "A", "B", "A", "B", "A", "B", "A", "B" ] }, "z": { "GeneClass": [ "Path1", "Path1", "Path1", "Path1", "Path1", "Path1", "Path1", "Path1", "Path1", "Path1", "Path2", "Path2", "Path2", "Path2", "Path3", "Path3", "Path3", "Path3", "Path3", "Path3" ], "ProteinA": [ 2.82, 0.32, 4.89, 3.27, 5.7, 6.41, 0.48, 1.98, 7.04, 4.18, 2.31, 3.52, 1.72, 6.51, 5.44, 1.28, 0.46, 4.32, 1.39, 7.77 ] } }, config={ "graphType": "Heatmap", "heatmapIndicatorPosition": "right", "heatmapSmpSeparateBy1": "CellType", "heatmapVarSeparateBy1": "GeneClass", "overlayFontStyle": "bold", "overlayScaleFontFactor": 2, "samplesClustered": True, "showLevelOverlays": False, "showSmpOverlaysLegend": True, "showVarOverlaysLegend": True, "smpDendrogramPosition": "right", "smpOverlayProperties": { "Dose": { "type": "Bar", "showLegend": "True", "color": "blue", "thickness": 80, "position": "left", "spectrum": [ "rgb(69,117,180)", "rgb(145,191,219)", "rgb(224,243,248)", "rgb(255,255,191)", 
"rgb(254,224,144)", "rgb(252,141,89)", "rgb(215,48,39)" ], "scheme": "User", "showName": True, "showBox": True, "rotate": False }, "Time": { "position": "right", "showLegend": "True", "scheme": "Greens", "type": "Default", "color": "rgb(105,150,150)", "spectrum": [ "rgb(69,117,180)", "rgb(145,191,219)", "rgb(224,243,248)", "rgb(255,255,191)", "rgb(254,224,144)", "rgb(252,141,89)", "rgb(215,48,39)" ], "showName": True, "showBox": True, "rotate": False }, "CellType": { "scheme": "Matlab", "showLegend": "True", "type": "Default", "position": "right", "color": "rgb(248,204,3)", "spectrum": [ "rgb(69,117,180)", "rgb(145,191,219)", "rgb(224,243,248)", "rgb(255,255,191)", "rgb(254,224,144)", "rgb(252,141,89)", "rgb(215,48,39)" ], "showName": True, "showBox": True, "rotate": False }, "Drug": { "thickness": 30, "position": "left", "type": "Increase", "scheme": "Lancet", "showLegend": "True", "color": "rgb(254,41,108)", "spectrum": [ "rgb(69,117,180)", "rgb(145,191,219)", "rgb(224,243,248)", "rgb(255,255,191)", "rgb(254,224,144)", "rgb(252,141,89)", "rgb(215,48,39)" ], "showName": True, "showBox": True, "rotate": False } }, "smpOverlays": [ "Drug", "-", "Dose", "CellType", "-", "Time" ], "smpTitleLabelPosition": "right", "varOverlayProperties": { "ProteinA": { "type": "Line", "position": "top", "thickness": 45, "color": "green", "spectrum": [ "rgb(69,117,180)", "rgb(145,191,219)", "rgb(224,243,248)", "rgb(255,255,191)", "rgb(254,224,144)", "rgb(252,141,89)", "rgb(215,48,39)" ], "scheme": "User", "showLegend": True, "showName": True, "showBox": True, "rotate": False }, "GeneClass": { "showLegend": "True", "scheme": "GGPlot", "type": "Default", "position": "top", "thickness": 20, "color": "rgb(167,206,49)", "spectrum": [ "rgb(69,117,180)", "rgb(145,191,219)", "rgb(224,243,248)", "rgb(255,255,191)", "rgb(254,224,144)", "rgb(252,141,89)", "rgb(215,48,39)" ], "showName": True, "showBox": True, "rotate": False } }, "varOverlays": [ "ProteinA", "-", "GeneClass" ], "varTitleLabelPosition": "bottom", "variablesClustered": True }, width=813, height=655, events=CXEvents(), after_render=[], other_init_params={ "version": 35, "events": False, "info": False, "afterRenderInit": False, "noValidate": True } ) display = CXNoteBook(cx) display.render(output_file="heatmap_13.html") ###Output _____no_output_____
recommender-system/ml-1m/memory.ipynb
###Markdown Pearson Similarity ###Code corr_matrix = np.corrcoef(ratings_mtx_df.T) np.fill_diagonal(corr_matrix, 0 ) corr = pd.DataFrame(corr_matrix) corr.head() P = corr_matrix[inp] max(P) list(movie_index[(P>0.28) & (P<0.35)]) ###Output _____no_output_____
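###Markdown A minimal sketch of the `np.corrcoef` pattern used above, on a tiny hypothetical ratings matrix: ###Code
import numpy as np

# Rows are users, columns are items (made-up ratings)
ratings = np.array([[5.0, 3.0, 1.0],
                    [4.0, 3.0, 2.0],
                    [1.0, 2.0, 5.0]])

# np.corrcoef treats each row as a variable, so transpose to correlate items
item_corr = np.corrcoef(ratings.T)
np.fill_diagonal(item_corr, 0)  # zero out each item's self-correlation, as above
print(item_corr)
###Output _____no_output_____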
my/testing.ipynb
###Markdown We compute similarity as follows, where dis is the Euclidean distance and KRCC is the Kendall rank correlation coefficient. Kendall's coefficient: $\tau=\frac{\text { (number of concordant pairs })-(\text { number of discordant pairs })}{n(n-1) / 2}$$\operatorname{Sim}\left(s_{i}, s_{j}\right)=\alpha\left(1-\frac{d i s\left(s_{i}, s_{j}\right)}{\sqrt{2}}\right)+(1-\alpha) \operatorname{KRCC}\left(s_{i}, s_{j}\right)$ ###Code import math import numpy as np import scipy.stats # We now have the similarity vectors; we still need a score for each item taus = np.array([scipy.stats.kendalltau(user.representative_item_value, item).correlation for item in feature_vectors]) print('taus:', taus, sep='\n') # Euclidean distance distances = np.array([np.linalg.norm(user.representative_item_value - item) for item in feature_vectors]) print('distances:', distances, sep='\n') # alpha is a balancing parameter alpha = 0.5 scores = np.array([alpha * distances[i] + (1-alpha) * taus[i] for i in range(len(taus))]) print('scores:', scores, sep='\n') # Build the kernel matrix from the similarity matrix and the scores kernel_matrix = scores.reshape((item_size, 1)) * similarities * scores.reshape((1, item_size)) print('kernel_matrix', kernel_matrix, sep='\n') # Core DPP (greedy MAP) algorithm def dpp(kernel_matrix, max_length, epsilon=1E-10): cis = np.zeros((max_length, item_size)) di2s = np.copy(np.diag(kernel_matrix)) selected_items = list() selected_item = np.argmax(di2s) selected_items.append(selected_item) while len(selected_items) < max_length: k = len(selected_items) - 1 ci_optimal = cis[:k, selected_item] di_optimal = math.sqrt(di2s[selected_item]) elements = kernel_matrix[selected_item, :] eis = (elements - np.dot(ci_optimal, cis[:k, :])) / di_optimal cis[k, :] = eis di2s -= np.square(eis) di2s[selected_item] = -np.inf selected_item = np.argmax(di2s) if di2s[selected_item] < epsilon: break selected_items.append(selected_item) return selected_items # Run the DPP algorithm selected_items = dpp(kernel_matrix, max_length) print('selected_items:', selected_items, sep='\n') print('user.representative_item_index:', user.representative_item_index, sep='\n') print(scores[1278]) for (index, item) in enumerate(selected_items): print(f"index: {index}, item: {item}, scores: {scores[item - 1]}") # Evaluate the accuracy (DCG) of the recommendation list dcg_value = np.sum([2 ** scores[item - 1] / np.log2(index + 2) for (index, item) in enumerate(selected_items)]) print('dcg_value:', dcg_value, sep='\n') ###Output dcg_value: 6.456515902069293
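###Markdown As a quick sanity check of the greedy `dpp` routine above, here is a hypothetical 3-item example. Note that `dpp` reads `item_size` from the enclosing scope, so this demo overwrites it: ###Code
# Hypothetical 3-item problem: items 0 and 1 are near-duplicates, item 2 is distinct
item_size = 3
demo_scores = np.array([1.0, 0.8, 0.6])
demo_sims = np.array([[1.0, 0.9, 0.1],
                      [0.9, 1.0, 0.2],
                      [0.1, 0.2, 1.0]])
demo_kernel = demo_scores.reshape(3, 1) * demo_sims * demo_scores.reshape(1, 3)

# Expect [0, 2]: the near-duplicate pair (0, 1) is penalized by the determinant
print(dpp(demo_kernel, 2))
###Output _____no_output_____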
content/Module01/M01_N04_Moments_of_distributions.ipynb
###Markdown Moments of distributions***Reading: Emile-Geay: Chapter 3***"Climate is what you expect. Weather is what you get""Expectation is what you expect. The random variable is what you get" ###Code %reset import numpy as np import matplotlib.pyplot as plt # These are some parameters to make figures nice (and big) %matplotlib inline %config InlineBackend.figure_format = 'retina' plt.rcParams['figure.figsize'] = 16,8 params = {'legend.fontsize': 'x-large', 'figure.figsize': (15, 5), 'axes.labelsize': 'x-large', 'axes.titlesize':'x-large', 'xtick.labelsize':'x-large', 'ytick.labelsize':'x-large'} plt.rcParams.update(params) ###Output _____no_output_____
###Markdown 1. Moments of distributions 1.1 Expected value/mean The expected value of a random variable is the average value we would expect to get if we could sample a random variable an infinite number of times. It represents an average of all the possible outcomes, weighted by how probable they are. The *expected value* of a random variable is also called its *first order moment*, *mean*, or *average*. It is computed using the *expectation operator*:$$E(X)=\mu=\begin{cases}\sum_{i=1}^{N}x_{i}P(X=x_{i}) & \text{if }X\text{ is discrete}\\\int_{\mathbb{R}}xf(x)dx & \text{if }X\text{ is continuous}\end{cases}$$**Key property: linearity**$$E(aX+bY)=aE(X)+bE(Y)$$We can also define the expected value, or mean, of any function of a random variable:$$E(g(X))=\begin{cases}\sum_{i=1}^{N}g(x_{i})P(X=x_{i}) & \text{if }X\text{ is discrete}\\\int_{\mathbb{R}}g(x)f(x)dx & \text{if }X\text{ is continuous}\end{cases}$$ 1.2 Higher Order Moments We can define higher order moments of a distribution as$$ m(X,n)=E(X^n)=\sum_{i=1}^N x_i^nP(X=x_i)$$$$ m(X,n)=E(X^n)=\int_\mathbb{R}x^nf(x)dx$$for, respectively, discrete and continuous r.v.s 1.3 Variance A closely related notion to the second order moment is the **variance** or centered second moment, defined as:$$V(X)=E([X-E(X)]^2)=E([X-\mu]^2)=\int_\mathbb{R}(x-\mu)^2f(x)dx$$Expanding the square and using the linearity of the expectation operator, we can show that the variance can also be written as:$$V(X)=E(X^2)-(E(X))^2=E(X^2)-\mu^2$$Variance is a measure of the spread of a distribution. 1.4 Standard deviation Another closely related measure is the standard deviation, defined simply as the square root of the variance$$\text{std}=\sqrt{V(X)}=\sqrt{E([X-\mu]^2)}$$ Important Properties:$$ V(X+b)=V(X)$$$$ V(aX)=a^2V(X)$$$$ \text{std}(aX)=|a| \cdot\text{std}(X)$$ 1.5 Examples Uniform distributions The pdf of a r.v. uniformly distributed over the interval $[a,b]$ is$$f(x)=\frac{1}{b-a}$$You can check for yourselves that $$ E(X)=\frac{1}{2}(a+b)$$$$ V(X)=\frac{1}{12}(b-a)^2$$$$\text{std}=\frac{1}{\sqrt{12}}(b-a)$$ Normal distribution The pdf of a normally distributed r.v. with location parameter $\mu$ and scale parameter $\sigma$ is$$f(x)=\frac{1}{\sqrt{2\pi\sigma^2}}\exp\left[-\frac{1}{2}\left(\frac{x-\mu}{\sigma}\right)^2\right]$$You can check for yourselves that $$ E(X)=\mu$$$$ V(X)=\sigma^2$$$$\text{std}=\sigma$$![image.png](attachment:image.png) 2. Law of large numbers ###Code from scipy import stats import numpy as np mu=2; sigma=5; # you should also play around with the number of draws and bins of the histogram. # there are some guidelines for choosing the number of bins (Emile-Geay's book talks a bit about them) Ndraws=100000000; # generate random draws from the normal distribution and check that the sample mean approaches mu X_norm=stats.norm.rvs(loc=mu,scale=sigma, size=Ndraws) print(np.mean(X_norm)) print(np.abs(np.mean(X_norm)-mu)) ###Output 1.9998280591197672 0.0001719408802327571
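###Markdown To see the law of large numbers more directly, here is a minimal sketch (reusing `stats`, `mu`, and `sigma` from above) of how the sample mean approaches $\mu$ as the number of draws grows: ###Code
# Running estimate of the mean for increasing sample sizes
for n in [10, 1000, 100000, 10000000]:
    sample = stats.norm.rvs(loc=mu, scale=sigma, size=n)
    print(f"n={n}  |sample mean - mu| = {abs(sample.mean() - mu):.5f}")
###Output _____no_output_____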
Udacity-ML/boston_housing-master_4/boston_housing.ipynb
###Markdown Machine Learning Engineer Nanodegree Model Evaluation & Validation Project 1: Predicting Boston Housing Prices Welcome to the first project of the Machine Learning Engineer Nanodegree! In this file, some example code has already been provided for you, but you will also need to implement additional functionality to make the project run successfully. Unless explicitly required, you do not need to modify any of the given code. Titles starting with **'Exercise'** indicate that the following content contains functionality you must implement. Each part comes with detailed guidance, and the parts to implement are marked with **'TODO'** in the comments. Please read all hints carefully! Besides implementing code, you **must** also answer some questions about the project and your implementation. Each question you need to answer is titled **'Question X'**. Read each question carefully and write a complete answer in the **'Answer'** text box following it. Your project will be graded on both your answers and the functionality of your code. >**Hint:** Code and Markdown cells can be run with the **Shift + Enter** shortcut. Markdown cells can be edited by double-clicking. Getting Started In this project, you will use housing data from suburban Boston, Massachusetts, to train and test a model and evaluate its performance and predictive power. A good model trained on this data can be used to make specific predictions about houses — in particular, about their value. For people such as real-estate agents, such a predictive model proves very valuable in daily work. The dataset for this project comes from the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/Housing). The Boston housing data collection began in 1978; it has 506 data points covering 14 features of houses in different Boston suburbs. This project applies the following preprocessing to the original dataset: - 16 data points with a `'MEDV'` value of 50.0 have been removed. They most likely contain **missing** or **censored values**. - 1 data point with an `'RM'` value of 8.78 has been removed. It is an outlier. - For this project, the `'RM'`, `'LSTAT'`, `'PTRATIO'`, and `'MEDV'` features are essential; the remaining irrelevant features have been removed. - The values of the `'MEDV'` feature have been scaled to reflect 35 years of market inflation. Run the code cell below to load the Boston housing dataset and the Python libraries needed for this project. If the size of the dataset is returned, the dataset was loaded successfully. ###Code # Import libraries necessary for this project import numpy as np import pandas as pd import visuals as vs # Supplementary code from sklearn.model_selection import ShuffleSplit # Pretty display for notebooks %matplotlib inline # Load the Boston housing dataset data = pd.read_csv('housing.csv') prices = data['MEDV'] features = data.drop('MEDV', axis = 1) # Success print "Boston housing dataset has {} data points with {} variables each.".format(*data.shape) ###Output Boston housing dataset has 489 data points with 4 variables each.
###Markdown Analyzing the Data In the first part of the project, you will make a preliminary exploration of the Boston housing data and present your analysis. Familiarizing yourself with the data through exploration helps you better understand and explain your results. Since the ultimate goal of this project is to build a model that predicts house values, we split the dataset into **features** and the **target variable**. The **features** `'RM'`, `'LSTAT'`, and `'PTRATIO'` give us quantitative information about each data point. The **target variable** `'MEDV'` is the variable we want to predict. They are stored in the variables `features` and `prices`, respectively. Exercise: Basic Statistics Your first programming exercise is to compute descriptive statistics of the Boston housing prices. We have imported `numpy` for you; use this library to perform the necessary calculations. These statistics are very important when analyzing the model's predictions. In the code below, you need to: - Compute the minimum, maximum, mean, median, and standard deviation of `'MEDV'` in `prices`; - Store the results in the corresponding variables. ###Code # TODO: Minimum price of the data minimum_price = np.min(prices) # TODO: Maximum price of the data maximum_price = np.max(prices) # TODO: Mean price of the data mean_price = np.mean(prices) # TODO: Median price of the data median_price = np.median(prices) # TODO: Standard deviation of prices of the data std_price = np.std(prices) # Show the calculated statistics print "Statistics for Boston housing dataset:\n" print "Minimum price: ${:,.2f}".format(minimum_price) print "Maximum price: ${:,.2f}".format(maximum_price) print "Mean price: ${:,.2f}".format(mean_price) print "Median price ${:,.2f}".format(median_price) print "Standard deviation of prices: ${:,.2f}".format(std_price) ###Output Statistics for Boston housing dataset: Minimum price: $105,000.00 Maximum price: $1,024,800.00 Mean price: $454,342.94 Median price $438,900.00 Standard deviation of prices: $165,171.13
###Markdown Question 1 - Feature Observation As mentioned earlier, we focus on three values in this project: `'RM'`, `'LSTAT'`, and `'PTRATIO'`. For each data point: - `'RM'` is the average number of rooms per house in the area; - `'LSTAT'` is the percentage of homeowners in the area considered lower class (working poor); - `'PTRATIO'` is the ratio of students to teachers in the area's primary and secondary schools (`students/teacher`). _Intuitively, for each of these three features, do you think that increasing its value would **increase** or **decrease** the value of `'MEDV'`? Justify each answer._ **Hint:** Would you expect a house with an `'RM'` value of 6 to be worth more or less than a house with an `'RM'` value of 7? **Answer:** *As RM increases, MEDV increases, because the houses are larger;* *as LSTAT increases, MEDV decreases, because there are more low-income residents;* *as PTRATIO increases, MEDV decreases, because educational resources become scarcer.* Modeling In the second part of the project, you will learn the tools and techniques needed to make your model predict. Precisely measuring each model's performance with these tools and techniques can greatly strengthen your confidence in the predictions. Exercise: Define a Performance Metric It is hard to measure the quality of a model without quantitatively evaluating its performance on training and testing. We usually define metrics computed from errors or goodness of fit. In this project, you will quantify model performance by computing the [*coefficient of determination*](http://stattrek.com/statistics/dictionary.aspx?definition=coefficient_of_determination) R2. The coefficient of determination is a very common statistic in regression analysis and is often treated as a standard measure of how well a model predicts. R2 ranges from 0 to 1 and represents the percentage of squared correlation between the predicted and actual values of the **target variable**. A model with an R2 of 0 is no better than simply predicting the **mean**, while a model with an R2 of 1 predicts the target variable perfectly. Values between 0 and 1 indicate what percentage of the target variable's variation the **features** can explain. _A model can also have a negative R2, in which case its predictions can be far worse than simply using the target variable's mean._ In the `performance_metric` function below, you need to: - Use `r2_score` from `sklearn.metrics` to compute the R2 value between `y_true` and `y_predict` as the performance judgment. - Store the score in the variable `score`. ###Code # TODO: Import 'r2_score' from sklearn.metrics import r2_score def performance_metric(y_true, y_predict): """ Calculates and returns the performance score between true and predicted values based on the metric chosen. """ # TODO: Calculate the performance score between 'y_true' and 'y_predict' score = r2_score(y_true, y_predict) # Return the score return score ###Output _____no_output_____
###Markdown Question 2 - Goodness of Fit Suppose a dataset has five data points and a model makes the following predictions of the target variable: | True Value | Predicted Value | | :-------------: | :--------: | | 3.0 | 2.5 | | -0.5 | 0.0 | | 2.0 | 2.1 | | 7.0 | 7.8 | | 4.2 | 5.3 | *Do you think this model successfully captures the variation of the target variable? If so, explain why; if not, give your reasons.* Run the code below to compute this model's coefficient of determination with the `performance_metric` function. ###Code # Calculate the performance of this model score = performance_metric([3, -0.5, 2, 7, 4.2], [2.5, 0.0, 2.1, 7.8, 5.3]) print "Model has a coefficient of determination, R^2, of {:.3f}.".format(score) ###Output Model has a coefficient of determination, R^2, of 0.923.
###Markdown **Answer:** I think it does. The coefficient of determination ranges from 0 to 1, and the closer it is to 1, the better the model predicts the target variable. The computed value here is 0.923, which indicates the model describes the variation of the target variable well. Exercise: Shuffle and Split Data Next, you need to split the Boston housing data into training and testing subsets. The data is usually also shuffled in this process to remove bias from the dataset's ordering. In the code below, you need to: - Use `train_test_split` from `sklearn.model_selection` to split both `features` and `prices` into training and testing subsets. - Split ratio: 80% of the data for training and 20% for testing; - Choose a value for `random_state` in `train_test_split`, which ensures reproducible results; - The resulting subsets should be `X_train`, `X_test`, `y_train`, and `y_test`. ###Code # TODO: Import 'train_test_split' from sklearn.model_selection import train_test_split # TODO: Shuffle and split the data into training and testing subsets X_train, X_test, y_train, y_test = train_test_split(features, prices, test_size=0.2, random_state=42) # Success print "Training and testing split was successful." ###Output Training and testing split was successful.
###Markdown Question 3 - Training and Testing *What is the benefit, for a learning algorithm, of splitting a dataset into training and testing subsets in some ratio?* **Hint:** What problems would arise if we had no data to test the model on? **Answer:** *Doing so lets us estimate the model's generalization error on the testing subset and assess how good the model is.* ---- Analyzing Model Performance In the third part of the project, we look at how several models learn and test on datasets of different sizes. In addition, you will focus on one particular algorithm, increasing its `'max_depth'` parameter while training on the full training set, and observe how changing this parameter affects the model's performance. Plotting your model's performance is very helpful in the analysis. Visualization lets us see behaviors that the results alone cannot show. Learning Curves The code cell below outputs four figures showing a decision-tree model's performance at different maximum depths. Each curve shows how the training and testing scores of the model's learning curve change as the amount of training data increases. Note that the shaded region of a curve represents its uncertainty (measured by the standard deviation). The model is scored on both training and testing with the coefficient of determination, R2. Run the code cell below and use the output figures to answer the question. ###Code # Produce learning curves for varying training set sizes and maximum depths vs.ModelLearning(features, prices) ###Output _____no_output_____
###Markdown Question 4 - Learning the Data *Choose one of the figures above and state its maximum depth. As the amount of training data increases, how does the score of the training curve change? What about the testing curve? Would more training data effectively improve the model's performance?* **Hint:** Do the learning-curve scores eventually converge to particular values? **Answer:** The second one, with a maximum depth of 3. As the amount of training data increases, the training curve's score starts to decrease and the testing curve's score starts to increase, but both eventually plateau, so more training data would not effectively improve the model's performance. Complexity Curves The code cell below outputs a figure showing a decision-tree model that has been trained and validated at different maximum depths. The figure contains two curves, one for training and one for testing. Similar to the **learning curves**, the shaded region represents the uncertainty of the curve, and both training and testing are scored with the `performance_metric` function. Run the code cell below and use the output figure to answer the two questions that follow. ###Code vs.ModelComplexity(X_train, y_train) ###Output _____no_output_____
###Markdown Question 5 - Bias-Variance Tradeoff *When the model is trained with a maximum depth of 1, does it suffer from high bias or high variance? What about a maximum depth of 10? What features of the figure support your conclusions?* **Hint:** How can you tell whether a model suffers from high bias or high variance? **Answer:** *At depth 1 there is high bias: both the training and testing R2 scores are low and very close to each other, indicating the model cannot predict the data well.* *At depth 10 there is high variance: the gap between the training and testing scores is large, indicating overfitting.* Question 6 - Best-Guess Optimal Model *What maximum depth do you think would result in a model that best predicts unseen data? What is the basis for your answer?* **Answer:** 3. At that depth, the gap between the training and testing scores is smallest, and the testing score reaches its maximum. ----- Evaluating Model Performance In this final part of the project, you will build a model yourself and use the optimized `fit_model` function to predict a house's value from a client's house features. Question 7 - Grid Search *What is grid search? How can it be used to optimize a learning algorithm?* **Answer:** It is an algorithm that lays parameters out on a grid. It automatically generates a "grid" of the different parameter values:=================================== ('param1', param3) | ('param1', param4)('param2', param3) | ('param2', param4)==================================By trying every parameter combination in the "grid", it finds the best combination of k (possible choices 'param1' and 'param2') and C (possible choices 'param3' and 'param4'), and selects the best parameter combination to optimize the learning algorithm. Question 8 - Cross-Validation *What is k-fold cross-validation? What benefit does using it bring to grid search when optimizing a model? How does grid search combine with cross-validation to select the best parameter combination?* **Hint:** Similar to the reason we need a testing set, what problems arise if grid search does not use cross-validation? What can the [`'cv_results'`](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html) attribute of GridSearchCV tell us? **Answer:** K-fold cross-validation is a method for model evaluation and validation. It splits the dataset into k parts, where 1 part is the validation set and the remaining k-1 parts form the training set. Because an ordinary random split into training and testing sets is probabilistic, the "best" model obtained can be a matter of chance; k-fold cross-validation reduces this randomness and chance. It also makes better use of the training data, letting the model learn the underlying features. With cross-validation we obtain multiple validation sets. Grid search is a parameter-tuning algorithm, and we tune parameters based on validation-set performance; with multiple validation sets we can make multiple attempts, whereas without cross-validation (or some other way of producing validation sets) we cannot tune parameters. Grid search makes the fitting function try all parameter combinations and return a suitable estimator, automatically tuned to the best parameter combination. Exercise: Fit a Model In this final exercise, you will bring together everything you have learned and train a model with the **decision tree algorithm**. To ensure you produce an optimized model, you will train it using grid search to find the best `'max_depth'` parameter. You can think of `'max_depth'` as the number of questions the decision tree algorithm is allowed to ask about the data before making a prediction. Decision trees are a kind of **supervised learning algorithm**. In addition, you will find that your implementation uses `ShuffleSplit()`. It is also a form of cross-validation (see the variable `'cv_sets'`). Although it is not the K-Fold cross-validation described in **Question 8**, this validation method is also very useful! Here `ShuffleSplit()` creates 10 (`'n_splits'`) shuffled sets, and in each set 20% (`'test_size'`) of the data is used as the **validation set**. While implementing, think about the similarities and differences compared with K-Fold cross-validation. In the `fit_model` function below, you need to: - Use [`DecisionTreeRegressor`](http://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeRegressor.html) from `sklearn.tree` to create a decision-tree regressor; - Store the regressor in the variable `'regressor'`; - Create a dictionary for `'max_depth'` whose value is an array from 1 to 10, and store it in the variable `'params'`; - Use [`make_scorer`](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html) from `sklearn.metrics` to create a scoring function; - Pass `performance_metric` as an argument to that function; - Store the scoring function in the variable `'scoring_fnc'`; - Use [`GridSearchCV`](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html) from `sklearn.model_selection` to create a grid-search object; - Pass the variables `'regressor'`, `'params'`, `'scoring_fnc'`, and `'cv_sets'` to that object; - Store the `GridSearchCV` object in the variable `'grid'`. If you are unfamiliar with how to pass multiple arguments to a Python function, you can refer to this MIT course [video](http://cn-static.udacity.com/mlnd/videos/MIT600XXT114-V004200_DTH.mp4). ###Code # TODO: Import 'make_scorer', 'DecisionTreeRegressor', and 'GridSearchCV' from sklearn.tree import DecisionTreeRegressor from sklearn.metrics import make_scorer from sklearn.model_selection import GridSearchCV def fit_model(X, y): """ Performs grid search over the 'max_depth' parameter for a decision tree regressor trained on the input data [X, y]. """ # Create cross-validation sets from the training data cv_sets = ShuffleSplit(n_splits = 10, test_size = 0.20, random_state = 0) # TODO: Create a decision tree regressor object regressor = DecisionTreeRegressor(random_state=0) # TODO: Create a dictionary for the parameter 'max_depth' with a range from 1 to 10 params = {'max_depth': range(1, 11)} # TODO: Transform 'performance_metric' into a scoring function using 'make_scorer' scoring_fnc = make_scorer(performance_metric) # TODO: Create the grid search object grid = GridSearchCV(regressor, params, scoring_fnc, cv=cv_sets) # Fit the grid search object to the data to compute the optimal model grid = grid.fit(X, y) # Return the optimal model after fitting the data return grid.best_estimator_ ###Output _____no_output_____
###Markdown Making Predictions Once we have trained a model on data, it can be used to make predictions on new data. In the decision-tree regressor, the model has learned to *ask questions* about new input data and returns a prediction for the **target variable**. You can use these predictions to obtain information about data whose target variable is unknown — data that must not be part of the training set. Question 9 - Optimal Model *What is the maximum depth of the optimal model? Is it the same as your guess in **Question 6**?* Run the code cell below to fit the decision-tree regressor to the training data and obtain the optimized model. ###Code # Fit the training data to the model using grid search reg = fit_model(X_train, y_train) # Produce the value for 'max_depth' print "Parameter 'max_depth' is {} for the optimal model.".format(reg.get_params()['max_depth']) ###Output Parameter 'max_depth' is 4 for the optimal model.
###Markdown **Answer:** 4. Different from my guess, which was 3. Question 10 - Predicting Selling Prices Imagine you are a real-estate agent in the Boston area hoping to use this model to help your clients price the houses they want to sell. You have collected the following information from three clients: | Feature | Client 1 | Client 2 | Client 3 | | :---: | :---: | :---: | :---: | | Total rooms in home | 5 rooms | 4 rooms | 8 rooms | | Neighborhood poverty level (% considered lower class) | 17% | 32% | 3% | | Student-teacher ratio of nearby schools | 15:1 | 22:1 | 12:1 | *At what price would you recommend each client sell their house? Judging from the feature values, are these prices reasonable?* **Hint:** Use the statistics you computed in the **Analyzing the Data** section to justify your answer. Run the code cell below to use your optimized model to predict the value of each client's house. ###Code # Produce a matrix for client data client_data = [[5, 17, 15], # Client 1 [4, 32, 22], # Client 2 [8, 3, 12]] # Client 3 # Show predictions for i, price in enumerate(reg.predict(client_data)): print "Predicted selling price for Client {}'s home: ${:,.2f}".format(i+1, price) data['MEDV'].describe() ###Output _____no_output_____
###Markdown **Answer:** Client 1: $403,025.00. Client 2: $237,478.72. Client 3: $931,636.36. These prices are reasonable. Take Client 3 as an example: that house has the most rooms, the lowest neighborhood poverty level, and the richest educational resources, so it is the most expensive. By the same reasoning, the predictions for Clients 1 and 2 are also reasonable. In addition, compared against the results of `data['MEDV'].describe()`, all three prices fall within a reasonable range, so the prices are reasonable. Sensitivity An optimal model is not necessarily a robust model. Sometimes a model is too complex or too simple to generalize to new data; sometimes the learning algorithm is not suited to the particular structure of the data; sometimes the data itself is too noisy or too scarce for the model to accurately predict the target variable. In these cases we say the model underfits. Run the code cell below to execute the `fit_model` function ten times with different training and testing sets. Note how the prediction for one particular client changes as the training data changes. ###Code vs.PredictTrials(features, prices, fit_model, client_data) ###Output Trial 1: $391,183.33 Trial 2: $424,935.00 Trial 3: $415,800.00 Trial 4: $420,622.22 Trial 5: $418,377.27 Trial 6: $411,931.58 Trial 7: $399,663.16 Trial 8: $407,232.00 Trial 9: $351,577.61 Trial 10: $413,700.00 Range in prices: $73,357.39
###Markdown Question 11 - Applicability *Briefly discuss whether the model you built can be used in the real world.* **Hint:** Answer the following questions and give reasons for your conclusions: - *Is data collected in 1978 still applicable today?* - *Are the features in the data sufficient to describe a house?* - *Is the model robust enough to guarantee consistent predictions?* - *Can data collected in a big city like Boston be applied to other towns?* **Answer:** No. First, these are only Boston prices, which are not representative, and the data is very old. No — a house's price also depends on other characteristics, such as the quality of its renovation. Not robust enough, because the data it was collected on is no longer applicable today, and the features it presents are not sufficient to describe a house. It cannot be applied elsewhere, because the model is not robust enough and does not generalize. Optional Question - Predicting Beijing Housing Prices (the result does not affect whether the project passes) Through the practice above, you should have a good grasp of some common machine learning concepts. But building a model on 1970s Boston housing data is admittedly not very meaningful for us. Now you can apply what you learned above to the Beijing housing dataset `bj_housing.csv`. Disclaimer: given that Beijing housing prices are directly affected by many factors such as macroeconomic conditions and policy adjustments, the prediction results are for reference only. The features of this dataset are: - Area: house area, in square meters - Room: number of bedrooms - Living: number of living rooms - School: whether it is in a school district, 0 or 1 - Year: year the house was built - Floor: floor of the house Target variable: - Value: selling price of the house in RMB, in units of 10,000 You can use what you learned above to practice on this dataset: splitting and shuffling the data, defining a performance metric, training a model, evaluating model performance, tuning parameters with grid search combined with cross-validation and selecting the best ones, comparing the differences, and finally obtaining the best model's prediction score on the validation set. ###Code ### Your code # Import libraries necessary for this project import numpy as np import pandas as pd import visuals as vs # Supplementary code from sklearn.model_selection import ShuffleSplit from sklearn.metrics import r2_score from sklearn.model_selection import train_test_split # TODO: Import 'make_scorer', 'DecisionTreeRegressor', and 'GridSearchCV' from sklearn.tree import DecisionTreeRegressor from sklearn.metrics import make_scorer from sklearn.model_selection import GridSearchCV # Pretty display for notebooks %matplotlib inline # Load the Beijing housing dataset data = pd.read_csv('bj_housing.csv') prices = data['Value'] features = data.drop('Value', axis = 1) print features.head() print prices.head() # Success # print "Boston housing dataset has {} data points with {} variables each.".format(*data.shape) def performance_metric(y_true, y_predict): """ Calculates and returns the performance score between true and predicted values based on the metric chosen. """ # TODO: Calculate the performance score between 'y_true' and 'y_predict' score = r2_score(y_true, y_predict) # Return the score return score # TODO: Shuffle and split the data into training and testing subsets X_train, X_test, y_train, y_test = train_test_split(features, prices, test_size=0.2, random_state=42) # Success print "Training and testing split was successful." def fit_model(X, y): """ Performs grid search over the 'max_depth' parameter for a decision tree regressor trained on the input data [X, y]. """ # Create cross-validation sets from the training data cv_sets = ShuffleSplit(n_splits = 10, test_size = 0.20, random_state = 0) # TODO: Create a decision tree regressor object regressor = DecisionTreeRegressor(random_state=0) # TODO: Create a dictionary for the parameter 'max_depth' with a range from 1 to 10 params = {'max_depth': range(1, 11)} # TODO: Transform 'performance_metric' into a scoring function using 'make_scorer' scoring_fnc = make_scorer(performance_metric) # TODO: Create the grid search object grid = GridSearchCV(regressor, params, scoring_fnc, cv=cv_sets) # Fit the grid search object to the data to compute the optimal model grid = grid.fit(X, y) # Return the optimal model after fitting the data return grid.best_estimator_ # Fit the training data to the model using grid search reg = fit_model(X_train, y_train) # Produce the value for 'max_depth' print "Parameter 'max_depth' is {} for the optimal model.".format(reg.get_params()['max_depth']) client_data = [[128, 3, 2, 0, 2005, 13], [150, 3, 2, 0, 2005, 13]] # Show predictions for i, price in enumerate(reg.predict(client_data)): print "Predicted selling price for Client {}'s home: ¥{:,.2f}".format(i+1, price) ###Output Area Room Living School Year Floor 0 128 3 1 1 2004 21 1 68 1 2 1 2000 6 2 125 3 2 0 2003 5 3 129 2 2 0 2005 16 4 118 3 2 0 2003 6 0 370 1 330 2 355 3 278 4 340 Name: Value, dtype: int64 Training and testing split was successful. Parameter 'max_depth' is 7 for the optimal model. Predicted selling price for Client 1's home: ¥473.92 Predicted selling price for Client 2's home: ¥454.59
90_video_classification_eco/9-4_1_kinetics_download_for_python2.ipynb
###Markdown 9.4.1 Downloading the Kinetics videos In this file, we download the Kinetics videos. Overview We use the Kinetics video download environment from https://github.com/activitynet/ActivityNet/tree/master/Crawler/Kinetics. Since it is based on Python 2, we create a new virtual environment. The current directory is the Ubuntu home. Run the following commands in a terminal to create a virtual environment with the packages listed in the file "environment.yml" in the "video_download" folder: - source deactivate - conda env create -f ./9_video_classification_eco/video_download/environment.yml Enter the created virtual environment: - source activate kinetics Upgrade the youtube-dl packages: - pip install --upgrade youtube-dl - pip install --upgrade joblib With the preparation done, launch Jupyter Notebook and run this file: - jupyter notebook --port 9999 ###Code import os # Create the folder "data" if it does not exist data_dir = "./data/" if not os.path.exists(data_dir): os.mkdir(data_dir) # Create the folder "kinetics_videos" if it does not exist data_dir = "./data/kinetics_videos/" if not os.path.exists(data_dir): os.mkdir(data_dir) # Run the Python file "download.py" in the "video_download" folder # The YouTube data to fetch is the 8 videos listed in kinetics-400_val_8videos.csv in the "video_download" folder # Videos are saved to the folder "kinetics_videos" inside "data" !python2 ./video_download/download.py ./video_download/kinetics-400_val_8videos.csv ./data/kinetics_videos/ ###Output _____no_output_____
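###Markdown Once the cell above finishes, here is a minimal sketch to check what download.py actually saved; the file names and extensions depend on youtube-dl's choices, so this simply lists whatever is there: ###Code
import os

video_dir = "./data/kinetics_videos/"
for root, _, files in os.walk(video_dir):
    for name in files:
        print(os.path.join(root, name))
###Output _____no_output_____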
Chapter13_CaseStudies/CaseStudyRegression/CaseStudyRegression.ipynb
###Markdown Helper ###Code def print_grid_cv_results(grid_result): print( f"Best model score: {grid_result.best_score_} " f"Best model params: {grid_result.best_params_} " ) means = grid_result.cv_results_["mean_test_score"] stds = grid_result.cv_results_["std_test_score"] params = grid_result.cv_results_["params"] for mean, std, param in zip(means, stds, params): mean = round(mean, 4) std = round(std, 4) print(f"{mean} (+/- {2 * std}) with: {param}") ###Output _____no_output_____ ###Markdown LOAD DATASET ###Code cal_housing = fetch_california_housing() x = cal_housing.data y = cal_housing.target x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3) ###Output _____no_output_____ ###Markdown NORMALIZE DATASET ###Code scaler = StandardScaler() scaler.fit(x_train) x_train = scaler.transform(x_train) x_test = scaler.transform(x_test) ###Output _____no_output_____ ###Markdown Metrics ###Code scoring_metrics = { 'r2_score': make_scorer(r2_score) } ###Output _____no_output_____ ###Markdown LINEAR REGRESSION: ###Code from sklearn.linear_model import LinearRegression regr = LinearRegression() cv_results = cross_validate( regr, x_train, y_train, cv=3, scoring=scoring_metrics ) test_r2_score = cv_results['test_r2_score'] print(f"Mean R2: {np.mean(test_r2_score)}") ###Output Mean R2: 0.6075224720706819 ###Markdown KNN REGRESSOR ###Code from sklearn.neighbors import KNeighborsRegressor params = { "n_neighbors": [i for i in range(2, 24, 2)], "weights": ["uniform", "distance"] } regr = KNeighborsRegressor() grid = GridSearchCV(regr, params, cv=3, n_jobs=-1) grid_result = grid.fit(x_train, y_train) print_grid_cv_results(grid_result) ###Output Best model score: 0.6836115816373965 Best model params: {'n_neighbors': 14, 'weights': 'distance'} 0.613 (+/- 0.0122) with: {'n_neighbors': 2, 'weights': 'uniform'} 0.6147 (+/- 0.0124) with: {'n_neighbors': 2, 'weights': 'distance'} 0.6609 (+/- 0.0078) with: {'n_neighbors': 4, 'weights': 'uniform'} 0.664 (+/- 0.0082) with: {'n_neighbors': 4, 'weights': 'distance'} 0.6717 (+/- 0.0024) with: {'n_neighbors': 6, 'weights': 'uniform'} 0.6759 (+/- 0.0028) with: {'n_neighbors': 6, 'weights': 'distance'} 0.6742 (+/- 0.0058) with: {'n_neighbors': 8, 'weights': 'uniform'} 0.6791 (+/- 0.0052) with: {'n_neighbors': 8, 'weights': 'distance'} 0.6774 (+/- 0.0062) with: {'n_neighbors': 10, 'weights': 'uniform'} 0.6824 (+/- 0.0054) with: {'n_neighbors': 10, 'weights': 'distance'} 0.6784 (+/- 0.0068) with: {'n_neighbors': 12, 'weights': 'uniform'} 0.6836 (+/- 0.0056) with: {'n_neighbors': 12, 'weights': 'distance'} 0.6782 (+/- 0.0064) with: {'n_neighbors': 14, 'weights': 'uniform'} 0.6836 (+/- 0.0054) with: {'n_neighbors': 14, 'weights': 'distance'} 0.6778 (+/- 0.0078) with: {'n_neighbors': 16, 'weights': 'uniform'} 0.6833 (+/- 0.0066) with: {'n_neighbors': 16, 'weights': 'distance'} 0.6764 (+/- 0.0078) with: {'n_neighbors': 18, 'weights': 'uniform'} 0.6823 (+/- 0.0068) with: {'n_neighbors': 18, 'weights': 'distance'} 0.6748 (+/- 0.0078) with: {'n_neighbors': 20, 'weights': 'uniform'} 0.6809 (+/- 0.0068) with: {'n_neighbors': 20, 'weights': 'distance'} 0.6736 (+/- 0.0062) with: {'n_neighbors': 22, 'weights': 'uniform'} 0.6799 (+/- 0.0058) with: {'n_neighbors': 22, 'weights': 'distance'} ###Markdown RANDOM FOREST REGRESSOR ###Code from sklearn.ensemble import RandomForestRegressor params = { "n_estimators": [50*i for i in range(4, 10)], "max_depth": [i for i in range(20, 51, 10)] + [None] } regr = RandomForestRegressor() grid = GridSearchCV(regr, params, cv=3,
n_jobs=-1) grid_result = grid.fit(x_train, y_train) print_grid_cv_results(grid_result) ###Output Best model score: 0.7943913775553941 Best model params: {'max_depth': 50, 'n_estimators': 400} 0.7936 (+/- 0.0138) with: {'max_depth': 20, 'n_estimators': 200} 0.7936 (+/- 0.0124) with: {'max_depth': 20, 'n_estimators': 250} 0.7943 (+/- 0.0116) with: {'max_depth': 20, 'n_estimators': 300} 0.793 (+/- 0.0116) with: {'max_depth': 20, 'n_estimators': 350} 0.7935 (+/- 0.0122) with: {'max_depth': 20, 'n_estimators': 400} 0.793 (+/- 0.0136) with: {'max_depth': 20, 'n_estimators': 450} 0.7935 (+/- 0.0124) with: {'max_depth': 30, 'n_estimators': 200} 0.7933 (+/- 0.012) with: {'max_depth': 30, 'n_estimators': 250} 0.7935 (+/- 0.0124) with: {'max_depth': 30, 'n_estimators': 300} 0.7942 (+/- 0.0124) with: {'max_depth': 30, 'n_estimators': 350} 0.7944 (+/- 0.0114) with: {'max_depth': 30, 'n_estimators': 400} 0.7941 (+/- 0.0126) with: {'max_depth': 30, 'n_estimators': 450} 0.7925 (+/- 0.0128) with: {'max_depth': 40, 'n_estimators': 200} 0.7935 (+/- 0.012) with: {'max_depth': 40, 'n_estimators': 250} 0.7932 (+/- 0.0128) with: {'max_depth': 40, 'n_estimators': 300} 0.7938 (+/- 0.0126) with: {'max_depth': 40, 'n_estimators': 350} 0.7938 (+/- 0.0132) with: {'max_depth': 40, 'n_estimators': 400} 0.7937 (+/- 0.013) with: {'max_depth': 40, 'n_estimators': 450} 0.7936 (+/- 0.012) with: {'max_depth': 50, 'n_estimators': 200} 0.7938 (+/- 0.0122) with: {'max_depth': 50, 'n_estimators': 250} 0.7935 (+/- 0.0128) with: {'max_depth': 50, 'n_estimators': 300} 0.7941 (+/- 0.0128) with: {'max_depth': 50, 'n_estimators': 350} 0.7944 (+/- 0.0114) with: {'max_depth': 50, 'n_estimators': 400} 0.7934 (+/- 0.012) with: {'max_depth': 50, 'n_estimators': 450} 0.7938 (+/- 0.0126) with: {'max_depth': None, 'n_estimators': 200} 0.7932 (+/- 0.0136) with: {'max_depth': None, 'n_estimators': 250} 0.7927 (+/- 0.012) with: {'max_depth': None, 'n_estimators': 300} 0.7937 (+/- 0.013) with: {'max_depth': None, 'n_estimators': 350} 0.7942 (+/- 0.0116) with: {'max_depth': None, 'n_estimators': 400} 0.7939 (+/- 0.012) with: {'max_depth': None, 'n_estimators': 450} ###Markdown GRADIENT BOOSTING REGRESSOR ###Code from sklearn.ensemble import GradientBoostingRegressor params = { "n_estimators": [50*i for i in range(4, 10)], "max_depth": [i for i in range(20, 51, 10)] + [None] } regr = GradientBoostingRegressor() grid = GridSearchCV(regr, params, cv=3, n_jobs=-1) grid_result = grid.fit(x_train, y_train) print_grid_cv_results(grid_result) ###Output Best model score: 0.6490966673085837 Best model params: {'max_depth': 20, 'n_estimators': 300} 0.6478 (+/- 0.0224) with: {'max_depth': 20, 'n_estimators': 200} 0.6488 (+/- 0.0254) with: {'max_depth': 20, 'n_estimators': 250} 0.6491 (+/- 0.0244) with: {'max_depth': 20, 'n_estimators': 300} 0.6489 (+/- 0.0234) with: {'max_depth': 20, 'n_estimators': 350} 0.6483 (+/- 0.022) with: {'max_depth': 20, 'n_estimators': 400} 0.6465 (+/- 0.0242) with: {'max_depth': 20, 'n_estimators': 450} 0.6084 (+/- 0.0368) with: {'max_depth': 30, 'n_estimators': 200} 0.6083 (+/- 0.0374) with: {'max_depth': 30, 'n_estimators': 250} 0.6084 (+/- 0.037) with: {'max_depth': 30, 'n_estimators': 300} 0.6088 (+/- 0.0356) with: {'max_depth': 30, 'n_estimators': 350} 0.6089 (+/- 0.0364) with: {'max_depth': 30, 'n_estimators': 400} 0.6099 (+/- 0.036) with: {'max_depth': 30, 'n_estimators': 450} 0.6083 (+/- 0.0388) with: {'max_depth': 40, 'n_estimators': 200} 0.6087 (+/- 0.0358) with: {'max_depth': 40, 'n_estimators': 250} 0.6092 (+/- 0.0386) 
with: {'max_depth': 40, 'n_estimators': 300} 0.6089 (+/- 0.0376) with: {'max_depth': 40, 'n_estimators': 350} 0.6084 (+/- 0.0354) with: {'max_depth': 40, 'n_estimators': 400} 0.6091 (+/- 0.0406) with: {'max_depth': 40, 'n_estimators': 450} 0.6078 (+/- 0.0414) with: {'max_depth': 50, 'n_estimators': 200} 0.6084 (+/- 0.0388) with: {'max_depth': 50, 'n_estimators': 250} 0.6102 (+/- 0.0372) with: {'max_depth': 50, 'n_estimators': 300} 0.6084 (+/- 0.0388) with: {'max_depth': 50, 'n_estimators': 350} 0.6084 (+/- 0.0402) with: {'max_depth': 50, 'n_estimators': 400} 0.6097 (+/- 0.0398) with: {'max_depth': 50, 'n_estimators': 450} 0.6081 (+/- 0.0364) with: {'max_depth': None, 'n_estimators': 200} 0.6089 (+/- 0.0358) with: {'max_depth': None, 'n_estimators': 250} 0.609 (+/- 0.0354) with: {'max_depth': None, 'n_estimators': 300} 0.6074 (+/- 0.0362) with: {'max_depth': None, 'n_estimators': 350} 0.6086 (+/- 0.0364) with: {'max_depth': None, 'n_estimators': 400} 0.6084 (+/- 0.0398) with: {'max_depth': None, 'n_estimators': 450} ###Markdown SVM REGRESSOR: ###Code from sklearn.svm import SVR params = { "kernel": ["linear", "sigmoid", "rbf", "poly"] } regr = SVR() grid = GridSearchCV(regr, params, cv=3, n_jobs=-1) grid_result = grid.fit(x_train, y_train) print_grid_cv_results(grid_result) ###Output Best model score: 0.7275190171474754 Best model params: {'kernel': 'rbf'} -1.5376 (+/- 6.0066) with: {'kernel': 'linear'} -4294.631 (+/- 1709.0764) with: {'kernel': 'sigmoid'} 0.7275 (+/- 0.0108) with: {'kernel': 'rbf'} -2945.7243 (+/- 8261.0024) with: {'kernel': 'poly'} ###Markdown BEST MODEL: ###Code best_params = {'max_depth': 50, 'n_estimators': 400} best_regressor = RandomForestRegressor regr = best_regressor(**best_params) regr.fit(x_train, y_train) y_pred = regr.predict(x_test) mse = mean_squared_error(y_test, y_pred) mae = mean_absolute_error(y_test, y_pred) r2 = r2_score(y_test, y_pred) print(f"MSE: {mse}") print(f"MAE: {mae}") print(f"R2: {r2}") ###Output MSE: 0.2539344596366583 MAE: 0.33027872972787503 R2: 0.806532038049789 ###Markdown Residual Plot of Best Model ###Code def plot_residuals(regr, x_train, y_train, x_test, y_test): y_pred_train = regr.predict(x_train) y_pred_test = regr.predict(x_test) min_val = min(np.min(y_pred_train), np.min(y_pred_test)) max_val = max(np.max(y_pred_train), np.max(y_pred_test)) plt.scatter(y_pred_train, y_pred_train - y_train, color="blue") plt.scatter(y_pred_test, y_pred_test - y_test, color="red") plt.hlines(y=0, xmin=min_val, xmax=max_val) plt.legend(["Train", "Test"]) plt.show() plot_residuals(regr, x_train, y_train, x_test, y_test) ###Output _____no_output_____
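###Markdown As a follow-up (not part of the original run), it can help to see which inputs the winning random forest actually relies on. This is a minimal sketch, assuming `regr` is the random forest fitted above and `cal_housing` is still in scope; `feature_importances_` is available on any fitted sklearn forest. ###Code
# Sketch: rank the California-housing features by importance in the fitted forest.
importances = regr.feature_importances_
for idx in np.argsort(importances)[::-1]:
    print(f"{cal_housing.feature_names[idx]}: {importances[idx]:.4f}")
###Output _____no_output_____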
deepLSpec/c2/w3/assgn3.ipynb
###Markdown TensorFlow TutorialWelcome to this week's programming assignment. Until now, you've always used numpy to build neural networks. Now we will step you through a deep learning framework that will allow you to build neural networks more easily. Machine learning frameworks like TensorFlow, PaddlePaddle, Torch, Caffe, Keras, and many others can speed up your machine learning development significantly. All of these frameworks also have a lot of documentation, which you should feel free to read. In this assignment, you will learn to do the following in TensorFlow: - Initialize variables- Start your own session- Train algorithms - Implement a Neural NetworkProgramming frameworks can not only shorten your coding time, but sometimes also perform optimizations that speed up your code. Updates If you were working on the notebook before this update...* The current notebook is version "v3b".* You can find your original work saved in the notebook with the previous version name (it may be either "TensorFlow Tutorial version 3" or "TensorFlow Tutorial version 3a".) * To view the file directory, click on the "Coursera" icon in the top left of this notebook. List of updates* forward_propagation instruction now says 'A1' instead of 'a1' in the formula for Z2; and is updated to say 'A2' instead of 'Z2' in the formula for Z3.* create_placeholders instructions refer to the data type "tf.float32" instead of float.* in the model function, the x axis of the plot now says "iterations (per fives)" instead of iterations (per tens)* In the linear_function, comments remind students to create the variables in the order suggested by the starter code. The comments are updated to reflect this order.* The test of the cost function now creates the logits without passing them through a sigmoid function (since the cost function will include the sigmoid in the built-in tensorflow function).* In the 'model' function, the minibatch_cost is now divided by minibatch_size (instead of num_minibatches).* Updated print statements and 'expected output' that are used to check functions, for easier visual comparison. 1 - Exploring the Tensorflow LibraryTo start, you will import the library: ###Code import math import numpy as np import h5py import matplotlib.pyplot as plt import tensorflow as tf from tensorflow.python.framework import ops from tf_utils import load_dataset, random_mini_batches, convert_to_one_hot, predict %matplotlib inline np.random.seed(1) ###Output _____no_output_____ ###Markdown Now that you have imported the library, we will walk you through its different applications. You will start with an example, where we compute for you the loss of one training example. $$loss = \mathcal{L}(\hat{y}, y) = (\hat y^{(i)} - y^{(i)})^2 \tag{1}$$ ###Code y_hat = tf.constant(36, name='y_hat') # Define y_hat constant. Set to 36. y = tf.constant(39, name='y') # Define y. Set to 39 loss = tf.Variable((y - y_hat)**2, name='loss') # Create a variable for the loss init = tf.global_variables_initializer() # When init is run later (session.run(init)), # the loss variable will be initialized and ready to be computed with tf.Session() as session: # Create a session and print the output session.run(init) # Initializes the variables print(session.run(loss)) # Prints the loss ###Output 9 ###Markdown Writing and running programs in TensorFlow has the following steps:1. Create Tensors (variables) that are not yet executed/evaluated. 2. Write operations between those Tensors.3. Initialize your Tensors. 4. Create a Session. 5. Run the Session. 
This will run the operations you'd written above. Therefore, when we created a variable for the loss, we simply defined the loss as a function of other quantities, but did not evaluate its value. To evaluate it, we had to run `init=tf.global_variables_initializer()`. That initialized the loss variable, and in the last line we were finally able to evaluate the value of `loss` and print its value.Now let us look at an easy example. Run the cell below: ###Code a = tf.constant(2) b = tf.constant(10) c = tf.multiply(a,b) print(c) ###Output Tensor("Mul:0", shape=(), dtype=int32) ###Markdown As expected, you will not see 20! You got a tensor saying that the result is a tensor that does not have the shape attribute, and is of type "int32". All you did was put in the 'computation graph', but you have not run this computation yet. In order to actually multiply the two numbers, you will have to create a session and run it. ###Code sess = tf.Session() print(sess.run(c)) ###Output 20 ###Markdown Great! To summarize, **remember to initialize your variables, create a session and run the operations inside the session**. Next, you'll also have to know about placeholders. A placeholder is an object whose value you can specify only later. To specify values for a placeholder, you can pass in values by using a "feed dictionary" (`feed_dict` variable). Below, we created a placeholder for x. This allows us to pass in a number later when we run the session. ###Code # Change the value of x in the feed_dict x = tf.placeholder(tf.int64, name = 'x') print(sess.run(2 * x, feed_dict = {x: 3})) sess.close() ###Output 6 ###Markdown When you first defined `x` you did not have to specify a value for it. A placeholder is simply a variable that you will assign data to only later, when running the session. We say that you **feed data** to these placeholders when running the session. Here's what's happening: When you specify the operations needed for a computation, you are telling TensorFlow how to construct a computation graph. The computation graph can have some placeholders whose values you will specify only later. Finally, when you run the session, you are telling TensorFlow to execute the computation graph. 1.1 - Linear functionLets start this programming exercise by computing the following equation: $Y = WX + b$, where $W$ and $X$ are random matrices and b is a random vector. **Exercise**: Compute $WX + b$ where $W, X$, and $b$ are drawn from a random normal distribution. W is of shape (4, 3), X is (3,1) and b is (4,1). As an example, here is how you would define a constant X that has shape (3,1):```pythonX = tf.constant(np.random.randn(3,1), name = "X")```You might find the following functions helpful: - tf.matmul(..., ...) to do a matrix multiplication- tf.add(..., ...) to do an addition- np.random.randn(...) to initialize randomly ###Code # GRADED FUNCTION: linear_function def linear_function(): """ Implements a linear function: Initializes X to be a random tensor of shape (3,1) Initializes W to be a random tensor of shape (4,3) Initializes b to be a random tensor of shape (4,1) Returns: result -- runs the session for Y = WX + b """ np.random.seed(1) """ Note, to ensure that the "random" numbers generated match the expected results, please create the variables in the order given in the starting code below. (Do not re-arrange the order). 
""" ### START CODE HERE ### (4 lines of code) X = tf.constant(np.random.randn(3,1), name = "X") W = tf.constant(np.random.randn(4,3), name = "W") b = tf.constant(np.random.randn(4,1), name = "b") Y = tf.add(tf.matmul(W, X), b) ### END CODE HERE ### # Create the session using tf.Session() and run it with sess.run(...) on the variable you want to calculate ### START CODE HERE ### sess = tf.Session() result = sess.run(fetches=Y) ### END CODE HERE ### # close the session sess.close() return result print( "result = \n" + str(linear_function())) ###Output [[-2.15657382] [ 2.95891446] [-1.08926781] [-0.84538042]] result = None ###Markdown *** Expected Output ***: ```result = [[-2.15657382] [ 2.95891446] [-1.08926781] [-0.84538042]]``` 1.2 - Computing the sigmoid Great! You just implemented a linear function. Tensorflow offers a variety of commonly used neural network functions like `tf.sigmoid` and `tf.softmax`. For this exercise lets compute the sigmoid function of an input. You will do this exercise using a placeholder variable `x`. When running the session, you should use the feed dictionary to pass in the input `z`. In this exercise, you will have to (i) create a placeholder `x`, (ii) define the operations needed to compute the sigmoid using `tf.sigmoid`, and then (iii) run the session. ** Exercise **: Implement the sigmoid function below. You should use the following: - `tf.placeholder(tf.float32, name = "...")`- `tf.sigmoid(...)`- `sess.run(..., feed_dict = {x: z})`Note that there are two typical ways to create and use sessions in tensorflow: **Method 1:**```pythonsess = tf.Session() Run the variables initialization (if needed), run the operationsresult = sess.run(..., feed_dict = {...})sess.close() Close the session```**Method 2:**```pythonwith tf.Session() as sess: run the variables initialization (if needed), run the operations result = sess.run(..., feed_dict = {...}) This takes care of closing the session for you :)``` ###Code # GRADED FUNCTION: sigmoid def sigmoid(z): """ Computes the sigmoid of z Arguments: z -- input value, scalar or vector Returns: results -- the sigmoid of z """ ### START CODE HERE ### ( approx. 4 lines of code) # Create a placeholder for x. Name it 'x'. x = tf.placeholder(tf.float32, name = "x") # compute sigmoid(x) sigmoid = tf.sigmoid(x) # Create a session, and run it. Please use the method 2 explained above. # You should use a feed_dict to pass z's value to x. with tf.Session() as sess: # Run session and call the output "result" result = sess.run(fetches=sigmoid, feed_dict = {x: z}) ### END CODE HERE ### return result print ("sigmoid(0) = " + str(sigmoid(0))) print ("sigmoid(12) = " + str(sigmoid(12))) ###Output sigmoid(0) = 0.5 sigmoid(12) = 0.999994 ###Markdown *** Expected Output ***: **sigmoid(0)**0.5 **sigmoid(12)**0.999994 **To summarize, you how know how to**:1. Create placeholders2. Specify the computation graph corresponding to operations you want to compute3. Create the session4. Run the session, using a feed dictionary if necessary to specify placeholder variables' values. 1.3 - Computing the CostYou can also use a built-in function to compute the cost of your neural network. So instead of needing to write code to compute this as a function of $a^{[2](i)}$ and $y^{(i)}$ for i=1...m: $$ J = - \frac{1}{m} \sum_{i = 1}^m \large ( \small y^{(i)} \log a^{ [2] (i)} + (1-y^{(i)})\log (1-a^{ [2] (i)} )\large )\small\tag{2}$$you can do it in one line of code in tensorflow!**Exercise**: Implement the cross entropy loss. 
###Code # GRADED FUNCTION: cost def cost(logits, labels): """ Computes the cost using the sigmoid cross entropy Arguments: logits -- vector containing z, output of the last linear unit (before the final sigmoid activation) labels -- vector of labels y (1 or 0) Note: What we've been calling "z" and "y" in this class are respectively called "logits" and "labels" in the TensorFlow documentation. So logits will feed into z, and labels into y. Returns: cost -- runs the session of the cost (formula (2)) """ ### START CODE HERE ### # Create the placeholders for "logits" (z) and "labels" (y) (approx. 2 lines) z = tf.placeholder(tf.float32, name='logits') y = tf.placeholder(tf.float32, name='labels') # Use the loss function (approx. 1 line) cost = tf.nn.sigmoid_cross_entropy_with_logits(logits=z, labels=y) # Create a session (approx. 1 line). See method 1 above. sess = tf.Session() # Run the session (approx. 1 line). cost = sess.run(fetches=cost, feed_dict={z: logits, y: labels} ) # Close the session (approx. 1 line). See method 1 above. sess.close() ### END CODE HERE ### return cost logits = np.array([0.2,0.4,0.7,0.9]) cost = cost(logits, np.array([0,0,1,1])) print ("cost = " + str(cost)) ###Output cost = [ 0.79813886 0.91301525 0.40318605 0.34115386] ###Markdown ** Expected Output** : ```cost = [ 0.79813886 0.91301525 0.40318605 0.34115386]``` 1.4 - Using One Hot encodingsMany times in deep learning you will have a y vector with numbers ranging from 0 to C-1, where C is the number of classes. If C is for example 4, then you might have the following y vector which you will need to convert as follows:This is called a "one hot" encoding, because in the converted representation exactly one element of each column is "hot" (meaning set to 1). To do this conversion in numpy, you might have to write a few lines of code. In tensorflow, you can use one line of code: - tf.one_hot(labels, depth, axis)
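For contrast, those "few lines" of numpy could look like this (a sketch using an identity-matrix lookup; `labels` and `C` as in the exercise below):
```python
import numpy as np

labels = np.array([1, 2, 3, 0, 2, 1])
C = 4
one_hot_np = np.eye(C)[labels].T   # shape (C, number of examples), matching axis=0
print(one_hot_np)
```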
**Exercise:** Implement the function below to take one vector of labels and the total number of classes $C$, and return the one hot encoding. Use `tf.one_hot()` to do this. ###Code # GRADED FUNCTION: one_hot_matrix def one_hot_matrix(labels, C): """ Creates a matrix where the i-th row corresponds to the ith class number and the jth column corresponds to the jth training example. So if example j had label i, then entry (i,j) will be 1. Arguments: labels -- vector containing the labels C -- number of classes, the depth of the one hot dimension Returns: one_hot -- one hot matrix """ ### START CODE HERE ### # Create a tf.constant equal to C (depth), name it 'C'. (approx. 1 line) C = tf.constant(C, name='C') # Use tf.one_hot, be careful with the axis (approx. 1 line) one_hot_matrix = tf.one_hot(indices=labels, depth=C, axis=0) # Create the session (approx. 1 line) sess = tf.Session() # Run the session (approx. 1 line) one_hot = sess.run(fetches=one_hot_matrix) # Close the session (approx. 1 line). See method 1 above. sess.close() ### END CODE HERE ### return one_hot labels = np.array([1,2,3,0,2,1]) one_hot = one_hot_matrix(labels, C = 4) print ("one_hot = \n" + str(one_hot)) ###Output one_hot = [[ 0. 0. 0. 1. 0. 0.] [ 1. 0. 0. 0. 0. 1.] [ 0. 1. 0. 0. 1. 0.] [ 0. 0. 1. 0. 0. 0.]] ###Markdown **Expected Output**: ```one_hot = [[ 0. 0. 0. 1. 0. 0.] [ 1. 0. 0. 0. 0. 1.] [ 0. 1. 0. 0. 1. 0.] [ 0. 0. 1. 0. 0. 0.]]``` 1.5 - Initialize with zeros and onesNow you will learn how to initialize a vector of zeros and ones. The function you will be calling is `tf.ones()`. To initialize with zeros you could use tf.zeros() instead. These functions take in a shape and return an array of dimension shape full of zeros and ones respectively. **Exercise:** Implement the function below to take in a shape and to return an array of dimension `shape` filled with ones. - tf.ones(shape) ###Code # GRADED FUNCTION: ones def ones(shape): """ Creates an array of ones of dimension shape Arguments: shape -- shape of the array you want to create Returns: ones -- array containing only ones """ ### START CODE HERE ### # Create "ones" tensor using tf.ones(...). (approx. 1 line) ones = tf.ones(shape) # Create the session (approx. 1 line) sess = tf.Session() # Run the session to compute 'ones' (approx. 1 line) ones = sess.run(fetches=ones) # Close the session (approx. 1 line). See method 1 above. sess.close() ### END CODE HERE ### return ones print ("ones = " + str(ones([3]))) ###Output ones = [ 1. 1. 1.] ###Markdown **Expected Output:** **ones** [ 1. 1. 1.] 2 - Building your first neural network in tensorflowIn this part of the assignment you will build a neural network using tensorflow. Remember that there are two parts to implement a tensorflow model:- Create the computation graph- Run the graphLet's delve into the problem you'd like to solve! 2.0 - Problem statement: SIGNS DatasetOne afternoon, with some friends we decided to teach our computers to decipher sign language. We spent a few hours taking pictures in front of a white wall and came up with the following dataset. It's now your job to build an algorithm that would facilitate communications from a speech-impaired person to someone who doesn't understand sign language.- **Training set**: 1080 pictures (64 by 64 pixels) of signs representing numbers from 0 to 5 (180 pictures per number).- **Test set**: 120 pictures (64 by 64 pixels) of signs representing numbers from 0 to 5 (20 pictures per number).Note that this is a subset of the SIGNS dataset. The complete dataset contains many more signs.Here are examples for each number, and an explanation of how we represent the labels. These are the original pictures, before we lowered the image resolution to 64 by 64 pixels. **Figure 1**: SIGNS dataset Run the following code to load the dataset. ###Code # Loading the dataset X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset() ###Output _____no_output_____ ###Markdown Change the index below and run the cell to visualize some examples in the dataset. ###Code # Example of a picture index = 31 plt.imshow(X_train_orig[index]) print ("y = " + str(np.squeeze(Y_train_orig[:, index]))) ###Output y = 0 ###Markdown As usual you flatten the image dataset, then normalize it by dividing by 255. On top of that, you will convert each label to a one-hot vector as shown in Figure 1. Run the cell below to do so. 
###Code # Flatten the training and test images X_train_flatten = X_train_orig.reshape(X_train_orig.shape[0], -1).T X_test_flatten = X_test_orig.reshape(X_test_orig.shape[0], -1).T # Normalize image vectors X_train = X_train_flatten/255. X_test = X_test_flatten/255. # Convert training and test labels to one hot matrices Y_train = convert_to_one_hot(Y_train_orig, 6) Y_test = convert_to_one_hot(Y_test_orig, 6) print ("number of training examples = " + str(X_train.shape[1])) print ("number of test examples = " + str(X_test.shape[1])) print ("X_train shape: " + str(X_train.shape)) print ("Y_train shape: " + str(Y_train.shape)) print ("X_test shape: " + str(X_test.shape)) print ("Y_test shape: " + str(Y_test.shape)) ###Output number of training examples = 1080 number of test examples = 120 X_train shape: (12288, 1080) Y_train shape: (6, 1080) X_test shape: (12288, 120) Y_test shape: (6, 120) ###Markdown **Note** that 12288 comes from $64 \times 64 \times 3$. Each image is square, 64 by 64 pixels, and 3 is for the RGB colors. Please make sure all these shapes make sense to you before continuing. **Your goal** is to build an algorithm capable of recognizing a sign with high accuracy. To do so, you are going to build a tensorflow model that is almost the same as one you have previously built in numpy for cat recognition (but now using a softmax output). It is a great occasion to compare your numpy implementation to the tensorflow one. **The model** is *LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX*. The SIGMOID output layer has been converted to a SOFTMAX. A SOFTMAX layer generalizes SIGMOID to when there are more than two classes. 2.1 - Create placeholdersYour first task is to create placeholders for `X` and `Y`. This will allow you to later pass your training data in when you run your session. **Exercise:** Implement the function below to create the placeholders in tensorflow. ###Code # GRADED FUNCTION: create_placeholders def create_placeholders(n_x, n_y): """ Creates the placeholders for the tensorflow session. Arguments: n_x -- scalar, size of an image vector (num_px * num_px = 64 * 64 * 3 = 12288) n_y -- scalar, number of classes (from 0 to 5, so -> 6) Returns: X -- placeholder for the data input, of shape [n_x, None] and dtype "tf.float32" Y -- placeholder for the input labels, of shape [n_y, None] and dtype "tf.float32" Tips: - You will use None because it lets us be flexible on the number of examples you will use for the placeholders. In fact, the number of examples during test/train is different. """ ### START CODE HERE ### (approx. 2 lines) X = tf.placeholder(dtype=tf.float32, shape=[n_x, None], name='X') Y = tf.placeholder(dtype=tf.float32, shape=[n_y, None], name='Y') ### END CODE HERE ### return X, Y X, Y = create_placeholders(12288, 6) print ("X = " + str(X)) print ("Y = " + str(Y)) ###Output X = Tensor("X_1:0", shape=(12288, ?), dtype=float32) Y = Tensor("Y:0", shape=(6, ?), dtype=float32) ###Markdown **Expected Output**: **X** Tensor("Placeholder_1:0", shape=(12288, ?), dtype=float32) (not necessarily Placeholder_1) **Y** Tensor("Placeholder_2:0", shape=(6, ?), dtype=float32) (not necessarily Placeholder_2) 2.2 - Initializing the parametersYour second task is to initialize the parameters in tensorflow.**Exercise:** Implement the function below to initialize the parameters in tensorflow. You are going to use Xavier Initialization for weights and Zero Initialization for biases. The shapes are given below. 
As an example, to help you, for W1 and b1 you could use: ```pythonW1 = tf.get_variable("W1", [25,12288], initializer = tf.contrib.layers.xavier_initializer(seed = 1))b1 = tf.get_variable("b1", [25,1], initializer = tf.zeros_initializer())```Please use `seed = 1` to make sure your results match ours. ###Code # GRADED FUNCTION: initialize_parameters def initialize_parameters(): """ Initializes parameters to build a neural network with tensorflow. The shapes are: W1 : [25, 12288] b1 : [25, 1] W2 : [12, 25] b2 : [12, 1] W3 : [6, 12] b3 : [6, 1] Returns: parameters -- a dictionary of tensors containing W1, b1, W2, b2, W3, b3 """ tf.set_random_seed(1) # so that your "random" numbers match ours ### START CODE HERE ### (approx. 6 lines of code) W1 = tf.get_variable("W1", [25,12288], initializer=tf.contrib.layers.xavier_initializer(seed=1)) b1 = tf.get_variable("b1", [25,1], initializer=tf.zeros_initializer()) W2 = tf.get_variable("W2", [12,25], initializer=tf.contrib.layers.xavier_initializer(seed=1)) b2 = tf.get_variable("b2", [12,1], initializer=tf.zeros_initializer()) W3 = tf.get_variable("W3", [6,12], initializer=tf.contrib.layers.xavier_initializer(seed=1)) b3 = tf.get_variable("b3", [6,1], initializer=tf.zeros_initializer()) ### END CODE HERE ### parameters = {"W1": W1, "b1": b1, "W2": W2, "b2": b2, "W3": W3, "b3": b3} return parameters tf.reset_default_graph() with tf.Session() as sess: parameters = initialize_parameters() print("W1 = " + str(parameters["W1"])) print("b1 = " + str(parameters["b1"])) print("W2 = " + str(parameters["W2"])) print("b2 = " + str(parameters["b2"])) ###Output W1 = <tf.Variable 'W1:0' shape=(25, 12288) dtype=float32_ref> b1 = <tf.Variable 'b1:0' shape=(25, 1) dtype=float32_ref> W2 = <tf.Variable 'W2:0' shape=(12, 25) dtype=float32_ref> b2 = <tf.Variable 'b2:0' shape=(12, 1) dtype=float32_ref> ###Markdown **Expected Output**: **W1** **b1** **W2** **b2** As expected, the parameters haven't been evaluated yet. 2.3 - Forward propagation in tensorflow You will now implement the forward propagation module in tensorflow. The function will take in a dictionary of parameters and it will complete the forward pass. The functions you will be using are: - `tf.add(...,...)` to do an addition- `tf.matmul(...,...)` to do a matrix multiplication- `tf.nn.relu(...)` to apply the ReLU activation**Question:** Implement the forward pass of the neural network. We commented for you the numpy equivalents so that you can compare the tensorflow implementation to numpy. It is important to note that the forward propagation stops at `z3`. The reason is that in tensorflow the last linear layer output is given as input to the function computing the loss. Therefore, you don't need `a3`! ###Code # GRADED FUNCTION: forward_propagation def forward_propagation(X, parameters): """ Implements the forward propagation for the model: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX Arguments: X -- input dataset placeholder, of shape (input size, number of examples) parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3" the shapes are given in initialize_parameters Returns: Z3 -- the output of the last LINEAR unit """ # Retrieve the parameters from the dictionary "parameters" W1 = parameters['W1'] b1 = parameters['b1'] W2 = parameters['W2'] b2 = parameters['b2'] W3 = parameters['W3'] b3 = parameters['b3'] ### START CODE HERE ### (approx. 
5 lines) # Numpy Equivalents: Z1 = tf.add(tf.matmul(W1, X), b1, name='Z1') # Z1 = np.dot(W1, X) + b1 A1 = tf.nn.relu(features=Z1, name='A1') # A1 = relu(Z1) Z2 = tf.add(tf.matmul(W2, A1), b2, name='Z2') # Z2 = np.dot(W2, A1) + b2 A2 = tf.nn.relu(features=Z2, name='A2') # A2 = relu(Z2) Z3 = tf.add(tf.matmul(W3, A2), b3, name='Z3') # Z3 = np.dot(W3, A2) + b3 ### END CODE HERE ### return Z3 tf.reset_default_graph() with tf.Session() as sess: X, Y = create_placeholders(12288, 6) parameters = initialize_parameters() Z3 = forward_propagation(X, parameters) print("Z3 = " + str(Z3)) ###Output Z3 = Tensor("Z3:0", shape=(6, ?), dtype=float32) ###Markdown **Expected Output**: **Z3** Tensor("Add_2:0", shape=(6, ?), dtype=float32) You may have noticed that the forward propagation doesn't output any cache. You will understand why below, when we get to backpropagation. 2.4 Compute costAs seen before, it is very easy to compute the cost using:```pythontf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = ..., labels = ...))```**Question**: Implement the cost function below. - It is important to know that the "`logits`" and "`labels`" inputs of `tf.nn.softmax_cross_entropy_with_logits` are expected to be of shape (number of examples, num_classes). We have thus transposed Z3 and Y for you.- Besides, `tf.reduce_mean` basically takes the mean over the examples. ###Code # GRADED FUNCTION: compute_cost def compute_cost(Z3, Y): """ Computes the cost Arguments: Z3 -- output of forward propagation (output of the last LINEAR unit), of shape (6, number of examples) Y -- "true" labels vector placeholder, same shape as Z3 Returns: cost - Tensor of the cost function """ # to fit the tensorflow requirement for tf.nn.softmax_cross_entropy_with_logits(...,...) logits = tf.transpose(Z3) labels = tf.transpose(Y) ### START CODE HERE ### (1 line of code) cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)) ### END CODE HERE ### return cost tf.reset_default_graph() with tf.Session() as sess: X, Y = create_placeholders(12288, 6) parameters = initialize_parameters() Z3 = forward_propagation(X, parameters) cost = compute_cost(Z3, Y) print("cost = " + str(cost)) ###Output cost = Tensor("Mean:0", shape=(), dtype=float32) ###Markdown **Expected Output**: **cost** Tensor("Mean:0", shape=(), dtype=float32) 2.5 - Backward propagation & parameter updatesThis is where you become grateful to programming frameworks. All the backpropagation and the parameter updates are taken care of in 1 line of code. It is very easy to incorporate this line in the model.After you compute the cost function, you will create an "`optimizer`" object. You have to call this object along with the cost when running the tf.session. When called, it will perform an optimization on the given cost with the chosen method and learning rate.For instance, for gradient descent the optimizer would be:```pythonoptimizer = tf.train.GradientDescentOptimizer(learning_rate = learning_rate).minimize(cost)```To make the optimization you would do:```python_ , c = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})```This computes the backpropagation by passing through the tensorflow graph in the reverse order. From cost to inputs.**Note** When coding, we often use `_` as a "throwaway" variable to store values that we won't need to use later. Here, `_` takes on the evaluated value of `optimizer`, which we don't need (and `c` takes the value of the `cost` variable). 
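To see the whole pattern in isolation, here is a toy sketch (not part of the assignment) that minimizes $(w - 5)^2$ with gradient descent; each `sess.run` on the optimizer performs one full backward pass and parameter update:
```python
import tensorflow as tf

w = tf.Variable(0.0, name="w_toy")
toy_cost = tf.square(w - 5.)
toy_optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(toy_cost)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(100):
        _, c = sess.run([toy_optimizer, toy_cost])
    print(sess.run(w))   # approaches 5.0
```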
2.6 - Building the modelNow, you will bring it all together! **Exercise:** Implement the model. You will be calling the functions you had previously implemented. ###Code def model(X_train, Y_train, X_test, Y_test, learning_rate = 0.0001, num_epochs = 1500, minibatch_size = 32, print_cost = True): """ Implements a three-layer tensorflow neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX. Arguments: X_train -- training set, of shape (input size = 12288, number of training examples = 1080) Y_train -- training labels, of shape (output size = 6, number of training examples = 1080) X_test -- test set, of shape (input size = 12288, number of test examples = 120) Y_test -- test labels, of shape (output size = 6, number of test examples = 120) learning_rate -- learning rate of the optimization num_epochs -- number of epochs of the optimization loop minibatch_size -- size of a minibatch print_cost -- True to print the cost every 100 epochs Returns: parameters -- parameters learnt by the model. They can then be used to predict. """ ops.reset_default_graph() # to be able to rerun the model without overwriting tf variables tf.set_random_seed(1) # to keep consistent results seed = 3 # to keep consistent results (n_x, m) = X_train.shape # (n_x: input size, m : number of examples in the train set) n_y = Y_train.shape[0] # n_y : output size costs = [] # To keep track of the cost # Create Placeholders of shape (n_x, n_y) ### START CODE HERE ### (1 line) X, Y = create_placeholders(n_x, n_y) ### END CODE HERE ### # Initialize parameters ### START CODE HERE ### (1 line) parameters = initialize_parameters() ### END CODE HERE ### # Forward propagation: Build the forward propagation in the tensorflow graph ### START CODE HERE ### (1 line) Z3 = forward_propagation(X, parameters) ### END CODE HERE ### # Cost function: Add cost function to tensorflow graph ### START CODE HERE ### (1 line) cost = compute_cost(Z3, Y) ### END CODE HERE ### # Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer. ### START CODE HERE ### (1 line) optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) ### END CODE HERE ### # Initialize all the variables init = tf.global_variables_initializer() # Start the session to compute the tensorflow graph with tf.Session() as sess: # Run the initialization sess.run(init) # Do the training loop for epoch in range(num_epochs): epoch_cost = 0. # Defines a cost related to an epoch num_minibatches = int(m / minibatch_size) # number of minibatches of size minibatch_size in the train set seed = seed + 1 minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed) for minibatch in minibatches: # Select a minibatch (minibatch_X, minibatch_Y) = minibatch # IMPORTANT: The line that runs the graph on a minibatch. # Run the session to execute the "optimizer" and the "cost", the feed_dict should contain a minibatch for (X,Y). 
### START CODE HERE ### (1 line) _ , minibatch_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y}) ### END CODE HERE ### epoch_cost += minibatch_cost / minibatch_size # Print the cost every epoch if print_cost == True and epoch % 100 == 0: print ("Cost after epoch %i: %f" % (epoch, epoch_cost)) if print_cost == True and epoch % 5 == 0: costs.append(epoch_cost) # plot the cost plt.plot(np.squeeze(costs)) plt.ylabel('cost') plt.xlabel('iterations (per fives)') plt.title("Learning rate =" + str(learning_rate)) plt.show() # lets save the parameters in a variable parameters = sess.run(parameters) print ("Parameters have been trained!") # Calculate the correct predictions correct_prediction = tf.equal(tf.argmax(Z3), tf.argmax(Y)) # Calculate accuracy on the test set accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) print ("Train Accuracy:", accuracy.eval({X: X_train, Y: Y_train})) print ("Test Accuracy:", accuracy.eval({X: X_test, Y: Y_test})) return parameters ###Output _____no_output_____ ###Markdown Run the following cell to train your model! On our machine it takes about 5 minutes. Your "Cost after epoch 100" should be 1.048222. If it's not, don't waste time; interrupt the training by clicking on the square (⬛) in the upper bar of the notebook, and try to correct your code. If it is the correct cost, take a break and come back in 5 minutes! ###Code parameters = model(X_train, Y_train, X_test, Y_test) ###Output Cost after epoch 0: 1.913693 ###Markdown **Expected Output**: **Train Accuracy** 0.999074 **Test Accuracy** 0.716667 Amazing, your algorithm can recognize a sign representing a figure between 0 and 5 with 71.7% accuracy.**Insights**:- Your model seems big enough to fit the training set well. However, given the difference between train and test accuracy, you could try to add L2 or dropout regularization to reduce overfitting. - Think about the session as a block of code to train the model. Each time you run the session on a minibatch, it trains the parameters. In total you have run the session a large number of times (1500 epochs) until you obtained well trained parameters. 2.7 - Test with your own image (optional / ungraded exercise)Congratulations on finishing this assignment. You can now take a picture of your hand and see the output of your model. To do that: 1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub. 2. Add your image to this Jupyter Notebook's directory, in the "images" folder 3. Write your image's name in the following code 4. Run the code and check if the algorithm is right! ###Code import scipy from PIL import Image from scipy import ndimage ## START CODE HERE ## (PUT YOUR IMAGE NAME) my_image = "thumbs_up.jpg" ## END CODE HERE ## # We preprocess your image to fit your algorithm. fname = "images/" + my_image image = np.array(ndimage.imread(fname, flatten=False)) image = image/255. my_image = scipy.misc.imresize(image, size=(64,64)).reshape((1, 64*64*3)).T my_image_prediction = predict(my_image, parameters) plt.imshow(image) print("Your algorithm predicts: y = " + str(np.squeeze(my_image_prediction))) ###Output _____no_output_____
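###Markdown A note on the cell above: `scipy.ndimage.imread` and `scipy.misc.imresize` were removed from newer SciPy releases. If they are unavailable in your environment, an equivalent preprocessing path via PIL is sketched below, assuming the same flattened 64x64x3 input that `predict` expects. ###Code
# Sketch: PIL-based replacement for the deprecated scipy image helpers.
import numpy as np
from PIL import Image

fname = "images/thumbs_up.jpg"              # put your image name here
img = Image.open(fname).resize((64, 64))    # PIL resize takes (width, height)
my_image = (np.array(img) / 255.).reshape((1, 64 * 64 * 3)).T
my_image_prediction = predict(my_image, parameters)
print("Your algorithm predicts: y = " + str(np.squeeze(my_image_prediction)))
###Output _____no_output_____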
notebooks/basic-user-intent.ipynb
###Markdown This is one of the Objectiv example notebooks. For more examples visit the [example notebooks](https://objectiv.io/docs/modeling/example-notebooks/) section of our docs. The notebooks can run with the demo data set that comes with our [quickstart](https://objectiv.io/docs/home/quickstart-guide/), but can be used to run on your own collected data as well.All example notebooks are also available in our [quickstart](https://objectiv.io/docs/home/quickstart-guide/). With the quickstart you can spin up a fully functional Objectiv demo pipeline in five minutes. This also allows you to run these notebooks and experiment with them on a demo data set. Basic user intent analysis In this notebook, we briefly demonstrate how you can easily do basic user intent analysis on your data. Getting started Import the required packages for this notebookThe open model hub package can be installed with `pip install objectiv-modelhub` (this installs Bach as well). If you are running this notebook from our quickstart, the model hub and Bach are already installed, so you don't have to install it separately. ###Code from modelhub import ModelHub from bach import display_sql_as_markdown import bach import pandas as pd from datetime import timedelta ###Output _____no_output_____ ###Markdown First, we have to instantiate the Objectiv DataFrame object and the model hub. ###Code # instantiate the model hub and set the default time aggregation to daily modelhub = ModelHub(time_aggregation='%Y-%m-%d') # get the Bach DataFrame with Objectiv data df = modelhub.get_objectiv_dataframe(start_date='2022-02-01', end_date='2022-05-01') ###Output _____no_output_____ ###Markdown If you are running this example on your own collected data, set up the db connection like this and replace the cell above: ###Code # df = modelhub.get_objectiv_dataframe(db_url='postgresql://USER:PASSWORD@HOST:PORT/DATABASE', # start_date='2022-06-01', # end_date='2022-06-30', # table_name='data') ###Output _____no_output_____ ###Markdown The columns 'global_contexts' and 'location_stack' contain most of the event-specific data. These columns are json type columns, and we can extract data from them based on the keys of the json objects, using the `SeriesGlobalContexts` or `SeriesLocationStack` methods. ###Code # adding specific contexts to the data as columns df['application'] = df.global_contexts.gc.application df['root_location'] = df.location_stack.ls.get_from_context_with_type_series(type='RootLocationContext', key='id') ###Output _____no_output_____ ###Markdown Exploring root locationThe `root_location` context in the `location_stack` uniquely represents the top-level UI location of the user. As a first step of grasping user intent, this is a good starting point to see in what main areas of your product users are spending time. ###Code # model hub: unique users per root location users_root = modelhub.aggregate.unique_users(df, groupby=['application', 'root_location']) users_root.head(10) ###Output _____no_output_____ ###Markdown Exploring session durationThe average `session_duration` model from the [open model hub](https://objectiv.io/docs/modeling/open-model-hub/) is another good pointer to explore first for user intent. ###Code # model hub: duration, per root location duration_root = modelhub.aggregate.session_duration(df, groupby=['application', 'root_location']).sort_index() duration_root.head(10) ###Output _____no_output_____ ###Markdown Now, we can look at the distribution of time spent. 
We used the Bach `quantile` operation for this. We'll use this distribution to define the different stages of user intent. ###Code # how is this time spent distributed? session_duration = modelhub.aggregate.session_duration(df, groupby='session_id') # materialization is needed because the expression of the created series contains aggregated data, and it is not allowed to aggregate that. session_duration = session_duration.materialize() # show quantiles session_duration.quantile(q=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]).head(10) ###Output _____no_output_____ ###Markdown Defining different stages of user intentAfter exploring the `root_location` and `session_duration` (both per root location and quantiles), we can make a simple definition of different stages of user intent.Based on the objectiv.io website data in the quickstart: We think that users that spent the most time (90th percentile) and specifically in our documentation sections are in the Implement phase of Objectiv. As there is a jump beyond the one minute mark at the 70th percentile, it feels sensible to deem that users beyond the 70th up to the 90th percentile in our documentation sections are Exploring. The remaining users are Informing themselves about the product. Those users spend less than 1:40 in the docs and/or any amount of time on our main website.| User intent | Root locations | Duration || :--- | :--- | :--- || 1 - Inform | *all sections other than the ones mentioned below* | *any time spent* || 1 - Inform | Docs: modeling, taxonomy, tracking, home | less than 1:40 || 2 - Explore | Docs: modeling, taxonomy, tracking, home | between 1:40 and 11:30 || 3 - Implement | Docs: modeling, taxonomy, tracking, home | more than 11:30 | This is just for illustration purposes, you can adjust these definitions based on your own collected data. Assigning user intentBased on the definitions above, we can start assigning a stage of intent to each user. ###Code # set the root locations that we will use based on the definitions above roots = bach.DataFrame.from_pandas(engine=df.engine, df=pd.DataFrame({'roots': ['modeling', 'taxonomy', 'tracking', 'home', 'docs']}), convert_objects=True).roots # now we calculate the total time spent per _user_ and create a data frame from it user_intent_buckets = modelhub.agg.session_duration(df, groupby=['user_id'], method='sum', exclude_bounces=False).to_frame() # same as above, but for selected roots only explore_inform_users_session_duration = modelhub.agg.session_duration((df[(df.root_location.isin(roots)) & (df.application=='objectiv-docs')]), groupby='user_id', method='sum') # and set it as column user_intent_buckets['explore_inform_duration'] = explore_inform_users_session_duration # first, we set the Inform bucket as a catch-all, meaning users that do not fall into Explore and Implement will be defined as Inform user_intent_buckets['bucket'] = '1 - inform' # calculate buckets duration user_intent_buckets.loc[(user_intent_buckets.explore_inform_duration >= timedelta(0, 100)) & (user_intent_buckets.explore_inform_duration <= timedelta(0, 690)), 'bucket'] = '2 - explore' user_intent_buckets.loc[user_intent_buckets.explore_inform_duration > timedelta(0, 690), 'bucket'] = '3 - implement' ###Output _____no_output_____ ###Markdown Now, we have assigned intent to each user and can for example look at the total number of users per intent bucket. 
###Code # total number of users per intent bucket user_intent_buckets.reset_index().groupby('bucket').agg({'user_id': 'nunique'}).head() ###Output _____no_output_____ ###Markdown What's next? There are many possible next analysis steps, for example:- What product features do each of the intent groups use? - What kind of intent do users from different marketing campaigns show? - How can we drive more users to the 'Implement' stage? Look at the different product features that users with the 'Implement' intent use, compared to 'Explore'. A good starting point for these analyses on top of the user intent buckets is the basic product analytics example in the [example notebooks](https://objectiv.io/docs/modeling/example-notebooks/). Get the SQL for this user intent analysis ###Code # get the SQL to use this analysis in for example your BI tooling display_sql_as_markdown(user_intent_buckets) ###Output _____no_output_____
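###Markdown As an optional extra step (a sketch, not run here): if you materialize the bucket counts client-side — recent objectiv-bach versions expose `to_pandas()` for this, though check your version — the bucket shares are one pandas expression away. ###Code
# Sketch: turn the per-bucket user counts into shares of all users.
# Assumes to_pandas() is available on the Bach DataFrame in your version.
bucket_counts = (user_intent_buckets.reset_index()
                 .groupby('bucket')
                 .agg({'user_id': 'nunique'})
                 .to_pandas())
bucket_counts['share'] = bucket_counts['user_id'] / bucket_counts['user_id'].sum()
bucket_counts
###Output _____no_output_____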
machine_learning/udacity/project_1/boston_housing.ipynb
###Markdown Machine Learning Engineer NanodegreeModel Evaluation & ValidationProject 1: Predicting Boston Housing PricesWelcome to the first project of the Machine Learning Engineer Nanodegree! In this notebook, some template code has already been written. You will need to implement additional functionality to successfully answer all of the questions for this project. Unless it is requested, do not modify any of the code that has already been included. In this template code, there are four sections which you must complete to successfully produce a prediction with your model. Each section where you will write code is preceded by a **STEP X** header with comments describing what must be done. Please read the instructions carefully!In addition to implementing code, there will be questions that you must answer that relate to the project and your implementation. Each section where you will answer a question is preceded by a **QUESTION X** header. Be sure that you have carefully read each question and provide thorough answers in the text boxes that begin with "**Answer:**". Your project submission will be evaluated based on your answers to each of the questions. A description of the dataset can be found [here](https://archive.ics.uci.edu/ml/datasets/Housing), which is provided by the **UCI Machine Learning Repository**. Getting StartedTo familiarize yourself with an iPython Notebook, **try double clicking on this cell**. You will notice that the text changes so that all the formatting is removed. This allows you to make edits to the block of text you see here. This block of text (and mostly anything that's not code) is written using [Markdown](http://daringfireball.net/projects/markdown/syntax), which is a way to format text using headers, links, italics, and many other options! Whether you're editing a Markdown text block or a code block (like the one below), you can use the keyboard shortcut **Shift + Enter** or **Shift + Return** to execute the code or text block. In this case, it will show the formatted text.Let's start by setting up some code we will need to get the rest of the project up and running. Use the keyboard shortcut mentioned above on the following code block to execute it. Alternatively, depending on your iPython Notebook program, you can press the **Play** button in the hotbar. You'll know the code block executes successfully if the message *"Boston Housing dataset loaded successfully!"* is printed. ###Code # Importing a few necessary libraries import numpy as np import matplotlib.pyplot as pl from sklearn import datasets from sklearn.tree import DecisionTreeRegressor # Make matplotlib show our plots inline (nicely formatted in the notebook) %matplotlib inline # Create our client's feature set for which we will be predicting a selling price CLIENT_FEATURES = [[11.95, 0.00, 18.100, 0, 0.6590, 5.6090, 90.00, 1.385, 24, 680.0, 20.20, 332.09, 12.13]] # Load the Boston Housing dataset into the city_data variable city_data = datasets.load_boston() # Initialize the housing prices and housing features housing_prices = city_data.target housing_features = city_data.data print "Boston Housing dataset loaded successfully!" ###Output Boston Housing dataset loaded successfully! ###Markdown Statistical Analysis and Data ExplorationIn this first section of the project, you will quickly investigate a few basic statistics about the dataset you are working with. 
In addition, you'll look at the client's feature set in `CLIENT_FEATURES` and see how this particular sample relates to the features of the dataset. Familiarizing yourself with the data through an explorative process is a fundamental practice to help you better understand your results. Step 1In the code block below, use the imported `numpy` library to calculate the requested statistics. You will need to replace each `None` you find with the appropriate `numpy` coding for the proper statistic to be printed. Be sure to execute the code block each time to test if your implementation is working successfully. The print statements will show the statistics you calculate! ###Code import numpy # Number of houses in the dataset total_houses = housing_features.shape[0] # Number of features in the dataset total_features = housing_features.shape[1] # Minimum housing value in the dataset minimum_price = housing_prices.min() # Maximum housing value in the dataset maximum_price = housing_prices.max() # Mean house value of the dataset mean_price = housing_prices.mean() # Median house value of the dataset median_price = numpy.median(housing_prices) # Standard deviation of housing values of the dataset std_dev = numpy.std(housing_prices) # Show the calculated statistics print "Boston Housing dataset statistics (in $1000's):\n" print "Total number of houses:", total_houses print "Total number of features:", total_features print "Minimum house price:", minimum_price print "Maximum house price:", maximum_price print "Mean house price: {0:.3f}".format(mean_price) print "Median house price:", median_price print "Standard deviation of house price: {0:.3f}".format(std_dev) ###Output Boston Housing dataset statistics (in $1000's): Total number of houses: 506 Total number of features: 13 Minimum house price: 5.0 Maximum house price: 50.0 Mean house price: 22.533 Median house price: 21.2 Standard deviation of house price: 9.188 ###Markdown Question 1As a reminder, you can view a description of the Boston Housing dataset [here](https://archive.ics.uci.edu/ml/datasets/Housing), where you can find the different features under **Attribute Information**. The `MEDV` attribute relates to the values stored in our `housing_prices` variable, so we do not consider that a feature of the data.*Of the features available for each data point, choose three that you feel are significant and give a brief description for each of what they measure.*Remember, you can **double click the text box below** to add your answer! ###Code import pandas import seaborn columns = ('crime big_lots industrial charles_river nox rooms old distance' ' highway_access tax_rate pupil_teacher_ratio blacks lower_status'.split()) housing_data = pandas.DataFrame(housing_features, columns=columns) housing_data['median_value'] = housing_prices client_data = pandas.DataFrame(CLIENT_FEATURES, columns=columns) seaborn.set_style('whitegrid') for column in housing_data.columns: grid = seaborn.lmplot(column, 'median_value', data=housing_data, size=8) axe = grid.fig.gca() title = axe.set_title('{0} vs price'.format(column)) ###Output _____no_output_____ ###Markdown CRIM - the per-capita crime rate. INDUS - the proportion of non-retail business acres per town. LSTAT - the percentage of the population that is of lower status. Question 2*Using your client's feature set `CLIENT_FEATURES`, which values correspond with the features you've chosen above?* **Hint: ** Run the code block below to see the client's data. 
###Code print CLIENT_FEATURES print(client_data.crime) print(client_data.industrial) print(client_data.lower_status) ###Output 0 11.95 Name: crime, dtype: float64 0 18.1 Name: industrial, dtype: float64 0 12.13 Name: lower_status, dtype: float64 ###Markdown CRIM : 11.95, INDUS: 18.1, LSTAT: 12.13 Evaluating Model PerformanceIn this second section of the project, you will begin to develop the tools necessary for a model to make a prediction. Being able to accurately evaluate each model's performance through the use of these tools helps to greatly reinforce the confidence in your predictions. Step 2In the code block below, you will need to implement code so that the `shuffle_split_data` function does the following:- Randomly shuffle the input data `X` and target labels (housing values) `y`.- Split the data into training and testing subsets, holding 30% of the data for testing.If you use any functions not already accessible from the imported libraries above, remember to include your import statement below as well! Ensure that you have executed the code block once you are done. You'll know if the `shuffle_split_data` function is working if the statement *"Successfully shuffled and split the data!"* is printed. ###Code # Put any import statements you need for this code block here from sklearn import cross_validation def shuffle_split_data(X, y): """ Shuffles and splits data into 70% training and 30% testing subsets, then returns the training and testing subsets. """ # Shuffle and split the data X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=.3, random_state=0) # Return the training and testing data subsets return X_train, y_train, X_test, y_test # Test shuffle_split_data try: X_train, y_train, X_test, y_test = shuffle_split_data(housing_features, housing_prices) print "Successfully shuffled and split the data!" except: print "Something went wrong with shuffling and splitting the data." ###Output Successfully shuffled and split the data! ###Markdown Question 3*Why do we split the data into training and testing subsets for our model?* So that we can assess the model using a different data-set than what it was trained on, thus reducing the likelihood of overfitting the model to the training data and increasing the likelihood that it will generalize to other data. Step 3In the code block below, you will need to implement code so that the `performance_metric` function does the following:- Perform a total error calculation between the true values of the `y` labels `y_true` and the predicted values of the `y` labels `y_predict`.You will need to first choose an appropriate performance metric for this problem. See [the sklearn metrics documentation](http://scikit-learn.org/stable/modules/classes.htmlsklearn-metrics-metrics) to view a list of available metric functions. **Hint: ** Look at the question below to see a list of the metrics that were covered in the supporting course for this project.Once you have determined which metric you will use, remember to include the necessary import statement as well! Ensure that you have executed the code block once you are done. You'll know if the `performance_metric` function is working if the statement *"Successfully performed a metric calculation!"* is printed. 
###Code # Put any import statements you need for this code block here from sklearn.metrics import mean_squared_error def performance_metric(y_true, y_predict): """ Calculates and returns the total error between true and predicted values based on a performance metric chosen by the student. """ error = mean_squared_error(y_true, y_predict) return error # Test performance_metric try: total_error = performance_metric(y_train, y_train) print "Successfully performed a metric calculation!" except: print "Something went wrong with performing a metric calculation." ###Output Successfully performed a metric calculation! ###Markdown Question 4*Which performance metric below did you find was most appropriate for predicting housing prices and analyzing the total error? Why?*- *Accuracy*- *Precision*- *Recall*- *F1 Score*- *Mean Squared Error (MSE)*- *Mean Absolute Error (MAE)* Mean Squared Error was the most appropriate performance metric for predicting housing prices because we are predicting a numeric value (this is a regression problem) and while Mean Absolute Error could also be used, the MSE emphasizes larger errors more (due to the squaring) and so is preferable. Step 4 (Final Step)In the code block below, you will need to implement code so that the `fit_model` function does the following:- Create a scoring function using the same performance metric as in **Step 3**. See the [sklearn `make_scorer` documentation](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html).- Build a GridSearchCV object using `regressor`, `parameters`, and `scoring_function`. See the [sklearn documentation on GridSearchCV](http://scikit-learn.org/stable/modules/generated/sklearn.grid_search.GridSearchCV.html).When building the scoring function and GridSearchCV object, *be sure that you read the parameters documentation thoroughly.* It is not always the case that a default parameter for a function is the appropriate setting for the problem you are working on.Since you are using `sklearn` functions, remember to include the necessary import statements below as well! Ensure that you have executed the code block once you are done. You'll know if the `fit_model` function is working if the statement *"Successfully fit a model!"* is printed. ###Code # Put any import statements you need for this code block from sklearn.metrics import make_scorer from sklearn.grid_search import GridSearchCV def fit_model(X, y): """ Tunes a decision tree regressor model using GridSearchCV on the input data X and target labels y and returns this optimal model. """ # Create a decision tree regressor object regressor = DecisionTreeRegressor() # Set up the parameters we wish to tune parameters = {'max_depth':(1,2,3,4,5,6,7,8,9,10)} # Make an appropriate scoring function scoring_function = make_scorer(mean_squared_error, greater_is_better=False) # Make the GridSearchCV object reg = GridSearchCV(regressor, param_grid=parameters, scoring=scoring_function, cv=10) # Fit the learner to the data to obtain the optimal model with tuned parameters reg.fit(X, y) # Return the optimal model return reg # Test fit_model on entire dataset try: reg = fit_model(housing_features, housing_prices) print "Successfully fit a model!" except: print "Something went wrong with fitting a model." ###Output Successfully fit a model! ###Markdown Question 5*What is the grid search algorithm and when is it applicable?* The GridSearchCV algorithm exhaustively works through the parameters it is given to tune the model. 
Because it is exhaustive, it is appropriate when the parameters are relatively limited and the model-creation is not computationally intensive; otherwise, its run-time might be infeasible. Question 6*What is cross-validation, and how is it performed on a model? Why would cross-validation be helpful when using grid search?* Cross-validation is a method of testing a model by partitioning the data into subsets, with each subset taking a turn as the test set while the data not being used as a test-set is used as the training set. This allows the model to be tested against all the data-points, rather than having some data reserved exclusively as training data and the remainder exclusively as testing data.Because grid-search attempts to find the optimal parameters for a model, it's advantageous to use the same training and testing data in each case (case meaning a particular permutation of the parameters) so that the comparisons are equitable. One could simply perform an initial train-validation-test split and use this throughout the grid search, but this then risks the possibility that there was something in the initial split that will bias the outcome. By using all the partitions of the data as both test and training data, as cross-validation does, the chance of a bias in the splitting is reduced and at the same time all the parameter permutations are given the same data to be tested against. Checkpoint!You have now successfully completed your last code implementation section. Pat yourself on the back! All of your functions written above will be executed in the remaining sections below, and questions will be asked about various results for you to analyze. To prepare the **Analysis** and **Prediction** sections, you will need to initialize the two functions below. Remember, there's no need to implement any more code, so sit back and execute the code blocks! Some code comments are provided if you find yourself interested in the functionality. ###Code def learning_curves(X_train, y_train, X_test, y_test): """ Calculates the performance of several models with varying sizes of training data. The learning and testing error rates for each model are then plotted. """ print "Creating learning curve graphs for max_depths of 1, 3, 6, and 10. . ."
# Create the figure window fig = pl.figure(figsize=(10,8)) # We will vary the training set size so that we have 50 different sizes sizes = np.round(np.linspace(1, len(X_train), 50)) train_err = np.zeros(len(sizes)) test_err = np.zeros(len(sizes)) # Create four different models based on max_depth for k, depth in enumerate([1,3,6,10]): for i, s in enumerate(sizes): # Setup a decision tree regressor so that it learns a tree with max_depth = depth regressor = DecisionTreeRegressor(max_depth = depth) # Fit the learner to the training data regressor.fit(X_train[:s], y_train[:s]) # Find the performance on the training set train_err[i] = performance_metric(y_train[:s], regressor.predict(X_train[:s])) # Find the performance on the testing set test_err[i] = performance_metric(y_test, regressor.predict(X_test)) # Subplot the learning curve graph ax = fig.add_subplot(2, 2, k+1) ax.plot(sizes, test_err, lw = 2, label = 'Testing Error') ax.plot(sizes, train_err, lw = 2, label = 'Training Error') ax.legend() ax.set_title('max_depth = %s'%(depth)) ax.set_xlabel('Number of Data Points in Training Set') ax.set_ylabel('Total Error') ax.set_xlim([0, len(X_train)]) # Visual aesthetics fig.suptitle('Decision Tree Regressor Learning Performances', fontsize=18, y=1.03) fig.tight_layout() fig.show() def model_complexity(X_train, y_train, X_test, y_test): """ Calculates the performance of the model as model complexity increases. The learning and testing errors rates are then plotted. """ print "Creating a model complexity graph. . . " # We will vary the max_depth of a decision tree model from 1 to 14 max_depth = np.arange(1, 14) train_err = np.zeros(len(max_depth)) test_err = np.zeros(len(max_depth)) for i, d in enumerate(max_depth): # Setup a Decision Tree Regressor so that it learns a tree with depth d regressor = DecisionTreeRegressor(max_depth = d) # Fit the learner to the training data regressor.fit(X_train, y_train) # Find the performance on the training set train_err[i] = performance_metric(y_train, regressor.predict(X_train)) # Find the performance on the testing set test_err[i] = performance_metric(y_test, regressor.predict(X_test)) # Plot the model complexity graph pl.figure(figsize=(7, 5)) pl.title('Decision Tree Regressor Complexity Performance') pl.plot(max_depth, test_err, lw=2, label = 'Testing Error') pl.plot(max_depth, train_err, lw=2, label = 'Training Error') pl.legend() pl.xlabel('Maximum Depth') pl.ylabel('Total Error') pl.show() ###Output _____no_output_____ ###Markdown Analyzing Model PerformanceIn this third section of the project, you'll take a look at several models' learning and testing error rates on various subsets of training data. Additionally, you'll investigate one particular algorithm with an increasing `max_depth` parameter on the full training set to observe how model complexity affects learning and testing errors. Graphing your model's performance based on varying criteria can be beneficial in the analysis process, such as visualizing behavior that may not have been apparent from the results alone. ###Code learning_curves(X_train, y_train, X_test, y_test) ###Output _____no_output_____ ###Markdown Question 7*Choose one of the learning curve graphs that are created above. What is the max depth for the chosen model? As the size of the training set increases, what happens to the training error? What happens to the testing error?* Looking at the model with max-depth of 3, as the size of the training set increases, the training error gradually increases. 
The testing error initially decreases, then seems to more or less stabilize. Question 8*Look at the learning curve graphs for the model with a max depth of 1 and a max depth of 10. When the model is using the full training set, does it suffer from high bias or high variance when the max depth is 1? What about when the max depth is 10?* The training and testing plots for the model with max-depth 1 move toward convergence with an error near 50, indicating a high bias (the model is too simple, and the additional data isn't improving the generalization of the model). For the model with max-depth 10, the curves haven't converged, and the training error remains near 0, indicating that it suffers from high variance, and should be improved with more data. ###Code model_complexity(X_train, y_train, X_test, y_test) ###Output _____no_output_____ ###Markdown Question 9*From the model complexity graph above, describe the training and testing errors as the max depth increases. Based on your interpretation of the graph, which max depth results in a model that best generalizes the dataset? Why?* As max-depth increases, the training error improves, while the testing error decreases up until a depth of 5 and then begins a slight increase as the depth is increased. Based on this I would say that the max-depth of 5 created the model that best generalized the dataset, as it minimized the testing error. Model PredictionIn this final section of the project, you will make a prediction on the client's feature set using an optimized model from `fit_model`. *To answer the following questions, it is recommended that you run the code blocks several times and use the median or mean value of the results.* Question 10*Using grid search on the entire dataset, what is the optimal `max_depth` parameter for your model? How does this result compare to your initial intuition?* **Hint: ** Run the code block below to see the max depth produced by your optimized model. ###Code print "Final model optimal parameters:", reg.best_params_ repetitions = 1000 models = [fit_model(housing_features, housing_prices) for model in range(repetitions)] params_scores = [(model.best_params_, model.best_score_) for model in models] parameters = numpy.array([param_score[0]['max_depth'] for param_score in params_scores]) scores = numpy.array([param_score[1] for param_score in params_scores]) best_models = pandas.DataFrame.from_dict({'parameter':parameters, 'score': scores}) x_labels = sorted(best_models.parameter.unique()) figure = pl.figure() axe = figure.gca() grid = seaborn.boxplot('parameter', 'score', data = best_models, order=x_labels, ax=axe) title = axe.set_title("Best Parameters vs Best Scores") best_index = np.where(scores==np.max(scores)) print(scores[best_index]) print(parameters[best_index]) bin_range = best_models.parameter.max() - best_models.parameter.min() bins = pandas.cut(best_models.parameter, bin_range) parameter_group = best_models.groupby('parameter') # group the results by max_depth parameter_group.median() parameter_group.max() ###Output _____no_output_____ ###Markdown While a max-depth of 4 was the most common best-parameter, the max-depth of 5 was the median max-depth, had the highest median score, and had the highest overall score, so I will say that the optimal `max_depth` parameter is 5. This is in line with what I had guessed, based on the Complexity Performance plot. Question 11*With your parameter-tuned model, what is the best selling price for your client's home?
How does this selling price compare to the basic statistics you calculated on the dataset?* **Hint: ** Run the code block below to have your parameter-tuned model make a prediction on the client's home. ###Code best_model = models[best_index[0][0]] sale_price = best_model.predict(CLIENT_FEATURES) predicted = sale_price[0] * 1000 actual_median = housing_data.median_value.median() * 1000 print ("Predicted value of client's home: ${0:,.2f}".format(predicted)) print("Median Value - predicted: ${0:,.2f}".format(actual_median - predicted)) ###Output _____no_output_____
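###Markdown
As one last sanity check (a short added sketch, not part of the original project rubric), we can compare the model's prediction with the prices of the homes most similar to the client's home. `housing_features`, `housing_prices` and `CLIENT_FEATURES` are the objects defined earlier in this notebook; the choice of 10 neighbors is illustrative.
###Code
from sklearn.neighbors import NearestNeighbors

# Find the 10 homes in the dataset closest to the client's feature vector
nbrs = NearestNeighbors(n_neighbors=10).fit(housing_features)
distances, indices = nbrs.kneighbors(CLIENT_FEATURES)
neighbor_prices = housing_prices[indices[0]]
print("Mean price of the 10 nearest homes: %.2f" % neighbor_prices.mean())
###Output _____no_output_____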
Mtech_seed_PCA.ipynb
###Markdown PCA cluster for SEED dataset ###Code import pandas as pd import numpy as np import matplotlib.pyplot as plt %matplotlib inline from sklearn import linear_model #from sklearn import linear_model.fit from sklearn.linear_model import LinearRegression from sklearn.decomposition import PCA from sklearn.decomposition import FactorAnalysis from factor_analyzer import FactorAnalyzer # loading dataset into Pandas DataFrame df = pd.read_csv("Seed_data.csv") df #Importing libraries from SKLEARN import matplotlib.pyplot as plt from sklearn import datasets from sklearn.decomposition import PCA seed = pd.read_csv('Seed_data.csv') X = seed.iloc[:, [0, 1, 2, 3, 4, 5, 6]].values Y = seed['target'] pca = PCA(n_components=7) X_r = pca.fit(X).transform(X) ###Output _____no_output_____ ###Markdown PCA plot for Seed dataset ###Code # Plot each class in the plane of the first two principal components plt.scatter(X_r[Y == 0, 0], X_r[Y == 0, 1], s =50, c = 'orange', label = 'Target 0') plt.scatter(X_r[Y == 1, 0], X_r[Y == 1, 1], s =50, c = 'yellow', label = 'Target 1') plt.scatter(X_r[Y == 2, 0], X_r[Y == 2, 1], s =50, c = 'green', label = 'Target 2') plt.title('PCA plot for Seed dataset') plt.legend() '''KNN classifier, a supervised machine learning technique, used here to classify the seed varieties and to measure the accuracy of the classification''' # Importing Libraries for Modelling. from sklearn import neighbors, datasets, preprocessing from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score from sklearn.metrics import classification_report from sklearn.metrics import confusion_matrix # Assigning values of X and y from dataset y = df['target'] # Split off classifications X = df.iloc[:, [0, 1, 2, 3, 4, 5, 6]].values ''' Here X is assigned all the feature columns and y is assigned the target value of the seed dataset''' #Setting training and testing values Xtrain, Xtest, y_train, y_test = train_test_split(X, y) scaler = preprocessing.StandardScaler().fit(Xtrain) Xtrain = scaler.transform(Xtrain) Xtest = scaler.transform(Xtest) # Modeling is done using KNN classifiers.
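# KNeighborsClassifier predicts the class of each test point by majority vote
# among its k nearest training points (Euclidean distance by default); the
# StandardScaler step above keeps any single feature from dominating that
# distance. k = 5 below is a common small, odd default.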
knn = neighbors.KNeighborsClassifier(n_neighbors=5) knn.fit(Xtrain, y_train) y_pred = knn.predict(Xtest) # Display the Output print('Accuracy Score:', accuracy_score(y_test, y_pred)) print('Confusion matrix \n', confusion_matrix(y_test, y_pred)) print('Classification \n', classification_report(y_test, y_pred)) from sklearn.metrics import cohen_kappa_score cluster = cohen_kappa_score(y_test, y_pred) cluster y = df['target'] # Split off classifications X = df.iloc[:, [0, 1, 2, 3, 4, 5, 6]].values Xtrain, Xtest, y_train, y_test = train_test_split(X, y) ###Output _____no_output_____ ###Markdown Logistic Regression Accuracy ###Code #Logistic Regression from sklearn.metrics import confusion_matrix from sklearn.metrics import accuracy_score from sklearn.linear_model import LogisticRegression classifier = LogisticRegression() classifier.fit(Xtrain,y_train) y_pred = classifier.predict(Xtest) cm = confusion_matrix(y_test,y_pred) accuracy = accuracy_score(y_test,y_pred) print("Logistic Regression :") print("Accuracy = ", accuracy) print(cm) ###Output Logistic Regression : Accuracy = 0.9622641509433962 [[19 1 0] [ 1 16 0] [ 0 0 16]] ###Markdown Cohen Kappa Accuracy for LR ###Code from sklearn.metrics import cohen_kappa_score cluster = cohen_kappa_score(y_test, y_pred) cluster ###Output _____no_output_____ ###Markdown K-Nearest Neighbors Accuracy ###Code #K Nearest Neighbors from sklearn.neighbors import KNeighborsClassifier classifier = KNeighborsClassifier(n_neighbors=5, p=2, metric='minkowski') classifier.fit(Xtrain,y_train) y_pred = classifier.predict(Xtest) cm = confusion_matrix(y_test,y_pred) accuracy = accuracy_score(y_test,y_pred) print("K Nearest Neighbors :") print("Accuracy = ", accuracy) print(cm) ###Output K Nearest Neighbors : Accuracy = 0.9206349206349206 [[18 0 1] [ 4 23 0] [ 0 0 17]] ###Markdown Cohen Kappa Accuracy for KNN ###Code from sklearn.metrics import cohen_kappa_score cluster = cohen_kappa_score(y_test, y_pred) cluster ###Output _____no_output_____ ###Markdown Support Vector Machine Accuracy ###Code Xtrain, Xtest, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0) #Support Vector Machine from sklearn.svm import SVC classifier = SVC() classifier.fit(Xtrain,y_train) y_pred = classifier.predict(Xtest) cm = confusion_matrix(y_test,y_pred) accuracy = accuracy_score(y_test,y_pred) print("Support Vector Machine:") print("Accuracy = ", accuracy) print(cm) ###Output Support Vector Machine: Accuracy = 0.8761904761904762 [[27 3 6] [ 3 32 0] [ 1 0 33]] ###Markdown Cohen Kappa Accuracy for SVM ###Code from sklearn.metrics import cohen_kappa_score cluster = cohen_kappa_score(y_test, y_pred) cluster ###Output _____no_output_____ ###Markdown Gaussian Naive Bayes Accuracy ###Code #Gaussian Naive Bayes from sklearn.naive_bayes import GaussianNB Xtrain, Xtest, y_train, y_test = train_test_split(X, y) classifier = GaussianNB() classifier.fit(Xtrain,y_train) y_pred = classifier.predict(Xtest) cm = confusion_matrix(y_test,y_pred) accuracy = accuracy_score(y_test,y_pred) print("Gaussian Naive Bayes :") print("Accuracy = ", accuracy) print(cm) ###Output Gaussian Naive Bayes : Accuracy = 0.9245283018867925 [[17 0 2] [ 1 13 0] [ 1 0 19]] ###Markdown Cohen Kappa Accuracy for GNB ###Code from sklearn.metrics import cohen_kappa_score cluster = cohen_kappa_score(y_test, y_pred) cluster ###Output _____no_output_____ ###Markdown Decision Tree Classifier Accuracy ###Code #Decision Tree Classifier from sklearn.model_selection import train_test_split from sklearn.tree import 
DecisionTreeClassifier as DT from sklearn.metrics import confusion_matrix from sklearn.metrics import accuracy_score classifier = DT(criterion='entropy', random_state=0) classifier.fit(Xtrain,y_train) y_pred = classifier.predict(Xtest) cm = confusion_matrix(y_test,y_pred) accuracy = accuracy_score(y_test,y_pred) # recompute the accuracy for this model (it previously reused the value from the Naive Bayes cell) print("Decision Tree Classifier :") print("Accuracy = ", accuracy) print(cm) ###Output Decision Tree Classifier : Accuracy = 0.9245283018867925 [[16 2 1] [ 1 13 0] [ 3 0 17]] ###Markdown Cohen Kappa Accuracy for DTC ###Code from sklearn.metrics import cohen_kappa_score cluster = cohen_kappa_score(y_test, y_pred) cluster ###Output _____no_output_____ ###Markdown Random Forest Classifier Accuracy ###Code #Random Forest Classifier from sklearn.ensemble import RandomForestClassifier as RF Xtrain, Xtest, y_train, y_test = train_test_split(X, y) classifier = RF(n_estimators=10, criterion='entropy', random_state=0) classifier.fit(Xtrain,y_train) y_pred = classifier.predict(Xtest) cm = confusion_matrix(y_test,y_pred) accuracy = accuracy_score(y_test,y_pred) # recompute for this model as well print("Random Forest Classifier :") print("Accuracy = ", accuracy) print(cm) ###Output Random Forest Classifier : Accuracy = 0.9245283018867925 [[14 0 0] [ 0 17 0] [ 6 0 16]] ###Markdown Cohen Kappa Accuracy for RFC ###Code from sklearn.metrics import cohen_kappa_score cluster = cohen_kappa_score(y_test, y_pred) cluster ###Output _____no_output_____
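###Markdown
A quick way to judge how many of the seven principal components actually matter is the explained variance ratio. The next cell is a small added sketch; it reuses the `pca` object fitted at the top of this notebook and only standard scikit-learn `PCA` attributes.
###Code
# Fraction of the total variance captured by each principal component
print(pca.explained_variance_ratio_)
# Cumulative variance: how many components are needed to reach ~95%?
print(np.cumsum(pca.explained_variance_ratio_))
###Output _____no_output_____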
code/evo_tune_201023/.ipynb_checkpoints/transformer_encoder_evotune-checkpoint.ipynb
###Markdown Proceed with weight updates using the balanced motor set (`pfamA_motors_balanced`) ###Code import time start_time = time.time() print_every = 1000 # loss_vector = [] # note: each "epoch" below is a single sequence, i.e. one optimizer step for epoch in np.arange(0, pfamA_motors_balanced.shape[0]): seq = pfamA_motors_balanced.iloc[epoch, 3] sentence_in = prepare_sequence(seq) targets = prepare_labels(seq) # sentence_in = sentence_in.to(device = device) sentence_in = sentence_in.unsqueeze(1).to(device = device) targets = targets.to(device = device) optimizer.zero_grad() output = model(sentence_in) # print("targets size: ", targets.size()) loss = criterion(output.view(-1, ntokens), targets) loss.backward() torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5) optimizer.step() if epoch % print_every == 0: print(f"At Epoch: {epoch}") print(f"Loss {loss.item():.4f}") elapsed = time.time() - start_time print(f"time elapsed {elapsed:.4f}") # torch.save(model.state_dict(), "../../data/transformer_encoder_201025.pt") # loss_vector.append(loss) torch.save(model.state_dict(), "../../data/evotune_transformerencoder_balanced.pt") print("done") ###Output _____no_output_____ ###Markdown Proceed with weight updates using the entire pfam motor set (`pfamA_target`) ###Code start_time = time.time() print_every = 1000 # loss_vector = [] for epoch in np.arange(0, pfamA_target.shape[0]): seq = pfamA_target.iloc[epoch, 3] sentence_in = prepare_sequence(seq) targets = prepare_labels(seq) # sentence_in = sentence_in.to(device = device) sentence_in = sentence_in.unsqueeze(1).to(device = device) targets = targets.to(device = device) optimizer.zero_grad() output = model(sentence_in) # print("targets size: ", targets.size()) loss = criterion(output.view(-1, ntokens), targets) loss.backward() torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5) optimizer.step() if epoch % print_every == 0: print(f"At Epoch: {epoch}") print(f"Loss {loss.item():.4f}") elapsed = time.time() - start_time print(f"time elapsed {elapsed:.4f}") # torch.save(model.state_dict(), "../../data/transformer_encoder_201025.pt") # loss_vector.append(loss) torch.save(model.state_dict(), "../../data/evotune_transformerencoder_balanced_target.pt") print("done") ###Output _____no_output_____
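###Markdown
To reuse the fine-tuned weights later, the saved state dict can be loaded back with PyTorch's standard API. This is a short added sketch; it assumes a `model` of the same architecture as the one constructed earlier in this notebook.
###Code
# Restore the evotuned parameters into the (already constructed) model
state_dict = torch.load("../../data/evotune_transformerencoder_balanced_target.pt", map_location=device)
model.load_state_dict(state_dict)
model.eval()  # disable dropout etc. before running inference
###Output _____no_output_____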
src/02_loops_condicionais_metodos_funcoes/04_loop_while.ipynb
###Markdown While Loop ###Code # Print the values from 0 to 9 counter = 0 while ( counter < 10 ): print(counter) counter += 1 # Use else to run a final block when the loop condition fails x = 0 while ( x < 10 ): print('The value of x in this iteration is: ', x) print(' x is still less than 10, adding 1 to x') x += 1 else: print('Loop finished') ###Output The value of x in this iteration is: 0 x is still less than 10, adding 1 to x The value of x in this iteration is: 1 x is still less than 10, adding 1 to x The value of x in this iteration is: 2 x is still less than 10, adding 1 to x The value of x in this iteration is: 3 x is still less than 10, adding 1 to x The value of x in this iteration is: 4 x is still less than 10, adding 1 to x The value of x in this iteration is: 5 x is still less than 10, adding 1 to x The value of x in this iteration is: 6 x is still less than 10, adding 1 to x The value of x in this iteration is: 7 x is still less than 10, adding 1 to x The value of x in this iteration is: 8 x is still less than 10, adding 1 to x The value of x in this iteration is: 9 x is still less than 10, adding 1 to x Loop finished
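###Markdown
A `while` loop can also be stopped early with `break` (a small added example in the same spirit as the cells above; note that the `else` block is skipped when the loop is interrupted by `break`):
###Code
# Stop the loop as soon as x reaches 5
x = 0
while x < 10:
    if x == 5:
        break
    print(x)
    x += 1
else:
    print('Loop finished')  # not executed, because break interrupted the loop
###Output _____no_output_____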
Export Jupyter notebooks to Python library.ipynb
###Markdown Export Jupyter NotebooksThe following code takes a bunch of Jupyter notebooks and extracts their code cells, building a library out of them. The goal is to have a tool that allows us to write all Python code in the form of Jupyter notebooks, so as to better document their development, underlying algorithms and even sample applications, and then build useful (and compact) libraries out of those self-documented files.This notebook itself is an example of the intended use, being written as a collection of text and code cells, which are selectively exported into a single Python file, the library `exportnb.py`. The logic of the program is as follows. It takes a bunch of Jupyter notebook file names and reads all of those notebooks, one after another. For each cell, it looks for the first line to find whether it contains the line `# file: xxxxx`, where `xxxxx` can be any valid file name. If so, it adds the cell to the code that is to be exported into the file with that name.Some remarks:* File names may contain directory names. By default, those directories will be built by the exporting routine.* Multiple cells can be written to the same Python file. They are written in the order in which they appear, which is the usual one.* Also, a single Jupyter notebook can contain code for multiple files.* If the file name contains spaces, enclose it in double quotes, as in `# file: "my long file name.py"`. I am not sure this is useful, though, as `import` statements do not allow spaces in file names.* By default, the code inserts empty newlines between cells. This avoids frequent errors that originate from different indentations in different cells, functions being joined, etc. The library We begin the code importing libraries to read Jupyter notebooks, which are nothing but JSON code -- i.e. JavaScript-parseable data. ###Code # file: exportnb.py # # exportnb.py # # Library for exporting multiple Jupyter notebooks into a series of # Python files. Main interface provided by function `export_notebooks` # below. # # Author: Juan José García Ripoll # License: See http://opensource.org/licenses/MIT # Version: 1.0 (15/07/2018) # import sys, json, re, pathlib ###Output _____no_output_____ ###Markdown We then build the function that parses a cell and determines whether it is to be exported. ###Code # file: exportnb.py def file_cell(lines): # # Determine whether a cell is to be saved as code. This # is done by inspecting the lines of the cell and looking # for a line with a comment of the form # file: xxxxx # If so, it eliminates this line and collects the remaining # text as code. # if len(lines): ok = re.search('^#[ ]+file:[ ]+("[^\\"]*"|[^ \n]*)[ \n]*$', lines[0]) if ok: return ok.group(1), lines[1:] return False, lines ###Output _____no_output_____ ###Markdown This function uses the previous one to decide whether to add the lines to a dictionary that associates file names with text content (the lines of the cell that we received above).
###Code # file: exportnb.py def register_cell(dictionary, cell_lines, add_newline=True): # # Input: # - dictionary: a map from file names to lists of lines # of code that will be written to the file # - cell_lines: lines of a cell in a Jupyter notebook # - add_newline: add empty line after each cell # # Output: # - updated dictionary # file, lines = file_cell(cell_lines) if file: if file in dictionary: lines = dictionary[file] + lines if add_newline: lines += ['\n'] dictionary[file] = lines return dictionary ###Output _____no_output_____ ###Markdown We now create a function that parses a whole notebook, loading the content into a dictionary that associates files with cell content. ###Code # file: exportnb.py def read_notebook(dictionary, notebook, add_newline=True, verbose=False): if verbose: print(f'Reading notebook {notebook}') with open(notebook, 'r', encoding='utf-8') as f: j = json.load(f) if j["nbformat"] >=4: for i,cell in enumerate(j["cells"]): dictionary = register_cell(dictionary, cell["source"], add_newline) else: for i,cell in enumerate(j["worksheets"][0]["cells"]): dictionary = register_cell(dictionary, cell["input"], add_newline) ###Output _____no_output_____ ###Markdown Finally, we save the content of a whole dictionary, overwriting files. We add some intelligence, ensuring that directories are properly built. ###Code # file: exportnb.py def write_notebooks(dictionary, root='', mkdirs=True, verbose=False): # # Input: # - dictionary: a map from file names to list of lines of code # to be written # - root: prefix to be added to all file names # - mkdirs: create parent directories if they do not exist # for file in dictionary.keys(): path = pathlib.Path(root) / file # prepend the root prefix (if any) to the output path if mkdirs: path.parent.mkdir(parents=True, exist_ok=True) if verbose: print(f'Exporting file {file}') with path.open('w', encoding='utf-8') as f: for line in dictionary[file]: f.write(line) ###Output _____no_output_____ ###Markdown All these functions are combined into a single interface, given by the next one. ###Code # file: exportnb.py def export_notebooks(notebooks, root='', add_newline=True, mkdirs=True, verbose=False): # # Input: # - notebooks: list of notebooks as file names # - root: prefix for exporting all notebooks # - add_newline: add empty lines between cells # dictionary = {} for nb in notebooks: read_notebook(dictionary, nb, add_newline=add_newline, verbose=verbose) write_notebooks(dictionary, root=root, mkdirs=mkdirs, verbose=verbose) ###Output _____no_output_____ ###Markdown Test and build the library We test the library with the following line, which packs the whole notebook into a single file `exportnb.py`, as specified above. ###Code export_notebooks(['Export Jupyter notebooks to Python library.ipynb'], verbose=True) ###Output Reading notebook Export Jupyter notebooks to Python library.ipynb Exporting file exportnb.py
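###Markdown
Once exported, the generated module can be driven like any other Python library (a short added sketch; the `root='build/'` prefix and the notebook list are illustrative):
###Code
import exportnb

# Export one or more notebooks, writing the generated files under build/
exportnb.export_notebooks(['Export Jupyter notebooks to Python library.ipynb'],
                          root='build/', verbose=True)
###Output _____no_output_____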
lectures/07b-taylor-series.ipynb
###Markdown CHEM 1000 - Spring 2022Prof. Geoffrey Hutchison, University of Pittsburgh 7 Maclaurin and Taylor SeriesChapter 7 in [*Mathematical Methods for Chemists*](http://sites.bu.edu/straub/mathematical-methods-for-molecular-science/)By the end of this session, you should be able to:- Understand the concepts behind Maclaurin and Taylor series approximations- Use Maclaurin and Taylor expansions for exp, log, sin, cos, and other functions SeriesLast time, we saw that infinite series can be useful for chemistry and physics. Many times, we can use a ***series limit*** for mathematical purposes. We can also use finite approximations from ***partial sums*** to estimate a more difficult or complex system.In particular, we used a ***power series*** to approximate $e^x$ and saw that related power series could be derived for other functions.We will now use two new kinds of series to approximate functions.Mathematical functions that are continuous and infinitely differentiable may be represented in terms of the function's derivatives at a single point. The ***Maclaurin series*** represents a function as power series in x with coefficients depending on the derivatives of the function at x = 0. The ***Taylor series*** represents a function in terms of derivatives of the function at an arbitrary point x0 and powers of the displacement x - x0. We'll start with the Maclaurin series, since the derivations are a bit "cleaner." Maclaurin SeriesWhile we saw a few functions evaluated as power series, we would like to know how to express *any* function as a power series...$$f(x)=\sum_{n=0}^{\infty} a_{n} x^{n}=a_{0}+a_{1} x+a_{2} x^{2}+\ldots$$The key question, of course, is how to calculate the coefficients $a_0$, $a_1$, etc.Since we will perform this approximation near $x = 0$ we can easily get the first coefficient $a_0$:$$f(0) = a_0 + a_1\cdot 0 + a_2\cdot 0 + \ldots$$We can get the *next* coefficient with the first derivative:$$f'(0) = a_1 + 2 a_2 \cdot 0 + \ldots$$Not surprisingly, we can continue this with second derivatives, third derivatives, etc.$$\left.\frac{d^{2}}{d x^{2}} f(x)\right|_{x=0}=2 a_{2}$$$$\left.\frac{d^{3}}{d x^{3}} f(x)\right|_{x=0}=2 \cdot 3 a_{3}$$So in general, the coefficients will all be:$$a_{n}=\left.\frac{1}{n !} \frac{d^{n}}{d x^{n}} f(x)\right|_{x=0}$$This may seem like a boring and tedious procedure. On the other hand, it means that we can express *any* function as a power series around $x = 0$, which can be very useful.We will see throughout the course that there are several methods to approximate any function (even something amazingly complex) as an infinite series of simple "basis" functions. ###Code # Let's do some examples from sympy import init_session init_session() ###Output _____no_output_____ ###Markdown We already know the power series for $e^x$ - we saw it last class:$$e^{x}=\sum_{n=0}^{\infty} \frac{1}{n !} x^{n}=1+x+\frac{1}{2 !} x^{2}+\frac{1}{3 !} x^{3}+\ldots$$Let's see how the Maclaurin series works and if we can derive this power series for $e^x$.
Remember that what we need are the $a_n$: ###Code f = exp(x) print('a0, ', f.subs(x, 0)) print('a1, ', 1*diff(f, x).subs(x, 0)) print('a2, ', 1/2*diff(f, x, 2).subs(x, 0)) ###Output _____no_output_____ ###Markdown This is obviously boring, because the derivative of $e^x$ is *always* $e^x$ and at $x=0$ the number is *always* one.$$a_{n}=\left.\frac{1}{n !} \frac{d^{n}}{d x^{n}} f(x)\right|_{x=0}=\frac{1}{n !} \times 1$$Thus, it's not a surprise that the Maclaurin series for $e^x$ is:$$e^{x}=\sum_{n=0}^{\infty} \frac{1}{n !} x^{n}=1+x+\frac{1}{2 !} x^{2}+\frac{1}{3 !} x^{3}+\ldots$$Let's do a more interesting example for $\sin x$: ###Code f = sin(x) print('a0, ', f.subs(x, 0)) print('a1, ', 1/factorial(1)*diff(f, x).subs(x, 0)) print('a2, ', 1/factorial(2)*diff(f, x, 2).subs(x, 0)) print('a3, ', 1/factorial(3)*diff(f, x, 3).subs(x, 0)) ###Output _____no_output_____ ###Markdown Let's think about what this means.- constant: $\sin 0 = 0$- first derivative: $d/dx \sin x = \cos x$ and of course $\cos 0 = 1$- second derivative: $d^2/dx^2 \sin x = -\sin x$ so 0 again.- third derivative: $d^3/dx^3 \sin x = -\cos x$ so $a_3 = \frac{1}{3!} \times -1 = \frac{-1}{6}$Okay, I obviously could keep going by hand, but larger terms clearly call for a loop: ###Code # or more fun... for n in range(6): print(n, 1/factorial(n)*diff(f, x, n).subs(x, 0)) ###Output _____no_output_____ ###Markdown Notice that for even orders, the resulting derivative will be $\pm\sin x$ and thus the coefficient is zero.I want to plot these... ###Code # Let's plot it! import numpy as np import matplotlib.pyplot as plt %matplotlib inline %config InlineBackend.figure_format = 'retina' plt.style.use('./chem1000.mplstyle') x = np.arange(-2*np.pi,2*np.pi,0.1) plt.plot(x, np.sin(x), color='blue', label='sin() function') # zero order is zero b/c sin(0) = 0 plt.plot(x, 1/factorial(1) * x, label='Taylor - 1st order') # second order is the same b/c sin(0) = 0 plt.plot(x, 1/factorial(1) * x - 1/factorial(3)*x**3, label='Taylor - 3rd order') # fourth order is the same again plt.plot(x, 1/factorial(1) * x - 1/factorial(3)*x**3 + 1/factorial(5)*x**5, label='Taylor - 5th order') plt.ylim([-7,4]) plt.legend() plt.show() ###Output _____no_output_____ ###Markdown Notice that as we add higher terms, we get an increasing accuracy of the Taylor series expansion for $\sin x$.Of course, Sympy can do this too... ###Code x = symbols('x') f = sin(x) # Sympy already knows how to do a Maclaurin or Taylor expansion of a function # around x0 up to (but not including) order 'n' # expression.series(x, x0, n) # if x0 is omitted, it's assumed to be around x0 = 0 (Maclaurin series) # if n is omitted, it's assumed to be n=6 f.series(x, x0 = 0, n=6) # the "O(x**4)" expression means that terms of order x**4 and higher are omitted x = symbols('x') g = cos(x) g.series(x, x0 = 0, n=4) ###Output _____no_output_____ ###Markdown Quick ApproximationsThe Maclaurin series is useful for some quick / easy approximations for functions near $x=0$:- $e^x \approx 1+x+\frac{x^2}{2}$ - or even just $1 + x$- $\sin x \approx x$- $\cos x \approx 1 - \frac{x^2}{2}$Obviously, these aren't very good outside some region of convergence, but when doing mental math, they're useful shortcuts. Taylor SeriesThe Maclaurin series derivation is only for $x = 0$, but what if we want to expand around a different point (e.g., an optimum bond length isn't going to be 0.0 Å)... It's not hard to imagine what we need to do:- Write the power series with ($x - x_0$)- Evaluate the derivatives at $x_0$ instead of zero. That's about it.
The math notation looks a bit messy, but:$$f(x)=\sum_{n=0}^{\infty} a_{n}\left(x-x_{0}\right)^{n}=a_{0}+a_{1}\left(x-x_{0}\right)+a_{2}\left(x-x_{0}\right)^{2}+\ldots$$ Example log(1)We can't take the natural log of zero. So let's consider:$$f(x) = \ln x$$around $x_0 = 1$. Well, that's going to be:$$\ln (x)=\ln (1)+\left.\frac{d}{d x} \ln (x)\right|_{x=1}(x-1)+\left.\frac{1}{2 !} \frac{d^{2}}{d x^{2}} \ln (x)\right|_{x=1}(x-1)^{2}+\ldots$$Remember that the derivative of $\ln x$ is $1/x$, so evaluating the successive derivatives at $x=1$:- $\ln 1 = 0$- $1/x = 1$- $-1/x^2 = -1$- $2/x^3 = 2$- (etc.)So the formula looks something like:$$\ln(x) \approx 0+\left.\frac{1}{x}\right|_{x=1}(x-1)+\frac{1}{2 !}\left[\frac{-1}{x^{2}}\right]_{x=1}(x-1)^{2}+\frac{1}{3 !}\left[\frac{2}{x^{3}}\right]_{x=1}(x-1)^{3}+\ldots$$Then doing a little clean-up:$$\begin{aligned}\ln (x) &=(x-1)-\frac{1}{2 !}(x-1)^{2}+\frac{2}{3 !}(x-1)^{3}-\frac{2 \cdot 3}{4 !}(x-1)^{4}+\ldots \\&=(x-1)-\frac{1}{2}(x-1)^{2}+\frac{1}{3}(x-1)^{3}-\frac{1}{4}(x-1)^{4}+\ldots\end{aligned}$$Putting it slightly differently for small deviations $x$:$$\ln (1+x)=x-\frac{1}{2} x^{2}+\frac{1}{3} x^{3}-\frac{1}{4} x^{4}+\cdots=\sum_{n=1}^{\infty}(-1)^{n+1} \frac{1}{n} x^{n}$$ ###Code # plot it! x = np.arange(-0.5,2,0.1) plt.plot(x, np.log(x+1), color='red', label='ln(x+1)') plt.plot(x, x, label='1st order') plt.plot(x, x - x**2/2, label='2nd order') plt.plot(x, x - x**2/2 + x**3/3, label='3rd order') plt.legend() plt.show() ###Output _____no_output_____ ###Markdown We previously talked about the Morse potential energy function:$$V(r)=\varepsilon\left[1-e^{-\beta\left(r-r_{0}\right)}\right]^{2}=\varepsilon\left[1-2 e^{-\beta\left(r-r_{0}\right)}+e^{-2 \beta\left(r-r_{0}\right)}\right]$$ ###Code x, x0, epsilon, beta = symbols('x x0 epsilon beta') V = epsilon * (1 - exp(-beta * (x-x0)))**2 # harmonic approximation (i.e., terms in 0, 1, 2) V.series(x, x0, n=3) ###Output _____no_output_____ ###Markdown Notice that the simplest approximation of the potential energy is a harmonic expression (e.g., like a spring):$$V(r) \approx k(r - r_0)^2$$Near the optimal bond length $r_0$, this looks fairly good. The problems come with higher energy, if the bond is greatly stretched or compressed.At 298 K, thermal energy is ≈0.025 eV = 0.592 kcal/mol, so although we can see *anharmonic* parts of the curve (e.g., stretching at longer bond lengths), this is usually not a large effect. ###Code # let's plot it! x = np.linspace(0.5, 2.5, 100) beta = 2.253 epsilon = 265.79 x0 = 0.9624 # Angstroms plt.plot(x, epsilon * (1 - np.exp(-beta * (x-x0)))**2, color='red') plt.plot(x, beta**2*epsilon*(x - x0)**2, color='green') plt.xlabel('Bond Length (Å)') plt.ylabel('Relative Energy (kcal/mol)') plt.ylim(0, 500) plt.xlim(0.5, 1.75) plt.show() ###Output _____no_output_____ ###Markdown We can go to a higher-order Taylor series expansion to gain accuracy across a larger region. Here's the expansion up to 4th order. (Remember that Python counts from 0, so this goes up to n-1 terms, and n=5 is the O() error term.)
###Code x, x0 = symbols('x x0') V.series(x, x0, n=5) # let's plot the expansion terms x = np.linspace(0.5, 2.5, 100) beta = 2.253 epsilon = 265.79 x0 = 0.9624 # Angstroms plt.plot(x, epsilon * (1 - np.exp(-beta * (x-x0)))**2, color='red', label='Morse function') plt.plot(x, beta**2*epsilon*(x - x0)**2, color='green', label='2nd order') plt.plot(x, beta**2*epsilon*(x - x0)**2 - beta**3*epsilon*(x-x0)**3, label='3rd order') plt.plot(x, beta**2*epsilon*(x - x0)**2 - beta**3*epsilon*(x-x0)**3 + 7/12*beta**4*epsilon*(x-x0)**4, label='4th order') plt.xlabel('Bond Length (Å)') plt.ylabel('Relative Energy (kcal/mol)') plt.ylim(0, 1000) plt.xlim(0.5, 1.75) plt.legend() plt.show() ###Output _____no_output_____ ###Markdown While the 3rd order term goes negative at large separation (uh oh!), the 4th order term looks pretty successful from ~0.6-1.3 Å. We can continue to expand the Taylor series if we wish to have higher accuracy. Notice that the odd orders have negative sign, so we'll need to be careful to pick even orders to avoid negative energies. ###Code x, x0 = symbols('x x0') V.series(x, x0, n=9) # let's plot the expansion terms x = np.linspace(0.5, 2.5, 100) beta = 2.253 epsilon = 265.79 x0 = 0.9624 # Angstroms plt.plot(x, epsilon * (1 - np.exp(-beta * (x-x0)))**2, color='red') plt.plot(x, beta**2*epsilon*(x - x0)**2, color='green') plt.plot(x, beta**2*epsilon*(x - x0)**2 - beta**3*epsilon*(x-x0)**3 + 7/12*beta**4*epsilon*(x-x0)**4, color='blue') plt.plot(x, beta**2*epsilon*(x - x0)**2 - beta**3*epsilon*(x-x0)**3 + 7/12*beta**4*epsilon*(x-x0)**4 - 0.25*beta**5*epsilon*(x-x0)**5 + 31/360*beta**6*epsilon*(x-x0)**6, color='gold') plt.plot(x, beta**2*epsilon*(x - x0)**2 - beta**3*epsilon*(x-x0)**3 + 7/12*beta**4*epsilon*(x-x0)**4 - 1/4*beta**5*epsilon*(x-x0)**5 + 31/360*beta**6*epsilon*(x-x0)**6 - 1/40*beta**7*epsilon*(x-x0)**7 + 127/20160*beta**8*epsilon*(x-x0)**8, color='purple') plt.xlabel('Bond Length (Å)') plt.ylabel('Relative Energy (kcal/mol)') plt.ylim(0, 750) plt.xlim(0.5, 1.75) plt.show() ###Output _____no_output_____
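###Markdown
As a quick sanity check on the harmonic term (a small added cell; it rebuilds the symbolic Morse potential because `beta`, `epsilon` and `x0` were overwritten with numbers in the plotting cells above): the harmonic coefficient should equal half of the second derivative of $V$ at $r_0$, i.e. $\beta^2 \varepsilon$.
###Code
x, x0, epsilon, beta = symbols('x x0 epsilon beta')
V = epsilon * (1 - exp(-beta * (x - x0)))**2
# Half of the curvature at the minimum gives the harmonic force constant
simplify(diff(V, x, 2).subs(x, x0) / 2)
###Output _____no_output_____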
2020_03_06/Talk_Part01_DifferentLibraries.ipynb
###Markdown Introduction - Plotting a Timeseries Let's load one trace of motion from an fMRI scan. ###Code import numpy as np motion = np.loadtxt('MOTION_FILE.1D') TR = 0.780 NAcq = motion.shape[0] time = np.linspace(0,TR*NAcq,NAcq) ###Output _____no_output_____ ###Markdown 1. Static Plotting with Matplotlib ###Code import matplotlib.pyplot as plt plt.figure(figsize=(15,5)) plt.plot(time,motion) plt.xlabel('Time [s]') plt.ylabel('Motion') ###Output _____no_output_____ ###Markdown 2. Dynamic Plotting with Bokeh ###Code from bokeh.plotting import figure, output_notebook, show output_notebook() # create a new plot with a title and axis labels p = figure(title="Absolute Motion", x_axis_label='Time', y_axis_label='Motion', tools=['hover','pan','reset','wheel_zoom','save','box_zoom'], plot_width=1000, plot_height=300) # add a line renderer with legend and line thickness p.line(time, motion, legend_label="Motion", line_width=2) # show the results show(p) ###Output _____no_output_____ ###Markdown 3. Dynamic Plotting with Holoviews* With HoloViews, instead of building a plot using direct calls to a plotting library, you first describe your data with a small amount of crucial semantic information required to make it visualizable.* Then you specify additional metadata as needed to determine more detailed aspects of your visualization. This approach provides immediate, automatic visualization that can be effortlessly requested at any time as your data evolves, rendered automatically by one of the supported plotting libraries (such as Bokeh or Matplotlib). ###Code import holoviews as hv hv.extension('bokeh') hv_mot = hv.Curve((time,motion),'Time','Motion') print(hv_mot) hv_mot.opts(width=1000, height=300, tools=['hover']) hv.extension('matplotlib') hv_mot.opts(fig_size=450, aspect=3.5) ###Output _____no_output_____ ###Markdown 4. Dynamic Plotting via hvplot ###Code import hvplot.pandas import pandas as pd hv.extension('bokeh') motion_df = pd.DataFrame() motion_df['Time'] = time motion_df['Motion'] = motion motion_df.head() a = motion_df.hvplot(x='Time',y='Motion').opts(width=1000) b = motion_df['Motion'].hvplot(kind='hist') type(b) (a + b).cols(1) ###Output _____no_output_____
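###Markdown
Any of the HoloViews/hvPlot objects above can also be written out as a standalone interactive HTML file (a short added sketch using HoloViews' `save` function; the output filename is illustrative):
###Code
hv.save(a + b, 'motion_report.html')  # writes a self-contained Bokeh HTML file
###Output _____no_output_____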
exercises/E03-CrossVal-CreditScoring.ipynb
###Markdown Team members:- Jorge Eduardo Rodriguez Cardozo - 200711501- German Augusto Carvajal Murcia - 201313516 Exercise 03 Data preparation and model evaluation exercise with credit scoringBanks play a crucial role in market economies. They decide who can get access to finance, the terms of the loans, and by doing so, they can make or break investment decisions. For markets and society to function, individuals and companies need access to credit. Credit scoring algorithms, which make a guess at the probability of default, are the method banks use to determine whether or not a loan should be granted. This competition requires participants to improve on the state of the art in credit scoring, by predicting the probability that somebody will experience financial distress in the next two years. [Dataset](https://www.kaggle.com/c/GiveMeSomeCredit)Attribute Information:|Variable Name | Description | Type||----|----|----||SeriousDlqin2yrs | Person experienced 90 days past due delinquency or worse | Y/N||RevolvingUtilizationOfUnsecuredLines | Total balance on credit divided by the sum of credit limits | percentage||age | Age of borrower in years | integer||NumberOfTime30-59DaysPastDueNotWorse | Number of times borrower has been 30-59 days past due | integer||DebtRatio | Monthly debt payments | percentage||MonthlyIncome | Monthly income | real||NumberOfOpenCreditLinesAndLoans | Number of Open loans | integer||NumberOfTimes90DaysLate | Number of times borrower has been 90 days or more past due. | integer||NumberRealEstateLoansOrLines | Number of mortgage and real estate loans | integer||NumberOfTime60-89DaysPastDueNotWorse | Number of times borrower has been 60-89 days past due |integer||NumberOfDependents | Number of dependents in family | integer| Read the data into Pandas ###Code import pandas as pd pd.set_option('display.max_columns', 500) import zipfile with zipfile.ZipFile('/Users/germancarvajal/Dropbox/Universidad-201818/Deep_learning_y_redes_neuronales/AppliedDeepLearningClass/datasets/KaggleCredit2.csv.zip', 'r') as z: f = z.open('KaggleCredit2.csv') data = pd.io.parsers.read_table(f, sep=',') data.head() y = data['SeriousDlqin2yrs'] X = data.drop(['SeriousDlqin2yrs','Unnamed: 0'], axis=1) ###Output _____no_output_____ ###Markdown Exercise 3.1 Impute the missing values of the Age and Number of Dependents ###Code print(X.isnull().sum()) X.age=X.age.fillna(X.age.mean()) X.NumberOfDependents=X.NumberOfDependents.fillna(X.NumberOfDependents.median()) print(X.isnull().sum()) ###Output RevolvingUtilizationOfUnsecuredLines 0 age 4267 NumberOfTime30-59DaysPastDueNotWorse 0 DebtRatio 0 MonthlyIncome 0 NumberOfOpenCreditLinesAndLoans 0 NumberOfTimes90DaysLate 0 NumberRealEstateLoansOrLines 0 NumberOfTime60-89DaysPastDueNotWorse 0 NumberOfDependents 4267 dtype: int64 RevolvingUtilizationOfUnsecuredLines 0 age 0 NumberOfTime30-59DaysPastDueNotWorse 0 DebtRatio 0 MonthlyIncome 0 NumberOfOpenCreditLinesAndLoans 0 NumberOfTimes90DaysLate 0 NumberRealEstateLoansOrLines 0 NumberOfTime60-89DaysPastDueNotWorse 0 NumberOfDependents 0 dtype: int64 ###Markdown - The missing values in the age and the number of dependents are imputed with the mean and the median respectively, according to the variables' nature as continuous or discrete count numeric vectors.
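###Markdown
Before selecting features, it is worth checking the class balance, since it explains why accuracy and F1 behave so differently in the results below (a small added check; `y` is the target defined above):
###Code
# Fraction of positive (delinquent) cases -- a heavily imbalanced target
print(y.mean())
###Output _____no_output_____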
Exercise 3.2 From the set of features, select the features that maximize the **F1 Score** of the model using K-Fold cross-validation ###Code import random random.seed(123) import numpy as np import matplotlib.pyplot as plt from sklearn.feature_selection import RFECV from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, fbeta_score reg = LogisticRegression() model=RFECV(reg,step=1,cv=2,scoring='f1',n_jobs=-1,verbose=1) model.fit(X,y) # Plot number of features VS. cross-validation scores plt.figure() plt.xlabel("Number of features selected") plt.ylabel("F1 Score") plt.plot(range(1, len(model.grid_scores_) + 1), model.grid_scores_) plt.show() pd.DataFrame(X.columns,columns=['Features'])[model.support_] print("Optimal number of features :", model.n_features_) print("Optimal F1 score : ", model.grid_scores_[model.n_features_ - 1]) # grid_scores_ is 0-indexed, so the score for n features is at index n - 1 print('Accuracy: '+ str(accuracy_score(y, model.predict(X)))) print('precision_score ', precision_score(y, model.predict(X))) print('recall_score ', recall_score(y, model.predict(X))) print('f1_score ', f1_score(y, model.predict(X))) print('F_beta_score ', fbeta_score(y, model.predict(X),beta=10)) ###Output Optimal number of features : 5 Optimal F1 score : 0.0774055560896 Accuracy: 0.93345436833 precision_score 0.594444444444 recall_score 0.0421481092437 f1_score 0.0787150564002 F_beta_score 0.0425394284515 ###Markdown Exercise 3.3 Now, which is the best set of features selected by AUC? ###Code model=RFECV(reg,step=1,cv=2,scoring='roc_auc',n_jobs=-1,verbose=1) model.fit(X,y) # Plot number of features VS. cross-validation scores plt.figure() plt.xlabel("Number of features selected") plt.ylabel("AUC") plt.plot(range(1, len(model.grid_scores_) + 1), model.grid_scores_) plt.show() pd.DataFrame(X.columns,columns=['Features'])[model.support_] print("Optimal number of features :", model.n_features_) print("Optimal AUC score : ", model.grid_scores_[model.n_features_ - 1]) # same 0-indexing as above print('Accuracy: '+ str(accuracy_score(y, model.predict(X)))) print('precision_score ', precision_score(y, model.predict(X))) print('recall_score ', recall_score(y, model.predict(X))) print('f1_score ', f1_score(y, model.predict(X))) print('F_beta_score ', fbeta_score(y, model.predict(X),beta=10)) ###Output Optimal number of features : 8 Optimal AUC score : 0.6965286405497385 Accuracy: 0.93332152504096 precision_score 0.5798165137614679 recall_score 0.04149159663865546 f1_score 0.07744149001347875 F_beta_score 0.041876545801651906
PWM-RC.ipynb
###Markdown Calculating the ripple and response time of an RC filter for PWM We want to filter a PWM signal to obtain an analog signal of variable voltage. The simplest way to achieve this is to use a basic first-order RC filter: Below we will study the characteristics of this filter, showing how to tune it to filter a PWM signal optimally. Transfer function of the RC filter The transfer function of a first-order RC filter is the following:$$H(s) = \frac{1}{1 + RCs}=\frac{1}{1 + \tau s}\implies H(\omega) = \frac{1}{1 + \tau j\omega}$$where $\tau = RC$ is the *time constant* of the filter.It can be shown that the cutoff frequency of the filter is:$$f_c = \frac{1}{2\pi\tau}$$The transfer function can also be expressed as a function of the frequency $f$, knowing that $\omega = 2\pi f$:$$H(f) = \frac{1}{1 + j\frac{f}{f_c}}$$To study the quality of the filtering of PWM signals, we are interested in the amplitude of the response:$$\left|H(f)\right| = \frac{1}{\sqrt{1 + \left(\frac{f}{f_c}\right)^2}}$$ Filtering a PWM signal The two main parameters that define the quality of the filtered signal are the ripple and the response time: * The ripple is the difference between the maximum and minimum voltages of the signal when the PWM *duty cycle* is constant. * The response time is how long it takes the filtered signal to reach 90% of the final value after a step input. The ripple is determined by the PWM frequency and the cutoff frequency of the filter, while the response time depends only on the latter. Thus, for a fixed PWM frequency, lowering the cutoff frequency has one positive effect (it reduces the ripple) and one negative effect (it increases the response time). Increasing the PWM frequency improves the ripple without affecting the response time, but that is not always possible. Some microcontrollers, such as the Arduino Uno, do not allow changing it (unless low-level functions are used). Moreover, high PWM frequencies increase power consumption when driving transistors, because transistors dissipate more energy when operating in their linear region, and each PWM cycle implies two passes through it. Calculating the ripple The ripple is usually defined with the PWM working at 50%, since that is the worst case. This value can be estimated using the following expression:$$V_R \approx \frac{\pi}{2}\left|H\left(f_{PWM}\right)\right|V_{PWM}$$where $f_{PWM}$ and $V_{PWM}$ are the frequency and amplitude of the PWM signal, respectively. Calculating the response time The filter response has the form:$$V(t) = V(0)\, e^{-t/\tau}$$Therefore, the response time can be calculated as:$$T_r = -RC\ln(1 - 0.9)$$ Example The plot below shows the PWM input signal, in blue, together with the signal obtained after filtering, in orange, for a PWM at 50% in steady state. You can check how the result changes when the values of $R$, $C$ or $f_{PWM}$ are varied.
###Code import matplotlib.pyplot as plt import numpy as np import math R = 2e4 # Resistance in Ohm C = 330e-9 # Capacitor in F F = 490.0 # PWM frequency in Hz (Arduino Uno: 490) V = 5.0 # PWM voltage in V (Arduino Uno: 5) # Cutoff frequency tau = R*C fc = 1/(2*np.pi*tau) # Transfer function (amplitude) H = lambda f: 1/np.sqrt(1 + (f/fc)**2) M = lambda f: 20*math.log10(H(f)) # Ripple at 50% duty cycle, in mV r50 = 0.5*math.pi*abs(H(F))*V*1e3 # Response time to 90%, in ms t90 = -tau*math.log(1 - 0.9)*1e3 # Steady-state response nc = 5 # Number of cycles in the plot nd = 1000 # Number of divisions (resolution) h = nc/F/nd # Time step t = np.linspace(0, h*nd, nd + 1) y = np.zeros(nd + 1) + 0.5*V - 0.0005*r50 u = np.zeros(nd + 1) for i in range(1, nd): if i%(nd/nc) <= 0.5*nd/nc: u[i] = V y[i] = tau/(tau + h)*y[i - 1] + h/(tau + h)*u[i] plt.plot(1e3*t, u, 1e3*t, y) plt.title("Ripple: %0.0f mV" % r50) plt.xlabel("Time (ms)") plt.ylabel("Amplitude (V)"); ###Output _____no_output_____ ###Markdown To illustrate the effect of $T_r$, the next plot shows the response to a 100% step input (the PWM jumps suddenly from 0 to 100% at the initial instant). The response time is how long the filtered signal (in orange) takes to reach 90% of $V_{PWM}$. In this case there is no ripple, since with the PWM at 100% the input signal is constant.
usc-csci-ml/hw4/src/CSCI567_hw4_fall16.ipynb
###Markdown Load Data ###Code ## (c) X_tr, Y_tr, X_te, Y_te = loaddata(data_path) print "Train X, Y :", X_tr.shape, Y_tr.shape print "Test X, Y :", X_te.shape, Y_te.shape X_tr, X_te = normalize(X_tr, X_te) d_in = X_tr.shape[1] # input features d_out = Y_tr.shape[1] # Output predictions ## (d) # default args args = { 'actfn':'linear', 'last_act':'softmax', 'reg_coeffs': [0.0], 'num_epoch': 30, 'batch_size': 1000, 'sgd_lr': 0.001, 'sgd_decays': [0.0], 'sgd_moms': [0.0], 'sgd_Nesterov': False, 'EStop': False, 'verbose': False } # FIXME: BEGIN : For quick testing.. Remove this later args['batch_size'] = 10 X_tr, Y_tr = X_tr[0:100], Y_tr[:100] X_te, Y_te = X_te[0:20], Y_te[:20] print "Train X, Y :", X_tr.shape, Y_tr.shape # FIXME: END print("\n\n## (d) Linear Activation ") archs = [[d_in, d_out], [d_in, 50, d_out], [d_in, 50, 50, d_out], [d_in, 50, 50, 50, d_out]] res = testmodels(X_tr, Y_tr, X_te, Y_te, archs, **args) archs = [[d_in, 50, d_out], [d_in, 500, d_out], [d_in, 500, 300, d_out], [d_in, 800, 500, 300, d_out], [d_in, 800, 800, 500, 300, d_out]] res = testmodels(X_tr, Y_tr, X_te, Y_te, archs, **args) # (e) print("\n\n## (e) Sigmoid Activation") args['actfn'] = 'sigmoid' _ = testmodels(X_tr, Y_tr, X_te, Y_te, archs, **args) # (f) print("\n\n## (f) ReLu Activation") args['actfn'] = 'relu' args['sgd_lr'] = 5e-4 _ = testmodels(X_tr, Y_tr, X_te, Y_te, archs, **args) # (g) print("\n\n## (g) Regularization Coefficients") archs = [[d_in, 800, 500, 300, d_out]] args['reg_coeffs'] = [1e-7, 5e-7, 1e-6, 5e-6, 1e-5] best, _ = testmodels(X_tr, Y_tr, X_te, Y_te, archs, **args) best_lambda_noEstop = best[1] print("Best Regularization Coefficient=", best_lambda_noEstop) # (h) print("\n\n## (h) Regularization Coefficients -- Early stop") args['EStop'] = True best, _ = testmodels(X_tr, Y_tr, X_te, Y_te, archs, **args) best_lambda_EStop = best[1] print("Best Regularization Coefficient with early stopping=", best_lambda_EStop) # (i) print("\n\n## (i) SGD Decay") args['reg_coeffs'] = [5e-7] args['num_epoch'] = 100 args['sgd_lr'] = 1e-5 args['sgd_decays'] = [1e-5, 5e-5, 1e-4, 3e-4, 7e-4, 1e-3] args['EStop'] = False best, _ = testmodels(X_tr, Y_tr, X_te, Y_te, archs, **args) best_decay = best[2] print("Best Decay", best_decay) # (j) print("\n\n## (j) SGD Momentum") args['reg_coeffs'] = [0.0] args['num_epoch'] = 50 args['sgd_decays'] = [best_decay] # TODO: get this from the best value of previous step args['sgd_Nesterov'] = True args['sgd_moms']= [0.99, 0.98, 0.95, 0.9, 0.85] best, _ = testmodels(X_tr, Y_tr, X_te, Y_te, archs, **args) best_mom = best[3] print("Best moemntum", best_mom) # (k) print("\n\n## (k) Combining all") args['num_epoch'] = 100 args['sgd_lr'] = 1e-5 args['sgd_Nesterov'] = True args['EStop'] = True #TODO: Best values from previous steps args['sgd_decays'] = [best_decay] args['sgd_moms']= [best_mom] args['reg_coeffs'] = [best_lambda_EStop] testmodels(X_tr, Y_tr, X_te, Y_te, archs, **args) # (l) Grid Search print("\n\n## (j) Grid Search ") archs = [[d_in, 50, d_out], [d_in, 500, d_out], [d_in, 500, 300, d_out], [d_in, 800, 500, 300, d_out], [d_in, 800, 800, 500, 300, d_out]] args = { 'actfn':'relu', 'last_act':'softmax', 'num_epoch': 100, 'batch_size': 1000, 'sgd_lr': 1e-5, 'sgd_Nesterov': True, 'sgd_moms': [0.99], 'EStop': True, 'verbose': False, 'reg_coeffs': [1e-7, 5e-7, 1e-6, 5e-6, 1e-5], 'sgd_decays': [1e-5, 5e-5, 1e-4], } testmodels(X_tr, Y_tr, X_te, Y_te, archs, **args) ###Output ## (j) Grid Search Epoch 00007: early stopping architecture=[50, 50, 2], lambda=1e-07, decay=1e-05, 
momentum=0.99, actfn=relu: score=0.899999976158 | time=1.21030306816 Epoch 00007: early stopping architecture=[50, 50, 2], lambda=1e-07, decay=5e-05, momentum=0.99, actfn=relu: score=0.449999988079 | time=1.2862830162 Epoch 00007: early stopping architecture=[50, 50, 2], lambda=1e-07, decay=0.0001, momentum=0.99, actfn=relu: score=0.40000000596 | time=1.39065003395 Epoch 00007: early stopping architecture=[50, 50, 2], lambda=5e-07, decay=1e-05, momentum=0.99, actfn=relu: score=0.25 | time=1.21554899216 Epoch 00007: early stopping architecture=[50, 50, 2], lambda=5e-07, decay=5e-05, momentum=0.99, actfn=relu: score=0.25 | time=1.33358192444 Epoch 00007: early stopping architecture=[50, 50, 2], lambda=5e-07, decay=0.0001, momentum=0.99, actfn=relu: score=0.300000011921 | time=1.34109997749 Epoch 00007: early stopping architecture=[50, 50, 2], lambda=1e-06, decay=1e-05, momentum=0.99, actfn=relu: score=0.15000000596 | time=1.25166296959 Epoch 00007: early stopping architecture=[50, 50, 2], lambda=1e-06, decay=5e-05, momentum=0.99, actfn=relu: score=0.449999988079 | time=1.30975294113 Epoch 00007: early stopping architecture=[50, 50, 2], lambda=1e-06, decay=0.0001, momentum=0.99, actfn=relu: score=0.699999988079 | time=1.35929894447 Epoch 00012: early stopping architecture=[50, 50, 2], lambda=5e-06, decay=1e-05, momentum=0.99, actfn=relu: score=0.600000023842 | time=1.31907081604 Epoch 00007: early stopping architecture=[50, 50, 2], lambda=5e-06, decay=5e-05, momentum=0.99, actfn=relu: score=0.550000011921 | time=1.33358502388 Epoch 00007: early stopping architecture=[50, 50, 2], lambda=5e-06, decay=0.0001, momentum=0.99, actfn=relu: score=0.699999988079 | time=1.47901797295 Epoch 00007: early stopping architecture=[50, 50, 2], lambda=1e-05, decay=1e-05, momentum=0.99, actfn=relu: score=0.40000000596 | time=1.31222605705 Epoch 00009: early stopping architecture=[50, 50, 2], lambda=1e-05, decay=5e-05, momentum=0.99, actfn=relu: score=0.649999976158 | time=1.47111296654 Epoch 00007: early stopping architecture=[50, 50, 2], lambda=1e-05, decay=0.0001, momentum=0.99, actfn=relu: score=0.25 | time=1.27420687675 Epoch 00010: early stopping architecture=[50, 500, 2], lambda=1e-07, decay=1e-05, momentum=0.99, actfn=relu: score=0.449999988079 | time=1.46441507339 Epoch 00007: early stopping architecture=[50, 500, 2], lambda=1e-07, decay=5e-05, momentum=0.99, actfn=relu: score=0.34999999404 | time=1.4722058773 Epoch 00007: early stopping architecture=[50, 500, 2], lambda=1e-07, decay=0.0001, momentum=0.99, actfn=relu: score=0.15000000596 | time=1.31370186806 Epoch 00007: early stopping architecture=[50, 500, 2], lambda=5e-07, decay=1e-05, momentum=0.99, actfn=relu: score=0.300000011921 | time=1.3919467926 Epoch 00007: early stopping architecture=[50, 500, 2], lambda=5e-07, decay=5e-05, momentum=0.99, actfn=relu: score=0.600000023842 | time=1.38250207901 Epoch 00007: early stopping architecture=[50, 500, 2], lambda=5e-07, decay=0.0001, momentum=0.99, actfn=relu: score=0.600000023842 | time=1.37583708763 Epoch 00007: early stopping architecture=[50, 500, 2], lambda=1e-06, decay=1e-05, momentum=0.99, actfn=relu: score=0.600000023842 | time=1.36326503754 Epoch 00007: early stopping architecture=[50, 500, 2], lambda=1e-06, decay=5e-05, momentum=0.99, actfn=relu: score=0.449999988079 | time=1.24810910225 Epoch 00007: early stopping architecture=[50, 500, 2], lambda=1e-06, decay=0.0001, momentum=0.99, actfn=relu: score=0.800000011921 | time=1.37515521049 Epoch 00007: early stopping architecture=[50, 500, 2], 
lambda=5e-06, decay=1e-05, momentum=0.99, actfn=relu: score=0.300000011921 | time=1.38642597198 Epoch 00007: early stopping architecture=[50, 500, 2], lambda=5e-06, decay=5e-05, momentum=0.99, actfn=relu: score=0.600000023842 | time=1.28251886368 Epoch 00007: early stopping architecture=[50, 500, 2], lambda=5e-06, decay=0.0001, momentum=0.99, actfn=relu: score=0.449999988079 | time=1.35393500328 Epoch 00007: early stopping architecture=[50, 500, 2], lambda=1e-05, decay=1e-05, momentum=0.99, actfn=relu: score=0.550000011921 | time=1.23606085777 Epoch 00007: early stopping architecture=[50, 500, 2], lambda=1e-05, decay=5e-05, momentum=0.99, actfn=relu: score=0.699999988079 | time=1.32297897339 Epoch 00007: early stopping architecture=[50, 500, 2], lambda=1e-05, decay=0.0001, momentum=0.99, actfn=relu: score=0.699999988079 | time=1.31705713272 Epoch 00007: early stopping architecture=[50, 500, 300, 2], lambda=1e-07, decay=1e-05, momentum=0.99, actfn=relu: score=0.600000023842 | time=1.72202992439 Epoch 00007: early stopping architecture=[50, 500, 300, 2], lambda=1e-07, decay=5e-05, momentum=0.99, actfn=relu: score=0.800000011921 | time=1.77702879906 Epoch 00007: early stopping architecture=[50, 500, 300, 2], lambda=1e-07, decay=0.0001, momentum=0.99, actfn=relu: score=0.600000023842 | time=1.85370397568 Epoch 00007: early stopping architecture=[50, 500, 300, 2], lambda=5e-07, decay=1e-05, momentum=0.99, actfn=relu: score=0.5 | time=1.79700708389 Epoch 00007: early stopping architecture=[50, 500, 300, 2], lambda=5e-07, decay=5e-05, momentum=0.99, actfn=relu: score=0.34999999404 | time=1.84643912315 Epoch 00007: early stopping architecture=[50, 500, 300, 2], lambda=5e-07, decay=0.0001, momentum=0.99, actfn=relu: score=0.5 | time=1.83488607407 Epoch 00007: early stopping architecture=[50, 500, 300, 2], lambda=1e-06, decay=1e-05, momentum=0.99, actfn=relu: score=0.600000023842 | time=1.68571710587 Epoch 00007: early stopping architecture=[50, 500, 300, 2], lambda=1e-06, decay=5e-05, momentum=0.99, actfn=relu: score=0.850000023842 | time=1.80838012695 Epoch 00007: early stopping architecture=[50, 500, 300, 2], lambda=1e-06, decay=0.0001, momentum=0.99, actfn=relu: score=0.40000000596 | time=1.81895780563 Epoch 00007: early stopping architecture=[50, 500, 300, 2], lambda=5e-06, decay=1e-05, momentum=0.99, actfn=relu: score=0.5 | time=1.83311510086 Epoch 00007: early stopping architecture=[50, 500, 300, 2], lambda=5e-06, decay=5e-05, momentum=0.99, actfn=relu: score=0.5 | time=1.68818712234 Epoch 00007: early stopping architecture=[50, 500, 300, 2], lambda=5e-06, decay=0.0001, momentum=0.99, actfn=relu: score=0.34999999404 | time=1.85359096527 Epoch 00007: early stopping architecture=[50, 500, 300, 2], lambda=1e-05, decay=1e-05, momentum=0.99, actfn=relu: score=0.449999988079 | time=1.98514699936 Epoch 00007: early stopping architecture=[50, 500, 300, 2], lambda=1e-05, decay=5e-05, momentum=0.99, actfn=relu: score=0.449999988079 | time=2.13911509514 Epoch 00007: early stopping architecture=[50, 500, 300, 2], lambda=1e-05, decay=0.0001, momentum=0.99, actfn=relu: score=0.15000000596 | time=1.99159502983 Epoch 00008: early stopping architecture=[50, 800, 500, 300, 2], lambda=1e-07, decay=1e-05, momentum=0.99, actfn=relu: score=0.449999988079 | time=2.97883582115 Epoch 00007: early stopping architecture=[50, 800, 500, 300, 2], lambda=1e-07, decay=5e-05, momentum=0.99, actfn=relu: score=0.600000023842 | time=2.65910792351 Epoch 00007: early stopping architecture=[50, 800, 500, 300, 2], lambda=1e-07, 
decay=0.0001, momentum=0.99, actfn=relu: score=0.40000000596 | time=2.57715702057 Epoch 00007: early stopping architecture=[50, 800, 500, 300, 2], lambda=5e-07, decay=1e-05, momentum=0.99, actfn=relu: score=0.40000000596 | time=2.76571798325 Epoch 00007: early stopping architecture=[50, 800, 500, 300, 2], lambda=5e-07, decay=5e-05, momentum=0.99, actfn=relu: score=0.600000023842 | time=2.67143392563 Epoch 00007: early stopping architecture=[50, 800, 500, 300, 2], lambda=5e-07, decay=0.0001, momentum=0.99, actfn=relu: score=0.40000000596 | time=2.74462294579 Epoch 00007: early stopping architecture=[50, 800, 500, 300, 2], lambda=1e-06, decay=1e-05, momentum=0.99, actfn=relu: score=0.449999988079 | time=2.81343007088 Epoch 00007: early stopping architecture=[50, 800, 500, 300, 2], lambda=1e-06, decay=5e-05, momentum=0.99, actfn=relu: score=0.600000023842 | time=2.76092219353 Epoch 00007: early stopping architecture=[50, 800, 500, 300, 2], lambda=1e-06, decay=0.0001, momentum=0.99, actfn=relu: score=0.649999976158 | time=2.72733187675 Epoch 00007: early stopping architecture=[50, 800, 500, 300, 2], lambda=5e-06, decay=1e-05, momentum=0.99, actfn=relu: score=0.800000011921 | time=2.41824197769 Epoch 00007: early stopping architecture=[50, 800, 500, 300, 2], lambda=5e-06, decay=5e-05, momentum=0.99, actfn=relu: score=0.600000023842 | time=2.54658317566 Epoch 00007: early stopping architecture=[50, 800, 500, 300, 2], lambda=5e-06, decay=0.0001, momentum=0.99, actfn=relu: score=0.550000011921 | time=2.38805699348 Epoch 00007: early stopping architecture=[50, 800, 500, 300, 2], lambda=1e-05, decay=1e-05, momentum=0.99, actfn=relu: score=0.600000023842 | time=2.25120592117 Epoch 00007: early stopping architecture=[50, 800, 500, 300, 2], lambda=1e-05, decay=5e-05, momentum=0.99, actfn=relu: score=0.800000011921 | time=2.45753717422 Epoch 00007: early stopping architecture=[50, 800, 500, 300, 2], lambda=1e-05, decay=0.0001, momentum=0.99, actfn=relu: score=0.34999999404 | time=2.56502890587 Epoch 00007: early stopping architecture=[50, 800, 800, 500, 300, 2], lambda=1e-07, decay=1e-05, momentum=0.99, actfn=relu: score=0.699999988079 | time=3.2302839756 Epoch 00007: early stopping architecture=[50, 800, 800, 500, 300, 2], lambda=1e-07, decay=5e-05, momentum=0.99, actfn=relu: score=0.300000011921 | time=3.14152598381 Epoch 00007: early stopping architecture=[50, 800, 800, 500, 300, 2], lambda=1e-07, decay=0.0001, momentum=0.99, actfn=relu: score=0.40000000596 | time=3.25892901421 Epoch 00007: early stopping architecture=[50, 800, 800, 500, 300, 2], lambda=5e-07, decay=1e-05, momentum=0.99, actfn=relu: score=0.600000023842 | time=3.30806398392 Epoch 00007: early stopping architecture=[50, 800, 800, 500, 300, 2], lambda=5e-07, decay=5e-05, momentum=0.99, actfn=relu: score=0.649999976158 | time=3.17942905426 Epoch 00007: early stopping architecture=[50, 800, 800, 500, 300, 2], lambda=5e-07, decay=0.0001, momentum=0.99, actfn=relu: score=0.40000000596 | time=3.21294498444 Epoch 00007: early stopping architecture=[50, 800, 800, 500, 300, 2], lambda=1e-06, decay=1e-05, momentum=0.99, actfn=relu: score=0.40000000596 | time=3.48409605026 Epoch 00007: early stopping architecture=[50, 800, 800, 500, 300, 2], lambda=1e-06, decay=5e-05, momentum=0.99, actfn=relu: score=0.40000000596 | time=3.06573796272 Epoch 00007: early stopping architecture=[50, 800, 800, 500, 300, 2], lambda=1e-06, decay=0.0001, momentum=0.99, actfn=relu: score=0.40000000596 | time=3.17743301392 Epoch 00011: early stopping architecture=[50, 
800, 800, 500, 300, 2], lambda=5e-06, decay=1e-05, momentum=0.99, actfn=relu: score=0.75 | time=3.32091283798 Epoch 00007: early stopping architecture=[50, 800, 800, 500, 300, 2], lambda=5e-06, decay=5e-05, momentum=0.99, actfn=relu: score=0.75 | time=3.04501795769 Epoch 00007: early stopping architecture=[50, 800, 800, 500, 300, 2], lambda=5e-06, decay=0.0001, momentum=0.99, actfn=relu: score=0.550000011921 | time=3.07281899452 Epoch 00007: early stopping architecture=[50, 800, 800, 500, 300, 2], lambda=1e-05, decay=1e-05, momentum=0.99, actfn=relu: score=0.699999988079 | time=3.09461283684 Epoch 00007: early stopping architecture=[50, 800, 800, 500, 300, 2], lambda=1e-05, decay=5e-05, momentum=0.99, actfn=relu: score=0.649999976158 | time=3.05219483376 Epoch 00007: early stopping architecture=[50, 800, 800, 500, 300, 2], lambda=1e-05, decay=0.0001, momentum=0.99, actfn=relu: score=0.40000000596 | time=3.06941986084 Best Config: architecture = [50, 50, 2], lambda = 1e-07, decay = 1e-05, momentum = 0.99, actfn = relu, best_acc = 0.899999976158 Mean Time = 2.0646273613seconds, |Models| = 75, Total Time = 154.847052097seconds
tutorials/jupyter/cbmpy_03_basic_functionality.ipynb
###Markdown CBMPy Tutorial 03 CBMPy basics Here I cover the basic functionality available in the CBMPy module. We have already encountered some of these in Tutorial 2. For more information please see the CBMPy reference guide (available from http://cbmpy.sourceforge.net).Additional files needed for this tutorial: NoneAs always we start by importing CBMPy ###Code import cbmpy ###Output ***** Using CPLEX ***** ###Markdown CBMPy modules CBMPy is designed using a flexible/modular architecture. Most of the functionality exists in modules, while some of the more commonly used functions have been made available at the module level i.e. `cbmpy.*` In Jupyter and IPython it is possible to view all the functions/properties available in a module by typing `cbmpy.` and pressing tab. Here is a list of the modules (they generally start with CB) that are available for use: ```cbmpy.CBCPLEX cbmpy.CBPlot cbmpy.CBCommon cbmpy.CBQt4 cbmpy.CBConfig cbmpy.CBRead cbmpy.CBDataStruct cbmpy.CBReadtxt cbmpy.CBGUI cbmpy.CBSolver cbmpy.CBModel cbmpy.CBTools cbmpy.CBModelTools cbmpy.CBVersion cbmpy.CBMultiCore cbmpy.CBWrite cbmpy.CBMultiEnv cbmpy.CBWx cbmpy.CBNetDB cbmpy.CBXML``` Similarly the functions in each module can also be displayed: `cbmpy.CBmodule.` Constants and utility functions For your convenience CBMPy also defines a number of often-used constants. ###Code # infinity cbmpy.INF # negative infinity cbmpy.NINF # IEEE not a number cbmpy.NAN ###Output _____no_output_____ ###Markdown as well as utility functions and program information: ###Code # CBMPy SVN revision number and version cbmpy.rev cbmpy.__version__ # run all nose-based unittests - note does not work in Jupyter environment #cbmpy.test() ###Output _____no_output_____ ###Markdown Common model analysis functions For ease of use, shortcuts to some basic FBA functions are provided. First the model object is instantiated as `cmod`: ###Code # load a test model cmod = cbmpy.readSBML3FBC('cbmpy_test_core') ###Output core_memesa_model.l3.xml FBC version: 1 M.getNumReactions: 26 M.getNumSpecies: 22 FBC.getNumObjectives: 1 FBC.getNumGeneAssociations: 0 FBC.getNumFluxBounds: 52 Zero dimension compartment detected: Cell INFO: Active objective: objMaxJ25 Adding objective: objMaxJ25 SBML3 load time: 0.022 INFO: no standard gene encoding detected, attempting to load from annotations. INFO: used key(s) '[]' INFO: Added 0 new genes and 0 associations to model ###Markdown Now we optimize the model (run the FBA): ###Code # Optimize the model sol = cbmpy.doFBA(cmod) # print the result print('The optimum value of the objective function is: {}'.format(sol)) ###Output cplx_constructLPfromFBA time: 0.00699996948242 cplx_analyzeModel FBA --> LP time: 0.00800013542175 CPXPARAM_Read_DataCheck 1 Tried aggregator 1 time. LP Presolve eliminated 0 rows and 4 columns. Aggregator did 11 substitutions. Reduced LP has 8 rows, 11 columns, and 20 nonzeros. Presolve time = 0.00 sec. (0.02 ticks) Initializing dual steep norms . . . Iteration log . . . Iteration: 1 Dual objective = 1000.000000 INFO: Model is optimal: 1 Solution status = 1 : optimal Solution method = 2 : dual Objective value = 1.0 Model is optimal Status: LPS_OPT Model is optimal Model is optimal analyzeModel objective value: 1.0 The optimum value of the objective function is: 1.0 ###Markdown Note how the function returns the value of the objective function. In Tutorial 4 we will see alternative ways to get this value.
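As a quick preview, one common accessor reads the value stored on the model object after optimization (a minimal sketch; it assumes the `getObjFuncValue()` method provided by CBMPy model objects, which Tutorial 4 covers in more detail): ###Code # read the objective value stored on the model after the FBA above (assumes cmod.getObjFuncValue() is available in this CBMPy version) cmod.getObjFuncValue() ###Output _____no_output_____ ###Markdown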
Note there are various arguments that can be used with the doFBA function, see the docstring for more details: ###Code help(cbmpy.doFBA) ###Output Help on function cplx_analyzeModel in module cbmpy.CBCPLEX: cplx_analyzeModel(f, lpFname=None, return_lp_obj=False, with_reduced_costs='unscaled', with_sensitivity=False, del_intermediate=False, build_n=True, quiet=False, oldlpgen=False, method='o') Optimize a model and add the result of the optimization to the model object (e.g. `reaction.value`, `objectiveFunction.value`). The stoichiometric matrix is automatically generated. This is a common function available in all solver interfaces. By default returns the objective function value - *f* an instantiated PySCeSCBM model object - *lpFname* [default=None] the name of the intermediate LP file. If not specified no LP file is produced - *return_lp_obj* [default=False] off by default when enabled it returns the CPLEX LP object - *with_reduced_costs* [default='unscaled'] calculate and add reduced cost information to mode this can be: 'unscaled' or 'scaled' or anything else which is interpreted as 'None'. Scaled means s_rcost = (r.reduced_cost*rval)/obj_value - *with_sensitivity* [default=False] add solution sensitivity information (not yet implemented) - *del_intermediate* [default=False] redundant except if output file is produced and deleted (not useful) - *build_n* [default=True] generate stoichiometry from the reaction network (reactions/reagents/species) - *quiet* [default=False] suppress cplex output - *method* [default='o'] choose the CPLEX method to use for solution, default is automatic. See CPLEX reference manual for details - 'o': auto - 'p': primal - 'd': dual - 'b': barrier (no crossover) - 'h': barrier - 's': sifting - 'c': concurrent
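###Markdown Based on the parameters documented above, the optimization can, for example, be rerun with the primal simplex method and with solver output suppressed (a sketch using only arguments listed in the docstring): ###Code # rerun FBA with method='p' (primal simplex) and quiet=True to suppress CPLEX output sol = cbmpy.doFBA(cmod, method='p', quiet=True) print(sol) ###Output _____no_output_____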
scripts/demos/Intro_to_interact.ipynb
###Markdown An introduction to Interact"The interact function (ipywidgets.interact) automatically creates user interface (UI) controls for exploring code and data interactively. It is the easiest way to get started using IPython’s widgets." [Using Interact Documentation](https://ipywidgets.readthedocs.io/en/stable/examples/Using%20Interact.html) First let's import the libraries we need: ###Code # imports import numpy as np %matplotlib inline import matplotlib.pyplot as plt from ipywidgets import interact, interactive, fixed, interact_manual import ipywidgets as widgets from bruges.filters.wavelets import ricker, gabor, sinc, cosine ###Output _____no_output_____ ###Markdown Why would we want to use Interact in the first place? An _interactive_ image speaks a thousand words... ###Code import plotter ###Output _____no_output_____ ###Markdown Basic interactLet's jump in: for a basic function `vp_from_dt` that prints out `Vp` for a given `DT`, we can construct a simple slider like so: ###Code def vp_from_dt(dt): """vp = 10e6 / dt""" return print(f'vp = {10e6/dt:.2f}') interact(vp_from_dt, dt=3500) ###Output _____no_output_____ ###Markdown For a similar function that requires a `boolean`, we can make a toggle. Also notice that by assigning the result of the `interact()` function to a name, we suppress the output.We have also created a dropdown list automatically by passing an `iterable` to the `interact()` function. ###Code vp = np.arange(2400, 2750, 50) def convert_vp_to_dt(vp, convert_to_dt): if convert_to_dt: output = 1e6/vp else: output = vp return print(f'The result is {output:.2f}') my_boolean = interact(convert_vp_to_dt, vp=vp, convert_to_dt=True) ###Output _____no_output_____ ###Markdown If we need to get input from the user, we can use an `input box` (see [here](https://stackoverflow.com/questions/35361038/using-ipython-ipywidget-to-create-a-variable) for example):- We first define the `input_string` and `saved_string` widgets- Then we define a `get_input()` function that will assign the input value to the saved variable- Next we call the `on_submit()` function on `input_string`, passing it the `get_input()` function ###Code input_string = widgets.Text(placeholder='Please type something in this box') saved_string = widgets.Text() def get_input(input_text): """bind the input text to a variable""" saved_string.value = input_string.value return saved_string input_string.on_submit(get_input) ###Output _____no_output_____ ###Markdown We can now use `input_string` to see an input box: ###Code input_string ###Output _____no_output_____ ###Markdown And `saved_string` will be updated on submit: ###Code saved_string ###Output _____no_output_____ ###Markdown We now have access to `saved_string` and can save its value to a string for manipulation: ###Code my_string = saved_string.value my_string.upper() ###Output _____no_output_____ ###Markdown Also note that `saved_string` remains bound to `input_string`, so if you type something else in the `input_string` box, `saved_string` is updated. The reverse is _not_ true however.
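If we want a live two-way binding instead of one that only updates on submit, ipywidgets also provides `link` (a minimal sketch, assuming the standard `widgets.link` helper and the `mirror_string` widget introduced here for illustration): ###Code # link two Text widgets so that edits propagate in both directions, without waiting for Enter mirror_string = widgets.Text() text_link = widgets.link((input_string, 'value'), (mirror_string, 'value')) mirror_string ###Output _____no_output_____ ###Markdown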
There are many widget types available:- [Numeric widgets](https://ipywidgets.readthedocs.io/en/stable/examples/Widget%20List.html#Numeric-widgets)- [Boolean widgets](https://ipywidgets.readthedocs.io/en/stable/examples/Widget%20List.html#Boolean-widgets)- [Selection widgets](https://ipywidgets.readthedocs.io/en/stable/examples/Widget%20List.html#Selection-widgets)- [String widgets](https://ipywidgets.readthedocs.io/en/stable/examples/Widget%20List.html#String-widgets)- [Image](https://ipywidgets.readthedocs.io/en/stable/examples/Widget%20List.html#Image)- [Button](https://ipywidgets.readthedocs.io/en/stable/examples/Widget%20List.html#Button)- [Output](https://ipywidgets.readthedocs.io/en/latest/examples/Output%20Widget.html)- [Play (Animation) widget](https://ipywidgets.readthedocs.io/en/stable/examples/Widget%20List.html#Play-(Animation)-widget)- [Date picker](https://ipywidgets.readthedocs.io/en/stable/examples/Widget%20List.html#Date-picker)- [Color picker](https://ipywidgets.readthedocs.io/en/stable/examples/Widget%20List.html#Color-picker)- [Controller](https://ipywidgets.readthedocs.io/en/stable/examples/Widget%20List.html#Controller)- [Container/Layout widgets](https://ipywidgets.readthedocs.io/en/stable/examples/Widget%20List.html#Container/Layout-widgets)Almost all have different `keyword arguments` that can be set as in the `IntSlider` example below: ###Code widgets.IntSlider( value=12, min=0, max=100, step=1, description='Slider:', disabled=False, continuous_update=False, orientation='horizontal', readout=True, readout_format='d' ) ###Output _____no_output_____ ###Markdown ExerciseTry to replicate the range slider below using a min of 0 and a max of 20.Once you've got it working, see what changes you can make to it.If you don't know where to start, you'll find all the widgets [here](https://ipywidgets.readthedocs.io/en/stable/examples/Widget%20List.html#Widget-List). ###Code # your code here widgets.IntRangeSlider( value=[3, 12], min=0, max=20, step=1, description='Range:', disabled=False, continuous_update=False, orientation='horizontal', readout=True, readout_format='d', ) ###Output _____no_output_____ ###Markdown Interact usage`interact` can also be used as a `decorator`. Interact [decorators](https://wiki.python.org/moin/PythonDecorators#What_is_a_Decorator) allow you to expand the functionality of your function and interact with it in a single shot. As this `square_or_double()` example function shows, interact also works with functions that have multiple arguments. [source](https://ipywidgets.readthedocs.io/en/stable/examples/Using%20Interact.html#Basic-interact) ###Code method = widgets.RadioButtons(options=['squared','doubled'],description='option') y = widgets.IntSlider(value=5,min=0,max=10,step=1,description='y') @interact(method=method, y=y) def square_or_double(method, y): if method == 'squared': result = y**2 else: result = y*2 return print(f'{y} {method} = {result}') ###Output _____no_output_____ ###Markdown ExerciseWrite a function that returns `a` to the power of `b` but use the interact decorator to make both `a` and `b` interactive (between 0 and 10 in steps of 1), and add a toggle to negate the result.
###Code # your code here @interact(a=widgets.IntSlider(value=2,min=0,max=10,step=1,description='a'), b=widgets.IntSlider(value=4,min=0,max=10,step=1,description='b'), negate=widgets.Checkbox(value=False,description='negate')) def pow_a_b(a, b, negate): """return a to the power of b or negative a**b""" if negate: out = -a**b else: out = a**b return out @interact(a=(0,10,1), b=(0,10,1), negate=False) def pow_a_b(a, b, negate): """return a to the power of b or negative a**b""" if negate: out = -a**b else: out = a**b return out ###Output _____no_output_____ ###Markdown Worked ExampleLet's build an example of an interactive wavelet using [Bruges](https://github.com/agile-geoscience/bruges), we'll use:- [Ricker](https://github.com/agile-geoscience/bruges/blob/master/bruges/filters/wavelets.py)- [Gabor](https://github.com/agile-geoscience/bruges/blob/master/bruges/filters/wavelets.py)- [sinc](https://github.com/agile-geoscience/bruges/blob/master/bruges/filters/wavelets.py)- [cosine](https://github.com/agile-geoscience/bruges/blob/master/bruges/filters/wavelets.py) ###Code w, t = ricker(duration=0.128, dt=0.001, f=25, return_t=True) fig, ax = plt.subplots(figsize=(15, 6), ncols=1) ax.plot(t, w) ax.grid() ax.set_title(f'ricker wavelet - frequency=25') plt.show() ###Output _____no_output_____ ###Markdown ExerciseLet's turn this into an interactive function:- first define a function- copy the code above into that function- use an interact decorator and widget to control frequency with a slider (allow a range from 1Hz to 75Hz in steps of 1Hz)Remember to correct the title. ###Code # your code here @interact(frequency=widgets.IntSlider(value=25,min=1,max=75,step=1)) def plot_filter(frequency): w, t = ricker(duration=0.128, dt=0.001, f=frequency, return_t=True) fig, ax = plt.subplots(figsize=(15, 6), ncols=1) ax.plot(t, w) ax.grid() ax.set_title(f'ricker wavelet - frequency={frequency}') plt.show() return ###Output _____no_output_____ ###Markdown ExerciseNow let's allow the user to pass both duration _and_ dt as interactive arguments, using your code above:- add two more arguments to the function- define these arguments `duration` and `dt` as `Interact.widgets`For `duration` use a value of 0.256 seconds with a minimum of 0.04 seconds, a maximum of 0.512 seconds and steps of 0.004 seconds.For `dt` use a value of 0.001 seconds with a minimum of 0.0001 seconds, a maximum of 0.008 seconds and steps of 0.0001 seconds. You may want to use the `**kwargs` `readout_format='.4f'` for `dt`.N.B.: you can optionally add `continuous_update=False` to the arguments of your `widgets` in order to avoid 'choppy' display when you move the sliders.
###Code # your code here @interact(frequency=widgets.IntSlider(value=25,min=1,max=75,step=1,continuous_update=False), duration=widgets.FloatSlider(value=0.256,min=0.04,max=0.512,step=0.004,continuous_update=False), dt=widgets.FloatSlider(value=0.001,min=0.0001,max=0.008, step=0.0001,continuous_update=False, readout_format='.4f')) def plot_filter(frequency,duration,dt): w, t = ricker(duration=duration, dt=dt, f=frequency, return_t=True) fig, ax = plt.subplots(figsize=(15, 6), ncols=1) ax.plot(t, w) ax.grid() ax.set_title(f'ricker wavelet - frequency={frequency}') plt.show() return ###Output _____no_output_____ ###Markdown ExerciseNow let's see if we can fill the wavelet between zero and positive values of the wavelet. For this you can use the matplotlib function `.fill_between()`; you might need to read the [docs](https://matplotlib.org/api/_as_gen/matplotlib.pyplot.fill_between.html) or look at an [example](https://matplotlib.org/examples/pylab_examples/fill_between_demo.html) to figure out how to use this function. ###Code # your code here @interact(frequency=widgets.IntSlider(value=25,min=1,max=75,step=1,continuous_update=False), duration=widgets.FloatSlider(value=0.256,min=0.04,max=0.512,step=0.004,continuous_update=False), dt=widgets.FloatSlider(value=0.001,min=0.0001,max=0.008, step=0.0001,continuous_update=False, readout_format='.4f'), filled=widgets.Checkbox(value=True,description='fill wavelet',disabled=False) ) def plot_filter(frequency,duration,dt,filled): w, t = ricker(duration=duration, dt=dt, f=frequency, return_t=True) fig, ax = plt.subplots(figsize=(15, 6), ncols=1) ax.plot(t, w) ax.grid() ax.set_title(f'ricker wavelet - frequency={frequency}') # define fill_between() parameters x_min = -duration / 2 x_max = duration / 2 x = np.arange(x_min, x_max, dt) if filled: ax.fill_between(x, 0, w, where=w > 0, color='k') plt.show() return ###Output _____no_output_____ ###Markdown ExerciseFinally, let's see if we can add a choice of wavelets to the function, so that the user can choose between 'ricker', 'gabor', 'sinc' and 'cosine' for example (these all have the same input parameters). There are different ways to achieve this, for example using a `ToggleButtons` or a `Select` widget.Once again, remember to correct the title.
###Code # your code here FUNCS={'ricker': ricker,'gabor': gabor,'sinc': sinc,'cosine': cosine} @interact(wavelet=widgets.ToggleButtons(options=FUNCS,description='wavelet',button_style='success'), frequency=widgets.IntSlider(value=25,min=1,max=75,step=1,continuous_update=False), duration=widgets.FloatSlider(value=0.256,min=0.04,max=0.512,step=0.004,continuous_update=False), dt=widgets.FloatSlider(value=0.001,min=0.0001,max=0.008, step=0.0001,continuous_update=False, readout_format='.4f'), filled=widgets.Checkbox(value=True,description='fill wavelet',disabled=False) ) def plot_filter(wavelet, frequency, duration, dt, filled): w, t = wavelet(duration=duration, dt=dt, f=frequency, return_t=True) fig, ax = plt.subplots(figsize=(15, 6), ncols=1) ax.plot(t, w) ax.grid() ax.set_title(f'{wavelet.__name__} wavelet - frequency={frequency}') # define fill_between() parameters x_min = -duration / 2 x_max = duration / 2 x = np.arange(x_min, x_max, dt) if filled: ax.fill_between(x, 0, w, where=w > 0, color='k') plt.show() return # A final version with all formatting FUNCS={'ricker': ricker,'gabor': gabor,'sinc': sinc,'cosine': cosine} @interact(wavelet=widgets.ToggleButtons(options=FUNCS.keys(),description='wavelet',button_style='success'), duration=widgets.FloatSlider(value=0.256,min=0.04,max=0.512,step=0.004, description='duration', continuous_update=False, readout_format='.3f'), dt=widgets.FloatSlider(value=0.001,min=0.0001,max=0.008,step=0.0001, description='dt', continuous_update=False, readout_format='.4f'), frequency=widgets.IntSlider(value=25,min=1,max=75,step=1, description='frequency', continuous_update=False, readout_format='d'), filled=widgets.Checkbox(value=True,description='fill wavelet',disabled=False) ) def plot_filter(wavelet, duration, dt, frequency, filled): """ Plot a filter: Args: function (function): one of ['ricker', 'gabor', 'sinc', 'cosine'] duration (float): The length in seconds of the wavelet. dt (float): The sample interval in seconds. frequency (ndarray): Dominant frequency of the wavelet in Hz. fill (boolean): whether the filter plot is filled between 0 and wavelet. Returns: ndarray. {function} wavelet with centre frequency 'frequency' sampled on t. 
""" # call the wavelet function w, t = FUNCS[wavelet](duration, dt, f=frequency, return_t=True) # create the plot fig, ax = plt.subplots(figsize=(15, 6), ncols=1) ax.plot(t, w, color='black') ax.grid() ax.set_title(f'{wavelet} wavelet, frequency={frequency}, duration={duration}, dt={dt}') # define fill_between() parameters x_min = -duration / 2 x_max = duration / 2 x = np.arange(x_min, x_max, dt) # fill wavelet if filled: ax.fill_between(x, 0, w, where=w > 0, color='k') # show the plot plt.show() return ###Output _____no_output_____ ###Markdown SummaryLet's summarise by looking at the initial reason we looked at interact: ###Code @interact( colormap=['viridis', 'plasma', 'inferno', 'magma', 'Greys', 'Greys_r'], section=widgets.RadioButtons(options=['inline', 'xline', 'timeslice'], value='inline',description='slicer',disabled=False), inline=widgets.IntSlider(value=300,min=0,max=600,step=1, continuous_update=False,description='<font color="red">inline</>'), xline=widgets.IntSlider(value=240,min=0,max=480,step=1, continuous_update=False,description='<font color="green">xline</>'), timeslice=widgets.IntSlider(value=125,min=0,max=250,step=1, continuous_update=False,description='<font color="blue">timeslice</>'), ) def seismic_plotter(colormap, section, inline, xline, timeslice): """Plot a chosen seismic ILine, XLine or Timeslice with a choice of colormaps""" # load a volume vol = np.load('../../data/Penobscot_0-1000ms.npy') # sections dictionary sections = { 'inline': {'amp': vol[inline,:,:].T, 'line': inline, 'shrink_val': 0.6, 'axhline_y': timeslice, 'axhline_c': 'b', 'axvline_x': xline, 'axvline_c': 'g', 'axspine_c': 'r'}, 'xline': {'amp': vol[:,xline,:].T, 'line': xline, 'shrink_val': 0.5, 'axhline_y': timeslice, 'axhline_c': 'b', 'axvline_x': inline, 'axvline_c': 'r', 'axspine_c': 'g'}, 'timeslice': {'amp': vol[:,:,timeslice], 'line': timeslice, 'shrink_val': 0.95, 'axhline_y': xline, 'axhline_c': 'g', 'axvline_x': inline, 'axvline_c': 'r', 'axspine_c': 'b'}, } # scale amplitudes ma = np.percentile(vol, 98) # plot figure fig, ax = plt.subplots(figsize=(18, 6), ncols=1) sec = sections[section] im = ax.imshow(sec['amp'], aspect=0.5, vmin=-ma, vmax=ma, cmap=colormap) ax.set_title(f'Penobscot_0-1000ms {section} {sec["line"]}') plt.colorbar(im, ax=ax, shrink=sec['shrink_val']).set_label(colormap) # add projected lines ax.axhline(y=sec['axhline_y'], linewidth=2, color=sec['axhline_c']) ax.axvline(x=sec['axvline_x'], linewidth=2, color=sec['axvline_c']) for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(2) ax.spines[axis].set_color(sec['axspine_c']) plt.show() return ###Output _____no_output_____
dev/_downloads/cfc20c17238f93690fc049d714cab718/plot_read_inverse.ipynb
###Markdown Reading an inverse operatorThe inverse operator's source space is shown in 3D. ###Code # Author: Alexandre Gramfort <[email protected]> # # License: BSD (3-clause) import mne from mne.datasets import sample from mne.minimum_norm import read_inverse_operator from mne.viz import set_3d_view print(__doc__) data_path = sample.data_path() subjects_dir = data_path + '/subjects' fname_trans = data_path + '/MEG/sample/sample_audvis_raw-trans.fif' inv_fname = data_path inv_fname += '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif' inv = read_inverse_operator(inv_fname) print("Method: %s" % inv['methods']) print("fMRI prior: %s" % inv['fmri_prior']) print("Number of sources: %s" % inv['nsource']) print("Number of channels: %s" % inv['nchan']) src = inv['src'] # get the source space # Get access to the triangulation of the cortex print("Number of vertices on the left hemisphere: %d" % len(src[0]['rr'])) print("Number of triangles on left hemisphere: %d" % len(src[0]['use_tris'])) print("Number of vertices on the right hemisphere: %d" % len(src[1]['rr'])) print("Number of triangles on right hemisphere: %d" % len(src[1]['use_tris'])) ###Output _____no_output_____ ###Markdown Show result on 3D source space ###Code fig = mne.viz.plot_alignment(subject='sample', subjects_dir=subjects_dir, trans=fname_trans, surfaces='white', src=src) set_3d_view(fig, focalpoint=(0., 0., 0.06)) ###Output _____no_output_____
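###Markdown As a consistency check (a sketch that assumes the standard `nuse` field of MNE source-space dictionaries), the used vertices per hemisphere should sum to the source count reported by the inverse operator: ###Code # active source locations per hemisphere should add up to inv['nsource'] n_used = src[0]['nuse'] + src[1]['nuse'] print("Used vertices: %d, inverse operator nsource: %d" % (n_used, inv['nsource'])) ###Output _____no_output_____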
datasets/CTC/NoisyDataPrep.ipynb
###Markdown Noise2Void Data Prep ###Code X_trainN2V=np.concatenate((X_train,X_test[:,:768,:736])) X_valN2V=X_val print(X_train10.shape) print(X_test10.shape) X_trainN2V10=np.concatenate((X_train10,X_test10[:,:768,:736])) X_valN2V10=X_val10 X_trainN2V20=np.concatenate((X_train20,X_test20[:,:768,:736])) X_valN2V20=X_val20 np.savez_compressed("/Volumes/Data/StarVoid/Sim/NumpyData/TrainValN2V.npz", X_train=X_trainN2V, X_val=X_valN2V) np.savez_compressed("/Volumes/Data/StarVoid/Sim/NumpyData/TrainValN2V10.npz", X_train=X_trainN2V10, X_val=X_valN2V10) np.savez_compressed("/Volumes/Data/StarVoid/Sim/NumpyData/TrainValN2V20.npz", X_train=X_trainN2V20, X_val=X_valN2V20) ###Output (102, 768, 736) (30, 773, 739)
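###Markdown As a quick sanity check, the saved archives can be read back with `np.load` (a sketch, assuming the same paths used above): ###Code # reload one archive and confirm that the stored array shapes match what was saved data = np.load("/Volumes/Data/StarVoid/Sim/NumpyData/TrainValN2V.npz") print(data['X_train'].shape, data['X_val'].shape) ###Output _____no_output_____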
chapter2/2.1.1.pytorch-basics-tensor.ipynb
###Markdown Scalar ###Code scalar = torch.tensor(3.1415926535) scalar.size(),scalar.item() ###Output _____no_output_____ ###Markdown As shown above, using item() can involve a loss of precision Numpy conversion Tensors and numpy objects share memory, so conversion between them is very fast. If one of them changes, the other changes with it. After a torch tensor is converted to numpy, it still uses the original storage location ###Code a : torch.Tensor = torch.randn(3,4) id(a) b = torch.randn(3,4) a_np = a.numpy() id(a_np),type(a_np) id(a[0]),id(a_np[0]) ###Output _____no_output_____ ###Markdown Converting a Tensor with gradients to numpy ###Code _ = torch.randn(3,3,requires_grad=True) _.numpy() _.detach().numpy() ###Output _____no_output_____ ###Markdown The memory they share cannot be inspected via id() ###Code c = torch.from_numpy(a_np) c.cuda() type(c) c_cuda = c.cuda() type(c_cuda) d = torch.eye(3) d.cuda() d.type() ###Output _____no_output_____
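###Markdown A small demonstration of the sharing claim above (a sketch): mutating the numpy view in place also changes the tensor, because both point at the same storage. ###Code # an in-place change to the numpy array is reflected in the tensor t = torch.ones(2, 2) t_np = t.numpy() t_np[0, 0] = 5.0 t, t_np ###Output _____no_output_____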
notebooks/data_prep-3.ipynb
###Markdown ###Code df[(df.file_date == '2020-02-22') | (df.file_date == '2020-02-23') | (df.file_date == '2020-02-24') | (df.file_date == '2020-02-25')].sort_values(['country', 'province', 'date']) day21 = pd.read_csv(os.path.join(DATA, '02-21-2020.csv'), index_col=None, header=0, parse_dates=['Last Update']) day21.Confirmed.sum() # day21 day22 = pd.read_csv(os.path.join(DATA, '02-22-2020.csv'), index_col=None, header=0, parse_dates=['Last Update']) day22.Confirmed.sum() day23 = pd.read_csv(os.path.join(DATA, '02-23-2020.csv'), index_col=None, header=0, parse_dates=['Last Update']) day23.Confirmed.sum() # day23.sort_values('Country/Region') day24 = pd.read_csv(os.path.join(DATA, '02-24-2020.csv'), index_col=None, header=0, parse_dates=['Last Update']) day24.Confirmed.sum() # day24.sort_values('Country/Region') day25 = pd.read_csv(os.path.join(DATA, '02-25-2020.csv'), index_col=None, header=0, parse_dates=['Last Update']) day25.Confirmed.sum() # day24['Last Update'] = day24['Last Update'].astype(str) # day24[~day24['Last Update'].str.contains('2020-02-24', regex= True, na=False)].Confirmed.sum() # day24.Confirmed.sum() - day23.Confirmed.sum() # day25.Confirmed.sum() - day24.Confirmed.sum() df_merged = pd.concat([day23, day24]) df_merged = df_merged.sort_values(['Country/Region', 'Province/State']) df_merged ###Output _____no_output_____ ###Markdown ###Code ''' Calculate the number of people that are ACTUALLY infected on a given day currently infected = confirmed to date - (recovered + died) ex: 5 = 10 - (4 + 1) ''' current_infected = pd.DataFrame([]) current_infected['currently_infected'] = (df.groupby('date').confirmed.sum() - \ (df.groupby('date').deaths.sum() + df.groupby('date').recovered.sum())) current_infected['delta'] = (current_infected['currently_infected'] - df.groupby('date').confirmed.sum()) daily_cases_df = pd.merge(daily_cases_df, current_infected, how='outer', on='date') #Create date of extraction folder save_dir = './data/' + str(datetime.date(datetime.now())) print('Saving to data subdirectory...') print('...', save_dir) if not os.path.exists(save_dir): os.mkdir(save_dir) print('Saving...') file_name = 'agg_data_{}.parquet.gzip'.format(datetime.date(datetime.now())) df.astype(str).to_parquet(os.path.join(save_dir, file_name), compression='gzip') print('...', file_name) csv_file_name = 'agg_data_{}.csv'.format(datetime.date(datetime.now())) df.astype(str).to_csv(os.path.join(save_dir, csv_file_name)) print('...', csv_file_name) daily_cases_file_name = 'trend_{}.csv'.format(datetime.date(datetime.now())) daily_cases_df.astype(str).to_csv(os.path.join(save_dir, daily_cases_file_name)) print('...', daily_cases_file_name) print('Done!') ###Output Saving to data subdirectory... ... ./data/2020-02-18 Saving... ... agg_data_2020-02-18.parquet.gzip ... agg_data_2020-02-18.csv ... trend_2020-02-18.csv Done!
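###Markdown To verify the export, the compressed parquet file can be read back (a sketch, assuming the paths created above): ###Code # reload the parquet file and confirm the row count matches the source frame df_check = pd.read_parquet(os.path.join(save_dir, file_name)) print(len(df_check) == len(df)) ###Output _____no_output_____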
HW4_AML71_Modjeska_Murphy_FFNN.ipynb
###Markdown CSCI E-82 - Advanced Machine Learning, Data Mining and Artificial Intelligence===== Homework 4 - Deep Learning - Dog Breed Identification *AML71: David Modjeska and Dominic Murphy*------ 0. Introduction The goal of this homework assignment and Kaggle competition is to predict the breeds for a set of dog photos. The dataset originates with ImageNet, as cleaned up by Stanford University, so the foundation for prediction was solid. Our team created two models to address this challenge, as instructed: a feed-forward neural network (FFNN) and a convolutional neural network (CNN).To prepare for modeling, we pre-processed the images in a number of steps:1. Resized all images, and converted images for the FFNN to grayscale2. Encoded the class labels3. Divided the training data into training and validation subsets4. Rebalanced the training data subset through image flipping and upsampling5. Augmented the FFNN images through image sharpening, rotation, and warping6. Reduced the FFNN images through PCA7. Saved the pre-processed images and related data into pickles for future re-useOn a high level, the FFNN consists of 8 hidden layers with RELU activation, supplemented by dropout and normalization layers. The CNN consists of the well-known VGG16 model, extended and retrained with additional convolutional, dropout, and hidden layers. The FFNN was implemented in TensorFlow for learning purposes, while the CNN was implemented in Keras for classification accuracy. Results showed that the CNN model performed fairly well on Kaggle - scoring in the middle of the field, broadly speaking. Multiclass log loss was approximately 2.8. The best FFNN model reached a validation accuracy of 5.99%, and a wide range of modeling techniques were explored and documented. The narrative for this model captures the details below.The submission notebooks are cloned to the extent possible for the two models, FFNN and CNN. The shared sections include all pre-processing steps and the metrics structure (and helper functions). The code re-use attained through this discipline paid off in time savings towards the latter half of the assignment. Moreover, team communications benefitted from a shared understanding, vocabulary, and requirements. 1.
Set up environment Load packages and set display options ###Code import cv2 import math import numpy as np import pandas as pd import os import re import random import seaborn as sns import tensorflow as tf import h5py from os import path from scipy import ndimage, misc from six.moves import cPickle as pickle from sklearn.decomposition import PCA from sklearn.model_selection import train_test_split as tt_split from sklearn.metrics import hamming_loss, f1_score, roc_curve, auc, log_loss, \ classification_report from sklearn.preprocessing import LabelEncoder, OneHotEncoder from sklearn.utils import resample, shuffle # import keras # from keras.models import Sequential, load_model # from keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Flatten, Dropout, ZeroPadding2D # from keras.optimizers import SGD, Adam # from keras import backend as K # from keras.callbacks import CSVLogger, EarlyStopping, ModelCheckpoint, LearningRateScheduler ,ReduceLROnPlateau # from keras.applications.vgg16 import VGG16, preprocess_input #from keras import metrics import matplotlib %matplotlib inline import matplotlib.pyplot as plt from IPython.display import display, HTML, Markdown, Image %matplotlib inline cache_dir = '' def my_display(str): display(HTML(str)) def my_display_df(df): display(HTML(df.to_html(index=False))) def my_linespace(): my_display('<p>') plt.rcParams["patch.force_edgecolor"] = True plt.style.use('ggplot') palette = sns.color_palette(palette = 'deep') sns.set_palette(palette) ###Output _____no_output_____ ###Markdown Set global constants for modeling ###Code # input image dimensions img_rows, img_cols = 128, 128 # smaller batch size means noisier gradient, but more updates per epoch #Dom: keep high for laptop, lower for AWS batch_size = 64 # number of iterations over the complete training data epochs = 10 # how to split the training data among training vs. validation sets train_ratio = 0.75 # limit the number of images that are processed during project's model exploration phase n_image_cap = np.inf # number of principal components to use in data reduction # this number must be a square in order for sample image display to work n_pcs_to_use = 256 # model_type can be either 'cnn' or 'ffnn' model_type = 'ffnn' cache_dir = '' ###Output _____no_output_____ ###Markdown 2. Load and Explore Class Labels Load and clean class labels ###Code labels_data = pd.read_csv( './labels.csv', names = ['kaggle_id', 'breed'], header = 0) labels_data.breed = labels_data.breed.str.title() ###Output _____no_output_____ ###Markdown Snippet of training data ###Code my_linespace() my_display_df(labels_data.head()) ###Output _____no_output_____ ###Markdown Sample of class labels ###Code class_labels = labels_data.breed.unique() print() print(*class_labels[:20], sep = ', ') ###Output Boston_Bull, Dingo, Pekinese, Bluetick, Golden_Retriever, Bedlington_Terrier, Borzoi, Basenji, Scottish_Deerhound, Shetland_Sheepdog, Walker_Hound, Maltese_Dog, Norfolk_Terrier, African_Hunting_Dog, Wire-Haired_Fox_Terrier, Redbone, Lakeland_Terrier, Boxer, Doberman, Otterhound ###Markdown Summary of image counts by breed In the summary statistics, histogram, and bar chart below, the most interesting thing to notice is the class imbalance: class labels per breed span a range from 66 through 120, with a mean around 85. Clearly some sort of upsampling or rebalancing is required. Our approach will be explained below in connection with image flipping.
###Code counts = labels_data.breed.value_counts() counts_df = pd.DataFrame({'breed':counts.index, 'count':counts}).reset_index(drop = True) counts_df.describe() print() plt.figure(figsize = (8, 5)) sns.distplot(a = counts_df['count'], bins = 15, hist = True, kde = False, hist_kws={"alpha": 0.9}) plt.xlabel('Breed count') plt.ylabel('Frequency') plt.title('Distribution of image counts by breed') plt.show() print() plt.figure(figsize = (8, 16)) ax = plt.subplot(1, 1, 1) sns.barplot(y = 'breed', x = 'count', data = counts_df) plt.title('Count of images by breed') ax.set_yticklabels(labels = counts_df.breed, fontsize=9) plt.show() ###Output ###Markdown 3. Load and Preprocess Images Load and resize images The training or testing images are loaded from disk in one large code loop per set, in the code below. After loading each image, it is resized to 128 x 128 pixels and reshaped for modeling. Images for the FFNN are also converted to grayscale, as a kind of 'smart' flattening to one layer. Each image is added to a master list after this processing, and each image's ID is taken from the filename and inserted into a parallel list. This ID list will be used to synchronize the class labels data with the loaded imagery. Finally, each image is normalized through division by 255, in order to ensure that all data values lie between 0.0 and 1.0. ###Code def load_process_images(image_dir): is_color = (model_type == 'cnn') #----- prepare for loading images = [] image_ids = [] # record whether this is the training set before the name is expanded into a path is_train = (image_dir == 'train') image_dir = './' + image_dir + '/' image_filenames = os.listdir(image_dir) if is_train and (labels_data.shape[0] != len(image_filenames)): print("Number of training labels doesn't match number of training images on disk") #----- load and resize images n_images = min(n_image_cap, len(image_filenames)) for i in range(n_images): # get image and id from disk filename = image_filenames[i] image = cv2.imread(image_dir + filename) id = re.sub('.jpg', '', filename) image = cv2.resize(image, (img_rows, img_cols)) if is_color: image = np.array(image).reshape((3, img_rows, img_cols)) else: image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # add image and id to list images.append([image]) image_ids.append(id) #----- collect the image list into an array x = np.array(images) if is_color: # remove the singleton dimension created by the list wrapping x = x[:, 0, :, :] images = None image_ids_df = pd.DataFrame({'id': image_ids }) if is_color: # assumes TensorFlow as backend, so scalar for # dimensions is last x = x.reshape(x.shape[0], img_rows, img_cols, 3) input_shape = (img_rows, img_cols, 3) else: x = x.reshape(x.shape[0], img_rows, img_cols) input_shape = (img_rows, img_cols) #----- center and normalize images # normalize image values to [0,1] (Keras sample code doesn't center) x = x.astype('float32') x /= 255 # normalization to [0, 1] # display status print() print('After loading ' + image_dir + ' directory:') print('\tLoaded', len(image_ids), 'images from disk') print('\tx shape:', x.shape) print('\t' + str(x.shape[0]) + ' samples processed') return(x, image_ids, input_shape) ###Output _____no_output_____ ###Markdown Encode y The class labels for the training data are loaded separately. These labels are then inner-joined with the already-loaded images, using image ID as the join key. After this merge, class labels are encoded as digits and then one-hot encoded for modeling.
###Code # construct y for classification (TO DO: verify synchronization of labels with images) def encode_y(x_ids, labels_data): x_ids_df = pd.DataFrame(x_ids, columns = ['id']) y_0 = pd.merge(left = x_ids_df, right = labels_data , left_on = 'id', right_on = 'kaggle_id') y_0 = y_0.breed.values label_encoder = LabelEncoder() label_encoder.fit(y_0) y_1 = label_encoder.transform(y_0).reshape(-1, 1) one_hot_encoder = OneHotEncoder(sparse = False) one_hot_encoder.fit(y_1) y = one_hot_encoder.transform(y_1) num_labels = y.shape[1] return(y, label_encoder, one_hot_encoder, num_labels) ###Output _____no_output_____ ###Markdown Split training data into train and validation sets ###Code # split training x and y into train and validation sets def split_my_training_data(x, y): x_train, x_valid, y_train, y_valid = tt_split(x, y, train_size = train_ratio, test_size = 1.0 - train_ratio, shuffle = True) print() print('After splitting training data:') print() print('\tx_train shape: ' + str(x_train.shape)) print('\tx_valid shape: ' + str(x_valid.shape)) print() print('\ty_train shape: ' + str(y_train.shape)) print('\ty_valid shape: ' + str(y_valid.shape)) return(x_train, x_valid, y_train, y_valid) ###Output _____no_output_____ ###Markdown Rebalance the classes with up-sampling from flipped images In order to address the two-to-one class imbalance, we upsampled the minority classes to the size of the majority class through image flipping. Color images were flipped using the opencv package, while B/W images were flipped using NumPy. The resulting images were appended to the training data, along with the relevant class labels. We should note that neither the validation data nor the testing data were touched during this upsampling process. ###Code #move from one hot encoding back to single column per row with class index number def one_hot_decode(y): return(np.argmax(y, axis = -1)) # adds flipped images to minority classes to balance the training set # Example: x_train, y_train = class_balance_with_image_flip( x_train, y_train ) # TO DO: for very unbalanced classes, oversample using both flipped and original images def class_balance_with_image_flip( x_train, y_train, is_color = True): #move from one hot encoding back to single column per row with class index number y_train_decode = one_hot_decode(y_train) #get counts by class index counts = np.unique(y_train_decode, return_counts=True) #find how many images are in the majority class max_class_count = np.max(counts[1]) flipped_x_list = [] flipped_y_idx_list = [] for i_class in counts[0]: #get original images for the class class_imgs = x_train[np.where(y_train_decode == i_class )] #find the number of images we needed to balance the class n_original = len(class_imgs) n_target = max_class_count - n_original #Flip the original images to create new images i_row = 0 for i in range(n_target): #horizontally flip the original image if is_color : image = cv2.flip( class_imgs[i_row], 1 ) image = np.array(image).reshape((img_rows, img_cols, 3)) else: #BW image = np.flip(class_imgs[i_row], axis = 1) image = np.array(image).reshape((img_rows, img_cols)) flipped_x_list.append(image) flipped_y_idx_list.append(i_class) #loop back to first class image if we have exceeded the number of original images i_row += 1 if i_row >= n_original: i_row = 0 if len(flipped_x_list) > 0: #gather flipped images x_flipped = np.array(flipped_x_list) y_flipped_decode = np.array(flipped_y_idx_list) # cast list to array y_flipped = one_hot_encoder.transform(y_flipped_decode.reshape(-1, 1) ) #append
to end of training set y_train = np.concatenate((y_train, y_flipped), axis=0) x_train = np.concatenate((x_train, x_flipped), axis=0) #reshuffle to mix the original and flipped images x_train, y_train = shuffle(x_train, y_train, random_state = 0) print() print (len(flipped_x_list),' image(s) flipped and added') return(x_train, y_train) ###Output _____no_output_____ ###Markdown Add transformed images to training data ###Code # from https://www.cc.gatech.edu/classes/AY2015/cs4475_summer/documents/sharpen.py def sharpen_image(image): image2 = image # Create the identity filter, but with the 1 shifted to the right kernel = np.zeros( (9,9), np.float32) kernel[4,4] = 2.0 # Identity times two # Create a box filter: boxFilter = np.ones( (9,9), np.float32) / 81.0 # Subtract the two: kernel = kernel - boxFilter # Note that we are subject to overflow and underflow here...but I believe that # filter2D clips top and bottom ranges on the output, plus you'd need a # very bright or very dark pixel surrounded by the opposite type. image2 = cv2.filter2D(image, -1, kernel) return(image2) def rotate_image(image): degrees = 5 M = cv2.getRotationMatrix2D((img_cols/2, img_rows/2), degrees, 1) image2 = cv2.warpAffine(image, M, (img_cols, img_rows)) return(image2) # from https://docs.opencv.org/3.1.0/da/d6e/tutorial_py_geometric_transformations.html def warp_image(image): points1 = np.float32([[25,25],[100,25],[25,100]]) # points2 = np.float32([[5,50],[100,25],[50,125]]) points2 = np.float32([[15,37],[100,25],[37,110]]) M = cv2.getAffineTransform(points1, points2) image2 = cv2.warpAffine(image, M, (img_cols, img_rows)) return(image2) # TO DO: adapt for color images also def add_transformed_images(x_train, y_train): sharpened_x_list = [] # add sharpened images for i in range(x_train.shape[0]): image2 = sharpen_image(x_train[i, :]) sharpened_x_list.append(image2) # add rotated images for i in range(x_train.shape[0]): image3 = rotate_image(x_train[i, :]) sharpened_x_list.append(image3) # add warped images for i in range(x_train.shape[0]): image4 = warp_image(x_train[i, :]) sharpened_x_list.append(image4) x_sharpened = np.array(sharpened_x_list) x_train = np.vstack((x_train, x_sharpened)) # adjust y for added images y_train = np.vstack((y_train, y_train, y_train, y_train)) x_train, y_train = shuffle(x_train, y_train, random_state = 0) print() print (len(sharpened_x_list),' image(s) transformed and added') return(x_train, y_train) # image_dir = 'train/' # image_filenames = os.listdir(image_dir) # for i in range(1): # filename = image_filenames[i] # image = cv2.imread(image_dir + filename) # image = cv2.resize(image, (img_rows, img_cols)) # image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB) # plt.imshow(image) # plt.xticks([]), plt.yticks([]) # to hide tick values on X and Y axis # plt.show() # image2 = sharpen_image(image) # plt.imshow(image2) # plt.xticks([]), plt.yticks([]) # to hide tick values on X and Y axis # plt.show() # image3 = rotate_image(image) # plt.imshow(image3) # plt.xticks([]), plt.yticks([]) # to hide tick values on X and Y axis # plt.show() # image4 = warp_image(image) # plt.imshow(image4) # plt.xticks([]), plt.yticks([]) # to hide tick values on X and Y axis # plt.show() ###Output _____no_output_____ ###Markdown Reduce FFNN training data using PCA Because the B/W images can be linearized for efficient modeling, it makes sense to apply PCA for both data reduction and a sort of regularization.
Accordingly, a number of principal components was found that explained approximately 80% of the original variance, while reducing the actual data size by approximately 98%. (The scree plot below shows the tradeoff between number of principal components and variance explained.) For assignment purposes, a major benefit of applying PCA was the improvement in model training performance, which allowed additional experimentation during the time available. ###Code ### visualize and choose number of principal components to use def viz_pca(pca_attribs, n_pcs_to_use): n_pcs_to_use = min(n_pcs_to_use, len(x_train) - 1) components = pca_attribs[0] explained_variance_ratio = pca_attribs[1] cum_var_exp = 100.0 * explained_variance_ratio.cumsum() plot_n_components = components.shape[0] # plot percentage of variance not explained print() fig = plt.figure(figsize = (8, 4)) plt.plot(100.0 - cum_var_exp[:plot_n_components]) plt.xticks(range(0, plot_n_components, 500)) plt.xlabel('Number of principal components') plt.ylabel('% of variance not explained') plt.title('Scree plot for PCA\n(Showing first 1000 of 16383 components)') plt.show() # print cumulative percentage of variance explained my_linespace() my_display('Percentage of variance explained by ' + str(n_pcs_to_use) + ' principal components: ' + str(round(cum_var_exp[n_pcs_to_use - 1], 2)) + '%') my_linespace() def reduce_image_data(x_train, x_valid, x_test): # flatten image arrays x_train = x_train.reshape(x_train.shape[0], img_rows * img_cols) x_valid = x_valid.reshape(x_valid.shape[0], img_rows * img_cols) x_test = x_test.reshape(x_test.shape[0], img_rows * img_cols) # create extra PC's for graphing purposes pca = PCA(copy = True, random_state = 0, n_components = 4 * n_pcs_to_use, svd_solver = 'full') pca.fit(np.vstack([x_train, x_valid])) # transform using PCA x_train = pca.transform(x_train)[:, :n_pcs_to_use] x_valid = pca.transform(x_valid)[:, :n_pcs_to_use] x_test = pca.transform(x_test)[:, :n_pcs_to_use] input_shape = [n_pcs_to_use] # display shapes of transformed data print() print('After PCA:') print('\tx_train shape: ' + str(x_train.shape)) print('\tx_valid shape: ' + str(x_valid.shape)) print('\tx_test shape: ' + str(x_test.shape)) return(x_train, x_valid, x_test, pca) ###Output _____no_output_____ ###Markdown Save/load preprocessed images as pickle files To save time during modeling runs and experiments, we saved all pre-processed data to pickle files for easy reloading during future sessions. Care was needed to capture image data accurately and completely, of course, as well as to serialize the necessary parts of encoding and data reduction functions. The resulting pickle files for color images were large enough to warrant being split into two parts. (As it turns out, the S3 storage of AWS was ideal for this purpose.)
###Code # save a prepared pickle file def save_pickle(pickle_filename, save): try: f = open(pickle_filename, 'wb') pickle.dump(save, f, pickle.HIGHEST_PROTOCOL) f.close() except Exception as e: print('Unable to save data to', pickle_filename, ':', e) raise statinfo = os.stat(pickle_filename) print() print('Size of pickle file', pickle_filename, ':', statinfo.st_size) # prepare and save a new pickle file def prep_and_save_pickle_file(pickle_filename, x, y, x_ids, data_source, pca = None): # prep data structure to save if pca != None: pca_explained_variance_ratio = pca.explained_variance_ratio_ one_hot_encoder.active_features_ label_encoder.classes_ p1_num = math.ceil(len(x)/2) save = { 'x1': x[0:p1_num], 'x_ids': x_ids, 'y1': y[0:p1_num] if y is not None else None, 'input_shape': input_shape, 'pca_components': pca.components_ if pca is not None else None, 'pca_mean' : pca.mean_ if pca is not None else None, 'pca_explained_variance_ratio': pca.explained_variance_ratio_ if pca is not None else None, 'encoder_labels': one_hot_encoder.active_features_ if one_hot_encoder is not None else None, 'encoder_names' : label_encoder.classes_ if label_encoder is not None else None } # save images to pickle file for future sessions save_pickle(pickle_filename, save) save = { 'x2': x[p1_num:] , 'y2': y[p1_num:] if y is not None else None, } # save images to pickle file for future sessions save_pickle('2_' + pickle_filename, save) print() print ('Saved pickle file:', data_source, input_shape[0]) # load an existing pickle file def load_pickle_file(pickle_filename): # fill data structure from loaded pickle with open(pickle_filename, 'rb') as f: save = pickle.load(f) x1 = save['x1'] x_ids = save['x_ids'] y1 = save['y1'] input_shape = save['input_shape'], pca_components = save['pca_components'], pca_mean = save['pca_mean'], pca_explained_variance_ratio = save['pca_explained_variance_ratio'], encoder_labels = save['encoder_labels'], encoder_names = save['encoder_names'] # fill data structure from loaded pickle with open('2_' + pickle_filename, 'rb') as f: save = pickle.load(f) x2 = save['x2'] y2 = save['y2'] x = np.concatenate((x1,x2), axis=0) y = np.concatenate((y1,y2), axis=0) if y1 is not None else None # adjust data types after pickling input_shape = input_shape[0] pca_components = pca_components[0] pca_explained_variance_ratio = np.array(pca_explained_variance_ratio) encoder_labels = np.array(encoder_labels).T.flatten() print() print ('Loaded pickle file:', pickle_filename, input_shape[0]) pca_attribs = [pca_components, pca_explained_variance_ratio, pca_mean] return(x, y, x_ids, input_shape, pca_attribs, encoder_labels, encoder_names) ###Output _____no_output_____ ###Markdown Top-level script to preprocess all data The top-level script below runs all pre-processing functions as needed, according to situation. 
###Code
prep_filename = lambda data_source: \
    'HW4_data_' + data_source + '_' + str(img_rows) + '_' + model_type + '.pickle'

train_filename = prep_filename('train')
valid_filename = prep_filename('valid')
test_filename = prep_filename('test')

# pickle files don't exist yet, so preprocess data and create them
if not path.exists(train_filename) \
    or not path.exists(valid_filename) \
    or not path.exists(test_filename):

    # load and process images
    x, x_ids, input_shape = load_process_images('train')
    x_test, x_ids_test, input_shape = load_process_images('test')

    # encode y
    y, label_encoder, one_hot_encoder, num_labels = encode_y(x_ids, labels_data)

    # split training data
    x_train, x_valid, y_train, y_valid = split_my_training_data(x, y)
    x = y = None  # free memory

    # rebalance classes with upsampling from flipped images
    is_color = (model_type == 'cnn')
    x_train, y_train = class_balance_with_image_flip(x_train, y_train, is_color)
    if not is_color:
        x_train, y_train = add_transformed_images(x_train, y_train)

    # reduce image data
    pca = None
    if not is_color:
        x_train, x_valid, x_test, pca = reduce_image_data(x_train, x_valid, x_test)

    # capture PCA and encoder attributes for pickling
    # (pca stays None on the color/CNN path, where no data reduction is applied)
    pca_attribs = [pca.components_, pca.explained_variance_ratio_, pca.mean_] if pca is not None else None
    encoder_labels = one_hot_encoder.active_features_
    encoder_names = label_encoder.classes_

    # save pickle files
    prep_and_save_pickle_file(train_filename, x_train, y_train, None, 'train', pca)
    prep_and_save_pickle_file(valid_filename, x_valid, y_valid, None, 'valid', pca)
    prep_and_save_pickle_file(test_filename, x_test, None, x_ids_test, 'test', pca)

# pickle files already exist, so load them
else:
    x_train, y_train, _, input_shape, pca_attribs, encoder_labels, encoder_names = load_pickle_file(train_filename)
    x_valid, y_valid, _, _, _, _, _ = load_pickle_file(valid_filename)
    x_test, y_test, x_ids_test, _, _, _, _ = load_pickle_file(test_filename)

num_labels = y_train.shape[1]
###Output
Loaded pickle file: HW4_data_train_128_ffnn.pickle 128
Loaded pickle file: HW4_data_valid_128_ffnn.pickle 128
Loaded pickle file: HW4_data_test_128_ffnn.pickle 128
###Markdown
Visualization of PCA The elbow in the scree plot below suggests using approximately 250 principal components for data reduction.
###Code
if model_type == 'ffnn':
    viz_pca(pca_attribs, n_pcs_to_use)
    input_shape = [x_train.shape[1]]
###Output
###Markdown
Display sample images After such extensive pre-processing, it's essential to validate image quality and label pairing through visual inspection. While exhaustive inspection is impractical, the code below is set up to allow random inspection of twelve images and labels at a time from each of the training and validation sets, as well as twelve images at a time from the testing set. The B/W images must be reconstituted from PCA-reduced form (as well as renormalized) for display. Inspection results during multiple runs for all data sets showed no obvious problems, so we proceeded with modeling on this foundation.
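For reference, the reconstitution step implements the standard PCA inverse mapping: with the top-$k$ component matrix $W_k$ (here `pca_components`) and the training-data mean $\mu$ (here `pca_mean`), a reduced vector $z$ is mapped back to pixel space as $\hat{x} = z\,W_k + \mu$.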
###Code
def renormalize_image(x):

    # shift values to be non-negative, then scale into [0, 1] (in place)
    x_adjust = np.amin(x)
    if x_adjust < 0:
        x += -x_adjust

    x_range = np.amax(x) - np.amin(x)
    x /= x_range

num_rows = 3
num_cols = 4
num_images_to_display = num_rows * num_cols

# get sample images from training data
x_display, x_display_labels_0 = resample(x_train, y_train, replace = False, n_samples = num_rows * num_cols)
x_display_labels = encoder_names[one_hot_decode(x_display_labels_0)]

# prepare b/w images for display
if model_type != 'cnn':
    pca_components = pca_attribs[0][:n_pcs_to_use, :]
    pca_mean = pca_attribs[2]
    x_display = np.dot(x_display, pca_components) + pca_mean
    renormalize_image(x_display)
    x_display = x_display.reshape(x_display.shape[0], img_rows, img_cols)

# code adapted from:
# https://docs.opencv.org/3.0-beta/doc/py_tutorials/py_gui/py_image_display/py_image_display.html
print()
plt.figure(figsize = (16, 12))
for i in range(num_images_to_display):
    plt.subplot(num_rows, num_cols, i + 1)
    convert_spec = cv2.COLOR_BGR2RGB if model_type == 'cnn' else cv2.COLOR_GRAY2RGB
    image = cv2.cvtColor(x_display[i], convert_spec)
    plt.imshow(image)
    plt.xticks([]), plt.yticks([])  # hide tick values on the X and Y axes
    plt.title(x_display_labels[i])
plt.show()
###Output
###Markdown
4. Train FFNN Model in TensorFlow In order to address the dog breeds classification challenge on a non-visual basis, a series of feed-forward neural networks (FFNNs) was created. This series avoided visual mechanisms such as convolutions, in order to explore the capabilities of traditional networks. Accordingly, the data was prepared with grayscale images and data reduction via PCA, as explained above. While classification accuracy could be expected to fall short of that of CNNs, the exploration is instructive *per se*.

The exploration of FFNNs included the variations listed below, building up from the simplest model towards a more sophisticated, final model:

1. single hidden layer with SGD optimization
2. two hidden layers with Adam optimization
3. four hidden layers with Adam and dropout
4. eight hidden layers with Adam and dropout
5. eight hidden layers with Adam, dropout, and batch normalization
6. eight hidden layers with Adam, dropout, normalization, and 4096 nodes per hidden layer
7. eight hidden layers with Adam, dropout, normalization, 4096 nodes, pyramidal architecture, and 64 principal components

Additional variations were explored (not shown here), including the following:

* sharpening images
* omitting data reduction (i.e., PCA)
* increasing the number of hidden layers from eight to sixteen
* increasing the dropout rate to 90%
* using a range of learning rates between 1e-1 and 1e-6
* using a range of epsilon values between 1 and 1e-8
* L2 regularization

Results consistently showed strong overfitting, which was difficult to circumvent. In the output below, increasing model sophistication raised the training accuracy from approximately 5% to approximately 98% (in longer runs). At the same time, validation accuracy rose to a maximum of 5.99%, which is not useful for practical purposes. Still, this validation accuracy is well above that of a naive classifier that always predicts at random, which would theoretically score approximately 0.8%. The earlier models below use a shorter run of only 50 epochs, and a slightly higher learning rate of 1e-3, in order to demonstrate model results relatively quickly.
The final model uses a longer run of 100 epochs and a learning rate of 1e-4, in order to demonstrate model results more thoroughly. A clear finding of this process is the effectiveness of convolutions for processing images in networks, as will be discussed below. Define helper functions
###Code
def categorical_accuracy(y_true, y_pred_proba):
    return(np.mean(np.equal(np.argmax(y_true, axis = -1), np.argmax(y_pred_proba, axis = -1))))

def weight_variable(shape, name):
    initial = tf.truncated_normal(shape, stddev = np.sqrt(2.0 / shape[0]))
    return tf.Variable(initial, name = name)

def bias_variable(shape, name):
    initial = tf.zeros(shape)
    return tf.Variable(initial, name = name)

# halve x k times (e.g., 1000 -> 500 -> 250), for pyramidal layer sizing
split_by_half = lambda x, k : int(x / 2 ** k)

def display_train_status(batch_num, l, predictions, batch_labels, use_prob = False):

    if not use_prob:
        valid_predict_proba = valid_prediction.eval()
    else:
        valid_predict_proba = valid_prediction.eval({keep_prob: 1.0})

    valid_labelset = tf_valid_labelset.eval()

    # print batch number
    print('Epoch %d:' % (batch_num))

    # print minibatch loss
    print('    Minibatch loss: %.2f' % (l))

    # print minibatch accuracy
    acc = categorical_accuracy(batch_labels, predictions)
    print('    Minibatch accuracy: %.2f%%' % (100.0 * acc))

    # print validation loss
    val_loss = log_loss(valid_labelset, valid_predict_proba)
    print('    Validation loss: %.2f' % val_loss)

    # print validation accuracy
    val_acc = categorical_accuracy(valid_labelset, valid_predict_proba)
    print('    Validation accuracy: %.2f%%' % (100.0 * val_acc))
    print()

    return(acc, val_acc, valid_predict_proba)

# define input data.
def def_input_data(batch_size, flattened_size, num_labels):

    tf_train_dataset = tf.placeholder(tf.float32, shape = (batch_size, flattened_size), name = "TrainingData")
    tf_train_labelset = tf.placeholder(tf.float32, shape = (batch_size, num_labels), name = "TrainingLabels")
    tf_valid_dataset = tf.constant(x_valid, name = "ValidationData")
    tf_valid_labelset = tf.constant(y_valid, name = "ValidationLabels")

    return(tf_train_dataset, tf_train_labelset, tf_valid_dataset, tf_valid_labelset)

# initialize TensorFlow session
def my_init_session(graph):

    tf.global_variables_initializer().run()
    merged = tf.summary.merge_all()
    writer = tf.summary.FileWriter("tensorflowlogs", graph)

    return(merged, writer)
###Output
_____no_output_____
###Markdown
First model: define basic NN with 1 layer and SGD optimizer
###Code
if model_type != 'cnn':

    flattened_size = x_train.shape[1]
    n_hidden_nodes = 1000

    graph = tf.Graph()
    with graph.as_default():

        # define input data.
        tf_train_dataset, tf_train_labelset, tf_valid_dataset, tf_valid_labelset = \
            def_input_data(batch_size, flattened_size, num_labels)

        # define variables.
        layer1_weights = tf.Variable(tf.truncated_normal([flattened_size, num_labels]), name = "weights1")
        layer1_biases = tf.Variable(tf.zeros([num_labels]), name = "biases1")

        # define model.
        def model(data, name):
            with tf.name_scope(name) as scope:
                layer1 = tf.add(tf.matmul(data, layer1_weights), layer1_biases, name = "layer1")
                return layer1

        # define training computation.
        logits = model(tf_train_dataset, name = "logits")
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = logits, labels = tf_train_labelset), name = "loss")

        # define optimizer.
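        # plain SGD applies w <- w - learning_rate * d(loss)/dw once per minibatch,
        # with no momentum and no per-parameter step scaling; this partly explains
        # the slow convergence visible in the training log below.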
optimizer = tf.train.GradientDescentOptimizer(learning_rate = 1e-3).minimize(loss) # predict on training and validation data train_prediction = tf.nn.softmax(logits) valid_prediction = tf.nn.softmax(model(tf_valid_dataset, name = "validation")) def run_session_basic(num_batches, name): with tf.Session(graph = graph) as session: # initialize session merged, writer = my_init_session(session.graph) print("\nSession initialized\n") # run all 'epochs' for batch_num in range(num_batches): # define batches and input offset = (batch_num * batch_size) % (y_train.shape[0] - batch_size) batch_data = x_train[offset:(offset + batch_size), :] batch_labels = y_train[offset:(offset + batch_size), :] feed_dict = {tf_train_dataset : batch_data, tf_train_labelset : batch_labels} # run this epoch _, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict = feed_dict) # display loss and accuracy periodically display_interval = batches_per_epoch if (batch_num % display_interval == 0): acc, val_acc, valid_predict_proba = display_train_status(batch_num, l, predictions, batch_labels) # package the training history valid_predict_proba = valid_prediction.eval() return(valid_predict_proba) if model_type is not 'cnn': batches_per_epoch = math.ceil(y_train.shape[0] / batch_size) num_batches = 50 * batches_per_epoch y_valid_predict_proba = run_session_basic(num_batches = num_batches, name = "HW4 FFNN") ###Output Session initialized Epoch 0: Minibatch loss: 64.70 Minibatch accuracy: 0.00% Validation loss: 32.33 Validation accuracy: 1.17% Epoch 758: Minibatch loss: 68.48 Minibatch accuracy: 0.00% Validation loss: 32.12 Validation accuracy: 1.06% Epoch 1516: Minibatch loss: 61.94 Minibatch accuracy: 0.00% Validation loss: 31.92 Validation accuracy: 1.06% Epoch 2274: Minibatch loss: 58.55 Minibatch accuracy: 1.56% Validation loss: 31.73 Validation accuracy: 1.13% Epoch 3032: Minibatch loss: 54.77 Minibatch accuracy: 3.12% Validation loss: 31.55 Validation accuracy: 1.21% Epoch 3790: Minibatch loss: 47.67 Minibatch accuracy: 0.00% Validation loss: 31.37 Validation accuracy: 1.29% Epoch 4548: Minibatch loss: 53.42 Minibatch accuracy: 0.00% Validation loss: 31.20 Validation accuracy: 1.29% Epoch 5306: Minibatch loss: 55.86 Minibatch accuracy: 0.00% Validation loss: 31.04 Validation accuracy: 1.25% Epoch 6064: Minibatch loss: 52.18 Minibatch accuracy: 1.56% Validation loss: 30.88 Validation accuracy: 1.29% Epoch 6822: Minibatch loss: 53.67 Minibatch accuracy: 1.56% Validation loss: 30.72 Validation accuracy: 1.29% Epoch 7580: Minibatch loss: 46.14 Minibatch accuracy: 1.56% Validation loss: 30.57 Validation accuracy: 1.21% Epoch 8338: Minibatch loss: 44.83 Minibatch accuracy: 0.00% Validation loss: 30.41 Validation accuracy: 1.21% Epoch 9096: Minibatch loss: 49.34 Minibatch accuracy: 0.00% Validation loss: 30.26 Validation accuracy: 1.29% Epoch 9854: Minibatch loss: 47.21 Minibatch accuracy: 0.00% Validation loss: 30.10 Validation accuracy: 1.25% Epoch 10612: Minibatch loss: 42.10 Minibatch accuracy: 0.00% Validation loss: 29.95 Validation accuracy: 1.21% Epoch 11370: Minibatch loss: 39.50 Minibatch accuracy: 0.00% Validation loss: 29.81 Validation accuracy: 1.29% Epoch 12128: Minibatch loss: 44.37 Minibatch accuracy: 0.00% Validation loss: 29.67 Validation accuracy: 1.33% Epoch 12886: Minibatch loss: 40.78 Minibatch accuracy: 1.56% Validation loss: 29.54 Validation accuracy: 1.37% Epoch 13644: Minibatch loss: 43.98 Minibatch accuracy: 1.56% Validation loss: 29.41 Validation accuracy: 1.37% Epoch 14402: 
Minibatch loss: 45.81 Minibatch accuracy: 0.00% Validation loss: 29.29 Validation accuracy: 1.41% Epoch 15160: Minibatch loss: 45.22 Minibatch accuracy: 0.00% Validation loss: 29.18 Validation accuracy: 1.45% Epoch 15918: Minibatch loss: 37.02 Minibatch accuracy: 1.56% Validation loss: 29.06 Validation accuracy: 1.41% Epoch 16676: Minibatch loss: 41.24 Minibatch accuracy: 3.12% Validation loss: 28.95 Validation accuracy: 1.49% Epoch 17434: Minibatch loss: 48.32 Minibatch accuracy: 0.00% Validation loss: 28.84 Validation accuracy: 1.53% Epoch 18192: Minibatch loss: 39.05 Minibatch accuracy: 0.00% Validation loss: 28.74 Validation accuracy: 1.60% Epoch 18950: Minibatch loss: 37.57 Minibatch accuracy: 0.00% Validation loss: 28.63 Validation accuracy: 1.56% Epoch 19708: Minibatch loss: 37.35 Minibatch accuracy: 6.25% Validation loss: 28.54 Validation accuracy: 1.56% Epoch 20466: Minibatch loss: 41.53 Minibatch accuracy: 4.69% Validation loss: 28.44 Validation accuracy: 1.60% Epoch 21224: Minibatch loss: 39.61 Minibatch accuracy: 1.56% Validation loss: 28.34 Validation accuracy: 1.68% Epoch 21982: Minibatch loss: 35.08 Minibatch accuracy: 1.56% Validation loss: 28.25 Validation accuracy: 1.72% Epoch 22740: Minibatch loss: 36.15 Minibatch accuracy: 0.00% Validation loss: 28.17 Validation accuracy: 1.72% Epoch 23498: Minibatch loss: 34.44 Minibatch accuracy: 4.69% Validation loss: 28.08 Validation accuracy: 1.64% Epoch 24256: Minibatch loss: 36.35 Minibatch accuracy: 0.00% Validation loss: 28.00 Validation accuracy: 1.64% Epoch 25014: Minibatch loss: 42.51 Minibatch accuracy: 3.12% Validation loss: 27.92 Validation accuracy: 1.60% Epoch 25772: Minibatch loss: 38.17 Minibatch accuracy: 0.00% Validation loss: 27.84 Validation accuracy: 1.60% Epoch 26530: Minibatch loss: 37.92 Minibatch accuracy: 1.56% Validation loss: 27.76 Validation accuracy: 1.60% Epoch 27288: Minibatch loss: 29.92 Minibatch accuracy: 4.69% Validation loss: 27.69 Validation accuracy: 1.53% Epoch 28046: Minibatch loss: 36.12 Minibatch accuracy: 0.00% Validation loss: 27.61 Validation accuracy: 1.53% Epoch 28804: Minibatch loss: 35.95 Minibatch accuracy: 1.56% Validation loss: 27.54 Validation accuracy: 1.53% Epoch 29562: Minibatch loss: 35.01 Minibatch accuracy: 0.00% Validation loss: 27.47 Validation accuracy: 1.53% Epoch 30320: Minibatch loss: 32.39 Minibatch accuracy: 0.00% Validation loss: 27.40 Validation accuracy: 1.49% Epoch 31078: Minibatch loss: 36.87 Minibatch accuracy: 0.00% Validation loss: 27.33 Validation accuracy: 1.45% Epoch 31836: Minibatch loss: 38.88 Minibatch accuracy: 0.00% Validation loss: 27.26 Validation accuracy: 1.49% Epoch 32594: Minibatch loss: 36.83 Minibatch accuracy: 0.00% Validation loss: 27.20 Validation accuracy: 1.45% Epoch 33352: Minibatch loss: 33.60 Minibatch accuracy: 1.56% Validation loss: 27.13 Validation accuracy: 1.49% Epoch 34110: Minibatch loss: 33.42 Minibatch accuracy: 1.56% Validation loss: 27.07 Validation accuracy: 1.49% Epoch 34868: Minibatch loss: 35.53 Minibatch accuracy: 1.56% Validation loss: 27.01 Validation accuracy: 1.49% Epoch 35626: Minibatch loss: 37.60 Minibatch accuracy: 0.00% Validation loss: 26.95 Validation accuracy: 1.45% Epoch 36384: Minibatch loss: 34.70 Minibatch accuracy: 0.00% Validation loss: 26.89 Validation accuracy: 1.45% Epoch 37142: Minibatch loss: 34.38 Minibatch accuracy: 0.00% Validation loss: 26.83 Validation accuracy: 1.49% ###Markdown Define enhanced basic NN with 2 layers and Adam optimizer ###Code if model_type is not 'cnn': flattened_size = 
x_train.shape[1] n_hidden_nodes = 1000 graph = tf.Graph() with graph.as_default(): # define input data. tf_train_dataset, tf_train_labelset, tf_valid_dataset, tf_valid_labelset = \ def_input_data(batch_size, flattened_size, num_labels) # define variables. layer1_weights = tf.Variable(tf.truncated_normal([flattened_size, n_hidden_nodes]), name = "weights1") layer1_biases = tf.Variable(tf.zeros([n_hidden_nodes]), name = "biases1") layer2_weights = tf.Variable(tf.truncated_normal([n_hidden_nodes, num_labels]), name = "weights2") layer2_biases = tf.Variable(tf.ones([num_labels]), name = "biases2") # define model. def model(data, name): with tf.name_scope(name) as scope: layer1 = tf.add(tf.matmul(data, layer1_weights), layer1_biases, name = "layer1") hidden1 = tf.nn.relu(layer1, name = "relu1") layer2 = tf.add(tf.matmul(hidden1, layer2_weights), layer2_biases, name = "layer2") return layer2 # define training computation. logits = model(tf_train_dataset, name = "logits") loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = logits, labels = tf_train_labelset), name = "loss") # define optimizer. optimizer = tf.train.AdamOptimizer(learning_rate = 1e-3, epsilon = 0.001).minimize(loss) # predict on training and validation data train_prediction = tf.nn.softmax(logits) valid_prediction = tf.nn.softmax(model(tf_valid_dataset, name = "validation")) def run_session_2_layer(num_batches, name): with tf.Session(graph = graph) as session: # initialize session merged, writer = my_init_session(session.graph) print("\nSession initialized\n") # run all 'epochs' for batch_num in range(num_batches): # define batches and input offset = (batch_num * batch_size) % (y_train.shape[0] - batch_size) batch_data = x_train[offset:(offset + batch_size), :] batch_labels = y_train[offset:(offset + batch_size), :] feed_dict = {tf_train_dataset : batch_data, tf_train_labelset : batch_labels} # run this epoch _, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict = feed_dict) # display loss and accuracy periodically display_interval = batches_per_epoch if (batch_num % display_interval == 0): acc, val_acc, valid_predict_proba = display_train_status(batch_num, l, predictions, batch_labels) # package the training history valid_predict_proba = valid_prediction.eval() return(valid_predict_proba) if model_type is not 'cnn': batches_per_epoch = math.ceil(y_train.shape[0] / batch_size) num_batches = 50 * batches_per_epoch y_valid_predict_proba = run_session_2_layer(num_batches = num_batches, name = "HW4 FFNN") ###Output Session initialized Epoch 0: Minibatch loss: 1242.88 Minibatch accuracy: 0.00% Validation loss: 34.14 Validation accuracy: 1.06% Epoch 758: Minibatch loss: 563.21 Minibatch accuracy: 1.56% Validation loss: 33.94 Validation accuracy: 1.37% Epoch 1516: Minibatch loss: 380.76 Minibatch accuracy: 6.25% Validation loss: 33.86 Validation accuracy: 1.76% Epoch 2274: Minibatch loss: 276.16 Minibatch accuracy: 9.38% Validation loss: 33.70 Validation accuracy: 1.96% Epoch 3032: Minibatch loss: 227.13 Minibatch accuracy: 20.31% Validation loss: 33.62 Validation accuracy: 2.31% Epoch 3790: Minibatch loss: 159.81 Minibatch accuracy: 20.31% Validation loss: 33.50 Validation accuracy: 2.50% Epoch 4548: Minibatch loss: 159.92 Minibatch accuracy: 18.75% Validation loss: 33.47 Validation accuracy: 2.54% Epoch 5306: Minibatch loss: 108.03 Minibatch accuracy: 28.12% Validation loss: 33.47 Validation accuracy: 2.62% Epoch 6064: Minibatch loss: 112.24 Minibatch accuracy: 34.38% Validation loss: 33.44 
Validation accuracy: 2.66% Epoch 6822: Minibatch loss: 45.63 Minibatch accuracy: 42.19% Validation loss: 33.38 Validation accuracy: 2.82% Epoch 7580: Minibatch loss: 59.33 Minibatch accuracy: 51.56% Validation loss: 33.38 Validation accuracy: 2.78% Epoch 8338: Minibatch loss: 37.88 Minibatch accuracy: 51.56% Validation loss: 33.38 Validation accuracy: 2.70% Epoch 9096: Minibatch loss: 34.71 Minibatch accuracy: 53.12% Validation loss: 33.38 Validation accuracy: 2.74% Epoch 9854: Minibatch loss: 16.41 Minibatch accuracy: 67.19% Validation loss: 33.40 Validation accuracy: 2.54% Epoch 10612: Minibatch loss: 16.46 Minibatch accuracy: 65.62% Validation loss: 33.34 Validation accuracy: 2.70% Epoch 11370: Minibatch loss: 10.99 Minibatch accuracy: 81.25% Validation loss: 33.34 Validation accuracy: 2.74% Epoch 12128: Minibatch loss: 11.95 Minibatch accuracy: 68.75% Validation loss: 33.32 Validation accuracy: 2.74% Epoch 12886: Minibatch loss: 22.64 Minibatch accuracy: 62.50% Validation loss: 33.36 Validation accuracy: 2.58% Epoch 13644: Minibatch loss: 3.28 Minibatch accuracy: 81.25% Validation loss: 33.30 Validation accuracy: 2.82% Epoch 14402: Minibatch loss: 3.44 Minibatch accuracy: 87.50% Validation loss: 33.33 Validation accuracy: 2.90% Epoch 15160: Minibatch loss: 3.41 Minibatch accuracy: 85.94% Validation loss: 33.33 Validation accuracy: 2.74% Epoch 15918: Minibatch loss: 1.44 Minibatch accuracy: 87.50% Validation loss: 33.45 Validation accuracy: 2.78% Epoch 16676: Minibatch loss: 0.98 Minibatch accuracy: 92.19% Validation loss: 33.33 Validation accuracy: 2.82% Epoch 17434: Minibatch loss: 5.22 Minibatch accuracy: 90.62% Validation loss: 33.35 Validation accuracy: 2.74% Epoch 18192: Minibatch loss: 1.72 Minibatch accuracy: 93.75% Validation loss: 33.38 Validation accuracy: 2.86% Epoch 18950: Minibatch loss: 3.82 Minibatch accuracy: 89.06% Validation loss: 33.32 Validation accuracy: 2.90% Epoch 19708: Minibatch loss: 2.04 Minibatch accuracy: 90.62% Validation loss: 33.41 Validation accuracy: 2.66% Epoch 20466: Minibatch loss: 1.16 Minibatch accuracy: 92.19% Validation loss: 33.34 Validation accuracy: 2.97% Epoch 21224: Minibatch loss: 0.04 Minibatch accuracy: 96.88% Validation loss: 33.36 Validation accuracy: 2.50% Epoch 21982: Minibatch loss: 1.50 Minibatch accuracy: 90.62% Validation loss: 33.36 Validation accuracy: 2.82% Epoch 22740: Minibatch loss: 0.04 Minibatch accuracy: 98.44% Validation loss: 33.36 Validation accuracy: 2.90% Epoch 23498: Minibatch loss: 0.15 Minibatch accuracy: 95.31% Validation loss: 33.32 Validation accuracy: 2.86% Epoch 24256: Minibatch loss: 1.06 Minibatch accuracy: 92.19% Validation loss: 33.41 Validation accuracy: 2.74% Epoch 25014: Minibatch loss: 2.67 Minibatch accuracy: 93.75% Validation loss: 33.31 Validation accuracy: 3.05% Epoch 25772: Minibatch loss: 0.89 Minibatch accuracy: 96.88% Validation loss: 33.38 Validation accuracy: 2.82% Epoch 26530: Minibatch loss: 0.38 Minibatch accuracy: 96.88% Validation loss: 33.40 Validation accuracy: 2.86% Epoch 27288: Minibatch loss: 0.43 Minibatch accuracy: 95.31% Validation loss: 33.33 Validation accuracy: 2.86% Epoch 28046: Minibatch loss: 2.20 Minibatch accuracy: 93.75% Validation loss: 33.31 Validation accuracy: 2.86% Epoch 28804: Minibatch loss: 0.49 Minibatch accuracy: 98.44% Validation loss: 33.29 Validation accuracy: 2.93% Epoch 29562: Minibatch loss: 0.22 Minibatch accuracy: 98.44% Validation loss: 33.24 Validation accuracy: 2.86% Epoch 30320: Minibatch loss: 0.64 Minibatch accuracy: 93.75% Validation loss: 
33.17 Validation accuracy: 3.29% Epoch 31078: Minibatch loss: 0.42 Minibatch accuracy: 98.44% Validation loss: 33.30 Validation accuracy: 2.97% Epoch 31836: Minibatch loss: 1.29 Minibatch accuracy: 93.75% Validation loss: 33.33 Validation accuracy: 2.82% Epoch 32594: Minibatch loss: 0.61 Minibatch accuracy: 96.88% Validation loss: 33.40 Validation accuracy: 2.70% Epoch 33352: Minibatch loss: 0.00 Minibatch accuracy: 100.00% Validation loss: 33.39 Validation accuracy: 2.58% Epoch 34110: Minibatch loss: 1.58 Minibatch accuracy: 93.75% Validation loss: 33.33 Validation accuracy: 2.82% Epoch 34868: Minibatch loss: 0.05 Minibatch accuracy: 98.44% Validation loss: 33.32 Validation accuracy: 2.82% Epoch 35626: Minibatch loss: 0.87 Minibatch accuracy: 95.31% Validation loss: 33.29 Validation accuracy: 3.09% Epoch 36384: Minibatch loss: 0.14 Minibatch accuracy: 98.44% Validation loss: 33.33 Validation accuracy: 2.93% Epoch 37142: Minibatch loss: 0.80 Minibatch accuracy: 96.88% Validation loss: 33.28 Validation accuracy: 3.01% ###Markdown Define deeper NN with 4 layers, Adam, and dropout ###Code if model_type is 'ffnn': flattened_size = x_train.shape[1] n_hidden_nodes = 1000 graph = tf.Graph() with graph.as_default(): # input data tf_train_dataset, tf_train_labelset, tf_valid_dataset, tf_valid_labelset = \ def_input_data(batch_size, flattened_size, num_labels) #----- variables. layer_1_weights = weight_variable([flattened_size, n_hidden_nodes], name = "weights_1") layer_1_biases = bias_variable([n_hidden_nodes], name = "biases_1") layer_2_weights = weight_variable([n_hidden_nodes, split_by_half(n_hidden_nodes,1)], name = "weights_2") layer_2_biases = bias_variable([split_by_half(n_hidden_nodes, 1)], name = "biases_2") layer_3_weights = weight_variable([split_by_half(n_hidden_nodes, 1), split_by_half(n_hidden_nodes,2)], name = "weights_3") layer_3_biases = bias_variable([split_by_half(n_hidden_nodes, 2)], name = "biases_3") layer_4_weights = weight_variable([split_by_half(n_hidden_nodes, 2), num_labels], name = "weights_4") layer_4_biases = bias_variable([num_labels], name = "biases_4") keep_prob = tf.placeholder("float", name = "keep_prob") # model def model(data, name, proba = keep_prob): with tf.name_scope(name) as scope: layer_1 = tf.add(tf.matmul(data, layer_1_weights), layer_1_biases, name = "layer1") hidden_1 = tf.nn.dropout(tf.nn.relu(layer_1), proba, name = "dropout_1") layer_2 = tf.add(tf.matmul(hidden_1, layer_2_weights), layer_2_biases, name = "layer2") hidden_2 = tf.nn.dropout(tf.nn.relu(layer_2), proba, name = "dropout_2") layer_3 = tf.add(tf.matmul(hidden_2, layer_3_weights), layer_3_biases, name = "layer3") hidden_3 = tf.nn.dropout(tf.nn.relu(layer_3), proba) layer_4 = tf.add(tf.matmul(hidden_3, layer_4_weights), layer_4_biases, name = "layer4") return layer_4 # training computation. logits = model(tf_train_dataset, "logits", keep_prob) loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = logits, labels = tf_train_labelset), name = "loss") # optimizer. 
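        # Adam keeps running estimates of each parameter's first and second gradient
        # moments; the enlarged epsilon here (0.001 vs. the 1e-8 default) damps the
        # adaptive step size for extra numerical stability.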
optimizer_0 = tf.train.AdamOptimizer(learning_rate = 1e-3, epsilon = 0.001) optimizer = optimizer_0.minimize(loss) # predictions on training and validation data train_prediction = tf.nn.softmax(logits) valid_prediction = tf.nn.softmax(model(tf_valid_dataset, name = "validation")) def run_session_4_layer(num_batches, name): with tf.Session(graph = graph) as session: # initialize session merged, writer = my_init_session(session.graph) print("\nSession initialized\n") # run all 'epochs' for batch_num in range(num_batches): # define batches and input offset = (batch_num * batch_size) % (y_train.shape[0] - batch_size) batch_data = x_train[offset:(offset + batch_size), :] batch_labels = y_train[offset:(offset + batch_size), :] feed_dict = {tf_train_dataset : batch_data, tf_train_labelset : batch_labels, keep_prob : 0.7 } # run this epoch _, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict = feed_dict) # display loss and accuracy periodically display_interval = batches_per_epoch if (batch_num % display_interval == 0): acc, val_acc, valid_predict_proba = display_train_status(batch_num, l, predictions, batch_labels, use_prob = True) # package the training history valid_predict_proba = valid_prediction.eval({keep_prob: 1.0}) return(valid_predict_proba) if model_type is not 'cnn': batches_per_epoch = math.ceil(y_train.shape[0] / batch_size) num_batches = 50 * batches_per_epoch y_valid_predict_proba = run_session_4_layer(num_batches = num_batches, name = "HW4 FFNN") ###Output Session initialized Epoch 0: Minibatch loss: 8.29 Minibatch accuracy: 0.00% Validation loss: 33.26 Validation accuracy: 0.63% Epoch 758: Minibatch loss: 4.79 Minibatch accuracy: 0.00% Validation loss: 4.79 Validation accuracy: 1.06% Epoch 1516: Minibatch loss: 4.79 Minibatch accuracy: 0.00% Validation loss: 4.79 Validation accuracy: 0.90% Epoch 2274: Minibatch loss: 4.79 Minibatch accuracy: 0.00% Validation loss: 4.79 Validation accuracy: 0.90% Epoch 3032: Minibatch loss: 4.79 Minibatch accuracy: 3.12% Validation loss: 4.79 Validation accuracy: 0.90% Epoch 3790: Minibatch loss: 4.79 Minibatch accuracy: 6.25% Validation loss: 4.79 Validation accuracy: 0.90% Epoch 4548: Minibatch loss: 4.79 Minibatch accuracy: 1.56% Validation loss: 4.79 Validation accuracy: 0.90% Epoch 5306: Minibatch loss: 4.79 Minibatch accuracy: 0.00% Validation loss: 4.79 Validation accuracy: 0.90% Epoch 6064: Minibatch loss: 4.79 Minibatch accuracy: 0.00% Validation loss: 4.79 Validation accuracy: 0.90% Epoch 6822: Minibatch loss: 4.79 Minibatch accuracy: 0.00% Validation loss: 4.79 Validation accuracy: 0.90% Epoch 7580: Minibatch loss: 4.79 Minibatch accuracy: 1.56% Validation loss: 4.79 Validation accuracy: 0.90% Epoch 8338: Minibatch loss: 4.79 Minibatch accuracy: 0.00% Validation loss: 4.79 Validation accuracy: 0.90% Epoch 9096: Minibatch loss: 4.79 Minibatch accuracy: 0.00% Validation loss: 4.79 Validation accuracy: 0.90% Epoch 9854: Minibatch loss: 4.79 Minibatch accuracy: 1.56% Validation loss: 4.79 Validation accuracy: 0.90% Epoch 10612: Minibatch loss: 4.79 Minibatch accuracy: 1.56% Validation loss: 4.79 Validation accuracy: 0.90% Epoch 11370: Minibatch loss: 4.79 Minibatch accuracy: 0.00% Validation loss: 4.79 Validation accuracy: 0.90% Epoch 12128: Minibatch loss: 4.79 Minibatch accuracy: 0.00% Validation loss: 4.79 Validation accuracy: 0.90% Epoch 12886: Minibatch loss: 4.79 Minibatch accuracy: 1.56% Validation loss: 4.79 Validation accuracy: 0.90% Epoch 13644: Minibatch loss: 4.79 Minibatch accuracy: 0.00% Validation 
loss: 4.79 Validation accuracy: 0.90% Epoch 14402: Minibatch loss: 4.79 Minibatch accuracy: 0.00% Validation loss: 4.79 Validation accuracy: 0.90% Epoch 15160: Minibatch loss: 4.79 Minibatch accuracy: 0.00% Validation loss: 4.79 Validation accuracy: 0.90% Epoch 15918: Minibatch loss: 4.79 Minibatch accuracy: 0.00% Validation loss: 4.79 Validation accuracy: 0.90% Epoch 16676: Minibatch loss: 4.79 Minibatch accuracy: 0.00% Validation loss: 4.79 Validation accuracy: 0.90% Epoch 17434: Minibatch loss: 4.79 Minibatch accuracy: 0.00% Validation loss: 4.79 Validation accuracy: 0.90% Epoch 18192: Minibatch loss: 4.79 Minibatch accuracy: 3.12% Validation loss: 4.79 Validation accuracy: 0.90% Epoch 18950: Minibatch loss: 4.79 Minibatch accuracy: 1.56% Validation loss: 4.79 Validation accuracy: 0.90% Epoch 19708: Minibatch loss: 4.79 Minibatch accuracy: 1.56% Validation loss: 4.79 Validation accuracy: 0.90% Epoch 20466: Minibatch loss: 4.79 Minibatch accuracy: 0.00% Validation loss: 4.79 Validation accuracy: 0.90% Epoch 21224: Minibatch loss: 4.79 Minibatch accuracy: 0.00% Validation loss: 4.79 Validation accuracy: 0.90% Epoch 21982: Minibatch loss: 4.79 Minibatch accuracy: 0.00% Validation loss: 4.79 Validation accuracy: 0.59% Epoch 22740: Minibatch loss: 4.79 Minibatch accuracy: 1.56% Validation loss: 4.79 Validation accuracy: 0.90% Epoch 23498: Minibatch loss: 4.79 Minibatch accuracy: 1.56% Validation loss: 4.79 Validation accuracy: 0.90% Epoch 24256: Minibatch loss: 4.79 Minibatch accuracy: 1.56% Validation loss: 4.79 Validation accuracy: 0.90% Epoch 25014: Minibatch loss: 4.79 Minibatch accuracy: 0.00% Validation loss: 4.79 Validation accuracy: 0.59% Epoch 25772: Minibatch loss: 4.79 Minibatch accuracy: 0.00% Validation loss: 4.79 Validation accuracy: 0.90% Epoch 26530: Minibatch loss: 4.79 Minibatch accuracy: 0.00% Validation loss: 4.79 Validation accuracy: 0.59% Epoch 27288: Minibatch loss: 4.79 Minibatch accuracy: 1.56% Validation loss: 4.79 Validation accuracy: 0.90% Epoch 28046: Minibatch loss: 4.79 Minibatch accuracy: 0.00% Validation loss: 4.79 Validation accuracy: 0.90% Epoch 28804: Minibatch loss: 4.79 Minibatch accuracy: 0.00% Validation loss: 4.79 Validation accuracy: 0.90% Epoch 29562: Minibatch loss: 4.79 Minibatch accuracy: 0.00% Validation loss: 4.79 Validation accuracy: 0.90% Epoch 30320: Minibatch loss: 4.93 Minibatch accuracy: 0.00% Validation loss: 4.79 Validation accuracy: 0.90% Epoch 31078: Minibatch loss: 4.79 Minibatch accuracy: 0.00% Validation loss: 4.79 Validation accuracy: 0.59% Epoch 31836: Minibatch loss: 4.79 Minibatch accuracy: 0.00% Validation loss: 4.79 Validation accuracy: 0.59% Epoch 32594: Minibatch loss: 4.79 Minibatch accuracy: 1.56% Validation loss: 4.79 Validation accuracy: 0.59% Epoch 33352: Minibatch loss: 4.79 Minibatch accuracy: 1.56% Validation loss: 4.79 Validation accuracy: 0.59% Epoch 34110: Minibatch loss: 4.79 Minibatch accuracy: 0.00% Validation loss: 4.79 Validation accuracy: 0.59% Epoch 34868: Minibatch loss: 4.79 Minibatch accuracy: 0.00% Validation loss: 4.79 Validation accuracy: 0.59% Epoch 35626: Minibatch loss: 4.79 Minibatch accuracy: 1.56% Validation loss: 4.79 Validation accuracy: 0.59% Epoch 36384: Minibatch loss: 4.79 Minibatch accuracy: 0.00% Validation loss: 4.79 Validation accuracy: 0.59% Epoch 37142: Minibatch loss: 4.79 Minibatch accuracy: 0.00% Validation loss: 4.79 Validation accuracy: 0.59% ###Markdown Define 8-layer NN with Adam and dropout ###Code if model_type is 'ffnn': flattened_size = x_train.shape[1] n_hidden_nodes = 
1000 graph = tf.Graph() with graph.as_default(): # input data tf_train_dataset, tf_train_labelset, tf_valid_dataset, tf_valid_labelset = \ def_input_data(batch_size, flattened_size, num_labels) #----- variables. layer_1_weights = weight_variable([flattened_size, n_hidden_nodes], name = "weights_1") layer_1_biases = bias_variable([n_hidden_nodes], name = "biases_1") layer_2_weights = weight_variable([n_hidden_nodes, n_hidden_nodes], name = "weights_2") layer_2_biases = bias_variable([n_hidden_nodes], name = "biases_2") layer_3_weights = weight_variable([n_hidden_nodes, n_hidden_nodes], name = "weights_3") layer_3_biases = bias_variable([n_hidden_nodes], name = "biases_3") layer_4_weights = weight_variable([n_hidden_nodes, n_hidden_nodes], name = "weights_4") layer_4_biases = bias_variable([n_hidden_nodes], name = "biases_4") layer_5_weights = weight_variable([n_hidden_nodes, n_hidden_nodes], name = "weights_5") layer_5_biases = bias_variable([n_hidden_nodes], name = "biases_5") layer_6_weights = weight_variable([n_hidden_nodes, n_hidden_nodes], name = "weights_6") layer_6_biases = bias_variable([n_hidden_nodes], name = "biases_6") layer_7_weights = weight_variable([n_hidden_nodes, n_hidden_nodes], name = "weights_7") layer_7_biases = bias_variable([n_hidden_nodes], name = "biases_7") layer_8_weights = weight_variable([n_hidden_nodes, num_labels], name = "weights_8") layer_8_biases = bias_variable([num_labels], name = "biases_8") keep_prob = tf.placeholder("float", name = "keep_prob") # model def model(data, name, proba = keep_prob): with tf.name_scope(name) as scope: layer_1 = tf.add(tf.matmul(data, layer_1_weights), layer_1_biases, name = "layer1") hidden_1 = tf.nn.dropout(tf.nn.relu(layer_1), proba, name = "dropout_1") layer_2 = tf.add(tf.matmul(hidden_1, layer_2_weights), layer_2_biases, name = "layer2") hidden_2 = tf.nn.dropout(tf.nn.relu(layer_2), proba, name = "dropout_2") layer_3 = tf.add(tf.matmul(hidden_2, layer_3_weights), layer_3_biases, name = "layer3") hidden_3 = tf.nn.dropout(tf.nn.relu(layer_3), proba, name = "dropout_3") layer_4 = tf.add(tf.matmul(hidden_3, layer_4_weights), layer_4_biases, name = "layer4") hidden_4 = tf.nn.dropout(tf.nn.relu(layer_4), proba, name = "dropout_4") layer_5 = tf.add(tf.matmul(hidden_4, layer_5_weights), layer_5_biases, name = "layer5") hidden_5 = tf.nn.dropout(tf.nn.relu(layer_5), proba, name = "dropout_5") layer_6 = tf.add(tf.matmul(hidden_5, layer_6_weights), layer_6_biases, name = "layer6") hidden_6 = tf.nn.dropout(tf.nn.relu(layer_6), proba, name = "dropout_6") layer_7 = tf.add(tf.matmul(hidden_6, layer_7_weights), layer_7_biases, name = "layer7") hidden_7 = tf.nn.dropout(tf.nn.relu(layer_7), proba, name = "dropout_7") layer_8 = tf.add(tf.matmul(hidden_7, layer_8_weights), layer_8_biases, name = "layer8") return layer_8 # training computation. logits = model(tf_train_dataset, "logits", keep_prob) loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = logits, labels = tf_train_labelset), name = "loss") # optimizer. 
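        # Adam again, but with the default epsilon (1e-8); regularization in this
        # deeper stack comes from dropout (keep_prob = 0.7 during training, 1.0 at
        # evaluation time).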
optimizer_0 = tf.train.AdamOptimizer(learning_rate = 1e-3) optimizer = optimizer_0.minimize(loss) # predictions on training and validation data train_prediction = tf.nn.softmax(logits) valid_prediction = tf.nn.softmax(model(tf_valid_dataset, name = "validation")) def run_session_8_layer(num_batches, name): with tf.Session(graph = graph) as session: # initialize session merged, writer = my_init_session(session.graph) print("\nSession initialized\n") # run all 'epochs' for batch_num in range(num_batches): # define batches and input offset = (batch_num * batch_size) % (y_train.shape[0] - batch_size) batch_data = x_train[offset:(offset + batch_size), :] batch_labels = y_train[offset:(offset + batch_size), :] feed_dict = {tf_train_dataset : batch_data, tf_train_labelset : batch_labels, keep_prob : 0.7 } # run this epoch _, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict = feed_dict) # display loss and accuracy periodically display_interval = batches_per_epoch if (batch_num % display_interval == 0): acc, val_acc, valid_predict_proba = display_train_status(batch_num, l, predictions, batch_labels, use_prob = True) # package the training history valid_predict_proba = valid_prediction.eval({keep_prob: 1.0}) return(valid_predict_proba) if model_type is not 'cnn': batches_per_epoch = math.ceil(y_train.shape[0] / batch_size) num_batches = 50 * batches_per_epoch y_valid_predict_proba = run_session_8_layer(num_batches = num_batches, name = "HW4 FFNN") ###Output Session initialized Epoch 0: Minibatch loss: 9.79 Minibatch accuracy: 0.00% Validation loss: 5.07 Validation accuracy: 0.70% Epoch 758: Minibatch loss: 4.68 Minibatch accuracy: 1.56% Validation loss: 4.63 Validation accuracy: 2.11% Epoch 1516: Minibatch loss: 4.58 Minibatch accuracy: 1.56% Validation loss: 4.60 Validation accuracy: 2.35% Epoch 2274: Minibatch loss: 4.30 Minibatch accuracy: 4.69% Validation loss: 4.55 Validation accuracy: 2.86% Epoch 3032: Minibatch loss: 4.23 Minibatch accuracy: 6.25% Validation loss: 4.61 Validation accuracy: 2.58% Epoch 3790: Minibatch loss: 4.27 Minibatch accuracy: 1.56% Validation loss: 4.62 Validation accuracy: 2.97% Epoch 4548: Minibatch loss: 4.04 Minibatch accuracy: 4.69% Validation loss: 4.66 Validation accuracy: 2.86% Epoch 5306: Minibatch loss: 3.87 Minibatch accuracy: 4.69% Validation loss: 4.69 Validation accuracy: 2.74% Epoch 6064: Minibatch loss: 3.86 Minibatch accuracy: 3.12% Validation loss: 4.73 Validation accuracy: 2.62% Epoch 6822: Minibatch loss: 3.90 Minibatch accuracy: 3.12% Validation loss: 4.78 Validation accuracy: 2.54% Epoch 7580: Minibatch loss: 4.06 Minibatch accuracy: 4.69% Validation loss: 4.83 Validation accuracy: 2.74% Epoch 8338: Minibatch loss: 3.73 Minibatch accuracy: 10.94% Validation loss: 4.88 Validation accuracy: 2.74% Epoch 9096: Minibatch loss: 3.91 Minibatch accuracy: 3.12% Validation loss: 4.88 Validation accuracy: 2.93% Epoch 9854: Minibatch loss: 3.98 Minibatch accuracy: 12.50% Validation loss: 4.94 Validation accuracy: 2.70% Epoch 10612: Minibatch loss: 3.84 Minibatch accuracy: 9.38% Validation loss: 4.98 Validation accuracy: 2.43% Epoch 11370: Minibatch loss: 3.82 Minibatch accuracy: 10.94% Validation loss: 4.98 Validation accuracy: 1.92% Epoch 12128: Minibatch loss: 3.74 Minibatch accuracy: 3.12% Validation loss: 5.00 Validation accuracy: 2.58% Epoch 12886: Minibatch loss: 3.66 Minibatch accuracy: 10.94% Validation loss: 5.09 Validation accuracy: 2.86% Epoch 13644: Minibatch loss: 3.95 Minibatch accuracy: 12.50% Validation loss: 5.06 
Validation accuracy: 2.35% Epoch 14402: Minibatch loss: 3.70 Minibatch accuracy: 10.94% Validation loss: 5.08 Validation accuracy: 2.50% Epoch 15160: Minibatch loss: 3.56 Minibatch accuracy: 10.94% Validation loss: 5.14 Validation accuracy: 2.15% Epoch 15918: Minibatch loss: 3.69 Minibatch accuracy: 7.81% Validation loss: 5.20 Validation accuracy: 2.43% Epoch 16676: Minibatch loss: 3.72 Minibatch accuracy: 7.81% Validation loss: 5.17 Validation accuracy: 2.23% Epoch 17434: Minibatch loss: 3.63 Minibatch accuracy: 10.94% Validation loss: 5.20 Validation accuracy: 2.43% Epoch 18192: Minibatch loss: 3.43 Minibatch accuracy: 10.94% Validation loss: 5.14 Validation accuracy: 2.11% Epoch 18950: Minibatch loss: 3.64 Minibatch accuracy: 10.94% Validation loss: 5.25 Validation accuracy: 2.35% Epoch 19708: Minibatch loss: 3.63 Minibatch accuracy: 20.31% Validation loss: 5.19 Validation accuracy: 2.78% Epoch 20466: Minibatch loss: 3.60 Minibatch accuracy: 15.62% Validation loss: 5.23 Validation accuracy: 2.82% Epoch 21224: Minibatch loss: 3.41 Minibatch accuracy: 10.94% Validation loss: 5.28 Validation accuracy: 2.11% Epoch 21982: Minibatch loss: 3.54 Minibatch accuracy: 12.50% Validation loss: 5.26 Validation accuracy: 2.15% Epoch 22740: Minibatch loss: 3.74 Minibatch accuracy: 7.81% Validation loss: 5.28 Validation accuracy: 2.54% Epoch 23498: Minibatch loss: 3.40 Minibatch accuracy: 14.06% Validation loss: 5.35 Validation accuracy: 2.35% Epoch 24256: Minibatch loss: 3.74 Minibatch accuracy: 14.06% Validation loss: 5.33 Validation accuracy: 2.90% Epoch 25014: Minibatch loss: 3.22 Minibatch accuracy: 9.38% Validation loss: 5.39 Validation accuracy: 2.46% Epoch 25772: Minibatch loss: 3.42 Minibatch accuracy: 10.94% Validation loss: 5.38 Validation accuracy: 2.31% Epoch 26530: Minibatch loss: 3.78 Minibatch accuracy: 6.25% Validation loss: 5.38 Validation accuracy: 2.62% Epoch 27288: Minibatch loss: 3.24 Minibatch accuracy: 12.50% Validation loss: 5.35 Validation accuracy: 2.11% Epoch 28046: Minibatch loss: 3.33 Minibatch accuracy: 7.81% Validation loss: 5.38 Validation accuracy: 2.27% Epoch 28804: Minibatch loss: 3.35 Minibatch accuracy: 12.50% Validation loss: 5.48 Validation accuracy: 2.19% Epoch 29562: Minibatch loss: 3.26 Minibatch accuracy: 12.50% Validation loss: 5.42 Validation accuracy: 2.70% Epoch 30320: Minibatch loss: 3.20 Minibatch accuracy: 15.62% Validation loss: 5.45 Validation accuracy: 2.90% Epoch 31078: Minibatch loss: 3.59 Minibatch accuracy: 14.06% Validation loss: 5.48 Validation accuracy: 2.35% Epoch 31836: Minibatch loss: 3.51 Minibatch accuracy: 15.62% Validation loss: 5.46 Validation accuracy: 2.90% Epoch 32594: Minibatch loss: 3.59 Minibatch accuracy: 9.38% Validation loss: 5.49 Validation accuracy: 2.39% Epoch 33352: Minibatch loss: 3.52 Minibatch accuracy: 9.38% Validation loss: 5.54 Validation accuracy: 2.19% Epoch 34110: Minibatch loss: 3.18 Minibatch accuracy: 18.75% Validation loss: 5.47 Validation accuracy: 2.19% Epoch 34868: Minibatch loss: 3.57 Minibatch accuracy: 18.75% Validation loss: 5.52 Validation accuracy: 2.39% Epoch 35626: Minibatch loss: 3.17 Minibatch accuracy: 15.62% Validation loss: 5.59 Validation accuracy: 2.15% Epoch 36384: Minibatch loss: 3.09 Minibatch accuracy: 18.75% Validation loss: 5.58 Validation accuracy: 2.70% Epoch 37142: Minibatch loss: 3.31 Minibatch accuracy: 10.94% Validation loss: 5.49 Validation accuracy: 2.46% ###Markdown Define 8-layer NN with Adam, dropout and batch normalization ###Code if model_type is 'ffnn': flattened_size = 
x_train.shape[1] n_hidden_nodes = 1000 graph = tf.Graph() with graph.as_default(): # input data tf_train_dataset, tf_train_labelset, tf_valid_dataset, tf_valid_labelset = \ def_input_data(batch_size, flattened_size, num_labels) #----- variables. layer_1_weights = weight_variable([flattened_size, n_hidden_nodes], name = "weights_1") layer_1_biases = bias_variable([n_hidden_nodes], name = "biases_1") layer_2_weights = weight_variable([n_hidden_nodes, n_hidden_nodes], name = "weights_2") layer_2_biases = bias_variable([n_hidden_nodes], name = "biases_2") layer_3_weights = weight_variable([n_hidden_nodes, n_hidden_nodes], name = "weights_3") layer_3_biases = bias_variable([n_hidden_nodes], name = "biases_3") layer_4_weights = weight_variable([n_hidden_nodes, n_hidden_nodes], name = "weights_4") layer_4_biases = bias_variable([n_hidden_nodes], name = "biases_4") layer_5_weights = weight_variable([n_hidden_nodes, n_hidden_nodes], name = "weights_5") layer_5_biases = bias_variable([n_hidden_nodes], name = "biases_5") layer_6_weights = weight_variable([n_hidden_nodes, n_hidden_nodes], name = "weights_6") layer_6_biases = bias_variable([n_hidden_nodes], name = "biases_6") layer_7_weights = weight_variable([n_hidden_nodes, n_hidden_nodes], name = "weights_7") layer_7_biases = bias_variable([n_hidden_nodes], name = "biases_7") layer_8_weights = weight_variable([n_hidden_nodes, num_labels], name = "weights_8") layer_8_biases = bias_variable([num_labels], name = "biases_8") keep_prob = tf.placeholder("float", name = "keep_prob") # model def model(data, name, proba = keep_prob): with tf.name_scope(name) as scope: layer_1 = tf.add(tf.matmul(data, layer_1_weights), layer_1_biases, name = "layer1") hidden_1 = tf.nn.dropout(tf.nn.relu(layer_1), proba, name = "dropout_1") norm_1 = tf.contrib.layers.batch_norm(hidden_1, center = True, scale = True, is_training = (keep_prob is not 1.0)) layer_2 = tf.add(tf.matmul(norm_1, layer_2_weights), layer_2_biases, name = "layer2") hidden_2 = tf.nn.dropout(tf.nn.relu(layer_2), proba, name = "dropout_2") norm_2 = tf.contrib.layers.batch_norm(hidden_2, center = True, scale = True, is_training = (keep_prob is not 1.0)) layer_3 = tf.add(tf.matmul(norm_2, layer_3_weights), layer_3_biases, name = "layer3") hidden_3 = tf.nn.dropout(tf.nn.relu(layer_3), proba, name = "dropout_3") norm_3 = tf.contrib.layers.batch_norm(hidden_3, center = True, scale = True, is_training = (keep_prob is not 1.0)) layer_4 = tf.add(tf.matmul(norm_3, layer_4_weights), layer_4_biases, name = "layer4") hidden_4 = tf.nn.dropout(tf.nn.relu(layer_4), proba, name = "dropout_4") norm_4 = tf.contrib.layers.batch_norm(hidden_4, center = True, scale = True, is_training = (keep_prob is not 1.0)) layer_5 = tf.add(tf.matmul(norm_4, layer_5_weights), layer_5_biases, name = "layer5") hidden_5 = tf.nn.dropout(tf.nn.relu(layer_5), proba, name = "dropout_5") norm_5 = tf.contrib.layers.batch_norm(hidden_5, center = True, scale = True, is_training = (keep_prob is not 1.0)) layer_6 = tf.add(tf.matmul(norm_5, layer_6_weights), layer_6_biases, name = "layer6") hidden_6 = tf.nn.dropout(tf.nn.relu(layer_6), proba, name = "dropout_6") norm_6 = tf.contrib.layers.batch_norm(hidden_6, center = True, scale = True, is_training = (keep_prob is not 1.0)) layer_7 = tf.add(tf.matmul(norm_6, layer_7_weights), layer_7_biases, name = "layer7") hidden_7 = tf.nn.dropout(tf.nn.relu(layer_7), proba, name = "dropout_7") norm_7 = tf.contrib.layers.batch_norm(hidden_7, center = True, scale = True, is_training = (keep_prob is not 1.0)) layer_8 = 
tf.add(tf.matmul(norm_7, layer_8_weights), layer_8_biases, name = "layer8") return layer_8 # training computation. logits = model(tf_train_dataset, "logits", keep_prob) loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = logits, labels = tf_train_labelset), name = "loss") # optimizer. optimizer_0 = tf.train.AdamOptimizer(learning_rate = 1e-3, epsilon = 0.001) optimizer = optimizer_0.minimize(loss) # predictions on training and validation data train_prediction = tf.nn.softmax(logits) valid_prediction = tf.nn.softmax(model(tf_valid_dataset, name = "validation")) def run_session_8_layer_norm(num_batches, name): with tf.Session(graph = graph) as session: # initialize session merged, writer = my_init_session(session.graph) print("\nSession initialized\n") # run all 'epochs' for batch_num in range(num_batches): # define batches and input offset = (batch_num * batch_size) % (y_train.shape[0] - batch_size) batch_data = x_train[offset:(offset + batch_size), :] batch_labels = y_train[offset:(offset + batch_size), :] feed_dict = {tf_train_dataset : batch_data, tf_train_labelset : batch_labels, keep_prob : 0.7 } # run this epoch _, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict = feed_dict) # display loss and accuracy periodically display_interval = batches_per_epoch if (batch_num % display_interval == 0): acc, val_acc, valid_predict_proba = display_train_status(batch_num, l, predictions, batch_labels, use_prob = True) # package the training history valid_predict_proba = valid_prediction.eval({keep_prob: 1.0}) return(valid_predict_proba) if model_type is not 'cnn': batches_per_epoch = math.ceil(y_train.shape[0] / batch_size) num_batches = 50 * batches_per_epoch y_valid_predict_proba = run_session_8_layer_norm(num_batches = num_batches, name = "HW4 FFNN") ###Output Session initialized Epoch 0: Minibatch loss: 5.62 Minibatch accuracy: 0.00% Validation loss: 5.54 Validation accuracy: 0.74% Epoch 758: Minibatch loss: 4.86 Minibatch accuracy: 0.00% Validation loss: 4.98 Validation accuracy: 3.17% Epoch 1516: Minibatch loss: 4.57 Minibatch accuracy: 3.12% Validation loss: 5.21 Validation accuracy: 3.48% Epoch 2274: Minibatch loss: 4.26 Minibatch accuracy: 3.12% Validation loss: 5.63 Validation accuracy: 4.03% Epoch 3032: Minibatch loss: 4.00 Minibatch accuracy: 9.38% Validation loss: 6.18 Validation accuracy: 3.91% Epoch 3790: Minibatch loss: 3.95 Minibatch accuracy: 7.81% Validation loss: 6.90 Validation accuracy: 3.87% Epoch 4548: Minibatch loss: 3.55 Minibatch accuracy: 15.62% Validation loss: 7.59 Validation accuracy: 3.99% Epoch 5306: Minibatch loss: 3.66 Minibatch accuracy: 12.50% Validation loss: 8.46 Validation accuracy: 3.99% Epoch 6064: Minibatch loss: 2.82 Minibatch accuracy: 32.81% Validation loss: 9.29 Validation accuracy: 3.99% Epoch 6822: Minibatch loss: 2.97 Minibatch accuracy: 26.56% Validation loss: 10.08 Validation accuracy: 4.66% Epoch 7580: Minibatch loss: 3.22 Minibatch accuracy: 14.06% Validation loss: 10.89 Validation accuracy: 4.66% Epoch 8338: Minibatch loss: 2.47 Minibatch accuracy: 29.69% Validation loss: 11.69 Validation accuracy: 4.30% Epoch 9096: Minibatch loss: 2.65 Minibatch accuracy: 31.25% Validation loss: 12.39 Validation accuracy: 4.69% Epoch 9854: Minibatch loss: 2.20 Minibatch accuracy: 39.06% Validation loss: 13.08 Validation accuracy: 4.66% Epoch 10612: Minibatch loss: 1.88 Minibatch accuracy: 51.56% Validation loss: 13.61 Validation accuracy: 4.38% Epoch 11370: Minibatch loss: 1.86 Minibatch accuracy: 48.44% Validation 
loss: 14.26 Validation accuracy: 4.15% Epoch 12128: Minibatch loss: 1.61 Minibatch accuracy: 59.38% Validation loss: 14.72 Validation accuracy: 4.66% Epoch 12886: Minibatch loss: 1.59 Minibatch accuracy: 50.00% Validation loss: 15.18 Validation accuracy: 4.81% Epoch 13644: Minibatch loss: 1.73 Minibatch accuracy: 50.00% Validation loss: 15.63 Validation accuracy: 4.30% Epoch 14402: Minibatch loss: 1.88 Minibatch accuracy: 43.75% Validation loss: 16.10 Validation accuracy: 5.16% Epoch 15160: Minibatch loss: 1.67 Minibatch accuracy: 50.00% Validation loss: 16.53 Validation accuracy: 4.38% Epoch 15918: Minibatch loss: 1.55 Minibatch accuracy: 56.25% Validation loss: 16.99 Validation accuracy: 5.13% Epoch 16676: Minibatch loss: 1.67 Minibatch accuracy: 51.56% Validation loss: 17.41 Validation accuracy: 4.85% Epoch 17434: Minibatch loss: 1.40 Minibatch accuracy: 59.38% Validation loss: 17.66 Validation accuracy: 4.19% Epoch 18192: Minibatch loss: 1.62 Minibatch accuracy: 56.25% Validation loss: 18.04 Validation accuracy: 4.23% Epoch 18950: Minibatch loss: 1.13 Minibatch accuracy: 64.06% Validation loss: 18.41 Validation accuracy: 4.89% Epoch 19708: Minibatch loss: 0.95 Minibatch accuracy: 67.19% Validation loss: 18.53 Validation accuracy: 4.97% Epoch 20466: Minibatch loss: 1.22 Minibatch accuracy: 65.62% Validation loss: 18.77 Validation accuracy: 4.85% Epoch 21224: Minibatch loss: 1.29 Minibatch accuracy: 62.50% Validation loss: 19.25 Validation accuracy: 5.32% Epoch 21982: Minibatch loss: 0.93 Minibatch accuracy: 65.62% Validation loss: 19.49 Validation accuracy: 5.52% Epoch 22740: Minibatch loss: 1.45 Minibatch accuracy: 60.94% Validation loss: 19.73 Validation accuracy: 5.24% Epoch 23498: Minibatch loss: 1.09 Minibatch accuracy: 73.44% Validation loss: 20.00 Validation accuracy: 5.09% Epoch 24256: Minibatch loss: 1.08 Minibatch accuracy: 71.88% Validation loss: 20.20 Validation accuracy: 4.58% Epoch 25014: Minibatch loss: 0.84 Minibatch accuracy: 71.88% Validation loss: 20.39 Validation accuracy: 4.89% Epoch 25772: Minibatch loss: 0.73 Minibatch accuracy: 81.25% Validation loss: 20.64 Validation accuracy: 4.81% Epoch 26530: Minibatch loss: 1.15 Minibatch accuracy: 70.31% Validation loss: 20.91 Validation accuracy: 4.62% Epoch 27288: Minibatch loss: 1.08 Minibatch accuracy: 73.44% Validation loss: 21.11 Validation accuracy: 4.58% Epoch 28046: Minibatch loss: 1.05 Minibatch accuracy: 78.12% Validation loss: 21.31 Validation accuracy: 4.89% Epoch 28804: Minibatch loss: 1.01 Minibatch accuracy: 70.31% Validation loss: 21.46 Validation accuracy: 5.05% Epoch 29562: Minibatch loss: 1.03 Minibatch accuracy: 67.19% Validation loss: 21.61 Validation accuracy: 4.89% Epoch 30320: Minibatch loss: 1.13 Minibatch accuracy: 65.62% Validation loss: 21.83 Validation accuracy: 4.54% Epoch 31078: Minibatch loss: 0.84 Minibatch accuracy: 76.56% Validation loss: 22.04 Validation accuracy: 4.73% Epoch 31836: Minibatch loss: 0.69 Minibatch accuracy: 78.12% Validation loss: 22.03 Validation accuracy: 5.20% Epoch 32594: Minibatch loss: 0.61 Minibatch accuracy: 76.56% Validation loss: 22.31 Validation accuracy: 4.93% Epoch 33352: Minibatch loss: 0.52 Minibatch accuracy: 84.38% Validation loss: 22.53 Validation accuracy: 5.09% Epoch 34110: Minibatch loss: 0.49 Minibatch accuracy: 81.25% Validation loss: 22.70 Validation accuracy: 4.66% Epoch 34868: Minibatch loss: 0.68 Minibatch accuracy: 79.69% Validation loss: 22.72 Validation accuracy: 4.97% Epoch 35626: Minibatch loss: 0.46 Minibatch accuracy: 89.06% Validation 
loss: 22.80 Validation accuracy: 4.81% Epoch 36384: Minibatch loss: 0.45 Minibatch accuracy: 87.50% Validation loss: 23.07 Validation accuracy: 5.13% Epoch 37142: Minibatch loss: 0.77 Minibatch accuracy: 76.56% Validation loss: 23.15 Validation accuracy: 5.01% ###Markdown Define 8-layer NN with Adam, dropout, normalization, and 4096 nodes per hidden layer ###Code if model_type is 'ffnn': flattened_size = x_train.shape[1] n_hidden_nodes = 4096 graph = tf.Graph() with graph.as_default(): # input data tf_train_dataset, tf_train_labelset, tf_valid_dataset, tf_valid_labelset = \ def_input_data(batch_size, flattened_size, num_labels) #----- variables. layer_1_weights = weight_variable([flattened_size, n_hidden_nodes], name = "weights_1") layer_1_biases = bias_variable([n_hidden_nodes], name = "biases_1") layer_2_weights = weight_variable([n_hidden_nodes, n_hidden_nodes], name = "weights_2") layer_2_biases = bias_variable([n_hidden_nodes], name = "biases_2") layer_3_weights = weight_variable([n_hidden_nodes, n_hidden_nodes], name = "weights_3") layer_3_biases = bias_variable([n_hidden_nodes], name = "biases_3") layer_4_weights = weight_variable([n_hidden_nodes, n_hidden_nodes], name = "weights_4") layer_4_biases = bias_variable([n_hidden_nodes], name = "biases_4") layer_5_weights = weight_variable([n_hidden_nodes, n_hidden_nodes], name = "weights_5") layer_5_biases = bias_variable([n_hidden_nodes], name = "biases_5") layer_6_weights = weight_variable([n_hidden_nodes, n_hidden_nodes], name = "weights_6") layer_6_biases = bias_variable([n_hidden_nodes], name = "biases_6") layer_7_weights = weight_variable([n_hidden_nodes, n_hidden_nodes], name = "weights_7") layer_7_biases = bias_variable([n_hidden_nodes], name = "biases_7") layer_8_weights = weight_variable([n_hidden_nodes, num_labels], name = "weights_8") layer_8_biases = bias_variable([num_labels], name = "biases_8") keep_prob = tf.placeholder("float", name = "keep_prob") # model def model(data, name, proba = keep_prob): with tf.name_scope(name) as scope: layer_1 = tf.add(tf.matmul(data, layer_1_weights), layer_1_biases, name = "layer1") hidden_1 = tf.nn.dropout(tf.nn.relu(layer_1), proba, name = "dropout_1") norm_1 = tf.contrib.layers.batch_norm(hidden_1, center = True, scale = True, is_training = (keep_prob is not 1.0)) layer_2 = tf.add(tf.matmul(norm_1, layer_2_weights), layer_2_biases, name = "layer2") hidden_2 = tf.nn.dropout(tf.nn.relu(layer_2), proba, name = "dropout_2") norm_2 = tf.contrib.layers.batch_norm(hidden_2, center = True, scale = True, is_training = (keep_prob is not 1.0)) layer_3 = tf.add(tf.matmul(norm_2, layer_3_weights), layer_3_biases, name = "layer3") hidden_3 = tf.nn.dropout(tf.nn.relu(layer_3), proba, name = "dropout_3") norm_3 = tf.contrib.layers.batch_norm(hidden_3, center = True, scale = True, is_training = (keep_prob is not 1.0)) layer_4 = tf.add(tf.matmul(norm_3, layer_4_weights), layer_4_biases, name = "layer4") hidden_4 = tf.nn.dropout(tf.nn.relu(layer_4), proba, name = "dropout_4") norm_4 = tf.contrib.layers.batch_norm(hidden_4, center = True, scale = True, is_training = (keep_prob is not 1.0)) layer_5 = tf.add(tf.matmul(norm_4, layer_5_weights), layer_5_biases, name = "layer5") hidden_5 = tf.nn.dropout(tf.nn.relu(layer_5), proba, name = "dropout_5") norm_5 = tf.contrib.layers.batch_norm(hidden_5, center = True, scale = True, is_training = (keep_prob is not 1.0)) layer_6 = tf.add(tf.matmul(norm_5, layer_6_weights), layer_6_biases, name = "layer6") hidden_6 = tf.nn.dropout(tf.nn.relu(layer_6), proba, name = 
"dropout_6") norm_6 = tf.contrib.layers.batch_norm(hidden_6, center = True, scale = True, is_training = (keep_prob is not 1.0)) layer_7 = tf.add(tf.matmul(norm_6, layer_7_weights), layer_7_biases, name = "layer7") hidden_7 = tf.nn.dropout(tf.nn.relu(layer_7), proba, name = "dropout_7") norm_7 = tf.contrib.layers.batch_norm(hidden_7, center = True, scale = True, is_training = (keep_prob is not 1.0)) layer_8 = tf.add(tf.matmul(norm_7, layer_8_weights), layer_8_biases, name = "layer8") return layer_8 # training computation. logits = model(tf_train_dataset, "logits", keep_prob) loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = logits, labels = tf_train_labelset), name = "loss") # optimizer. optimizer_0 = tf.train.AdamOptimizer(learning_rate = 1e-3, epsilon = 0.001) optimizer = optimizer_0.minimize(loss) # predictions on training and validation data train_prediction = tf.nn.softmax(logits) valid_prediction = tf.nn.softmax(model(tf_valid_dataset, name = "validation")) def run_session_8_layer_4096(num_batches, name): with tf.Session(graph = graph) as session: # initialize session merged, writer = my_init_session(session.graph) print("\nSession initialized\n") # run all 'epochs' for batch_num in range(num_batches): # define batches and input offset = (batch_num * batch_size) % (y_train.shape[0] - batch_size) batch_data = x_train[offset:(offset + batch_size), :] batch_labels = y_train[offset:(offset + batch_size), :] feed_dict = {tf_train_dataset : batch_data, tf_train_labelset : batch_labels, keep_prob : 0.7 } # run this epoch _, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict = feed_dict) # display loss and accuracy periodically display_interval = batches_per_epoch if (batch_num % display_interval == 0): acc, val_acc, valid_predict_proba = display_train_status(batch_num, l, predictions, batch_labels, use_prob = True) # package the training history valid_predict_proba = valid_prediction.eval({keep_prob: 1.0}) return(valid_predict_proba) if model_type is not 'cnn': batches_per_epoch = math.ceil(y_train.shape[0] / batch_size) num_batches = 50 * batches_per_epoch y_valid_predict_proba = run_session_8_layer_4096(num_batches = num_batches, name = "HW4 FFNN") ###Output Session initialized Epoch 0: Minibatch loss: 5.62 Minibatch accuracy: 0.00% Validation loss: 5.54 Validation accuracy: 1.06% Epoch 758: Minibatch loss: 5.51 Minibatch accuracy: 0.00% Validation loss: 6.08 Validation accuracy: 3.01% Epoch 1516: Minibatch loss: 5.26 Minibatch accuracy: 3.12% Validation loss: 6.38 Validation accuracy: 3.29% Epoch 2274: Minibatch loss: 4.38 Minibatch accuracy: 4.69% Validation loss: 7.17 Validation accuracy: 3.76% Epoch 3032: Minibatch loss: 4.35 Minibatch accuracy: 14.06% Validation loss: 8.37 Validation accuracy: 3.13% Epoch 3790: Minibatch loss: 3.04 Minibatch accuracy: 23.44% Validation loss: 10.62 Validation accuracy: 3.17% Epoch 4548: Minibatch loss: 2.60 Minibatch accuracy: 37.50% Validation loss: 12.66 Validation accuracy: 3.40% Epoch 5306: Minibatch loss: 2.13 Minibatch accuracy: 39.06% Validation loss: 14.40 Validation accuracy: 4.03% Epoch 6064: Minibatch loss: 1.38 Minibatch accuracy: 68.75% Validation loss: 16.23 Validation accuracy: 3.99% Epoch 6822: Minibatch loss: 1.21 Minibatch accuracy: 67.19% Validation loss: 17.75 Validation accuracy: 4.62% Epoch 7580: Minibatch loss: 1.24 Minibatch accuracy: 59.38% Validation loss: 18.60 Validation accuracy: 4.38% Epoch 8338: Minibatch loss: 0.59 Minibatch accuracy: 85.94% Validation loss: 19.62 Validation 
accuracy: 4.97% Epoch 9096: Minibatch loss: 0.67 Minibatch accuracy: 82.81% Validation loss: 20.40 Validation accuracy: 5.05% Epoch 9854: Minibatch loss: 0.57 Minibatch accuracy: 79.69% Validation loss: 21.15 Validation accuracy: 5.20% Epoch 10612: Minibatch loss: 0.67 Minibatch accuracy: 87.50% Validation loss: 21.91 Validation accuracy: 4.69% Epoch 11370: Minibatch loss: 0.34 Minibatch accuracy: 92.19% Validation loss: 22.32 Validation accuracy: 5.16% Epoch 12128: Minibatch loss: 0.44 Minibatch accuracy: 89.06% Validation loss: 22.90 Validation accuracy: 4.73% Epoch 12886: Minibatch loss: 0.37 Minibatch accuracy: 87.50% Validation loss: 23.16 Validation accuracy: 4.81% Epoch 13644: Minibatch loss: 0.27 Minibatch accuracy: 92.19% Validation loss: 23.47 Validation accuracy: 4.50% Epoch 14402: Minibatch loss: 0.44 Minibatch accuracy: 85.94% Validation loss: 23.85 Validation accuracy: 4.93% Epoch 15160: Minibatch loss: 0.32 Minibatch accuracy: 89.06% Validation loss: 24.27 Validation accuracy: 4.73% Epoch 15918: Minibatch loss: 0.37 Minibatch accuracy: 87.50% Validation loss: 24.48 Validation accuracy: 4.69% Epoch 16676: Minibatch loss: 0.21 Minibatch accuracy: 90.62% Validation loss: 24.62 Validation accuracy: 4.50% Epoch 17434: Minibatch loss: 0.58 Minibatch accuracy: 81.25% Validation loss: 24.95 Validation accuracy: 4.77% Epoch 18192: Minibatch loss: 0.25 Minibatch accuracy: 90.62% Validation loss: 25.10 Validation accuracy: 4.81% Epoch 18950: Minibatch loss: 0.26 Minibatch accuracy: 90.62% Validation loss: 25.38 Validation accuracy: 4.89% Epoch 19708: Minibatch loss: 0.14 Minibatch accuracy: 96.88% Validation loss: 25.42 Validation accuracy: 4.89% Epoch 20466: Minibatch loss: 0.32 Minibatch accuracy: 90.62% Validation loss: 25.44 Validation accuracy: 4.58% Epoch 21224: Minibatch loss: 0.62 Minibatch accuracy: 92.19% Validation loss: 25.62 Validation accuracy: 4.69% Epoch 21982: Minibatch loss: 0.16 Minibatch accuracy: 95.31% Validation loss: 26.02 Validation accuracy: 4.58% Epoch 22740: Minibatch loss: 0.27 Minibatch accuracy: 90.62% Validation loss: 26.12 Validation accuracy: 4.58% Epoch 23498: Minibatch loss: 0.21 Minibatch accuracy: 92.19% Validation loss: 26.07 Validation accuracy: 4.93% Epoch 24256: Minibatch loss: 0.09 Minibatch accuracy: 96.88% Validation loss: 26.37 Validation accuracy: 4.81% Epoch 25014: Minibatch loss: 0.15 Minibatch accuracy: 96.88% Validation loss: 26.31 Validation accuracy: 4.89% Epoch 25772: Minibatch loss: 0.07 Minibatch accuracy: 96.88% Validation loss: 26.61 Validation accuracy: 4.73% Epoch 26530: Minibatch loss: 0.16 Minibatch accuracy: 96.88% Validation loss: 26.68 Validation accuracy: 5.01% Epoch 27288: Minibatch loss: 0.30 Minibatch accuracy: 93.75% Validation loss: 26.77 Validation accuracy: 4.81% Epoch 28046: Minibatch loss: 0.05 Minibatch accuracy: 98.44% Validation loss: 26.89 Validation accuracy: 5.01% Epoch 28804: Minibatch loss: 0.05 Minibatch accuracy: 98.44% Validation loss: 26.83 Validation accuracy: 4.97% Epoch 29562: Minibatch loss: 0.17 Minibatch accuracy: 96.88% Validation loss: 26.82 Validation accuracy: 4.97% Epoch 30320: Minibatch loss: 0.22 Minibatch accuracy: 95.31% Validation loss: 27.02 Validation accuracy: 4.97% Epoch 31078: Minibatch loss: 0.06 Minibatch accuracy: 96.88% Validation loss: 27.03 Validation accuracy: 5.13% Epoch 31836: Minibatch loss: 0.06 Minibatch accuracy: 98.44% Validation loss: 27.28 Validation accuracy: 5.32% Epoch 32594: Minibatch loss: 0.30 Minibatch accuracy: 93.75% Validation loss: 27.13 Validation 
accuracy: 5.28% Epoch 33352: Minibatch loss: 0.08 Minibatch accuracy: 96.88% Validation loss: 27.31 Validation accuracy: 5.01% Epoch 34110: Minibatch loss: 0.11 Minibatch accuracy: 98.44% Validation loss: 27.57 Validation accuracy: 4.97% Epoch 34868: Minibatch loss: 0.03 Minibatch accuracy: 98.44% Validation loss: 27.63 Validation accuracy: 4.97% Epoch 35626: Minibatch loss: 0.09 Minibatch accuracy: 96.88% Validation loss: 27.60 Validation accuracy: 4.97% Epoch 36384: Minibatch loss: 0.24 Minibatch accuracy: 95.31% Validation loss: 27.62 Validation accuracy: 5.01% Epoch 37142: Minibatch loss: 0.15 Minibatch accuracy: 95.31% Validation loss: 27.81 Validation accuracy: 4.73% ###Markdown Final Model: define 8-layer NN with Adam, dropout, batch normalization, 4096 nodes, pyramidal architecture, and 64 principal components ###Code # help address overfitting by reducing number of principal components used n_pcs_to_use = 64 x_train = x_train[:, :n_pcs_to_use] x_valid = x_valid[:, :n_pcs_to_use] if model_type is 'ffnn': flattened_size = x_train.shape[1] n_hidden_nodes = 4096 graph = tf.Graph() with graph.as_default(): # input data tf_train_dataset, tf_train_labelset, tf_valid_dataset, tf_valid_labelset = \ def_input_data(batch_size, flattened_size, num_labels) #----- variables. layer_1_weights = weight_variable([flattened_size, n_hidden_nodes], name = "weights_1") layer_1_biases = bias_variable([n_hidden_nodes], name = "biases_1") layer_2_weights = weight_variable([n_hidden_nodes, n_hidden_nodes], name = "weights_2") layer_2_biases = bias_variable([n_hidden_nodes], name = "biases_2") layer_3_weights = weight_variable([n_hidden_nodes, n_hidden_nodes], name = "weights_3") layer_3_biases = bias_variable([n_hidden_nodes], name = "biases_3") layer_4_weights = weight_variable([int(n_hidden_nodes), int(n_hidden_nodes/2)], name = "weights_4") layer_4_biases = bias_variable([int(n_hidden_nodes/2)], name = "biases_4") layer_5_weights = weight_variable([int(n_hidden_nodes/2), int(n_hidden_nodes/4)], name = "weights_5") layer_5_biases = bias_variable([int(n_hidden_nodes//4)], name = "biases_5") layer_6_weights = weight_variable([int(n_hidden_nodes/4), int(n_hidden_nodes/8)], name = "weights_6") layer_6_biases = bias_variable([int(n_hidden_nodes/8)], name = "biases_6") layer_7_weights = weight_variable([int(n_hidden_nodes/8), int(n_hidden_nodes/16)], name = "weights_7") layer_7_biases = bias_variable([int(n_hidden_nodes/16)], name = "biases_7") layer_8_weights = weight_variable([int(n_hidden_nodes/16), num_labels], name = "weights_8") layer_8_biases = bias_variable([num_labels], name = "biases_8") keep_prob = tf.placeholder("float", name = "keep_prob") # model def model(data, name, proba = 1.0): with tf.name_scope(name) as scope: layer_1 = tf.add(tf.matmul(data, layer_1_weights), layer_1_biases, name = "layer1") hidden_1 = tf.nn.dropout(tf.nn.relu(layer_1), proba, name = "dropout_1") norm_1 = tf.contrib.layers.batch_norm(hidden_1, center = True, scale = True, is_training = (proba is not 1.0)) layer_2 = tf.add(tf.matmul(norm_1, layer_2_weights), layer_2_biases, name = "layer2") hidden_2 = tf.nn.dropout(tf.nn.relu(layer_2), proba, name = "dropout_2") norm_2 = tf.contrib.layers.batch_norm(hidden_2, center = True, scale = True, is_training = (proba is not 1.0)) layer_3 = tf.add(tf.matmul(norm_2, layer_3_weights), layer_3_biases, name = "layer3") hidden_3 = tf.nn.dropout(tf.nn.relu(layer_3), proba, name = "dropout_3") norm_3 = tf.contrib.layers.batch_norm(hidden_3, center = True, scale = True, is_training = (proba 
is not 1.0)) layer_4 = tf.add(tf.matmul(norm_3, layer_4_weights), layer_4_biases, name = "layer4") hidden_4 = tf.nn.dropout(tf.nn.relu(layer_4), proba, name = "dropout_4") norm_4 = tf.contrib.layers.batch_norm(hidden_4, center = True, scale = True, is_training = (proba is not 1.0)) layer_5 = tf.add(tf.matmul(norm_4, layer_5_weights), layer_5_biases, name = "layer5") hidden_5 = tf.nn.dropout(tf.nn.relu(layer_5), proba, name = "dropout_5") norm_5 = tf.contrib.layers.batch_norm(hidden_5, center = True, scale = True, is_training = (proba is not 1.0)) layer_6 = tf.add(tf.matmul(norm_5, layer_6_weights), layer_6_biases, name = "layer6") hidden_6 = tf.nn.dropout(tf.nn.relu(layer_6), proba, name = "dropout_6") norm_6 = tf.contrib.layers.batch_norm(hidden_6, center = True, scale = True, is_training = (proba is not 1.0)) layer_7 = tf.add(tf.matmul(norm_6, layer_7_weights), layer_7_biases, name = "layer7") hidden_7 = tf.nn.dropout(tf.nn.relu(layer_7), proba, name = "dropout_7") norm_7 = tf.contrib.layers.batch_norm(hidden_7, center = True, scale = True, is_training = (proba is not 1.0)) layer_8 = tf.add(tf.matmul(norm_7, layer_8_weights), layer_8_biases, name = "layer8") return layer_8 # training computation. logits = model(tf_train_dataset, "logits", keep_prob) loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = logits, labels = tf_train_labelset), name = "loss") # optimizer. optimizer_0 = tf.train.AdamOptimizer(learning_rate = 1e-4, epsilon = 0.001) optimizer = optimizer_0.minimize(loss) # predictions on training and validation data train_prediction = tf.nn.softmax(logits) valid_prediction = tf.nn.softmax(model(tf_valid_dataset, name = "validation", proba = 1.0)) def run_session_final_model(num_batches, name, k_prob = 1.0): # define the training history accumulators acc_hist, loss_hist, lr_hist, val_acc_hist, val_loss_hist = ([] for i in range(5)) with tf.Session(graph = graph) as session: # initialize session merged, writer = my_init_session(session.graph) print("\nSession initialized\n") # run all 'epochs' for batch_num in range(num_batches): # define batches and input offset = (batch_num * batch_size) % (y_train.shape[0] - batch_size) batch_data = x_train[offset:(offset + batch_size), :] batch_labels = y_train[offset:(offset + batch_size), :] feed_dict = {tf_train_dataset : batch_data, tf_train_labelset : batch_labels, keep_prob : k_prob} # run this epoch _, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict = feed_dict) # display loss and accuracy periodically # display_interval = 10 * round(y_train.shape[0] / batch_size) display_interval = batches_per_epoch if (batch_num % display_interval == 0): acc, val_acc, valid_predict_proba = \ display_train_status(batch_num / batches_per_epoch, l, predictions, batch_labels, use_prob = True) # accumulate training history # lr = session.run(optimizer_0._learning_rate_tensor) # SGD acc_hist.append(acc) loss_hist.append(l) lr_hist.append(session.run(optimizer_0._lr_t) ) val_acc_hist.append(val_acc) val_loss_hist.append(log_loss(y_valid, valid_predict_proba)) # package the training history valid_predict_proba = valid_prediction.eval({keep_prob: 1.0}) training_history = {'acc':acc_hist, 'loss':loss_hist, 'lr':lr_hist, 'val_acc':val_acc_hist, 'val_loss':val_loss_hist} return(valid_predict_proba, training_history) if model_type is not 'cnn': batches_per_epoch = math.ceil(y_train.shape[0] / batch_size) num_batches = 100 * batches_per_epoch y_valid_predict_proba, training_history = run_session_final_model(num_batches = 
num_batches, name = "HW4 FFNN", k_prob = 0.7) ###Output Session initialized Epoch 0: Minibatch loss: 5.60 Minibatch accuracy: 0.00% Validation loss: 5.55 Validation accuracy: 0.82% Epoch 1: Minibatch loss: 4.94 Minibatch accuracy: 1.56% Validation loss: 5.00 Validation accuracy: 2.58% Epoch 2: Minibatch loss: 4.87 Minibatch accuracy: 3.12% Validation loss: 4.82 Validation accuracy: 2.82% Epoch 3: Minibatch loss: 4.53 Minibatch accuracy: 3.12% Validation loss: 4.74 Validation accuracy: 3.48% Epoch 4: Minibatch loss: 4.45 Minibatch accuracy: 4.69% Validation loss: 4.74 Validation accuracy: 3.79% Epoch 5: Minibatch loss: 4.60 Minibatch accuracy: 6.25% Validation loss: 4.73 Validation accuracy: 4.11% Epoch 6: Minibatch loss: 4.32 Minibatch accuracy: 9.38% Validation loss: 4.76 Validation accuracy: 4.62% Epoch 7: Minibatch loss: 4.28 Minibatch accuracy: 3.12% Validation loss: 4.76 Validation accuracy: 4.38% Epoch 8: Minibatch loss: 4.27 Minibatch accuracy: 6.25% Validation loss: 4.80 Validation accuracy: 4.62% Epoch 9: Minibatch loss: 4.21 Minibatch accuracy: 4.69% Validation loss: 4.82 Validation accuracy: 4.38% Epoch 10: Minibatch loss: 4.47 Minibatch accuracy: 1.56% Validation loss: 4.87 Validation accuracy: 4.46% Epoch 11: Minibatch loss: 4.23 Minibatch accuracy: 9.38% Validation loss: 4.90 Validation accuracy: 4.62% Epoch 12: Minibatch loss: 4.06 Minibatch accuracy: 12.50% Validation loss: 4.95 Validation accuracy: 4.73% Epoch 13: Minibatch loss: 3.73 Minibatch accuracy: 9.38% Validation loss: 5.00 Validation accuracy: 4.97% Epoch 14: Minibatch loss: 3.67 Minibatch accuracy: 7.81% Validation loss: 5.06 Validation accuracy: 4.69% Epoch 15: Minibatch loss: 3.87 Minibatch accuracy: 9.38% Validation loss: 5.12 Validation accuracy: 5.05% Epoch 16: Minibatch loss: 3.65 Minibatch accuracy: 18.75% Validation loss: 5.18 Validation accuracy: 5.52% Epoch 17: Minibatch loss: 3.57 Minibatch accuracy: 15.62% Validation loss: 5.24 Validation accuracy: 5.16% Epoch 18: Minibatch loss: 3.74 Minibatch accuracy: 9.38% Validation loss: 5.33 Validation accuracy: 5.40% Epoch 19: Minibatch loss: 3.41 Minibatch accuracy: 20.31% Validation loss: 5.41 Validation accuracy: 5.28% Epoch 20: Minibatch loss: 3.30 Minibatch accuracy: 18.75% Validation loss: 5.50 Validation accuracy: 5.01% Epoch 21: Minibatch loss: 3.31 Minibatch accuracy: 31.25% Validation loss: 5.59 Validation accuracy: 5.24% Epoch 22: Minibatch loss: 3.11 Minibatch accuracy: 28.12% Validation loss: 5.71 Validation accuracy: 5.01% Epoch 23: Minibatch loss: 3.02 Minibatch accuracy: 25.00% Validation loss: 5.79 Validation accuracy: 5.36% Epoch 24: Minibatch loss: 3.37 Minibatch accuracy: 18.75% Validation loss: 5.90 Validation accuracy: 5.24% Epoch 25: Minibatch loss: 2.84 Minibatch accuracy: 28.12% Validation loss: 5.99 Validation accuracy: 4.81% Epoch 26: Minibatch loss: 2.56 Minibatch accuracy: 39.06% Validation loss: 6.11 Validation accuracy: 5.13% Epoch 27: Minibatch loss: 2.95 Minibatch accuracy: 28.12% Validation loss: 6.20 Validation accuracy: 4.85% Epoch 28: Minibatch loss: 2.65 Minibatch accuracy: 37.50% Validation loss: 6.31 Validation accuracy: 5.01% Epoch 29: Minibatch loss: 2.33 Minibatch accuracy: 37.50% Validation loss: 6.42 Validation accuracy: 5.48% Epoch 30: Minibatch loss: 2.81 Minibatch accuracy: 26.56% Validation loss: 6.55 Validation accuracy: 5.79% Epoch 31: Minibatch loss: 2.34 Minibatch accuracy: 46.88% Validation loss: 6.66 Validation accuracy: 5.44% Epoch 32: Minibatch loss: 2.21 Minibatch accuracy: 39.06% Validation loss: 6.78 
Validation accuracy: 5.52% Epoch 33: Minibatch loss: 1.80 Minibatch accuracy: 50.00% Validation loss: 6.89 Validation accuracy: 5.40% Epoch 34: Minibatch loss: 2.02 Minibatch accuracy: 42.19% Validation loss: 7.02 Validation accuracy: 5.44% Epoch 35: Minibatch loss: 1.83 Minibatch accuracy: 54.69% Validation loss: 7.10 Validation accuracy: 5.71% Epoch 36: Minibatch loss: 1.96 Minibatch accuracy: 50.00% Validation loss: 7.24 Validation accuracy: 5.48% Epoch 37: Minibatch loss: 1.69 Minibatch accuracy: 50.00% Validation loss: 7.35 Validation accuracy: 5.36% Epoch 38: Minibatch loss: 1.32 Minibatch accuracy: 59.38% Validation loss: 7.49 Validation accuracy: 5.59% Epoch 39: Minibatch loss: 1.24 Minibatch accuracy: 67.19% Validation loss: 7.59 Validation accuracy: 5.44% Epoch 40: Minibatch loss: 1.05 Minibatch accuracy: 76.56% Validation loss: 7.69 Validation accuracy: 5.63% Epoch 41: Minibatch loss: 1.65 Minibatch accuracy: 56.25% Validation loss: 7.82 Validation accuracy: 5.67% Epoch 42: Minibatch loss: 1.06 Minibatch accuracy: 76.56% Validation loss: 7.94 Validation accuracy: 5.75% Epoch 43: Minibatch loss: 1.10 Minibatch accuracy: 64.06% Validation loss: 8.04 Validation accuracy: 5.83% Epoch 44: Minibatch loss: 0.88 Minibatch accuracy: 73.44% Validation loss: 8.17 Validation accuracy: 5.91% Epoch 45: Minibatch loss: 0.96 Minibatch accuracy: 71.88% Validation loss: 8.27 Validation accuracy: 5.79% Epoch 46: Minibatch loss: 0.62 Minibatch accuracy: 75.00% Validation loss: 8.40 Validation accuracy: 5.52% Epoch 47: Minibatch loss: 0.57 Minibatch accuracy: 81.25% Validation loss: 8.50 Validation accuracy: 5.71% Epoch 48: Minibatch loss: 0.83 Minibatch accuracy: 78.12% Validation loss: 8.59 Validation accuracy: 5.79% Epoch 49: Minibatch loss: 0.49 Minibatch accuracy: 87.50% Validation loss: 8.69 Validation accuracy: 5.59% Epoch 50: Minibatch loss: 0.49 Minibatch accuracy: 85.94% Validation loss: 8.74 Validation accuracy: 5.52% Epoch 51: Minibatch loss: 0.40 Minibatch accuracy: 87.50% Validation loss: 8.83 Validation accuracy: 5.79% Epoch 52: Minibatch loss: 0.28 Minibatch accuracy: 90.62% Validation loss: 8.91 Validation accuracy: 5.48% Epoch 53: Minibatch loss: 0.33 Minibatch accuracy: 93.75% Validation loss: 8.99 Validation accuracy: 5.67% Epoch 54: Minibatch loss: 0.45 Minibatch accuracy: 82.81% Validation loss: 9.07 Validation accuracy: 5.63% Epoch 55: Minibatch loss: 0.24 Minibatch accuracy: 93.75% Validation loss: 9.18 Validation accuracy: 5.67% Epoch 56: Minibatch loss: 0.21 Minibatch accuracy: 96.88% Validation loss: 9.17 Validation accuracy: 5.28% Epoch 57: Minibatch loss: 0.33 Minibatch accuracy: 90.62% Validation loss: 9.27 Validation accuracy: 5.44% Epoch 58: Minibatch loss: 0.49 Minibatch accuracy: 87.50% Validation loss: 9.34 Validation accuracy: 5.67% Epoch 59: Minibatch loss: 0.36 Minibatch accuracy: 92.19% Validation loss: 9.48 Validation accuracy: 5.48% Epoch 60: Minibatch loss: 0.33 Minibatch accuracy: 93.75% Validation loss: 9.46 Validation accuracy: 5.63% Epoch 61: Minibatch loss: 0.17 Minibatch accuracy: 92.19% Validation loss: 9.53 Validation accuracy: 5.13% Epoch 62: Minibatch loss: 0.24 Minibatch accuracy: 95.31% Validation loss: 9.56 Validation accuracy: 5.24% Epoch 63: Minibatch loss: 0.26 Minibatch accuracy: 90.62% Validation loss: 9.64 Validation accuracy: 5.24% Epoch 64: Minibatch loss: 0.12 Minibatch accuracy: 95.31% Validation loss: 9.71 Validation accuracy: 5.32% Epoch 65: Minibatch loss: 0.18 Minibatch accuracy: 95.31% Validation loss: 9.69 Validation accuracy: 
5.91% Epoch 66: Minibatch loss: 0.12 Minibatch accuracy: 96.88% Validation loss: 9.75 Validation accuracy: 5.75% ###Markdown 5. Train CNN Model in Keras (please see other notebook) 6. Score FFNN model using validation data Metrics below show the classification results from a variety of perspectives, useful in tuning:* multiclass log loss* categorical accuracy* naive classification* F1 score* Hamming loss* detailed classification reportWhile classification effectiveness was limited for this model, its best accuracy of 5.99% was well above that of a naive classifier (below), whose accuracy was only 0.82%. In addition, the metrics in this section provide a solid basis for future work. ###Code ### prepare metrics calculations valid_num_images = y_valid.shape[0] # convert y_valid to array format y_valid_vector = one_hot_decode(y_valid) # convert predicted probabilities to binary 2D array y_valid_predict_proba_binary = np.zeros([y_valid.shape[0], num_labels]) for i in range(y_valid.shape[0]): y_valid_predict_proba_binary[i, y_valid_vector[i]] = 1 # convert predicted probabilities to binary 1D array using softmax y_valid_predict = np.argmax(y_valid_predict_proba, axis = -1) ###Output _____no_output_____ ###Markdown Multiclass log loss and categorical accuracy ###Code # accuracy code adapted from: # https://datascience.stackexchange.com/questions/14415/how-does-keras-calculate-accuracy loss = log_loss(y_valid, y_valid_predict_proba) my_accuracy = categorical_accuracy(y_valid, y_valid_predict_proba) print() print('Multiclass Log Loss:', round(loss, 4)) print('Categorical Accuracy:', round(my_accuracy, 4)) ###Output Multiclass Log Loss: 10.5707 Categorical Accuracy: 0.0544 ###Markdown Calculate true/false positive rates ###Code fprs = [] tprs = [] for i in range(y_valid.shape[1]): fpr, tpr, threshold = roc_curve(y_valid[:, i], y_valid_predict_proba[:, i]) fprs.append(fpr) tprs.append(tpr) ###Output _____no_output_____ ###Markdown Accuracy of naive classifier that predicts randomly ###Code y_naive_predict = np.floor(np.random.random(valid_num_images) / (1.0 / num_labels)) valid_naive_accu = np.sum(y_valid_vector == y_naive_predict) / valid_num_images print() print('Accuracy of naive classifier that predicts randomly:', round(valid_naive_accu, 4)) ###Output Accuracy of naive classifier that predicts randomly: 0.0059 ###Markdown F1 Score ###Code my_f1 = f1_score(y_valid_vector, y_valid_predict, average = 'macro') print() print('F1 score:', round(my_f1, 4)) ###Output F1 score: 0.0483 ###Markdown Hamming Loss ###Code my_hamming_loss = hamming_loss(y_valid_vector, y_valid_predict) print() print('Hamming loss:', round(my_hamming_loss, 4)) ###Output Hamming loss: 0.9456 ###Markdown Classification Report ###Code report = classification_report(y_valid_vector, y_valid_predict, labels = encoder_labels, target_names = encoder_names) print() print(report) ###Output precision recall f1-score support Affenpinscher 0.12 0.17 0.14 18 Afghan_Hound 0.33 0.12 0.18 25 African_Hunting_Dog 0.03 0.04 0.03 27 Airedale 0.08 0.05 0.06 22 American_Staffordshire_Terrier 0.00 0.00 0.00 13 Appenzeller 0.04 0.06 0.05 17 Australian_Terrier 0.06 0.04 0.05 25 Basenji 0.00 0.00 0.00 29 Basset 0.08 0.06 0.07 17 Beagle 0.05 0.03 0.04 31 Bedlington_Terrier 0.23 0.24 0.23 21 Bernese_Mountain_Dog 0.12 0.14 0.13 29 Black-And-Tan_Coonhound 0.03 0.06 0.04 16 Blenheim_Spaniel 0.00 0.00 0.00 26 Bloodhound 0.09 0.04 0.05 28 Bluetick 0.10 0.09 0.09 23 Border_Collie 0.03 0.06 0.04 17 Border_Terrier 0.06 0.04 0.05 26 Borzoi 0.07 0.05 0.06 20 
Boston_Bull 0.00 0.00 0.00 22 Bouvier_Des_Flandres 0.22 0.23 0.22 22 Boxer 0.00 0.00 0.00 22 Brabancon_Griffon 0.00 0.00 0.00 18 Briard 0.00 0.00 0.00 14 Brittany_Spaniel 0.00 0.00 0.00 25 Bull_Mastiff 0.00 0.00 0.00 18 Cairn 0.00 0.00 0.00 26 Cardigan 0.00 0.00 0.00 17 Chesapeake_Bay_Retriever 0.00 0.00 0.00 23 Chihuahua 0.00 0.00 0.00 20 Chow 0.00 0.00 0.00 15 Clumber 0.10 0.12 0.11 24 Cocker_Spaniel 0.00 0.00 0.00 25 Collie 0.00 0.00 0.00 21 Curly-Coated_Retriever 0.08 0.22 0.11 18 Dandie_Dinmont 0.07 0.07 0.07 27 Dhole 0.08 0.26 0.12 19 Dingo 0.00 0.00 0.00 13 Doberman 0.03 0.06 0.04 18 English_Foxhound 0.17 0.19 0.18 26 English_Setter 0.13 0.09 0.11 23 English_Springer 0.07 0.05 0.06 21 Entlebucher 0.14 0.11 0.12 36 Eskimo_Dog 0.00 0.00 0.00 16 Flat-Coated_Retriever 0.08 0.09 0.08 23 French_Bulldog 0.00 0.00 0.00 19 German_Shepherd 0.00 0.00 0.00 12 German_Short-Haired_Pointer 0.00 0.00 0.00 23 Giant_Schnauzer 0.03 0.04 0.03 27 Golden_Retriever 0.00 0.00 0.00 15 Gordon_Setter 0.06 0.08 0.07 25 Great_Dane 0.00 0.00 0.00 18 Great_Pyrenees 0.03 0.04 0.03 24 Greater_Swiss_Mountain_Dog 0.06 0.06 0.06 17 Groenendael 0.11 0.17 0.13 23 Ibizan_Hound 0.04 0.04 0.04 23 Irish_Setter 0.00 0.00 0.00 23 Irish_Terrier 0.00 0.00 0.00 20 Irish_Water_Spaniel 0.06 0.12 0.08 17 Irish_Wolfhound 0.00 0.00 0.00 24 Italian_Greyhound 0.00 0.00 0.00 21 Japanese_Spaniel 0.14 0.14 0.14 28 Keeshond 0.00 0.00 0.00 21 Kelpie 0.07 0.04 0.05 24 Kerry_Blue_Terrier 0.12 0.13 0.12 23 Komondor 0.04 0.05 0.05 20 Kuvasz 0.02 0.08 0.04 12 Labrador_Retriever 0.00 0.00 0.00 18 Lakeland_Terrier 0.31 0.16 0.21 25 Leonberg 0.18 0.15 0.16 27 Lhasa 0.00 0.00 0.00 19 Malamute 0.03 0.05 0.04 21 Malinois 0.06 0.12 0.08 17 Maltese_Dog 0.06 0.07 0.06 29 Mexican_Hairless 0.14 0.13 0.13 23 Miniature_Pinscher 0.00 0.00 0.00 26 Miniature_Poodle 0.06 0.05 0.05 19 Miniature_Schnauzer 0.05 0.04 0.04 25 Newfoundland 0.00 0.00 0.00 17 Norfolk_Terrier 0.10 0.13 0.11 15 Norwegian_Elkhound 0.06 0.04 0.05 24 Norwich_Terrier 0.00 0.00 0.00 19 Old_English_Sheepdog 0.00 0.00 0.00 21 Otterhound 0.05 0.05 0.05 19 Papillon 0.07 0.11 0.09 18 Pekinese 0.12 0.10 0.11 21 Pembroke 0.00 0.00 0.00 24 Pomeranian 0.00 0.00 0.00 28 Pug 0.00 0.00 0.00 23 Redbone 0.00 0.00 0.00 18 Rhodesian_Ridgeback 0.06 0.04 0.05 27 Rottweiler 0.06 0.12 0.08 17 Saint_Bernard 0.08 0.11 0.09 19 Saluki 0.05 0.04 0.04 28 Samoyed 0.12 0.08 0.10 24 Schipperke 0.04 0.04 0.04 23 Scotch_Terrier 0.03 0.05 0.04 20 Scottish_Deerhound 0.05 0.04 0.04 25 Sealyham_Terrier 0.13 0.30 0.18 20 Shetland_Sheepdog 0.00 0.00 0.00 21 Shih-Tzu 0.04 0.04 0.04 26 Siberian_Husky 0.00 0.00 0.00 25 Silky_Terrier 0.00 0.00 0.00 30 Soft-Coated_Wheaten_Terrier 0.00 0.00 0.00 12 Staffordshire_Bullterrier 0.00 0.00 0.00 21 Standard_Poodle 0.12 0.07 0.09 15 Standard_Schnauzer 0.00 0.00 0.00 17 Sussex_Spaniel 0.06 0.08 0.07 12 Tibetan_Mastiff 0.07 0.12 0.09 17 Tibetan_Terrier 0.00 0.00 0.00 23 Toy_Poodle 0.00 0.00 0.00 25 Toy_Terrier 0.10 0.13 0.12 23 Vizsla 0.00 0.00 0.00 19 Walker_Hound 0.00 0.00 0.00 18 Weimaraner 0.00 0.00 0.00 26 Welsh_Springer_Spaniel 0.00 0.00 0.00 13 West_Highland_White_Terrier 0.03 0.05 0.04 20 Whippet 0.02 0.04 0.03 25 Wire-Haired_Fox_Terrier 0.09 0.11 0.10 18 Yorkshire_Terrier 0.00 0.00 0.00 10 avg / total 0.05 0.05 0.05 2556 ###Markdown 7. Score CNN model using validation data (please see other notebook) 8. Visualize FFNN The classification accuracy graph below confirms the overfitting diagnosis mentioned above - training accuracy rises, while validation accuracy remains level througout. 
The spikiness of the training accuracy curve suggests an opportunity for reducing the learning rate as well.The ROC curve shows some progress in detecting signal, in that the curve is not entirely flat. At the same time, the area under the curve is not large enough to be useful for practical purposes. Training process ###Code ### plot training process - accuracy print() fig = plt.figure(figsize = (8, 6)) ax = fig.add_subplot(1, 1, 1) ax.plot(training_history['acc'], color = 'red', label = 'Training Data') ax.plot(training_history['val_acc'], color = 'blue', label = 'Validation Data') ax.set_ylim(0, 1) ax.axvline(x=np.argmax(training_history['val_acc']),linewidth=1, linestyle='dashed', label = 'Best Model', color='grey' ) ax.legend(loc = 'upper left') ax.set_xlabel("Epoch") ax.set_ylabel("Accuracy") ax.set_title("Classification Accuracy") plt.show() ###Output ###Markdown ROC curve for sample class ###Code # adapted from http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html def display_roc_curve(y, predict_y_proba): fp_rate1, tp_rate1, _ = roc_curve(y, predict_y_proba) roc_auc1 = auc(fp_rate1, tp_rate1) print() plt.figure(figsize = (6, 6)) plt.title('ROC AUC (Class = 8)') plt.plot(fp_rate1, tp_rate1, 'b', label='AUC = %0.2f'% roc_auc1) plt.legend(loc='lower right') plt.plot([0,1],[0,1],'r--', lw = 1) plt.xlim([-0.1, 1.2]) plt.ylim([-0.1, 1.2]) plt.ylabel('True Positive Rate') plt.xlabel('False Positive Rate') plt.show() ### ROC curve for sample class class_num = 8 display_roc_curve(y_valid[:, class_num], y_valid_predict_proba[:, class_num]) ###Output
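###Markdown As noted above, the spiky minibatch loss suggests that a decaying learning rate could stabilize late training. The cell below is a minimal, hypothetical sketch (not run as part of the original experiments) of how an exponential learning-rate schedule could replace the fixed-rate Adam optimizer inside the graph definitions above; the `decay_steps` and `decay_rate` values are illustrative, not tuned. ###Code
# Hypothetical sketch: exponential learning-rate decay for the Adam optimizer.
# This would replace the "# optimizer." block inside the graph definition above.
# `global_step` is incremented by each minimize() call and drives the schedule.
global_step = tf.Variable(0, trainable=False, name="global_step")
decayed_lr = tf.train.exponential_decay(
    learning_rate=1e-3,                  # initial rate used by the FFNN above
    global_step=global_step,
    decay_steps=10 * batches_per_epoch,  # decay roughly every 10 epochs (illustrative)
    decay_rate=0.9,
    staircase=True)
optimizer_0 = tf.train.AdamOptimizer(learning_rate=decayed_lr, epsilon=0.001)
optimizer = optimizer_0.minimize(loss, global_step=global_step)
###Output _____no_output_____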
_episodes_pynb/XX-visualizing-the-distribution-of-a-dataset.ipynb
###Markdown Visualizing the distribution of a datasetFor visualization purposes we are going to combine the power of Pandas together with a very useful plotting library called [Seaborn](https://seaborn.pydata.org/index.html).Typically, when working with Seaborn we first import a number of different libraries for numerical analysis, statistics and plotting. ###Code import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set(color_codes=True) ###Output _____no_output_____ ###Markdown We are going to work with an example of a bivariate distribution of two variables. ###Code mean, cov = [0, 1], [(1, .5), (.5, 1)] data = np.random.multivariate_normal(mean, cov, 200) df = pd.DataFrame(data, columns=["x", "y"]) ###Output _____no_output_____ ###Markdown We can explore the distribution of each of the two variables in many different ways, either using histograms, but also Kernel density estimates or rugplots ###Code sns.distplot(df.x); sns.distplot(df.x, kde=False, rug=True); ###Output _____no_output_____ ###Markdown Plotting bivariate distributionsBut some of the most interesting features come when we start looking at the correlations present in the data. ###Code sns.jointplot(x="x", y="y", data=df); sns.jointplot(x="x", y="y", data=df, kind="kde"); f, ax = plt.subplots(figsize=(6, 6)) sns.kdeplot(df.x, df.y, ax=ax) sns.rugplot(df.x, color="g", ax=ax) sns.rugplot(df.y, vertical=True, ax=ax); ###Output _____no_output_____ ###Markdown If we have a more complicated dataset with more variables in it we will be able to exploit Seaborn's capabilities even further. ###Code iris = sns.load_dataset("iris") sns.pairplot(iris); g = sns.PairGrid(iris) g.map_diag(sns.kdeplot) g.map_offdiag(sns.kdeplot, cmap="Blues_d", n_levels=6); ###Output No handles with labels found to put in legend. No handles with labels found to put in legend. No handles with labels found to put in legend. No handles with labels found to put in legend. /home/daviddesancho/anaconda3/lib/python3.6/site-packages/matplotlib/contour.py:967: UserWarning: The following kwargs were not used by contour: 'label', 'color' s)
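###Markdown Since the plots above center on pairwise relationships, a correlation heatmap condenses the same information into a single panel. This cell is a small addition to the tutorial, a sketch assuming the `iris` DataFrame loaded above; `DataFrame.corr()` uses only the numeric columns. ###Code
# Sketch: summarize the pairwise linear correlations of the iris measurements
sns.heatmap(iris.corr(), annot=True, cmap="Blues", vmin=-1, vmax=1)
plt.title("Pairwise correlations in the iris dataset");
###Output _____no_output_____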
05 Classification.ipynb
###Markdown The three classes (Species) in this problem are equally distributed, so accuracy would be a good measure of performance for model evaluation. ###Code iris.info() species = iris.Species.unique() colors = sns.color_palette("hls", 3) for i, v in enumerate(species): df = iris[iris.Species == v] plt.scatter(df["PetalLength"],df["SepalLength"], color = colors[i], label = v) plt.legend(loc = "upper left") plt.xlabel("Petal Length") plt.ylabel("Sepal Length") y = np.where(iris.Species == "Iris-virginica", 1, 0) y ###Output _____no_output_____ ###Markdown Sigmoid Activation Function ###Code p = np.linspace(-7, 7, 100) def phi(p): return 1 / (1 + np.exp(-p)) plt.plot(p, phi(p)) plt.xlabel("Linear regression output") plt.ylabel("Sigmoid") plt.title("Representation of Probability of prediction of 1") from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score, confusion_matrix, roc_curve, roc_auc_score from mlxtend.plotting import plot_decision_regions X = iris.iloc[:, [2, 0]].values X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.30, random_state = 340) X_train.shape lr = LogisticRegression() lr.fit(X_train, y_train) y_test_pred = lr.predict(X_test) outcome = pd.DataFrame({"actual": y_test,"pred": y_test_pred}) outcome["match"] = outcome.actual == outcome.pred outcome accuracy_score(y_test, y_test_pred) plt.figure(figsize=(8, 6)) plot_decision_regions(X, y, lr, X_highlight = X_test) plt.xlabel("Petal Length") plt.ylabel("Sepal Length") plt.legend(loc = "upper left") confusion_matrix(y_test, y_test_pred) accuracy_score(y_test, y_test_pred) ###Output _____no_output_____ ###Markdown By default, a positive-class probability > 0.5 yields a prediction of 1, else 0. What if we want to change the probability threshold? ###Code y_test_prob = lr.predict_proba(X_test)[:, 1] y_test_pred_new = np.where(y_test_prob > 0.8, 1, 0) print("Accuracy: ", accuracy_score(y_test, y_test_pred_new)) confusion_matrix(y_test, y_test_pred_new) ###Output Accuracy: 0.8 ###Markdown So we can observe that as we vary the threshold, the accuracy score varies too. But who decides on the threshold? ###Code fpr, tpr, thresholds = roc_curve(y_test, y_test_prob) plt.plot(fpr, tpr, linewidth = 2) plt.ylim(0, 1) plt.xlim(0, 1) plt.plot([0,1], [0,1], ls = "--", color = "k") plt.xlabel("False Positive Rate") plt.ylabel("True Positive Rate") plt.title("ROC Curve") roc_auc_score(y_test, y_test_prob) from sklearn.model_selection import cross_val_score scores = cross_val_score(cv=5, scoring="accuracy", estimator=lr, X=X_train, y= y_train) scores.mean(), scores.std() ###Output _____no_output_____ ###Markdown Now, let's use all features available to predict the class.
###Code X = iris.values[:, :-1] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.30) print(X_train.shape) lr = LogisticRegression(C = 10) scores = cross_val_score(cv=5, scoring="accuracy", estimator=lr, X=X_train, y= y_train) scores.mean(), scores.std() ###Output (105, 4) ###Markdown Regularization of model using complexity parameter ###Code params = 10 ** np.linspace(-5, 5, 100) means, stds = [], [] coefs = [] for p in params: lr = LogisticRegression(C = p) scores = cross_val_score(cv=5, scoring="accuracy", estimator=lr, X=X_train, y= y_train) means.append(scores.mean()) stds.append(scores.std()) lr.fit(X_train, y_train) coefs.append(lr.coef_[0]) means = np.array(means) stds = np.array(stds) plt.figure(figsize=(10, 5)) plt.subplot(1, 2, 1) plt.plot(params, means) plt.fill_between(params, means + stds, means - stds, alpha = 0.2) plt.xscale("log") plt.xlabel("C") plt.ylabel("accuracy") plt.title("Impact of complexity parameter (C)\n on accuracy score") plt.subplot(1, 2, 2) plt.plot(params, coefs) plt.xlabel("C") plt.ylabel("Coefficient") plt.xscale("log") plt.title("Impact of complexity parameter (C)\n on feature coefficients") plt.tight_layout() lr = LogisticRegression(C = 10) lr.fit(X_train, y_train) lr.coef_[0] lr = LogisticRegression(C = 1e-4) lr.fit(X_train, y_train) lr.coef_[0] ###Output _____no_output_____ ###Markdown Model Tuning using Grid Search Technique Find which parameters are available to tune. ###Code LogisticRegression().get_params() from sklearn.model_selection import GridSearchCV param_grid = {"C": 10 ** np.linspace(-5, 5, 100)} gs = GridSearchCV(cv=5, estimator = lr, scoring="accuracy", param_grid= param_grid) gs.fit(X_train, y_train) best = gs.best_estimator_ print("Best estimator score: ", best.score(X_test, y_test)) print(best.coef_[0]) gs.best_params_ ###Output _____no_output_____ ###Markdown Multi-Class Classification ###Code from sklearn.preprocessing import StandardScaler, LabelEncoder X = iris.iloc[:, 0:4].values y = iris.Species.values scaler = StandardScaler() X_std = scaler.fit_transform(X) pd.DataFrame(X_std).head() le = LabelEncoder() y = le.fit_transform(y) X_train, X_test, y_train, y_test = train_test_split(X_std, y, test_size = 0.3, random_state = 100) lr = LogisticRegression(max_iter=100, random_state=100) param_grid = [ {"C": 10 ** np.linspace(-5, 5, 100)} ] gs = GridSearchCV(cv=5, estimator = lr, scoring="accuracy", param_grid= param_grid) gs.fit(X_train, y_train) gs.best_params_ lr = gs.best_estimator_ lr.intercept_, lr.coef_ coeffs = pd.DataFrame(np.hstack([lr.intercept_.reshape(-1, 1), lr.coef_])) coeffs.columns = ["intercept", *iris.columns[0:4]] coeffs accuracy_score(y_test, lr.predict(X_test)) ###Output _____no_output_____ ###Markdown Decision Tree Classifier ###Code from sklearn.tree import DecisionTreeClassifier X = iris.iloc[:, [2,0]].values y = iris.Species.values scaler = StandardScaler() X_std = scaler.fit_transform(X) pd.DataFrame(X_std).head() le = LabelEncoder() y = le.fit_transform(y) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.30, random_state = 100) print(X_train.shape) tree = DecisionTreeClassifier(max_depth=4) tree.fit(X_train, y_train) plot_decision_regions(X, y, tree) plt.xlabel("Petal Length") plt.ylabel("Sepal Length") plt.legend(loc = "upper left") tree.score(X_test, y_test) ###Output _____no_output_____ ###Markdown We used only 2 features for model training because we wanted to plot the decision region.
Let's retrain the model using all features and compare the performance with logistic regression. ###Code X = iris.iloc[:, 0:4].values y = iris.Species.values X_std = scaler.fit_transform(X) tree = DecisionTreeClassifier(max_depth=4) mean_cv_accuracy = np.mean(cross_val_score(cv = 5, estimator=tree, X=X_std, y=y)) print("Mean accuracy using all features over full dataset: ", mean_cv_accuracy) ###Output Mean accuracy using all features over full dataset: 0.953333333333 ###Markdown This is close to the 95.56% accuracy of the tuned logistic regression model. We can tune over the tree's max_depth and other parameters to improve the model further. ###Code tuning_grid = {"max_depth": np.arange(1, 10)} tree = DecisionTreeClassifier() grid_search = GridSearchCV(cv=5, estimator=tree, param_grid=tuning_grid, scoring="accuracy") grid_search.fit(X_std, y) grid_search.best_score_, grid_search.best_params_ ###Output _____no_output_____ ###Markdown After tuning we got a better result: 0.9733. Random Forest Classifier ###Code X = iris.iloc[:, [2,0]].values y = iris.Species.values scaler = StandardScaler() X_std = scaler.fit_transform(X) pd.DataFrame(X_std).head() le = LabelEncoder() y = le.fit_transform(y) X_train, X_test, y_train, y_test = train_test_split(X_std, y, test_size = 0.30, random_state = 100) print(X_train.shape) from sklearn.ensemble import RandomForestClassifier forest = RandomForestClassifier(max_depth=4, random_state=123) forest.fit(X_train, y_train) print("Accuracy:", forest.score(X_test, y_test)) plot_decision_regions(X_std, y, forest) plt.xlabel("Petal Length (standardized)") plt.ylabel("Sepal Length (standardized)") plt.legend(loc = "upper left") ###Output Accuracy: 0.955555555556 ###Markdown SVM Classifier ###Code from sklearn.svm import SVC X = iris.iloc[:, [2,0]].values y = iris.Species.values scaler = StandardScaler() X_std = scaler.fit_transform(X) pd.DataFrame(X_std).head() le = LabelEncoder() y = le.fit_transform(y) X_train, X_test, y_train, y_test = train_test_split(X_std, y, test_size = 0.30, random_state = 100) print(X_train.shape) svc = SVC(gamma=1, C =1, kernel="rbf", random_state=345) svc.fit(X_train, y_train) print("accuracy", svc.score(X_test, y_test)) plot_decision_regions(X_std, y, svc) plt.xlabel("Petal Length") plt.ylabel("Sepal Length") plt.legend(loc = "upper left") plt.title("SVM classifier using gamma = 1") svc = SVC(gamma=10, C = 1, kernel="rbf", random_state=345) svc.fit(X_train, y_train) print("accuracy", svc.score(X_test, y_test)) plot_decision_regions(X_std, y, svc) plt.xlabel("Petal Length") plt.ylabel("Sepal Length") plt.legend(loc = "upper left") plt.title("SVM classifier using gamma = 10, C = 1") svc = SVC(gamma=1, C = 10, kernel="rbf", random_state=345) svc.fit(X_train, y_train) print("accuracy", svc.score(X_test, y_test)) plot_decision_regions(X_std, y, svc) plt.xlabel("Petal Length") plt.ylabel("Sepal Length") plt.legend(loc = "upper left") plt.title("SVM classifier using gamma = 1, C = 10") ###Output accuracy 1.0 ###Markdown Above we see the impact of different gamma and C values. The higher the gamma or C value, the more each observation seems to lose territory of influence, which tends to create a more overfit model. But we see the accuracy score already matches some of the best we have gotten so far. Let's retrain the model using all features and tune it using a parameter grid.
###Code svc = SVC(C=10, kernel="linear", random_state=345) svc.fit(X_train, y_train) print("accuracy", svc.score(X_test, y_test)) plot_decision_regions(X_std, y, svc) plt.xlabel("Petal Length") plt.ylabel("Sepal Length") plt.legend(loc = "upper left") plt.title("SVM classifier using gamma = 1") svc = SVC(C=10, kernel="poly", degree=2, random_state=345) svc.fit(X_train, y_train) print("accuracy", svc.score(X_test, y_test)) plot_decision_regions(X_std, y, svc) plt.xlabel("Petal Length") plt.ylabel("Sepal Length") plt.legend(loc = "upper left") plt.title("SVM classifier using gamma = 1") SVC().get_params() %%time param_grid = { "C": 10 ** np.linspace(-2, 2, 10), "gamma": 10 ** np.linspace(-1, 2, 10), "kernel": ["linear", "rbf"] } grid_search = GridSearchCV(cv = 5, estimator=SVC(), param_grid=param_grid, scoring="accuracy", verbose=True) grid_search.fit(X_std, y) print(grid_search.best_score_, grid_search.best_params_) ###Output Fitting 5 folds for each of 200 candidates, totalling 1000 fits 0.96 {'C': 4.6415888336127775, 'gamma': 1.0, 'kernel': 'rbf'} CPU times: user 2.11 s, sys: 35.2 ms, total: 2.14 s Wall time: 2.2 s
Homework/hw0/cs109a_hw0.ipynb
###Markdown CS109A Introduction to Data Science Homework 0: Knowledge Test**Harvard University****Fall 2019****Instructors**: Pavlos Protopapas, Kevin Rader, and Chris Tanner---This is a homework which you must turn in.This homework has the following intentions:1. To get you familiar with the jupyter/python environment2. You should easily understand these questions and what is being asked. If you struggle, this may not be the right class for you.3. You should be able to understand the intent (if not the exact syntax) of the code and be able to look up google and provide code that is asked of you. If you cannot, this may not be the right class for you. ###Code ## RUN THIS CELL TO GET THE RIGHT FORMATTING import requests from IPython.core.display import HTML styles = requests.get("https://raw.githubusercontent.com/Harvard-IACS/2018-CS109A/master/content/styles/cs109.css").text HTML(styles) ###Output _____no_output_____ ###Markdown --- Basic Math and Probability/Statistics Calculations We'll start you off with some basic math and statistics problems questions to make sure you have the appropriate background to be comfortable with concepts that will come up in CS 109a. Question 1: Mathiage is What Brings Us Together Today**Matrix Operations***Complete the following matrix operations (show your work as a markdown/latex notebook cell)* **1.1.** &nbsp;&nbsp;Let &nbsp;&nbsp; $ A = \left( \begin{array}{ccc}3 & 4 & 2 \\5 & 6 & 4 \\4 & 3 & 4 \end{array} \right) \,\,$ and $ \,\, B = \left( \begin{array}{ccc}1 & 4 & 2 \\1 & 9 & 3 \\2 & 3 & 3 \end{array} \right)$. Compute &nbsp;$A \cdot B$.**1.2.** &nbsp;&nbsp;Let &nbsp;&nbsp;$ A = \left( \begin{array}{ccc}0 & 12 & 8 \\1 & 15 & 0 \\0 & 6 & 3 \end{array} \right)$. Compute &nbsp; $A^{-1}$. **Solution**** Your solution here ** **Calculus and Probability***Complete the following (show your work as a markdown/latex notebook cell)***1.3**. From Wikipedia: > In mathematical optimization, statistics, econometrics, decision theory, machine learning and computational neuroscience, a loss function or cost function is a function that maps an event or values of one or more variables onto a real number intuitively representing some "cost" associated with the event. An optimization problem seeks to minimize a loss function. We've generated a cost function on parameters $x,y \in \mathcal{R}$ $L(x,y)= 3x^2y - y^3 - 3x^2 - 3y^2 + 2$. Find the critical points (optima) of $L(x,y)$.**1.4**. A central aspect of call center operations is the per minute statistics of caller demographics. Because of the massive call volumes call centers achieve, these per minute statistics can often take on well-known distributions. In the CS109 Homework Helpdesk, X and Y are discrete random variables with X measuring the number of female callers per minute and Y the total number of callers per minute. We've determined historically the joint pmf of (X, Y) and found it to be $$p_{X,Y}(x,y) = e^{-4}\frac{2^y}{x!(y-x)!}$$ where $y \in \mathcal{N}, x \in [0, y]$ (That is to say the total number of callers in a minute is a non-negative integer and the number of female callers naturally assumes a value between 0 and the total number of callers inclusive). Find the mean and variance of the marginal distribution of $X$. **(Hint: Think what values can y take on. A change of variables in your sum from y to y-x may make evaluating the sum easier.)** **Solution**** Your solution here ** ###Code ### The line %... is a jupyter "magic" command, and is not part of the Python language. 
# In this case we're just telling the plotting library to draw things on # the notebook, instead of on a separate window. # See the "import ... as ..." constructs below? They're just aliasing the package names. # That way we can call methods like plt.plot() instead of matplotlib.pyplot.plot(). import numpy as np import scipy as sp import pandas as pd import scipy.stats import matplotlib.pyplot as plt ###Output _____no_output_____ ###Markdown **Basic Statistics***Complete the following: you can perform the calculations by hand (show your work) or using software (include the code and output, screenshots are fine if it is from another platform).***1.5**. 37 of the 76 female CS concentrators have taken Data Science 1 (DS1) while 50 of the 133 male concentrators have taken DS1. Perform a statistical test to determine if interest in Data Science (by taking DS1) is related to sex. Be sure to state your conclusion. **Solution**** Your Solution here ** ------ Simulation of a Coin ThrowWe'd like to do some experiments with coin flips, but we don't have a physical coin at the moment. So let us **simulate** the process of flipping a coin on a computer. To do this we will use a form of the **random number generator** built into `numpy`. In particular, we will use the function `np.random.choice` which picks items with uniform probability from a list. If we provide it a list ['H', 'T'], it will pick one of the two items in the list. We can also ask it to do this multiple times by specifying the parameter `size`. ###Code def throw_a_coin(n_trials): return np.random.choice(['H','T'], size=n_trials) ###Output _____no_output_____ ###Markdown `np.sum` is a function that returns the sum of items in an iterable (i.e. a list or an array). Because python coerces `True` to 1 and `False` to 0, the effect of calling `np.sum` on the array of `True`s and `False`s will be to return the number of `True`s in the array, which is the same as the number of heads. Question 2: The 12 Labors of BernoullisNow that we know how to run our coin flip experiment, we're interested in knowing what happens as we choose larger and larger numbers of coin flips.**2.1**. Run one experiment of flipping a coin 40 times storing the resulting sample in the variable `throws1`. What's the total proportion of heads?**2.2**. **Replicate** the experiment in 2.1 storing the resulting sample in the variable `throws2`. What's the proportion of heads? How does this result compare to that you obtained in question 2.1?**2.3**. Write a function called `run_trials` that takes as input a list, called `n_flips`, of integers representing different values for the number of coin flips in a trial. For each element in the input list, `run_trials` should run the coin flip experiment with that number of flips and calculate the proportion of heads. The output of `run_trials` should be the list of calculated proportions. Store the output of calling `run_trials` in a list called `proportions`.**2.4**. Using the results in 2.3, reproduce the plot below. **2.5**. What's the appropriate observation about the result of running the coin flip experiment with larger and larger numbers of coin flips? Choose the appropriate one from the choices below. > A. Regardless of sample size, the probability in our experiment of observing heads is 0.5, so the proportion of heads observed in the coin-flip experiments will always be 0.5. >> B.
The proportions **fluctuate** about their long-run value of 0.5 (what you might expect if you tossed the coin an infinite number of times), in accordance with the notion of a fair coin (which we encoded in our simulation by having `np.random.choice` choose between two possibilities with equal probability), with the fluctuations seeming to become much smaller as the number of trials increases.>> C. The proportions **fluctuate** about their long-run value of 0.5 (what you might expect if you tossed the coin an infinite number of times), in accordance with the notion of a fair coin (which we encoded in our simulation by having `np.random.choice` choose between two possibilities with equal probability), with the fluctuations constant regardless of the number of trials. Solutions **2.1** ###Code ## Your code here ###Output _____no_output_____ ###Markdown **2.2** ###Code ## Your code here ###Output _____no_output_____ ###Markdown **2.3** ###Code n_flips = [10, 30, 50, 70, 100, 130, 170, 200, 500, 1000, 2000, 5000, 10000] ## Your code here ###Output _____no_output_____ ###Markdown **2.4** ###Code ## Your code here ###Output _____no_output_____ ###Markdown **2.5** **What's the appropriate observation about the result of applying the coin flip experiment to larger and larger numbers of coin flips? Choose the appropriate one.**** Your answer here ** Multiple Replications of the Coin Flip Experiment The coin flip experiment that we did above gave us some insight, but we don't have a good notion of how robust our results are under repetition as we've only run one experiment for each number of coin flips. Let's redo the coin flip experiment, but let's incorporate multiple repetitions of each number of coin flips. For each choice of the number of flips, $n$, in an experiment, we'll do $M$ replications of the coin tossing experiment. Question 3. So Many Replications **3.1**. Write a function `make_throws` which takes as arguments the `n_replications` ($M$) and the `n_flips` ($n$), and returns a list (of size $M$) of proportions, with each proportion calculated by taking the ratio of heads to the total number of coin flips in each replication of $n$ coin tosses. `n_flips` should be a python parameter whose value should default to 20 if unspecified when `make_throws` is called. **3.2**. Create the variables `proportions_at_n_flips_100` and `proportions_at_n_flips_1000`. Store in these variables the result of `make_throws` for `n_flips` equal to 100 and 1000 respectively while keeping `n_replications` at 200. Create a plot with the histograms of `proportions_at_n_flips_100` and `proportions_at_n_flips_1000`. Make sure to title your plot, label the x-axis and provide a legend. (See below for an example of what the plot may look like) ![](figs/HW0Plot2.png) **3.3**. Calculate the mean and variance of the results in each of the variables `proportions_at_n_flips_100` and `proportions_at_n_flips_1000` generated in 3.2. **3.4**. Based upon the plots, what would be your guess of what type of distribution is represented by the histograms in 3.2? Explain the factors that influenced your choice.> A. Gamma Distribution>> B. Beta Distribution>> C. Gaussian **3.5**. Let's just assume for argument's sake that the answer to 3.4 is **C. Gaussian**. Plot a **normed histogram** of your results `proportions_at_n_flips_1000` overlaid with your selection for the appropriate Gaussian distribution to represent the experiment of flipping a coin 1000 times.
(**Hint: What parameters should you use for your Gaussian?**) Answers **3.1** ###Code ## your code here ###Output _____no_output_____ ###Markdown **3.2** ###Code ## your code here ## code for your plot here ###Output _____no_output_____ ###Markdown **3.3** ###Code ## your code here ###Output _____no_output_____ ###Markdown **3.4** ** Your choice and explanation here ** **3.5** ###Code ## your code here ###Output _____no_output_____ ###Markdown Working With Distributions in Numpy/Scipy Earlier in this problem set we've been introduced to the Bernoulli "aka coin-flip" distribution and worked with it indirectly by using np.random.choice to make a random selection between two elements 'H' and 'T'. Let's see if we can create comparable results by taking advantage of the machinery for working with other probability distributions in python using numpy and scipy. Question 4: My Normal Binomial Let's use our coin-flipping machinery to do some experimentation with the binomial distribution. The binomial distribution, often represented by $k \sim Binomial(n, p)$, is often described as the number of successes in `n` Bernoulli trials, with each trial having a probability of success `p`. In other words, if you flip a coin `n` times, and each coin-flip has a probability `p` of landing heads, then the number of heads you observe is a sample from a binomial distribution. **4.1**. Sample the binomial distribution using coin flips by writing a function `sample_binomial1` which takes in integer parameters `n` and `size`. The output of `sample_binomial1` should be a list of length `size` observations with each observation being the outcome of flipping a coin `n` times and counting the number of heads. By default `size` should be 1. Your code should take advantage of the `throw_a_coin` function we defined above. **4.2**. Sample the binomial distribution directly using scipy.stats.binom.rvs by writing another function `sample_binomial2` that takes in integer parameters `n` and `size` as well as a float parameter `p` where $p \in [0 \ldots 1]$. The output of `sample_binomial2` should be a list of length `size` observations with each observation a sample of $Binomial(n, p)$ (taking advantage of scipy.stats.binom). By default `size` should be 1 and `p` should be 0.5. **4.3**. Run sample_binomial1 with 25 and 200 as values of the `n` and `size` parameters respectively and store the result in `binomial_trials1`. Run sample_binomial2 with 25, 200 and 0.5 as values of the `n`, `size` and `p` parameters respectively and store the results in `binomial_trials2`. Plot normed histograms of `binomial_trials1` and `binomial_trials2`. On both histograms, overlay a plot of the pmf of $Binomial(n=25, p=0.5)$. **4.4**. How do the plots in 4.3 compare? **4.5**. Find the mean and variance of `binomial_trials1`. How do they compare to the mean and variance of $Binomial(n=25, p=0.5)$? Answers **4.1** ###Code ## your code here ###Output _____no_output_____ ###Markdown **4.2** ###Code ## your code here ###Output _____no_output_____ ###Markdown **4.3** ###Code ## your code here ###Output _____no_output_____ ###Markdown **4.4** ** Your explanation here ** **4.5** ###Code ## your code here ###Output _____no_output_____ ###Markdown ** Your explanation here ** Testing Your Python Code In the following section we're going to do a brief introduction to unit testing.
We do so not only because unit testing has become an increasingly important part of the methodology of good software practices, but also because we plan on using unit tests as part of our own CS109 grading practices as a way of increasing rigor and repeatability while decreasing complexity and manual workload in our evaluations of your code. We'll provide an example unit test at the end of this section. Introduction to unit testing ###Code import ipytest ###Output _____no_output_____ ###Markdown ***Unit testing*** is one of the most important software testing methodologies. Wikipedia describes unit testing as "a software testing method by which individual units of source code, sets of one or more computer program modules together with associated control data, usage procedures, and operating procedures, are tested to determine whether they are fit for use." There are many different python libraries that support software testing in general and unit testing in particular. PyTest is one of the most widely used and well-liked libraries for this purpose. We've chosen to adopt PyTest (and ipytest, which allows pytest to be used in ipython notebooks) for our testing needs and we'll do a very brief introduction to PyTest here so that you can become familiar with it too. If you recall the function that we provided you above, `throw_a_coin`, which we'll reproduce here for convenience, it took a number and returned that many "coin tosses". We'll start by seeing what happens when we give it different sizes of $N$. If we give $N=0$, we should get an empty array of "experiments". ###Code def throw_a_coin(N): return np.random.choice(['H','T'], size=N) throw_a_coin(0) ###Output _____no_output_____ ###Markdown Great! If we give it positive values of $N$ we should get that number of 'H's and 'T's. ###Code throw_a_coin(5) throw_a_coin(8) ###Output _____no_output_____ ###Markdown Exactly what we expected! What happens if the input isn't a positive integer though? ###Code throw_a_coin(4.5) ###Output _____no_output_____ ###Markdown or ###Code throw_a_coin(-4) ###Output _____no_output_____ ###Markdown It looks like for both real numbers and negative numbers, we get two kinds of errors: a `TypeError` and a `ValueError`. We just engaged in one of the most rudimentary forms of testing, trial and error. We can use pytest to automate this process by writing some functions that will automatically (and potentially repeatedly) test individual units of our code. These are called ***unit tests***. Before we write our tests, let's consider what we would think of as the appropriate behavior for `throw_a_coin` under the conditions we considered above. If `throw_a_coin` receives positive integer input, we want it to behave exactly as it currently does -- returning an output consisting of a list of characters 'H' or 'T' with the length of the list equal to the positive integer input. For a positive floating point input, we want `throw_a_coin_properly` to treat the input as if it were rounded down to the nearest integer (thus returning a list of 'H' or 'T' characters whose length is the same as the input rounded down to the nearest integer). For any negative number input or an input of 0, we want `throw_a_coin_properly` to return an empty list. We create pytest tests by writing functions that start or end with "test". We'll use the **convention** that our tests will start with "test".
We begin the code cell with ipytest's clean_tests function as a way to clear out the results of previous tests starting with "test_throw_a_coin" (the * is the standard wild card character here). ###Code ## the * after test_throw_a_coin tells this code cell to clean out the results ## of all tests starting with test_throw_a_coin ipytest.clean_tests("test_throw_a_coin*") ## run throw_a_coin with a variety of positive integer inputs (all numbers between 1 and 20) and ## verify that the length of the output list (e.g ['H', 'H', 'T', 'H', 'T']) matches the input integer def test_throw_a_coin_length_positive(): for n in range(1,20): assert len(throw_a_coin(n)) == n ## verify that throw_a_coin produces an empty list (i.e. a list of length 0) if provide with an input ## of 0 def test_throw_a_coin_length_zero(): ## should be the empty array assert len(throw_a_coin(0)) == 0 ## verify that given a positive floating point input (i.e. 4.34344298547201), throw_a_coin produces a list of ## coin flips of length equal to highest integer less than the input def test_throw_a_coin_float(): for n in np.random.exponential(7, size=5): assert len(throw_a_coin(n)) == np.floor(n) ## verify that given any negative input (e.g. -323.4), throw_a_coin produces an empty def test_throw_a_coin_negative(): for n in range(-7, 0): assert len(throw_a_coin(n)) == 0 ipytest.run_tests() ###Output unittest.case.FunctionTestCase (test_throw_a_coin_float) ... ERROR unittest.case.FunctionTestCase (test_throw_a_coin_length_positive) ... ok unittest.case.FunctionTestCase (test_throw_a_coin_length_zero) ... ok unittest.case.FunctionTestCase (test_throw_a_coin_negative) ... ERROR ====================================================================== ERROR: unittest.case.FunctionTestCase (test_throw_a_coin_float) ---------------------------------------------------------------------- Traceback (most recent call last): File "<ipython-input-46-78a86d656b91>", line 22, in test_throw_a_coin_float assert len(throw_a_coin(n)) == np.floor(n) File "<ipython-input-40-9b62022d816e>", line 2, in throw_a_coin return np.random.choice(['H','T'], size=N) File "mtrand.pyx", line 1147, in mtrand.RandomState.choice File "mtrand.pyx", line 979, in mtrand.RandomState.randint File "mtrand.pyx", line 980, in mtrand.RandomState.randint File "randint_helpers.pxi", line 253, in mtrand._rand_int64 TypeError: 'numpy.float64' object cannot be interpreted as an integer ====================================================================== ERROR: unittest.case.FunctionTestCase (test_throw_a_coin_negative) ---------------------------------------------------------------------- Traceback (most recent call last): File "<ipython-input-46-78a86d656b91>", line 28, in test_throw_a_coin_negative assert len(throw_a_coin(n)) == 0 File "<ipython-input-40-9b62022d816e>", line 2, in throw_a_coin return np.random.choice(['H','T'], size=N) File "mtrand.pyx", line 1147, in mtrand.RandomState.choice File "mtrand.pyx", line 979, in mtrand.RandomState.randint File "mtrand.pyx", line 980, in mtrand.RandomState.randint File "randint_helpers.pxi", line 253, in mtrand._rand_int64 ValueError: negative dimensions are not allowed ---------------------------------------------------------------------- Ran 4 tests in 0.006s FAILED (errors=2) ###Markdown As you see, we were able to use pytest (and ipytest which allows us to run pytest tests in our ipython notebooks) to automate the tests that we constructed manually before and get the same errors and successes. 
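###Markdown As an aside: if raising an error were itself the behavior we wanted to keep, pytest can assert that directly with `pytest.raises`. A small sketch pinning down the two failures we just observed (this is an illustration, not part of the original problem set): ###Code
ipytest.clean_tests("test_throw_a_coin_raises*")

import pytest

## assert that a float input raises a TypeError, as seen in the output above
def test_throw_a_coin_raises_type_error_on_float():
    with pytest.raises(TypeError):
        throw_a_coin(4.5)

## assert that a negative input raises a ValueError, as seen in the output above
def test_throw_a_coin_raises_value_error_on_negative():
    with pytest.raises(ValueError):
        throw_a_coin(-4)

ipytest.run_tests()
###Output _____no_output_____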
Now time to fix our code and write our own test! Question 5: You Better Test Yourself before You Wreck Yourself!Now it's time to fix `throw_a_coin` so that it passes the tests we've written above as well as add our own test to the mix!**5.1**. Write a new function called `throw_a_coin_properly` that will pass the tests that we saw above. For your convenience we'll provide a new jupyter notebook cell with the tests rewritten for the new function. All the tests should pass. For a positive floating point input, we want `throw_a_coin_properly` to treat the input as if it were rounded down to the nearest integer. For a any negative number input, we want `throw_a_coin_properly` to treat the input as if it were 0.**5.2**. Write a new test for `throw_a_coin_properly` that verifies that all the elements of the resultant arrays are 'H' or 'T'. Answers **5.1** ###Code # your code here ipytest.clean_tests("test_throw_a_coin*") def test_throw_a_coin_properly_length_positive(): for n in range(1,20): assert len(throw_a_coin_properly(n)) == n def test_throw_a_coin_properly_length_zero(): ## should be the empty array assert len(throw_a_coin_properly(0)) == 0 def test_throw_a_coin_properly_float(): for n in np.random.exponential(7, size=5): assert len(throw_a_coin_properly(n)) == np.floor(n) def test_throw_a_coin_properly_negative(): for n in range(-7, 0): assert len(throw_a_coin_properly(n)) == 0 ipytest.run_tests() ###Output _____no_output_____ ###Markdown **5.2** ###Code ipytest.clean_tests("test_throw_a_coin*") ## write a test that verifies you don't have any other elements except H's and T's def test_throw_a_coin_properly_verify_H_T(): # your code here ipytest.run_tests() ###Output _____no_output_____
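###Markdown For reference, here is one possible implementation that satisfies all of the tests above, together with one way to write the 5.2 test — a sketch rather than an official solution; it assumes numpy is imported as np: ###Code
def throw_a_coin_properly(N):
    # round floats down to the nearest integer and treat any negative input as 0
    N = max(int(np.floor(N)), 0)
    return np.random.choice(['H', 'T'], size=N)

## verify that every element of the result is either 'H' or 'T'
def test_throw_a_coin_properly_verify_H_T():
    for n in range(1, 20):
        assert all(flip in ('H', 'T') for flip in throw_a_coin_properly(n))
###Output _____no_output_____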
notebooks/hydrogen-abstraction/2 microkinetics.ipynb
###Markdown Hydrogen abstraction of methane by chlorine atoms: microkinetic simulationSimulate a microkinetics system using calculated reaction rate constants. ###Code import os import sys import matplotlib.pyplot as plt import numpy as np import seaborn as sns import overreact as rx from overreact import constants from overreact import datasets sns.set(style="white", context="notebook", palette="colorblind", font_scale=1.1) temperature = 298.15 # basisset = "6-311G(2df,2pd)" # 6-311G(2df,2p) best predicts activation enthalpy basisset = 'cc-pVTZ' # ΔH‡(T=1) = 4.20 kcal/mol (closest to Tanaka's result, 4.08 kcal/mol) model = rx.parse_model( os.path.join(datasets.data_path, f"tanaka1996/UMP2/{basisset}/model.k") ) k_eck = rx.get_k( model.scheme, model.compounds, temperature=temperature, scale="M-1 s-1" ) # October 2020 estimates (in ppb) for CH4 from https://esrl.noaa.gov/gmd/ccgg/trends_ch4/ # y0_CH4 = 1890.9 * 1e-6 / np.sum(model.compounds["CH4"].atommasses) # Cl· estimate: 1.3e4 molecules / cm^-3 (doi:10.1038/srep36821) # y0_Cl = 1.3e4 * 1e-3 / constants.N_A # Estimate (in ppb) for HCl from doi:10.1002/2013JD020992 # y0_HCl = 1.3 * 1e-6 / np.sum(model.compounds["HCl"].atommasses) # Approximately within experimental conditions of doi:10.1021/jp0257909 # y0_Cl = 9e10 * 1e-3 / constants.N_A # y0_CH4 = 39 * y0_Cl # y0_HCl = 0.00 y0_Cl = 0.1e-6 y0_CH4 = 2.5 * y0_Cl y0_HCl = 0.25 * y0_Cl y0 = [y0_CH4, y0_Cl, 0.0, 0.0, y0_HCl] y0 dydt = rx.get_dydt(model.scheme, k_eck) y, r = rx.get_y(dydt, y0=y0) model.scheme.compounds y(y.t_max) truename = { "CH4": "CH$_4$", "Cl·": "Cl·", "CH3·": "CH$_3$·", "HCl": "HCl", } t = np.linspace(y.t_min, 1) fig, ax = plt.subplots() for i, name in enumerate(model.scheme.compounds): if not rx.is_transition_state(name): # if name in {"CH3·"}: ax.plot(1e3 * t, 1e9 * y(t)[i], label=f"{truename[name]}") ax.set_ylabel("Concentration [nM]") ax.set_xlabel("Time [ms]") # ax.set_yscale("log") # ax.set_xscale("log") # ax.legend(loc="lower right") ax.legend(loc="best") fig.tight_layout() ###Output _____no_output_____
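###Markdown As a convenience, the final state of the system can be tabulated against the compound names — a small sketch reusing only objects created above, and assuming `y(y.t_max)` returns one value per entry of `model.scheme.compounds`, as the cells above suggest: ###Code
import pandas as pd

# final concentrations (M) at the end of the integration window
final_state = pd.Series(y(y.t_max), index=model.scheme.compounds)
print(final_state)
###Output _____no_output_____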
Michigan_AppliedDataScienceWithPython/AppliedMachineLearning/Assignment+2.ipynb
###Markdown ---_You are currently looking at **version 1.5** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-machine-learning/resources/bANLa) course resource._--- Assignment 2In this assignment you'll explore the relationship between model complexity and generalization performance, by adjusting key parameters of various supervised learning models. Part 1 of this assignment will look at regression and Part 2 will look at classification. Part 1 - Regression First, run the following block to set up the variables needed for later sections. ###Code import numpy as np import pandas as pd from sklearn.model_selection import train_test_split np.random.seed(0) n = 15 x = np.linspace(0,10,n) + np.random.randn(n)/5 y = np.sin(x)+x/6 + np.random.randn(n)/10 X_train, X_test, y_train, y_test = train_test_split(x, y, random_state=0) # You can use this function to help you visualize the dataset by # plotting a scatterplot of the data points # in the training and test sets. def part1_scatter(): #import matplotlib.pyplot as plt #%matplotlib notebook plt.figure() plt.scatter(X_train, y_train, label='training data') plt.scatter(X_test, y_test, label='test data') plt.legend(loc=4); # NOTE: Uncomment the function below to visualize the data, but be sure # to **re-comment it before submitting this assignment to the autograder**. #part1_scatter() ###Output _____no_output_____ ###Markdown Question 1Write a function that fits a polynomial LinearRegression model on the *training data* `X_train` for degrees 1, 3, 6, and 9. (Use PolynomialFeatures in sklearn.preprocessing to create the polynomial features and then fit a linear regression model) For each model, find 100 predicted values over the interval x = 0 to 10 (e.g. `np.linspace(0,10,100)`) and store this in a numpy array. 
The first row of this array should correspond to the output from the model trained on degree 1, the second row degree 3, the third row degree 6, and the fourth row degree 9.The figure above shows the fitted models plotted on top of the original data (using `plot_one()`).*This function should return a numpy array with shape `(4, 100)`* ###Code np.random.seed(0) n = 15 x = np.linspace(0,10,n) + np.random.randn(n)/5 y = np.sin(x)+x/6 + np.random.randn(n)/10 X_train, X_test, y_train, y_test = train_test_split(x, y, random_state=0) # transforming the data to include another axis X_train = X_train[:, np.newaxis] y_train = y_train[:, np.newaxis] X_test = X_test[:, np.newaxis] y_test = y_test[:, np.newaxis] def answer_one(): from sklearn.linear_model import LinearRegression from sklearn.preprocessing import PolynomialFeatures import numpy as np newXToPredict = np.linspace(0,10,100) newXToPredict = newXToPredict[:,np.newaxis] result_array = np.empty((4,100)) index = 0 for i in [1,3, 6, 9,]: polynomial_features= PolynomialFeatures(degree=i) x_poly = polynomial_features.fit_transform(X_train) newXToPredictPoly = polynomial_features.fit_transform(newXToPredict) model = LinearRegression() model.fit(x_poly, y_train) result = model.predict(newXToPredictPoly) result_array[index] = result.transpose() index+=1 return result_array # Return your answer # feel free to use the function plot_one() to replicate the figure # from the prompt once you have completed question one def plot_one(degree_predictions): #import matplotlib.pyplot as plt #%matplotlib notebook plt.figure(figsize=(10,5)) plt.plot(X_train, y_train, 'o', label='training data', markersize=10) plt.plot(X_test, y_test, 'o', label='test data', markersize=10) for i,degree in enumerate([1,3,6,9]): plt.plot(np.linspace(0,10,100), degree_predictions[i], alpha=0.8, lw=2, label='degree={}'.format(degree)) plt.ylim(-1,2.5) plt.legend(loc=4) #plot_one(answer_one()) ###Output _____no_output_____ ###Markdown Question 2Write a function that fits a polynomial LinearRegression model on the training data `X_train` for degrees 0 through 9. For each model compute the $R^2$ (coefficient of determination) regression score on the training data as well as the the test data, and return both of these arrays in a tuple.*This function should return one tuple of numpy arrays `(r2_train, r2_test)`. Both arrays should have shape `(10,)`* ###Code def answer_two(): from sklearn.linear_model import LinearRegression from sklearn.preprocessing import PolynomialFeatures from sklearn.metrics.regression import r2_score # Your code here from sklearn.linear_model import LinearRegression from sklearn.preprocessing import PolynomialFeatures import numpy as np newXToPredict = np.linspace(0,10,100) newXToPredict = newXToPredict[:,np.newaxis] r2_train = np.empty((10,1)) r2_test = np.empty((10,1)) for i in range(0,10): polynomial_features= PolynomialFeatures(degree=i) x_poly = polynomial_features.fit_transform(X_train) x_polyTest = polynomial_features.fit_transform(X_test) newXToPredictPoly = polynomial_features.fit_transform(newXToPredict) model = LinearRegression() model.fit(x_poly, y_train) predictTrain = model.predict(x_poly) predictTest = model.predict(x_polyTest) r2_train[i] = r2_score(y_train,predictTrain) r2_test[i] = r2_score(y_test,predictTest) return (r2_train[:,0],r2_test[:,0])# Your answer here ###Output [ 0. 
0.42924578 0.4510998 0.58719954 0.91941945 0.97578641 0.99018233 0.99352509 0.99637545 0.99803706] [-0.47808642 -0.45237104 -0.06856984 0.00533105 0.73004943 0.87708301 0.9214094 0.92021504 0.63247951 -0.64525377] ###Markdown Question 3Based on the $R^2$ scores from question 2 (degree levels 0 through 9), what degree level corresponds to a model that is underfitting? What degree level corresponds to a model that is overfitting? What choice of degree level would provide a model with good generalization performance on this dataset? Hint: Try plotting the $R^2$ scores from question 2 to visualize the relationship between degree level and $R^2$. Remember to comment out the import matplotlib line before submission.*This function should return one tuple with the degree values in this order: `(Underfitting, Overfitting, Good_Generalization)`. There might be multiple correct solutions, however, you only need to return one possible solution, for example, (1,2,3).* ###Code def answer_three(): # Your code here return (0,9,7)# Return your answer ###Output _____no_output_____ ###Markdown Question 4Training models on high degree polynomial features can result in overly complex models that overfit, so we often use regularized versions of the model to constrain model complexity, as we saw with Ridge and Lasso linear regression.For this question, train two models: a non-regularized LinearRegression model (default parameters) and a regularized Lasso Regression model (with parameters `alpha=0.01`, `max_iter=10000`) both on polynomial features of degree 12. Return the $R^2$ score for both the LinearRegression and Lasso model's test sets.*This function should return one tuple `(LinearRegression_R2_test_score, Lasso_R2_test_score)`* ###Code def answer_four(): from sklearn.preprocessing import PolynomialFeatures from sklearn.linear_model import Lasso, LinearRegression from sklearn.metrics.regression import r2_score # Your code here polynomial_features= PolynomialFeatures(degree=12) x_poly = polynomial_features.fit_transform(X_train) x_polyTest = polynomial_features.fit_transform(X_test) model = LinearRegression() model.fit(x_poly, y_train) predictTest = model.predict(x_polyTest) LinearRegression_R2_test_score = r2_score(y_test,predictTest) model = Lasso(alpha=0.01, max_iter=100000) model.fit(x_poly, y_train) predictTest = model.predict(x_polyTest) Lasso_R2_test_score = r2_score(y_test,predictTest) return (LinearRegression_R2_test_score, Lasso_R2_test_score) # Your answer here ###Output _____no_output_____ ###Markdown Part 2 - ClassificationHere's an application of machine learning that could save your life! For this section of the assignment we will be working with the [UCI Mushroom Data Set](http://archive.ics.uci.edu/ml/datasets/Mushroom?ref=datanews.io) stored in `readonly/mushrooms.csv`. The data will be used to train a model to predict whether or not a mushroom is poisonous. The following attributes are provided:*Attribute Information:*1. cap-shape: bell=b, conical=c, convex=x, flat=f, knobbed=k, sunken=s 2. cap-surface: fibrous=f, grooves=g, scaly=y, smooth=s 3. cap-color: brown=n, buff=b, cinnamon=c, gray=g, green=r, pink=p, purple=u, red=e, white=w, yellow=y 4. bruises?: bruises=t, no=f 5. odor: almond=a, anise=l, creosote=c, fishy=y, foul=f, musty=m, none=n, pungent=p, spicy=s 6. gill-attachment: attached=a, descending=d, free=f, notched=n 7. gill-spacing: close=c, crowded=w, distant=d 8. gill-size: broad=b, narrow=n 9. 
gill-color: black=k, brown=n, buff=b, chocolate=h, gray=g, green=r, orange=o, pink=p, purple=u, red=e, white=w, yellow=y 10. stalk-shape: enlarging=e, tapering=t 11. stalk-root: bulbous=b, club=c, cup=u, equal=e, rhizomorphs=z, rooted=r, missing=? 12. stalk-surface-above-ring: fibrous=f, scaly=y, silky=k, smooth=s 13. stalk-surface-below-ring: fibrous=f, scaly=y, silky=k, smooth=s 14. stalk-color-above-ring: brown=n, buff=b, cinnamon=c, gray=g, orange=o, pink=p, red=e, white=w, yellow=y 15. stalk-color-below-ring: brown=n, buff=b, cinnamon=c, gray=g, orange=o, pink=p, red=e, white=w, yellow=y 16. veil-type: partial=p, universal=u 17. veil-color: brown=n, orange=o, white=w, yellow=y 18. ring-number: none=n, one=o, two=t 19. ring-type: cobwebby=c, evanescent=e, flaring=f, large=l, none=n, pendant=p, sheathing=s, zone=z 20. spore-print-color: black=k, brown=n, buff=b, chocolate=h, green=r, orange=o, purple=u, white=w, yellow=y 21. population: abundant=a, clustered=c, numerous=n, scattered=s, several=v, solitary=y 22. habitat: grasses=g, leaves=l, meadows=m, paths=p, urban=u, waste=w, woods=dThe data in the mushrooms dataset is currently encoded with strings. These values will need to be encoded to numeric to work with sklearn. We'll use pd.get_dummies to convert the categorical variables into indicator variables. ###Code import pandas as pd import numpy as np from sklearn.model_selection import train_test_split mush_df = pd.read_csv('mushrooms.csv') mush_df2 = pd.get_dummies(mush_df) X_mush = mush_df2.iloc[:,2:] y_mush = mush_df2.iloc[:,1] # use the variables X_train2, y_train2 for Question 5 X_train2, X_test2, y_train2, y_test2 = train_test_split(X_mush, y_mush, random_state=0) # For performance reasons in Questions 6 and 7, we will create a smaller version of the # entire mushroom dataset for use in those questions. For simplicity we'll just re-use # the 25% test split created above as the representative subset. # # Use the variables X_subset, y_subset for Questions 6 and 7. X_subset = X_test2 y_subset = y_test2 ###Output _____no_output_____ ###Markdown Question 5Using `X_train2` and `y_train2` from the preceeding cell, train a DecisionTreeClassifier with default parameters and random_state=0. What are the 5 most important features found by the decision tree?As a reminder, the feature names are available in the `X_train2.columns` property, and the order of the features in `X_train2.columns` matches the order of the feature importance values in the classifier's `feature_importances_` property. *This function should return a list of length 5 containing the feature names in descending order of importance.**Note: remember that you also need to set random_state in the DecisionTreeClassifier.* ###Code def answer_five(): from sklearn.tree import DecisionTreeClassifier # Your code here clf = DecisionTreeClassifier(random_state = 0) clf = clf.fit(X_train2, y_train2) mostImportant = clf.feature_importances_ importance, mostImportant = zip(*sorted(zip(clf.feature_importances_, X_train2.columns),reverse=True)) top_features = [] top5 = np.argsort(-clf.feature_importances_)[:5] for i in top5: top_features.append(mush_df2.columns[i]) return top_features# Your answer here answer_five() ###Output _____no_output_____ ###Markdown Question 6For this question, we're going to use the `validation_curve` function in `sklearn.model_selection` to determine training and test scores for a Support Vector Classifier (`SVC`) with varying parameter values. 
Recall that the validation_curve function, in addition to taking an initialized unfitted classifier object, takes a dataset as input and does its own internal train-test splits to compute results.**Because creating a validation curve requires fitting multiple models, for performance reasons this question will use just a subset of the original mushroom dataset: please use the variables X_subset and y_subset as input to the validation curve function (instead of X_mush and y_mush) to reduce computation time.**The initialized unfitted classifier object we'll be using is a Support Vector Classifier with radial basis kernel. So your first step is to create an `SVC` object with default parameters (i.e. `kernel='rbf', C=1`) and `random_state=0`. Recall that the kernel width of the RBF kernel is controlled using the `gamma` parameter. With this classifier, and the dataset in X_subset, y_subset, explore the effect of `gamma` on classifier accuracy by using the `validation_curve` function to find the training and test scores for 6 values of `gamma` from `0.0001` to `10` (i.e. `np.logspace(-4,1,6)`). Recall that you can specify what scoring metric you want validation_curve to use by setting the "scoring" parameter. In this case, we want to use "accuracy" as the scoring metric.For each level of `gamma`, `validation_curve` will fit 3 models on different subsets of the data, returning two 6x3 (6 levels of gamma x 3 fits per level) arrays of the scores for the training and test sets.Find the mean score across the three models for each level of `gamma` for both arrays, creating two arrays of length 6, and return a tuple with the two arrays.e.g.if one of your array of scores is array([[ 0.5, 0.4, 0.6], [ 0.7, 0.8, 0.7], [ 0.9, 0.8, 0.8], [ 0.8, 0.7, 0.8], [ 0.7, 0.6, 0.6], [ 0.4, 0.6, 0.5]]) it should then become array([ 0.5, 0.73333333, 0.83333333, 0.76666667, 0.63333333, 0.5])*This function should return one tuple of numpy arrays `(training_scores, test_scores)` where each array in the tuple has shape `(6,)`.* ###Code def answer_six(): from sklearn.svm import SVC from sklearn.model_selection import validation_curve # Your code here train_scores, test_scores = validation_curve(SVC(kernel='rbf', C=1, random_state=0), X_subset, y_subset, "gamma", np.logspace(-4,1,6),scoring="accuracy") train_scoresMean = np.mean(train_scores, axis=1) test_scoresMean = np.mean(test_scores, axis=1) return (train_scoresMean,test_scoresMean)# Your answer here #answer_six() ###Output _____no_output_____ ###Markdown Question 7Based on the scores from question 6, what gamma value corresponds to a model that is underfitting (and has the worst test set accuracy)? What gamma value corresponds to a model that is overfitting (and has the worst test set accuracy)? What choice of gamma would be the best choice for a model with good generalization performance on this dataset (high accuracy on both training and test set)? Hint: Try plotting the scores from question 6 to visualize the relationship between gamma and accuracy. 
Remember to comment out the import matplotlib line before submission.*This function should return one tuple with the degree values in this order: `(Underfitting, Overfitting, Good_Generalization)` Please note there is only one correct solution.* ###Code def answer_seven(): # Your code here values = np.logspace(-4,1,6) return (values[0],values[5],values[3])# Return your answer #answer_seven() #answer_one() #answer_two() #answer_three() #answer_four() #answer_five() #answer_six() #answer_seven() ###Output _____no_output_____
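###Markdown For reference, the Question 7 hint can be explored with a few lines like the following, kept commented out (matching the convention above) so the autograder is not affected: ###Code
# import matplotlib.pyplot as plt
# gammas = np.logspace(-4, 1, 6)
# train_scores, test_scores = answer_six()
# plt.semilogx(gammas, train_scores, label='train accuracy')
# plt.semilogx(gammas, test_scores, label='test accuracy')
# plt.xlabel('gamma'); plt.ylabel('accuracy'); plt.legend()
###Output _____no_output_____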
notebooks/Day19_csv.ipynb
###Markdown Read merged CSV file from Blob Storage with Azure Data Factory ###Code %python storage_account_name = "dbpystorage" storage_account_access_key = "YOUR_ACCOUNT_ACCESS_KEY" %python file_location = "wasbs://[email protected]/" file_type = "csv" %python spark.conf.set("fs.azure.account.key."+storage_account_name+".blob.core.windows.net",storage_account_access_key) %python df = spark.read.format(file_type).option("header","true").option("inferSchema", "true").load(file_location) df.createOrReplaceTempView("Day9data_view") %sql SELECT * FROM Day9data_view %sql SELECT COUNT(*) FROM Day9data_view ###Output _____no_output_____
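###Markdown If the merged data also needs to be persisted back to the same container, the reverse operation is a one-liner; a sketch — the `merged_parquet/` subfolder is a made-up destination, not part of the original pipeline: ###Code
%python
# write the Spark DataFrame back to the mounted blob container as parquet
df.write.format("parquet").mode("overwrite").save(file_location + "merged_parquet/")
###Output _____no_output_____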
notebooks/user/mabay/BasicModelling.ipynb
###Markdown Basic modelling in AuTuMNThis notebook provides a brief overview of the interface to running and interacting with models in AuTuMN ###Code # Start with imports # These should always live at the top of a notebook # Import commonly used external libraries - you won't always need these, but most of the time you will import numpy as np import pandas as pd # Import our project interface - this is the main method of accessing our models from autumn.tools.project import get_project ###Output _____no_output_____ ###Markdown ProjectsProjects encapsulate everything we need to interact with a given model. This includes parameters, calibration specifications, and tools for building and running the model itself ###Code # To open a project, we need to know the model name (ie the type of model we are using) # and the model region, which will have its own input data, calibration targets etc model_name = 'covid_19' model_region = 'malaysia' p = get_project(model_name, model_region) # Run the model with unmodified baseline parameters # This command returns a summer CompartmentalModel object that contains the completed run data # We will detail how to modify and interact with parameters in the next notebook params = p.param_set.baseline m = p.run_baseline_model(params) ###Output _____no_output_____ ###Markdown Model outputsThere are 2 kinds of output data in AuTuMN/summer models1. Outputs (model.outputs, model.get_outputs_df) The 'raw' model outputs - ie the value of every compartment at every timestep2. Derived outputs (model.derived_outputs, model.get_derived_outputs_df) Additional outputs that are computed after the model is run, which may be combinations of the other outputs, or have transformations applied etc ###Code # The raw model outputs are a numpy array of shape (times, compartments) # They can be accessed via model.outputs # For interactive use, we provide a much more friendly interface in the form of pandas DataFrames # You can interact with these as you would any other tabular data # Please refer to the pandas documentation for full details odf = m.get_outputs_df() odf # Here we plot a single compartment odf['late_exposedXagegroup_20Xclinical_hospital_non_icuXtracing_untracedXstrain_deltaXvaccination_unvaccinated'].plot() # In the following example, we compare the clinical ICU impacts of all vaccinated and unvaccinated compartments # Select a subset of columns (compartments) by filtering on their names columns = odf.columns columns = [c for c in columns if "clinical_icu" in c] c_vacc = [c for c in columns if "vaccination_vaccinated" in c] c_unvacc = [c for c in columns if "vaccination_unvaccinated" in c] # Construct a new DataFrame using our column selections, and plot it analysis_df = pd.DataFrame() analysis_df['vacc'] = odf[c_vacc].sum(axis=1) analysis_df['unvacc'] = odf[c_unvacc].sum(axis=1) analysis_df.plot(title='ICU impact by vaccination status') # Derived outputs function in exactly the same way do_df = m.get_derived_outputs_df() do_df[['accum_deaths','incidence']].plot() ###Output _____no_output_____
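###Markdown The same name-filtering trick generalizes to any stratification. For example, a hedged sketch of the total population by vaccination stratum across *all* compartments, not just ICU: ###Code
all_cols = odf.columns
# sum every compartment within each vaccination stratum at every timestep
strata_df = pd.DataFrame({
    "vacc": odf[[c for c in all_cols if "vaccination_vaccinated" in c]].sum(axis=1),
    "unvacc": odf[[c for c in all_cols if "vaccination_unvaccinated" in c]].sum(axis=1),
})
strata_df.plot(title="Population by vaccination status")
###Output _____no_output_____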
es.rcs.tfm/es.rcs.tfm.nlp/src/test/python/tfm-2.3.5/create_bert.ipynb
###Markdown Find models and source code here https://github.com/google-research/bert ###Code # 1. Base uncased url = 'https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-12_H-768_A-12.zip' name = 'uncased_L-12_H-768_A-12' download_and_convert(url, name, max_sentence_length = 128, batch_size = 32) # 2. Large uncased url = 'https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-24_H-1024_A-16.zip' name = 'uncased_L-24_H-1024_A-16' download_and_convert(url, name, max_sentence_length = 128, batch_size = 32) # 3. Base cased url = 'https://storage.googleapis.com/bert_models/2018_10_18/cased_L-12_H-768_A-12.zip' name = 'cased_L-12_H-768_A-12' download_and_convert(url, name, max_sentence_length = 128, batch_size = 32) # 4. Large cased url = 'https://storage.googleapis.com/bert_models/2018_10_18/cased_L-24_H-1024_A-16.zip' name = 'cased_L-24_H-1024_A-16' download_and_convert(url, name, max_sentence_length = 128, batch_size = 32) # 5. Multilingual Cased (New, recommended) url = 'https://storage.googleapis.com/bert_models/2018_11_23/multi_cased_L-12_H-768_A-12.zip' name = 'multi_cased_L-12_H-768_A-12' download_and_convert(url, name, max_sentence_length = 128, batch_size = 32) # 6. Whole Word Masking, large uncased url = 'https://storage.googleapis.com/bert_models/2019_05_30/wwm_uncased_L-24_H-1024_A-16.zip' name = 'wwm_uncased_L-24_H-1024_A-16' download_and_convert(url, name, max_sentence_length = 128, batch_size = 32) # 7. Whole Word Masking, large cased url = 'https://storage.googleapis.com/bert_models/2019_05_30/wwm_cased_L-24_H-1024_A-16.zip' name = 'wwm_cased_L-24_H-1024_A-16' download_and_convert(url, name, max_sentence_length = 128, batch_size = 32) print('All generated models are inside "models/" directory') def convert(name, max_sentence_length = 128, batch_size = 32, destination_model_folder = SPARKNLP_BERT_MODEL_PATH): model = create_model(BIOBERT_PATH + name, BERT_PATH + name + '_export_dir', max_sentence_length, batch_size) # Remove the export dir (the exported model itself could also be used directly) shutil.rmtree(BERT_PATH + name + '_export_dir') final_model_name = name + '_M-{}'.format(max_sentence_length) + '_B-{}'.format(batch_size) model.write().overwrite().save(os.path.join(destination_model_folder, final_model_name)) print("SPARKNLP BERT model has been saved: {}".format(destination_model_folder+'/'+final_model_name)) return model ###Output _____no_output_____ ###Markdown Find models in: GOOGLE: https://github.com/google-research/bertBIOBERT: https://github.com/naver/biobert-pretrained/releasesDepending on the model name, several things happen:- the string "uncased" is searched for in the name to determine whether the model is uncased- embeddings_resolver.py requires the model checkpoint to be named bert_model.ckpt internally ###Code name = 'biobert_v1.1_pubmed_bert' convert(name, max_sentence_length = 128, batch_size = 32) ###Output source_bert_folder: ../../../../es.rcs.tfm.corpus/datasets/biobert/biobert_v1.1_pubmed_bert is_cased: True lowercase: False INFO:tensorflow:Restoring parameters from ../../../../es.rcs.tfm.corpus/datasets/biobert/biobert_v1.1_pubmed_bert/bert_model.ckpt Number of hidden units: 768 Number of layers: 12 BERT model has been saved: ../../../../es.rcs.tfm.corpus/models/test/biobert_v1.1_pubmed_bert_M-128_B-32
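###Markdown Once saved, the converted model can be loaded back as a Spark NLP annotator. A sketch — the input/output column names are placeholders, and it assumes a running Spark NLP session: ###Code
from sparknlp.annotator import BertEmbeddings

# load the SparkNLP-format model produced by convert() above
bert = BertEmbeddings.load(
    SPARKNLP_BERT_MODEL_PATH + "/biobert_v1.1_pubmed_bert_M-128_B-32"
) \
    .setInputCols(["sentence", "token"]) \
    .setOutputCol("embeddings")
###Output _____no_output_____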
docs/_build/.doctrees/nbsphinx/contents/Protonation.ipynb
###Markdown Protonation ###Code pdbfile = msm.demo_systems.files['1tcd.pdb'] molecular_system = msm.convert(pdbfile) n_Hs = msm.get(molecular_system, selection='atom_type=="H"', n_atoms=True) print('{} hydrogens in the system'.format(n_Hs)) molecular_system = msm.add_missing_hydrogens(molecular_system, pH=7.4, engine='OpenMM') n_Hs = msm.get(molecular_system, selection='atom_type=="H"', n_atoms=True) print('{} hydrogens in the system'.format(n_Hs)) ###Output 4219 hydrogens in the system
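###Markdown Since protonation states are pH dependent, the same two calls can be looped over several pH values — a sketch using only functions already shown above: ###Code
# compare the number of hydrogens added at different pH values
for pH in [5.0, 7.4, 9.0]:
    system = msm.add_missing_hydrogens(msm.convert(pdbfile), pH=pH, engine='OpenMM')
    n_H = msm.get(system, selection='atom_type=="H"', n_atoms=True)
    print('pH {}: {} hydrogens'.format(pH, n_H))
###Output _____no_output_____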
Intro2PracDS_2020_04-1_IrisLinearRegression_spurious.ipynb
###Markdown Introduction to Practical Data Science (実践データ科学入門), 2020, Thursday period 4 — Session 4, part 1: Taking linear regression on Iris too far ###Code %matplotlib inline import matplotlib.pyplot as plt import numpy as np import pandas as pd from sklearn import datasets from sklearn import linear_model from sklearn.metrics import mean_squared_error, r2_score # Load the Iris dataset iris = datasets.load_iris() # Feature names of the data print(iris.feature_names) ###Output _____no_output_____ ###Markdown Linear regression with PW as the target variable Y and SL, SW, PL as explanatory variables ###Code # Prepare the linear regression models reg1 = linear_model.LinearRegression() reg2 = linear_model.LinearRegression() reg3 = linear_model.LinearRegression() # Set up the explanatory and target variables Y = iris.data[:, 3] # target variable is PW X1 = iris.data[:, 0:1] # case1: explanatory variable is SL only X2 = iris.data[:, 0:2] # case2: explanatory variables are SL and SW X3 = iris.data[:, 0:3] # case3: explanatory variables are SL, SW, PL # Fit the linear regressions reg1.fit(X1, Y) reg2.fit(X2, Y) reg3.fit(X3, Y) # Output predictions from the fitted regression models Y1p = reg1.predict(X1) Y2p = reg2.predict(X2) Y3p = reg3.predict(X3) # Training error MSE1 = mean_squared_error(Y, Y1p) MSE2 = mean_squared_error(Y, Y2p) MSE3 = mean_squared_error(Y, Y3p) # Display the MSEs print('case1: MSE = %f'%MSE1) print('case2: MSE = %f'%MSE2) print('case3: MSE = %f'%MSE3) ###Output _____no_output_____ ###Markdown Even if there were no clear causal relationship between the target and the explanatory variables — only a spurious correlation — the MSE still decreases as we add regressors.---In fact, the MSE decreases even if we add randomly generated data as extra explanatory variables. Because the data are randomly generated, they have no causal relationship with PW at all, and yet the MSE goes down. ###Code # Number of regressors to add cols_max = 200 # Prepare an array to store the MSEs MSEs = np.zeros(cols_max+3) # The first three are the MSEs already computed above MSEs[0] = MSE1 MSEs[1] = MSE2 MSEs[2] = MSE3 # Set up the linear regression model regR = linear_model.LinearRegression() # Prepare the array of regressors XX = np.copy(X3) for cols in range(cols_max): # Draw uniform random numbers R = np.random.rand(Y.size, 1) # must be shaped as a (150 x 1) matrix # Append the random numbers as a new explanatory column XX = np.append(XX, R, axis=1) # axis=1 appends a column # Fitting and prediction can be done in one line YRp = regR.fit(XX, Y).predict(XX) # Store the MSE value MSEs[cols+3] = mean_squared_error(Y, YRp) # Display the MSE print('number of explanatory variables %3d: MSE = %5.3e R^2 = %f'%(cols+4, mean_squared_error(Y, YRp), r2_score(Y, YRp))) R2 = 1- np.mean((Y-YRp)**2) / np.var(Y) R2a = ( (Y.size-1)*R2 - (cols+3) ) / (Y.size-(cols+3)-1) print(R2, R2a) print() # Plot of the MSE # x-axis: number of regressors, y-axis: mean squared error of the fit fig = plt.figure(figsize=(12, 8)) ax = plt.axes() ax.set_xlabel("number of explanatory variables", size=24) ax.set_ylabel("MSE", size=24) ax.tick_params(labelsize=20) ax.plot(np.arange(1, cols_max+4), MSEs, linewidth=3) # Log-scale plot of the MSE # x-axis: number of regressors, y-axis: mean squared error of the fit fig2 = plt.figure(figsize=(12, 8)) ax2 = plt.axes() ax2.set_xlabel("number of explanatory variables", size=24) ax2.set_ylabel("MSE", size=24) ax2.tick_params(labelsize=20) ax2.plot(np.arange(1, cols_max+4), MSEs, linewidth=3) ax2.set_yscale('log') ###Output _____no_output_____ ###Markdown If all we wanted were to reduce the MSE, we could get away with adding completely meaningless variables. Let's actually inspect the fitted model's output. ###Code fig3 = plt.figure(figsize=(12, 8)) ax3 = plt.axes() ax3.set_xlabel('SL', size=24) ax3.set_ylabel('PW', size=24) ax3.tick_params(labelsize=20) ax3.scatter(XX[:, 0], Y, s=200, label='true value') ax3.scatter(XX[:, 0], YRp, s=50, label='prediction') ax3.legend(fontsize=20) ###Output _____no_output_____ ###Markdown The model's output matches the true values almost disturbingly well.--- Let's also look at the generalization errorThat the outputs match the true values on the training data only shows that the fitting succeeded. To validate a statistical model, we must also check whether its outputs are correct on data that were not used for training. 
###Code # Prepare an array to store the MSEs MSEsTest = np.zeros(cols_max+3) # Set up the target variable Ytrain = np.append(iris.data[0::3, 3], iris.data[1::3, 3]) Ytest = iris.data[2::3, 3] ### Explanatory variable: SL ### Xtrain = np.append(iris.data[0::3, 0:1], iris.data[1::3, 0:1], axis=0) Xtest = iris.data[2::3, 0:1] # Fit on the training data and predict on the test data Ypred = regR.fit(Xtrain, Ytrain).predict(Xtest) # Generalization error MSEsTest[0] = mean_squared_error(Ytest, Ypred) print('number of explanatory variables %3d: MSE = %f'%(1, MSEsTest[0])) ### Explanatory variables: SL and SW ### Xtrain = np.append(iris.data[0::3, 0:2], iris.data[1::3, 0:2], axis=0) Xtest = iris.data[2::3, 0:2] # Fit on the training data and predict on the test data Ypred = regR.fit(Xtrain, Ytrain).predict(Xtest) # Generalization error MSEsTest[1] = mean_squared_error(Ytest, Ypred) print('number of explanatory variables %3d: MSE = %f'%(2, MSEsTest[1])) ### Explanatory variables: SL, SW and PL ### Xtrain = np.append(iris.data[0::3, 0:3], iris.data[1::3, 0:3], axis=0) Xtest = iris.data[2::3, 0:3] # Fit on the training data and predict on the test data Ypred = regR.fit(Xtrain, Ytrain).predict(Xtest) # Generalization error MSEsTest[2] = mean_squared_error(Ytest, Ypred) print('number of explanatory variables %3d: MSE = %f'%(3, MSEsTest[2])) for cols in range(cols_max): # Append a column of uniform random numbers to the explanatory variables R = np.random.rand(Ytrain.size, 1) Xtrain = np.append(Xtrain, R, axis=1) R = np.random.rand(Ytest.size, 1) Xtest = np.append(Xtest, R, axis=1) # Fit on the training data and predict on the test data Ypred = regR.fit(Xtrain, Ytrain).predict(Xtest) # Generalization error MSEsTest[cols+3] = mean_squared_error(Ytest, Ypred) print('number of explanatory variables %3d: MSE = %f'%(cols+4, MSEsTest[cols+3])) # Plot of the MSE # x-axis: number of regressors, y-axis: mean squared error fig = plt.figure(figsize=(12, 8)) ax = plt.axes() ax.set_xlabel("number of explanatory variables", size=24) ax.set_ylabel("MSE", size=24) ax.tick_params(labelsize=20) ax.plot(np.arange(1, cols_max+4), MSEs, linewidth=3, label='training error') ax.plot(np.arange(1, cols_max+4), MSEsTest, linewidth=3, label='test error') # Log-scale plot of the MSE # x-axis: number of regressors, y-axis: mean squared error fig = plt.figure(figsize=(12, 8)) ax = plt.axes() ax.set_xlabel("number of explanatory variables", size=24) ax.set_ylabel("MSE", size=24) ax.tick_params(labelsize=20) ax.plot(np.arange(1, cols_max+4), MSEs, linewidth=3, label='training error') ax.plot(np.arange(1, cols_max+4), MSEsTest, linewidth=3, label='test error') ax.set_yscale('log') ax.legend(fontsize=20) print('number of regressors minimizing the training error = %d'%(np.argmin(MSEs)+1)) print('number of regressors minimizing the generalization error = %d'%(np.argmin(MSEsTest)+1)) ###Output _____no_output_____ ###Markdown The training error can be driven almost to zero by piling on random regressors. The generalization error, on the other hand, does not go down. 
###Code fig4 = plt.figure(figsize=(12, 8)) ax4 = plt.axes() ax4.set_xlabel('SL', size=24) ax4.set_ylabel('PW', size=24) ax4.tick_params(labelsize=20) ax4.scatter(Xtest[:, 0], Ytest, s=200, label='true value') ax4.scatter(Xtest[:, 0], Ypred, s=50, label='prediction') ax4.legend(fontsize=20) reg = linear_model.LinearRegression() Ypred1 = reg.fit(Xtrain[:, 0:3], Ytrain).predict(Xtest[:, 0:3]) print('generalization error of the %3d-variable model = %f'%(cols_max+4, mean_squared_error(Ytest, Ypred))) print('generalization error of the model using SL, SW, PL = %f'%mean_squared_error(Ytest, Ypred1)) fig4 = plt.figure(figsize=(12, 8)) ax4 = plt.axes() ax4.set_xlabel('SL', size=24) ax4.set_ylabel('PW', size=24) ax4.tick_params(labelsize=20) ax4.scatter(Xtest[:, 0], Ytest, s=200, label='true value') ax4.scatter(Xtest[:, 0], Ypred1, s=50, label='prediction') ax4.legend(fontsize=20) ###Output _____no_output_____ ###Markdown The generalization error is not much different from that of the three-variable linear model using SL, SW, PL. Focusing only on the training error is clearly not an appropriate way to build a statistical model. Incidentally, the coefficient of determination$$R^2 = 1 - \frac{\sum_{i=1}^N (y_i - \hat y_i)^2}{\sum_{i=1}^N (y_i - \bar y)^2} = 1 - \frac{\mathrm{MSE}}{\mathrm{Var}(y)}$$approaches 1 as the MSE shrinks. Here $y_i$ is the $i$-th target value, $\hat y_i$ is the model's prediction for the $i$-th data point, and $\bar y$, $\mathrm{Var}(y)$ are the mean and sample variance of the targets $\{y_i\}_{i=1}^N$. It is therefore dangerous to evaluate the fit to the training data using the coefficient of determination. There is also a variant in which each sum of squares is divided by its degrees of freedom rather than by the number of data points:$$\tilde R^2 = 1 - \frac{\frac1{N-p-1} \sum_{i=1}^N (y_i - \hat y_i)^2}{\frac1{N-1} \sum_{i=1}^N (y_i - \bar y)^2} = 1 - \frac{\frac{N-1}{N-p-1}\mathrm{MSE}}{\tilde{\mathrm{Var}}(y)}$$This $\tilde R^2$ is called the adjusted coefficient of determination; $p$ is the number of regressors and $\tilde{\mathrm{Var}}(y)$ is the unbiased variance of the targets $\{y_i\}_{i=1}^N$. Since$$\begin{align}\tilde R^2 & = 1 - \frac{N-1}{N-p-1} \frac{\sum_{i=1}^N (y_i - \hat y_i)^2}{\sum_{i=1}^N (y_i - \bar y)^2} = \frac{N-1}{N-p-1} \left( \frac{N-p-1}{N-1} - \frac{\sum_{i=1}^N (y_i - \hat y_i)^2}{\sum_{i=1}^N (y_i - \bar y)^2} \right) \\& = \frac{N-1}{N-p-1} \left( 1 - \frac{\sum_{i=1}^N (y_i - \hat y_i)^2}{\sum_{i=1}^N (y_i - \bar y)^2}\right) - \frac{p}{N-p-1} = \frac{(N-1) R^2 - p}{N-p-1}\end{align}$$we have $\tilde R^2 \le 1$ (negative values are possible). Note that the coefficient of determination $R^2$ as defined above is non-negative for regression models that include an intercept term, but can be negative for regression models forced through the origin, because the decomposition of the sum of squares no longer holds (check this yourself). ###Code R2 = np.zeros(cols_max+3) R2a = np.zeros(cols_max+3) for cols in range(cols_max+3): # Fit on the training data and predict on the test data Ypred = regR.fit(Xtrain[:, :cols+1], Ytrain).predict(Xtest[:, :cols+1]) # Generalization error #MSEsTest[cols+3] = mean_squared_error(Ytest, Ypred) print('number of explanatory variables %3d: R^2 = %f'%(cols+1, r2_score(Ytest, Ypred))) R2 = 1- np.mean((Ytest-Ypred)**2) / np.var(Ytest) R2a = ( (Ytest.size-1)*R2 - (cols+1) ) / (Ytest.size-(cols+1)-1) print(R2, R2a) print() ###Output _____no_output_____ ###Markdown Exercise Another dataset that ships with scikit-learn — the diabetes patients' clinical records — is quite hard to fit. Fit it anyway, by polynomial regression, multiple regression, or any method you like. ###Code # Load the diabetes dataset diabetes = datasets.load_diabetes() # Feature names of the data print(diabetes.feature_names) ###Output _____no_output_____ ###Markdown - age: age- sex: sex- bmi: BMI = weight (kg) / height (m)^2- bp: average blood pressure- s1: blood serum measurement 1 (tc)- s2: blood serum measurement 2 (ldl)- s3: blood serum measurement 3 (hdl)- s4: blood serum measurement 4 (tch)- s5: blood serum measurement 5 (ltg)- s6: blood serum measurement 6 (glu)The target variable is the disease progression one year later. ###Code # Load the dataset Y = diabetes.target X = diabetes.data[:, 2:3] # e.g. bmi plt.scatter(X, Y) ###Output _____no_output_____
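###Markdown For reuse, the adjusted R² computed inline above can be wrapped in a small helper — a sketch that simply mirrors the formulas in this notebook: ###Code
def adjusted_r2(y_true, y_pred, p):
    # R^2 and its degrees-of-freedom-adjusted version for p regressors
    n = len(y_true)
    r2 = 1 - np.mean((y_true - y_pred)**2) / np.var(y_true)
    return r2, ((n - 1) * r2 - p) / (n - p - 1)

# e.g. for the SL, SW, PL model evaluated above (p = 3)
print(adjusted_r2(Ytest, Ypred1, 3))
###Output _____no_output_____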
Basic_Neural_Nets.ipynb
###Markdown Introduction to Keras Keras is an API designed for human beings, not machines. Keras follows best practices for reducing cognitive load: it offers consistent & simple APIs, it minimizes the number of user actions required for common use cases, and it provides clear & actionable error messages. It also has extensive documentation and developer guides. Keras is a wrapper around a backend library, so a backend like TensorFlow must be provided. A simple neural network! You will build a network that takes two numbers as an input, passes them through a hidden layer of 10 neurons, and finally outputs a single number. ###Code # Import the Sequential model and Dense layer from keras.models import Sequential from keras.layers import Dense # Create a Sequential model model = Sequential() # Add an input layer and a hidden layer with 10 neurons model.add(Dense(10, input_shape=(2,), activation="relu")) # Add a 1-neuron output layer model.add(Dense(1)) # Summarise your model model.summary() ###Output Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= dense (Dense) (None, 10) 30 _________________________________________________________________ dense_1 (Dense) (None, 1) 11 ================================================================= Total params: 41 Trainable params: 41 Non-trainable params: 0 _________________________________________________________________ ###Markdown Analyzing the number of parameters: dense: 2 * 10 + 1 * 10 = 20 + 10 = 30 dense_1: 10 * 1 + 1 = 11 ###Code from tensorflow.keras.utils import plot_model plot_model(model, to_file='basic-net.png', show_shapes=True) !pip install ann_visualizer from ann_visualizer.visualize import ann_viz ann_viz(model, title="Artificial Neural network - Model Visualization") ###Output _____no_output_____
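###Markdown The cells above only *define* the network — nothing has been trained yet. A minimal sketch of compiling and fitting it; the toy task (learning to add the two inputs) is invented purely for illustration: ###Code
import numpy as np

# toy data: the target is the sum of the two inputs
X = np.random.rand(1000, 2)
y = X.sum(axis=1)

# compile with a loss and optimizer, then train
model.compile(optimizer='adam', loss='mse')
model.fit(X, y, epochs=10, batch_size=32, verbose=0)

print(model.predict(np.array([[0.3, 0.4]])))  # should approach 0.7
###Output _____no_output_____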
Analysis/8.0_MachineLearningAnalysis.ipynb
###Markdown Support Vector Regression Load Data ###Code df = pd.read_csv('df_pos.csv', index_col=0) df ###Output _____no_output_____ ###Markdown Data Pre-processing ###Code X = df.drop(['value', 'player_name'], axis=1).astype(float) y = df['value'] y = np.array(y).reshape(-1, 1) from sklearn.preprocessing import StandardScaler scaler = StandardScaler() X = scaler.fit_transform(X) from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42) ###Output _____no_output_____ ###Markdown Train model ###Code from sklearn.svm import SVR # linear', 'poly', 'rbf', 'sigmoid', 'precomputed # but due to non-linear condition, polynomial or gaussian # regressor = SVR(kernel='polynomial') regressor = SVR(kernel='poly') regressor.fit(X_train,y_train) ###Output C:\Users\Gk\anaconda3\lib\site-packages\sklearn\utils\validation.py:760: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples, ), for example using ravel(). y = column_or_1d(y, warn=True) ###Markdown Prediction ###Code y_pred = regressor.predict(X_test) y_pred ###Output _____no_output_____ ###Markdown Model Evaluation ###Code from sklearn.metrics import r2_score, mean_squared_error mse = mean_squared_error(y_test, y_pred) r2 = r2_score(y_test, y_pred) rmse = np.sqrt(mse) r2, mse, rmse ###Output _____no_output_____ ###Markdown Random Forest Regression ###Code from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestRegressor df = pd.read_csv('df_pos.csv', index_col=0) X = df.drop(['value', 'player_name'], axis=1).astype(float) y = df['value'] y = np.array(y).reshape(-1, 1) from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) rf = RandomForestRegressor() rf.fit(X_train, y_train) y_pred = rf.predict(X_test) y_pred from sklearn.metrics import r2_score, mean_squared_error mse = mean_squared_error(y_test, y_pred) r2 = r2_score(y_test, y_pred) rmse = np.sqrt(mse) r2, mse, rmse ###Output _____no_output_____ ###Markdown PCA + Random Forest ###Code df_pos = pd.read_csv(r'C:\Users\Gk\Documents\dev\data\LinearRegression_Football_data\df_pos.csv', encoding='utf-8-sig', index_col=0) df_pos.position = df_pos.position.round() df_pos.position.unique() df_atk = df_pos[df_pos.position == 4].append(df_pos[df_pos.position == 2]) df_atk.reset_index(drop=True) df_for_pca = df_atk[['position', 'shots_total', 'shots_on', 'goals_total', 'goals_conceded', 'goals_assists', 'passes_key', \ 'tackles_total', 'tackles_blocks', 'tackles_interceptions', 'duels_total', 'duels_won', 'dribbles_attempts', \ 'dribbles_success', 'penalty_saved', 'games_appearences', 'substitutes_in', 'substitutes_bench']] len(df_for_pca.columns) from sklearn.preprocessing import MinMaxScaler scaler = MinMaxScaler() data_rescaled = scaler.fit_transform(df_for_pca) from sklearn.decomposition import PCA pca = PCA().fit(data_rescaled) import matplotlib.pyplot as plt plt.rcParams["figure.figsize"] = (12,6) fig, ax = plt.subplots() xi = np.arange(1, 19, step=1) y = np.cumsum(pca.explained_variance_ratio_) plt.ylim(0.0,1.1) plt.plot(xi, y, marker='o', linestyle='--', color='b') plt.xlabel('Number of Components') plt.xticks(np.arange(0, 19, step=1)) #change from 0-based array index to 1-based human-readable label plt.ylabel('Cumulative variance (%)') plt.title('The number of components needed to explain variance') plt.axhline(y=0.95, 
color='r', linestyle='-') plt.text(0.5, 0.85, '95% cut-off threshold', color = 'red', fontsize=16) ax.grid(axis='x') plt.show() data = PCA(n_components=8).fit_transform(df_for_pca) df_pca_1 = pd.DataFrame(data, columns = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']) df_pca_1 df_pca_1.corr()[df_pca_1.corr()>0.7] pca_cols = list(df_for_pca.columns) npca_cols = df_atk.columns.tolist() npca_features = [item for item in npca_cols if item not in pca_cols] len(npca_features) df_ols = pd.concat([df_atk[npca_features].reset_index(drop=True), df_pca_1.reset_index(drop=True)], axis=1) df_ols = df_ols.drop('player_name', axis=1) df_ols X = df_ols.drop('value', axis=1).astype(float) y = df_ols['value'] y = np.array(y).reshape(-1, 1) from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) rf = RandomForestRegressor() rf.fit(X_train, y_train) y_pred = rf.predict(X_test) y_pred from sklearn.metrics import r2_score, mean_squared_error mse = mean_squared_error(y_test, y_pred) r2 = r2_score(y_test, y_pred) rmse = np.sqrt(mse) r2, mse, rmse ###Output _____no_output_____ ###Markdown PCA + Random Forest_All Data ###Code df = pd.read_csv('df_pos.csv', index_col=0) df df.corr()[df.corr() > 0.7].to_csv('pcarf_corr.csv', encoding='utf-8') df_for_pca = df[['height', 'weight', 'shots_total', 'shots_on', 'goals_total', 'goals_conceded', 'goals_assists', 'passes_key', 'duels_total', 'duels_won', 'dribbles_attempts', 'dribbles_success', 'penalty_saved', 'games_appearences', 'substitutes_in', 'substitutes_out', 'substitutes_bench']] len(df_for_pca.columns) from sklearn.preprocessing import MinMaxScaler scaler = MinMaxScaler() data_rescaled = scaler.fit_transform(df_for_pca) from sklearn.decomposition import PCA pca = PCA().fit(data_rescaled) import matplotlib.pyplot as plt plt.rcParams["figure.figsize"] = (12,6) fig, ax = plt.subplots() xi = np.arange(1, 18, step=1) y = np.cumsum(pca.explained_variance_ratio_) plt.ylim(0.0,1.1) plt.plot(xi, y, marker='o', linestyle='--', color='b') plt.xlabel('Number of Components') plt.xticks(np.arange(0, 18, step=1)) #change from 0-based array index to 1-based human-readable label plt.ylabel('Cumulative variance (%)') plt.title('The number of components needed to explain variance') plt.axhline(y=0.95, color='r', linestyle='-') plt.text(0.5, 0.85, '95% cut-off threshold', color = 'red', fontsize=16) ax.grid(axis='x') plt.show() data = PCA(n_components=8).fit_transform(df_for_pca) df_pca_1 = pd.DataFrame(data, columns = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']) df_pca_1 df_pca_1.corr()[df_pca_1.corr()>0.7] pca_cols = list(df_for_pca.columns) npca_cols = df.columns.tolist() npca_features = [item for item in npca_cols if item not in pca_cols] len(npca_features) df_rf = pd.concat([df[npca_features].reset_index(drop=True), df_pca_1.reset_index(drop=True)], axis=1) df_rf = df_rf.drop('player_name', axis=1) df_rf X = df_rf.drop('value', axis=1).astype(float) y = df_rf['value'] y = np.array(y).reshape(-1, 1) from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) rf = RandomForestRegressor() rf.fit(X_train, y_train) y_pred = rf.predict(X_test) y_pred from sklearn.metrics import r2_score, mean_squared_error mse = mean_squared_error(y_test, y_pred) r2 = r2_score(y_test, y_pred) rmse = np.sqrt(mse) r2, mse, rmse db_connection_str = 'mysql+pymysql://root:[email protected]/ML' db_connection = create_engine(db_connection_str) 
filtered_df = pd.read_sql('SELECT * FROM filtered_df',con=db_connection) filtered_df.tail(2) pd.options.display.max_columns = None filtered_df a = filtered_df[filtered_df['games_minutes_played'] >= 900] a a.groupby(['player_name']).mean() ###Output _____no_output_____
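###Markdown One likely reason the SVR scores so poorly above is that only X was standardized; SVR with the default `epsilon=0.1` is very sensitive to the scale of the target. A hedged sketch that also scales y, reusing the split from the SVR section at the top of the notebook: ###Code
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR
from sklearn.metrics import r2_score

# standardize the target, fit on the scaled target, then invert for scoring
y_scaler = StandardScaler()
y_train_scaled = y_scaler.fit_transform(y_train).ravel()

svr = SVR(kernel='rbf').fit(X_train, y_train_scaled)
y_pred = y_scaler.inverse_transform(svr.predict(X_test).reshape(-1, 1))
print(r2_score(y_test, y_pred))
###Output _____no_output_____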
day11_DL_optimization_and_regularization/11_PyTorch_and_Dataloaders.ipynb
###Markdown 10: PyTorch practice, hints and Dataloaders Credits:* First part is based on YSDA [Practical RL course week04 materials](https://github.com/yandexdataschool/Practical_RL/tree/master/week04_%5Brecap%5D_deep_learning).* Second part is based on PyTorch official tutorials and [this kaggle kernel](https://www.kaggle.com/pinocookie/pytorch-dataset-and-dataloader)* Third part is based on PyTorch tutorial by [Stanford CS 231n course](http://cs231n.stanford.edu) ![img](https://pytorch.org/tutorials/_static/pytorch-logo-dark.svg)__This notebook__ will teach you to use pytorch low-level core. You can install it [here](http://pytorch.org/).__Pytorch feels__ differently than other frameworks (like tensorflow/theano) on almost every level. TensorFlow makes your code live in two "worlds" simultaneously: symbolic graphs and actual tensors. First you declare a symbolic "recipe" of how to get from inputs to outputs, then feed it with actual minibatches of data. In pytorch, __there's only one world__: all tensors have a numeric value.You compute outputs on the fly without pre-declaring anything. The code looks exactly as in pure numpy with one exception: pytorch computes gradients for you. And can run stuff on GPU. And has a number of pre-implemented building blocks for your neural nets. [And a few more things.](https://medium.com/towards-data-science/pytorch-vs-tensorflow-spotting-the-difference-25c75777377b)Let's dive into it! ###Code import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt import torch from torch.utils.data import DataLoader, Dataset, Subset import torchvision from torchvision import transforms ###Output _____no_output_____ ###Markdown Task 1: Tensormancy__1.1 The [_disclaimer_](https://gist.githubusercontent.com/justheuristic/e2c1fa28ca02670cabc42cacf3902796/raw/fd3d935cef63a01b85ed2790b5c11c370245cbd7/stddisclaimer.h)__Let's write another function, this time in polar coordinates:$$\rho(\theta) = (1 + 0.9 \cdot cos (6 \cdot \theta) ) \cdot (1 + 0.01 \cdot cos(24 \cdot \theta)) \cdot (0.5 + 0.05 \cdot cos(200 \cdot \theta)) \cdot (10 + sin(10 \cdot \theta))$$Then convert it into cartesian coordinates ([howto](http://www.mathsisfun.com/polar-cartesian-coordinates.html)) and plot the results.Use torch tensors only: no lists, loops, numpy arrays, etc. ###Code theta = torch.linspace(-np.pi, np.pi, steps=1000) # compute rho(theta) as per formula above rho = ### YOUR CODE HERE # Now convert polar (rho, theta) pairs into cartesian (x,y) to plot them. 
x = ### YOUR CODE HERE y = ### YOUR CODE HERE plt.figure(figsize=(6, 6)) plt.fill(x.numpy(), y.numpy(), color='red') plt.grid() ###Output _____no_output_____ ###Markdown Task 2: Using the Dataloader ###Code from torch import nn from torch.nn import functional as F # !wget https://raw.githubusercontent.com/girafe-ai/intro-to-ml-harbour/master/day11_DL_optimization_and_regularization/notmnist.py from notmnist import load_notmnist X_train, y_train, X_test, y_test = load_notmnist() class DatasetMNIST(Dataset): def __init__(self, path='./notMNIST_small', letters='ABCDEFGHIJ', transform=None): self.data, self.labels, _ ,_ = load_notmnist(path=path, letters=letters, test_size=0) self.transform = transform def __len__(self): return len(self.data) def __getitem__(self, index): # load image as ndarray type (Height * Width * Channels) # be carefull for converting dtype to np.uint8 [Unsigned integer (0 to 255)] # in this example, i don't use ToTensor() method of torchvision.transforms # so you can convert numpy ndarray shape to tensor in PyTorch (H, W, C) --> (C, H, W) image = self.data[index].transpose(1, 2, 0) label = self.labels[index] if self.transform is not None: image = self.transform(image) return image, label full_dataset = DatasetMNIST('./notMNIST_small', 'AB', transform=None) # we can access and get data with index by __getitem__(index) img, lab = full_dataset.__getitem__(0) print(img.shape) print(type(img)) a = torchvision.transforms.ToTensor() a(img).shape inds = np.random.randint(len(full_dataset), size=2) for i in range(2): plt.subplot(1, 2, i + 1) plt.imshow(full_dataset[inds[i]][0].reshape([28,28])) plt.title(str(full_dataset[inds[i]][1])) ###Output _____no_output_____ ###Markdown To the DataLoader ###Code train_loader = DataLoader(full_dataset, batch_size=8, shuffle=True) ###Output _____no_output_____ ###Markdown We can use dataloader as iterator by using iter() function. ###Code train_iter = iter(train_loader) print(type(train_iter)) ###Output <class 'torch.utils.data.dataloader._SingleProcessDataLoaderIter'> ###Markdown We can look at images and labels of batch size by extracting data `.next()` method. ###Code images, labels = train_iter.next() print('images shape on batch size = {}'.format(images.size())) print('labels shape on batch size = {}'.format(labels.size())) # make grid takes tensor as arg # tensor : (batchsize, channels, height, width) grid = torchvision.utils.make_grid(images.permute([0, 3, 1, 2])) plt.imshow(grid.numpy().transpose((1, 2, 0))) plt.axis('off') plt.title(labels.numpy()); ###Output Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers). 
/Users/pepper/installed/miniconda3/lib/python3.7/site-packages/matplotlib/text.py:1150: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison if s != self._text: ###Markdown And now with transformations: ###Code train_dataset_with_transform = DatasetMNIST( transform=torchvision.transforms.ToTensor() ) img, lab = train_dataset_with_transform.__getitem__(0) print('image shape at the first row : {}'.format(img.size())) train_loader_tr = DataLoader(train_dataset_with_transform, batch_size=8, shuffle=True) train_iter_tr = iter(train_loader_tr) print(type(train_iter_tr)) images, labels = train_iter_tr.next() print('images shape on batch size = {}'.format(images.size())) print('labels shape on batch size = {}'.format(labels.size())) grid = torchvision.utils.make_grid(images) plt.imshow(grid.numpy().transpose((1, 2, 0))) plt.axis('off') plt.title(labels.numpy()); ###Output Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers). ###Markdown Composing several transformations If you want to apply data augmentation, you chain several transformations together in a list using `torchvision.transforms.Compose````class Compose(object): """Composes several transforms together. Args: transforms (list of ``Transform`` objects): list of transforms to compose. Example: >>> transforms.Compose([ >>> transforms.CenterCrop(10), >>> transforms.ToTensor(), >>> ]) """ def __init__(self, transforms): self.transforms = transforms def __call__(self, img): for t in self.transforms: img = t(img) return img def __repr__(self): format_string = self.__class__.__name__ + '(' for t in self.transforms: format_string += '\n' format_string += ' {0}'.format(t) format_string += '\n)' return format_string```Its `__call__` method applies each transform to the image in order. 
###Code class Flatten(): def __call__(self, pic): return pic.flatten() def __repr__(self): return self.__class__.__name__ + '()' a = Flatten() a(img).shape new_transform = torchvision.transforms.Compose([ torchvision.transforms.ToTensor(), Flatten() ]) ###Output _____no_output_____ ###Markdown Putting all together ###Code import time from IPython.display import clear_output # use GPU if available device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") device def subset_ind(dataset, ratio: float): return ### YOUR CODE HERE dataset = DatasetMNIST( './notMNIST_small', # 'AB', transform=new_transform ) shrink_inds = subset_ind(dataset, 0.2) dataset = Subset(dataset, shrink_inds) print(f'\n\n dataset size: {len(dataset)}, labels: {np.unique(dataset.dataset.labels)}') val_size = 0.2 val_inds = subset_ind(dataset, val_size) train_dataset = Subset(dataset, [i for i in range(len(dataset)) if i not in val_inds]) val_dataset = Subset(dataset, val_inds) print(f' training size: {len(train_dataset)}\nvalidation size: {len(val_dataset)}') batch_size = 32 train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True) val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=True) train_iter = iter(train_loader) print(type(train_iter)) images, labels = train_iter.next() print('images shape on batch size = {}'.format(images.size())) print('labels shape on batch size = {}'.format(labels.size())) loss_func = nn.CrossEntropyLoss() # create network again just in case model = nn.Sequential( nn.Linear(784, 10), nn.Softmax(dim=1) ) model.to(device, torch.float32) opt = torch.optim.Adam(model.parameters(), lr=1e-3) def train_model(model, train_loader, val_loader, loss_fn, opt, n_epochs: int): ''' model: нейросеть для обучения, train_loader, val_loader: загрузчики данных loss_fn: целевая метрика (которую будем оптимизировать) opt: оптимизатор (обновляет веса нейросети) n_epochs: кол-во эпох, полных проходов датасета ''' train_loss = [] val_loss = [] val_accuracy = [] for epoch in range(n_epochs): ep_train_loss = [] ep_val_loss = [] ep_val_accuracy = [] start_time = time.time() model.train(True) # enable dropout / batch_norm training behavior for X_batch, y_batch in train_loader: # move data to target device ### YOUR CODE HERE # train on batch: compute loss, calc grads, perform optimizer step and zero the grads ### YOUR CODE HERE ep_train_loss.append(loss.item()) model.train(False) # disable dropout / use averages for batch_norm with torch.no_grad(): for X_batch, y_batch in val_loader: # move data to target device ### YOUR CODE HERE # compute predictions ### YOUR CODE HERE ep_val_loss.append(### YOUR CODE HERE) y_pred = ### YOUR CODE HERE ep_val_accuracy.append(### YOUR CODE HERE) # print the results for this epoch: print(f'Epoch {epoch + 1} of {n_epochs} took {time.time() - start_time:.3f}s') train_loss.append(np.mean(ep_train_loss)) val_loss.append(np.mean(ep_val_loss)) val_accuracy.append(np.mean(ep_val_accuracy)) print(f"\t training loss: {train_loss[-1]:.6f}") print(f"\tvalidation loss: {val_loss[-1]:.6f}") print(f"\tvalidation accuracy: {100 * val_accuracy[-1]:.1f}") return train_loss, val_loss, val_accuracy n_epochs = 30 train_loss, val_loss, val_accuracy = train_model(model, train_loader, val_loader, loss_func, opt, n_epochs) def plot_train_process(train_loss, val_loss, val_accuracy): fig, axes = plt.subplots(1, 2, figsize=(15, 5)) axes[0].set_title('Loss') axes[0].plot(train_loss, label='train') axes[0].plot(val_loss, label='validation') axes[0].legend() 
axes[1].set_title('Validation accuracy') axes[1].plot(val_accuracy) plot_train_process(train_loss, val_loss, val_accuracy) ###Output _____no_output_____ ###Markdown Real network ###Code # create network again just in case model = nn.Sequential( nn.Linear(784, 500), nn.ReLU(), nn.Linear(500, 200), nn.ReLU(), nn.Linear(200, 10), nn.Softmax(dim=1) ) model.to(device, torch.float32) opt = torch.optim.Adam(model.parameters(), lr=1e-3) n_epochs = 30 train_loss, val_loss, val_accuracy = train_model(model, train_loader, val_loader, loss_func, opt, n_epochs) plot_train_process(train_loss, val_loss, val_accuracy) ###Output _____no_output_____ ###Markdown Overfit!!! ###Code # create network again just in case model = nn.Sequential( nn.Linear(784, 600), nn.ReLU(), nn.Linear(600, 500), nn.ReLU(), nn.Linear(500, 400), nn.ReLU(), nn.Linear(500, 200), nn.ReLU(), nn.Linear(200, 10), nn.Softmax(dim=1) ) model.to(device, torch.float32) opt = torch.optim.Adam(model.parameters(), lr=1e-3) n_epochs = 30 train_loss, val_loss, val_accuracy = train_model(model, train_loader, val_loader, loss_func, opt, n_epochs) plot_train_process(train_loss, val_loss, val_accuracy) ###Output _____no_output_____ ###Markdown Your turnTry to add some additional transformations (e.g. random crop, rotation etc.) and train your model! Dropout try Batchnorm try 3. Save the model (model checkpointing)Now we have trained a model! Obviously we do not want to retrain the model everytime we want to use it. Plus if you are training a super big model, you probably want to save checkpoint periodically so that you can always fall back to the last checkpoint in case something bad happened or you simply want to test models at different training iterations.Model checkpointing is fairly simple in PyTorch. First, we define a helper function that can save a model to the disk ###Code def save_checkpoint(checkpoint_path, model, optimizer): # state_dict: a Python dictionary object that: # - for a model, maps each layer to its parameter tensor; # - for an optimizer, contains info about the optimizer’s states and hyperparameters used. state = { 'state_dict': model.state_dict(), 'optimizer' : optimizer.state_dict()} torch.save(state, checkpoint_path) print('model saved to %s' % checkpoint_path) def load_checkpoint(checkpoint_path, model, optimizer): state = torch.load(checkpoint_path) model.load_state_dict(state['state_dict']) optimizer.load_state_dict(state['optimizer']) print('model loaded from %s' % checkpoint_path) # create a brand new model model = Net().to(device) optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9) # Testing -- you should get a pretty poor performance since the model hasn't learned anything yet. test() ###Output _____no_output_____ ###Markdown Define a training loop with model checkpointing ###Code def train_save(epoch, save_interval, log_interval=100): model.train() # set training mode iteration = 0 for ep in range(epoch): for batch_idx, (data, target) in enumerate(trainset_loader): data, target = data.to(device), target.to(device) optimizer.zero_grad() output = model(data) loss = F.nll_loss(output, target) loss.backward() optimizer.step() if iteration % log_interval == 0: print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( ep, batch_idx * len(data), len(trainset_loader.dataset), 100. 
* batch_idx / len(trainset_loader), loss.item())) # different from before: saving model checkpoints if iteration % save_interval == 0 and iteration > 0: save_checkpoint('mnist-%i.pth' % iteration, model, optimizer) iteration += 1 test() # save the final model save_checkpoint('mnist-%i.pth' % iteration, model, optimizer) train_save(5, save_interval=500, log_interval=100) # create a new model model = Net().to(device) optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9) # load from the final checkpoint load_checkpoint('mnist-4690.pth', model, optimizer) # should give you the final model accuracy test() ###Output _____no_output_____
examples/sparsity patterns for scipy.ipynb
###Markdown Step 1We want to create 10x12 matrix ###Code n_rows, n_cols = 10, 12 ###Output _____no_output_____ ###Markdown Let's create sparsity pattern, e.g., for a block-diagonal matrix ###Code n_block_sizes = [3, 1, 2] n_matrix_size = min(n_rows, n_cols) idx = spat.get('block', n_matrix_size, n_block_sizes) print("Sparsity Pattern:") print(idx) n_elements = len(idx) print(f"Number of elements: {n_elements}") ###Output Sparsity Pattern: [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2), (2, 0), (2, 1), (2, 2), (3, 3), (4, 4), (4, 5), (5, 4), (5, 5), (6, 6), (6, 7), (6, 8), (7, 6), (7, 7), (7, 8), (8, 6), (8, 7), (8, 8), (9, 9)] Number of elements: 24 ###Markdown Step 2Create a 10x12 LIL matrix with scipy,and assign the values `[1, 2, .., 24]` to the sparse matrix. ###Code idx = np.array(idx) idx_rows, idx_cols = idx[:, 0], idx[:, 1] mat = scipy.sparse.lil_matrix((n_rows, n_cols), dtype=np.int64) mat[idx_rows, idx_cols] = range(1, n_elements+1) mat.todense() ###Output _____no_output_____
experiments/tl_1/cores-metehan/trials/1/trial.ipynb
###Markdown Transfer Learning Template ###Code %load_ext autoreload %autoreload 2 %matplotlib inline import os, json, sys, time, random import numpy as np import torch from torch.optim import Adam from easydict import EasyDict import matplotlib.pyplot as plt from steves_models.steves_ptn import Steves_Prototypical_Network from steves_utils.lazy_iterable_wrapper import Lazy_Iterable_Wrapper from steves_utils.iterable_aggregator import Iterable_Aggregator from steves_utils.ptn_train_eval_test_jig import PTN_Train_Eval_Test_Jig from steves_utils.torch_sequential_builder import build_sequential from steves_utils.torch_utils import get_dataset_metrics, ptn_confusion_by_domain_over_dataloader from steves_utils.utils_v2 import (per_domain_accuracy_from_confusion, get_datasets_base_path) from steves_utils.PTN.utils import independent_accuracy_assesment from torch.utils.data import DataLoader from steves_utils.stratified_dataset.episodic_accessor import Episodic_Accessor_Factory from steves_utils.ptn_do_report import ( get_loss_curve, get_results_table, get_parameters_table, get_domain_accuracies, ) from steves_utils.transforms import get_chained_transform ###Output _____no_output_____ ###Markdown Allowed ParametersThese are allowed parameters, not defaultsEach of these values need to be present in the injected parameters (the notebook will raise an exception if they are not present)Papermill uses the cell tag "parameters" to inject the real parameters below this cell.Enable tags to see what I mean ###Code required_parameters = { "experiment_name", "lr", "device", "seed", "dataset_seed", "n_shot", "n_query", "n_way", "train_k_factor", "val_k_factor", "test_k_factor", "n_epoch", "patience", "criteria_for_best", "x_net", "datasets", "torch_default_dtype", "NUM_LOGS_PER_EPOCH", "BEST_MODEL_PATH", } from steves_utils.CORES.utils import ( ALL_NODES, ALL_NODES_MINIMUM_1000_EXAMPLES, ALL_DAYS ) from steves_utils.ORACLE.utils_v2 import ( ALL_DISTANCES_FEET_NARROWED, ALL_RUNS, ALL_SERIAL_NUMBERS, ) standalone_parameters = {} standalone_parameters["experiment_name"] = "STANDALONE PTN" standalone_parameters["lr"] = 0.001 standalone_parameters["device"] = "cuda" standalone_parameters["seed"] = 1337 standalone_parameters["dataset_seed"] = 1337 standalone_parameters["n_way"] = 8 standalone_parameters["n_shot"] = 3 standalone_parameters["n_query"] = 2 standalone_parameters["train_k_factor"] = 1 standalone_parameters["val_k_factor"] = 2 standalone_parameters["test_k_factor"] = 2 standalone_parameters["n_epoch"] = 50 standalone_parameters["patience"] = 10 standalone_parameters["criteria_for_best"] = "source_loss" standalone_parameters["datasets"] = [ { "labels": ALL_SERIAL_NUMBERS, "domains": ALL_DISTANCES_FEET_NARROWED, "num_examples_per_domain_per_label": 100, "pickle_path": os.path.join(get_datasets_base_path(), "oracle.Run1_framed_2000Examples_stratified_ds.2022A.pkl"), "source_or_target_dataset": "source", "x_transforms": ["unit_mag", "minus_two"], "episode_transforms": [], "domain_prefix": "ORACLE_" }, { "labels": ALL_NODES, "domains": ALL_DAYS, "num_examples_per_domain_per_label": 100, "pickle_path": os.path.join(get_datasets_base_path(), "cores.stratified_ds.2022A.pkl"), "source_or_target_dataset": "target", "x_transforms": ["unit_power", "times_zero"], "episode_transforms": [], "domain_prefix": "CORES_" } ] standalone_parameters["torch_default_dtype"] = "torch.float32" standalone_parameters["x_net"] = [ {"class": "nnReshape", "kargs": {"shape":[-1, 1, 2, 256]}}, {"class": "Conv2d", "kargs": { 
"in_channels":1, "out_channels":256, "kernel_size":(1,7), "bias":False, "padding":(0,3), },}, {"class": "ReLU", "kargs": {"inplace": True}}, {"class": "BatchNorm2d", "kargs": {"num_features":256}}, {"class": "Conv2d", "kargs": { "in_channels":256, "out_channels":80, "kernel_size":(2,7), "bias":True, "padding":(0,3), },}, {"class": "ReLU", "kargs": {"inplace": True}}, {"class": "BatchNorm2d", "kargs": {"num_features":80}}, {"class": "Flatten", "kargs": {}}, {"class": "Linear", "kargs": {"in_features": 80*256, "out_features": 256}}, # 80 units per IQ pair {"class": "ReLU", "kargs": {"inplace": True}}, {"class": "BatchNorm1d", "kargs": {"num_features":256}}, {"class": "Linear", "kargs": {"in_features": 256, "out_features": 256}}, ] # Parameters relevant to results # These parameters will basically never need to change standalone_parameters["NUM_LOGS_PER_EPOCH"] = 10 standalone_parameters["BEST_MODEL_PATH"] = "./best_model.pth" # Parameters parameters = { "experiment_name": "tl_1_cores-metehan", "device": "cuda", "lr": 0.001, "seed": 1337, "dataset_seed": 1337, "n_shot": 3, "n_query": 2, "train_k_factor": 3, "val_k_factor": 2, "test_k_factor": 2, "torch_default_dtype": "torch.float32", "n_epoch": 50, "patience": 3, "criteria_for_best": "target_loss", "x_net": [ {"class": "nnReshape", "kargs": {"shape": [-1, 1, 2, 256]}}, { "class": "Conv2d", "kargs": { "in_channels": 1, "out_channels": 256, "kernel_size": [1, 7], "bias": False, "padding": [0, 3], }, }, {"class": "ReLU", "kargs": {"inplace": True}}, {"class": "BatchNorm2d", "kargs": {"num_features": 256}}, { "class": "Conv2d", "kargs": { "in_channels": 256, "out_channels": 80, "kernel_size": [2, 7], "bias": True, "padding": [0, 3], }, }, {"class": "ReLU", "kargs": {"inplace": True}}, {"class": "BatchNorm2d", "kargs": {"num_features": 80}}, {"class": "Flatten", "kargs": {}}, {"class": "Linear", "kargs": {"in_features": 20480, "out_features": 256}}, {"class": "ReLU", "kargs": {"inplace": True}}, {"class": "BatchNorm1d", "kargs": {"num_features": 256}}, {"class": "Linear", "kargs": {"in_features": 256, "out_features": 256}}, ], "NUM_LOGS_PER_EPOCH": 10, "BEST_MODEL_PATH": "./best_model.pth", "n_way": 19, "datasets": [ { "labels": [ "1-10.", "1-11.", "1-15.", "1-16.", "1-17.", "1-18.", "1-19.", "10-4.", "10-7.", "11-1.", "11-14.", "11-17.", "11-20.", "11-7.", "13-20.", "13-8.", "14-10.", "14-11.", "14-14.", "14-7.", "15-1.", "15-20.", "16-1.", "16-16.", "17-10.", "17-11.", "17-2.", "19-1.", "19-16.", "19-19.", "19-20.", "19-3.", "2-10.", "2-11.", "2-17.", "2-18.", "2-20.", "2-3.", "2-4.", "2-5.", "2-6.", "2-7.", "2-8.", "3-13.", "3-18.", "3-3.", "4-1.", "4-10.", "4-11.", "4-19.", "5-5.", "6-15.", "7-10.", "7-14.", "8-18.", "8-20.", "8-3.", "8-8.", ], "domains": [1, 2, 3, 4, 5], "num_examples_per_domain_per_label": 100, "pickle_path": "/mnt/wd500GB/CSC500/csc500-main/datasets/cores.stratified_ds.2022A.pkl", "source_or_target_dataset": "source", "x_transforms": ["unit_mag"], "episode_transforms": [], "domain_prefix": "CORES_", }, { "labels": [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, ], "domains": [0, 1, 2], "num_examples_per_domain_per_label": 100, "pickle_path": "/mnt/wd500GB/CSC500/csc500-main/datasets/metehan.stratified_ds.2022A.pkl", "source_or_target_dataset": "target", "x_transforms": ["unit_mag"], "episode_transforms": [], "domain_prefix": "Metehan_", }, ], } # Set this to True if you want to run this template directly STANDALONE = False if STANDALONE: print("parameters not injected, running with 
standalone_parameters") parameters = standalone_parameters if not 'parameters' in locals() and not 'parameters' in globals(): raise Exception("Parameter injection failed") #Use an easy dict for all the parameters p = EasyDict(parameters) supplied_keys = set(p.keys()) if supplied_keys != required_parameters: print("Parameters are incorrect") if len(supplied_keys - required_parameters)>0: print("Shouldn't have:", str(supplied_keys - required_parameters)) if len(required_parameters - supplied_keys)>0: print("Need to have:", str(required_parameters - supplied_keys)) raise RuntimeError("Parameters are incorrect") ################################### # Set the RNGs and make it all deterministic ################################### np.random.seed(p.seed) random.seed(p.seed) torch.manual_seed(p.seed) torch.use_deterministic_algorithms(True) ########################################### # The stratified datasets honor this ########################################### torch.set_default_dtype(eval(p.torch_default_dtype)) ################################### # Build the network(s) # Note: It's critical to do this AFTER setting the RNG ################################### x_net = build_sequential(p.x_net) start_time_secs = time.time() p.domains_source = [] p.domains_target = [] train_original_source = [] val_original_source = [] test_original_source = [] train_original_target = [] val_original_target = [] test_original_target = [] # global_x_transform_func = lambda x: normalize(x.to(torch.get_default_dtype()), "unit_power") # unit_power, unit_mag # global_x_transform_func = lambda x: normalize(x, "unit_power") # unit_power, unit_mag def add_dataset( labels, domains, pickle_path, x_transforms, episode_transforms, domain_prefix, num_examples_per_domain_per_label, source_or_target_dataset:str, iterator_seed=p.seed, dataset_seed=p.dataset_seed, n_shot=p.n_shot, n_way=p.n_way, n_query=p.n_query, train_val_test_k_factors=(p.train_k_factor,p.val_k_factor,p.test_k_factor), ): if x_transforms == []: x_transform = None else: x_transform = get_chained_transform(x_transforms) if episode_transforms == []: episode_transform = None else: raise Exception("episode_transforms not implemented") episode_transform = lambda tup, _prefix=domain_prefix: (_prefix + str(tup[0]), tup[1]) eaf = Episodic_Accessor_Factory( labels=labels, domains=domains, num_examples_per_domain_per_label=num_examples_per_domain_per_label, iterator_seed=iterator_seed, dataset_seed=dataset_seed, n_shot=n_shot, n_way=n_way, n_query=n_query, train_val_test_k_factors=train_val_test_k_factors, pickle_path=pickle_path, x_transform_func=x_transform, ) train, val, test = eaf.get_train(), eaf.get_val(), eaf.get_test() train = Lazy_Iterable_Wrapper(train, episode_transform) val = Lazy_Iterable_Wrapper(val, episode_transform) test = Lazy_Iterable_Wrapper(test, episode_transform) if source_or_target_dataset=="source": train_original_source.append(train) val_original_source.append(val) test_original_source.append(test) p.domains_source.extend( [domain_prefix + str(u) for u in domains] ) elif source_or_target_dataset=="target": train_original_target.append(train) val_original_target.append(val) test_original_target.append(test) p.domains_target.extend( [domain_prefix + str(u) for u in domains] ) else: raise Exception(f"invalid source_or_target_dataset: {source_or_target_dataset}") for ds in p.datasets: add_dataset(**ds) # from steves_utils.CORES.utils import ( # ALL_NODES, # ALL_NODES_MINIMUM_1000_EXAMPLES, # ALL_DAYS # ) # add_dataset( # labels=ALL_NODES, # domains = 
ALL_DAYS, # num_examples_per_domain_per_label=100, # pickle_path=os.path.join(get_datasets_base_path(), "cores.stratified_ds.2022A.pkl"), # source_or_target_dataset="target", # x_transform_func=global_x_transform_func, # domain_modifier=lambda u: f"cores_{u}" # ) # from steves_utils.ORACLE.utils_v2 import ( # ALL_DISTANCES_FEET, # ALL_RUNS, # ALL_SERIAL_NUMBERS, # ) # add_dataset( # labels=ALL_SERIAL_NUMBERS, # domains = list(set(ALL_DISTANCES_FEET) - {2,62}), # num_examples_per_domain_per_label=100, # pickle_path=os.path.join(get_datasets_base_path(), "oracle.Run2_framed_2000Examples_stratified_ds.2022A.pkl"), # source_or_target_dataset="source", # x_transform_func=global_x_transform_func, # domain_modifier=lambda u: f"oracle1_{u}" # ) # from steves_utils.ORACLE.utils_v2 import ( # ALL_DISTANCES_FEET, # ALL_RUNS, # ALL_SERIAL_NUMBERS, # ) # add_dataset( # labels=ALL_SERIAL_NUMBERS, # domains = list(set(ALL_DISTANCES_FEET) - {2,62,56}), # num_examples_per_domain_per_label=100, # pickle_path=os.path.join(get_datasets_base_path(), "oracle.Run2_framed_2000Examples_stratified_ds.2022A.pkl"), # source_or_target_dataset="source", # x_transform_func=global_x_transform_func, # domain_modifier=lambda u: f"oracle2_{u}" # ) # add_dataset( # labels=list(range(19)), # domains = [0,1,2], # num_examples_per_domain_per_label=100, # pickle_path=os.path.join(get_datasets_base_path(), "metehan.stratified_ds.2022A.pkl"), # source_or_target_dataset="target", # x_transform_func=global_x_transform_func, # domain_modifier=lambda u: f"met_{u}" # ) # # from steves_utils.wisig.utils import ( # # ALL_NODES_MINIMUM_100_EXAMPLES, # # ALL_NODES_MINIMUM_500_EXAMPLES, # # ALL_NODES_MINIMUM_1000_EXAMPLES, # # ALL_DAYS # # ) # import steves_utils.wisig.utils as wisig # add_dataset( # labels=wisig.ALL_NODES_MINIMUM_100_EXAMPLES, # domains = wisig.ALL_DAYS, # num_examples_per_domain_per_label=100, # pickle_path=os.path.join(get_datasets_base_path(), "wisig.node3-19.stratified_ds.2022A.pkl"), # source_or_target_dataset="target", # x_transform_func=global_x_transform_func, # domain_modifier=lambda u: f"wisig_{u}" # ) ################################### # Build the dataset ################################### train_original_source = Iterable_Aggregator(train_original_source, p.seed) val_original_source = Iterable_Aggregator(val_original_source, p.seed) test_original_source = Iterable_Aggregator(test_original_source, p.seed) train_original_target = Iterable_Aggregator(train_original_target, p.seed) val_original_target = Iterable_Aggregator(val_original_target, p.seed) test_original_target = Iterable_Aggregator(test_original_target, p.seed) # For CNN We only use X and Y. And we only train on the source. # Properly form the data using a transform lambda and Lazy_Iterable_Wrapper. 
Finally wrap them in a dataloader transform_lambda = lambda ex: ex[1] # Original is (<domain>, <episode>) so we strip down to episode only train_processed_source = Lazy_Iterable_Wrapper(train_original_source, transform_lambda) val_processed_source = Lazy_Iterable_Wrapper(val_original_source, transform_lambda) test_processed_source = Lazy_Iterable_Wrapper(test_original_source, transform_lambda) train_processed_target = Lazy_Iterable_Wrapper(train_original_target, transform_lambda) val_processed_target = Lazy_Iterable_Wrapper(val_original_target, transform_lambda) test_processed_target = Lazy_Iterable_Wrapper(test_original_target, transform_lambda) datasets = EasyDict({ "source": { "original": {"train":train_original_source, "val":val_original_source, "test":test_original_source}, "processed": {"train":train_processed_source, "val":val_processed_source, "test":test_processed_source} }, "target": { "original": {"train":train_original_target, "val":val_original_target, "test":test_original_target}, "processed": {"train":train_processed_target, "val":val_processed_target, "test":test_processed_target} }, }) from steves_utils.transforms import get_average_magnitude, get_average_power print(set([u for u,_ in val_original_source])) print(set([u for u,_ in val_original_target])) s_x, s_y, q_x, q_y, _ = next(iter(train_processed_source)) print(s_x) # for ds in [ # train_processed_source, # val_processed_source, # test_processed_source, # train_processed_target, # val_processed_target, # test_processed_target # ]: # for s_x, s_y, q_x, q_y, _ in ds: # for X in (s_x, q_x): # for x in X: # assert np.isclose(get_average_magnitude(x.numpy()), 1.0) # assert np.isclose(get_average_power(x.numpy()), 1.0) ################################### # Build the model ################################### model = Steves_Prototypical_Network(x_net, device=p.device, x_shape=(2,256)) optimizer = Adam(params=model.parameters(), lr=p.lr) ################################### # train ################################### jig = PTN_Train_Eval_Test_Jig(model, p.BEST_MODEL_PATH, p.device) jig.train( train_iterable=datasets.source.processed.train, source_val_iterable=datasets.source.processed.val, target_val_iterable=datasets.target.processed.val, num_epochs=p.n_epoch, num_logs_per_epoch=p.NUM_LOGS_PER_EPOCH, patience=p.patience, optimizer=optimizer, criteria_for_best=p.criteria_for_best, ) total_experiment_time_secs = time.time() - start_time_secs ################################### # Evaluate the model ################################### source_test_label_accuracy, source_test_label_loss = jig.test(datasets.source.processed.test) target_test_label_accuracy, target_test_label_loss = jig.test(datasets.target.processed.test) source_val_label_accuracy, source_val_label_loss = jig.test(datasets.source.processed.val) target_val_label_accuracy, target_val_label_loss = jig.test(datasets.target.processed.val) history = jig.get_history() total_epochs_trained = len(history["epoch_indices"]) val_dl = Iterable_Aggregator((datasets.source.original.val,datasets.target.original.val)) confusion = ptn_confusion_by_domain_over_dataloader(model, p.device, val_dl) per_domain_accuracy = per_domain_accuracy_from_confusion(confusion) # Add a key to per_domain_accuracy for if it was a source domain for domain, accuracy in per_domain_accuracy.items(): per_domain_accuracy[domain] = { "accuracy": accuracy, "source?": domain in p.domains_source } # Do an independent accuracy assesment JUST TO BE SURE! 
# _source_test_label_accuracy = independent_accuracy_assesment(model, datasets.source.processed.test, p.device) # _target_test_label_accuracy = independent_accuracy_assesment(model, datasets.target.processed.test, p.device) # _source_val_label_accuracy = independent_accuracy_assesment(model, datasets.source.processed.val, p.device) # _target_val_label_accuracy = independent_accuracy_assesment(model, datasets.target.processed.val, p.device) # assert(_source_test_label_accuracy == source_test_label_accuracy) # assert(_target_test_label_accuracy == target_test_label_accuracy) # assert(_source_val_label_accuracy == source_val_label_accuracy) # assert(_target_val_label_accuracy == target_val_label_accuracy) experiment = { "experiment_name": p.experiment_name, "parameters": dict(p), "results": { "source_test_label_accuracy": source_test_label_accuracy, "source_test_label_loss": source_test_label_loss, "target_test_label_accuracy": target_test_label_accuracy, "target_test_label_loss": target_test_label_loss, "source_val_label_accuracy": source_val_label_accuracy, "source_val_label_loss": source_val_label_loss, "target_val_label_accuracy": target_val_label_accuracy, "target_val_label_loss": target_val_label_loss, "total_epochs_trained": total_epochs_trained, "total_experiment_time_secs": total_experiment_time_secs, "confusion": confusion, "per_domain_accuracy": per_domain_accuracy, }, "history": history, "dataset_metrics": get_dataset_metrics(datasets, "ptn"), } ax = get_loss_curve(experiment) plt.show() get_results_table(experiment) get_domain_accuracies(experiment) print("Source Test Label Accuracy:", experiment["results"]["source_test_label_accuracy"], "Target Test Label Accuracy:", experiment["results"]["target_test_label_accuracy"]) print("Source Val Label Accuracy:", experiment["results"]["source_val_label_accuracy"], "Target Val Label Accuracy:", experiment["results"]["target_val_label_accuracy"]) json.dumps(experiment) ###Output _____no_output_____
3_drawing_marbles.ipynb
###Markdown https://www.hackerrank.com/challenges/s10-mcq-6/problem P(2B|1R) = P(2B and 1R) / P(1R) ###Code (3/7)*(4/6)/(3/7) ###Output _____no_output_____
PCA/PCA Mini-Project-zh.ipynb
###Markdown PCA 迷你项目 使用特征脸方法和 SVM 进行脸部识别我们在讨论 PCA 时花了很长的时间讨论理论问题,因此,在此迷你项目中,我们将请你研究一些 sklearn 代码。特征脸代码很有趣并且很丰富,足以当做此迷你项目的实验台。注意:在此示例中使用的数据集来自“[Labeled Faces in the Wild](http://vis-www.cs.umass.edu/lfw/)”,亦称为 [LFW_ Download](http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz) (233MB) 并经过预处理。这是[原始数据](http://scikit-learn.org/0.15/auto_examples/applications/face_recognition.html)。 ###Code from time import time import logging import pylab as pl import numpy as np from sklearn.model_selection import train_test_split from sklearn.datasets import fetch_lfw_people from sklearn.model_selection import GridSearchCV from sklearn.metrics import classification_report from sklearn.metrics import confusion_matrix from sklearn.decomposition import RandomizedPCA from sklearn.decomposition import PCA from sklearn.svm import SVC ###Output _____no_output_____ ###Markdown 加载数据集 ###Code # Download the data, if not already on disk and load it as numpy arrays lfw_people = fetch_lfw_people('data', min_faces_per_person=70, resize=0.4) # introspect the images arrays to find the shapes (for plotting) n_samples, h, w = lfw_people.images.shape np.random.seed(42) # for machine learning we use the data directly (as relative pixel # position info is ignored by this model) X = lfw_people.data n_features = X.shape[1] # the label to predict is the id of the person y = lfw_people.target target_names = lfw_people.target_names n_classes = target_names.shape[0] print("Total dataset size:") print("n_samples: %d" % n_samples) print("n_features: %d" % n_features) print( "n_classes: %d" % n_classes) ###Output Total dataset size: n_samples: 1288 n_features: 1850 n_classes: 7 ###Markdown 拆分为训练集和测试集 ###Code X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42) ###Output _____no_output_____ ###Markdown 计算 PCA我们现在可以对脸部数据集(当做无标签数据集)计算 [PCA](http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html)(特征脸)了:无监督式特征提取/降维。 ###Code n_components = 150 print( "Extracting the top %d eigenfaces from %d faces" % (n_components, X_train.shape[0]) ) t0 = time() # TODO: Create an instance of PCA, initializing with n_components=n_components and whiten=True pca = PCA(n_components=n_components, whiten=True, svd_solver='randomized') #TODO: pass the training dataset (X_train) to pca's 'fit()' method pca = pca.fit(X_train) print("done in %0.3fs" % (time() - t0)) ###Output Extracting the top 150 eigenfaces from 966 faces done in 0.190s ###Markdown 将输入数据投射到特征脸标准正交基 ###Code eigenfaces = pca.components_.reshape((n_components, h, w)) t0 = time() X_train_pca = pca.transform(X_train) X_test_pca = pca.transform(X_test) print("done in %0.3fs" % (time() - t0)) ###Output done in 0.032s ###Markdown 训练 SVM 分类模型我们将 [SVM 分类器](http://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html)拟合到训练集中。我们将使用 [GridSearchCV](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html) 为该分类器找到一组合适的参数。 ###Code param_grid = { 'C': [1e3, 5e3, 1e4, 5e4, 1e5], 'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], } # for sklearn version 0.16 or prior, the class_weight parameter value is 'auto' clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid) clf = clf.fit(X_train_pca, y_train) print("Best estimator found by grid search:") print(clf.best_estimator_) ###Output Best estimator found by grid search: SVC(C=1000.0, cache_size=200, class_weight='balanced', coef0=0.0, decision_function_shape='ovr', degree=3, gamma=0.001, kernel='rbf', max_iter=-1, probability=False, 
random_state=None, shrinking=True, tol=0.001, verbose=False) ###Markdown 用测试集评估模型质量 1. 分类报告训练好分类器后,我们在测试数据集上运行该分类器,并定性地评估结果。Sklearn 的[分类报告](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.classification_report.html)显示了每个类别的一些主要分类指标。 ###Code y_pred = clf.predict(X_test_pca) print(classification_report(y_test, y_pred, target_names=target_names)) ###Output precision recall f1-score support Ariel Sharon 0.60 0.69 0.64 13 Colin Powell 0.76 0.90 0.82 60 Donald Rumsfeld 0.71 0.74 0.73 27 George W Bush 0.90 0.88 0.89 146 Gerhard Schroeder 0.86 0.76 0.81 25 Hugo Chavez 0.90 0.60 0.72 15 Tony Blair 0.85 0.78 0.81 36 avg / total 0.84 0.83 0.83 322 ###Markdown 2. 混淆矩阵查看分类器效果的另一种方式是查看[混淆矩阵](http://www.dataschool.io/simple-guide-to-confusion-matrix-terminology/)。为此,我们可以直接调用 [sklearn.metrics.confusion_matrix](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.confusion_matrix.html): ###Code print(confusion_matrix(y_test, y_pred, labels=range(n_classes))) ###Output [[ 9 0 3 1 0 0 0] [ 2 52 1 4 0 1 0] [ 4 0 22 1 0 0 0] [ 1 11 2 127 3 1 1] [ 0 2 0 1 19 1 2] [ 0 3 0 1 2 8 1] [ 0 2 1 2 1 0 30]] ###Markdown 3. 绘制最显著的特征脸 ###Code def plot_gallery(images, titles, h, w, n_row=3, n_col=4): """Helper function to plot a gallery of portraits""" pl.figure(figsize=(1.8 * n_col, 2.4 * n_row)) pl.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35) for i in range(n_row * n_col): pl.subplot(n_row, n_col, i + 1) pl.imshow(images[i].reshape((h, w)), cmap=pl.cm.gray) pl.title(titles[i], size=12) pl.xticks(()) pl.yticks(()) # plot the result of the prediction on a portion of the test set def title(y_pred, y_test, target_names, i): pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1] true_name = target_names[y_test[i]].rsplit(' ', 1)[-1] return ('predicted: %s\ntrue: %s' % (pred_name, true_name)) prediction_titles = [title(y_pred, y_test, target_names, i) for i in range(y_pred.shape[0])] plot_gallery(X_test, prediction_titles, h, w) pl.show() eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])] plot_gallery(eigenfaces, eigenface_titles, h, w) pl.show() ###Output _____no_output_____ ###Markdown 练习:每个主成分的可释方差我们提到 PCA 将对主成分排序,第一个主成分会显示最大方差的方向,第二个主成分具有第二大方差,等等。第一个主成分解释了多少方差?第二个呢?打印pca.explained_variance_ratio_,第一主成分解释了19%方差,第二主成分解释了15%方差该数据主要说明主成分变动对方差的影响,例如:2x+3b=y,x和b是特征,取值范围都是0-1,x从0到1,y变化了多少。 练习:要使用多少个主成分?现在你将实验不同数量的主成分。在多类别分类问题(例如此问题,要应用 2 个以上的标签)中,准确率指标没有二类别问题的准确率指标直观。相反,我们将使用一个热门指标,即 F1 分数。我们将在关于评估指标的课程中深入了解 F1 分数,但是你自己将明白好的分类器的 F1 分数是高还是低。你将通过改变主成分的数量,观察 F1 分数会如何变化。当你添加更多主成分(作为特征)来训练分类器时,你认为分类器的效果会更好还是更差?当主成分较少时,添加主成分,会使F1分数提高,但当主成分增加到一定数量时,F1分数会下降。 练习:F1 分数与所使用的主成分数量将 n_components 更改为以下值:[10、15、25、50、100、250]。对于每个主成分数量,注意 Ariel Sharon 的 F1 分数。(对于 10 个主成分,代码中的绘制函数将崩溃,但是你应该能够看到 F1 分数。)如果你看到更高的 F1 分数,是否意味着分类器的效果更好或更差?n_components=10 precision recall f1-score support Ariel Sharon 0.10 0.15 0.12 13 Colin Powell 0.43 0.53 0.48 60 Donald Rumsfeld 0.26 0.33 0.30 27 George W Bush 0.66 0.58 0.62 146Gerhard Schroeder 0.17 0.20 0.18 25 Hugo Chavez 0.25 0.13 0.17 15 Tony Blair 0.50 0.39 0.44 36 avg / total 0.49 0.46 0.47 322 n_components=15 precision recall f1-score support Ariel Sharon 0.25 0.46 0.32 13 Colin Powell 0.64 0.72 0.68 60 Donald Rumsfeld 0.46 0.63 0.53 27 George W Bush 0.82 0.68 0.74 146Gerhard Schroeder 0.39 0.44 0.42 25 Hugo Chavez 0.60 0.40 0.48 15 Tony Blair 0.50 0.50 0.50 36 avg / total 0.66 0.62 0.63 322n_components=25 precision recall f1-score support Ariel Sharon 0.56 0.69 0.62 13 Colin Powell 0.72 0.87 0.79 60 Donald Rumsfeld 0.48 0.52 0.50 27 
George W Bush 0.86 0.82 0.84 146Gerhard Schroeder 0.56 0.56 0.56 25 Hugo Chavez 0.89 0.53 0.67 15 Tony Blair 0.70 0.64 0.67 36 avg / total 0.75 0.74 0.74 322n_components=50 precision recall f1-score support Ariel Sharon 0.62 0.77 0.69 13 Colin Powell 0.83 0.92 0.87 60 Donald Rumsfeld 0.68 0.56 0.61 27 George W Bush 0.86 0.90 0.88 146Gerhard Schroeder 0.74 0.68 0.71 25 Hugo Chavez 0.77 0.67 0.71 15 Tony Blair 0.83 0.67 0.74 36 avg / total 0.81 0.82 0.81 322n_components=100 precision recall f1-score support Ariel Sharon 0.64 0.69 0.67 13 Colin Powell 0.82 0.90 0.86 60 Donald Rumsfeld 0.75 0.67 0.71 27 George W Bush 0.89 0.95 0.92 146Gerhard Schroeder 0.82 0.72 0.77 25 Hugo Chavez 0.90 0.60 0.72 15 Tony Blair 0.90 0.78 0.84 36 avg / total 0.85 0.85 0.85 322n_components=150 precision recall f1-score support Ariel Sharon 0.56 0.69 0.62 13 Colin Powell 0.74 0.87 0.80 60 Donald Rumsfeld 0.76 0.81 0.79 27 George W Bush 0.93 0.87 0.90 146Gerhard Schroeder 0.76 0.76 0.76 25 Hugo Chavez 0.73 0.53 0.62 15 Tony Blair 0.88 0.83 0.86 36 avg / total 0.84 0.83 0.83 322 n_components=250 precision recall f1-score support Ariel Sharon 0.55 0.85 0.67 13 Colin Powell 0.76 0.90 0.82 60 Donald Rumsfeld 0.81 0.63 0.71 27 George W Bush 0.94 0.89 0.91 146Gerhard Schroeder 0.77 0.80 0.78 25 Hugo Chavez 0.78 0.47 0.58 15 Tony Blair 0.81 0.81 0.81 36 avg / total 0.84 0.83 0.83 322 n_components=500 precision recall f1-score support Ariel Sharon 0.43 0.69 0.53 13 Colin Powell 0.63 0.87 0.73 60 Donald Rumsfeld 0.59 0.63 0.61 27 George W Bush 0.87 0.75 0.80 146Gerhard Schroeder 0.59 0.40 0.48 25 Hugo Chavez 0.62 0.53 0.57 15 Tony Blair 0.71 0.67 0.69 36 avg / total 0.73 0.71 0.71 322n_components=966 precision recall f1-score support Ariel Sharon 0.00 0.00 0.00 13 Colin Powell 0.00 0.00 0.00 60 Donald Rumsfeld 0.00 0.00 0.00 27 George W Bush 0.45 0.99 0.62 146Gerhard Schroeder 0.00 0.00 0.00 25 Hugo Chavez 0.00 0.00 0.00 15 Tony Blair 0.00 0.00 0.00 36 avg / total 0.21 0.45 0.28 322 练习:降维和过拟合在使用很高数量的主成分时,是否看到任何过拟合现象?在这种情况下,PCA 降维是否能够改善效果?使用高数量的主成分时,准确率会降低,可能和成分之间相互依赖关系有关,不一定是过拟合,通过PCA降维,能很好的改善效果 ###Code pca.explained_variance_ratio_ ###Output _____no_output_____
src/summary.ipynb
###Markdown Dataset OverviewHow much data do we have? What ages? ###Code import pandas as pd import plotly.io as pio; pio.renderers.default='notebook' import plotly.express as px import pickle df_train = pickle.load(open("../data/selfie_train.pkl", "rb")) df_test = pickle.load(open("../data/selfie_test.pkl", "rb")) df_train.head() import matplotlib.pyplot as plt # the histogram of the data n, bins, patches = plt.hist(df_train.age.to_numpy(), 20, density=False, facecolor='g', alpha=0.75) plt.xlabel('Age') plt.ylabel('Image count') #plt.grid(True) plt.savefig('../selfies_by_age.png', dpi=100) plt.show() print(f"We have {df_train.shape[0] + df_test.shape[0]} images total.") ###Output We have 21070 images total.
code/resnet_40k_v5_adam.ipynb
###Markdown This notebook leverades 40k training data. Labels include all images for rare labels, and random images for most common labels.Key difference here is using ADAM optimizer ###Code %reload_ext autoreload %autoreload 2 %matplotlib inline import json from fastai.imports import * from fastai.transforms import * from fastai.conv_learner import * from fastai.model import * from fastai.dataset import * from fastai.sgdr import * from fastai.plots import * from fastai.metrics import fbeta, f1 torch.cuda.is_available() torch.backends.cudnn.enabled f_model = resnet34 PATH = 'data/iMaterialist/' TRAIN = 'train_40k' LABEL_CSV = f'{PATH}train_40k_labels.csv' sz=224 ###Output _____no_output_____ ###Markdown Define Metrics ###Code def get_data(sz): tfms = tfms_from_model(f_model, sz, aug_tfms=transforms_side_on, max_zoom=1.05) return ImageClassifierData.from_csv(PATH, TRAIN, LABEL_CSV, tfms=tfms, suffix='.jpg', test_name='test') def f1_max(preds, targs): return max([f1(preds, targs, t) for t in np.arange(0.05, 0.5, 0.01)]) def f1_23(preds, targs): return f1(preds, targs, thresh=0.23) data = get_data(sz) learn = ConvLearner.pretrained(f_model, data, precompute=True, metrics=[f1_23], opt_fn=optim.Adam) ###Output _____no_output_____ ###Markdown Step 1First we begin retraining the last layer to map training images to our new labels. Because we do not need to send images through all but the last layer more than once, we turn on precompute to cache the vector embeddings before training the last fully connected logistic regression layer. ###Code learn.lr_find(start_lr=1e-3, end_lr=1e3) learn.sched.plot_lr() plt.show() learn.sched.plot() plt.show() ###Output _____no_output_____ ###Markdown We started to see overfitting after 4 epochs. We will stop this initial training at 4 epochs ###Code lr = 0.01 learn.fit(lr, 3, cycle_len=1, cycle_mult=2) learn.sched.plot_loss() plt.ylim(0.06,0.09) learn.lr_find(start_lr=1e-4, end_lr=1e2) learn.sched.plot_lr() plt.show() learn.sched.plot() plt.show() lr = 0.001 learn.fit(lr, 3, cycle_len=1, cycle_mult=2) learn.sched.plot_loss() lr = 0.01 learn.fit(lr, 4, cycle_len=1, cycle_mult=2) learn.sched.plot_loss() learn.save('resnet34_train_40k_v5_1') ###Output _____no_output_____ ###Markdown Unfreeze ###Code lrs = np.array([lr/100,lr/10,lr]) learn.unfreeze() learn.fit(lrs, 3, cycle_len=1, cycle_mult=2) learn.sched.plot_loss() learn.save('resnet34_train_40k_v5_2') ###Output _____no_output_____ ###Markdown Find best threshold ###Code multi_preds, y = learn.TTA() preds = np.mean(multi_preds, 0) def opt_th(preds, targs): ths = np.arange(0.05, 0.5, 0.01) f1s = [f1(torch.from_numpy(preds), torch.from_numpy(targs), th) for th in ths] idx = np.argmax(f1s) print(f'Best Threshold: {ths[idx]}') print(f'Best Score: {f1s[idx]}') print(f'Index: {idx}') plt.plot(ths, f1s, 'o-') plt.show() opt_th(preds, y) ###Output Best Threshold: 0.23000000000000004 Best Score: 0.5059032176336459 Index: 18 ###Markdown Load Model and score on test data ###Code multi_preds, y = learn.TTA(is_test=True) preds = np.mean(multi_preds, 0) def save_results_to_csv(model, data, preds, threshold): import pdb; pdb.set_trace(); cs = np.array(model.data.classes) label_preds = list(map(lambda x: ' '.join(cs[x]), (preds > threshold))) test_ids = list(map(lambda x: int(x[5:-4]), data.test_ds.fnames)) pred_df = ( pd.DataFrame(list(zip(test_ids, label_preds)), columns=['image_id', 'label_id']) .sort_values('image_id') .reset_index(drop=True) ) with open('data/iMaterialist/test.json') as f: test_urls = 
pd.DataFrame(json.load(f)['images']) with open('data/iMaterialist/validation.json') as f: j = json.load(f) valid_urls = pd.DataFrame(j['images']) valid_labels = pd.DataFrame(j['annotations']) valid_labels['labelId'] = valid_labels['labelId'].apply(lambda x: ' '.join(x)) valid = valid_urls.merge(valid_labels, how='inner', on='imageId')[['url', 'labelId']] test_labels = test_urls.merge(valid, how='inner', on='url').drop('url', axis=1) pred_df.loc[:9896, 'label_id'] = test_labels['labelId'].values pred_df.to_csv(f'data/iMaterialist/submission_resnet34_v4_train_40k_0.54_t_{str(threshold)}.csv', index=False) save_results_to_csv(learn, data, preds, 0.24) !head -10 ./data/iMaterialist/submission_resnet34_v4_train_40k_0.54_t_0.24.csv !head -10 ./data/iMaterialist/sample_submission.csv ###Output image_id,label_id 1,106 115 126 145 161 176 185 32 2,115 13 14 158 18 184 190 220 227 44 47 56 81 9 92 3,104 112 129 224 31 83 4,113 121 145 150 218 63 77 5,1 126 167 170 196 208 213 216 22 32 48 54 79 6,108 110 132 20 4 50 54 6 68 7,144 187 202 205 215 23 8,171 199 21 38 50 9,113 123 128 159 186 199 228 35 38 47 80 86 95
code/.ipynb_checkpoints/machineLearning-checkpoint.ipynb
###Markdown Fetching and Splitting Cleaned Data into Test and Train Sets ###Code flairs = flairs= ["Scheduled","Politics","Photography","Policy/Economy","AskIndia","Sports", "Non-Political","Science/Technology","Food","Business/Finance","Coronavirus"] data = pd.read_csv('../data/cleansedData300.csv') data["combined"] = data.title.astype("str")+" "+ data.url.astype("str")+" "+ data.author.astype("str")+" "+data.authors.astype("str")+" "+ data.comments.astype("str") y = data.flair X = data.combined X_train, X_test, y_train, y_test = train_test_split(X, y,test_size=0.1,random_state = 5) ###Output _____no_output_____ ###Markdown Performing Machine Learning Algorithms ###Code accNB = multinomialNaiveBayes(X_train, X_test, y_train, y_test) print("Multi-Nomial Naive Bayes Accuracy: ",accuracy_score(accNB, y_test)) print() print(classification_report(y_test, accNB,target_names=flairs)) accLR = logisticRegression(X_train, X_test, y_train, y_test) print("Logistic Regression Accuracy: ",accuracy_score(accLR, y_test)) print() print(classification_report(y_test, accLR,target_names=flairs)) accRF = randomForest(X_train, X_test, y_train, y_test) print("Random Forest Accuracy: ",accuracy_score(accRF, y_test)) print() print(classification_report(y_test, accRF,target_names=flairs)) accMLP = multiLayerPerceptron(X_train, X_test, y_train, y_test) print("Multi Layer Perceptron Accuracy: ",accuracy_score(accMLP, y_test)) print() print(classification_report(y_test, accMLP,target_names=flairs)) accSVM = mySVM(X_train, X_test, y_train, y_test) print("SVM Accuracy: ",accuracy_score(accSVM, y_test)) print() print(classification_report(y_test, accMLP,target_names=flairs)) ###Output SVM Accuracy: 0.6367041198501873 precision recall f1-score support Scheduled 0.38 0.21 0.27 28 Politics 0.64 0.24 0.35 29 Photography 0.68 0.53 0.60 32 Policy/Economy 0.46 0.68 0.55 19 AskIndia 0.26 0.29 0.27 24 Sports 0.81 0.61 0.69 28 Non-Political 0.26 0.71 0.37 17 Science/Technology 0.61 0.46 0.52 24 Food 1.00 1.00 1.00 27 Business/Finance 0.35 0.52 0.42 25 Coronavirus 0.90 0.64 0.75 14 accuracy 0.52 267 macro avg 0.58 0.54 0.53 267 weighted avg 0.58 0.52 0.53 267 ###Markdown Saving Naive Bayes Model since it was the best of all ###Code nb = Pipeline([('vect', CountVectorizer()), ('tfidf', TfidfTransformer()), ('clf', MultinomialNB()), ]) tempNB = nb.fit(X_train, y_train) pickle.dump(tempNB,open("../data/finalModel.sav","wb")) ###Output _____no_output_____
02b-tools-pandas/pandas-intro.ipynb
###Markdown ![data-x](https://raw.githubusercontent.com/afo/data-x-plaksha/master/imgsource/dx_logo.png)--- Pandas Introduction with Stock Data and Correlation Examples**Author list:** Ikhlaq Sidhu & Alexander Fred Ojala**References / Sources:** Includes examples from Wes McKinney and the 10min intro to Pandas**License Agreement:** Feel free to do whatever you want with this code___ What Does Pandas Do? What is a Pandas Table Object? ###Code # ## This table is a dictionary of sequences (like np arrays) # <img src="https://github.com/ikhlaqsidhu/data-x/raw/master/imgsource/pandas-p3.jpg"> ###Output _____no_output_____ ###Markdown Topics:1. Dataframe creation2. Reading data in dataFrames3. Data Manipulation Import package ###Code # pandas import pandas as pd # Extra packages import numpy as np import matplotlib.pyplot as plt # for plotting # jupyter notebook magic to display plots in output %matplotlib inline plt.rcParams['figure.figsize'] = (10,6) # make the plots bigger ###Output _____no_output_____ ###Markdown Part:1 Creation Pandas dataframes**Key Points:** Main data types in Pandas:* Series (similar to numpy arrays, but with index)* DataFrames (table or spreadsheet with Series in the columns) We use `pd.DataFrame( )` and can insert almost any data type as an argument**Function:** `pd.DataFrame(data=None, index=None, columns=None, dtype=None, copy=False)`Input data ca be a numpy ndarray (structured or homogeneous), dictionary, or DataFrame. 1.1 Create Dataframe using an array ###Code # Try it with an array import numpy as np np.random.seed(0) # set seed for reproducibility a1 = np.random.randn(3) a2 = np.random.randn(3) a3 = np.random.randn(3) print (a1) print (a2) print (a3) # Create our first DataFrame w/ an np.array - it becomes a column df0 = pd.DataFrame(a1) df0 print(df0) # difference when you print and output of the last row # Check type type(df0) # DataFrame from list of np.arrays df0 = pd.DataFrame([a1, a2, a3]) df0 # notice that there is no column label, only integer values, # and the index is set automatically # We can set column and index names df0 = pd.DataFrame([a1, a2, a3],columns=['a1','a2','a3'],index=['a','b','c']) df0 # add more columns to dataframe, like a dictionary, dimensions must match df0['col4']=a2 df0 # DataFrame from 2D np.array np.random.seed(0) array_2d = np.array(np.random.randn(9)).reshape(3,3) array_2d df0 = pd.DataFrame(array_2d,columns=['rand_normal_1','Random Again','Third'] \ , index=[100,200,99]) df0 ###Output _____no_output_____ ###Markdown 1.2 Create Dataframe using an dictionary ###Code # DataFrame from a Dictionary dict1 = {'a1':a1, 'a2':a2,'a3':a3} dict1 df1 = pd.DataFrame(dict1,index=[1,2,3]) # note that we now have columns without assignment df1 # We can add a list with strings and ints as a column df1['L'] = ["List", 3, "words"] df1 ###Output _____no_output_____ ###Markdown Pandas Series objectEvery column is a Series. Like an np.array, but we can combine data types and it has its own index ###Code type(df1['L']) df1['L'] # dtype object # different datatypes in a column print(type(df1['L'][1]), type(df1['L'][2])) # Note: Every column in a DataFrame is a Series print(df1['L']) print() print(type(df1['L'])) # Create a Series from a Python list s = pd.Series([1,5,3]) # automatic index, 0,1,2... 
s s2 = pd.Series([2, 3, 4], index = ['a','b','c']) #specific index s2 s2['a'] # We can add the Series s to the DataFrame above as column Series # Remember to match indices df1['Series'] = s df1 # We can also rename columns df1 = df1.rename(columns = {'L':'RenamedL'}) df1 # We can delete columns del df1['RenamedL'] df1 # or drop columns, see axis = 1 # does not change df1 if we don't set inplace=True df1.drop('a2',axis=1) # returns a copy df1 # or drop rows df1.drop(1,axis=0) ###Output _____no_output_____ ###Markdown 1.3 Indexing / Slicing a Pandas Datframe ###Code # Example: view only one column df1['a1'] # Or view several column df1[['a1','a3']] # slice of the DataFrame returned # this slices the first three rows first followed by first 2 rows of the sliced frame (df1[0:3][0:2]) # Lets print the five first 2 elements of column a1 # This is a new Series (like a new table) df1['a1'][0:2] # Lets print the 2 column, and top 2 values- note the list of columns df1[['a1','a3']][0:2] ###Output _____no_output_____ ###Markdown Instead of double indexing, we can use loc, iloc loc gets rows (or columns) with particular labels from the index. iloc gets rows (or columns) at particular positions in the index (so it only takes integers). .iloc() ###Code df1.iloc[0,0] df1.iloc[0:2,0:2] # 2nd to 4th row, 4th to 5th column # iloc will also accept 2 'lists' of position numbers df1.iloc[[0,2],[0,2]] 1# Data only from row with index value '1' print (df1.iloc[1]) print() print (df1.iloc[1,:]) ###Output _____no_output_____ ###Markdown .loc() ###Code # Usually we want to grab values by column names # Note: You have to know indices and columns df1.loc[0:2,['a3','a2']] # Boolean indexing # return full rows where a2>0 df1[df1['a2']>0] # df1['a2']>0 - checks condition ans returns boolean and gives # return column a3 values where a2 >0 df1['a3'][df1['a2']>0] # If you want the values in an np array npg = df1.loc[:,"a2"].values #otherwise it returns a indexed series print(type(npg)) print() npg ###Output _____no_output_____ ###Markdown More Basic Statistics ###Code df1.describe() df1.describe().loc[['mean','std'],['a2','a3']] # We can change the index sorting df1.sort_index(axis=0, ascending=False).head() # starts a year ago ###Output _____no_output_____ ###Markdown For more functionalities check this notebookhttps://github.com/ikhlaqsidhu/data-x/blob/master/02b-tools-pandas_intro-mplib_afo/legacy/10-minutes-to-pandas-w-data-x.ipynb Part 2: Reading data in pandas Dataframe Now, lets get some data in CSV format. 
Description:Aggregate data on applicants to graduate school at Berkeley for the six largest departments in 1973 classified by admission and sex.https://vincentarelbundock.github.io/Rdatasets/doc/datasets/UCBAdmissions.html ###Code df = pd.read_csv('data/ucbadmissions.csv') # check statistics df.columns df.head() df.tail(2) df.groupby(['Admit','Gender']).sum() df.describe() df.info() pd.unique(df['Dept']) # Total number of applicants to Dept A df[df['Dept']=='A']['Freq'].sum() df.groupby('Dept').sum() df.groupby('Dept').sum().plot.bar(grid=True) ###Output _____no_output_____ ###Markdown Install Pandas datareader to access APIs with Stock dataRead about data sources here (note, not all works anymore): https://pandas-datareader.readthedocs.io/en/latest/remote_data.html ###Code # Uncomment line below to install # !pip install pandas_datareader pd.core.common.is_list_like = pd.api.types.is_list_like from pandas_datareader import data as web from datetime import datetime as dt df_google = web.DataReader('GOOGL', data_source='iex', start=dt(2018, 1, 1), end=dt.now()).reset_index() df_apple = web.DataReader('AAPL', data_source='iex', start=dt(2018, 1, 1), end=dt.now()).reset_index() df_google.head() # Volume is the number of shares or contracts traded # check dtypes in each column df_google.dtypes ###Output _____no_output_____ ###Markdown Breakout: Check the file attributes & general statitics using Pandas ###Code # shape # show first five values # show last three # return column names # get statistics- mean and std of "open" column ###Output _____no_output_____ ###Markdown Convert the Date string to pandas datetime object ###Code df_google['date'][0] type(df_google['date'][0]) # convert string 'date' to datetime format df_google['date'] = pd.to_datetime(df_google['date'],infer_datetime_format=True) df_google.head() # set index df_google = df_google.set_index('date') # Then we can query date indices with strings # Only January df_google['2018-01'] df_google['2018-01-03':'2018-01-09'] df_google.loc['2018-02-28':'2018-04-21','open':'low'].head() #### Opening price statistics open_price = df_google['open'].map(lambda x: int(np.floor(x/100)*100)) open_price.value_counts() open_price.hist() open_price.value_counts().sort_values().plot(kind='bar') ###Output _____no_output_____ ###Markdown Masks and Boolean Indexing ###Code # Check mask 1 df_google['open']>1200 # Use mask 1 df_google['open'][df_google['open']>1200] # shows only rows with opening price greater than 1200 # Show only the fisrt 10 rows where df_google['open'][df_google['open']>1200][:10] # Show rows where opening stock is >1200 before August 1st 2018 df_google[(df_google['open']>1200) & (df_google.index < dt(2018,8,1))] # we can also drop all NaN values df_google[df_google>1220] df_google[df_google>1220].dropna(axis=0).head(10) #play with axis # another way to filter is with isin() df_google[df_google['open'].isin([1170.62,1184.98])] ###Output _____no_output_____ ###Markdown Manipulating Values ###Code # Recall df_google.head(4) # All the ways to view (by location, by index, iat, etc) # can also be used to set values # good for data normalization df_google['volume'] = df_google['volume']/1000.0 df_google.head(4) # Change specific entry df_google.iloc[0,1] = 2 df_google.head(3) ###Output _____no_output_____ ###Markdown More Statistics and Operations ###Code # mean by column, also try var() for variance df_google.mean() # Use the apply method to perform calculations on every elementi df_google[0:10].apply(np.sqrt) 
df_google['month']=df_google.index.month_name() df_google.groupby('month')['open'].count() df_google[0:5].mean(axis = 1) # row means of first five rows ###Output _____no_output_____ ###Markdown PlotCorrelation Load several stocks ###Code # Reload dfg = pd.read_csv('data/googl.csv').drop('Unnamed: 0',axis=1) # Google stock data dfa = pd.read_csv('data/apple.csv').drop('Unnamed: 0',axis=1) # Apple stock data dfm = pd.read_csv('data/microsoft.csv').drop('Unnamed: 0',axis=1) # Google stock data dfn = pd.read_csv('data/nike.csv').drop('Unnamed: 0',axis=1) # Apple stock data dfb = pd.read_csv('data/boeing.csv').drop('Unnamed: 0',axis=1) # Apple stock data dfb.head() # Rename columns dfg = dfg.rename(columns = {'Close':'GOOG'}) #print (dfg.head()) dfa = dfa.rename(columns = {'Close':'AAPL'}) #print (dfa.head()) dfm = dfm.rename(columns = {'Close':'MSFT'}) #print (dfm.head()) dfn = dfn.rename(columns = {'Close':'NKE'}) #print (dfn.head()) dfb = dfb.rename(columns = {'Close':'BA'}) dfb.head(2) # Lets merge some tables # They will all merge on the common column Date df = dfg[['Date','GOOG']].merge(dfa[['Date','AAPL']]) df = df.merge(dfm[['Date','MSFT']]) df = df.merge(dfn[['Date','NKE']]) df = df.merge(dfb[['Date','BA']]) df.head() df['Date'] = pd.to_datetime(df['Date']) df = df.set_index('Date') df.head() df.plot() df['2017'][['NKE','BA']].plot() # show a correlation matrix (pearson) crl = df.corr() crl crl.sort_values(by='GOOG',ascending=False) s = crl.unstack() so = s.sort_values(ascending=False) so[so<1] # zero mean to plot correlation df.mean() sim=df-df.mean() sim.tail() sim[['MSFT','BA']].plot() ###Output _____no_output_____
TerP450/TerP450-TLP Version.ipynb
###Markdown Create Matrix for Features ###Code import itertools import Bio import os import numpy as np import pandas as pd from Bio import SeqIO from Bio.Seq import Seq from itertools import combinations name_450Tryptorubinlikes="Fasta/sirinsp450.fasta" name_450nonTryptorubinlikes="Fasta/all p450.fasta" foldernameoutput="Output/Tryptorubinlike_peptide" #set to legth of sequence motive you want to test length=4 #creates a list of all sequence motifs to look for combinations=list(itertools.combinations_with_replacement(["a","b","p","r","s","t","n"], length)) permutations=list() for a in combinations: permutations=permutations+ list(itertools.permutations(a,length)) permutations=([ "".join(a) for a in list(dict.fromkeys(permutations))]) # remove all combinations not in sequences def easysequence (sequence): #creates a string out of the sequence file, that only states if AA is acidic (a), basic (b), polar (p), neutral/unpolar (n),aromatic (r),Cystein (s) or a Prolin (t) seqstr=str(sequence) seqlist=list(seqstr) a=0 easylist=[] for i in seqlist: if i == 'E' or i== 'D': easylist=easylist+['a'] if i == 'K' or i=='R' or i=='H': easylist=easylist+['b'] if i == 'S' or i=='T' or i=='N' or i=='Q': easylist=easylist+['p'] if i == 'F' or i=='Y' or i=='W': easylist=easylist+['r'] if i == 'C': easylist=easylist+['s'] if i == 'P': easylist=easylist+['t'] if i == 'G' or i=='A' or i=='V' or i=='L' or i=='I' or i=='M': easylist=easylist+['n'] seperator='' easysequence=seperator.join(easylist) return easysequence for p in permutations: counter=0 counter2=0 for seq_record in SeqIO.parse(name_450Tryptorubinlikes, "fasta"): easyseq=easysequence(seq_record.seq) if easyseq.count(p)>0: counter=counter+1 counter2=counter2+1 if counter<(round(counter2)/2): permutations.remove(p) print ("Number of remaining features:",len(permutations)) #final "permutations" should be a list of all sequence motifs to look for try: os.makedirs(foldernameoutput) except OSError: print ("Creation of the directory %s failed" % foldernameoutput) else: print ("Successfully created the directory %s" % foldernameoutput) filename_permutations=foldernameoutput+"/permutations.txt" with open(filename_permutations, 'w') as f: for s in permutations: f.write(str(s) + '\n') ###Output _____no_output_____ ###Markdown Create Feature Matrix ###Code import Bio import pandas as pd from Bio import SeqIO from Bio import pairwise2 from Bio.Seq import Seq import re # fill in names of files here! 
foldernameoutput="Output/Tryptorubinlike_peptide" name_450tryptorubinlike_peptides="Fasta/sirinsp450.fasta" filename_permutations=foldernameoutput+"/permutations.txt" pathcompletetable=foldernameoutput+"completetable.csv" p450nontryptorubinlike_peptides=pd.read_csv(pathcompletetable) # S#14703] cytochrome P450 51 Cyp51 ([P#10800] [CYP51] cytochrome P450, family 51 ) from https://cyped.biocatnet.de/sequence/14703 alignmentfa=("MSAVALPRVSGGHDEHGHLEEFRTDPIGLMQRVRDECGDVGTFQLAGKQVVLLSGSHANEFFFRAGDDDLDQAKAYPFMTPIFGEGVVFDASPERRKEMLHNAALRGEQMKGHAATIEDQVRRMIADWGEAGEIDLLDFFAELTIYTSSACLIGKKFRDQLDGRFAKLYHELERGTDPLAYVDPYLPIESLRRRDEARNGLVALVADIMNGRIANPPTDKSDRDMLDVLIAVKAETGTPRFSADEITGMFISMMFAGHHTSSGTASWTLIELMRHRDAYAAVIDELDELYGDGRSVSFHLRQIPQLENVLKETLRLHPPLIILMRVAKGEFEVQGHRIHEGDLVAASPAISNRIPEDFPDPHDFVPARYEQPRQEDLLNRWTWIPFGAGRHRCVGAAFAIMQIKAIFSVLLREYEFEMAQPPESYRNDHSKMVVQLAQPACVRYRRRTGV") fragments=("begin","sbr1","sbr2","core","end") with open(filename_permutations, 'r') as f: permutations = [line.rstrip('\n') for line in f] def find_between( s, first, last ): try: start = s.index( first ) + len( first ) end = s.index( last, start ) return s[start:end] except ValueError: return "" def easysequence (sequence): #creates a string out of the sequence file, that only states if AA is acidic (a), basic (b), polar (p), neutral/unpolar (n),aromatic (r),Cystein (s) or a Prolin (t) seqstr=str(sequence) seqlist=list(seqstr) a=0 easylist=[] for i in seqlist: if i == 'E' or i== 'D': easylist=easylist+['a'] if i == 'K' or i=='R' or i=='H': easylist=easylist+['b'] if i == 'S' or i=='T' or i=='N' or i=='Q': easylist=easylist+['p'] if i == 'F' or i=='Y' or i=='W': easylist=easylist+['r'] if i == 'C': easylist=easylist+['s'] if i == 'P': easylist=easylist+['t'] if i == 'G' or i=='A' or i=='V' or i=='L' or i=='I' or i=='M': easylist=easylist+['n'] seperator='' easysequence=seperator.join(easylist) return easysequence def merge_two_dicts(x, y): z = x.copy() # start with x's keys and values z.update(y) # modifies z with y's keys and values & returns None return z # creates tables p450tryptorubinlike_peptides=pd.DataFrame(columns=['accessionnumber', 'sequence','easysequence ']) for f in fragments: for i in permutations: n=i+f p450tryptorubinlike_peptides[n]=[] print ("!") #getting Data for p450 associated with tryptorubinlike_peptides for seq_record in SeqIO.parse(name_450tryptorubinlike_peptides, "fasta"): # Very expensive to open a gap in seq1: nogaps = lambda x, y: -2000 - y specificgaps = lambda x, y: (-2 - y) alignments = pairwise2.align.globalmc(alignmentfa, seq_record.seq, 1, -1, nogaps, specificgaps) seqa=find_between(str(alignments[0]),"seqA='","'").replace("-","") seqb=find_between(str(alignments[0]),"seqB='","'") #begin for match in re.finditer("MSAVALPRVSGGHDEHGHLEEFRTDPIGLMQRVRDECGDVGTFQLAGKQVVLLSGSHANEFFFRAGDDDLDQAKAYPFMTPIFGEGVVFDAS",seqa): end=match.span()[1] begb=easysequence(seqb[:end+1].replace("-","")) #sbr1 for match in re.finditer("PERRKEMLHNAALRGEQMKGHAATIEDQVRRMIADWGEAGEIDLLDFFAELTIYTSSACLIGKKFRDQLDGRFAKLYHELERGTDPLAYVDPYLPIESLRRRDEARNGLVALVADIMNGRIANPPTDKSDRDMLDVLIAVKAETGTPRFSADEITGMFISMMFAGHHTSSGTASWTLIELMRH",seqa): begin= match.span()[0] end=match.span()[1] sbr1b=easysequence(seqb[begin-1:end+1].replace("-","")) #sbr2 for match in re.finditer("RRDEARNGLVALVADIMNGRIANPPTDKSDRDMLDVLIAVKAETGTPRFSADEITGMFISMMFAGHHTSSGTASWTLIELMRH",seqa): begin= match.span()[0] end=match.span()[1] sbr2b=easysequence(seqb[begin-1:end+1].replace("-","")) #core for match in 
re.finditer("RDAYAAVIDELDELYGDGRSVSFHLRQIPQLENVLKETLRLHPPLIILMRVAKGEFEVQGHRIHEGDLVAASPAISNRIPEDFPDPHDFVPARYEQPRQEDLLNRWTWIPFGAGRHRCV",seqa): begin= match.span()[0] end=match.span()[1] coreb=easysequence(seqb[begin-1:end+1].replace("-","")) endb= easysequence(seqb[end+1:].replace("-","")) listfragments=(begb,sbr1b,sbr2b,coreb,endb) easyseq=easysequence(seq_record.seq) new_row={'accessionnumber':seq_record.id, 'sequence':str(seq_record.seq),'easysequence ':easyseq} c=0 for frag in listfragments: f=fragments[c] c=c+1 for i in permutations: n=i+f new_row =merge_two_dicts(new_row,{n:frag.count(i)}) p450tryptorubinlike_peptides = p450tryptorubinlike_peptides.append(new_row, ignore_index=True) #getting Data for p450 not associated with tryptorubinlike_peptides def easysequence (sequence): #creates a string out of the sequence file, that only states if AA is acidic (a), basic (b), polar (p), neutral/unpolar (n),aromatic (r),Cystein (s) or a Prolin (t) seqstr=str(sequence) seqlist=list(seqstr) a=0 easylist=[] for i in seqlist: if i == 'E' or i== 'D': easylist=easylist+['a'] if i == 'K' or i=='R' or i=='H': easylist=easylist+['b'] if i == 'S' or i=='T' or i=='N' or i=='Q': easylist=easylist+['p'] if i == 'F' or i=='Y' or i=='W': easylist=easylist+['r'] if i == 'C': easylist=easylist+['s'] if i == 'P': easylist=easylist+['t'] if i == 'G' or i=='A' or i=='V' or i=='L' or i=='I' or i=='M': easylist=easylist+['n'] seperator='' easysequence=seperator.join(easylist) return easysequence p450nontryptorubinlike_peptides.drop(columns=['target'],inplace=True) p450tryptorubinlike_peptides.drop(columns=['accessionnumber','sequence','easysequence '],inplace=True) #adds target p450tryptorubinlike_peptides['target']=1 p450nontryptorubinlike_peptides['target']=0 #join tables tables=[p450tryptorubinlike_peptides,p450nontryptorubinlike_peptides] completetable = pd.concat(tables,ignore_index=True) #modify table (drop accessionnumber, sequences) pathcompletetable=foldernameoutput+"/completetablecomplete.csv" completetable.to_csv(pathcompletetable, index=False) print ("3") ###Output ! 2 aaabbegin abaabegin aaapbegin apaabegin aaarbegin araabegin \ 0 0.0 0.0 0.0 0.0 0.0 0.0 1 0.0 0.0 0.0 0.0 0.0 0.0 2 0.0 0.0 0.0 0.0 0.0 0.0 3 0.0 0.0 0.0 0.0 0.0 0.0 4 0.0 0.0 0.0 0.0 0.0 0.0 ... ... ... ... ... ... ... 14706 0.0 0.0 0.0 0.0 0.0 0.0 14707 0.0 0.0 0.0 0.0 0.0 0.0 14708 0.0 0.0 0.0 0.0 0.0 0.0 14709 0.0 0.0 0.0 0.0 0.0 0.0 14710 0.0 0.0 0.0 0.0 0.0 0.0 aaasbegin asaabegin aaatbegin ataabegin ... tntnend nttnend \ 0 0.0 0.0 0.0 0.0 ... 0.0 0.0 1 0.0 0.0 0.0 0.0 ... 0.0 0.0 2 0.0 0.0 0.0 0.0 ... 0.0 0.0 3 0.0 0.0 0.0 0.0 ... 0.0 0.0 4 0.0 0.0 0.0 0.0 ... 0.0 0.0 ... ... ... ... ... ... ... ... 14706 0.0 0.0 0.0 0.0 ... 0.0 0.0 14707 0.0 0.0 0.0 0.0 ... 0.0 0.0 14708 0.0 0.0 0.0 0.0 ... 0.0 0.0 14709 0.0 0.0 0.0 0.0 ... 0.0 0.0 14710 0.0 0.0 0.0 0.0 ... 0.0 0.0 ntntend tnnnend ntnnend nntnend nnntend nnnnend target \ 0 0.0 0.0 1.0 1.0 1.0 2.0 1 1 0.0 0.0 1.0 1.0 0.0 1.0 1 2 0.0 0.0 1.0 1.0 1.0 2.0 1 3 0.0 0.0 1.0 1.0 1.0 2.0 1 4 0.0 0.0 1.0 1.0 1.0 2.0 1 ... ... ... ... ... ... ... ... 14706 0.0 0.0 0.0 0.0 0.0 1.0 0 14707 0.0 0.0 0.0 0.0 0.0 1.0 0 14708 0.0 0.0 0.0 0.0 0.0 0.0 0 14709 0.0 0.0 0.0 0.0 0.0 2.0 0 14710 0.0 0.0 0.0 0.0 0.0 0.0 0 prediction 0 NaN 1 NaN 2 NaN 3 NaN 4 NaN ... ... 
14706 0.0 14707 0.0 14708 1.0 14709 1.0 14710 1.0 [14711 rows x 6377 columns] 3 ###Markdown Use feature importance to create new feature matrix Only use this cell, if you already ran the programm and calculated feature importances ###Code import itertools import Bio import os import re import numpy as np import pandas as pd from Bio import SeqIO from Bio.Seq import Seq from itertools import combinations #import file with features foldernameoutput="Output/Tryptorubinlike_peptide" filenamefeatureimportances=foldernameoutput+"/importances.txt" filename_permutations=foldernameoutput+"/permutationsrefined.txt" howmanyfeatures=30 howmany_aa_add=0 def findallsequencemotifs(contents): listmotifs=re.findall("[rnbatsp]\w+", contents) return listmotifs #read file f=open(filenamefeatureimportances, "r") if f.mode == 'r': contents =f.read() listmotifs= findallsequencemotifs(contents) #only take first n of motifs (howmanyfeatures) listmotifsshort=[] for i in range (0,howmanyfeatures): listmotifsshort=listmotifsshort+[listmotifs[i]] #modify features #add letter def find_all_permutations_for_additional_letters(aa): combinations=list(itertools.combinations_with_replacement(["a","b","p","r","s","t","n"], aa)) permutations=list() for a in combinations: permutations=permutations+ list(itertools.permutations(a,aa)) permutations=([ "".join(a) for a in list(dict.fromkeys(permutations))]) return permutations addet_letters=find_all_permutations_for_additional_letters(howmany_aa_add) #add letters to all motifs newlist=[] for i in listmotifsshort: for a in addet_letters: newlist=newlist+[i+a] with open(filename_permutations, 'w') as f: for s in newlist: f.write(str(s) + '\n') ###Output ['snnn', 'ptrn', 'nntn', 'trnn', 'nbar', 'ntrn', 'nbnr', 'tnnn', 'prnn', 'npbb', 'tabr', 'ntnn', 'annp', 'rata', 'rsnn', 'abrn', 'ttab', 'trnp', 'btnn', 'pnpb'] ['aa', 'ab', 'ba', 'ap', 'pa', 'ar', 'ra', 'as', 'sa', 'at', 'ta', 'an', 'na', 'bb', 'bp', 'pb', 'br', 'rb', 'bs', 'sb', 'bt', 'tb', 'bn', 'nb', 'pp', 'pr', 'rp', 'ps', 'sp', 'pt', 'tp', 'pn', 'np', 'rr', 'rs', 'sr', 'rt', 'tr', 'rn', 'nr', 'ss', 'st', 'ts', 'sn', 'ns', 'tt', 'tn', 'nt', 'nn'] ['snnnaa', 'snnnab', 'snnnba', 'snnnap', 'snnnpa', 'snnnar', 'snnnra', 'snnnas', 'snnnsa', 'snnnat', 'snnnta', 'snnnan', 'snnnna', 'snnnbb', 'snnnbp', 'snnnpb', 'snnnbr', 'snnnrb', 'snnnbs', 'snnnsb', 'snnnbt', 'snnntb', 'snnnbn', 'snnnnb', 'snnnpp', 'snnnpr', 'snnnrp', 'snnnps', 'snnnsp', 'snnnpt', 'snnntp', 'snnnpn', 'snnnnp', 'snnnrr', 'snnnrs', 'snnnsr', 'snnnrt', 'snnntr', 'snnnrn', 'snnnnr', 'snnnss', 'snnnst', 'snnnts', 'snnnsn', 'snnnns', 'snnntt', 'snnntn', 'snnnnt', 'snnnnn', 'ptrnaa', 'ptrnab', 'ptrnba', 'ptrnap', 'ptrnpa', 'ptrnar', 'ptrnra', 'ptrnas', 'ptrnsa', 'ptrnat', 'ptrnta', 'ptrnan', 'ptrnna', 'ptrnbb', 'ptrnbp', 'ptrnpb', 'ptrnbr', 'ptrnrb', 'ptrnbs', 'ptrnsb', 'ptrnbt', 'ptrntb', 'ptrnbn', 'ptrnnb', 'ptrnpp', 'ptrnpr', 'ptrnrp', 'ptrnps', 'ptrnsp', 'ptrnpt', 'ptrntp', 'ptrnpn', 'ptrnnp', 'ptrnrr', 'ptrnrs', 'ptrnsr', 'ptrnrt', 'ptrntr', 'ptrnrn', 'ptrnnr', 'ptrnss', 'ptrnst', 'ptrnts', 'ptrnsn', 'ptrnns', 'ptrntt', 'ptrntn', 'ptrnnt', 'ptrnnn', 'nntnaa', 'nntnab', 'nntnba', 'nntnap', 'nntnpa', 'nntnar', 'nntnra', 'nntnas', 'nntnsa', 'nntnat', 'nntnta', 'nntnan', 'nntnna', 'nntnbb', 'nntnbp', 'nntnpb', 'nntnbr', 'nntnrb', 'nntnbs', 'nntnsb', 'nntnbt', 'nntntb', 'nntnbn', 'nntnnb', 'nntnpp', 'nntnpr', 'nntnrp', 'nntnps', 'nntnsp', 'nntnpt', 'nntntp', 'nntnpn', 'nntnnp', 'nntnrr', 'nntnrs', 'nntnsr', 'nntnrt', 'nntntr', 'nntnrn', 'nntnnr', 'nntnss', 'nntnst', 'nntnts', 
'nntnsn', 'nntnns', 'nntntt', 'nntntn', 'nntnnt', 'nntnnn', 'trnnaa', 'trnnab', 'trnnba', 'trnnap', 'trnnpa', 'trnnar', 'trnnra', 'trnnas', 'trnnsa', 'trnnat', 'trnnta', 'trnnan', 'trnnna', 'trnnbb', 'trnnbp', 'trnnpb', 'trnnbr', 'trnnrb', 'trnnbs', 'trnnsb', 'trnnbt', 'trnntb', 'trnnbn', 'trnnnb', 'trnnpp', 'trnnpr', 'trnnrp', 'trnnps', 'trnnsp', 'trnnpt', 'trnntp', 'trnnpn', 'trnnnp', 'trnnrr', 'trnnrs', 'trnnsr', 'trnnrt', 'trnntr', 'trnnrn', 'trnnnr', 'trnnss', 'trnnst', 'trnnts', 'trnnsn', 'trnnns', 'trnntt', 'trnntn', 'trnnnt', 'trnnnn', 'nbaraa', 'nbarab', 'nbarba', 'nbarap', 'nbarpa', 'nbarar', 'nbarra', 'nbaras', 'nbarsa', 'nbarat', 'nbarta', 'nbaran', 'nbarna', 'nbarbb', 'nbarbp', 'nbarpb', 'nbarbr', 'nbarrb', 'nbarbs', 'nbarsb', 'nbarbt', 'nbartb', 'nbarbn', 'nbarnb', 'nbarpp', 'nbarpr', 'nbarrp', 'nbarps', 'nbarsp', 'nbarpt', 'nbartp', 'nbarpn', 'nbarnp', 'nbarrr', 'nbarrs', 'nbarsr', 'nbarrt', 'nbartr', 'nbarrn', 'nbarnr', 'nbarss', 'nbarst', 'nbarts', 'nbarsn', 'nbarns', 'nbartt', 'nbartn', 'nbarnt', 'nbarnn', 'ntrnaa', 'ntrnab', 'ntrnba', 'ntrnap', 'ntrnpa', 'ntrnar', 'ntrnra', 'ntrnas', 'ntrnsa', 'ntrnat', 'ntrnta', 'ntrnan', 'ntrnna', 'ntrnbb', 'ntrnbp', 'ntrnpb', 'ntrnbr', 'ntrnrb', 'ntrnbs', 'ntrnsb', 'ntrnbt', 'ntrntb', 'ntrnbn', 'ntrnnb', 'ntrnpp', 'ntrnpr', 'ntrnrp', 'ntrnps', 'ntrnsp', 'ntrnpt', 'ntrntp', 'ntrnpn', 'ntrnnp', 'ntrnrr', 'ntrnrs', 'ntrnsr', 'ntrnrt', 'ntrntr', 'ntrnrn', 'ntrnnr', 'ntrnss', 'ntrnst', 'ntrnts', 'ntrnsn', 'ntrnns', 'ntrntt', 'ntrntn', 'ntrnnt', 'ntrnnn', 'nbnraa', 'nbnrab', 'nbnrba', 'nbnrap', 'nbnrpa', 'nbnrar', 'nbnrra', 'nbnras', 'nbnrsa', 'nbnrat', 'nbnrta', 'nbnran', 'nbnrna', 'nbnrbb', 'nbnrbp', 'nbnrpb', 'nbnrbr', 'nbnrrb', 'nbnrbs', 'nbnrsb', 'nbnrbt', 'nbnrtb', 'nbnrbn', 'nbnrnb', 'nbnrpp', 'nbnrpr', 'nbnrrp', 'nbnrps', 'nbnrsp', 'nbnrpt', 'nbnrtp', 'nbnrpn', 'nbnrnp', 'nbnrrr', 'nbnrrs', 'nbnrsr', 'nbnrrt', 'nbnrtr', 'nbnrrn', 'nbnrnr', 'nbnrss', 'nbnrst', 'nbnrts', 'nbnrsn', 'nbnrns', 'nbnrtt', 'nbnrtn', 'nbnrnt', 'nbnrnn', 'tnnnaa', 'tnnnab', 'tnnnba', 'tnnnap', 'tnnnpa', 'tnnnar', 'tnnnra', 'tnnnas', 'tnnnsa', 'tnnnat', 'tnnnta', 'tnnnan', 'tnnnna', 'tnnnbb', 'tnnnbp', 'tnnnpb', 'tnnnbr', 'tnnnrb', 'tnnnbs', 'tnnnsb', 'tnnnbt', 'tnnntb', 'tnnnbn', 'tnnnnb', 'tnnnpp', 'tnnnpr', 'tnnnrp', 'tnnnps', 'tnnnsp', 'tnnnpt', 'tnnntp', 'tnnnpn', 'tnnnnp', 'tnnnrr', 'tnnnrs', 'tnnnsr', 'tnnnrt', 'tnnntr', 'tnnnrn', 'tnnnnr', 'tnnnss', 'tnnnst', 'tnnnts', 'tnnnsn', 'tnnnns', 'tnnntt', 'tnnntn', 'tnnnnt', 'tnnnnn', 'prnnaa', 'prnnab', 'prnnba', 'prnnap', 'prnnpa', 'prnnar', 'prnnra', 'prnnas', 'prnnsa', 'prnnat', 'prnnta', 'prnnan', 'prnnna', 'prnnbb', 'prnnbp', 'prnnpb', 'prnnbr', 'prnnrb', 'prnnbs', 'prnnsb', 'prnnbt', 'prnntb', 'prnnbn', 'prnnnb', 'prnnpp', 'prnnpr', 'prnnrp', 'prnnps', 'prnnsp', 'prnnpt', 'prnntp', 'prnnpn', 'prnnnp', 'prnnrr', 'prnnrs', 'prnnsr', 'prnnrt', 'prnntr', 'prnnrn', 'prnnnr', 'prnnss', 'prnnst', 'prnnts', 'prnnsn', 'prnnns', 'prnntt', 'prnntn', 'prnnnt', 'prnnnn', 'npbbaa', 'npbbab', 'npbbba', 'npbbap', 'npbbpa', 'npbbar', 'npbbra', 'npbbas', 'npbbsa', 'npbbat', 'npbbta', 'npbban', 'npbbna', 'npbbbb', 'npbbbp', 'npbbpb', 'npbbbr', 'npbbrb', 'npbbbs', 'npbbsb', 'npbbbt', 'npbbtb', 'npbbbn', 'npbbnb', 'npbbpp', 'npbbpr', 'npbbrp', 'npbbps', 'npbbsp', 'npbbpt', 'npbbtp', 'npbbpn', 'npbbnp', 'npbbrr', 'npbbrs', 'npbbsr', 'npbbrt', 'npbbtr', 'npbbrn', 'npbbnr', 'npbbss', 'npbbst', 'npbbts', 'npbbsn', 'npbbns', 'npbbtt', 'npbbtn', 'npbbnt', 'npbbnn', 'tabraa', 'tabrab', 'tabrba', 'tabrap', 'tabrpa', 'tabrar', 
'tabrra', 'tabras', 'tabrsa', 'tabrat', 'tabrta', 'tabran', 'tabrna', 'tabrbb', 'tabrbp', 'tabrpb', 'tabrbr', 'tabrrb', 'tabrbs', 'tabrsb', 'tabrbt', 'tabrtb', 'tabrbn', 'tabrnb', 'tabrpp', 'tabrpr', 'tabrrp', 'tabrps', 'tabrsp', 'tabrpt', 'tabrtp', 'tabrpn', 'tabrnp', 'tabrrr', 'tabrrs', 'tabrsr', 'tabrrt', 'tabrtr', 'tabrrn', 'tabrnr', 'tabrss', 'tabrst', 'tabrts', 'tabrsn', 'tabrns', 'tabrtt', 'tabrtn', 'tabrnt', 'tabrnn', 'ntnnaa', 'ntnnab', 'ntnnba', 'ntnnap', 'ntnnpa', 'ntnnar', 'ntnnra', 'ntnnas', 'ntnnsa', 'ntnnat', 'ntnnta', 'ntnnan', 'ntnnna', 'ntnnbb', 'ntnnbp', 'ntnnpb', 'ntnnbr', 'ntnnrb', 'ntnnbs', 'ntnnsb', 'ntnnbt', 'ntnntb', 'ntnnbn', 'ntnnnb', 'ntnnpp', 'ntnnpr', 'ntnnrp', 'ntnnps', 'ntnnsp', 'ntnnpt', 'ntnntp', 'ntnnpn', 'ntnnnp', 'ntnnrr', 'ntnnrs', 'ntnnsr', 'ntnnrt', 'ntnntr', 'ntnnrn', 'ntnnnr', 'ntnnss', 'ntnnst', 'ntnnts', 'ntnnsn', 'ntnnns', 'ntnntt', 'ntnntn', 'ntnnnt', 'ntnnnn', 'annpaa', 'annpab', 'annpba', 'annpap', 'annppa', 'annpar', 'annpra', 'annpas', 'annpsa', 'annpat', 'annpta', 'annpan', 'annpna', 'annpbb', 'annpbp', 'annppb', 'annpbr', 'annprb', 'annpbs', 'annpsb', 'annpbt', 'annptb', 'annpbn', 'annpnb', 'annppp', 'annppr', 'annprp', 'annpps', 'annpsp', 'annppt', 'annptp', 'annppn', 'annpnp', 'annprr', 'annprs', 'annpsr', 'annprt', 'annptr', 'annprn', 'annpnr', 'annpss', 'annpst', 'annpts', 'annpsn', 'annpns', 'annptt', 'annptn', 'annpnt', 'annpnn', 'rataaa', 'rataab', 'rataba', 'rataap', 'ratapa', 'rataar', 'ratara', 'rataas', 'ratasa', 'rataat', 'ratata', 'rataan', 'ratana', 'ratabb', 'ratabp', 'ratapb', 'ratabr', 'ratarb', 'ratabs', 'ratasb', 'ratabt', 'ratatb', 'ratabn', 'ratanb', 'ratapp', 'ratapr', 'ratarp', 'rataps', 'ratasp', 'ratapt', 'ratatp', 'ratapn', 'ratanp', 'ratarr', 'ratars', 'ratasr', 'ratart', 'ratatr', 'ratarn', 'ratanr', 'ratass', 'ratast', 'ratats', 'ratasn', 'ratans', 'ratatt', 'ratatn', 'ratant', 'ratann', 'rsnnaa', 'rsnnab', 'rsnnba', 'rsnnap', 'rsnnpa', 'rsnnar', 'rsnnra', 'rsnnas', 'rsnnsa', 'rsnnat', 'rsnnta', 'rsnnan', 'rsnnna', 'rsnnbb', 'rsnnbp', 'rsnnpb', 'rsnnbr', 'rsnnrb', 'rsnnbs', 'rsnnsb', 'rsnnbt', 'rsnntb', 'rsnnbn', 'rsnnnb', 'rsnnpp', 'rsnnpr', 'rsnnrp', 'rsnnps', 'rsnnsp', 'rsnnpt', 'rsnntp', 'rsnnpn', 'rsnnnp', 'rsnnrr', 'rsnnrs', 'rsnnsr', 'rsnnrt', 'rsnntr', 'rsnnrn', 'rsnnnr', 'rsnnss', 'rsnnst', 'rsnnts', 'rsnnsn', 'rsnnns', 'rsnntt', 'rsnntn', 'rsnnnt', 'rsnnnn', 'abrnaa', 'abrnab', 'abrnba', 'abrnap', 'abrnpa', 'abrnar', 'abrnra', 'abrnas', 'abrnsa', 'abrnat', 'abrnta', 'abrnan', 'abrnna', 'abrnbb', 'abrnbp', 'abrnpb', 'abrnbr', 'abrnrb', 'abrnbs', 'abrnsb', 'abrnbt', 'abrntb', 'abrnbn', 'abrnnb', 'abrnpp', 'abrnpr', 'abrnrp', 'abrnps', 'abrnsp', 'abrnpt', 'abrntp', 'abrnpn', 'abrnnp', 'abrnrr', 'abrnrs', 'abrnsr', 'abrnrt', 'abrntr', 'abrnrn', 'abrnnr', 'abrnss', 'abrnst', 'abrnts', 'abrnsn', 'abrnns', 'abrntt', 'abrntn', 'abrnnt', 'abrnnn', 'ttabaa', 'ttabab', 'ttabba', 'ttabap', 'ttabpa', 'ttabar', 'ttabra', 'ttabas', 'ttabsa', 'ttabat', 'ttabta', 'ttaban', 'ttabna', 'ttabbb', 'ttabbp', 'ttabpb', 'ttabbr', 'ttabrb', 'ttabbs', 'ttabsb', 'ttabbt', 'ttabtb', 'ttabbn', 'ttabnb', 'ttabpp', 'ttabpr', 'ttabrp', 'ttabps', 'ttabsp', 'ttabpt', 'ttabtp', 'ttabpn', 'ttabnp', 'ttabrr', 'ttabrs', 'ttabsr', 'ttabrt', 'ttabtr', 'ttabrn', 'ttabnr', 'ttabss', 'ttabst', 'ttabts', 'ttabsn', 'ttabns', 'ttabtt', 'ttabtn', 'ttabnt', 'ttabnn', 'trnpaa', 'trnpab', 'trnpba', 'trnpap', 'trnppa', 'trnpar', 'trnpra', 'trnpas', 'trnpsa', 'trnpat', 'trnpta', 'trnpan', 'trnpna', 'trnpbb', 'trnpbp', 'trnppb', 'trnpbr', 'trnprb', 
'trnpbs', 'trnpsb', 'trnpbt', 'trnptb', 'trnpbn', 'trnpnb', 'trnppp', 'trnppr', 'trnprp', 'trnpps', 'trnpsp', 'trnppt', 'trnptp', 'trnppn', 'trnpnp', 'trnprr', 'trnprs', 'trnpsr', 'trnprt', 'trnptr', 'trnprn', 'trnpnr', 'trnpss', 'trnpst', 'trnpts', 'trnpsn', 'trnpns', 'trnptt', 'trnptn', 'trnpnt', 'trnpnn', 'btnnaa', 'btnnab', 'btnnba', 'btnnap', 'btnnpa', 'btnnar', 'btnnra', 'btnnas', 'btnnsa', 'btnnat', 'btnnta', 'btnnan', 'btnnna', 'btnnbb', 'btnnbp', 'btnnpb', 'btnnbr', 'btnnrb', 'btnnbs', 'btnnsb', 'btnnbt', 'btnntb', 'btnnbn', 'btnnnb', 'btnnpp', 'btnnpr', 'btnnrp', 'btnnps', 'btnnsp', 'btnnpt', 'btnntp', 'btnnpn', 'btnnnp', 'btnnrr', 'btnnrs', 'btnnsr', 'btnnrt', 'btnntr', 'btnnrn', 'btnnnr', 'btnnss', 'btnnst', 'btnnts', 'btnnsn', 'btnnns', 'btnntt', 'btnntn', 'btnnnt', 'btnnnn', 'pnpbaa', 'pnpbab', 'pnpbba', 'pnpbap', 'pnpbpa', 'pnpbar', 'pnpbra', 'pnpbas', 'pnpbsa', 'pnpbat', 'pnpbta', 'pnpban', 'pnpbna', 'pnpbbb', 'pnpbbp', 'pnpbpb', 'pnpbbr', 'pnpbrb', 'pnpbbs', 'pnpbsb', 'pnpbbt', 'pnpbtb', 'pnpbbn', 'pnpbnb', 'pnpbpp', 'pnpbpr', 'pnpbrp', 'pnpbps', 'pnpbsp', 'pnpbpt', 'pnpbtp', 'pnpbpn', 'pnpbnp', 'pnpbrr', 'pnpbrs', 'pnpbsr', 'pnpbrt', 'pnpbtr', 'pnpbrn', 'pnpbnr', 'pnpbss', 'pnpbst', 'pnpbts', 'pnpbsn', 'pnpbns', 'pnpbtt', 'pnpbtn', 'pnpbnt', 'pnpbnn'] 980 ###Markdown Machine learning ###Code import sklearn import pandas as pd import pickle import forestci as fci import numpy as np from sklearn.model_selection import cross_val_score from sklearn.tree import DecisionTreeClassifier from sklearn.metrics import confusion_matrix from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import precision_recall_curve from sklearn.metrics import plot_precision_recall_curve from sklearn.metrics import average_precision_score import matplotlib.pyplot as plt from sklearn.metrics import balanced_accuracy_score from sklearn.model_selection import train_test_split from imblearn.over_sampling import RandomOverSampler ros = RandomOverSampler(random_state=0) #define max depth of decision tree maxd=20 foldernameoutput="Output/Tryptorubinlike_peptide" pathcompletetable=foldernameoutput+"/completetablecomplete.csv" completetable=pd.read_csv(pathcompletetable) # define target and features completetable.drop(columns=['prediction'],inplace=True) x_data = completetable.loc[:, completetable.columns != 'target' ] y_data = completetable['target'] # split into training and test set from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(x_data, y_data ,test_size = 0.5, shuffle=True) #resample to balance x_train, y_train = ros.fit_resample(x_train, y_train) # use decision tree and linear regression classifier for predictions forest=RandomForestClassifier(max_depth=maxd,min_samples_leaf=1,class_weight="balanced") tree= DecisionTreeClassifier(max_depth=maxd,class_weight="balanced",min_samples_leaf=1) # train classifiers tree = tree.fit(x_train,y_train) forest = forest.fit(x_train,y_train) #predict for test set test_predict=tree.predict(x_test) test_predictf=forest.predict(x_test) #predict for complete set y_pred = forest.predict(x_data) completetable['prediction']=y_pred y_scores = forest.predict_proba(x_test)[:, 1] p, r, thresholds = precision_recall_curve(y_test, y_scores) #calculate accuracy average_precision = average_precision_score(y_test, test_predict) average_precisionf = average_precision_score(y_test, test_predictf) baccu=balanced_accuracy_score(y_test, test_predict) baccuf=balanced_accuracy_score(y_test, test_predictf) scores = 
cross_val_score(tree, x_data, y_data, cv=5, scoring='f1_macro')
scoresf = cross_val_score(forest, x_data, y_data, cv=5, scoring='f1_macro')

CM = confusion_matrix(y_test, test_predict)
TN = CM[0][0]
FN = CM[1][0]
TP = CM[1][1]
FP = CM[0][1]
CMf = confusion_matrix(y_test, test_predictf)
TNf = CMf[0][0]
FNf = CMf[1][0]
TPf = CMf[1][1]
FPf = CMf[0][1]

# probability that a "positive" label is actually positive (precision):
pt = TP/(TP+FP)
# probability of a tryptorubin-like P450 being found (recall):
t = TP/(FN+TP)
# probability that a "positive" label is actually positive (precision):
pf = TPf/(TPf+FPf)
# probability of a tryptorubin-like P450 being found (recall):
tf = TPf/(FNf+TPf)

print("Tree Score:", tree.score(x_test, y_test))
print("Crossvalidation scores tree:", scores)
print("Tree: True negatives:", TN, " False Negatives:", FN, "True positives:", TP, " False Positives:", FP)
print("Probability of positive labeled actually positive:", pt)
print("Probability of tryptorubin-like P450 being found", t)
print("Forest Score:", forest.score(x_test, y_test))
print("Crossvalidation scores forest:", scoresf)
print("Forest Balanced Accuracy Score:", baccuf)
print("Forest: True negatives:", TNf, " False Negatives:", FNf, "True positives:", TPf, " False Positives:", FPf)
print("Forest probability of positive labeled actually positive:", pf)
print("Forest probability of tryptorubin-like P450 being found", tf)
print('Forest Average precision-recall score: {0:0.2f}'.format(average_precisionf))

# plot precision-recall curves
fig, ax = plt.subplots()
disp = plot_precision_recall_curve(forest, x_test, y_test, ax=ax, color='black')
disp.ax_.set_title('Forest 2-class Precision-Recall curve: '
                   'AP={0:0.2f}'.format(average_precisionf))
plt.legend(bbox_to_anchor=(1, 1), loc="upper left")
plt.savefig("precisionrecalltryptorubin.png", format="png")

def adjusted_classes(y_scores, t):
    return [1 if y >= t else 0 for y in y_scores]

def precision_recall_threshold(p, r, thresholds, t=0.5):
    # generate new class predictions based on the adjusted_classes
    # function above and view the resulting confusion matrix.
    y_pred_adj = adjusted_classes(y_scores, t)
    # plot the curve
    plt.figure(figsize=(14, 14))
    plt.title("Precision and Recall curve ^ = current threshold")
    plt.step(r, p, color='black', alpha=0.2, where='post')
    plt.fill_between(r, p, step='post', alpha=0.2, color='black')
    plt.ylim([0.5, 1.01])
    plt.xlim([0.5, 1.01])
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    # plot the current threshold on the line (the marker the title refers to)
    close_default_clf = np.argmin(np.abs(thresholds - t))
    plt.plot(r[close_default_clf], p[close_default_clf], '^', c='k', markersize=15)

# plot precision-recall curves
predict = forest.predict_proba(x_test)
idx_Tryptorubinlike_peptide = np.where(y_test == 1)[0]
idx_nonTryptorubinlike_peptide = np.where(y_test == 0)[0]

# Histogram predictions without error bars:
fig, ax = plt.subplots(1)
ax.hist(predict[idx_Tryptorubinlike_peptide, 1], histtype='step',
        label='Tryptorubin-like peptide P450')
ax.hist(predict[idx_nonTryptorubinlike_peptide, 1], histtype='step',
        label='Non-tryptorubin-like peptide P450')
ax.set_xlabel('Prediction (tryptorubin-like peptide p450 probability)')
ax.set_ylabel('Number of observations')
plt.legend()
plt.savefig("histtlp.png", format="png", dpi=1000)
plt.show()

# Calculate the variance
variance = fci.random_forest_error(forest, x_train, x_test)
fig, ax = plt.subplots(1)
ax.scatter(predict[idx_Tryptorubinlike_peptide, 1],
           np.sqrt(variance[idx_Tryptorubinlike_peptide]),
           label='Tryptorubin-like peptide P450')
ax.scatter(predict[idx_nonTryptorubinlike_peptide, 1],
           np.sqrt(variance[idx_nonTryptorubinlike_peptide]),
           label='Non-tryptorubin-like peptide P450')
ax.set_xlabel('Prediction (tryptorubin-like peptide p450 probability)')
ax.set_ylabel('Standard deviation')
plt.legend()
plt.savefig("variancetlp.png", format="png", dpi=1000)
plt.show()

predict = np.array(predict[:, 1])
# precision_recall_curve returns three arrays (precision, recall, thresholds)
precision_rf, recall_rf, _ = precision_recall_curve(y_test.ravel(), predict)
plt.plot(recall_rf, precision_rf, color='black')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.savefig("recalldiagrtlp.png", format="png", dpi=1000)
plt.show()
# precision_recall_threshold(p, r, thresholds, 0.7)

# save trained classifiers
filename = foldernameoutput + '/decisiontreeclassifier.sav'
pickle.dump(tree, open(filename, 'wb'))
filename = foldernameoutput + '/forestclassifier.sav'
pickle.dump(forest, open(filename, 'wb'))

pathcompletetable = foldernameoutput + "/completetable.csv"
completetable.to_csv(pathcompletetable, index=False)
###Output 7355 14658 Max depth: 20 [0. 0. 0. ... 0. 0. 0.] Tree Score: 0.9990483958673192 Crossvalidation scores tree: [0.71360452 0.94982947 0.91150904 0.94982947 0.8494884 ] Tree: True negatives: 7331 False Negatives: 6 True positives: 18 False Positives: 1 Probability of positive labeled actually positive: 0.9473684210526315 Probability of tryptorubin-like P450 being found 0.75 Forest Score: 0.9994562262098967 Crossvalidation scores forest: [0.97359899 1. 
0.91150904 0.94427397 0.94427397] Forest Balanced Accuracy Score: 0.9166666666666667 Forest: True negatives: 7332 False Negatives: 4 True positives: 20 False Positives: 0 Forest probability of positive labeled actually positive: 1.0 Forest probability of tryptorubin-like P450 being found 0.8333333333333334 Forest Average precision-recall score: 0.83 [ 488 618 850 1315 1538 1565 2220 2262 2717 2797 2907 3334 3732 3795 3915 3965 4493 5540 5704 5828 6028 6113 6793 6967] ###Markdown Create a FASTA file of all P450s that are false positives ###Code
import Bio
import numpy as np
import pandas as pd
from Bio import SeqIO
from Bio.Seq import Seq
import pickle

foldernameoutput = "Output/Tryptorubinlike_peptide"
name_450nonTryptorubinlikes = 'Fasta/all p450.fasta'
pathcompletetable = foldernameoutput + "/completetable.csv"
completetable = pd.read_csv(pathcompletetable)
p450nonTryptorubinlike = completetable.loc[completetable['target'] == 0]
listfalsepositive = []

# walk all non-tryptorubin-like P450s; if the classifier says "tryptorubin-like",
# the record is a false positive and gets written to the FASTA file
counter = -1
for seq_record in SeqIO.parse(name_450nonTryptorubinlikes, "fasta"):
    counter = counter + 1
    prediction = p450nonTryptorubinlike.iloc[counter, -1]
    if prediction == 1:
        listfalsepositive.append(seq_record)
namelistfalsepositive = foldernameoutput + "/listfalsepositive.fasta"
SeqIO.write(listfalsepositive, namelistfalsepositive, "fasta")
###Output _____no_output_____ ###Markdown Print Decision Tree ###Code
import sklearn
import pickle
from sklearn.tree import export_graphviz
import pydotplus
import pandas as pd
import graphviz

foldernameoutput = "Output/Tryptorubinlike_peptide"
filename = foldernameoutput + '/forestclassifier.sav'  # must match the name used when saving
forest = pickle.load(open(filename, 'rb'))
estimator = forest.estimators_[5]  # visualize a single tree of the ensemble
dot_data = export_graphviz(estimator,
                           feature_names=x_data.columns,  # x_data from the machine-learning cell above
                           out_file=None, filled=True, rounded=True)
pydot_graph = pydotplus.graph_from_dot_data(dot_data)
name_pdf = foldernameoutput + '/tree.pdf'
pydot_graph.write_pdf(name_pdf)
###Output _____no_output_____ ###Markdown Analyse sequences with fragment fastas ###Code
import sklearn
import pandas as pd
import pickle
import Bio
import numpy as np
from Bio import SeqIO
from Bio.Seq import Seq
from sklearn.metrics import roc_curve, precision_recall_curve, auc, make_scorer, recall_score, accuracy_score, precision_score, confusion_matrix

# enter names of your files here
filenamebegin = 'Fasta/alignedncbi/allp450ncbiprot-begint-trimmed.fasta'
filenamesbr1 = 'Fasta/alignedncbi/allp450ncbiprot-sbr1t-trimmed.fasta'
filenamesbr2 = 'Fasta/alignedncbi/allp450ncbiprot-sbr2t-trimmed.fasta'
filenamecore = 'Fasta/alignedncbi/allp450ncbiprot-coret-trimmed.fasta'
filenameend = 'Fasta/alignedncbi/allp450ncbiprot-endt-trimmed.fasta'
foldernameoutput = "Output/Tryptorubinlike_peptide"
filename_permutations = foldernameoutput + "/permutations.txt"
filename = foldernameoutput + '/forestclassifier.sav'
fragments = ("begin", "sbr1", "sbr2", "core", "end")
filename_index = "Output/permutationsof4+locus/newindex.txt"

with open(filename_index, 'r') as f:
    index = [line.rstrip('\n') for line in f]
with open(filename_permutations, 'r') as f:
    permutations = [line.rstrip('\n') for line in f]

def easysequence(sequence):
    # creates a string out of the sequence, stating only whether each AA is acidic (a), basic (b),
    # polar (p), neutral/unpolar (n), aromatic (r), Cysteine (s) or Proline (t)
    seqstr = str(sequence)
    easylist = []
    for i in seqstr:
        if i == 'E' or i == 'D':
            easylist = easylist + ['a']
        if i == 'K' or i == 'R' or i == 'H':
            easylist = easylist + ['b']
        if i == 'S' or i == 'T' or i == 'N' or i == 'Q':
            easylist = easylist + ['p']
        if i == 'F' or i == 'Y' or i == 'W':
            easylist = easylist + ['r']
        if i == 'C':
            easylist = easylist + ['s']
        if i == 'P':
            easylist = easylist + ['t']
        if i == 'G' or i == 'A' or i == 'V' or i == 'L' or i == 'I' or i == 'M':
            easylist = easylist + ['n']
    separator = ''
    return separator.join(easylist)

def merge_two_dicts(x, y):
    z = x.copy()  # start with x's keys and values
    z.update(y)   # overwrite/extend with y's keys and values
    return z

# reads the fragment files and creates the feature table
tablep450 = pd.DataFrame()
# lists for all different files
lbegin = []
lend = []
lsbr1 = []
lsbr2 = []
lcore = []
for seq_record in SeqIO.parse(filenamebegin, "fasta"):
    lbegin.append(str(seq_record.seq))
for seq_record in SeqIO.parse(filenamesbr1, "fasta"):
    lsbr1.append(str(seq_record.seq))
for seq_record in SeqIO.parse(filenamesbr2, "fasta"):
    lsbr2.append(str(seq_record.seq))
for seq_record in SeqIO.parse(filenamecore, "fasta"):
    lcore.append(str(seq_record.seq))
for seq_record in SeqIO.parse(filenameend, "fasta"):
    lend.append(str(seq_record.seq))

# range(0, len(lbegin)) rather than len(lbegin)-1: the old bound silently
# dropped the last record, which also misaligned the prediction loop below
for s in range(0, len(lbegin)):
    seqbegin = lbegin[s]
    seqsbr1 = lsbr1[s]
    seqsbr2 = lsbr2[s]
    seqcore = lcore[s]
    seqend = lend[s]
    listfragments = [easysequence(str(seqbegin)), easysequence(str(seqsbr1)),
                     easysequence(str(seqsbr2)), easysequence(str(seqcore)),
                     easysequence(str(seqend))]
    new_row = {}
    c = 0
    for frag in listfragments:
        f = fragments[c]
        c = c + 1
        for i in permutations:
            n = i + f
            new_row = merge_two_dicts(new_row, {n: frag.count(i)})
    tablep450 = tablep450.append(new_row, ignore_index=True)

tablep450n = tablep450.reindex(columns=index)
tablep450n.to_csv("Output/Tryptorubinlike_peptide/tablep450allncbi", index=False)

listpositive = []
forest = pickle.load(open(filename, 'rb'))
predictions = forest.predict_proba(tablep450n)

# write every sequence the classifier scores above 0.5 ("tryptorubin-like") to a FASTA file
counter = -1
predictionsfile = pd.DataFrame(columns=['title', 'probability'])
for seq_record in SeqIO.parse(filenamebegin, "fasta"):
    counter = counter + 1
    prediction = predictions[counter][1]
    if prediction > 0.5:
        row = {'title': seq_record.id, 'probability': prediction}
        predictionsfile = predictionsfile.append(row, ignore_index=True)
        listpositive.append(seq_record)
namelistpositive = foldernameoutput + "/listpositivetest.fasta"
SeqIO.write(listpositive, namelistpositive, "fasta")
pathcompletetable = foldernameoutput + "/predictiontabletest.csv"
predictionsfile.to_csv(pathcompletetable, index=False)
###Output 58119 ###Markdown Analyse Tree adapted from https://scikit-learn.org/stable/auto_examples/inspection/plot_permutation_importance.html#sphx-glr-auto-examples-inspection-plot-permutation-importance-py ###Code
import matplotlib.pyplot as plt
import numpy as np
import pickle
import sklearn

# how many rows?
rows = 20
foldernameoutput = "Output/Tryptorubinlike_peptide"
filename = foldernameoutput + '/forestclassifier.sav'
filenameout = foldernameoutput + "/importances.txt"
fname = foldernameoutput + "/importances.pdf"
forest = pickle.load(open(filename, 'rb'))
feature_names = x_data.columns
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_], axis=0)
indices = np.argsort(importances)[::-1]

# Print the feature ranking
print("Feature ranking:")
for f in range(0, rows):
    print(feature_names[indices[f]], str(importances[indices[f]]))

# Plot the impurity-based feature importances of the forest (top `rows` features;
# the bar plot needs one x position per bar, not a single scalar)
plt.figure()
plt.title("Feature importances")
plt.bar(range(rows), importances[indices][:rows], color="r",
        yerr=std[indices][:rows], align="center")
plt.xticks(range(rows), feature_names[indices][:rows], rotation=90)
plt.show()

with open(filenameout, 'w') as file:
    for f in range(0, rows):
        file.write(str(feature_names[indices[f]]) + " " + str(importances[indices[f]]) + '\n')
###Output RandomForestClassifier(class_weight='balanced', max_depth=20) Feature ranking: nttpcore 0.039171951607479924 pbbacore 0.03361910248134424 ntnnend 0.03161578688708081 nanpsbr1 0.026053122072174268 trnbcore 0.019034033456644863 tprpcore 0.018232256952108908 tnnaend 0.016191423304965303 nnanend 0.015707965168367404 nannend 0.01528752419158246 brnnsbr2 0.014514814672473356 nnrbsbr2 0.014441357815089602 rpnncore 0.014070110222691265 btabcore 0.013802081675247355 nnracore 0.01359654746057435 bbatcore 0.01322811949106521 nnbncore 0.013037636145589114 bbnnend 0.012901437799194426 nnnnsbr2 0.012173186868602843 bnntcore 0.011822029568558186 prbtsbr1 0.011529598120261579 ###Markdown List all P450s that contain certain motifs ###Code
# type in the motifs (easycode) that the P450 should contain
yes = ()
# type in motifs that the P450 should not contain
no = ()
filename = 'allp450ncbi.fasta'
foldernameoutput = "Output/Tryptorubinlike_peptide"
filenameoutput = foldernameoutput + "/" + str(yes) + ".csv"

def easysequence(sequence):
    # creates a string out of the sequence, stating only whether each AA is acidic (a), basic (b),
    # polar (p), neutral/unpolar (n), aromatic (r), Cysteine (s) or Proline (t)
    seqstr = str(sequence)
    easylist = []
    for i in seqstr:
        if i == 'E' or i == 'D':
            easylist = easylist + ['a']
        if i == 'K' or i == 'R' or i == 'H':
            easylist = easylist + ['b']
        if i == 'S' or i == 'T' or i == 'N' or i == 'Q':
            easylist = easylist + ['p']
        if i == 'F' or i == 'Y' or i == 'W':
            easylist = easylist + ['r']
        if i == 'C':
            easylist = easylist + ['s']
        if i == 'P':
            easylist = easylist + ['t']
        if i == 'G' or i == 'A' or i == 'V' or i == 'L' or i == 'I' or i == 'M':
            easylist = easylist + ['n']
    separator = ''
    return separator.join(easylist)

newfile = pd.DataFrame(columns=['info', 'sequence'])
for i in yes:
    newfile[i] = []
for seq_record in SeqIO.parse(filename, "fasta"):
    easyseq = easysequence(seq_record.seq)  # compute the encoding before counting motifs
    new_row = {'info': seq_record.id, 'sequence': str(seq_record.seq)}
    countyes = 0
    countno = 0
    for i in yes:
        new_row = merge_two_dicts(new_row, {i: easyseq.count(i)})  # merge_two_dicts from the cell above
        countyes = countyes + easyseq.count(i)
    for i in no:
        countno = countno + easyseq.count(i)
    if countyes > 0 and countno == 0:
        newfile = newfile.append(new_row, ignore_index=True)
newfile.to_csv(filenameoutput, index=False)
###Output _____no_output_____
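###Markdown The trained classifier can also be reloaded in a fresh session. Below is a minimal sketch — it assumes the forest saved above and the feature table written by the fragment-analysis cell both exist, and that the table's columns are already in the training order (which the reindex step guarantees): ###Code
import pickle
import pandas as pd

# reload the saved random forest and the NCBI feature table produced above
forest = pickle.load(open("Output/Tryptorubinlike_peptide/forestclassifier.sav", 'rb'))
features = pd.read_csv("Output/Tryptorubinlike_peptide/tablep450allncbi")
# probability of class 1 (tryptorubin-like) for the first five sequences
print(forest.predict_proba(features.head())[:, 1])
###Output _____no_output_____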
meta-learning.ipynb
###Markdown Implementation of Contrastive Loss.Users are free to copy and distribute only with citation.https://github.com/ShravanAnandk7/Keras-Image-Embeddings-using-Contrastive-LossLast updated 09 Jan 2022TODO: 1) Add cosine distance metric 2) Add Batch-Hard and Semi-Hard triplet generation 3) Resize with padding in pre-processing pipe Import libraries ###Code import os import numpy as np import pandas as pd from functools import partial from cv2 import cv2 import tensorflow as tf import random import itertools import tensorflow.keras.utils as KU import tensorflow.keras.layers as KL import tensorflow.keras.models as KM import tensorflow.keras.losses as KLo import tensorflow.keras.optimizers as KO import tensorflow.keras.backend as K from tensorflow.python.keras.layers.pooling import GlobalAveragePooling2D from imgaug import augmenters as arg ###Output _____no_output_____ ###Markdown Parameters ###Code BASE_DIR = os.getcwd() #os.path.dirname(__file__) os.chdir(BASE_DIR) MODEL_DIR = os.path.join(BASE_DIR,"models") DATASET_DIR = os.path.join(BASE_DIR,"datasets") BATCH_SIZE = 10 NUM_EPOCHS = 2 INPUT_SHAPE = 299 EMBEDDING_SIZE = 32 LOSS_MARGIN = 0.4 HUBER_DELTA = 0.5 ###Output _____no_output_____ ###Markdown Define image augmenter ###Code AUGMENTATION = arg.Sequential( [ arg.OneOf([arg.Fliplr(0.5), arg.Flipud(0.5)]), arg.Affine(scale = (0.85, 1.05),name="scale"), arg.Rotate(rotate = (-10,10),name = "1a2_rotate_1"), arg.TranslateX(percent = (-0.05, 0.05), name= "1a3_translatex_1"), arg.TranslateY(percent = (-0.05, 0.05), name= "1a4_translatey_1"), arg.OneOf([ arg.Sometimes(0.9,arg.MultiplyAndAddToBrightness(mul=(0.70, 1.30), add=(-5, 5)),name="2a1_MulAddBrightness"), arg.MultiplySaturation(mul=(0.95,1.05),name="2b3_MulSat"), arg.MultiplyAndAddToBrightness(mul=(1,1.5), add=(-10,10),name="2b4_MulAddBrightness") ]), arg.Sometimes(0.2,arg.GaussianBlur(sigma = (0.0, 1.5)),name="3a1_gaussian_blur_0.2") ] ) ###Output _____no_output_____ ###Markdown Define datagenerator class ###Code class FewShotTripletDataGen(KU.Sequence): def __init__(self,path,image_dim, batch_size = 1, shuffle = True, augmenter = None): self.image_dim = image_dim self.batch_size = batch_size self.shuffle = shuffle self.augmenter = augmenter categories = os.listdir(path) folder_paths = list(map(partial(os.path.join,path),categories)) images = list(map(os.listdir, folder_paths)) self.dataframe = pd.DataFrame( { "categories" :categories, "folder path" : folder_paths, "images": images, "number": len(images) }) # print(self.dataframe) print("Categories found",self.dataframe.__len__()) self.duplets = list(itertools.permutations(np.arange(len(self.dataframe)),2)) self.triplets = [((x,a),(x,b),(y,c)) for x,y in self.duplets for a,b,c in list(itertools.product(np.arange(self.dataframe.loc[x]["number"]), np.arange(self.dataframe.loc[x]["number"]), np.arange(self.dataframe.loc[y]["number"]))) if (x,a) != (x,b)] # print(list(itertools.permutations(np.arange(self.dataframe.loc[0]["number"]),2))) print(len(self.triplets)) self.on_epoch_end() print("Total triplets : ",len(self.triplets)) def __len__(self): return int(np.floor(len(self.triplets) / self.batch_size)) def on_epoch_end(self): 'Updates indexes after each epoch' self.indexes = np.arange(len(self.triplets)) if self.shuffle == True: np.random.shuffle(self.indexes) def __getitem__(self, index): """ Outputs = [Anchor, Positive, Negative] with shape (Batch, 3, Height, Width, 3) """ batch_indexes = self.indexes[index*self.batch_size:(index+1) *self.batch_size] X, y = 
self.__batch_all_triplet_data_gen(batch_indexes)
        return X, y

    def __batch_all_triplet_data_gen(self, batch_indexes):
        anchor_list = []
        positive_list = []
        negative_list = []
        # print("batch Indices : ", batch_indexes)
        for row_id in batch_indexes:
            anchor = os.path.join(self.dataframe.loc[self.triplets[row_id][0][0]]["folder path"],
                                  self.dataframe.loc[self.triplets[row_id][0][0]]["images"][self.triplets[row_id][0][1]])
            positive = os.path.join(self.dataframe.loc[self.triplets[row_id][1][0]]["folder path"],
                                    self.dataframe.loc[self.triplets[row_id][1][0]]["images"][self.triplets[row_id][1][1]])
            negative = os.path.join(self.dataframe.loc[self.triplets[row_id][2][0]]["folder path"],
                                    self.dataframe.loc[self.triplets[row_id][2][0]]["images"][self.triplets[row_id][2][1]])
            # print(anchor,'\n',positive,'\n',negative)
            anchor = self.pre_process(self.__augmenter(cv2.imread(anchor)))
            positive = self.pre_process(self.__augmenter(cv2.imread(positive)))
            negative = self.pre_process(self.__augmenter(cv2.imread(negative)))
            anchor_list.append(anchor)
            positive_list.append(positive)
            negative_list.append(negative)
        return (np.asarray(anchor_list), np.asarray(positive_list), np.asarray(negative_list)), None

    def pre_process(self, image):
        """
        Model specific image preprocessing function
        TODO: Resize with crop and padding
        """
        image = cv2.resize(image, self.image_dim)
        image = image / 127.5 - 1  # scale pixels to [-1, 1]
        return image

    def __augmenter(self, image):
        if self.augmenter is not None:
            image_shape = image.shape
            image = self.augmenter.augment_image(image)
            # Augmentation shouldn't change image size
            assert image.shape == image_shape
        return image

# train_gen = FewShotTripletDataGen(path = os.path.join(
#             DATASET_DIR,"few-shot-dataset","train"),
#             image_dim=(INPUT_SHAPE,INPUT_SHAPE),
#             batch_size=BATCH_SIZE,augmenter=AUGMENTATION)
# train_gen[0]
###Output _____no_output_____ ###Markdown Define a custom loss layer implementing the triplet (contrastive-style) loss ###Code
class TripletLossLayer(KL.Layer):
    def __init__(self, margin=1, delta=1, **kwargs):
        self.margin = margin
        self.huber_delta = delta
        super(TripletLossLayer, self).__init__(**kwargs)

    def euclidean_distance(self, x, y):
        """ Euclidean distance metric """
        return K.sum(K.square(x - y), axis=-1)

    def cosine_distance(self, x, y):
        """ Cosine distance metric (TODO) """
        pass

    def triplet_loss(self, inputs):
        anchor, positive, negative = inputs
        # use the full batch tensors; indexing [0] here would compute the loss
        # on only the first sample of every batch
        p_dist = self.euclidean_distance(anchor, positive)
        n_dist = self.euclidean_distance(anchor, negative)
        t_loss = K.maximum(p_dist - n_dist + self.margin, 0)
        # Huber loss
        L1_loss = K.switch(t_loss < self.huber_delta, 0.5 * t_loss ** 2,
                           self.huber_delta * (t_loss - 0.5 * self.huber_delta))
        return K.sum(L1_loss)

    def call(self, inputs):
        loss = self.triplet_loss(inputs)
        self.add_loss(loss)
        return loss  # must return a tensor so the layer can serve as a model output
###Output _____no_output_____ ###Markdown Define the base network keras model to generate embeddings. Replace the base_network function with your custom model architecture ###Code
def base_network():
    """ Base CNN model trained for embedding extraction """
    return (KM.Sequential(
        [
            KL.Input(shape=(INPUT_SHAPE, INPUT_SHAPE, 3)),
            KL.Conv2D(8, (3, 3)),
            KL.ReLU(),
            KL.MaxPool2D(pool_size=(1, 2)),
            # KL.BatchNormalization(),
            KL.Conv2D(16, (3, 3)),
            KL.ReLU(),
            KL.MaxPool2D(pool_size=(2, 1)),
            KL.BatchNormalization(),
            KL.Conv2D(32, (3, 3)),
            KL.ReLU(),
            KL.MaxPool2D(pool_size=(1, 1)),
            KL.GlobalAveragePooling2D(),
            # Don't change the layers below
            KL.Dense(EMBEDDING_SIZE, activation='relu'),
            # KL.Lambda(lambda x: K.l2_normalize(x, axis=-1))
        ]))

base = base_network()
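# Side note: with the KL.Lambda l2-normalization above left commented out, the
# embeddings are not unit-length, so the squared Euclidean distances inside
# TripletLossLayer are unbounded. Re-enabling it would bound each distance to
# [0, 4] (since ||x - y||^2 = 2 - 2*cos(theta) for unit vectors) and can make
# the margin easier to tune — an observation, not a required change.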
print(base.summary()) ###Output Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 297, 297, 8) 224 _________________________________________________________________ re_lu (ReLU) (None, 297, 297, 8) 0 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 297, 148, 8) 0 _________________________________________________________________ conv2d_1 (Conv2D) (None, 295, 146, 16) 1168 _________________________________________________________________ re_lu_1 (ReLU) (None, 295, 146, 16) 0 _________________________________________________________________ max_pooling2d_1 (MaxPooling2 (None, 147, 146, 16) 0 _________________________________________________________________ batch_normalization (BatchNo (None, 147, 146, 16) 64 _________________________________________________________________ conv2d_2 (Conv2D) (None, 145, 144, 32) 4640 _________________________________________________________________ re_lu_2 (ReLU) (None, 145, 144, 32) 0 _________________________________________________________________ max_pooling2d_2 (MaxPooling2 (None, 145, 144, 32) 0 _________________________________________________________________ global_average_pooling2d (Gl (None, 32) 0 _________________________________________________________________ dense (Dense) (None, 32) 1056 ================================================================= Total params: 7,152 Trainable params: 7,120 Non-trainable params: 32 _________________________________________________________________ None ###Markdown Load trained model weights ###Code base.load_weights(os.path.join(BASE_DIR, "models","few-shot.h5")) ###Output _____no_output_____ ###Markdown Triplet network model ###Code def triplet_network(base): Anchor = KL.Input(shape=(INPUT_SHAPE,INPUT_SHAPE,3),name= "anchor_input") Positive = KL.Input(shape=(INPUT_SHAPE,INPUT_SHAPE,3),name= "positive_input") Negative = KL.Input(shape=(INPUT_SHAPE,INPUT_SHAPE,3),name= "negative_input") Anchor_Emb = base(Anchor) Positive_Emb = base(Positive) Negative_Emb = base(Negative) loss = TripletLossLayer(LOSS_MARGIN,HUBER_DELTA)([Anchor_Emb,Positive_Emb,Negative_Emb]) model = KM.Model(inputs = [Anchor,Positive,Negative], outputs=loss) return model triplet_model = triplet_network(base) ###Output _____no_output_____ ###Markdown Train model and save the weights ###Code optimizer = KO.Adam(lr = 0.001) triplet_model.compile(loss=None,optimizer=optimizer) print("Train Data :") train_gen = FewShotTripletDataGen(path = os.path.join( DATASET_DIR,"few-shot-dataset","train"), image_dim=(INPUT_SHAPE,INPUT_SHAPE), batch_size=BATCH_SIZE,augmenter=AUGMENTATION) print("Test Data :") valid_gen = FewShotTripletDataGen(path = os.path.join( DATASET_DIR,"few-shot-dataset","test"), image_dim=(INPUT_SHAPE,INPUT_SHAPE), batch_size=BATCH_SIZE) triplet_model.fit(x=train_gen, batch_size=BATCH_SIZE, validation_data=valid_gen, epochs=NUM_EPOCHS, workers=1) # Save trained model weights base.save_weights(os.path.join(BASE_DIR, "models","few-shot.h5")) ###Output Train Data : Categories found 9 52488 Total triplets : 52488 Test Data : Categories found 10 90000 Total triplets : 90000 Epoch 1/2 63/5248 [..............................] 
- ETA: 4:22:54 - loss: 0.0876 ###Markdown Generate embeddings from trained model ###Code image_path = os.path.join( DATASET_DIR,"few-shot-dataset","test","cat","0013.jpg") print(image_path) input = train_gen.pre_process(cv2.imread(image_path)) output_embeddings = base.predict(np.expand_dims(input,axis=0)) print(output_embeddings) ###Output d:\06 Development\05 Git Reps\Keras-Image-Embeddings-using-Contrastive-Loss\datasets\few-shot-dataset\test\cat\0013.jpg [[0.25258532 0.10275707 0. 0.19828758 0.03829682 0.19903469 0. 0.08477098 0. 0.17425714 0. 0. 0. 0.16769291 0. 0. 0. 0.1337439 0. 0. 0. 0. 0. 0.31681383 0. 0.28377467 0. 0. 0.10693571 0. 0. 0.0271601 ]]
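###Markdown A quick sanity check of the embedding space: images from the same class should sit closer (in the squared Euclidean distance used by the loss above) than images from different classes. The sketch below reuses `image_path`, `train_gen` and `base` from earlier cells; the two extra file paths are assumptions — substitute any images from your own dataset. ###Code
# Sketch only: compare embedding distances for a hypothetical image pair.
path_same = os.path.join(DATASET_DIR, "few-shot-dataset", "test", "cat", "0014.jpg")   # assumed path
path_other = os.path.join(DATASET_DIR, "few-shot-dataset", "test", "dog", "0001.jpg")  # assumed path
batch = np.stack([train_gen.pre_process(cv2.imread(p))
                  for p in [image_path, path_same, path_other]])
emb = base.predict(batch)
print("same-class distance: ", np.sum((emb[0] - emb[1]) ** 2))
print("cross-class distance:", np.sum((emb[0] - emb[2]) ** 2))
###Output _____no_output_____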
jupyter/4-big-O-for-python-data-structures-List-and-Dict.ipynb
###Markdown Big O for Python Data Structures In this lecture we will go over the Big O of built-in data structures in Python: Lists and Dictionaries. Lists In Python lists act as dynamic arrays and support a number of common operations through methods called on them. The two most common operations performed on a list are indexing and assigning to an index position. These operations are both designed to be run in constant time, O(1). Let's imagine you wanted to test different methods to construct a list that is [0,1,2...10000]. Let's go ahead and compare various methods, such as appending to the end of a list, concatenating lists, or using tools such as casting and list comprehension. For example: ###Code
def method1():
    l = []
    for n in range(10000):
        l = l + [n]

def method2():
    l = []
    for n in range(10000):
        l.append(n)

def method3():
    l = [n for n in range(10000)]

def method4():
    l = list(range(10000))
###Output _____no_output_____ ###Markdown Let's now test these methods using the timeit magic function: ###Code
%timeit method1()
%timeit method2()
%timeit method3()
%timeit method4()
###Output 115 ms ± 1.31 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) 553 µs ± 9.82 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each) 266 µs ± 1.35 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each) 175 µs ± 1.38 µs per loop (mean ± std. dev. of 7 runs, 10000 loops each) ###Markdown We can clearly see that the most effective method is casting the built-in range() object to a list! It is important to keep these factors in mind when writing efficient code. More importantly, begin thinking about how we are able to index with O(1). We will discuss this in more detail when we cover arrays in general. For now, take a look at the table below for an overview of Big-O efficiencies. Table of Big-O for common list operations

| Operation | Big-O Efficiency |
|------------------|------------------|
| index [] | O(1) |
| Index assignment | O(1) |
| append | O(1) |
| pop() | O(1) |
| pop(i) | O(N) |
| insert(i,item) | O(N) |
| del operator | O(N) |
| iteration | O(N) |
| contains (in) | O(N) |
| get slice [x:y] | O(K) |
| del slice | O(N) |
| set slice | O(N+K) |
| reverse | O(N) |
| concatenate | O(K) |
| sort | O(N log N) |
| multiply | O(NK) |

Dictionaries Dictionaries in Python are an implementation of a hash table. They operate with keys and values, for example: ###Code
d = {'k1':1,'k2':2}
d['k1']
###Output _____no_output_____
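###Markdown Getting and setting items in a dictionary is designed to run in constant time on average, because a key is hashed straight to its storage slot. A quick (machine-dependent) demonstration — membership testing, which is O(N) for a list, is O(1) on average for a dictionary: ###Code
lst = list(range(10000))
d = dict.fromkeys(range(10000))
%timeit (9999 in lst)   # scans the whole list
%timeit (9999 in d)     # single hash lookup
###Output _____no_output_____ ###Markdown Table of Big-O for common dictionary operations (average case)

| Operation | Big-O Efficiency |
|------------------|------------------|
| copy | O(N) |
| get item | O(1) |
| set item | O(1) |
| delete item | O(1) |
| contains (in) | O(1) |
| iteration | O(N) |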
course-1-neural-networks/Logistic+Regression+with+a+Neural+Network+mindset+v5.ipynb
###Markdown Logistic Regression with a Neural Network mindsetWelcome to your first (required) programming assignment! You will build a logistic regression classifier to recognize cats. This assignment will step you through how to do this with a Neural Network mindset, and so will also hone your intuitions about deep learning.**Instructions:**- Do not use loops (for/while) in your code, unless the instructions explicitly ask you to do so.**You will learn to:**- Build the general architecture of a learning algorithm, including: - Initializing parameters - Calculating the cost function and its gradient - Using an optimization algorithm (gradient descent) - Gather all three functions above into a main model function, in the right order. 1 - Packages First, let's run the cell below to import all the packages that you will need during this assignment. - [numpy](www.numpy.org) is the fundamental package for scientific computing with Python.- [h5py](http://www.h5py.org) is a common package to interact with a dataset that is stored on an H5 file.- [matplotlib](http://matplotlib.org) is a famous library to plot graphs in Python.- [PIL](http://www.pythonware.com/products/pil/) and [scipy](https://www.scipy.org/) are used here to test your model with your own picture at the end. ###Code import numpy as np import matplotlib.pyplot as plt import h5py import scipy from PIL import Image from scipy import ndimage from lr_utils import load_dataset %matplotlib inline ###Output /opt/conda/lib/python3.5/site-packages/matplotlib/font_manager.py:273: UserWarning: Matplotlib is building the font cache using fc-list. This may take a moment. warnings.warn('Matplotlib is building the font cache using fc-list. This may take a moment.') /opt/conda/lib/python3.5/site-packages/matplotlib/font_manager.py:273: UserWarning: Matplotlib is building the font cache using fc-list. This may take a moment. warnings.warn('Matplotlib is building the font cache using fc-list. This may take a moment.') ###Markdown 2 - Overview of the Problem set **Problem Statement**: You are given a dataset ("data.h5") containing: - a training set of m_train images labeled as cat (y=1) or non-cat (y=0) - a test set of m_test images labeled as cat or non-cat - each image is of shape (num_px, num_px, 3) where 3 is for the 3 channels (RGB). Thus, each image is square (height = num_px) and (width = num_px).You will build a simple image-recognition algorithm that can correctly classify pictures as cat or non-cat.Let's get more familiar with the dataset. Load the data by running the following code. ###Code # Loading the data (cat/non-cat) train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset() ###Output _____no_output_____ ###Markdown We added "_orig" at the end of image datasets (train and test) because we are going to preprocess them. After preprocessing, we will end up with train_set_x and test_set_x (the labels train_set_y and test_set_y don't need any preprocessing).Each line of your train_set_x_orig and test_set_x_orig is an array representing an image. You can visualize an example by running the following code. Feel free also to change the `index` value and re-run to see other images. ###Code # Example of a picture index = 25 plt.imshow(train_set_x_orig[index]) print ("y = " + str(train_set_y[:, index]) + ", it's a '" + classes[np.squeeze(train_set_y[:, index])].decode("utf-8") + "' picture.") ###Output y = [1], it's a 'cat' picture. 
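###Markdown Side note: the `np.squeeze` call above strips singleton dimensions, which is why the label prints as a scalar rather than a one-element array. A tiny illustration (the array here is hypothetical, not part of the dataset): ###Code
y_example = np.array([[1]])
print(y_example.shape, np.squeeze(y_example).shape)  # (1, 1) -> ()
###Output _____no_output_____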
###Markdown Many software bugs in deep learning come from having matrix/vector dimensions that don't fit. If you can keep your matrix/vector dimensions straight you will go a long way toward eliminating many bugs. **Exercise:** Find the values for: - m_train (number of training examples) - m_test (number of test examples) - num_px (= height = width of a training image) Remember that `train_set_x_orig` is a numpy-array of shape (m_train, num_px, num_px, 3). For instance, you can access `m_train` by writing `train_set_x_orig.shape[0]`. ###Code
### START CODE HERE ### (≈ 3 lines of code)
m_train = train_set_x_orig.shape[0]
m_test = test_set_x_orig.shape[0]
num_px = train_set_x_orig.shape[1]
### END CODE HERE ###

print ("Number of training examples: m_train = " + str(m_train))
print ("Number of testing examples: m_test = " + str(m_test))
print ("Height/Width of each image: num_px = " + str(num_px))
print ("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)")
print ("train_set_x shape: " + str(train_set_x_orig.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x shape: " + str(test_set_x_orig.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
###Output Number of training examples: m_train = 209 Number of testing examples: m_test = 50 Height/Width of each image: num_px = 64 Each image is of size: (64, 64, 3) train_set_x shape: (209, 64, 64, 3) train_set_y shape: (1, 209) test_set_x shape: (50, 64, 64, 3) test_set_y shape: (1, 50) ###Markdown **Expected Output for m_train, m_test and num_px**:

| Variable | Value |
|---|---|
| **m_train** | 209 |
| **m_test** | 50 |
| **num_px** | 64 |

For convenience, you should now reshape images of shape (num_px, num_px, 3) in a numpy-array of shape (num_px $*$ num_px $*$ 3, 1). After this, our training (and test) dataset is a numpy-array where each column represents a flattened image.
There should be m_train (respectively m_test) columns.**Exercise:** Reshape the training and test data sets so that images of size (num_px, num_px, 3) are flattened into single vectors of shape (num\_px $*$ num\_px $*$ 3, 1).A trick when you want to flatten a matrix X of shape (a,b,c,d) to a matrix X_flatten of shape (b$*$c$*$d, a) is to use: ```pythonX_flatten = X.reshape(X.shape[0], -1).T X.T is the transpose of X``` ###Code # Reshape the training and test examples ### START CODE HERE ### (≈ 2 lines of code) train_set_x_flatten = train_set_x_orig.reshape(m_train, -1).T test_set_x_flatten = test_set_x_orig.reshape(m_test, -1).T ### END CODE HERE ### print ("train_set_x_flatten shape: " + str(train_set_x_flatten.shape)) print ("train_set_y shape: " + str(train_set_y.shape)) print ("test_set_x_flatten shape: " + str(test_set_x_flatten.shape)) print ("test_set_y shape: " + str(test_set_y.shape)) print ("sanity check after reshaping: " + str(train_set_x_flatten[0:5,0])) ###Output train_set_x_flatten shape: (12288, 209) train_set_y shape: (1, 209) test_set_x_flatten shape: (12288, 50) test_set_y shape: (1, 50) sanity check after reshaping: [17 31 56 22 33] ###Markdown **Expected Output**: **train_set_x_flatten shape** (12288, 209) **train_set_y shape** (1, 209) **test_set_x_flatten shape** (12288, 50) **test_set_y shape** (1, 50) **sanity check after reshaping** [17 31 56 22 33] To represent color images, the red, green and blue channels (RGB) must be specified for each pixel, and so the pixel value is actually a vector of three numbers ranging from 0 to 255.One common preprocessing step in machine learning is to center and standardize your dataset, meaning that you substract the mean of the whole numpy array from each example, and then divide each example by the standard deviation of the whole numpy array. But for picture datasets, it is simpler and more convenient and works almost as well to just divide every row of the dataset by 255 (the maximum value of a pixel channel). Let's standardize our dataset. ###Code train_set_x = train_set_x_flatten/255. test_set_x = test_set_x_flatten/255. ###Output _____no_output_____ ###Markdown **What you need to remember:**Common steps for pre-processing a new dataset are:- Figure out the dimensions and shapes of the problem (m_train, m_test, num_px, ...)- Reshape the datasets such that each example is now a vector of size (num_px \* num_px \* 3, 1)- "Standardize" the data 3 - General Architecture of the learning algorithm It's time to design a simple algorithm to distinguish cat images from non-cat images.You will build a Logistic Regression, using a Neural Network mindset. The following Figure explains why **Logistic Regression is actually a very simple Neural Network!****Mathematical expression of the algorithm**:For one example $x^{(i)}$:$$z^{(i)} = w^T x^{(i)} + b \tag{1}$$$$\hat{y}^{(i)} = a^{(i)} = sigmoid(z^{(i)})\tag{2}$$ $$ \mathcal{L}(a^{(i)}, y^{(i)}) = - y^{(i)} \log(a^{(i)}) - (1-y^{(i)} ) \log(1-a^{(i)})\tag{3}$$The cost is then computed by summing over all training examples:$$ J = \frac{1}{m} \sum_{i=1}^m \mathcal{L}(a^{(i)}, y^{(i)})\tag{6}$$**Key steps**:In this exercise, you will carry out the following steps: - Initialize the parameters of the model - Learn the parameters for the model by minimizing the cost - Use the learned parameters to make predictions (on the test set) - Analyse the results and conclude 4 - Building the parts of our algorithm The main steps for building a Neural Network are:1. 
Define the model structure (such as number of input features) 2. Initialize the model's parameters3. Loop: - Calculate current loss (forward propagation) - Calculate current gradient (backward propagation) - Update parameters (gradient descent)You often build 1-3 separately and integrate them into one function we call `model()`. 4.1 - Helper functions**Exercise**: Using your code from "Python Basics", implement `sigmoid()`. As you've seen in the figure above, you need to compute $sigmoid( w^T x + b) = \frac{1}{1 + e^{-(w^T x + b)}}$ to make predictions. Use np.exp(). ###Code # GRADED FUNCTION: sigmoid def sigmoid(z): """ Compute the sigmoid of z Arguments: z -- A scalar or numpy array of any size. Return: s -- sigmoid(z) """ ### START CODE HERE ### (≈ 1 line of code) s = 1/(1+np.exp(-z)) ### END CODE HERE ### return s print ("sigmoid([0, 2]) = " + str(sigmoid(np.array([0,2])))) ###Output sigmoid([0, 2]) = [ 0.5 0.88079708] ###Markdown **Expected Output**: **sigmoid([0, 2])** [ 0.5 0.88079708] 4.2 - Initializing parameters**Exercise:** Implement parameter initialization in the cell below. You have to initialize w as a vector of zeros. If you don't know what numpy function to use, look up np.zeros() in the Numpy library's documentation. ###Code # GRADED FUNCTION: initialize_with_zeros def initialize_with_zeros(dim): """ This function creates a vector of zeros of shape (dim, 1) for w and initializes b to 0. Argument: dim -- size of the w vector we want (or number of parameters in this case) Returns: w -- initialized vector of shape (dim, 1) b -- initialized scalar (corresponds to the bias) """ ### START CODE HERE ### (≈ 1 line of code) w = np.zeros((dim, 1)) b = 0 ### END CODE HERE ### assert(w.shape == (dim, 1)) assert(isinstance(b, float) or isinstance(b, int)) return w, b dim = 2 w, b = initialize_with_zeros(dim) print ("w = " + str(w)) print ("b = " + str(b)) ###Output w = [[ 0.] [ 0.]] b = 0 ###Markdown **Expected Output**: ** w ** [[ 0.] [ 0.]] ** b ** 0 For image inputs, w will be of shape (num_px $\times$ num_px $\times$ 3, 1). 4.3 - Forward and Backward propagationNow that your parameters are initialized, you can do the "forward" and "backward" propagation steps for learning the parameters.**Exercise:** Implement a function `propagate()` that computes the cost function and its gradient.**Hints**:Forward Propagation:- You get X- You compute $A = \sigma(w^T X + b) = (a^{(1)}, a^{(2)}, ..., a^{(m-1)}, a^{(m)})$- You calculate the cost function: $J = -\frac{1}{m}\sum_{i=1}^{m}y^{(i)}\log(a^{(i)})+(1-y^{(i)})\log(1-a^{(i)})$Here are the two formulas you will be using: $$ \frac{\partial J}{\partial w} = \frac{1}{m}X(A-Y)^T\tag{7}$$$$ \frac{\partial J}{\partial b} = \frac{1}{m} \sum_{i=1}^m (a^{(i)}-y^{(i)})\tag{8}$$ ###Code # GRADED FUNCTION: propagate def propagate(w, b, X, Y): """ Implement the cost function and its gradient for the propagation explained above Arguments: w -- weights, a numpy array of size (num_px * num_px * 3, 1) b -- bias, a scalar X -- data of size (num_px * num_px * 3, number of examples) Y -- true "label" vector (containing 0 if non-cat, 1 if cat) of size (1, number of examples) Return: cost -- negative log-likelihood cost for logistic regression dw -- gradient of the loss with respect to w, thus same shape as w db -- gradient of the loss with respect to b, thus same shape as b Tips: - Write your code step by step for the propagation. 
np.log(), np.dot() """ m = X.shape[1] # FORWARD PROPAGATION (FROM X TO COST) ### START CODE HERE ### (≈ 2 lines of code) A = sigmoid(np.dot(w.T,X) + b) # compute activation cost = (-1/m)*np.sum(Y*np.log(A) + (1-Y)*np.log(1-A)) # compute cost ### END CODE HERE ### # BACKWARD PROPAGATION (TO FIND GRAD) ### START CODE HERE ### (≈ 2 lines of code) dw = (1/m) * np.dot(X,(A-Y).T) db = (1/m) * np.sum(A-Y) ### END CODE HERE ### assert(dw.shape == w.shape) assert(db.dtype == float) cost = np.squeeze(cost) assert(cost.shape == ()) grads = {"dw": dw, "db": db} return grads, cost w, b, X, Y = np.array([[1.],[2.]]), 2., np.array([[1.,2.,-1.],[3.,4.,-3.2]]), np.array([[1,0,1]]) grads, cost = propagate(w, b, X, Y) print ("dw = " + str(grads["dw"])) print ("db = " + str(grads["db"])) print ("cost = " + str(cost)) ###Output dw = [[ 0.99845601] [ 2.39507239]] db = 0.00145557813678 cost = 5.80154531939 ###Markdown **Expected Output**: ** dw ** [[ 0.99845601] [ 2.39507239]] ** db ** 0.00145557813678 ** cost ** 5.801545319394553 4.4 - Optimization- You have initialized your parameters.- You are also able to compute a cost function and its gradient.- Now, you want to update the parameters using gradient descent.**Exercise:** Write down the optimization function. The goal is to learn $w$ and $b$ by minimizing the cost function $J$. For a parameter $\theta$, the update rule is $ \theta = \theta - \alpha \text{ } d\theta$, where $\alpha$ is the learning rate. ###Code # GRADED FUNCTION: optimize def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False): """ This function optimizes w and b by running a gradient descent algorithm Arguments: w -- weights, a numpy array of size (num_px * num_px * 3, 1) b -- bias, a scalar X -- data of shape (num_px * num_px * 3, number of examples) Y -- true "label" vector (containing 0 if non-cat, 1 if cat), of shape (1, number of examples) num_iterations -- number of iterations of the optimization loop learning_rate -- learning rate of the gradient descent update rule print_cost -- True to print the loss every 100 steps Returns: params -- dictionary containing the weights w and bias b grads -- dictionary containing the gradients of the weights and bias with respect to the cost function costs -- list of all the costs computed during the optimization, this will be used to plot the learning curve. Tips: You basically need to write down two steps and iterate through them: 1) Calculate the cost and the gradient for the current parameters. Use propagate(). 2) Update the parameters using gradient descent rule for w and b. 
""" costs = [] for i in range(num_iterations): # Cost and gradient calculation (≈ 1-4 lines of code) ### START CODE HERE ### grads, cost = propagate(w, b, X, Y) ### END CODE HERE ### # Retrieve derivatives from grads dw = grads["dw"] db = grads["db"] # update rule (≈ 2 lines of code) ### START CODE HERE ### w = w - (learning_rate * dw) b = b - (learning_rate * db) ### END CODE HERE ### # Record the costs if i % 100 == 0: costs.append(cost) # Print the cost every 100 training iterations if print_cost and i % 100 == 0: print ("Cost after iteration %i: %f" %(i, cost)) params = {"w": w, "b": b} grads = {"dw": dw, "db": db} return params, grads, costs params, grads, costs = optimize(w, b, X, Y, num_iterations= 100, learning_rate = 0.009, print_cost = False) print ("w = " + str(params["w"])) print ("b = " + str(params["b"])) print ("dw = " + str(grads["dw"])) print ("db = " + str(grads["db"])) ###Output w = [[ 0.19033591] [ 0.12259159]] b = 1.92535983008 dw = [[ 0.67752042] [ 1.41625495]] db = 0.219194504541 ###Markdown **Expected Output**: **w** [[ 0.19033591] [ 0.12259159]] **b** 1.92535983008 **dw** [[ 0.67752042] [ 1.41625495]] **db** 0.219194504541 **Exercise:** The previous function will output the learned w and b. We are able to use w and b to predict the labels for a dataset X. Implement the `predict()` function. There are two steps to computing predictions:1. Calculate $\hat{Y} = A = \sigma(w^T X + b)$2. Convert the entries of a into 0 (if activation 0.5), stores the predictions in a vector `Y_prediction`. If you wish, you can use an `if`/`else` statement in a `for` loop (though there is also a way to vectorize this). ###Code # GRADED FUNCTION: predict def predict(w, b, X): ''' Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b) Arguments: w -- weights, a numpy array of size (num_px * num_px * 3, 1) b -- bias, a scalar X -- data of size (num_px * num_px * 3, number of examples) Returns: Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X ''' m = X.shape[1] Y_prediction = np.zeros((1,m)) w = w.reshape(X.shape[0], 1) # Compute vector "A" predicting the probabilities of a cat being present in the picture ### START CODE HERE ### (≈ 1 line of code) A = sigmoid(np.dot(w.T,X) + b) ### END CODE HERE ### for i in range(A.shape[1]): # Convert probabilities A[0,i] to actual predictions p[0,i] ### START CODE HERE ### (≈ 4 lines of code) if A[0,i] <= 0.5: Y_prediction[0,i] = 0 if A[0,i] > 0.5: Y_prediction[0,i] = 1 ### END CODE HERE ### assert(Y_prediction.shape == (1, m)) return Y_prediction w = np.array([[0.1124579],[0.23106775]]) b = -0.3 X = np.array([[1.,-1.1,-3.2],[1.2,2.,0.1]]) print ("predictions = " + str(predict(w, b, X))) ###Output predictions = [[ 1. 1. 0.]] ###Markdown **Expected Output**: **predictions** [[ 1. 1. 0.]] **What to remember:**You've implemented several functions that:- Initialize (w,b)- Optimize the loss iteratively to learn parameters (w,b): - computing the cost and its gradient - updating the parameters using gradient descent- Use the learned (w,b) to predict the labels for a given set of examples 5 - Merge all functions into a model You will now see how the overall model is structured by putting together all the building blocks (functions implemented in the previous parts) together, in the right order.**Exercise:** Implement the model function. 
Use the following notation: - Y_prediction_test for your predictions on the test set - Y_prediction_train for your predictions on the train set - w, costs, grads for the outputs of optimize() ###Code # GRADED FUNCTION: model def model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5, print_cost = False): """ Builds the logistic regression model by calling the function you've implemented previously Arguments: X_train -- training set represented by a numpy array of shape (num_px * num_px * 3, m_train) Y_train -- training labels represented by a numpy array (vector) of shape (1, m_train) X_test -- test set represented by a numpy array of shape (num_px * num_px * 3, m_test) Y_test -- test labels represented by a numpy array (vector) of shape (1, m_test) num_iterations -- hyperparameter representing the number of iterations to optimize the parameters learning_rate -- hyperparameter representing the learning rate used in the update rule of optimize() print_cost -- Set to true to print the cost every 100 iterations Returns: d -- dictionary containing information about the model. """ ### START CODE HERE ### # initialize parameters with zeros (≈ 1 line of code) w, b = initialize_with_zeros(X_train.shape[0]) # Gradient descent (≈ 1 line of code); pass print_cost through so costs actually print parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost) # Retrieve parameters w and b from dictionary "parameters" w = parameters["w"] b = parameters["b"] # Predict test/train set examples (≈ 2 lines of code) Y_prediction_test = predict(w, b, X_test) Y_prediction_train = predict(w, b, X_train) ### END CODE HERE ### # Print train/test Errors print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100)) print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100)) d = {"costs": costs, "Y_prediction_test": Y_prediction_test, "Y_prediction_train" : Y_prediction_train, "w" : w, "b" : b, "learning_rate" : learning_rate, "num_iterations": num_iterations} return d ###Output _____no_output_____ ###Markdown Run the following cell to train your model. ###Code d = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 2000, learning_rate = 0.005, print_cost = True) ###Output train accuracy: 99.04306220095694 % test accuracy: 70.0 % ###Markdown **Expected Output**: **Cost after iteration 0 ** 0.693147 $\vdots$ $\vdots$ **Train Accuracy** 99.04306220095694 % **Test Accuracy** 70.0 % **Comment**: Training accuracy is close to 100%. This is a good sanity check: your model is working and has high enough capacity to fit the training data. Test accuracy is 70%. It is actually not bad for this simple model, given the small dataset we used and that logistic regression is a linear classifier. But no worries, you'll build an even better classifier next week!Also, you see that the model is clearly overfitting the training data. Later in this specialization you will learn how to reduce overfitting, for example by using regularization. Using the code below (and changing the `index` variable) you can look at predictions on pictures of the test set. ###Code # Example of a picture that was wrongly classified.
index = 1 plt.imshow(test_set_x[:,index].reshape((num_px, num_px, 3))) print ("y = " + str(test_set_y[0,index]) + ", you predicted that it is a \"" + classes[d["Y_prediction_test"][0,index]].decode("utf-8") + "\" picture.") ###Output /opt/conda/lib/python3.5/site-packages/ipykernel/__main__.py:4: DeprecationWarning: using a non-integer number instead of an integer will result in an error in the future ###Markdown Let's also plot the cost function and the gradients. ###Code # Plot learning curve (with costs) costs = np.squeeze(d['costs']) plt.plot(costs) plt.ylabel('cost') plt.xlabel('iterations (per hundreds)') plt.title("Learning rate =" + str(d["learning_rate"])) plt.show() ###Output _____no_output_____ ###Markdown **Interpretation**:You can see the cost decreasing. It shows that the parameters are being learned. However, you see that you could train the model even more on the training set. Try to increase the number of iterations in the cell above and rerun the cells. You might see that the training set accuracy goes up, but the test set accuracy goes down. This is called overfitting. 6 - Further analysis (optional/ungraded exercise) Congratulations on building your first image classification model. Let's analyze it further, and examine possible choices for the learning rate $\alpha$. Choice of learning rate **Reminder**:In order for Gradient Descent to work you must choose the learning rate wisely. The learning rate $\alpha$ determines how rapidly we update the parameters. If the learning rate is too large we may "overshoot" the optimal value. Similarly, if it is too small we will need too many iterations to converge to the best values. That's why it is crucial to use a well-tuned learning rate.Let's compare the learning curve of our model with several choices of learning rates. Run the cell below. This should take about 1 minute. Feel free also to try different values than the three we have initialized the `learning_rates` variable to contain, and see what happens. ###Code learning_rates = [0.01, 0.001, 0.0001] models = {} for i in learning_rates: print ("learning rate is: " + str(i)) models[str(i)] = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 1500, learning_rate = i, print_cost = False) print ('\n' + "-------------------------------------------------------" + '\n') for i in learning_rates: plt.plot(np.squeeze(models[str(i)]["costs"]), label= str(models[str(i)]["learning_rate"])) plt.ylabel('cost') plt.xlabel('iterations (hundreds)') legend = plt.legend(loc='upper center', shadow=True) frame = legend.get_frame() frame.set_facecolor('0.90') plt.show() ###Output learning rate is: 0.01 train accuracy: 99.52153110047847 % test accuracy: 68.0 % ------------------------------------------------------- learning rate is: 0.001 train accuracy: 88.99521531100478 % test accuracy: 64.0 % ------------------------------------------------------- learning rate is: 0.0001 train accuracy: 68.42105263157895 % test accuracy: 36.0 % ------------------------------------------------------- ###Markdown **Interpretation**: - Different learning rates give different costs and thus different predictions results.- If the learning rate is too large (0.01), the cost may oscillate up and down. It may even diverge (though in this example, using 0.01 still eventually ends up at a good value for the cost). - A lower cost doesn't mean a better model. You have to check if there is possibly overfitting. 
It happens when the training accuracy is a lot higher than the test accuracy.- In deep learning, we usually recommend that you: - Choose the learning rate that best minimizes the cost function. - If your model overfits, use other techniques to reduce overfitting. (We'll talk about this in later videos.) 7 - Test with your own image (optional/ungraded exercise) Congratulations on finishing this assignment. You can use your own image and see the output of your model. To do that: 1. Click on "File" in the upper bar of this notebook, then click "Open" to go to your Coursera Hub. 2. Add your image to this Jupyter Notebook's directory, in the "images" folder 3. Change your image's name in the following code 4. Run the code and check if the algorithm is right (1 = cat, 0 = non-cat)! ###Code ## START CODE HERE ## (PUT YOUR IMAGE NAME) my_image = "my_image.jpg" # change this to the name of your image file ## END CODE HERE ## # We preprocess the image to fit your algorithm. fname = "images/" + my_image image = np.array(ndimage.imread(fname, flatten=False)) my_image = scipy.misc.imresize(image, size=(num_px,num_px)).reshape((1, num_px*num_px*3)).T my_predicted_image = predict(d["w"], d["b"], my_image) plt.imshow(image) print("y = " + str(np.squeeze(my_predicted_image)) + ", your algorithm predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") + "\" picture.") ###Output _____no_output_____
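As an aside, `scipy.ndimage.imread` and `scipy.misc.imresize` have been removed from recent SciPy releases, so the cell above only runs against the older SciPy version this notebook was written for. Below is a minimal sketch of the same preprocessing done with Pillow instead, assuming Pillow is installed, the file `images/my_image.jpg` exists, and `num_px`, `predict` and `d` are defined as above:

```python
import numpy as np
from PIL import Image

# Load the image and resize it to the num_px x num_px shape the model was trained on
image = Image.open("images/my_image.jpg")
image_resized = np.array(image.resize((num_px, num_px)))

# Flatten into a (num_px * num_px * 3, 1) column vector and rescale to [0, 1],
# matching the /255. standardization applied to the training set
my_image = image_resized.reshape((1, num_px * num_px * 3)).T / 255.

my_predicted_image = predict(d["w"], d["b"], my_image)
print("y = " + str(np.squeeze(my_predicted_image)))
```

Note that, unlike the original cell, this sketch divides by 255 so the input is scaled the same way as the training data.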
RP_Template_ReadWrite.ipynb
###Markdown Some Filepath for RP ###Code #rp_info =dicom.read_file(r'D:\kai\DICOMtoNPZ\ral\RP.1.2.246.352.71.5.417454940236.2035996.20190820095628.dcm') #rp_info = pydicom.read_file(r'D:\kai\DICOMtoNPZ\ral\RP.1.2.246.352.71.5.417454940236.2035996.20190820095628.dcm') # Template Data rp_info = pydicom.read_file(r'RP_Template/Brachy_RP.1.2.246.352.71.5.417454940236.2063186.20191015164204.dcm') ###Output _____no_output_____ ###Markdown Only Read File ###Code # Start research from here import pydicom #rp_info =dicom.read_file(r'D:\kai\DICOMtoNPZ\ral\RP.1.2.246.352.71.5.417454940236.2035996.20190820095628.dcm') #rp_info = pydicom.read_file(r'D:\kai\DICOMtoNPZ\ral\RP.1.2.246.352.71.5.417454940236.2035996.20190820095628.dcm') # Template Data rp_info = pydicom.read_file(r'RP_Template/Brachy_RP.1.2.246.352.71.5.417454940236.2063186.20191015164204.dcm') App = rp_info.ApplicationSetupSequence ChannelSequence = App[0].ChannelSequence #print('ChannelSequence[0].BrachyControlPointSequence = ') #print(ChannelSequence[0].BrachyControlPointSequence) print('ChannelSequence[0] = ') for idx in range(len(ChannelSequence)): cs = ChannelSequence[idx] print('idx = {} ->'.format(idx)) print(cs) break for idx, cp in enumerate(ChannelSequence[0].BrachyControlPointSequence): #print(cp) #print("position:", cp.ControlPoint3DPosition, end = " ") #print("position:", cp.ControlPoint3DPosition) #print(type(cp.ControlPoint3DPosition)) if (idx >= 0 ) : print('idx == {} -> cp = '.format(idx)) print(cp) print(type(cp.ControlPointIndex)) print('===============') #continue print("Time:", cp.CumulativeTimeWeight) try: for jj in cp.BrachyReferencedDoseReferenceSequence: #print(jj) print("ref:", rp_info.DoseReferenceSequence[jj.ReferencedDoseReferenceNumber-1].DoseReferenceDescription , end = " ") print("coeff:", jj.CumulativeDoseReferenceCoefficient) except Exception as e: print(e) pass print('') ChannelSequence[3] ###Output _____no_output_____ ###Markdown Write File and read file again ###Code # Write File Test and read again import pydicom #rp_info =dicom.read_file(r'D:\kai\DICOMtoNPZ\ral\RP.1.2.246.352.71.5.417454940236.2035996.20190820095628.dcm') #rp_info = pydicom.read_file(r'D:\kai\DICOMtoNPZ\ral\RP.1.2.246.352.71.5.417454940236.2035996.20190820095628.dcm') # Template Data rp_info = pydicom.read_file(r'RP_Template/Brachy_RP.1.2.246.352.71.5.417454940236.2063186.20191015164204.dcm') out_rp_filepath = r'Brachy.RP.out.dcm' App = rp_info.ApplicationSetupSequence ChannelSequence = App[0].ChannelSequence print('ChannelSequence[0] = ') for idx, cp in enumerate(ChannelSequence[0].BrachyControlPointSequence): print("position:", cp.ControlPoint3DPosition) if (idx >= 0 ) : #print('idx == {} -> cp = '.format(idx)) #print(cp) #print('===============') pass if (idx == 0) : cp.ControlPoint3DPosition[0] = 33.33333 #continue print("Time:", cp.CumulativeTimeWeight) try: for jj in cp.BrachyReferencedDoseReferenceSequence: #print(jj) print("ref:", rp_info.DoseReferenceSequence[jj.ReferencedDoseReferenceNumber-1].DoseReferenceDescription , end = " ") print("coeff:", jj.CumulativeDoseReferenceCoefficient) except Exception as e: print(e) pass print('') pydicom.write_file(out_rp_filepath, rp_info) in_rp_filepath = r'Brachy.RP.out.dcm' rp_fp = pydicom.read_file(in_rp_filepath) seq = rp_fp.ApplicationSetupSequence[0].ChannelSequence[0].BrachyControlPointSequence for idx, cp in enumerate(seq): print("position:", cp.ControlPoint3DPosition) ###Output ChannelSequence[0] = position: ['12.778672628366', '-14.486222523329', '1.80953906063796'] Time: 0 
'Dataset' object has no attribute 'BrachyReferencedDoseReferenceSequence' position: ['12.778672628366', '-14.486222523329', '1.80953906063796'] Time: 0 'Dataset' object has no attribute 'BrachyReferencedDoseReferenceSequence' position: ['12.6362405464063', '-13.594251702739', '-3.1081942746745'] Time: 0 'Dataset' object has no attribute 'BrachyReferencedDoseReferenceSequence' position: ['12.6362405464063', '-13.594251702739', '-3.1081942746745'] Time: 0 'Dataset' object has no attribute 'BrachyReferencedDoseReferenceSequence' position: ['12.4938084644467', '-12.702280882149', '-8.025927609987'] Time: 0 'Dataset' object has no attribute 'BrachyReferencedDoseReferenceSequence' position: ['12.4938084644467', '-12.702280882149', '-8.025927609987'] Time: 0 'Dataset' object has no attribute 'BrachyReferencedDoseReferenceSequence' position: ['12.351376382487', '-11.810310061558', '-12.9436609453'] Time: 0 'Dataset' object has no attribute 'BrachyReferencedDoseReferenceSequence' position: ['12.351376382487', '-11.810310061558', '-12.9436609453'] Time: 0 'Dataset' object has no attribute 'BrachyReferencedDoseReferenceSequence' position: ['12.2089443005273', '-10.918339240968', '-17.861394280612'] Time: 0 'Dataset' object has no attribute 'BrachyReferencedDoseReferenceSequence' position: ['12.2089443005273', '-10.918339240968', '-17.861394280612'] Time: 0 'Dataset' object has no attribute 'BrachyReferencedDoseReferenceSequence' position: ['12.0665122185677', '-10.026368420377', '-22.779127615925'] Time: 0 'Dataset' object has no attribute 'BrachyReferencedDoseReferenceSequence' position: ['12.0665122185677', '-10.026368420377', '-22.779127615925'] Time: 0 'Dataset' object has no attribute 'BrachyReferencedDoseReferenceSequence' position: ['11.924080136608', '-9.1343975997867', '-27.696860951237'] Time: 0 'Dataset' object has no attribute 'BrachyReferencedDoseReferenceSequence' position: ['11.924080136608', '-9.1343975997867', '-27.696860951237'] Time: 0 'Dataset' object has no attribute 'BrachyReferencedDoseReferenceSequence' position: ['11.7816480546484', '-8.2424267791963', '-32.61459428655'] Time: 0 'Dataset' object has no attribute 'BrachyReferencedDoseReferenceSequence' position: ['11.7816480546484', '-8.2424267791963', '-32.61459428655'] Time: 0 'Dataset' object has no attribute 'BrachyReferencedDoseReferenceSequence' position: ['11.6392159726887', '-7.3504559586058', '-37.532327621862'] Time: 0 'Dataset' object has no attribute 'BrachyReferencedDoseReferenceSequence' position: ['11.6392159726887', '-7.3504559586058', '-37.532327621862'] Time: 0 'Dataset' object has no attribute 'BrachyReferencedDoseReferenceSequence' position: ['11.496783890729', '-6.4584851380154', '-42.450060957175'] Time: 0 'Dataset' object has no attribute 'BrachyReferencedDoseReferenceSequence' position: ['11.496783890729', '-6.4584851380154', '-42.450060957175'] Time: 0 'Dataset' object has no attribute 'BrachyReferencedDoseReferenceSequence' position: ['11.3543518087694', '-5.5665143174249', '-47.367794292487'] Time: 0 'Dataset' object has no attribute 'BrachyReferencedDoseReferenceSequence' position: ['11.3543518087694', '-5.5665143174249', '-47.367794292487'] Time: 0 'Dataset' object has no attribute 'BrachyReferencedDoseReferenceSequence' position: ['11.2119197268097', '-4.6745434968344', '-52.2855276278'] Time: 0 'Dataset' object has no attribute 'BrachyReferencedDoseReferenceSequence' position: ['11.2119197268097', '-4.6745434968344', '-52.2855276278'] Time: 0 'Dataset' object has no attribute 
'BrachyReferencedDoseReferenceSequence' position: ['11.0694876448501', '-3.782572676244', '-57.203260963112'] Time: 0 'Dataset' object has no attribute 'BrachyReferencedDoseReferenceSequence' position: ['11.0694876448501', '-3.782572676244', '-57.203260963112'] Time: 0 'Dataset' object has no attribute 'BrachyReferencedDoseReferenceSequence' position: ['10.9270555628904', '-2.8906018556535', '-62.120994298425'] Time: 0 'Dataset' object has no attribute 'BrachyReferencedDoseReferenceSequence' position: ['10.9270555628904', '-2.8906018556535', '-62.120994298425'] Time: 0 'Dataset' object has no attribute 'BrachyReferencedDoseReferenceSequence' position: ['10.7846234809307', '-1.9986310350631', '-67.038727633737'] Time: 0 'Dataset' object has no attribute 'BrachyReferencedDoseReferenceSequence' position: ['10.7846234809307', '-1.9986310350631', '-67.038727633737'] Time: 0 'Dataset' object has no attribute 'BrachyReferencedDoseReferenceSequence' position: ['10.6421913989711', '-1.1066602144726', '-71.95646096905'] Time: 0 'Dataset' object has no attribute 'BrachyReferencedDoseReferenceSequence' position: ['10.6421913989711', '-1.1066602144726', '-71.95646096905'] Time: 0 'Dataset' object has no attribute 'BrachyReferencedDoseReferenceSequence' position: ['10.4997593170114', '-2.1468939e-1', '-76.874194304362'] Time: 0 'Dataset' object has no attribute 'BrachyReferencedDoseReferenceSequence' position: ['10.4997593170114', '-2.1468939e-1', '-76.874194304362'] Time: 0 'Dataset' object has no attribute 'BrachyReferencedDoseReferenceSequence' position: ['10.3573272350518', '6.7728143e-1', '-81.791927639675'] Time: 0 'Dataset' object has no attribute 'BrachyReferencedDoseReferenceSequence' position: ['10.3573272350518', '6.7728143e-1', '-81.791927639675'] Time: 0 'Dataset' object has no attribute 'BrachyReferencedDoseReferenceSequence' position: ['10.2148951530921', '1.56925224729876', '-86.709660974987'] Time: 0 'Dataset' object has no attribute 'BrachyReferencedDoseReferenceSequence' position: ['10.2148951530921', '1.56925224729876', '-86.709660974987'] Time: 0 'Dataset' object has no attribute 'BrachyReferencedDoseReferenceSequence' position: ['33.33333', '-14.486222523329', '1.80953906063796'] position: ['12.778672628366', '-14.486222523329', '1.80953906063796'] position: ['12.6362405464063', '-13.594251702739', '-3.1081942746745'] position: ['12.6362405464063', '-13.594251702739', '-3.1081942746745'] position: ['12.4938084644467', '-12.702280882149', '-8.025927609987'] position: ['12.4938084644467', '-12.702280882149', '-8.025927609987'] position: ['12.351376382487', '-11.810310061558', '-12.9436609453'] position: ['12.351376382487', '-11.810310061558', '-12.9436609453'] position: ['12.2089443005273', '-10.918339240968', '-17.861394280612'] position: ['12.2089443005273', '-10.918339240968', '-17.861394280612'] position: ['12.0665122185677', '-10.026368420377', '-22.779127615925'] position: ['12.0665122185677', '-10.026368420377', '-22.779127615925'] position: ['11.924080136608', '-9.1343975997867', '-27.696860951237'] position: ['11.924080136608', '-9.1343975997867', '-27.696860951237'] position: ['11.7816480546484', '-8.2424267791963', '-32.61459428655'] position: ['11.7816480546484', '-8.2424267791963', '-32.61459428655'] position: ['11.6392159726887', '-7.3504559586058', '-37.532327621862'] position: ['11.6392159726887', '-7.3504559586058', '-37.532327621862'] position: ['11.496783890729', '-6.4584851380154', '-42.450060957175'] position: ['11.496783890729', '-6.4584851380154', 
'-42.450060957175'] position: ['11.3543518087694', '-5.5665143174249', '-47.367794292487'] position: ['11.3543518087694', '-5.5665143174249', '-47.367794292487'] position: ['11.2119197268097', '-4.6745434968344', '-52.2855276278'] position: ['11.2119197268097', '-4.6745434968344', '-52.2855276278'] position: ['11.0694876448501', '-3.782572676244', '-57.203260963112'] position: ['11.0694876448501', '-3.782572676244', '-57.203260963112'] position: ['10.9270555628904', '-2.8906018556535', '-62.120994298425'] position: ['10.9270555628904', '-2.8906018556535', '-62.120994298425'] position: ['10.7846234809307', '-1.9986310350631', '-67.038727633737'] position: ['10.7846234809307', '-1.9986310350631', '-67.038727633737'] position: ['10.6421913989711', '-1.1066602144726', '-71.95646096905'] position: ['10.6421913989711', '-1.1066602144726', '-71.95646096905'] position: ['10.4997593170114', '-2.1468939e-1', '-76.874194304362'] position: ['10.4997593170114', '-2.1468939e-1', '-76.874194304362'] position: ['10.3573272350518', '6.7728143e-1', '-81.791927639675'] position: ['10.3573272350518', '6.7728143e-1', '-81.791927639675'] position: ['10.2148951530921', '1.56925224729876', '-86.709660974987'] position: ['10.2148951530921', '1.56925224729876', '-86.709660974987'] ###Markdown The format is like rp_info.ApplicationSetupSequence[0].ChannelSequence[0].NumberOfControlPoints is 38rp_fp.ApplicationSetupSequence[0].ChannelSequence[0].BrachyControlPointSequence[ 0 .. 37](300a, 0112) Control Point Index IS: "0"(300a, 02d2) Control Point Relative Position DS: "3.5"(300a, 02d4) Control Point 3D Position DS: ['12.778672628366', '-14.486222523329', '1.80953906063796'](300a, 02d6) Cumulative Time Weight DS: "0"(300a, 0112) Control Point Index IS: "1"(300a, 02d2) Control Point Relative Position DS: "3.5"(300a, 02d4) Control Point 3D Position DS: ['12.778672628366', '-14.486222523329', '1.80953906063796'](300a, 02d6) Cumulative Time Weight DS: "0"(300a, 0112) Control Point Index IS: "2"(300a, 02d2) Control Point Relative Position DS: "8.5"(300a, 02d4) Control Point 3D Position DS: ['12.6362405464063', '-13.594251702739', '-3.1081942746745'](300a, 02d6) Cumulative Time Weight DS: "0"(300a, 0112) Control Point Index IS: "3"(300a, 02d2) Control Point Relative Position DS: "8.5"(300a, 02d4) Control Point 3D Position DS: ['12.6362405464063', '-13.594251702739', '-3.1081942746745'](300a, 02d6) Cumulative Time Weight DS: "0"...(300a, 0112) Control Point Index IS: "36"(300a, 02d2) Control Point Relative Position DS: "93.5"(300a, 02d4) Control Point 3D Position DS: ['10.2148951530921', '1.56925224729876', '-86.709660974987'](300a, 02d6) Cumulative Time Weight DS: "0"(300a, 0112) Control Point Index IS: "37"(300a, 02d2) Control Point Relative Position DS: "93.5"(300a, 02d4) Control Point 3D Position DS: ['10.2148951530921', '1.56925224729876', '-86.709660974987'](300a, 02d6) Cumulative Time Weight DS: "0" ###Code import pydicom import copy rp_info = pydicom.read_file(r'RP_Template/Brachy_RP.1.2.246.352.71.5.417454940236.2063186.20191015164204.dcm') #App = rp_info.ApplicationSetupSequence #ChannelSequence = App[0].ChannelSequence.Number of Control Points print(rp_info.ApplicationSetupSequence[0].ChannelSequence[0].NumberOfControlPoints) #print(rp_info.ApplicationSetupSequence[0].ChannelSequence[0].BrachyControlPointSequence) # We call BrachyControlPointSequence[i] BCPItem here BCPItemTemplate = copy.deepcopy(rp_info.ApplicationSetupSequence[0].ChannelSequence[0].BrachyControlPointSequence[37]) 
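# A sketch (an assumption, not from the original notebook) of how BCPItemTemplate could be
# used to build a fresh Brachy Control Point Sequence from a list of dwell positions.
# In the dump below, each dwell position appears as a PAIR of control points with the same
# 3D position: ControlPointIndex runs 0..2N-1 and ControlPointRelativePosition starts at
# 3.5 and advances by the 5 mm Source Applicator Step Size. Kept commented out so that
# print(rp_info) below still shows the unmodified template:
#
# from pydicom.sequence import Sequence
# points = [[12.77, -14.48, 1.80], [12.63, -13.59, -3.10]]  # hypothetical positions
# new_seq = Sequence()
# for i, pos in enumerate(points):
#     for k in range(2):
#         item = copy.deepcopy(BCPItemTemplate)
#         item.ControlPointIndex = 2 * i + k
#         item.ControlPointRelativePosition = 3.5 + 5 * i
#         item.ControlPoint3DPosition = [str(v) for v in pos]
#         item.CumulativeTimeWeight = 0
#         new_seq.append(item)
# channel = rp_info.ApplicationSetupSequence[0].ChannelSequence[0]
# channel.BrachyControlPointSequence = new_seq
# channel.NumberOfControlPoints = len(new_seq)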
#print(BrachyControlPointSequenceItemTemplate) # # (ControlPointIndex, ControlPointRelativePosition, ControlPoint3DPosition, CumulativeTimeWeight) # (0, 3.5, [,,], 0 ) # (0, ) #for idx, cp in enumerate(ChannelSequence[0].BrachyControlPointSequence): print(rp_info) ###Output 38 (0008, 0005) Specific Character Set CS: 'ISO_IR 192' (0008, 0012) Instance Creation Date DA: '20191015' (0008, 0013) Instance Creation Time TM: '164400.648000' (0008, 0016) SOP Class UID UI: RT Plan Storage (0008, 0018) SOP Instance UID UI: 1.2.246.352.71.5.417454940236.2063186.20191015164204 (0008, 0020) Study Date DA: '20191015' (0008, 0030) Study Time TM: '103307.760000' (0008, 0050) Accession Number SH: '2019-10-15-10:36' (0008, 0060) Modality CS: 'RTPLAN' (0008, 0070) Manufacturer LO: 'Varian Medical Systems' (0008, 0090) Referring Physician's Name PN: '' (0008, 1010) Station Name SH: 'ARIA136SQL' (0008, 1030) Study Description LO: 'Pelvis^3_IC (Adult)' (0008, 103e) Series Description LO: 'ARIA RadOnc Plans' (0008, 1048) Physician(s) of Record PN: 'Chen^Shang-Wen' (0008, 1070) Operators' Name PN: 'cylin' (0008, 1090) Manufacturer's Model Name LO: 'ARIA RadOnc' (0010, 0010) Patient's Name PN: '李吳雲霞' (0010, 0020) Patient ID LO: '21328949' (0010, 0030) Patient's Birth Date DA: '19620209' (0010, 0032) Patient's Birth Time TM: '000000' (0010, 0040) Patient's Sex CS: 'F' (0018, 1000) Device Serial Number LO: '417454940236' (0018, 1020) Software Version(s) LO: '13.6.32' (0020, 000d) Study Instance UID UI: 1.3.12.2.1107.5.1.4.95999.30000019101500111951200000013 (0020, 000e) Series Instance UID UI: 1.2.246.352.71.2.417454940236.4237016.20191015110315 (0020, 0010) Study ID SH: '3' (0020, 0011) Series Number IS: "5" (0020, 0052) Frame of Reference UID UI: 1.3.12.2.1107.5.1.4.95999.300000191015001123084000000210 (0020, 1040) Position Reference Indicator LO: '' (300a, 0002) RT Plan Label SH: 'study' (300a, 0006) RT Plan Date DA: '20191015' (300a, 0007) RT Plan Time TM: '164203.870000' (300a, 000a) Plan Intent CS: 'CURATIVE' (300a, 000c) RT Plan Geometry CS: 'PATIENT' (300a, 0010) Dose Reference Sequence 6 item(s) ---- (300a, 0012) Dose Reference Number IS: "1" (300a, 0013) Dose Reference UID UI: 1.2.246.352.71.10.417454940236.354700.20191008143159 (300a, 0014) Dose Reference Structure Type CS: 'SITE' (300a, 0016) Dose Reference Description LO: 'HR-CTV' (300a, 0020) Dose Reference Type CS: 'TARGET' (300a, 0026) Target Prescription Dose DS: "0" (3267, 0010) Private Creator LO: 'Varian Medical Systems VISION 3267' (3267, 1000) Private tag data UN: b'HR-CTV' --------- (300a, 0012) Dose Reference Number IS: "2" (300a, 0013) Dose Reference UID UI: 1.2.246.352.71.10.417454940236.355220.20191015111924 (300a, 0014) Dose Reference Structure Type CS: 'COORDINATES' (300a, 0016) Dose Reference Description LO: 'AR_2' (300a, 0018) Dose Reference Point Coordinates DS: ['-8.0784414610604', '-10.196736254077', '-23.46089130785'] (300a, 0020) Dose Reference Type CS: 'TARGET' (300a, 0026) Target Prescription Dose DS: "0" (3267, 0010) Private Creator LO: 'Varian Medical Systems VISION 3267' (3267, 1000) Private tag data UN: b'HR-CTV' --------- (300a, 0012) Dose Reference Number IS: "3" (300a, 0013) Dose Reference UID UI: 1.2.246.352.71.10.417454940236.355222.20191015111924 (300a, 0014) Dose Reference Structure Type CS: 'COORDINATES' (300a, 0016) Dose Reference Description LO: 'AL_2' (300a, 0018) Dose Reference Point Coordinates DS: ['31.99110497502', '-9.8928102849892', '-24.740952681574'] (300a, 0020) Dose Reference Type CS: 'TARGET' (300a, 
0026) Target Prescription Dose DS: "0" (3267, 0010) Private Creator LO: 'Varian Medical Systems VISION 3267' (3267, 1000) Private tag data UN: b'HR-CTV' --------- (300a, 0012) Dose Reference Number IS: "4" (300a, 0013) Dose Reference UID UI: 1.2.246.352.71.10.417454940236.355223.20191015111924 (300a, 0014) Dose Reference Structure Type CS: 'COORDINATES' (300a, 0016) Dose Reference Description LO: 'OS_2' (300a, 0018) Dose Reference Point Coordinates DS: ['11.4215277961272', '-8.1896739341043', '-43.896764054375'] (300a, 0020) Dose Reference Type CS: 'TARGET' (300a, 0026) Target Prescription Dose DS: "0" (3267, 0010) Private Creator LO: 'Varian Medical Systems VISION 3267' (3267, 1000) Private tag data UN: b'HR-CTV' --------- (300a, 0012) Dose Reference Number IS: "5" (300a, 0013) Dose Reference UID UI: 1.2.246.352.71.10.417454940236.355229.20191015111924 (300a, 0014) Dose Reference Structure Type CS: 'COORDINATES' (300a, 0016) Dose Reference Description LO: 'B_2' (300a, 0018) Dose Reference Point Coordinates DS: ['1.61651714611621', '-26.976522041304', '-45.72586290669'] (300a, 0020) Dose Reference Type CS: 'TARGET' (300a, 0026) Target Prescription Dose DS: "0" (3267, 0010) Private Creator LO: 'Varian Medical Systems VISION 3267' (3267, 1000) Private tag data UN: b'HR-CTV' --------- (300a, 0012) Dose Reference Number IS: "6" (300a, 0013) Dose Reference UID UI: 1.2.246.352.71.10.417454940236.355231.20191015111924 (300a, 0014) Dose Reference Structure Type CS: 'COORDINATES' (300a, 0016) Dose Reference Description LO: 'R_2' (300a, 0018) Dose Reference Point Coordinates DS: ['12.2113226408658', '11.0195204419177', '-46.566891671951'] (300a, 0020) Dose Reference Type CS: 'TARGET' (300a, 0026) Target Prescription Dose DS: "0" (3267, 0010) Private Creator LO: 'Varian Medical Systems VISION 3267' (3267, 1000) Private tag data UN: b'HR-CTV' --------- (300a, 0070) Fraction Group Sequence 1 item(s) ---- (300a, 0071) Fraction Group Number IS: "1" (300a, 0078) Number of Fractions Planned IS: "1" (300a, 0080) Number of Beams IS: "0" (300a, 00a0) Number of Brachy Application Setups IS: "1" (300c, 000a) Referenced Brachy Application Setup Sequence 1 item(s) ---- (300c, 000c) Referenced Brachy Application Setup IS: "1" (3249, 0010) Private Creator LO: 'Varian Medical Systems VISION 3249' (3249, 1010) Private tag data UN: Array of 52 elements --------- --------- (300a, 0200) Brachy Treatment Technique CS: 'INTRACAVITARY' (300a, 0202) Brachy Treatment Type CS: 'HDR' (300a, 0206) Treatment Machine Sequence 1 item(s) ---- (0008, 0070) Manufacturer LO: 'Varian Medical Systems' (0008, 0080) Institution Name LO: 'China Medical University Hospital' (0008, 0081) Institution Address ST: 'Taichung, Taiwan' (0008, 1090) Manufacturer's Model Name LO: 'GammaMedPlus' (0018, 1000) Device Serial Number LO: '000841' (300a, 00b2) Treatment Machine Name SH: 'GammaMed' --------- (300a, 0210) Source Sequence 1 item(s) ---- (300a, 0212) Source Number IS: "1" (300a, 0214) Source Type CS: 'LINE' (300a, 0216) Source Manufacturer LO: 'Varian Medical Systems' (300a, 0218) Active Source Diameter DS: "1" (300a, 021a) Active Source Length DS: "3.5" (300a, 0226) Source Isotope Name LO: 'GammaMed Plus HDR source 0.9 mm' (300a, 0228) Source Isotope Half Life DS: "73.83" (300a, 022a) Reference Air Kerma Rate DS: "40700" (300a, 022c) Source Strength Reference Date DA: '20191015' (300a, 022e) Source Strength Reference Time TM: '000000' --------- (300a, 0230) Application Setup Sequence 1 item(s) ---- (300a, 0232) Application Setup Type CS: 
'HENSCHKE' (300a, 0234) Application Setup Number IS: "1" (300a, 0236) Application Setup Name LO: 'study' (300a, 0238) Application Setup Manufacturer LO: '' (300a, 0250) Total Reference Air Kerma DS: "0" (300a, 0280) Channel Sequence 3 item(s) ---- (3006, 0084) Referenced ROI Number IS: "18" (300a, 0110) Number of Control Points IS: "38" (300a, 0282) Channel Number IS: "1" (300a, 0284) Channel Length DS: "1300" (300a, 0286) Channel Total Time DS: "0" (300a, 0288) Source Movement Type CS: 'STEPWISE' (300a, 0290) Source Applicator Number IS: "1" (300a, 0291) Source Applicator ID SH: 'Tandom_2' (300a, 0292) Source Applicator Type CS: 'RIGID' (300a, 0294) Source Applicator Name LO: '' (300a, 0296) Source Applicator Length DS: "1300" (300a, 02a0) Source Applicator Step Size DS: "5" (300a, 02a2) Transfer Tube Number IS: '' (300a, 02c8) Final Cumulative Time Weight DS: "0" (300a, 02d0) Brachy Control Point Sequence 38 item(s) ---- (300a, 0112) Control Point Index IS: "0" (300a, 02d2) Control Point Relative Position DS: "3.5" (300a, 02d4) Control Point 3D Position DS: ['12.778672628366', '-14.486222523329', '1.80953906063796'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "1" (300a, 02d2) Control Point Relative Position DS: "3.5" (300a, 02d4) Control Point 3D Position DS: ['12.778672628366', '-14.486222523329', '1.80953906063796'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "2" (300a, 02d2) Control Point Relative Position DS: "8.5" (300a, 02d4) Control Point 3D Position DS: ['12.6362405464063', '-13.594251702739', '-3.1081942746745'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "3" (300a, 02d2) Control Point Relative Position DS: "8.5" (300a, 02d4) Control Point 3D Position DS: ['12.6362405464063', '-13.594251702739', '-3.1081942746745'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "4" (300a, 02d2) Control Point Relative Position DS: "13.5" (300a, 02d4) Control Point 3D Position DS: ['12.4938084644467', '-12.702280882149', '-8.025927609987'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "5" (300a, 02d2) Control Point Relative Position DS: "13.5" (300a, 02d4) Control Point 3D Position DS: ['12.4938084644467', '-12.702280882149', '-8.025927609987'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "6" (300a, 02d2) Control Point Relative Position DS: "18.5" (300a, 02d4) Control Point 3D Position DS: ['12.351376382487', '-11.810310061558', '-12.9436609453'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "7" (300a, 02d2) Control Point Relative Position DS: "18.5" (300a, 02d4) Control Point 3D Position DS: ['12.351376382487', '-11.810310061558', '-12.9436609453'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "8" (300a, 02d2) Control Point Relative Position DS: "23.5" (300a, 02d4) Control Point 3D Position DS: ['12.2089443005273', '-10.918339240968', '-17.861394280612'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "9" (300a, 02d2) Control Point Relative Position DS: "23.5" (300a, 02d4) Control Point 3D Position DS: ['12.2089443005273', '-10.918339240968', '-17.861394280612'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "10" (300a, 02d2) Control Point Relative 
Position DS: "28.5" (300a, 02d4) Control Point 3D Position DS: ['12.0665122185677', '-10.026368420377', '-22.779127615925'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "11" (300a, 02d2) Control Point Relative Position DS: "28.5" (300a, 02d4) Control Point 3D Position DS: ['12.0665122185677', '-10.026368420377', '-22.779127615925'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "12" (300a, 02d2) Control Point Relative Position DS: "33.5" (300a, 02d4) Control Point 3D Position DS: ['11.924080136608', '-9.1343975997867', '-27.696860951237'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "13" (300a, 02d2) Control Point Relative Position DS: "33.5" (300a, 02d4) Control Point 3D Position DS: ['11.924080136608', '-9.1343975997867', '-27.696860951237'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "14" (300a, 02d2) Control Point Relative Position DS: "38.5" (300a, 02d4) Control Point 3D Position DS: ['11.7816480546484', '-8.2424267791963', '-32.61459428655'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "15" (300a, 02d2) Control Point Relative Position DS: "38.5" (300a, 02d4) Control Point 3D Position DS: ['11.7816480546484', '-8.2424267791963', '-32.61459428655'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "16" (300a, 02d2) Control Point Relative Position DS: "43.5" (300a, 02d4) Control Point 3D Position DS: ['11.6392159726887', '-7.3504559586058', '-37.532327621862'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "17" (300a, 02d2) Control Point Relative Position DS: "43.5" (300a, 02d4) Control Point 3D Position DS: ['11.6392159726887', '-7.3504559586058', '-37.532327621862'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "18" (300a, 02d2) Control Point Relative Position DS: "48.5" (300a, 02d4) Control Point 3D Position DS: ['11.496783890729', '-6.4584851380154', '-42.450060957175'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "19" (300a, 02d2) Control Point Relative Position DS: "48.5" (300a, 02d4) Control Point 3D Position DS: ['11.496783890729', '-6.4584851380154', '-42.450060957175'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "20" (300a, 02d2) Control Point Relative Position DS: "53.5" (300a, 02d4) Control Point 3D Position DS: ['11.3543518087694', '-5.5665143174249', '-47.367794292487'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "21" (300a, 02d2) Control Point Relative Position DS: "53.5" (300a, 02d4) Control Point 3D Position DS: ['11.3543518087694', '-5.5665143174249', '-47.367794292487'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "22" (300a, 02d2) Control Point Relative Position DS: "58.5" (300a, 02d4) Control Point 3D Position DS: ['11.2119197268097', '-4.6745434968344', '-52.2855276278'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "23" (300a, 02d2) Control Point Relative Position DS: "58.5" (300a, 02d4) Control Point 3D Position DS: ['11.2119197268097', '-4.6745434968344', '-52.2855276278'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "24" (300a, 02d2) 
Control Point Relative Position DS: "63.5" (300a, 02d4) Control Point 3D Position DS: ['11.0694876448501', '-3.782572676244', '-57.203260963112'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "25" (300a, 02d2) Control Point Relative Position DS: "63.5" (300a, 02d4) Control Point 3D Position DS: ['11.0694876448501', '-3.782572676244', '-57.203260963112'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "26" (300a, 02d2) Control Point Relative Position DS: "68.5" (300a, 02d4) Control Point 3D Position DS: ['10.9270555628904', '-2.8906018556535', '-62.120994298425'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "27" (300a, 02d2) Control Point Relative Position DS: "68.5" (300a, 02d4) Control Point 3D Position DS: ['10.9270555628904', '-2.8906018556535', '-62.120994298425'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "28" (300a, 02d2) Control Point Relative Position DS: "73.5" (300a, 02d4) Control Point 3D Position DS: ['10.7846234809307', '-1.9986310350631', '-67.038727633737'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "29" (300a, 02d2) Control Point Relative Position DS: "73.5" (300a, 02d4) Control Point 3D Position DS: ['10.7846234809307', '-1.9986310350631', '-67.038727633737'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "30" (300a, 02d2) Control Point Relative Position DS: "78.5" (300a, 02d4) Control Point 3D Position DS: ['10.6421913989711', '-1.1066602144726', '-71.95646096905'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "31" (300a, 02d2) Control Point Relative Position DS: "78.5" (300a, 02d4) Control Point 3D Position DS: ['10.6421913989711', '-1.1066602144726', '-71.95646096905'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "32" (300a, 02d2) Control Point Relative Position DS: "83.5" (300a, 02d4) Control Point 3D Position DS: ['10.4997593170114', '-2.1468939e-1', '-76.874194304362'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "33" (300a, 02d2) Control Point Relative Position DS: "83.5" (300a, 02d4) Control Point 3D Position DS: ['10.4997593170114', '-2.1468939e-1', '-76.874194304362'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "34" (300a, 02d2) Control Point Relative Position DS: "88.5" (300a, 02d4) Control Point 3D Position DS: ['10.3573272350518', '6.7728143e-1', '-81.791927639675'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "35" (300a, 02d2) Control Point Relative Position DS: "88.5" (300a, 02d4) Control Point 3D Position DS: ['10.3573272350518', '6.7728143e-1', '-81.791927639675'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "36" (300a, 02d2) Control Point Relative Position DS: "93.5" (300a, 02d4) Control Point 3D Position DS: ['10.2148951530921', '1.56925224729876', '-86.709660974987'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "37" (300a, 02d2) Control Point Relative Position DS: "93.5" (300a, 02d4) Control Point 3D Position DS: ['10.2148951530921', '1.56925224729876', '-86.709660974987'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300c, 000e) Referenced Source Number IS: 
"1" --------- (3006, 0084) Referenced ROI Number IS: "19" (300a, 0110) Number of Control Points IS: "20" (300a, 0282) Channel Number IS: "2" (300a, 0284) Channel Length DS: "1300" (300a, 0286) Channel Total Time DS: "0" (300a, 0288) Source Movement Type CS: 'STEPWISE' (300a, 0290) Source Applicator Number IS: "1" (300a, 0291) Source Applicator ID SH: 'Rt Ovoid_2' (300a, 0292) Source Applicator Type CS: 'RIGID' (300a, 0294) Source Applicator Name LO: '' (300a, 0296) Source Applicator Length DS: "1300" (300a, 02a0) Source Applicator Step Size DS: "5" (300a, 02a2) Transfer Tube Number IS: '' (300a, 02c8) Final Cumulative Time Weight DS: "0" (300a, 02d0) Brachy Control Point Sequence 20 item(s) ---- (300a, 0112) Control Point Index IS: "0" (300a, 02d2) Control Point Relative Position DS: "3.5" (300a, 02d4) Control Point 3D Position DS: ['1.50142436734442', '-6.1883425260249', '-49.785612454113'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "1" (300a, 02d2) Control Point Relative Position DS: "3.5" (300a, 02d4) Control Point 3D Position DS: ['1.50142436734442', '-6.1883425260249', '-49.785612454113'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "2" (300a, 02d2) Control Point Relative Position DS: "8.5" (300a, 02d4) Control Point 3D Position DS: ['1.52359412965335', '-5.2951413116253', '-54.705134798023'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "3" (300a, 02d2) Control Point Relative Position DS: "8.5" (300a, 02d4) Control Point 3D Position DS: ['1.52359412965335', '-5.2951413116253', '-54.705134798023'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "4" (300a, 02d2) Control Point Relative Position DS: "13.5" (300a, 02d4) Control Point 3D Position DS: ['1.54576389196228', '-4.4019400972257', '-59.624657141934'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "5" (300a, 02d2) Control Point Relative Position DS: "13.5" (300a, 02d4) Control Point 3D Position DS: ['1.54576389196228', '-4.4019400972257', '-59.624657141934'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "6" (300a, 02d2) Control Point Relative Position DS: "18.5" (300a, 02d4) Control Point 3D Position DS: ['1.56793365427121', '-3.5087388828261', '-64.544179485844'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "7" (300a, 02d2) Control Point Relative Position DS: "18.5" (300a, 02d4) Control Point 3D Position DS: ['1.56793365427121', '-3.5087388828261', '-64.544179485844'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "8" (300a, 02d2) Control Point Relative Position DS: "23.5" (300a, 02d4) Control Point 3D Position DS: ['1.59010341658015', '-2.6155376684265', '-69.463701829755'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "9" (300a, 02d2) Control Point Relative Position DS: "23.5" (300a, 02d4) Control Point 3D Position DS: ['1.59010341658015', '-2.6155376684265', '-69.463701829755'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "10" (300a, 02d2) Control Point Relative Position DS: "28.5" (300a, 02d4) Control Point 3D Position DS: ['1.6122731788891', '-1.7223364540269', '-74.383224173665'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "11" (300a, 
02d2) Control Point Relative Position DS: "28.5" (300a, 02d4) Control Point 3D Position DS: ['1.6122731788891', '-1.7223364540269', '-74.383224173665'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "12" (300a, 02d2) Control Point Relative Position DS: "33.5" (300a, 02d4) Control Point 3D Position DS: ['1.63444294119803', '-8.2913524e-1', '-79.302746517576'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "13" (300a, 02d2) Control Point Relative Position DS: "33.5" (300a, 02d4) Control Point 3D Position DS: ['1.63444294119803', '-8.2913524e-1', '-79.302746517576'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "14" (300a, 02d2) Control Point Relative Position DS: "38.5" (300a, 02d4) Control Point 3D Position DS: ['1.65661270350697', '6.4065975e-2', '-84.222268861486'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "15" (300a, 02d2) Control Point Relative Position DS: "38.5" (300a, 02d4) Control Point 3D Position DS: ['1.65661270350697', '6.4065975e-2', '-84.222268861486'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "16" (300a, 02d2) Control Point Relative Position DS: "43.5" (300a, 02d4) Control Point 3D Position DS: ['1.6787824658159', '9.5726719e-1', '-89.141791205396'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "17" (300a, 02d2) Control Point Relative Position DS: "43.5" (300a, 02d4) Control Point 3D Position DS: ['1.6787824658159', '9.5726719e-1', '-89.141791205396'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "18" (300a, 02d2) Control Point Relative Position DS: "48.5" (300a, 02d4) Control Point 3D Position DS: ['1.70095222812483', '1.8504684035715', '-94.061313549307'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "19" (300a, 02d2) Control Point Relative Position DS: "48.5" (300a, 02d4) Control Point 3D Position DS: ['1.70095222812483', '1.8504684035715', '-94.061313549307'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300c, 000e) Referenced Source Number IS: "1" --------- (3006, 0084) Referenced ROI Number IS: "20" (300a, 0110) Number of Control Points IS: "20" (300a, 0282) Channel Number IS: "3" (300a, 0284) Channel Length DS: "1300" (300a, 0286) Channel Total Time DS: "0" (300a, 0288) Source Movement Type CS: 'STEPWISE' (300a, 0290) Source Applicator Number IS: "1" (300a, 0291) Source Applicator ID SH: 'Lt Ovoid_2' (300a, 0292) Source Applicator Type CS: 'RIGID' (300a, 0294) Source Applicator Name LO: '' (300a, 0296) Source Applicator Length DS: "1300" (300a, 02a0) Source Applicator Step Size DS: "5" (300a, 02a2) Transfer Tube Number IS: '' (300a, 02c8) Final Cumulative Time Weight DS: "0" (300a, 02d0) Brachy Control Point Sequence 20 item(s) ---- (300a, 0112) Control Point Index IS: "0" (300a, 02d2) Control Point Relative Position DS: "3.5" (300a, 02d4) Control Point 3D Position DS: ['24.7136732619426', '-5.0136013141997', '-49.830020617865'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "1" (300a, 02d2) Control Point Relative Position DS: "3.5" (300a, 02d4) Control Point 3D Position DS: ['24.7136732619426', '-5.0136013141997', '-49.830020617865'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "2" (300a, 02d2) Control Point 
Relative Position DS: "8.5" (300a, 02d4) Control Point 3D Position DS: ['24.3387986312015', '-4.1076294097673', '-54.732946674783'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "3" (300a, 02d2) Control Point Relative Position DS: "8.5" (300a, 02d4) Control Point 3D Position DS: ['24.3387986312015', '-4.1076294097673', '-54.732946674783'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "4" (300a, 02d2) Control Point Relative Position DS: "13.5" (300a, 02d4) Control Point 3D Position DS: ['23.9639240004605', '-3.2016575053348', '-59.635872731701'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "5" (300a, 02d2) Control Point Relative Position DS: "13.5" (300a, 02d4) Control Point 3D Position DS: ['23.9639240004605', '-3.2016575053348', '-59.635872731701'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "6" (300a, 02d2) Control Point Relative Position DS: "18.5" (300a, 02d4) Control Point 3D Position DS: ['23.5890493697194', '-2.2956856009024', '-64.538798788619'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "7" (300a, 02d2) Control Point Relative Position DS: "18.5" (300a, 02d4) Control Point 3D Position DS: ['23.5890493697194', '-2.2956856009024', '-64.538798788619'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "8" (300a, 02d2) Control Point Relative Position DS: "23.5" (300a, 02d4) Control Point 3D Position DS: ['23.2141747389783', '-1.38971369647', '-69.441724845537'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "9" (300a, 02d2) Control Point Relative Position DS: "23.5" (300a, 02d4) Control Point 3D Position DS: ['23.2141747389783', '-1.38971369647', '-69.441724845537'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "10" (300a, 02d2) Control Point Relative Position DS: "28.5" (300a, 02d4) Control Point 3D Position DS: ['22.8393001082372', '-4.8374179e-1', '-74.344650902455'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "11" (300a, 02d2) Control Point Relative Position DS: "28.5" (300a, 02d4) Control Point 3D Position DS: ['22.8393001082372', '-4.8374179e-1', '-74.344650902455'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "12" (300a, 02d2) Control Point Relative Position DS: "33.5" (300a, 02d4) Control Point 3D Position DS: ['22.4644254774961', '4.2223011e-1', '-79.247576959373'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "13" (300a, 02d2) Control Point Relative Position DS: "33.5" (300a, 02d4) Control Point 3D Position DS: ['22.4644254774961', '4.2223011e-1', '-79.247576959373'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "14" (300a, 02d2) Control Point Relative Position DS: "38.5" (300a, 02d4) Control Point 3D Position DS: ['22.0895508467551', '1.3282020168274', '-84.150503016291'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "15" (300a, 02d2) Control Point Relative Position DS: "38.5" (300a, 02d4) Control Point 3D Position DS: ['22.0895508467551', '1.3282020168274', '-84.150503016291'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "16" (300a, 02d2) Control Point 
Relative Position DS: "43.5" (300a, 02d4) Control Point 3D Position DS: ['21.714676216014', '2.23417392125984', '-89.053429073209'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "17" (300a, 02d2) Control Point Relative Position DS: "43.5" (300a, 02d4) Control Point 3D Position DS: ['21.714676216014', '2.23417392125984', '-89.053429073209'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "18" (300a, 02d2) Control Point Relative Position DS: "48.5" (300a, 02d4) Control Point 3D Position DS: ['21.3398015852729', '3.14014582569229', '-93.956355130128'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300a, 0112) Control Point Index IS: "19" (300a, 02d2) Control Point Relative Position DS: "48.5" (300a, 02d4) Control Point 3D Position DS: ['21.3398015852729', '3.14014582569229', '-93.956355130128'] (300a, 02d6) Cumulative Time Weight DS: "0" --------- (300c, 000e) Referenced Source Number IS: "1" --------- --------- (300c, 0060) Referenced Structure Set Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: RT Structure Set Storage (0008, 1155) Referenced SOP Instance UID UI: 1.2.246.352.71.4.417454940236.287582.20191015164114 --------- (300e, 0002) Approval Status CS: 'UNAPPROVED' ###Markdown Write related RS Data in RP Tamplate OperatorsName cylin is in template. But Who is the people ? PhysiciansOfRecordJust copy from RS FrameOfReferenceUIDIn RS, ReferencedFrameOfReferenceSequence[0].FrameOfReferenceUID Basic thing copy from RSPhysiciansOfRecord, PatientName, PatientID, PatientBirthDate, PatientBirthTime, PatientSex,DeviceSerialNumber, SoftwareVersions, StudyID, StudyDate, StudyTime, StudyInstanceUID ReferencedStructureSetSequenceIn RP, ReferencedStructureSetSequence[0].ReferencedSOPClassUID = 1.2.840.10008.5.1.4.1.1.481.3 ReferencedStructureSetSequence[0].ReferencedSOPInstanceUID = 1.2.246.352.71.4.417454940236.287582.20191015164114 So, RP ReferencedStructureSetSequence[0].ReferencedSOPClassUID = RS SOPClassUID RP ReferencedStructureSetSequence[0].ReferencedSOPInstanceUID = RS SOPInstanceUID InstanceCreationRS InstanceCreationDate = 20191015 RS InstanceCreationTime = 164359.971000 RP InstanceCreationDate = 20191015 RP InstanceCreationTime = 164400.648000 RP RTPlanDate = 20191015 RP RTPlanTime = 164203.870000 (What if compare to RP InstanceCreationTime ?) RS StudyDate = 20191015 RS StudyTime = 103307.760000 Conclusion, StudyDate, InstanceCreationDate are all the same in RS and RP. 
Implement write procedure
###Code
import pydicom
import os

def get_new_uid(old_uid='1.2.246.352.71.5.417454940236.2063186.20191015164204', study_date='20190923'):
    def gen_6_random_digits():
        import random
        ret_str = ""
        for i in range(6):
            ch = chr(random.randrange(ord('0'), ord('9') + 1))
            ret_str += ch
        return ret_str
    theStudyDate = study_date
    # In '20191015164204' the first 8 digits match the StudyDate,
    # so replace the last UID component with StudyDate + 6 random digits
    uid_list = old_uid.split('.')
    uid_list[-1] = theStudyDate + gen_6_random_digits()
    #print(uid_list)
    new_uid = '.'.join(uid_list)
    #print(old_uid)
    #print(new_uid)
    return new_uid

rp_template_filepath = r'RP_Template/Brachy_RP.1.2.246.352.71.5.417454940236.2063186.20191015164204.dcm'

#folder = r'RAL_plan_new_20190905/29059811-1'
#folder = r'RP_Template_TestData'
folder = r'RALmilo'

# Set rs_filepath and ct_filelist, which are the INPUTs
rs_filepath = ''
ct_filelist = []
for file in os.listdir(folder):
    filepath = os.path.join(folder, file)
    fp = pydicom.read_file(filepath)
    if fp.Modality == 'CT':
        ct_filelist.append(filepath)
    elif fp.Modality == 'RTSTRUCT':
        rs_filepath = filepath

# Read the RS file as input
rs_fp = pydicom.read_file(rs_filepath)
# Read the RP template into rp_fp
rp_fp = pydicom.read_file(rp_template_filepath)

out_rp_filepath = r'out.brachy.rp.dcm'

rp_fp.OperatorsName = 'cylin'
rp_fp.PhysiciansOfRecord = rs_fp.PhysiciansOfRecord
rp_fp.FrameOfReferenceUID = rs_fp.ReferencedFrameOfReferenceSequence[0].FrameOfReferenceUID
rp_fp.ReferencedStructureSetSequence[0].ReferencedSOPClassUID = rs_fp.SOPClassUID
rp_fp.ReferencedStructureSetSequence[0].ReferencedSOPInstanceUID = rs_fp.SOPInstanceUID

directAttrSet = [
    'PhysiciansOfRecord', 'PatientName', 'PatientID', 'PatientBirthDate', 'PatientBirthTime', 'PatientSex',
    'DeviceSerialNumber', 'SoftwareVersions', 'StudyID', 'StudyDate', 'StudyTime', 'StudyInstanceUID']
for attr in directAttrSet:
    rs_val = getattr(rs_fp, attr)
    rp_val = getattr(rp_fp, attr)
    print('attr={}, \n In RS->{} \n In RP->{}'.format(attr, rs_val, rp_val))
    val = getattr(rs_fp, attr)
    setattr(rp_fp, attr, val)
    new_rp_val = getattr(rp_fp, attr)
    print('after update, RP->{}\n'.format(new_rp_val))

# Change SeriesInstanceUID and SOPInstanceUID
newSeriesInstanceUID = get_new_uid(old_uid=rp_fp.SeriesInstanceUID, study_date=rp_fp.StudyDate)
newSOPInstanceUID = get_new_uid(old_uid=rp_fp.SOPInstanceUID, study_date=rp_fp.StudyDate)
rp_fp.SeriesInstanceUID = newSeriesInstanceUID
rp_fp.SOPInstanceUID = newSOPInstanceUID

# To satisfy the conditions
# (1) RS InstanceCreationDate == RP InstanceCreationDate == RP RTPlanDate == RS StudyDate
# (2) RS StudyTime < RP RTPlanTime < RS InstanceCreationTime < RP InstanceCreationTime

# for (1)
rp_fp.InstanceCreationDate = rp_fp.RTPlanDate = rp_fp.StudyDate = rs_fp.StudyDate
# for (2)
#print('type(rs_fp.StudyTime) = {}'.format(type(rs_fp.StudyTime)))    # type is str
#print('type(rp_fp.RTPlanTime) = {}'.format(type(rp_fp.RTPlanTime)))  # type is str
rp_fp.RTPlanTime = str(float(rs_fp.StudyTime) + 0.001)
rp_fp.InstanceCreationTime = str(float(rs_fp.InstanceCreationTime) + 0.001)

#rp_fp.RTPlanLabel = 'TEST'
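# --- Added sanity check (a sketch, not in the original notebook): verify the
# two conditions above right after they are enforced; the asserts simply
# mirror the rules stated in the markdown section.
assert rs_fp.InstanceCreationDate == rp_fp.InstanceCreationDate == rp_fp.RTPlanDate == rs_fp.StudyDate  # (1)
assert float(rs_fp.StudyTime) < float(rp_fp.RTPlanTime) < float(rs_fp.InstanceCreationTime) < float(rp_fp.InstanceCreationTime)  # (2)

# Note: if matching the Varian-style '<StudyDate><6 random digits>' UID suffix
# is not required, pydicom's own pydicom.uid.generate_uid() could be used to
# create the new SeriesInstanceUID / SOPInstanceUID instead of get_new_uid().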
# tandem_rp_line is made from information in the log of
# run_and_make_rp_v02(folder='RP_Template_TestData', out_rp_filepath=r'out.brachy.rp.withpoints.v04.dcm'),
# so the two commented-out cases below are tandem lines for folder = RP_Template_TestData

# Case 5mm RP_Template_TestData
#tandem_rp_line = [[16.98905170843703, -184.51905170843702, 83.90804591543933], [16.12, -182.59439432365733, 79.43018494834773], [16.12, -180.22625544178416, 75.11276643836932], [15.61, -178.10368144202292, 70.66408125886848], [15.61, -175.89066242062734, 66.1888722377543], [15.1, -174.2040753078662, 61.57480512888706], [14.729683314929039, -172.1736095792839, 57.075705057419384], [14.58, -171.30564601520265, 52.373815716083655], [14.380884282801011, -169.63088428280102, 47.71915405020004], [14.07, -168.40476965158345, 42.91086137875857], [13.499484973334287, -167.15778337751826, 38.26268616993838], [13.56, -166.55221861146777, 33.68517102536384], [13.56, -165.3167540752058, 28.840212059630595], [13.05, -165.23, 23.915099979362182], [13.05, -164.02365616369758, 19.14787604601474], [12.54, -163.69, 14.292746333692945], [12.54, -163.18, 9.356747302684962], [12.03, -162.66623164553909, 4.4852221393690135], [12.03, -161.92495292378192, -0.4217532400709383], [12.03, -161.1751043098456, -5.326521885209219], [11.51, -160.62, -10.192257082467922], [11.51, -159.70766946316976, -15.077766811099007], [11.386390741412567, -159.09, -19.984742190538952], [11.0, -158.58, -24.872252237507816], [10.9244494605092, -158.06, -29.79627662545412], [10.49, -157.25898492424722, -34.64123559118737], [10.477706202490035, -156.53, -39.548210970627316], [9.98, -156.02, -44.421751807061376], [9.98, -155.51, -49.35775083806935], [9.98, -154.99, "-53.5"]]

# Case 4mm RP_Template_TestData
#tandem_rp_line = [[17.058034408590366, -184.83553835127827, 84.82143369180248], [16.12, -183.04872040645336, 80.32102040481051], [16.12, -180.4733483490366, 76.08175823151598], [15.61, -178.55800752481895, 71.55491671533126], [15.61, -176.28084178170548, 67.10357627515624], [15.110012332139032, -174.46002466427805, 62.53926404760405], [14.955327096954274, -172.61621853633338, 57.94356575751644], [14.789698894470218, -171.5756662234227, 53.30653420950084], [14.58, -169.87276039354526, 48.664463052097126], [14.07, -168.65186255883583, 43.87985317190524], [13.720887458553541, -167.6017749171071, 39.130931210013884], [13.521836263063076, -166.72183626306307, 34.64966171347814], [13.56, -165.56384698245822, 29.809203852777266], [13.05, -165.23, 24.915099979362207], [13.05, -164.48150629881926, 20.036905434600477], [12.735881996305874, -163.69, 15.26816469139559], [12.54, -163.18, 10.356747302684996], [12.273324552791502, -162.67, 5.454213932515696], [12.03, -162.16, 0.5487502096610601], [12.03, -161.42673816161056, -4.3586993784209165], [11.585182773532438, -160.6937369509645, -9.2108354864137], [11.51, -159.95476237042217, -14.108775017952333], [11.51, -159.21348364866498, -19.015750397392278], [11.0, -158.58, -23.87225223750779], [11.0, -158.06, -28.80575762101347], [10.49, -157.50607783149962, -33.6722437980407], [10.49, -156.76479910974243, -38.579219177480645], [9.98, -156.03933457348046, -43.424178143213894], [9.98, -155.51, -48.35775083806933], [9.98, -155.04252700099707, -53.29797307308827], [9.98, -154.99, "-53.5"]]

# Case 4mm in RALmilo
tandem_rp_line = [[-17.364760943476806, -137.50531764716507, 34.96922066277659], [-14.507295389260364, -138.96776102631532, 31.484506572268735], [-12.121197670078443, -141.8932334511995, 28.21556896195182], [-10.352067011648948, -143.68568033036905, 24.037129266612148], [-7.331626111466163, -145.72837388853384, 20.853412458988373], [-6.192040032243996, -146.9090550806545, 16.24906129029387], [-3.8038198803606917, -149.00737558337042, 12.431487658976454], [-2.747132679885409, -150.3128673201146, 7.870099029955784], [-1.0192642215390362, -151.67989966408996, 3.4381270994378488], [0.49, -153.63443097414736, -0.5117028736203919], [0.9360385068944075, -154.3260385068944, -5.351631839073963], [1.5000764344945428, -155.5407038230725, -10.125477715590891], [2.2818169860284883, -156.6618169860285, -14.920102945627201], [2.759779615212831, -157.45977961521285, -19.816846152805034], [2.9117725339140765, -158.2717725339141, -24.73801535705501], [2.79, -158.8, -29.651967982556688], [2.79, -159.55733472608358, -34.589907430809525], [2.79, -160.04573107506317, -39.549885303413205], [2.7110532270910443, -160.84894677290896, -44.478465290357306], [2.46, -161.33403799047878, -49.41841206350765], [2.5801821905215543, -161.76, -54.36418845612592], [3.12, -161.76, "-56"]]

import pydicom
import copy

#rp_fp = pydicom.read_file(r'RP_Template/Brachy_RP.1.2.246.352.71.5.417454940236.2063186.20191015164204.dcm')
#print(rp_fp.ApplicationSetupSequence[0].ChannelSequence[0].NumberOfControlPoints)
rp_fp.ApplicationSetupSequence[0].ChannelSequence[0].NumberOfControlPoints = len(tandem_rp_line)
# NOTE: the loop below appends a start/end pair per dwell position, so the
# BrachyControlPointSequence ends up with 2 * len(tandem_rp_line) items; it may
# be worth double-checking against the template whether NumberOfControlPoints
# should also be 2 * len(tandem_rp_line).
BCPItemTemplate = copy.deepcopy(rp_fp.ApplicationSetupSequence[0].ChannelSequence[0].BrachyControlPointSequence[0])
print('BCPItemTemplate = {}'.format(BCPItemTemplate))
rp_fp.ApplicationSetupSequence[0].ChannelSequence[0].BrachyControlPointSequence.clear()

# Clean Dose Reference
rp_fp.DoseReferenceSequence.clear()

# Start to make data in the style below:
"""
(300a, 0112) Control Point Index IS: "0"
(300a, 02d2) Control Point Relative Position DS: "3.5"
(300a, 02d4) Control Point 3D Position DS: ['12.778672628366', '-14.486222523329', '1.80953906063796']
(300a, 02d6) Cumulative Time Weight DS: "0"
---------
(300a, 0112) Control Point Index IS: "1"
(300a, 02d2) Control Point Relative Position DS: "3.5"
(300a, 02d4) Control Point 3D Position DS: ['12.778672628366', '-14.486222523329', '1.80953906063796']
(300a, 02d6) Cumulative Time Weight DS: "0"
---------
(300a, 0112) Control Point Index IS: "2"
(300a, 02d2) Control Point Relative Position DS: "8.5"
(300a, 02d4) Control Point 3D Position DS: ['12.6362405464063', '-13.594251702739', '-3.1081942746745']
(300a, 02d6) Cumulative Time Weight DS: "0"
---------
(300a, 0112) Control Point Index IS: "3"
(300a, 02d2) Control Point Relative Position DS: "8.5"
(300a, 02d4) Control Point 3D Position DS: ['12.6362405464063', '-13.594251702739', '-3.1081942746745']
(300a, 02d6) Cumulative Time Weight DS: "0"
---------
(300a, 0112) Control Point Index IS: "4"
(300a, 02d2) Control Point Relative Position DS: "13.5"
(300a, 02d4) Control Point 3D Position DS: ['12.4938084644467', '-12.702280882149', '-8.025927609987']
(300a, 02d6) Cumulative Time Weight DS: "0"
---------
(300a, 0112) Control Point Index IS: "5"
(300a, 02d2) Control Point Relative Position DS: "13.5"
(300a, 02d4) Control Point 3D Position DS: ['12.4938084644467', '-12.702280882149', '-8.025927609987']
(300a, 02d6) Cumulative Time Weight DS: "0"
"""

for idx, pt in enumerate(tandem_rp_line):
    BCPPt = copy.deepcopy(BCPItemTemplate)
    BCPPt.ControlPointRelativePosition = 3.5 + idx*5
    BCPPt.ControlPoint3DPosition[0] = pt[0]
    BCPPt.ControlPoint3DPosition[1] = pt[1]
    BCPPt.ControlPoint3DPosition[2] = pt[2]
    BCPStartPt = copy.deepcopy(BCPPt)
    BCPEndPt = copy.deepcopy(BCPPt)
    BCPStartPt.ControlPointIndex = 2*idx
    BCPEndPt.ControlPointIndex = 2*idx + 1
    rp_fp.ApplicationSetupSequence[0].ChannelSequence[0].BrachyControlPointSequence.append(BCPStartPt)
    rp_fp.ApplicationSetupSequence[0].ChannelSequence[0].BrachyControlPointSequence.append(BCPEndPt)
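# --- Added structural check (a sketch, not in the original notebook): each
# dwell position should appear as a start/end pair sharing one 3D position,
# with the relative position advancing 5 mm per pair starting at 3.5.
bcps = rp_fp.ApplicationSetupSequence[0].ChannelSequence[0].BrachyControlPointSequence
assert len(bcps) == 2 * len(tandem_rp_line)
for i in range(0, len(bcps), 2):
    assert list(bcps[i].ControlPoint3DPosition) == list(bcps[i + 1].ControlPoint3DPosition)
    assert float(bcps[i].ControlPointRelativePosition) == 3.5 + (i // 2) * 5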
# Change the ReferencedROINumber values (from the RP_Template_TestData RS) for the output RP file
#rp_fp.ApplicationSetupSequence[0].ChannelSequence[0].ReferencedROINumber = 15
#rp_fp.ApplicationSetupSequence[0].ChannelSequence[1].ReferencedROINumber = 16
#rp_fp.ApplicationSetupSequence[0].ChannelSequence[2].ReferencedROINumber = 17
rp_fp.ApplicationSetupSequence[0].ChannelSequence[0].ReferencedROINumber = 21
rp_fp.ApplicationSetupSequence[0].ChannelSequence[1].ReferencedROINumber = 22
rp_fp.ApplicationSetupSequence[0].ChannelSequence[2].ReferencedROINumber = 23

#print(BrachyControlPointSequenceItemTemplate)
#
# (ControlPointIndex, ControlPointRelativePosition, ControlPoint3DPosition, CumulativeTimeWeight)
# (0, 3.5, [,,], 0 )
# (0, )
#for idx, cp in enumerate(ChannelSequence[0].BrachyControlPointSequence):

pydicom.write_file(out_rp_filepath, rp_fp)
###Output
attr=PhysiciansOfRecord, 
 In RS->Chen^Shang-Wen 
 In RP->Chen^Shang-Wen
after update, RP->Chen^Shang-Wen

attr=PatientName, 
 In RS->SUNARTI 
 In RP->李吳雲霞
after update, RP->SUNARTI

attr=PatientID, 
 In RS->34982640 
 In RP->21328949
after update, RP->34982640

attr=PatientBirthDate, 
 In RS->19761011 
 In RP->19620209
after update, RP->19761011

attr=PatientBirthTime, 
 In RS->000000 
 In RP->000000
after update, RP->000000

attr=PatientSex, 
 In RS->F 
 In RP->F
after update, RP->F

attr=DeviceSerialNumber, 
 In RS->417454940236 
 In RP->417454940236
after update, RP->417454940236

attr=SoftwareVersions, 
 In RS->13.6.32 
 In RP->13.6.32
after update, RP->13.6.32

attr=StudyID, 
 In RS->3 
 In RP->3
after update, RP->3

attr=StudyDate, 
 In RS->20190423 
 In RP->20191015
after update, RP->20190423

attr=StudyTime, 
 In RS->095710.840000 
 In RP->103307.760000
after update, RP->095710.840000

attr=StudyInstanceUID, 
 In RS->1.3.12.2.1107.5.1.4.95999.30000019042300043162800000016 
 In RP->1.3.12.2.1107.5.1.4.95999.30000019101500111951200000013
after update, RP->1.3.12.2.1107.5.1.4.95999.30000019042300043162800000016

BCPItemTemplate = (300a, 0112) Control Point Index IS: "0"
(300a, 02d2) Control Point Relative Position DS: "3.5"
(300a, 02d4) Control Point 3D Position DS: ['12.778672628366', '-14.486222523329', '1.80953906063796']
(300a, 02d6) Cumulative Time Weight DS: "0"
###Markdown
Check the data in the output RP file
###Code
rp_fp = pydicom.read_file(out_rp_filepath)
print(rp_fp)
#print(rp_fp.ApplicationSetupSequence[0].ChannelSequence[0])

print(rs_fp)

def get_new_uid(old_uid='1.2.246.352.71.5.417454940236.2063186.20191015164204', study_date='20190923'):
    def gen_6_random_digits():
        import random
        ret_str = ""
        for i in range(6):
            ch = chr(random.randrange(ord('0'), ord('9') + 1))
            ret_str += ch
        return ret_str
    theStudyDate = study_date
    # In '20191015164204' the first 8 digits match the StudyDate,
    # so replace the last UID component with StudyDate + 6 random digits
    uid_list = old_uid.split('.')
    uid_list[-1] = theStudyDate + gen_6_random_digits()
    #print(uid_list)
    new_uid = '.'.join(uid_list)
    #print(old_uid)
    #print(new_uid)
    return new_uid

rp_fp = pydicom.read_file('brachy.rp.dcm')
print(rp_fp)

#rs_fp = pydicom.read_file(r'16568131/RS.1.2.246.352.71.4.417454940236.288142.20191018151404.dcm')
rs_fp = pydicom.read_file(r'24460566/RS.1.2.246.352.71.4.417454940236.288187.20191018155523.dcm')
print(rs_fp)
###Output
(0008, 0005) Specific Character Set CS: 'ISO_IR 192' (0008, 0012) Instance Creation Date DA: '20191018' (0008, 0013) Instance Creation Time TM: '155540.373000' (0008, 0016) SOP Class UID UI: RT Structure Set Storage (0008, 0018) SOP Instance UID UI: 1.2.246.352.71.4.417454940236.288187.20191018155523 (0008, 0020) Study Date DA: '20191008' (0008, 0030) Study Time TM: '092849.803000' (0008, 0050) Accession Number SH: '2019-10-08-09:42'
(0008, 0060) Modality CS: 'RTSTRUCT' (0008, 0070) Manufacturer LO: 'Varian Medical Systems' (0008, 0090) Referring Physician's Name PN: '' (0008, 1010) Station Name SH: 'ARIA136SQL' (0008, 1030) Study Description LO: 'Pelvis^3_IC (Adult)' (0008, 103e) Series Description LO: 'ARIA RadOnc Structure Sets' (0008, 1048) Physician(s) of Record PN: 'Chen^Shang-Wen' (0008, 1090) Manufacturer's Model Name LO: 'ARIA RadOnc' (0010, 0010) Patient's Name PN: '吳慧萍' (0010, 0020) Patient ID LO: '24460566' (0010, 0030) Patient's Birth Date DA: '19730628' (0010, 0032) Patient's Birth Time TM: '000000' (0010, 0040) Patient's Sex CS: 'F' (0018, 1000) Device Serial Number LO: '417454940236' (0018, 1020) Software Version(s) LO: '13.6.32' (0020, 000d) Study Instance UID UI: 1.3.12.2.1107.5.1.4.95999.30000019100800091392300000010 (0020, 000e) Series Instance UID UI: 1.2.246.352.71.2.417454940236.4230106.20191008094400 (0020, 0010) Study ID SH: '19482' (0020, 0011) Series Number IS: "8" (0020, 0013) Instance Number IS: "2" (3006, 0002) Structure Set Label SH: 'RALCT_20191009' (3006, 0006) Structure Set Description ST: '|AcurosXB-13.5' (3006, 0008) Structure Set Date DA: '20191018' (3006, 0009) Structure Set Time TM: '155523.132000' (3006, 0010) Referenced Frame of Reference Sequence 1 item(s) ---- (0020, 0052) Frame of Reference UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008000917323000000170 (3006, 0012) RT Referenced Study Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: Study Component Management SOP Class (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.30000019100800091392300000010 (3006, 0014) RT Referenced Series Sequence 1 item(s) ---- (0020, 000e) Series Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000020790 (3006, 0016) Contour Image Sequence 94 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000020800 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000020810 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000020820 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000020830 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000020840 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000020850 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000020860 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000020870 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000020880 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000020890 --------- (0008, 1150) 
Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000020900 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000020910 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000020920 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000020930 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000020940 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000020950 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000020960 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000020970 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000020980 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000020990 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021000 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021010 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021020 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021030 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021040 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021050 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021060 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021070 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021080 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021090 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 
1.3.12.2.1107.5.1.4.95999.300000191008001101339000021100 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021110 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021120 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021130 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021140 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021150 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021160 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021170 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021180 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021190 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021200 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021210 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021220 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021230 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021240 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021250 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021260 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021270 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021280 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021290 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021300 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 
1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021310 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021320 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021330 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021340 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021350 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021360 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021370 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021380 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021390 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021400 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021410 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021420 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021430 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021440 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021450 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021460 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021470 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021480 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021490 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021500 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021510 --------- (0008, 1150) Referenced SOP 
Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021520 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021530 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021540 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021550 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021560 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021570 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021580 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021590 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021600 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021610 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021620 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021630 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021640 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021650 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021660 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021670 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021680 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021690 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021700 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021710 --------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021720 
--------- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021730 --------- --------- --------- --------- (3006, 0020) Structure Set ROI Sequence 19 item(s) ---- (3006, 0022) ROI Number IS: "1" (3006, 0024) Referenced Frame of Reference UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008000917323000000170 (3006, 0026) ROI Name LO: 'BODY' (3006, 0036) ROI Generation Algorithm CS: 'MANUAL' --------- (3006, 0022) ROI Number IS: "2" (3006, 0024) Referenced Frame of Reference UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008000917323000000170 (3006, 0026) ROI Name LO: 'Gause' (3006, 0036) ROI Generation Algorithm CS: 'MANUAL' --------- (3006, 0022) ROI Number IS: "3" (3006, 0024) Referenced Frame of Reference UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008000917323000000170 (3006, 0026) ROI Name LO: 'Bladder' (3006, 0036) ROI Generation Algorithm CS: 'MANUAL' --------- (3006, 0022) ROI Number IS: "4" (3006, 0024) Referenced Frame of Reference UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008000917323000000170 (3006, 0026) ROI Name LO: 'Foley' (3006, 0036) ROI Generation Algorithm CS: 'MANUAL' --------- (3006, 0022) ROI Number IS: "5" (3006, 0024) Referenced Frame of Reference UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008000917323000000170 (3006, 0026) ROI Name LO: 'Adjacent_bowel' (3006, 0036) ROI Generation Algorithm CS: 'MANUAL' --------- (3006, 0022) ROI Number IS: "6" (3006, 0024) Referenced Frame of Reference UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008000917323000000170 (3006, 0026) ROI Name LO: 'GTV' (3006, 0036) ROI Generation Algorithm CS: 'MANUAL' --------- (3006, 0022) ROI Number IS: "7" (3006, 0024) Referenced Frame of Reference UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008000917323000000170 (3006, 0026) ROI Name LO: 'HR-CTV' (3006, 0036) ROI Generation Algorithm CS: 'MANUAL' --------- (3006, 0022) ROI Number IS: "8" (3006, 0024) Referenced Frame of Reference UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008000917323000000170 (3006, 0026) ROI Name LO: 'MRI-based GTV' (3006, 0036) ROI Generation Algorithm CS: 'MANUAL' --------- (3006, 0022) ROI Number IS: "9" (3006, 0024) Referenced Frame of Reference UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008000917323000000170 (3006, 0026) ROI Name LO: 'R_GTV' (3006, 0036) ROI Generation Algorithm CS: 'MANUAL' --------- (3006, 0022) ROI Number IS: "10" (3006, 0024) Referenced Frame of Reference UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008000917323000000170 (3006, 0026) ROI Name LO: 'R_HR-CTV' (3006, 0036) ROI Generation Algorithm CS: 'MANUAL' --------- (3006, 0022) ROI Number IS: "11" (3006, 0024) Referenced Frame of Reference UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008000917323000000170 (3006, 0026) ROI Name LO: 'Rectum' (3006, 0036) ROI Generation Algorithm CS: 'MANUAL' --------- (3006, 0022) ROI Number IS: "12" (3006, 0024) Referenced Frame of Reference UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008000917323000000170 (3006, 0026) ROI Name LO: 'Sigmoid_colon' (3006, 0036) ROI Generation Algorithm CS: 'MANUAL' --------- (3006, 0022) ROI Number IS: "13" (3006, 0024) Referenced Frame of Reference UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008000917323000000170 (3006, 0026) ROI Name LO: 'Spinal_cord' (3006, 0036) ROI Generation Algorithm CS: 'MANUAL' --------- (3006, 0022) ROI Number IS: "14" (3006, 0024) Referenced Frame of Reference UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008000917323000000170 (3006, 0026) ROI Name LO: 'Uterus' (3006, 0036) ROI 
Generation Algorithm CS: 'MANUAL' --------- (3006, 0022) ROI Number IS: "15" (3006, 0024) Referenced Frame of Reference UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008000917323000000170 (3006, 0026) ROI Name LO: 'Applicator1' (3006, 0036) ROI Generation Algorithm CS: '' --------- (3006, 0022) ROI Number IS: "16" (3006, 0024) Referenced Frame of Reference UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008000917323000000170 (3006, 0026) ROI Name LO: 'Applicator2' (3006, 0036) ROI Generation Algorithm CS: '' --------- (3006, 0022) ROI Number IS: "17" (3006, 0024) Referenced Frame of Reference UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008000917323000000170 (3006, 0026) ROI Name LO: 'Applicator3' (3006, 0036) ROI Generation Algorithm CS: '' --------- (3006, 0022) ROI Number IS: "18" (3006, 0024) Referenced Frame of Reference UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008000917323000000170 (3006, 0026) ROI Name LO: 'Applicator4' (3006, 0036) ROI Generation Algorithm CS: '' --------- (3006, 0022) ROI Number IS: "19" (3006, 0024) Referenced Frame of Reference UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008000917323000000170 (3006, 0026) ROI Name LO: 'Applicator5' (3006, 0036) ROI Generation Algorithm CS: '' --------- (3006, 0039) ROI Contour Sequence 19 item(s) ---- (3006, 002a) ROI Display Color IS: ['0', '255', '0'] (3006, 0040) Contour Sequence 94 item(s) ---- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021730 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "950" (3006, 0050) Contour Data DS: Array of 2850 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021720 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "948" (3006, 0050) Contour Data DS: Array of 2844 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021710 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "956" (3006, 0050) Contour Data DS: Array of 2868 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021700 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "956" (3006, 0050) Contour Data DS: Array of 2868 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021690 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "952" (3006, 0050) Contour Data DS: Array of 2856 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021680 --------- (3006, 0042) 
Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "954" (3006, 0050) Contour Data DS: Array of 2862 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021670 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "956" (3006, 0050) Contour Data DS: Array of 2868 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021660 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "962" (3006, 0050) Contour Data DS: Array of 2886 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021650 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "958" (3006, 0050) Contour Data DS: Array of 2874 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021640 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "960" (3006, 0050) Contour Data DS: Array of 2880 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021630 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "960" (3006, 0050) Contour Data DS: Array of 2880 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021620 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "958" (3006, 0050) Contour Data DS: Array of 2874 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021610 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "954" (3006, 0050) Contour Data DS: Array of 2862 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021600 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "962" (3006, 0050) Contour Data DS: Array of 2886 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021590 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 
0046) Number of Contour Points IS: "958" (3006, 0050) Contour Data DS: Array of 2874 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021580 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "958" (3006, 0050) Contour Data DS: Array of 2874 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021570 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "958" (3006, 0050) Contour Data DS: Array of 2874 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021560 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "956" (3006, 0050) Contour Data DS: Array of 2868 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021550 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "952" (3006, 0050) Contour Data DS: Array of 2856 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021540 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "950" (3006, 0050) Contour Data DS: Array of 2850 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021530 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "948" (3006, 0050) Contour Data DS: Array of 2844 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021520 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "948" (3006, 0050) Contour Data DS: Array of 2844 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021510 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "938" (3006, 0050) Contour Data DS: Array of 2814 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021500 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "934" (3006, 0050) 
Contour Data DS: Array of 2802 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021490 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "926" (3006, 0050) Contour Data DS: Array of 2778 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021480 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "926" (3006, 0050) Contour Data DS: Array of 2778 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021470 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "928" (3006, 0050) Contour Data DS: Array of 2784 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021460 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "926" (3006, 0050) Contour Data DS: Array of 2778 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021450 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "924" (3006, 0050) Contour Data DS: Array of 2772 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021440 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "928" (3006, 0050) Contour Data DS: Array of 2784 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021430 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "930" (3006, 0050) Contour Data DS: Array of 2790 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021420 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "930" (3006, 0050) Contour Data DS: Array of 2790 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021410 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "928" (3006, 0050) Contour Data DS: Array of 2784 elements --------- 
(3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021400 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "930" (3006, 0050) Contour Data DS: Array of 2790 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021390 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "932" (3006, 0050) Contour Data DS: Array of 2796 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021380 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "926" (3006, 0050) Contour Data DS: Array of 2778 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021370 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "926" (3006, 0050) Contour Data DS: Array of 2778 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021360 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "928" (3006, 0050) Contour Data DS: Array of 2784 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021350 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "932" (3006, 0050) Contour Data DS: Array of 2796 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021340 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "934" (3006, 0050) Contour Data DS: Array of 2802 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021330 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "930" (3006, 0050) Contour Data DS: Array of 2790 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021320 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "938" (3006, 0050) Contour Data DS: Array of 2814 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- 
(0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021310 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "934" (3006, 0050) Contour Data DS: Array of 2802 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021300 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "936" (3006, 0050) Contour Data DS: Array of 2808 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021290 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "936" (3006, 0050) Contour Data DS: Array of 2808 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021280 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "934" (3006, 0050) Contour Data DS: Array of 2802 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021270 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "938" (3006, 0050) Contour Data DS: Array of 2814 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021260 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "942" (3006, 0050) Contour Data DS: Array of 2826 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021250 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "942" (3006, 0050) Contour Data DS: Array of 2826 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021240 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "936" (3006, 0050) Contour Data DS: Array of 2808 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021230 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "944" (3006, 0050) Contour Data DS: Array of 2832 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image 
Storage
      (0008, 1155) Referenced SOP Instance UID         UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021220
   ---------
   (3006, 0042) Contour Geometric Type              CS: 'CLOSED_PLANAR'
   (3006, 0046) Number of Contour Points            IS: "940"
   (3006, 0050) Contour Data                        DS: Array of 2820 elements
   ---------
   ... (one CLOSED_PLANAR contour per CT slice, 866-944 points each in the slices shown, continuing down to slice 1.3.12.2.1107.5.1.4.95999.300000191008001101339000020800) ...
   (3006, 0084) Referenced ROI Number               IS: "1"
---------
(3006, 002a) ROI Display Color                   IS: ['0', '64', '128']
(3006, 0040) Contour Sequence  11 item(s) ----
   (3006, 0016)  Contour Image Sequence  1 item(s) ----
      (0008, 1150) Referenced SOP Class UID            UI: CT Image Storage
      (0008, 1155) Referenced SOP Instance UID         UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021530
   ---------
   (3006, 0042) Contour Geometric Type              CS: 'CLOSED_PLANAR'
   (3006, 0046) Number of Contour Points            IS: "82"
   (3006, 0050) Contour Data                        DS: Array of 246 elements
   ---------
   ... (10 further CLOSED_PLANAR contours, 86-116 points each, on consecutive slices) ...
   (3006, 0084) Referenced ROI Number               IS: "2"
---------
(3006, 002a) ROI Display Color                   IS: ['0', '255', '255']
(3006, 0040) Contour Sequence  20 item(s) ----
   (3006, 0016)  Contour Image Sequence  1 item(s) ----
      (0008, 1150) Referenced SOP Class UID            UI: CT Image Storage
      (0008, 1155) Referenced SOP Instance UID         UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021590
   ---------
   (3006, 0042) Contour Geometric Type              CS: 'CLOSED_PLANAR'
   (3006, 0046) Number of Contour Points            IS: "164"
   (3006, 0050) Contour Data                        DS: Array of 492 elements
   ---------
   ... (19 further CLOSED_PLANAR contours, 190-444 points each) ...
   (3006, 0084) Referenced ROI Number               IS: "3"
---------
(3006, 002a) ROI Display Color                   IS: ['0', '150', '0']
(3006, 0040) Contour Sequence  14 item(s) ----
   (3006, 0016)  Contour Image Sequence  1 item(s) ----
      (0008, 1150) Referenced SOP Class UID            UI: CT Image Storage
      (0008, 1155) Referenced SOP Instance UID         UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021530
   ---------
   (3006, 0042) Contour Geometric Type              CS: 'CLOSED_PLANAR'
   (3006, 0046) Number of Contour Points            IS: "94"
   (3006, 0050) Contour Data                        DS: Array of 282 elements
   ---------
   ... (13 further CLOSED_PLANAR contours, 52-128 points each) ...
   (3006, 0084) Referenced ROI Number               IS: "4"
---------
(3006, 002a) ROI Display Color                   IS: ['255', '128', '128']
(3006, 0040) Contour Sequence  56 item(s) ----
   (3006, 0016)  Contour Image Sequence  1 item(s) ----
      (0008, 1150) Referenced SOP Class UID            UI: CT Image Storage
      (0008, 1155) Referenced SOP Instance UID         UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021430
   ---------
   (3006, 0042) Contour Geometric Type              CS: 'CLOSED_PLANAR'
   (3006, 0046) Number of Contour Points            IS: "176"
   (3006, 0050) Contour Data                        DS: Array of 528 elements
   ---------
   ... (55 further CLOSED_PLANAR contours, 64-382 points each; most slices carry two contours for this ROI) ...
   (3006, 0084) Referenced ROI Number               IS: "5"
---------
(3006, 002a) ROI Display Color                   IS: ['255', '0', '255']
(3006, 0040) Contour Sequence  16 item(s) ----
   (3006, 0016)  Contour Image Sequence  1 item(s) ----
      (0008, 1150) Referenced SOP Class UID            UI: CT Image Storage
      (0008, 1155) Referenced SOP Instance UID         UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021400
   ---------
   (3006, 0042) Contour Geometric Type              CS: 'CLOSED_PLANAR'
   (3006, 0046) Number of Contour Points            IS: "104"
   (3006, 0050) Contour Data                        DS: Array of 312 elements
   ---------
   ... (15 further CLOSED_PLANAR contours, 98-194 points each) ...
   (3006, 0084) Referenced ROI Number               IS: "6"
---------
(3006, 002a) ROI Display Color                   IS: ['0', '255', '0']
(3006, 0040) Contour Sequence  22 item(s) ----
   (3006, 0016)  Contour Image Sequence  1 item(s) ----
      (0008, 1150) Referenced SOP Class UID            UI: CT Image Storage
      (0008, 1155) Referenced SOP Instance UID         UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021410
   ---------
   (3006, 0042) Contour Geometric Type              CS: 'CLOSED_PLANAR'
   (3006, 0046) Number of Contour Points            IS: "130"
   (3006, 0050) Contour Data                        DS: Array of 390 elements
   ---------
   ...
1.3.12.2.1107.5.1.4.95999.300000191008001101339000021210 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "136" (3006, 0050) Contour Data DS: Array of 408 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021200 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "128" (3006, 0050) Contour Data DS: Array of 384 elements --------- (3006, 0084) Referenced ROI Number IS: "7" --------- (3006, 002a) ROI Display Color IS: ['255', '0', '0'] (3006, 0040) Contour Sequence 20 item(s) ---- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021400 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "66" (3006, 0050) Contour Data DS: Array of 198 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021390 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "76" (3006, 0050) Contour Data DS: Array of 228 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021380 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "106" (3006, 0050) Contour Data DS: Array of 318 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021370 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "86" (3006, 0050) Contour Data DS: Array of 258 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021360 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "112" (3006, 0050) Contour Data DS: Array of 336 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021350 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "110" (3006, 0050) Contour Data DS: Array of 330 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021340 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "160" (3006, 0050) Contour Data DS: Array of 480 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) 
Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021330 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "148" (3006, 0050) Contour Data DS: Array of 444 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021320 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "168" (3006, 0050) Contour Data DS: Array of 504 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021310 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "164" (3006, 0050) Contour Data DS: Array of 492 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021300 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "160" (3006, 0050) Contour Data DS: Array of 480 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021290 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "156" (3006, 0050) Contour Data DS: Array of 468 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021280 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "156" (3006, 0050) Contour Data DS: Array of 468 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021270 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "150" (3006, 0050) Contour Data DS: Array of 450 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021260 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "156" (3006, 0050) Contour Data DS: Array of 468 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021250 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "128" (3006, 0050) Contour Data DS: Array of 384 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) 
Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021240 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "108" (3006, 0050) Contour Data DS: Array of 324 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021230 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "104" (3006, 0050) Contour Data DS: Array of 312 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021220 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "98" (3006, 0050) Contour Data DS: Array of 294 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021210 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "8" (3006, 0050) Contour Data DS: Array of 24 elements --------- (3006, 0084) Referenced ROI Number IS: "8" --------- (3006, 002a) ROI Display Color IS: ['255', '0', '255'] (3006, 0084) Referenced ROI Number IS: "9" --------- (3006, 002a) ROI Display Color IS: ['0', '255', '0'] (3006, 0084) Referenced ROI Number IS: "10" --------- (3006, 002a) ROI Display Color IS: ['128', '64', '64'] (3006, 0040) Contour Sequence 24 item(s) ---- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021590 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "100" (3006, 0050) Contour Data DS: Array of 300 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021580 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "106" (3006, 0050) Contour Data DS: Array of 318 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021570 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "110" (3006, 0050) Contour Data DS: Array of 330 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021560 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "114" (3006, 0050) Contour Data DS: Array of 342 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 
1.3.12.2.1107.5.1.4.95999.300000191008001101339000021550 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "122" (3006, 0050) Contour Data DS: Array of 366 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021540 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "124" (3006, 0050) Contour Data DS: Array of 372 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021530 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "136" (3006, 0050) Contour Data DS: Array of 408 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021520 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "154" (3006, 0050) Contour Data DS: Array of 462 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021510 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "176" (3006, 0050) Contour Data DS: Array of 528 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021500 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "194" (3006, 0050) Contour Data DS: Array of 582 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021490 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "202" (3006, 0050) Contour Data DS: Array of 606 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021480 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "210" (3006, 0050) Contour Data DS: Array of 630 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021470 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "222" (3006, 0050) Contour Data DS: Array of 666 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021460 
--------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "230" (3006, 0050) Contour Data DS: Array of 690 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021450 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "238" (3006, 0050) Contour Data DS: Array of 714 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021440 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "252" (3006, 0050) Contour Data DS: Array of 756 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021430 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "242" (3006, 0050) Contour Data DS: Array of 726 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021420 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "246" (3006, 0050) Contour Data DS: Array of 738 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021410 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "242" (3006, 0050) Contour Data DS: Array of 726 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021400 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "236" (3006, 0050) Contour Data DS: Array of 708 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021390 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "234" (3006, 0050) Contour Data DS: Array of 702 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021380 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "230" (3006, 0050) Contour Data DS: Array of 690 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021370 --------- (3006, 0042) Contour Geometric Type CS: 
'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "230" (3006, 0050) Contour Data DS: Array of 690 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021360 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "230" (3006, 0050) Contour Data DS: Array of 690 elements --------- (3006, 0084) Referenced ROI Number IS: "11" --------- (3006, 002a) ROI Display Color IS: ['187', '255', '187'] (3006, 0040) Contour Sequence 31 item(s) ---- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021410 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "68" (3006, 0050) Contour Data DS: Array of 204 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021400 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "84" (3006, 0050) Contour Data DS: Array of 252 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021390 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "128" (3006, 0050) Contour Data DS: Array of 384 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021380 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "144" (3006, 0050) Contour Data DS: Array of 432 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021370 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "138" (3006, 0050) Contour Data DS: Array of 414 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021360 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "144" (3006, 0050) Contour Data DS: Array of 432 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021350 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "146" (3006, 0050) Contour Data DS: Array of 438 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 
1.3.12.2.1107.5.1.4.95999.300000191008001101339000021350 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "218" (3006, 0050) Contour Data DS: Array of 654 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021340 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "148" (3006, 0050) Contour Data DS: Array of 444 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021340 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "224" (3006, 0050) Contour Data DS: Array of 672 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021330 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "146" (3006, 0050) Contour Data DS: Array of 438 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021330 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "226" (3006, 0050) Contour Data DS: Array of 678 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021320 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "150" (3006, 0050) Contour Data DS: Array of 450 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021320 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "228" (3006, 0050) Contour Data DS: Array of 684 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021310 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "152" (3006, 0050) Contour Data DS: Array of 456 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021310 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "234" (3006, 0050) Contour Data DS: Array of 702 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021300 
--------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "364" (3006, 0050) Contour Data DS: Array of 1092 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021290 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "382" (3006, 0050) Contour Data DS: Array of 1146 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021280 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "382" (3006, 0050) Contour Data DS: Array of 1146 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021270 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "382" (3006, 0050) Contour Data DS: Array of 1146 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021260 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "380" (3006, 0050) Contour Data DS: Array of 1140 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021250 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "326" (3006, 0050) Contour Data DS: Array of 978 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021240 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "354" (3006, 0050) Contour Data DS: Array of 1062 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021230 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "332" (3006, 0050) Contour Data DS: Array of 996 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021220 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "328" (3006, 0050) Contour Data DS: Array of 984 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021210 --------- (3006, 0042) Contour Geometric Type CS: 
'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "326" (3006, 0050) Contour Data DS: Array of 978 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021200 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "328" (3006, 0050) Contour Data DS: Array of 984 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021190 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "324" (3006, 0050) Contour Data DS: Array of 972 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021180 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "326" (3006, 0050) Contour Data DS: Array of 978 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021170 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "342" (3006, 0050) Contour Data DS: Array of 1026 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021160 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "356" (3006, 0050) Contour Data DS: Array of 1068 elements --------- (3006, 0084) Referenced ROI Number IS: "12" --------- (3006, 002a) ROI Display Color IS: ['128', '255', '255'] (3006, 0084) Referenced ROI Number IS: "13" --------- (3006, 002a) ROI Display Color IS: ['255', '255', '128'] (3006, 0040) Contour Sequence 29 item(s) ---- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021400 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "226" (3006, 0050) Contour Data DS: Array of 678 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021390 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "236" (3006, 0050) Contour Data DS: Array of 708 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021380 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "274" (3006, 0050) Contour Data DS: Array of 822 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- 
(0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021370 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "302" (3006, 0050) Contour Data DS: Array of 906 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021360 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "304" (3006, 0050) Contour Data DS: Array of 912 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021350 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "308" (3006, 0050) Contour Data DS: Array of 924 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021340 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "312" (3006, 0050) Contour Data DS: Array of 936 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021330 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "312" (3006, 0050) Contour Data DS: Array of 936 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021320 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "318" (3006, 0050) Contour Data DS: Array of 954 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021310 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "312" (3006, 0050) Contour Data DS: Array of 936 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021300 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "312" (3006, 0050) Contour Data DS: Array of 936 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021290 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "316" (3006, 0050) Contour Data DS: Array of 948 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage 
(0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021280 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "314" (3006, 0050) Contour Data DS: Array of 942 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021270 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "316" (3006, 0050) Contour Data DS: Array of 948 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021260 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "316" (3006, 0050) Contour Data DS: Array of 948 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021250 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "318" (3006, 0050) Contour Data DS: Array of 954 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021240 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "306" (3006, 0050) Contour Data DS: Array of 918 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021230 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "300" (3006, 0050) Contour Data DS: Array of 900 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021220 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "292" (3006, 0050) Contour Data DS: Array of 876 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021210 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "284" (3006, 0050) Contour Data DS: Array of 852 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021200 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "278" (3006, 0050) Contour Data DS: Array of 834 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 
1.3.12.2.1107.5.1.4.95999.300000191008001101339000021190 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "266" (3006, 0050) Contour Data DS: Array of 798 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021180 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "252" (3006, 0050) Contour Data DS: Array of 756 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021170 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "238" (3006, 0050) Contour Data DS: Array of 714 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021160 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "228" (3006, 0050) Contour Data DS: Array of 684 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021150 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "212" (3006, 0050) Contour Data DS: Array of 636 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021140 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "176" (3006, 0050) Contour Data DS: Array of 528 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021130 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "142" (3006, 0050) Contour Data DS: Array of 426 elements --------- (3006, 0016) Contour Image Sequence 1 item(s) ---- (0008, 1150) Referenced SOP Class UID UI: CT Image Storage (0008, 1155) Referenced SOP Instance UID UI: 1.3.12.2.1107.5.1.4.95999.300000191008001101339000021120 --------- (3006, 0042) Contour Geometric Type CS: 'CLOSED_PLANAR' (3006, 0046) Number of Contour Points IS: "70" (3006, 0050) Contour Data DS: Array of 210 elements --------- (3006, 0084) Referenced ROI Number IS: "14" --------- (3006, 002a) ROI Display Color IS: ['0', '255', '0'] (3006, 0040) Contour Sequence 1 item(s) ---- (3006, 0042) Contour Geometric Type CS: 'OPEN_NONPLANAR' (3006, 0046) Number of Contour Points IS: "8" (3006, 0050) Contour Data DS: Array of 24 elements --------- (3006, 0084) Referenced ROI Number IS: "15" --------- (3006, 002a) ROI Display Color IS: ['0', '255', '0'] (3006, 0040) Contour Sequence 1 item(s) ---- (3006, 0042) Contour Geometric Type CS: 'OPEN_NONPLANAR' (3006, 0046) Number of Contour Points IS: "2" (3006, 0050) Contour Data DS: 
['-24.95', '15.05', '-28.59', '-21.66', '12.31', '-72.38'] --------- (3006, 0084) Referenced ROI Number IS: "16" --------- (3006, 002a) ROI Display Color IS: ['0', '255', '0'] (3006, 0040) Contour Sequence 1 item(s) ---- (3006, 0042) Contour Geometric Type CS: 'OPEN_NONPLANAR' (3006, 0046) Number of Contour Points IS: "2" (3006, 0050) Contour Data DS: ['14.09', '8.53', '-28.49', '10.75', '8.24', '-70'] --------- (3006, 0084) Referenced ROI Number IS: "17" --------- (3006, 002a) ROI Display Color IS: ['0', '255', '0'] (3006, 0040) Contour Sequence 1 item(s) ---- (3006, 0042) Contour Geometric Type CS: 'OPEN_NONPLANAR' (3006, 0046) Number of Contour Points IS: "2" (3006, 0050) Contour Data DS: ['-22.66', '14.79', '-3.0e-1', '-17.64', '12.33', '-55.35'] --------- (3006, 0084) Referenced ROI Number IS: "18" --------- (3006, 002a) ROI Display Color IS: ['0', '255', '0'] (3006, 0040) Contour Sequence 1 item(s) ---- (3006, 0042) Contour Geometric Type CS: 'OPEN_NONPLANAR' (3006, 0046) Number of Contour Points IS: "2" (3006, 0050) Contour Data DS: ['7.62', '10.08', '-22.63', '6.24', '10.88', '-70.31'] --------- (3006, 0084) Referenced ROI Number IS: "19" --------- (3006, 0080) RT ROI Observations Sequence 19 item(s) ---- (3006, 0082) Observation Number IS: "1" (3006, 0084) Referenced ROI Number IS: "1" (3006, 0085) ROI Observation Label SH: 'BODY' (3006, 0086) RT ROI Identification Code Sequence 1 item(s) ---- (0008, 0100) Code Value SH: 'C44.9' (0008, 0102) Coding Scheme Designator SH: 'ICD-O-2' (0008, 0104) Code Meaning LO: 'Skin, NOS' --------- (3006, 00a4) RT ROI Interpreted Type CS: 'EXTERNAL' (3006, 00a6) ROI Interpreter PN: '' --------- (3006, 0082) Observation Number IS: "2" (3006, 0084) Referenced ROI Number IS: "2" (3006, 0085) ROI Observation Label SH: 'Gause' (3006, 00a4) RT ROI Interpreted Type CS: 'ORGAN' (3006, 00a6) ROI Interpreter PN: '' --------- (3006, 0082) Observation Number IS: "3" (3006, 0084) Referenced ROI Number IS: "3" (3006, 0085) ROI Observation Label SH: 'Bladder' (3006, 0086) RT ROI Identification Code Sequence 1 item(s) ---- (0008, 0100) Code Value SH: 'C67.9' (0008, 0102) Coding Scheme Designator SH: 'ICD-O-2' (0008, 0104) Code Meaning LO: 'Bladder, NOS' --------- (3006, 00a4) RT ROI Interpreted Type CS: 'ORGAN' (3006, 00a6) ROI Interpreter PN: '' --------- (3006, 0082) Observation Number IS: "4" (3006, 0084) Referenced ROI Number IS: "4" (3006, 0085) ROI Observation Label SH: 'Foley' (3006, 00a4) RT ROI Interpreted Type CS: 'ORGAN' (3006, 00a6) ROI Interpreter PN: '' --------- (3006, 0082) Observation Number IS: "5" (3006, 0084) Referenced ROI Number IS: "5" (3006, 0085) ROI Observation Label SH: 'Adjacent_bowel' (3006, 00a4) RT ROI Interpreted Type CS: 'ORGAN' (3006, 00a6) ROI Interpreter PN: '' --------- (3006, 0082) Observation Number IS: "6" (3006, 0084) Referenced ROI Number IS: "6" (3006, 0085) ROI Observation Label SH: 'GTV' (3006, 00a4) RT ROI Interpreted Type CS: 'GTV' (3006, 00a6) ROI Interpreter PN: '' --------- (3006, 0082) Observation Number IS: "7" (3006, 0084) Referenced ROI Number IS: "7" (3006, 0085) ROI Observation Label SH: 'HR-CTV' (3006, 00a4) RT ROI Interpreted Type CS: 'CTV' (3006, 00a6) ROI Interpreter PN: '' --------- (3006, 0082) Observation Number IS: "8" (3006, 0084) Referenced ROI Number IS: "8" (3006, 0085) ROI Observation Label SH: 'MRI-based GTV' (3006, 00a4) RT ROI Interpreted Type CS: 'GTV' (3006, 00a6) ROI Interpreter PN: '' --------- (3006, 0082) Observation Number IS: "9" (3006, 0084) Referenced ROI Number IS: "9" (3006, 0085) 
ROI Observation Label SH: 'R_GTV' (3006, 00a4) RT ROI Interpreted Type CS: 'GTV' (3006, 00a6) ROI Interpreter PN: '' --------- (3006, 0082) Observation Number IS: "10" (3006, 0084) Referenced ROI Number IS: "10" (3006, 0085) ROI Observation Label SH: 'R_HR-CTV' (3006, 00a4) RT ROI Interpreted Type CS: 'CTV' (3006, 00a6) ROI Interpreter PN: '' --------- (3006, 0082) Observation Number IS: "11" (3006, 0084) Referenced ROI Number IS: "11" (3006, 0085) ROI Observation Label SH: 'Rectum' (3006, 0086) RT ROI Identification Code Sequence 1 item(s) ---- (0008, 0100) Code Value SH: 'C20.9' (0008, 0102) Coding Scheme Designator SH: 'ICD-O-2' (0008, 0104) Code Meaning LO: 'Rectum, NOS' --------- (3006, 00a4) RT ROI Interpreted Type CS: 'ORGAN' (3006, 00a6) ROI Interpreter PN: '' --------- (3006, 0082) Observation Number IS: "12" (3006, 0084) Referenced ROI Number IS: "12" (3006, 0085) ROI Observation Label SH: 'Sigmoid_colon' (3006, 00a4) RT ROI Interpreted Type CS: 'ORGAN' (3006, 00a6) ROI Interpreter PN: '' --------- (3006, 0082) Observation Number IS: "13" (3006, 0084) Referenced ROI Number IS: "13" (3006, 0085) ROI Observation Label SH: 'Spinal_cord' (3006, 00a4) RT ROI Interpreted Type CS: 'ORGAN' (3006, 00a6) ROI Interpreter PN: '' --------- (3006, 0082) Observation Number IS: "14" (3006, 0084) Referenced ROI Number IS: "14" (3006, 0085) ROI Observation Label SH: 'Uterus' (3006, 00a4) RT ROI Interpreted Type CS: 'ORGAN' (3006, 00a6) ROI Interpreter PN: '' --------- (3006, 0082) Observation Number IS: "15" (3006, 0084) Referenced ROI Number IS: "15" (3006, 00a4) RT ROI Interpreted Type CS: 'BRACHY_CHANNEL' (3006, 00a6) ROI Interpreter PN: '' --------- (3006, 0082) Observation Number IS: "16" (3006, 0084) Referenced ROI Number IS: "16" (3006, 00a4) RT ROI Interpreted Type CS: 'BRACHY_CHANNEL' (3006, 00a6) ROI Interpreter PN: '' --------- (3006, 0082) Observation Number IS: "17" (3006, 0084) Referenced ROI Number IS: "17" (3006, 00a4) RT ROI Interpreted Type CS: 'BRACHY_CHANNEL' (3006, 00a6) ROI Interpreter PN: '' --------- (3006, 0082) Observation Number IS: "18" (3006, 0084) Referenced ROI Number IS: "18" (3006, 00a4) RT ROI Interpreted Type CS: 'BRACHY_CHANNEL' (3006, 00a6) ROI Interpreter PN: '' --------- (3006, 0082) Observation Number IS: "19" (3006, 0084) Referenced ROI Number IS: "19" (3006, 00a4) RT ROI Interpreted Type CS: 'BRACHY_CHANNEL' (3006, 00a6) ROI Interpreter PN: '' --------- (300e, 0002) Approval Status CS: 'UNAPPROVED'
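###Markdown
The dump above is the tail of an RT Structure Set: each ROI carries a Contour Sequence (3006, 0040) whose items store Contour Data (3006, 0050) as a flat x1, y1, z1, x2, ... list in patient coordinates (mm). Below is a minimal sketch of how such contours could be read back with pydicom; the filename `rtstruct.dcm` is an assumption, and the standard top-level `ROIContourSequence` keyword is assumed for the parent sequence (its tag is not visible in the excerpt above).
###Code
import numpy as np
import pydicom

# Hypothetical path to the structure set dumped above
ds = pydicom.dcmread('rtstruct.dcm')
for roi in ds.ROIContourSequence:
    # Some ROIs in the dump (e.g. ROI 8 and 9) have no Contour Sequence at all
    for c in getattr(roi, 'ContourSequence', []):
        # Contour Data (3006, 0050) is a flat [x1, y1, z1, x2, ...] list
        points = np.array(c.ContourData, dtype=float).reshape(-1, 3)
        print(roi.ReferencedROINumber, c.ContourGeometricType, points.shape)
###Output
_____no_output_____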
Project/notebooks/.ipynb_checkpoints/0.0 Creating directory (product, link, id)-checkpoint.ipynb
###Markdown
Extracting product ids from the weblinks. This is a necessary step: the ids act as the reference for web scraping the reviews.
###Code
# Import necessary libraries
import json
import re
import os
import pandas as pd

# Load the raw product list scraped earlier
with open('../data/raw_data/initial.json', 'r') as file:
    raw_data = json.load(file)
df = pd.DataFrame.from_dict(raw_data)

# The product id is the part of the weblink after 'grid:'
df['product_id'] = df.weblink.str.partition(sep='grid:')[2]
df.columns
df.drop(columns=['index'], inplace=True)
df_links_id = df[['product_name', 'weblink', 'product_id', 'num_reviews']].copy()

with open('../data/processed_data/combined_data.json', 'r') as file:
    raw_data = json.load(file)
selected = pd.DataFrame.from_dict(raw_data)
selected_df = pd.merge(selected, df_links_id, on='product_name', how='left')

# Persist the merged selection and the (name, link, id) lookup, without overwriting
datapath_data = os.path.join('../data/raw_data', 'pre_selection.json')
if not os.path.exists(datapath_data):
    selected_df.to_json(datapath_data)
datapath_data = os.path.join('../data/raw_data', 'data_links_id.json')
if not os.path.exists(datapath_data):
    df_links_id.to_json(datapath_data)
###Output
_____no_output_____
###Markdown
______________________________________________________________________
Creating the full list of scraped product_ids
###Code
# Store each raw json file in a dataframe
with open('../data/raw_data/cleansers_full.json', 'r') as file:
    raw_data = json.load(file)
cleansers_raw = pd.DataFrame.from_dict(raw_data)
with open('../data/raw_data/eye_products.json', 'r') as file:
    raw_data = json.load(file)
eyeproducts_raw = pd.DataFrame.from_dict(raw_data)
with open('../data/raw_data/moisturizers_full.json', 'r') as file:
    raw_data = json.load(file)
moisturizers_raw = pd.DataFrame.from_dict(raw_data)
with open('../data/raw_data/treatments_full.json', 'r') as file:
    raw_data = json.load(file)
treatments_raw = pd.DataFrame.from_dict(raw_data)

full_df = pd.concat([cleansers_raw, eyeproducts_raw, moisturizers_raw, treatments_raw])
full_df['product_id'] = full_df.weblink.str.partition(sep='grid:')[2]

# Also need to fix the number of reviews: drop products without a review count
full_df.drop(full_df[full_df['num_reviews'].isnull()].index, inplace=True)
# Cleaning the num_reviews column: counts like '1.2K' become integers
full_df.loc[full_df.num_reviews.str.endswith('K'), 'num_reviews'] = full_df.loc[full_df.num_reviews.str.endswith('K'), 'num_reviews'].str.strip('K').astype('float64') * 1000
full_df['num_reviews'] = full_df['num_reviews'].astype('int64')
full_df.reset_index(inplace=True)
full_df.info()

datapath3 = os.path.join('../data/raw_data', 'preprocessed_full.json')
if not os.path.exists(datapath3):
    full_df.to_json(datapath3)
###Output
_____no_output_____
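###Markdown
A quick sanity check of the 'K'-suffix conversion used above; the sample review counts here are hypothetical, not taken from the scraped data.
###Code
# Hypothetical review counts to exercise the 'K'-suffix cleaning logic
check = pd.DataFrame({'num_reviews': ['1.2K', '3K', '847']})
mask = check.num_reviews.str.endswith('K')
check.loc[mask, 'num_reviews'] = check.loc[mask, 'num_reviews'].str.strip('K').astype('float64') * 1000
check['num_reviews'] = check['num_reviews'].astype('int64')
check  # expected: 1200, 3000, 847
###Output
_____no_output_____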
IA/TFM-Algoritmos-Prueba.ipynb
###Markdown Imports ###Code
from google.colab import drive
drive.mount('/content/drive')

#Import the library used to connect to the database
from sqlalchemy import create_engine
#Library for data handling
import pandas as pd
from datetime import datetime
import dateutil.relativedelta
import random
#Library for describing the variables
import matplotlib.pyplot as plt
#Libraries for building the modelling, validation and test sets
import numpy as np
from sklearn.model_selection import train_test_split
#Library for the extremely randomized trees technique
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
import math
#Library for saving the trained extra trees
from joblib import dump, load
#For building the neural networks
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Flatten, InputLayer, LSTM, Bidirectional
from keras.layers import Conv2D, MaxPooling2D, Conv1D, MaxPooling1D
from keras.layers.normalization import BatchNormalization
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from keras.regularizers import l2
from keras.models import load_model
#Library for making comparisons
from keras import backend as K ###Output _____no_output_____ ###Markdown Loading the data
We download the data file from http://www.sui.gov.co/web/datos-abiertos/energia/energia-formato-5-informacion-de-transformadores, where the individual data files can be found; the January 2019 file, for example, is at http://www.sui.gov.co/datosAbiertos/Energia_442_M_4_2019.csv.zip. It is worth noting that the data were downloaded manually, because the website has protection against automated downloads. ###Markdown Functions ###Code
def df_aleatorios(df, cantidad):
    #Select `cantidad` random elements (repeats are possible)
    lenght = len(df) - 1
    aleatorios = [ random.randint(0, lenght) for _ in range(cantidad)]
    df = df[aleatorios]
    return df

# Function that reads the data
def df_read():
    #Read the file into a dataframe
    dataframe = pd.read_csv('/content/drive/My Drive/Colab Notebooks/TFM/IA/formato5_520.zip', header=0)
    #Convert the dates to date type
    dataframe['fecha'] = pd.to_datetime(dataframe['fecha'])
    dataframe['mes'] = dataframe['fecha'].dt.month
    dataframe = dataframe.sort_values(by=['fecha'], ascending=False)
    #Convert the S and N values to booleans
    dic = {'S': True, 'N': False}
    dataframe['propiedad'] = dataframe['propiedad'].map(dic)
    print('File loaded ', dataframe.shape)
    return dataframe

# Function that returns the codes of all the transformers
def df_tranformadores(df):
    unique_transformators = df['id_transformador'].unique()
    print('Unique transformers ', len(unique_transformators))
    return unique_transformators

# Function that extracts the dataframe of a single transformer
def df_transformador(df, transformador):
    df_tr = df[(df['id_transformador'] == transformador)]
    del df_tr['id_transformador']
    return df_tr

def df_salida(df, fecha):
    df_out = df[(df['fecha'] == fecha)]
    df_out = df_out[['min_catastrofes','min_expansion','min_infraestructura', 'min_inpne','min_ipne','min_limitacion','min_racionamiento', 'min_seguridad', 'min_strstn', 'min_subestaciones', 'min_terrorismo', 'min_usuario', 'min_zonasespeciales']]
    return df_out

# Function that returns the input data for a given time window
def df_entrada_tiempo(df, fecha, espacio_tiempo):
    df_in = df[(df['fecha'] >= (fecha - dateutil.relativedelta.relativedelta(months=espacio_tiempo) ) ) & (df['fecha'] < fecha)]
    del df_in['fecha']
    return df_in

def time_print(sec):
    days = int(sec / 86400)
    sec -= 86400*days
    hrs = int(sec / 3600)
    sec -= 3600*hrs
    mins = int(sec / 60)
    sec -= 60*mins
    print('Duration ', days, ':', hrs, ':', mins, ':', sec)

from time import time
# Function that builds the data set for a particular date
# using all the transformers
def df_conjunto_tiempo(fecha, espacio_tiempo, pruebas = 0):
    print('Reading the data file')
    df = df_read()
    print('Extracting transformer information')
    transformador = df_tranformadores(df)
    #Limit the data when testing
    if pruebas != 0:
        print('Running tests')
        transformador = df_aleatorios(transformador, pruebas)
    #Sets
    X = []
    y = []
    #List of dates
    # Range of dates in months
    date_list = [fecha - dateutil.relativedelta.relativedelta(months=x) for x in range(108-int(espacio_tiempo))]
    #date_list = np.array(date_list)
    tiempo_inicial = time()
    print('Loading ', len(transformador))
    # Loop over all the transformers
    # and collect the input and prediction sets
    for t in transformador:
        '''if (index * 100 / len(transformador)) % 10 == 0:
            print('Checking record ', index, ' corresponding to ', index*100 / len(transformador), '% of ', len(transformador), ' records')'''
        #transformator_dates = df_aleatorios(date_list, 18) #For testing
        transformator_dates = date_list
        #Select the set of records of the transformer
        df_tr = df_transformador(df, t)
        #Extract the set for each given date
        for date in transformator_dates:
            y1 = df_salida(df_tr, date)
            if not y1.empty:
                X1 = df_entrada_tiempo(df_tr, date, espacio_tiempo)
                if len(X1) > 0:
                    X.append(pad_sequences(np.array(X1), maxlen = 108, value=-1))
                    y.append(y1.values[0])
    time_print(time() - tiempo_inicial)
    print('Loaded a total of: ', len(X), ' samples')
    return np.array(X), np.array(y) ###Output _____no_output_____ ###Markdown Application ###Code
tiempo = 108
X, y = df_conjunto_tiempo(datetime(2019,2,1), tiempo, 0)
dump(X, '/content/drive/My Drive/Colab Notebooks/TFM/IA/X_'+str(tiempo)+'.joblib.z')
dump(y, '/content/drive/My Drive/Colab Notebooks/TFM/IA/y_'+str(tiempo)+'.joblib.z')
#X = load('/content/drive/My Drive/Colab Notebooks/TFM/IA/X_'+tiempo+'.joblib.z')
#y = load('/content/drive/My Drive/Colab Notebooks/TFM/IA/y_'+tiempo+'.joblib.z')
X.shape ###Output _____no_output_____ ###Markdown Determining the modelling, validation and test sets ###Code
# Test set
train_x, test_x, train_y, test_y = train_test_split(X, y, test_size=0.2, random_state=2)
# Validation set
train_x, validation_x, train_y, validation_y = train_test_split(train_x, train_y, test_size=0.2, random_state=4)
print('Final training set size : ', len(train_x), len(train_y))
print('Final validation set size : ', len(validation_x), len(validation_y))
print('Final test set size : ', len(test_x), len(test_y)) ###Output _____no_output_____ ###Markdown Deep Neural Networks ###Code
epochs = 1000
batch = 512 ###Output _____no_output_____ ###Markdown Functions ###Code
# Input layer
def input_layer(model, size_x, size_y = None, size_z = None):
    if size_y == None:
        model.add( InputLayer( input_shape=(size_x, ) ) )
    elif size_z == None:
        model.add( InputLayer( input_shape=(size_x,size_y) ) )
    else:
        model.add( InputLayer( input_shape=(size_x,size_y, size_z) ) )
    model.add( BatchNormalization() )
    return model

#Dense layer
def densa(model, neuronas= 256, activacion = 'relu', regularizador = l2(0.001), dropout = 0.2):
    #Add a dense layer with the given number of neurons
    model.add(Dense(neuronas, activation=activacion, kernel_regularizer=regularizador, bias_regularizer=regularizador))
    model.add(BatchNormalization())
    model.add(Dropout(dropout))
    return model

#Output layer
def result_layer(model, y, activacion = 'linear'):
    model.add(Dense(y, activation=activacion))
    return model

def coeff_determination(y_true, y_pred):
    from keras import backend as K
    SS_res = K.sum(K.square( y_true-y_pred ))
    SS_tot = K.sum(K.square( y_true - K.mean(y_true) ) )
    return ( 1 - SS_res/(SS_tot + K.epsilon()) )

def run_desing(model, name, batch, epochs, train_x, train_y, test_x, test_y, optimizer = 'adam', loss = 'mean_squared_error'):
    metrics = ['mean_absolute_error','mean_squared_error',coeff_determination]
    model.name = name
    #Inspect the model
    model.summary()
    #Compile the model
    model.compile(loss=loss, optimizer=optimizer, metrics=metrics)
    #Stop the training if the values do not improve
    early_stop = EarlyStopping(monitor='val_loss', patience=epochs*0.05, verbose=1, min_delta=0.01, mode='auto')
    #Reduce the learning rate on plateaus
    reduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.001, patience=epochs*0.02, mode='auto', verbose=1, min_delta=1e-4, min_lr=0)
    #Save the best model
    checkpoint = ModelCheckpoint('/content/drive/My Drive/Colab Notebooks/TFM/IA/'+name+'.model', monitor='val_loss', verbose=1, save_best_only=True, mode='min')
    history = model.fit(train_x, train_y, batch_size=batch, epochs=epochs, verbose=1, validation_data=(test_x, test_y), callbacks=[early_stop, reduce_lr, checkpoint])
    model.save('/content/drive/My Drive/Colab Notebooks/TFM/IA/'+name+'.h5')
    dump([model,history,coeff_determination], '/content/drive/My Drive/Colab Notebooks/TFM/IA/'+name+'.joblib.z')
    return model, history

def load_desing(name):
    #Load from disk
    model_complete = load('/content/drive/My Drive/Colab Notebooks/TFM/IA/'+name+'.joblib.lz4')
    #Unpack the model, the history and the custom metric
    model = model_complete[0]
    history = model_complete[1]
    coeff_determination = model_complete[2]
    return model, history

def load_model_weights(model, name):
    model.load_weights('/content/drive/My Drive/Colab Notebooks/TFM/IA/'+name+'.model')
    return model ###Output _____no_output_____ ###Markdown Model ###Code
def modelo_dnn():
    #Create the sequential model
    model = Sequential()
    function_act = 'relu'
    #Input layer
    model = input_layer(model, len(train_x[0]), len(train_x[0][0]))
    model.add(Flatten())
    model = densa(model, 1024, activacion = function_act)
    model = densa(model, 1024, activacion = function_act)
    model = densa(model, 512, activacion = function_act)
    model = densa(model, 512, activacion = function_act)
    model = densa(model, 256, activacion = function_act)
    model = result_layer(model, len(train_y[0]))
    return model ###Output _____no_output_____ ###Markdown Execution ###Code
'''model_dnn, history_dnn = run_desing(modelo_dnn(), 'modelo_dnn', batch, epochs, train_x, train_y, validation_x, validation_y, 'adam','huber_loss')'''
predict_y_dnn = model_dnn.predict(test_x) ###Output _____no_output_____ ###Markdown Convolutional Neural Network Functions ###Code
#Convolutional layer
def convolutional(model, filtros = 256, kernel_x = 3, kernel_y = 3, stride = 1, activacion = 'relu', regularizador = l2(0.001), dropout = 0.2):
    #Add a 2D convolutional layer
    model.add(Conv2D(filtros, kernel_size=(kernel_x, kernel_y), activation=activacion, padding='same', kernel_regularizer=regularizador, bias_regularizer=regularizador))
    model.add(Dropout(dropout))
    return model

#Pooling layer
def pooling(model, pooling_x = 3, pooling_y = 3, stride = 1, dropout = 0.2):
    #Add a max-pooling layer
    model.add(MaxPooling2D((pooling_x, pooling_y)))
    model.add(Dropout(dropout))
    model.add(BatchNormalization())
    return model

def data_cnn(data):
    return data.reshape(len(data),len(data[0]),len(data[0][0]),1) ###Output _____no_output_____ ###Markdown Model ###Code
def modelo_cnn():
    #Create the sequential model
    model = Sequential()
    function_act = 'relu'
    #First convolutional block
    model = input_layer(model, len(train_x[0]),len(train_x[0][0]),1)
    model = convolutional(model, 64, activacion = function_act)
    #model = pooling(model, 2, 2, dropout = 0)
    model = convolutional(model, 128, activacion = function_act)
    #model = pooling(model, 2, 2, dropout = 0)
    model = convolutional(model, 256, activacion = function_act)
    #model = pooling(model, 2, 2, dropout = 0)
    model = convolutional(model, 512, activacion = function_act)
    model = pooling(model, 2, 2, dropout = 0)
    model.add(Flatten())
    model = densa(model, 1024, activacion = function_act)
    model = densa(model, 1024, activacion = function_act)
    model = densa(model, 512, activacion = function_act)
    #Output layer
    model = result_layer(model, len(train_y[0]))
    return model ###Output _____no_output_____ ###Markdown Execution ###Code
#Reshape the data for the CNN
train_x_cnn = data_cnn(train_x)
validation_x_cnn = data_cnn(validation_x)
test_x_cnn = data_cnn(test_x)
model_cnn, history_cnn = run_desing(modelo_cnn(), 'modelo_cnn', batch, epochs, train_x_cnn, train_y, validation_x_cnn, validation_y, 'adam','huber_loss')
predict_y_cnn = model_cnn.predict(test_x_cnn) ###Output _____no_output_____ ###Markdown Recurrent Neural Network - LSTM Functions ###Code
def layer_lstm(model, neurons = 128, regularizer = l2(0.0001), dropout = 0.2, return_sequences = True):
    model.add(LSTM(neurons, return_sequences=return_sequences, kernel_regularizer=regularizer, bias_regularizer=regularizer, dropout=dropout, recurrent_dropout=dropout))
    model.add(BatchNormalization())
    return model ###Output _____no_output_____ ###Markdown Model ###Code
def model_lstm():
    #Create the sequential model
    model = Sequential()
    function_act = 'relu'
    #Input layer
    model = input_layer(model, len(train_x[0]), len(train_x[0][0]) )
    model = layer_lstm(model, 128)
    model = layer_lstm(model, 256)
    model = layer_lstm(model, 128, return_sequences=False)
    #Output layer
    model = result_layer(model, len(train_y[0]))
    return model ###Output _____no_output_____ ###Markdown Execution ###Code
model_lstm, history_lstm = run_desing(model_lstm(), 'modelo_lstm', batch, epochs, train_x, train_y, validation_x, validation_y, 'adam','huber_loss')
predict_y_lstm = model_lstm.predict(test_x) ###Output _____no_output_____ ###Markdown Bidirectional convolutional neural network model with LSTM Functions ###Code
def layer_bi_lstm(model, neurons = 128, regularizer = l2(0.0001), dropout = 0.2, return_sequences = True):
    model.add(Bidirectional(LSTM(neurons, return_sequences=return_sequences, kernel_regularizer=regularizer, bias_regularizer=regularizer, dropout=dropout, recurrent_dropout=dropout)))
    model.add(BatchNormalization())
    return model

#1D convolutional layer
def convolutional1D(model, filtros = 256, kernel_x = 3, stride = 1, activacion = 'relu', regularizador = l2(0.001), dropout = 0.2):
    #Add a 1D convolutional layer
    model.add(Conv1D(filtros, kernel_size=kernel_x, activation=activacion, padding='same', kernel_regularizer=regularizador, bias_regularizer=regularizador))
    model.add(Dropout(dropout))
    return model

#1D pooling layer
def pooling1D(model, pooling_x = 3, dropout = 0.2):
    #Add a 1D max-pooling layer
    model.add(MaxPooling1D(pooling_x))
    model.add(Dropout(dropout))
    model.add(BatchNormalization())
    return model ###Output _____no_output_____ ###Markdown Model ###Code
def model_bi_lstm():
    #Create the sequential model
    model = Sequential()
    function_act = 'relu'
    #Input layer followed by a first convolutional block
    model = input_layer(model, len(train_x[0]),len(train_x[0][0]))
    model = convolutional1D(model, 64, activacion = function_act)
    model = pooling1D(model, 2, dropout = 0)
    model = layer_bi_lstm(model, 128)
    model = layer_bi_lstm(model, 128, return_sequences=False)
    model = densa(model, 64, activacion = function_act)
    #Output layer
    model = result_layer(model, len(train_y[0]))
    return model ###Output _____no_output_____ ###Markdown Execution ###Code
model_bi_lstm, history_bi_lstm = run_desing(model_bi_lstm(), 'modelo_bilstm_1c_1d', batch, epochs, train_x, train_y, validation_x, validation_y, 'adam','huber_loss')
predict_y_bi_lstm = model_bi_lstm.predict(test_x) ###Output _____no_output_____ ###Markdown Extremely randomized trees Functions ###Code
def ajustar_entrada(original):
    #Flatten each 2D window into a single feature vector
    return np.reshape(original, (original.shape[0], original.shape[1]*original.shape[2])) ###Output _____no_output_____ ###Markdown Execution ###Code
model_extra = ExtraTreesRegressor(n_estimators=350, random_state=0)
history_extra = model_extra.fit(ajustar_entrada(train_x), train_y)
dump(model_extra, '/content/drive/My Drive/Colab Notebooks/TFM/IA/model_extra.joblib.z')
predict_y_extra = model_extra.predict(ajustar_entrada(test_x)) ###Output _____no_output_____ ###Markdown Metrics ###Code
mean_squared_error(test_y,predict_y_extra)
mean_absolute_error(test_y,predict_y_extra)
r2_score(test_y,predict_y_extra) ###Output _____no_output_____ ###Markdown Comparison Functions ###Code
def all_metrics (test_y, predict_y, name):
    mse = mean_squared_error(test_y,predict_y)
    print('Technique ', name, ' gives a mean squared error of ', mse)
    rmse = math.sqrt(mse)
    print('Technique ', name, ' gives a root mean squared error of ', rmse)
    mae = mean_absolute_error(test_y,predict_y)
    print('Technique ', name, ' gives a mean absolute error of ', mae)
    r2 = r2_score(test_y,predict_y)
    print('Technique ', name, ' gives an R squared of ', r2*100, ' %')
    return [name, mse, rmse, mae, r2]

#Helper that plots one metric for several networks
# Input:
#   list of histories
#   list of names
#   name of the metric
# Output:
#   none
# Result:
#   the plotted figure
def plot_compare(histories, names, metric):
    #Build the names used to fetch the values
    train_metric = metric
    test_metric = 'val_'+metric
    #Set the title
    plt.title(metric)
    #Loop over all the histories
    for history in histories:
        color = np.random.rand(3,)
        plt.plot(history.history[train_metric], c=color)
        plt.plot(history.history[test_metric], 'r--', c=color)
    plt.ylabel(metric)
    plt.xlabel('Epoch')
    legend = []
    for name in names:
        legend.append('Training ' + name)
        legend.append('Validation ' + name)
    plt.legend(legend, loc='lower right')
    plt.figure(figsize=(20,11))
    plt.show()

def load_models(path, test_x):
    model = load_model('/content/drive/My Drive/Colab Notebooks/TFM/IA/'+path+'.h5', {"coeff_determination":coeff_determination})
    predict_y = model.predict(test_x)
    return model, predict_y ###Output _____no_output_____ ###Markdown Functions to save the models ###Code
def save_model(model,history,coeff_determination,name):
    dump([model,history,coeff_determination], '/content/drive/My Drive/Colab Notebooks/TFM/IA/'+name+'.joblib.z')

#save_model(model_dnn,history_dnn,coeff_determination,'model_dnn')
#save_model(model_cnn,history_cnn,coeff_determination,'model_cnn')
#save_model(model_lstm,history_lstm,coeff_determination,'model_lstm')
#save_model(model_bi_lstm,history_bi_lstm,coeff_determination,'model_bi_lstm') ###Output _____no_output_____ ###Markdown Functions to reload the models ###Code
#model_dnn, predict_y_dnn = load_models('modelo_dnn_pruebas',test_x)
#model_cnn, predict_y_cnn = load_models('modelo_cnn_pruebas',test_x_cnn)
#model_lstm, predict_y_lstm = load_models('modelo_lstm_pruebas',test_x)
#model_lstm = model_lstm()
#model_lstm = load_model_weights(model_lstm,'modelo_lstm_pruebas')
datos = load('/content/drive/My Drive/Colab Notebooks/TFM/IA/modelo_lstm_pruebas.joblib.z')
#model_bi_lstm, predict_bi_lstm = load_models('modelo_bilstm_pruebas',test_x)
#model_extra = load('/content/drive/My Drive/Colab Notebooks/TFM/IA/model_extra_pruebas.joblib.z') ###Output _____no_output_____ ###Markdown Established metrics ###Code
metrics_dnn = all_metrics(test_y, predict_y_dnn, 'Deep Neural Network')
metrics_cnn = all_metrics(test_y, predict_y_cnn, 'Convolutional Neural Network')
metrics_lstm = all_metrics(test_y, predict_y_lstm, 'LSTM Neural Network')
metrics_cblstmd = all_metrics(test_y, predict_y_bi_lstm, 'Deep Bidirectional Convolutional LSTM')
metrics_extra = all_metrics(test_y, predict_y_extra, 'Extremely randomized trees')
df_metrics = pd.DataFrame(columns=['Name','MSE','RMSE','MAE','R2'])
df_metrics.loc[df_metrics.shape[0]] = metrics_dnn
df_metrics.loc[df_metrics.shape[0]] = metrics_cnn
df_metrics.loc[df_metrics.shape[0]] = metrics_lstm
df_metrics.loc[df_metrics.shape[0]] = metrics_cblstmd
df_metrics.loc[df_metrics.shape[0]] = metrics_extra
df_metrics.set_index('Name', inplace=True)
df_metrics ###Output _____no_output_____ ###Markdown Comparison plots: DNN vs CNN vs LSTM ###Code
#Comparison plots of the different results
histories = [history_dnn, history_lstm, history_bi_lstm, history_cnn]
names = ['Deep', 'Recurrent LSTM', 'NNCRLSTMD', 'Convolutional',]
#Loss function
plot_compare(histories, names, 'loss')
#Mean absolute error
plot_compare(histories, names, 'mean_absolute_error')
#Mean squared error
plot_compare(histories, names, 'mean_squared_error')
#Coefficient of determination
plot_compare(histories, names, 'coeff_determination')
###Output _____no_output_____
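###Markdown A small aside on the windowing above: df_conjunto_tiempo pads every input window to 108 months with the sentinel value -1 before stacking. A minimal sketch of that padding behaviour on made-up toy sequences (only the pad_sequences call itself comes from the notebook): ###Code
from keras.preprocessing.sequence import pad_sequences

# Two toy histories of different lengths; padding is applied
# on the left ('pre') by default, using -1 to mark missing months
toy = [[3, 5], [2, 4, 6]]
print(pad_sequences(toy, maxlen=4, value=-1))
# -> [[-1 -1  3  5]
#     [-1  2  4  6]]
###Output _____no_output_____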
4. bag-of-words-base-model.ipynb
###Markdown Bag of words base model for text generation
This base model tries to predict the next word in the text, treating the text as a bag of words. Based on the Amazon food reviews, the base model should be able to reproduce (or at least get close to) those reviews with sklearn algorithms.
This notebook is based upon the previous one, "Sequential base model for text generation", and will skip some exploration and analysis. Load data ###Code
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
from tqdm import tqdm
from src.text_preprocessor import TextPreprocessor

tqdm.pandas()
reviews_df = pd.read_csv('../amazon-food-reviews.csv')
reviews_df.columns = [col_name.lower() for col_name in reviews_df.columns]
reviews_df = reviews_df[['text']]
reviews_df = reviews_df.drop_duplicates(subset=['text'])
reviews_df = reviews_df.dropna()
text_preprocessor = TextPreprocessor(lemmatization=False)
reviews_df['text'] = reviews_df['text'].progress_apply(text_preprocessor.process)
reviews_df['wc'] = reviews_df['text'].progress_apply(lambda x: len([word for word in x.split(' ') if word != '']))
from sklearn.ensemble import IsolationForest

wc_isolation_forest = IsolationForest(contamination=0.2)
wc_vector = reviews_df['wc'].to_numpy().reshape((-1, 1))
print('Fitting...')
wc_isolation_forest.fit(wc_vector)
print('Predicting...')
outlier_mask = wc_isolation_forest.predict(wc_vector)
outlier_idx = outlier_mask == -1
filtered_reviews_df = reviews_df[~outlier_idx]
import matplotlib.pyplot as plt
%matplotlib inline
_ = filtered_reviews_df['wc'].hist(bins=63, figsize=(16,8)) ###Output [nltk_data] Downloading package stopwords to [nltk_data] C:\Users\Dmitry\AppData\Roaming\nltk_data... [nltk_data] Package stopwords is already up-to-date! 100%|██████████| 393579/393579 [00:24<00:00, 15875.78it/s] 100%|██████████| 393579/393579 [00:02<00:00, 171567.03it/s] ###Markdown Prepare data for training ###Code
# Debug only! Cap the review length to keep the window generation fast
max_word_count_forced = 41
filtered_reviews_df = filtered_reviews_df[filtered_reviews_df['wc'] < max_word_count_forced]
_ = filtered_reviews_df['wc'].hist(bins=30, figsize=(16,8))
from src.embedding_manager import EmbeddingManager
import numpy as np

em = EmbeddingManager(path='../fasttext.wiki-news-cleaned-290d.vec')
windowed_data = []
for _, row in tqdm(filtered_reviews_df.iterrows()):
    words = row['text'].split()
    for i in range(1, len(words)):
        text = ' '.join(words[:i])
        target_word = words[i]
        windowed_data.append({ 'text': text, 'target_word': target_word })
windowed_df = pd.DataFrame(windowed_data)

def get_output(target_word):
    if target_word in text_preprocessor.stopwords:
        return -1
    if target_word not in em.embeddings_dict:
        return -1
    return target_word

windowed_df['output'] = windowed_df['target_word'].progress_apply(get_output)
windowed_df = windowed_df[windowed_df['output'] != -1]
windowed_df['input'] = windowed_df['text'].progress_apply(lambda row: ' '.join([word for word in row.split() if word in em.embeddings_dict]))
windowed_df['wc'] = windowed_df['text'].progress_apply(lambda x: len([word for word in x.split() if word != '']))
windowed_df['input_wc'] = windowed_df['input'].progress_apply(lambda x: len([word for word in x.split() if word != '']))
# check how many entries lost more than 20% of their words due to vocabulary limitations:
insufficient_vocabulary_df = windowed_df[(windowed_df['input_wc'] * 1.25 < windowed_df['wc'])]
clean_seq_reviews_df = windowed_df[~windowed_df.index.isin(insufficient_vocabulary_df.index)]
clean_seq_reviews_df.describe() ###Output 234083it [00:15, 15510.64it/s] 100%|██████████| 5186260/5186260 [00:05<00:00, 962620.66it/s] 100%|██████████| 4757987/4757987 [00:17<00:00, 277387.10it/s] 100%|██████████| 4757987/4757987 [00:11<00:00, 420266.76it/s] 100%|██████████| 4757987/4757987 [00:11<00:00, 426560.91it/s] ###Markdown Balancing outputs ###Code
len(clean_seq_reviews_df['output'].unique())
plt.figure(figsize=(16,8))
_ = plt.hist(clean_seq_reviews_df['output'].value_counts().values, bins=100)
target_words_df = clean_seq_reviews_df['target_word'].value_counts().to_frame()
target_words_df.describe()
max_examples = 5
balanced_df = clean_seq_reviews_df.groupby('output', group_keys=False).progress_apply(lambda group: group.sample(n=min(len(group.index), max_examples)))
balanced_df.describe()
_ = balanced_df['output'].value_counts().hist(bins=5) ###Output _____no_output_____ ###Markdown Encode data with embeddings ###Code
def average_vectorizations(row):
    # Bag-of-words encoding: average the embedding vectors of the words
    vectors = []
    for word in row.split():
        if word in em.embeddings_dict:
            vectors.append(em.embeddings_dict[word])
    result_vector = np.mean(vectors, axis=0)
    return result_vector

balanced_df['input_vector'] = balanced_df['input'].progress_apply(average_vectorizations)
balanced_df['output_vector'] = balanced_df['output'].progress_apply(average_vectorizations) ###Output 100%|██████████| 128580/128580 [00:04<00:00, 29247.49it/s] 100%|██████████| 128580/128580 [00:02<00:00, 50070.67it/s] ###Markdown Train model ###Code
def get_arrayed_data(df_set):
    setX = np.stack(df_set['input_vector'].values, axis=0)
    setY = np.stack(df_set['output_vector'].values, axis=0)
    return (setX, setY)

trainX, trainY = get_arrayed_data(balanced_df.sample(n=4000))
print(trainX.shape)
print(trainY.shape)
import time
from sklearn.svm import SVR
from sklearn.multioutput import MultiOutputRegressor

regressor = SVR()
mo_regressor = MultiOutputRegressor(estimator=regressor)
start_training = time.time()
mo_regressor.fit(trainX, trainY)
end_training = time.time()
print(f'Training finished, elapsed seconds: {end_training-start_training}') ###Output Training finished, elapsed seconds: 243.47743606567383 ###Markdown Producing result ###Code
def generate_sequence(initial_sentence, max_generated_length=50):
    sentence = initial_sentence.lower()
    previous_words = []
    for i in tqdm(range(max_generated_length)):
        processed_sentence = ' '.join([word for word in sentence.split() if word in em.embeddings_dict])
        if len(processed_sentence.split()) >= max_generated_length:
            return sentence
        vectorized_sentence = average_vectorizations(processed_sentence)
        input_vector = vectorized_sentence.reshape((-1, vectorized_sentence.shape[0]))
        result = mo_regressor.predict(input_vector).reshape(em.shape)
        word_scores = em.get_words(result, k=4)
        # Fall back to the top-scoring word in case every candidate
        # was already used among the last three words
        new_word = word_scores[0][0]
        for word_score in word_scores:
            word = word_score[0]
            if word not in previous_words[-3:]:
                new_word = word
                previous_words.append(word)
                break
        sentence += ' ' + new_word
    return sentence

generate_sequence('I have bought')
generate_sequence('really this was an amazing stuff for me to find')
generate_sequence('i') ###Output 98%|█████████▊| 49/50 [00:22<00:00, 2.20it/s]
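###Markdown A minimal self-contained sketch of the bag-of-words encoding and decoding used above, with made-up 2-d embeddings standing in for the 290-d fastText vectors (the real lookups go through EmbeddingManager): ###Code
import numpy as np

# Toy 2-d embeddings standing in for the fastText vectors
embeddings = {'good': np.array([1.0, 0.0]),
              'tea': np.array([0.0, 1.0]),
              'coffee': np.array([0.2, 0.9])}

def average_vector(text):
    # Bag-of-words encoding: word order is ignored, vectors are averaged
    return np.mean([embeddings[w] for w in text.split() if w in embeddings], axis=0)

def nearest_word(vector):
    # Decode a predicted vector as the closest vocabulary word (Euclidean distance)
    return min(embeddings, key=lambda w: np.linalg.norm(embeddings[w] - vector))

v = average_vector('good tea')
print(v, nearest_word(v))
###Output _____no_output_____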
dmu1/dmu1_ml_COSMOS/2_Merging.ipynb
###Markdown COSMOS master catalogue
This notebook presents the merge of the various pristine catalogues to produce the HELP master catalogue on COSMOS. ###Code
from herschelhelp_internal import git_version
print("This notebook was run with herschelhelp_internal version: \n{}".format(git_version()))
import datetime
print("This notebook was executed on: \n{}".format(datetime.datetime.now()))
%matplotlib inline
#%config InlineBackend.figure_format = 'svg'
import matplotlib.pyplot as plt
plt.rc('figure', figsize=(10, 6))
from collections import OrderedDict
import os
import time

from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.table import Column, Table
import numpy as np
from pymoc import MOC

from herschelhelp_internal.masterlist import merge_catalogues, nb_merge_dist_plot, specz_merge
from herschelhelp_internal.utils import coords_to_hpidx, ebv, gen_help_id, inMoc
TMP_DIR = os.environ.get('TMP_DIR', "./data_tmp")
OUT_DIR = os.environ.get('OUT_DIR', "./data")
SUFFIX = os.environ.get('SUFFIX', time.strftime("_%Y%m%d"))
try:
    os.makedirs(OUT_DIR)
except FileExistsError:
    pass ###Output _____no_output_____ ###Markdown I - Reading the prepared pristine catalogues ###Code
#COSMOS was originally run with the official Laigle et al. 2015 catalogue,
#so all those ids and photometry values must be preserved
cosmos2015 = Table.read("{}/COSMOS2015_HELP.fits".format(TMP_DIR))
candels = Table.read("{}/CANDELS.fits".format(TMP_DIR))
cfhtls = Table.read("{}/CFHTLS.fits".format(TMP_DIR))
decals = Table.read("{}/DECaLS.fits".format(TMP_DIR))
hsc_deep = Table.read("{}/HSC-DEEP.fits".format(TMP_DIR))
hsc_udeep = Table.read("{}/HSC-UDEEP.fits".format(TMP_DIR))
kids = Table.read("{}/KIDS.fits".format(TMP_DIR))
ps1 = Table.read("{}/PS1.fits".format(TMP_DIR))
las = Table.read("{}/UKIDSS-LAS.fits".format(TMP_DIR))
wirds = Table.read("{}/CFHT-WIRDS.fits".format(TMP_DIR)) ###Output _____no_output_____ ###Markdown II - Merging tables
We first merge the optical catalogues and then add the infrared ones: CANDELS, CFHTLS, DECaLS, HSC, KIDS, PanSTARRS, UKIDSS-LAS, and CFHT-WIRDS. At every step, we look at the distribution of the distances to the nearest source in the merged catalogue to determine the best crossmatching radius.
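###Markdown A minimal sketch of what nb_merge_dist_plot inspects: the histogram of nearest-neighbour separations between two coordinate lists, whose peak at small separations and subsequent dip suggest the crossmatching radius (the coordinates below are randomly generated for illustration): ###Code
import numpy as np
import matplotlib.pyplot as plt
from astropy import units as u
from astropy.coordinates import SkyCoord

# Made-up coordinates standing in for two catalogues
cat1 = SkyCoord(ra=np.random.uniform(149.5, 150.5, 1000) * u.deg,
                dec=np.random.uniform(1.5, 2.5, 1000) * u.deg)
cat2 = SkyCoord(ra=np.random.uniform(149.5, 150.5, 1000) * u.deg,
                dec=np.random.uniform(1.5, 2.5, 1000) * u.deg)
# For each source in cat2, distance to its nearest neighbour in cat1
_, d2d, _ = cat2.match_to_catalog_sky(cat1)
# True counterparts pile up at small separations; the dip after that
# peak is where the crossmatching radius is usually placed
plt.hist(d2d.arcsec, bins=50, range=(0, 10))
plt.xlabel('Separation [arcsec]')
plt.show()
###Output _____no_output_____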
COSMOS 2015 ###Code master_catalogue = cosmos2015 master_catalogue['cosmos_ra'].name = 'ra' master_catalogue['cosmos_dec'].name = 'dec' ###Output _____no_output_____ ###Markdown Add CANDELS ###Code nb_merge_dist_plot( SkyCoord(master_catalogue['ra'], master_catalogue['dec']), SkyCoord(candels['candels_ra'], candels['candels_dec']) ) # Given the graph above, we use 0.8 arc-second radius master_catalogue = merge_catalogues(master_catalogue, candels, "candels_ra", "candels_dec", radius=0.8*u.arcsec) ###Output _____no_output_____ ###Markdown Add CFHTLS ###Code nb_merge_dist_plot( SkyCoord(master_catalogue['ra'], master_catalogue['dec']), SkyCoord(cfhtls['cfhtls_ra'], cfhtls['cfhtls_dec']) ) # Given the graph above, we use 0.8 arc-second radius master_catalogue = merge_catalogues(master_catalogue, cfhtls, "cfhtls_ra", "cfhtls_dec", radius=0.8*u.arcsec) ###Output _____no_output_____ ###Markdown Add DECaLS ###Code nb_merge_dist_plot( SkyCoord(master_catalogue['ra'], master_catalogue['dec']), SkyCoord(decals['decals_ra'], decals['decals_dec']) ) # Given the graph above, we use 0.8 arc-second radius master_catalogue = merge_catalogues(master_catalogue, decals, "decals_ra", "decals_dec", radius=0.8*u.arcsec) ###Output _____no_output_____ ###Markdown Add HSC-UDEEP ###Code nb_merge_dist_plot( SkyCoord(master_catalogue['ra'], master_catalogue['dec']), SkyCoord(hsc_udeep['hsc-udeep_ra'], hsc_udeep['hsc-udeep_dec']) ) # Given the graph above, we use 0.8 arc-second radius master_catalogue = merge_catalogues(master_catalogue, hsc_udeep, "hsc-udeep_ra", "hsc-udeep_dec", radius=0.8*u.arcsec) ###Output _____no_output_____ ###Markdown Add HSC-DEEP ###Code nb_merge_dist_plot( SkyCoord(master_catalogue['ra'], master_catalogue['dec']), SkyCoord(hsc_deep['hsc-deep_ra'], hsc_deep['hsc-deep_dec']) ) # Given the graph above, we use 0.8 arc-second radius master_catalogue = merge_catalogues(master_catalogue, hsc_deep, "hsc-deep_ra", "hsc-deep_dec", radius=0.8*u.arcsec) ###Output _____no_output_____ ###Markdown Add KIDS ###Code nb_merge_dist_plot( SkyCoord(master_catalogue['ra'], master_catalogue['dec']), SkyCoord(kids['kids_ra'], kids['kids_dec']) ) # Given the graph above, we use 0.8 arc-second radius master_catalogue = merge_catalogues(master_catalogue, kids, "kids_ra", "kids_dec", radius=0.8*u.arcsec) ###Output _____no_output_____ ###Markdown Add UKIDSS LAS ###Code nb_merge_dist_plot( SkyCoord(master_catalogue['ra'], master_catalogue['dec']), SkyCoord(las['las_ra'], las['las_dec']) ) # Given the graph above, we use 0.8 arc-second radius master_catalogue = merge_catalogues(master_catalogue, las, "las_ra", "las_dec", radius=0.8*u.arcsec) ###Output _____no_output_____ ###Markdown Add CFHT-WIRDS ###Code nb_merge_dist_plot( SkyCoord(master_catalogue['ra'], master_catalogue['dec']), SkyCoord(wirds['wirds_ra'], wirds['wirds_dec']) ) #Given the graph above, we use 1 arc-second radius master_catalogue = merge_catalogues(master_catalogue, wirds, "wirds_ra", "wirds_dec", radius=1.*u.arcsec) ###Output _____no_output_____ ###Markdown Add PanSTARRS ###Code nb_merge_dist_plot( SkyCoord(master_catalogue['ra'], master_catalogue['dec']), SkyCoord(ps1['ps1_ra'], ps1['ps1_dec']) ) #Given the graph above, we use 1 arc-second radius master_catalogue = merge_catalogues(master_catalogue, ps1, "ps1_ra", "ps1_dec", radius=1.*u.arcsec) ###Output _____no_output_____ ###Markdown CleaningWhen we merge the catalogues, astropy masks the non-existent values (e.g. 
when a row comes only from one catalogue and has no counterpart in the others, the columns from the latter are masked for that row). We indicate to use NaN for masked values in float columns, False for flag columns, and -1 for ID columns. ###Code
for col in master_catalogue.colnames:
    #print(col)
    if (col.startswith("m_") or col.startswith("merr_") or col.startswith("f_") or col.startswith("ferr_") or "stellarity" in col):
        master_catalogue[col] = master_catalogue[col].astype(float)
        master_catalogue[col].fill_value = np.nan
    elif "flag" in col:
        master_catalogue[col].fill_value = 0
    elif "id" in col:
        master_catalogue[col].fill_value = -1

master_catalogue = master_catalogue.filled()
master_catalogue[:10].show_in_notebook() ###Output _____no_output_____ ###Markdown III - Merging flags and stellarity
Each pristine catalogue contains a flag indicating if the source was associated with another nearby source that was removed during the cleaning process. We merge these flags into a single one. ###Code
flag_cleaned_columns = [column for column in master_catalogue.colnames if 'flag_cleaned' in column]
flag_column = np.zeros(len(master_catalogue), dtype=bool)
for column in flag_cleaned_columns:
    flag_column |= master_catalogue[column]
master_catalogue.add_column(Column(data=flag_column, name="flag_cleaned"))
master_catalogue.remove_columns(flag_cleaned_columns) ###Output _____no_output_____ ###Markdown WIRDS was itself created from a merge, so it contains a flag that should be combined with the merge flag produced here. ###Code
# master_catalogue['flag_merged'] |= master_catalogue['wirds_flag_merged']
# master_catalogue.remove_columns('wirds_flag_merged') ###Output _____no_output_____ ###Markdown Each pristine catalogue contains a flag indicating the probability of a source being a Gaia object (0: not a Gaia object, 1: possibly, 2: probably, 3: definitely). We merge these flags taking the highest value. ###Code
flag_gaia_columns = [column for column in master_catalogue.colnames if 'flag_gaia' in column]
master_catalogue.add_column(Column(
    data=np.max([master_catalogue[column] for column in flag_gaia_columns], axis=0),
    name="flag_gaia"
))
master_catalogue.remove_columns(flag_gaia_columns) ###Output _____no_output_____ ###Markdown Each pristine catalogue may contain one or several stellarity columns indicating the probability (0 to 1) of each source being a star. We merge these columns taking the highest value. ###Code
stellarity_columns = [column for column in master_catalogue.colnames if 'stellarity' in column]
print(", ".join(stellarity_columns))
# We create a masked array with all the stellarities and get the maximum value, as well as its
# origin. Some sources may not have an associated stellarity.
stellarity_array = np.array([master_catalogue[column] for column in stellarity_columns])
stellarity_array = np.ma.masked_array(stellarity_array, np.isnan(stellarity_array))

max_stellarity = np.max(stellarity_array, axis=0)
max_stellarity.fill_value = np.nan

no_stellarity_mask = max_stellarity.mask

master_catalogue.add_column(Column(data=max_stellarity.filled(), name="stellarity"))

stellarity_origin = np.full(len(master_catalogue), "NO_INFORMATION", dtype="S20")
stellarity_origin[~no_stellarity_mask] = np.array(stellarity_columns)[np.argmax(stellarity_array, axis=0)[~no_stellarity_mask]]

master_catalogue.add_column(Column(data=stellarity_origin, name="stellarity_origin"))

master_catalogue.remove_columns(stellarity_columns) ###Output _____no_output_____ ###Markdown IV - Adding E(B-V) column ###Code
master_catalogue.add_column(
    ebv(master_catalogue['ra'], master_catalogue['dec'])
) ###Output _____no_output_____ ###Markdown V a - Adding HELP unique identifiers and field columns
First we make a help_id column using the old values where we have them. ###Code
#master_catalogue.add_column(Column(gen_help_id(master_catalogue['ra'], master_catalogue['dec']),
#                                   name="help_id"))
#Use HELP ids from the original catalogue to make sure they are identical
master_catalogue['help_id'] = master_catalogue['help_id'].astype('S27')
master_catalogue.add_column(Column(gen_help_id(master_catalogue['ra'], master_catalogue['dec']), name="help_id_temp"))
mask = (master_catalogue['help_id'] == '-1') | (master_catalogue['help_id'] == '')
master_catalogue['help_id'][mask] = master_catalogue['help_id_temp'][mask]
master_catalogue.remove_column('help_id_temp')
master_catalogue.add_column(Column(np.full(len(master_catalogue), "COSMOS", dtype='<U18'), name="field"))
# Check that the HELP Ids are unique
if len(master_catalogue) != len(np.unique(master_catalogue['help_id'])):
    print("The HELP IDs are not unique!!!")
else:
    print("OK!") ###Output OK! ###Markdown We now make a second help_id column using only the new naming convention, but we keep the old column to permit joining in the old products. ###Code
master_catalogue['help_id'].name = 'old_help_id'
master_catalogue.add_column(Column(gen_help_id(master_catalogue['ra'], master_catalogue['dec']), name="help_id"))
# Check that the HELP Ids are unique
if len(master_catalogue) != len(np.unique(master_catalogue['help_id'])):
    print("The HELP IDs are not unique!!!")
else:
    print("OK!") ###Output OK! ###Markdown V b - Adding spec-z ###Code
specz = Table.read("../../dmu23/dmu23_COSMOS/data/COSMOS-specz-v2.5-public_helpcoverage_helpid_20160512.fits")
nb_merge_dist_plot(
    SkyCoord(master_catalogue['ra'], master_catalogue['dec']),
    SkyCoord(specz['ra'] * u.deg, specz['dec'] * u.deg)
)
master_catalogue = specz_merge(master_catalogue, specz, radius=1. * u.arcsec) ###Output _____no_output_____ ###Markdown VI - Choosing between multiple values for the same filter VI.a HSC-DEEP and HSC-UDEEP and COSMOS
On COSMOS2015 we have early HSC y band photometry. To ensure values are the same as for the original run, we take fluxes in this order of priority: COSMOS2015, then HSC-UDEEP, then HSC-DEEP.
###Code suprime_origin = Table() suprime_origin.add_column(master_catalogue['help_id']) suprime_stats = Table() suprime_stats.add_column(Column(data=['g','r','i','z','y', 'n921'], name="Band")) for col in ["HSC-UDEEP", "HSC-DEEP", "COSMOS2015"]: suprime_stats.add_column(Column(data=np.full(6, 0), name="{}".format(col))) suprime_stats.add_column(Column(data=np.full(6, 0), name="use {}".format(col))) suprime_stats.add_column(Column(data=np.full(6, 0), name="{} ap".format(col))) suprime_stats.add_column(Column(data=np.full(6, 0), name="use {} ap".format(col))) suprime_bands = ['g','r','i','z','y', 'n921'] for band in suprime_bands: # Suprime total flux has_hsc_udeep = ~np.isnan(master_catalogue['f_hsc-udeep_' + band]) has_hsc_deep = ~np.isnan(master_catalogue['f_hsc-deep_' + band]) if band == 'y': has_cosmos = ~np.isnan(master_catalogue['f_cosmos-suprime_y']) elif band != 'y': has_cosmos = np.full(len(master_catalogue), False, dtype=bool) use_cosmos = has_cosmos use_hsc_udeep = has_hsc_udeep & ~has_cosmos use_hsc_deep = has_hsc_deep & ~has_hsc_udeep & ~has_cosmos f_suprime = np.full(len(master_catalogue), np.nan) if band == 'y': f_suprime[use_cosmos] = master_catalogue['f_cosmos-suprime_y'][use_cosmos] f_suprime[use_hsc_udeep] = master_catalogue['f_hsc-udeep_' + band][use_hsc_udeep] f_suprime[use_hsc_deep] = master_catalogue['f_hsc-deep_' + band][use_hsc_deep] ferr_suprime = np.full(len(master_catalogue), np.nan) if band == 'y': ferr_suprime[use_cosmos] = master_catalogue['ferr_cosmos-suprime_y'][use_cosmos] ferr_suprime[use_hsc_udeep] = master_catalogue['ferr_hsc-udeep_' + band][use_hsc_udeep] ferr_suprime[use_hsc_deep] = master_catalogue['ferr_hsc-deep_' + band][use_hsc_deep] m_suprime = np.full(len(master_catalogue), np.nan) if band == 'y': m_suprime[use_cosmos] = master_catalogue['m_cosmos-suprime_y'][use_cosmos] m_suprime[use_hsc_udeep] = master_catalogue['m_hsc-udeep_' + band][use_hsc_udeep] m_suprime[use_hsc_deep] = master_catalogue['m_hsc-deep_' + band][use_hsc_deep] merr_suprime = np.full(len(master_catalogue), np.nan) if band == 'y': merr_suprime[use_cosmos] = master_catalogue['merr_cosmos-suprime_y'][use_cosmos] merr_suprime[use_hsc_udeep] = master_catalogue['merr_hsc-udeep_' + band][use_hsc_udeep] merr_suprime[use_hsc_deep] = master_catalogue['merr_hsc-deep_' + band][use_hsc_deep] flag_suprime = np.full(len(master_catalogue), False, dtype=bool) if band == 'y': flag_suprime[use_cosmos] = master_catalogue['flag_cosmos-suprime_y'][use_cosmos] flag_suprime[use_hsc_udeep] = master_catalogue['flag_hsc-udeep_' + band][use_hsc_udeep] flag_suprime[use_hsc_deep] = master_catalogue['flag_hsc-deep_' + band][use_hsc_deep] master_catalogue.add_column(Column(data=f_suprime, name="f_suprime_" + band)) master_catalogue.add_column(Column(data=ferr_suprime, name="ferr_suprime_" + band)) master_catalogue.add_column(Column(data=m_suprime, name="m_suprime_" + band)) master_catalogue.add_column(Column(data=merr_suprime, name="merr_suprime_" + band)) master_catalogue.add_column(Column(data=flag_suprime, name="flag_suprime_" + band)) old_hsc_udeep_columns = ['f_hsc-udeep_' + band, 'ferr_hsc-udeep_' + band, 'm_hsc-udeep_' + band, 'merr_hsc-udeep_' + band, 'flag_hsc-udeep_' + band] old_hsc_deep_columns = ['f_hsc-deep_' + band, 'ferr_hsc-deep_' + band, 'm_hsc-deep_' + band, 'merr_hsc-deep_' + band, 'flag_hsc-deep_' + band] old_cosmos_columns = ['f_cosmos-suprime_' + band, 'ferr_cosmos-suprime_' + band, 'm_cosmos-suprime_' + band, 'merr_cosmos-suprime_' + band, 'flag_cosmos-suprime_' + band] old_columns = 
old_hsc_udeep_columns + old_hsc_deep_columns if band == 'y': old_columns += old_cosmos_columns master_catalogue.remove_columns(old_columns) origin = np.full(len(master_catalogue), ' ', dtype='<U10') origin[use_hsc_udeep] = "HSC-UDEEP" origin[use_hsc_deep] = "HSC-DEEP" origin[use_cosmos] = "COSMOS2015" suprime_origin.add_column(Column(data=origin, name= 'f_suprime_' + band )) # Suprime aperture flux has_ap_hsc_udeep = ~np.isnan(master_catalogue['f_ap_hsc-udeep_' + band]) has_ap_hsc_deep = ~np.isnan(master_catalogue['f_ap_hsc-deep_' + band]) if band == 'y': has_ap_cosmos = ~np.isnan(master_catalogue['f_ap_cosmos-suprime_y']) elif band != 'y': has_ap_cosmos = np.full(len(master_catalogue), False, dtype=bool) use_ap_cosmos = has_ap_cosmos use_ap_hsc_udeep = has_ap_hsc_udeep & ~has_ap_cosmos use_ap_hsc_deep = has_ap_hsc_deep & ~has_ap_hsc_udeep & ~has_ap_cosmos f_ap_suprime = np.full(len(master_catalogue), np.nan) if band == 'y': f_ap_suprime[use_ap_cosmos] = master_catalogue['f_ap_cosmos-suprime_y'][use_ap_cosmos] f_ap_suprime[use_ap_hsc_udeep] = master_catalogue['f_ap_hsc-udeep_' + band][use_ap_hsc_udeep] f_ap_suprime[use_ap_hsc_deep] = master_catalogue['f_ap_hsc-deep_' + band][use_ap_hsc_deep] ferr_ap_suprime = np.full(len(master_catalogue), np.nan) if band == 'y': ferr_ap_suprime[use_ap_cosmos] = master_catalogue['ferr_ap_cosmos-suprime_y'][use_ap_cosmos] ferr_ap_suprime[use_ap_hsc_udeep] = master_catalogue['ferr_ap_hsc-udeep_' + band][use_ap_hsc_udeep] ferr_ap_suprime[use_ap_hsc_deep] = master_catalogue['ferr_ap_hsc-deep_' + band][use_ap_hsc_deep] m_ap_suprime = np.full(len(master_catalogue), np.nan) if band == 'y': m_ap_suprime[use_ap_cosmos] = master_catalogue['m_ap_cosmos-suprime_y'][use_ap_cosmos] m_ap_suprime[use_ap_hsc_udeep] = master_catalogue['m_ap_hsc-udeep_' + band][use_ap_hsc_udeep] m_ap_suprime[use_ap_hsc_deep] = master_catalogue['m_ap_hsc-deep_' + band][use_ap_hsc_deep] merr_ap_suprime = np.full(len(master_catalogue), np.nan) if band == 'y': merr_ap_suprime[use_ap_cosmos] = master_catalogue['merr_ap_cosmos-suprime_y'][use_ap_cosmos] merr_ap_suprime[use_ap_hsc_udeep] = master_catalogue['merr_ap_hsc-udeep_' + band][use_ap_hsc_udeep] merr_ap_suprime[use_ap_hsc_deep] = master_catalogue['merr_ap_hsc-deep_' + band][use_ap_hsc_deep] master_catalogue.add_column(Column(data=f_ap_suprime, name="f_ap_suprime_" + band)) master_catalogue.add_column(Column(data=ferr_ap_suprime, name="ferr_ap_suprime_" + band)) master_catalogue.add_column(Column(data=m_ap_suprime, name="m_ap_suprime_" + band)) master_catalogue.add_column(Column(data=merr_ap_suprime, name="merr_ap_suprime_" + band)) old_ap_hsc_udeep_columns = ['f_ap_hsc-udeep_' + band, 'ferr_ap_hsc-udeep_' + band, 'm_ap_hsc-udeep_' + band, 'merr_ap_hsc-udeep_' + band] old_ap_hsc_deep_columns = ['f_ap_hsc-deep_' + band, 'ferr_ap_hsc-deep_' + band, 'm_ap_hsc-deep_' + band, 'merr_ap_hsc-deep_' + band] old_ap_cosmos_columns = ['f_ap_cosmos-suprime_' + band, 'ferr_ap_cosmos-suprime_' + band, 'm_ap_cosmos-suprime_' + band, 'merr_ap_cosmos-suprime_' + band] old_ap_columns = old_ap_hsc_udeep_columns + old_ap_hsc_deep_columns if band == 'y': old_ap_columns += old_ap_cosmos_columns master_catalogue.remove_columns(old_ap_columns) origin_ap = np.full(len(master_catalogue), ' ', dtype='<U10') origin_ap[use_ap_hsc_udeep] = "HSC-UDEEP" origin_ap[use_ap_hsc_deep] = "HSC-DEEP" origin_ap[use_ap_cosmos] = "COSMOS2015" suprime_origin.add_column(Column(data=origin_ap, name= 'f_ap_suprime_' + band )) suprime_stats['HSC-UDEEP'][suprime_stats['Band'] == band] = 
np.sum(has_hsc_udeep) suprime_stats['HSC-DEEP'][suprime_stats['Band'] == band] = np.sum(has_hsc_deep) suprime_stats['COSMOS2015'][suprime_stats['Band'] == band] = np.sum(has_cosmos) suprime_stats['use HSC-UDEEP'][suprime_stats['Band'] == band] = np.sum(use_hsc_udeep) suprime_stats['use HSC-DEEP'][suprime_stats['Band'] == band] = np.sum(use_hsc_deep) suprime_stats['use COSMOS2015'][suprime_stats['Band'] == band] = np.sum(use_cosmos) suprime_stats['HSC-UDEEP ap'][suprime_stats['Band'] == band] = np.sum(has_ap_hsc_udeep) suprime_stats['HSC-DEEP ap'][suprime_stats['Band'] == band] = np.sum(has_ap_hsc_deep) suprime_stats['COSMOS2015 ap'][suprime_stats['Band'] == band] = np.sum(has_ap_cosmos) suprime_stats['use HSC-UDEEP ap'][suprime_stats['Band'] == band] = np.sum(use_ap_hsc_udeep) suprime_stats['use HSC-DEEP ap'][suprime_stats['Band'] == band] = np.sum(use_ap_hsc_deep) suprime_stats['use COSMOS2015 ap'][suprime_stats['Band'] == band] = np.sum(use_ap_cosmos) suprime_stats.show_in_notebook() suprime_origin.write("{}/cosmos_suprime_fluxes_origins{}.fits".format(OUT_DIR, SUFFIX), overwrite=True) ###Output _____no_output_____ ###Markdown VII.b Megacam COSMOS vs CFHT-WIRDS vs CFHTLSWe take COSMOS over CFHTLS over CFHT-WIRDS ###Code megacam_origin = Table() megacam_origin.add_column(master_catalogue['help_id']) megacam_stats = Table() megacam_stats.add_column(Column(data=['u','g','r','i','z'], name="Band")) for col in ["COSMOS2015", "CFHTLS", "CFHT-WIRDS"]: megacam_stats.add_column(Column(data=np.full(5, 0), name="{}".format(col))) megacam_stats.add_column(Column(data=np.full(5, 0), name="use {}".format(col))) megacam_stats.add_column(Column(data=np.full(5, 0), name="{} ap".format(col))) megacam_stats.add_column(Column(data=np.full(5, 0), name="use {} ap".format(col))) megacam_bands = ['u','g','r','i','z'] for band in megacam_bands: # megacam total flux has_cfhtls = ~np.isnan(master_catalogue['f_megacam_' + band]) has_wirds = ~np.isnan(master_catalogue['f_wirds_' + band]) if band == 'u': has_cosmos = ~np.isnan(master_catalogue['f_cosmos-megacam_' + band]) elif band != 'u': has_cosmos = np.full(len(master_catalogue), False, dtype=bool) use_cosmos = has_cosmos use_cfhtls = has_cfhtls & ~has_cosmos use_wirds = has_wirds & ~has_cfhtls & ~has_cosmos master_catalogue['f_megacam_' + band][use_wirds] = master_catalogue['f_wirds_' + band][use_wirds] master_catalogue['ferr_megacam_' + band][use_wirds] = master_catalogue['ferr_wirds_' + band][use_wirds] master_catalogue['m_megacam_' + band][use_wirds] = master_catalogue['m_wirds_' + band][use_wirds] master_catalogue['merr_megacam_' + band][use_wirds] = master_catalogue['merr_wirds_' + band][use_wirds] master_catalogue['flag_megacam_' + band][use_wirds] = master_catalogue['flag_wirds_' + band][use_wirds] master_catalogue.remove_columns(['f_wirds_' + band, 'ferr_wirds_' + band, 'm_wirds_' + band, 'merr_wirds_' + band, 'flag_wirds_' + band]) if band == 'u': master_catalogue['f_megacam_' + band][use_cosmos] = master_catalogue['f_cosmos-megacam_' + band][use_cosmos] master_catalogue['ferr_megacam_' + band][use_cosmos] = master_catalogue['ferr_cosmos-megacam_' + band][use_cosmos] master_catalogue['m_megacam_' + band][use_cosmos] = master_catalogue['m_cosmos-megacam_' + band][use_cosmos] master_catalogue['merr_megacam_' + band][use_cosmos] = master_catalogue['merr_cosmos-megacam_' + band][use_cosmos] master_catalogue['flag_megacam_' + band][use_cosmos] = master_catalogue['flag_cosmos-megacam_' + band][use_cosmos] master_catalogue.remove_columns(['f_cosmos-megacam_' + 
band, 'ferr_cosmos-megacam_' + band, 'm_cosmos-megacam_' + band, 'merr_cosmos-megacam_' + band, 'flag_cosmos-megacam_' + band]) origin = np.full(len(master_catalogue), ' ', dtype='<U5') origin[use_cfhtls] = "CFHTLS" origin[use_wirds] = "CFHT-WIRDS" origin[use_cosmos] = "COSMOS2015" megacam_origin.add_column(Column(data=origin, name= 'f_megacam_' + band )) # Megacam aperture flux has_ap_cfhtls = ~np.isnan(master_catalogue['f_ap_megacam_' + band]) has_ap_wirds = ~np.isnan(master_catalogue['f_ap_wirds_' + band]) if band == 'u': has_ap_cosmos = ~np.isnan(master_catalogue['f_ap_cosmos-megacam_' + band]) elif band != 'u': has_ap_cosmos = np.full(len(master_catalogue), False, dtype=bool) use_ap_cosmos = has_ap_cosmos use_ap_cfhtls = has_ap_cfhtls & ~has_ap_cosmos use_ap_wirds = has_ap_wirds & ~has_ap_cfhtls & ~has_ap_cosmos master_catalogue['f_ap_megacam_' + band][use_ap_wirds] = master_catalogue['f_ap_wirds_' + band][use_ap_wirds] master_catalogue['ferr_ap_megacam_' + band][use_ap_wirds] = master_catalogue['ferr_ap_wirds_' + band][use_ap_wirds] master_catalogue['m_ap_megacam_' + band][use_ap_wirds] = master_catalogue['m_ap_wirds_' + band][use_ap_wirds] master_catalogue['merr_ap_megacam_' + band][use_ap_wirds] = master_catalogue['merr_ap_wirds_' + band][use_ap_wirds] master_catalogue.remove_columns(['f_ap_wirds_' + band, 'ferr_ap_wirds_' + band, 'm_ap_wirds_' + band, 'merr_ap_wirds_' + band]) if band == 'u': master_catalogue['f_ap_megacam_' + band][use_ap_cosmos] = master_catalogue['f_ap_cosmos-megacam_' + band][use_ap_cosmos] master_catalogue['ferr_ap_megacam_' + band][use_ap_cosmos] = master_catalogue['ferr_ap_cosmos-megacam_' + band][use_ap_cosmos] master_catalogue['m_ap_megacam_' + band][use_ap_cosmos] = master_catalogue['m_ap_cosmos-megacam_' + band][use_ap_cosmos] master_catalogue['merr_ap_megacam_' + band][use_ap_cosmos] = master_catalogue['merr_ap_cosmos-megacam_' + band][use_ap_cosmos] master_catalogue.remove_columns(['f_ap_cosmos-megacam_' + band, 'ferr_ap_cosmos-megacam_' + band, 'm_ap_cosmos-megacam_' + band, 'merr_ap_cosmos-megacam_' + band]) origin_ap = np.full(len(master_catalogue), ' ', dtype='<U5') origin_ap[use_ap_cfhtls] = "CFHTLS" origin_ap[use_ap_wirds] = "CFHT-WIRDS" origin_ap[use_ap_cosmos] = "COSMOS2015" megacam_origin.add_column(Column(data=origin_ap, name= 'f_ap_megacam_' + band )) megacam_stats['CFHTLS'][megacam_stats['Band'] == band] = np.sum(has_cfhtls) megacam_stats['CFHT-WIRDS'][megacam_stats['Band'] == band] = np.sum(has_wirds) megacam_stats['COSMOS2015'][megacam_stats['Band'] == band] = np.sum(has_cosmos) megacam_stats['use CFHTLS'][megacam_stats['Band'] == band] = np.sum(use_cfhtls) megacam_stats['use CFHT-WIRDS'][megacam_stats['Band'] == band] = np.sum(use_wirds) megacam_stats['use COSMOS2015'][megacam_stats['Band'] == band] = np.sum(use_cosmos) megacam_stats['CFHTLS ap'][megacam_stats['Band'] == band] = np.sum(has_ap_cfhtls) megacam_stats['CFHT-WIRDS ap'][megacam_stats['Band'] == band] = np.sum(has_ap_wirds) megacam_stats['COSMOS2015 ap'][megacam_stats['Band'] == band] = np.sum(has_ap_cosmos) megacam_stats['use CFHTLS ap'][megacam_stats['Band'] == band] = np.sum(use_ap_cfhtls) megacam_stats['use CFHT-WIRDS ap'][megacam_stats['Band'] == band] = np.sum(use_ap_wirds) megacam_stats['use COSMOS2015 ap'][megacam_stats['Band'] == band] = np.sum(use_ap_cosmos) megacam_stats.show_in_notebook() megacam_origin.write("{}/cosmos_megacam_fluxes_origins{}.fits".format(OUT_DIR, SUFFIX), overwrite=True) ###Output _____no_output_____ ###Markdown WIRcam COSMOS vs WIRDSWe take 
COSMOS over WIRDS to ensure values are the same as for the original run. ###Code wircam_origin = Table() wircam_origin.add_column(master_catalogue['help_id']) wircam_stats = Table() wircam_stats.add_column(Column(data=['h','ks'], name="Band")) for col in ["CFHT-WIRDS", "COSMOS2015"]: wircam_stats.add_column(Column(data=np.full(2, 0), name="{}".format(col))) wircam_stats.add_column(Column(data=np.full(2, 0), name="use {}".format(col))) wircam_stats.add_column(Column(data=np.full(2, 0), name="{} ap".format(col))) wircam_stats.add_column(Column(data=np.full(2, 0), name="use {} ap".format(col))) wircam_bands = ['h','ks'] for band in wircam_bands: # wircam total flux has_wirds = ~np.isnan(master_catalogue['f_wirds_' + band.rstrip('s')]) has_cosmos = ~np.isnan(master_catalogue['f_cosmos-wircam_' + band]) use_cosmos = has_cosmos use_wirds = has_wirds & ~has_cosmos f_wircam = np.full(len(master_catalogue), np.nan) f_wircam[use_cosmos] = master_catalogue['f_cosmos-wircam_' + band][use_cosmos] f_wircam[use_wirds] = master_catalogue['f_wirds_' + band.rstrip('s')][use_wirds] ferr_wircam = np.full(len(master_catalogue), np.nan) ferr_wircam[use_cosmos] = master_catalogue['ferr_cosmos-wircam_' + band][use_cosmos] ferr_wircam[use_wirds] = master_catalogue['ferr_wirds_' + band.rstrip('s')][use_wirds] m_wircam = np.full(len(master_catalogue), np.nan) m_wircam[use_cosmos] = master_catalogue['m_cosmos-wircam_' + band][use_cosmos] m_wircam[use_wirds] = master_catalogue['m_wirds_' + band.rstrip('s')][use_wirds] merr_wircam = np.full(len(master_catalogue), np.nan) merr_wircam[use_cosmos] = master_catalogue['merr_cosmos-wircam_' + band][use_cosmos] merr_wircam[use_wirds] = master_catalogue['merr_wirds_' + band.rstrip('s')][use_wirds] flag_wircam = np.full(len(master_catalogue), False, dtype=bool) flag_wircam[use_cosmos] = master_catalogue['flag_cosmos-wircam_' + band][use_cosmos] flag_wircam[use_wirds] = master_catalogue['flag_wirds_' + band.rstrip('s')][use_wirds] master_catalogue.add_column(Column(data=f_wircam, name="f_wircam_" + band)) master_catalogue.add_column(Column(data=ferr_wircam, name="ferr_wircam_" + band)) master_catalogue.add_column(Column(data=m_wircam, name="m_wircam_" + band)) master_catalogue.add_column(Column(data=merr_wircam, name="merr_wircam_" + band)) master_catalogue.add_column(Column(data=flag_wircam, name="flag_wircam_" + band)) old_wirds_columns = ['f_wirds_' + band.rstrip('s'), 'ferr_wirds_' + band.rstrip('s'), 'm_wirds_' + band.rstrip('s'), 'merr_wirds_' + band.rstrip('s'), 'flag_wirds_' + band.rstrip('s')] old_cosmos_columns = ['f_cosmos-wircam_' + band, 'ferr_cosmos-wircam_' + band, 'm_cosmos-wircam_' + band, 'merr_cosmos-wircam_' + band, 'flag_cosmos-wircam_' + band] old_columns = old_wirds_columns + old_cosmos_columns master_catalogue.remove_columns(old_columns) origin = np.full(len(master_catalogue), ' ', dtype='<U10') origin[use_wirds] = "CFHT-WIRDS" origin[use_cosmos] = "COSMOS2015" wircam_origin.add_column(Column(data=origin, name= 'f_wircam_' + band )) # wircam aperture flux has_ap_wirds = ~np.isnan(master_catalogue['f_ap_wirds_' + band.rstrip('s')]) has_ap_cosmos = ~np.isnan(master_catalogue['f_ap_cosmos-wircam_' + band]) use_ap_cosmos = has_ap_cosmos use_ap_wirds = has_ap_wirds & ~has_ap_cosmos f_ap_wircam = np.full(len(master_catalogue), np.nan) f_ap_wircam[use_ap_cosmos] = master_catalogue['f_ap_cosmos-wircam_' + band][use_ap_cosmos] f_ap_wircam[use_ap_wirds] = master_catalogue['f_ap_wirds_' + band.rstrip('s')][use_ap_wirds] ferr_ap_wircam = 
np.full(len(master_catalogue), np.nan) ferr_ap_wircam[use_ap_cosmos] = master_catalogue['ferr_ap_cosmos-wircam_' + band][use_ap_cosmos] ferr_ap_wircam[use_ap_wirds] = master_catalogue['ferr_ap_wirds_' + band.rstrip('s')][use_ap_wirds] m_ap_wircam = np.full(len(master_catalogue), np.nan) m_ap_wircam[use_ap_cosmos] = master_catalogue['m_ap_cosmos-wircam_' + band][use_ap_cosmos] m_ap_wircam[use_ap_wirds] = master_catalogue['m_ap_wirds_' + band.rstrip('s')][use_ap_wirds] merr_ap_wircam = np.full(len(master_catalogue), np.nan) merr_ap_wircam[use_ap_cosmos] = master_catalogue['merr_ap_cosmos-wircam_' + band][use_ap_cosmos] merr_ap_wircam[use_ap_wirds] = master_catalogue['merr_ap_wirds_' + band.rstrip('s')][use_ap_wirds] master_catalogue.add_column(Column(data=f_ap_wircam, name="f_ap_wircam_" + band)) master_catalogue.add_column(Column(data=ferr_ap_wircam, name="ferr_ap_wircam_" + band)) master_catalogue.add_column(Column(data=m_ap_wircam, name="m_ap_wircam_" + band)) master_catalogue.add_column(Column(data=merr_ap_wircam, name="merr_ap_wircam_" + band)) old_ap_wirds_columns = ['f_ap_wirds_' + band.rstrip('s'), 'ferr_ap_wirds_' + band.rstrip('s'), 'm_ap_wirds_' + band.rstrip('s'), 'merr_ap_wirds_' + band.rstrip('s')] old_ap_cosmos_columns = ['f_ap_cosmos-wircam_' + band, 'ferr_ap_cosmos-wircam_' + band, 'm_ap_cosmos-wircam_' + band, 'merr_ap_cosmos-wircam_' + band] old_ap_columns = old_ap_wirds_columns + old_ap_cosmos_columns master_catalogue.remove_columns(old_ap_columns) origin_ap = np.full(len(master_catalogue), ' ', dtype='<U5') origin_ap[use_ap_wirds] = "CFHT-WIRDS" origin_ap[use_ap_cosmos] = "COSMOS2015" wircam_origin.add_column(Column(data=origin_ap, name= 'f_ap_wircam_' + band )) wircam_stats['CFHT-WIRDS'][wircam_stats['Band'] == band] = np.sum(has_wirds) wircam_stats['COSMOS2015'][wircam_stats['Band'] == band] = np.sum(has_cosmos) wircam_stats['use CFHT-WIRDS'][wircam_stats['Band'] == band] = np.sum(use_wirds) wircam_stats['use COSMOS2015'][wircam_stats['Band'] == band] = np.sum(use_cosmos) wircam_stats['CFHT-WIRDS ap'][wircam_stats['Band'] == band] = np.sum(has_ap_wirds) wircam_stats['COSMOS2015 ap'][wircam_stats['Band'] == band] = np.sum(has_ap_cosmos) wircam_stats['use CFHT-WIRDS ap'][wircam_stats['Band'] == band] = np.sum(use_ap_wirds) wircam_stats['use COSMOS2015 ap'][wircam_stats['Band'] == band] = np.sum(use_ap_cosmos) wircam_stats.show_in_notebook() wircam_origin.write("{}/cosmos_wircam_fluxes_origins{}.fits".format(OUT_DIR, SUFFIX), overwrite=True) ###Output _____no_output_____ ###Markdown Final renamingWe rename some columns in line with HELP filter naming standards ###Code renaming = OrderedDict({ '_wirds_j':'_wircam_j', #'_wirds_h': '_wircam_h', #These two now merged with COSMOS #'_wirds_k': '_wircam_ks', '_kids_': '_omegacam_', '_cosmos-suprime_': '_suprime_', '_cosmos-vista_':'_vista_', '_cosmos-irac_':'_irac_', '_candels_f140w':'_wfc3_f140w', '_candels_f160w':'_wfc3_f160w', '_candels_f125w':'_wfc3_f125w', '_candels_f606w': '_acs_f606w', '_candels_f814w':'_acs_f814w', }) for col in master_catalogue.colnames: for rename_col in list(renaming): if rename_col in col: master_catalogue.rename_column(col, col.replace(rename_col, renaming[rename_col])) ###Output _____no_output_____ ###Markdown VII.a Wavelength domain coverageWe add a binary `flag_optnir_obs` indicating that a source was observed in a given wavelength domain:- 1 for observation in optical;- 2 for observation in near-infrared;- 4 for observation in mid-infrared (IRAC).It's an integer binary flag, so a source observed 
both in optical and near-infrared but not in mid-infrared would have this flag at 1 + 2 = 3.*Note 1: The observation flag is based on the creation of multi-order coverage maps from the catalogues; this may not be accurate, especially on the edges of the coverage.**Note 2: Being on the observation coverage does not mean having fluxes in that wavelength domain. For sources observed in one domain but having no flux in it, one must take into consideration the different depths of the catalogues we are using.* ###Code
candels_moc = MOC(filename="../../dmu0/dmu0_CANDELS-3D-HST/data/CANDELS-3D-HST_COSMOS_MOC.fits")
cfhtls_moc = MOC(filename="../../dmu0/dmu0_CFHTLS/data/CFHTLS-DEEP_COSMOS_MOC.fits")
decals_moc = MOC(filename="../../dmu0/dmu0_DECaLS/data/DECaLS_COSMOS_MOC.fits")
hsc_deep_moc = MOC(filename="../../dmu0/dmu0_HSC/data/HSC-PDR1_deep_COSMOS_MOC.fits")
hsc_udeep_moc = MOC(filename="../../dmu0/dmu0_HSC/data/HSC-PDR1_uDeep_COSMOS_MOC.fits")
kids_moc = MOC(filename="../../dmu0/dmu0_KIDS/data/KIDS-DR3_COSMOS_MOC.fits")
ps1_moc = MOC(filename="../../dmu0/dmu0_PanSTARRS1-3SS/data/PanSTARRS1-3SS_COSMOS_MOC.fits")
las_moc = MOC(filename="../../dmu0/dmu0_UKIDSS-LAS/data/UKIDSS-LAS_COSMOS_MOC.fits")
wirds_moc = MOC(filename="../../dmu0/dmu0_CFHT-WIRDS/data/COSMOS_Ks-priors_MOC.fits")
was_observed_optical = inMoc(
    master_catalogue['ra'], master_catalogue['dec'],
    candels_moc + cfhtls_moc + decals_moc + hsc_udeep_moc + hsc_deep_moc + kids_moc + ps1_moc)
was_observed_nir = inMoc(
    master_catalogue['ra'], master_catalogue['dec'],
    las_moc + wirds_moc
)
was_observed_mir = np.zeros(len(master_catalogue), dtype=bool)
#was_observed_mir = inMoc(
#    master_catalogue['ra'], master_catalogue['dec'],
#)
master_catalogue.add_column(
    Column(
        1 * was_observed_optical + 2 * was_observed_nir + 4 * was_observed_mir,
        name="flag_optnir_obs")
) ###Output _____no_output_____ ###Markdown VII.b Wavelength domain detectionWe add a binary `flag_optnir_det` indicating that a source was detected in a given wavelength domain:- 1 for detection in optical;- 2 for detection in near-infrared;- 4 for detection in mid-infrared (IRAC).It's an integer binary flag, so a source detected both in optical and near-infrared but not in mid-infrared would have this flag at 1 + 2 = 3.*Note 1: We use the total flux columns to know if the source has flux; in some catalogues, we may have aperture flux and no total flux.*To get rid of artefacts (chip edges, star flares, etc.) we consider that a source is detected in one wavelength domain when it has a flux value in **at least two bands**. That means that good sources will be excluded from this flag when they are on the coverage of only one band.
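As an aside (an added illustration, not part of the original pipeline), these integer flags can be decoded with bitwise masks; a minimal sketch using the `flag_optnir_obs` column created above: ###Code
# Sketch: decode the observation flag with bitwise tests (1 = optical, 2 = NIR, 4 = MIR)
obs_optical = (master_catalogue['flag_optnir_obs'] & 1) > 0
obs_nir = (master_catalogue['flag_optnir_obs'] & 2) > 0
print(np.sum(obs_optical & obs_nir))  # sources observed in both domains
###Output _____no_output_____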
###Code nb_optical_flux = (
    1 * ~np.isnan(master_catalogue['f_megacam_u']) +
    1 * ~np.isnan(master_catalogue['f_megacam_g']) +
    1 * ~np.isnan(master_catalogue['f_megacam_r']) +
    1 * ~np.isnan(master_catalogue['f_megacam_i']) +
    1 * ~np.isnan(master_catalogue['f_megacam_z']) +
    1 * ~np.isnan(master_catalogue['f_suprime_g']) +
    1 * ~np.isnan(master_catalogue['f_suprime_r']) +
    1 * ~np.isnan(master_catalogue['f_suprime_i']) +
    1 * ~np.isnan(master_catalogue['f_suprime_z']) +
    1 * ~np.isnan(master_catalogue['f_suprime_y']) +
    1 * ~np.isnan(master_catalogue['f_suprime_n921']) +
    1 * ~np.isnan(master_catalogue['f_gpc1_g']) +
    1 * ~np.isnan(master_catalogue['f_gpc1_r']) +
    1 * ~np.isnan(master_catalogue['f_gpc1_i']) +
    1 * ~np.isnan(master_catalogue['f_gpc1_z']) +
    1 * ~np.isnan(master_catalogue['f_gpc1_y']) +
    1 * ~np.isnan(master_catalogue['f_decam_g']) +
    1 * ~np.isnan(master_catalogue['f_decam_r']) +
    1 * ~np.isnan(master_catalogue['f_decam_z']) +
    1 * ~np.isnan(master_catalogue['f_omegacam_u']) +
    1 * ~np.isnan(master_catalogue['f_omegacam_g']) +
    1 * ~np.isnan(master_catalogue['f_omegacam_r']) +
    1 * ~np.isnan(master_catalogue['f_omegacam_i'])
)
nb_nir_flux = (
    1 * ~np.isnan(master_catalogue['f_vista_j']) +
    1 * ~np.isnan(master_catalogue['f_vista_h']) +
    1 * ~np.isnan(master_catalogue['f_vista_ks']) +
    1 * ~np.isnan(master_catalogue['f_wircam_j']) +
    1 * ~np.isnan(master_catalogue['f_wircam_h']) +
    1 * ~np.isnan(master_catalogue['f_wircam_ks']) +
    1 * ~np.isnan(master_catalogue['f_ukidss_y']) +
    1 * ~np.isnan(master_catalogue['f_ukidss_j']) +
    1 * ~np.isnan(master_catalogue['f_ukidss_h']) +
    1 * ~np.isnan(master_catalogue['f_ukidss_k'])
)
nb_mir_flux = np.zeros(len(master_catalogue), dtype=bool)  # no mid-infrared fluxes merged in this catalogue yet, so the MIR bit is always 0
has_optical_flux = nb_optical_flux >= 2
has_nir_flux = nb_nir_flux >= 2
has_mir_flux = nb_mir_flux >= 2
master_catalogue.add_column(
    Column(
        1 * has_optical_flux + 2 * has_nir_flux + 4 * has_mir_flux,
        name="flag_optnir_det")
) ###Output _____no_output_____ ###Markdown VIII - Cross-identification tableWe produce a table associating each HELP identifier with the identifiers of the sources in the pristine catalogues. This can be used to easily retrieve additional information from them. ###Code #
# Adding SDSS ids
#
sdss = Table.read("../../dmu0/dmu0_SDSS-DR13/data/SDSS-DR13_COSMOS.fits")['objID', 'ra', 'dec']
sdss_coords = SkyCoord(sdss['ra'] * u.deg, sdss['dec'] * u.deg)
idx_ml, d2d, _ = sdss_coords.match_to_catalog_sky(SkyCoord(master_catalogue['ra'], master_catalogue['dec']))
idx_sdss = np.arange(len(sdss))
# Limit the cross-match to 1 arcsec
mask = d2d <= 1. * u.arcsec
idx_ml = idx_ml[mask]
idx_sdss = idx_sdss[mask]
d2d = d2d[mask]
nb_orig_matches = len(idx_ml)
# In case of multiple associations of one master list object to an SDSS object, we keep only the
# association to the nearest one.
sort_idx = np.argsort(d2d)
idx_ml = idx_ml[sort_idx]
idx_sdss = idx_sdss[sort_idx]
_, unique_idx = np.unique(idx_ml, return_index=True)
idx_ml = idx_ml[unique_idx]
idx_sdss = idx_sdss[unique_idx]
print("{} master list rows had multiple associations.".format(nb_orig_matches - len(idx_ml)))
# Adding the ObjID to the master list
master_catalogue.add_column(Column(data=np.full(len(master_catalogue), -1, dtype='>i8'), name="sdss_id"))
master_catalogue['sdss_id'][idx_ml] = sdss['objID'][idx_sdss]
id_names = []
for col in master_catalogue.colnames:
    if '_id' in col:
        id_names += [col]
    if '_intid' in col:
        id_names += [col]
print(id_names)
master_catalogue[id_names].write(
    "{}/master_list_cross_ident_cosmos{}.fits".format(OUT_DIR, SUFFIX), overwrite=True)
id_names.remove('help_id')
id_names.remove('old_help_id')
master_catalogue.remove_columns(id_names) ###Output _____no_output_____ ###Markdown IX - Adding HEALPix indexWe are adding a column with a HEALPix index at order 13 associated with each source. ###Code master_catalogue.add_column(Column(
    data=coords_to_hpidx(master_catalogue['ra'], master_catalogue['dec'], order=13),
    name="hp_idx"
)) ###Output _____no_output_____ ###Markdown X - Saving the catalogue ###Code columns = ["help_id", "old_help_id", "field", "ra", "dec", "hp_idx"]
bands = [column[5:] for column in master_catalogue.colnames if 'f_ap' in column]
for band in bands:
    columns += ["f_ap_{}".format(band), "ferr_ap_{}".format(band),
                "m_ap_{}".format(band), "merr_ap_{}".format(band),
                "f_{}".format(band), "ferr_{}".format(band),
                "m_{}".format(band), "merr_{}".format(band),
                "flag_{}".format(band)]
# columns += ['f_wfc3_f125w', 'ferr_wfc3_f125w', 'm_wfc3_f125w', 'merr_wfc3_f125w', 'flag_wfc3_f125w',
#             'f_acs_f606w', 'ferr_acs_f606w', 'm_acs_f606w', 'merr_acs_f606w', 'flag_acs_f606w',
#             'f_acs_f814w', 'ferr_acs_f814w', 'm_acs_f814w', 'merr_acs_f814w', 'flag_acs_f814w'
#            ]
for tot_band in ['wfc3_f125w', 'acs_f606w', 'acs_f814w', 'irac_i1', 'irac_i2', 'irac_i3', 'irac_i4']:
    columns += ['f_' + tot_band,'ferr_' + tot_band,'m_' + tot_band,'merr_' + tot_band,'flag_' + tot_band,]
columns += ["stellarity", "stellarity_origin", "flag_cleaned", "flag_merged", "flag_gaia", "flag_optnir_obs",
            "flag_optnir_det", "ebv",'zspec_association_flag', 'zspec_qual', 'zspec']
# We check for columns in the master catalogue that we will not save to disk.
print("Missing columns: {}".format(set(master_catalogue.colnames) - set(columns)))
master_catalogue[columns].write("{}/master_catalogue_cosmos{}.fits".format(OUT_DIR, SUFFIX), overwrite = True) ###Output _____no_output_____
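###Markdown As a final sanity check (an added sketch, assuming the file just written above exists), the saved catalogue can be re-opened and the wavelength-domain observation flags tabulated: ###Code
# Sketch: re-read the saved catalogue and count sources per observation-flag value
check = Table.read("{}/master_catalogue_cosmos{}.fits".format(OUT_DIR, SUFFIX))
print(len(check))
print(np.unique(check['flag_optnir_obs'], return_counts=True))
###Output _____no_output_____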
tutorials/streamlit_notebooks/BertForTokenClassification.ipynb
###Markdown ![JohnSnowLabs](https://nlp.johnsnowlabs.com/assets/images/logo.png)[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/streamlit_notebooks/BertForSequenceClassification.ipynb) `BertForSequenceClassification` **Models** 1. Colab Setup ###Code # Installing pyspark and spark-nlp ! pip install --upgrade -q pyspark==3.2.0 spark-nlp==3.4.1 ###Output _____no_output_____ ###Markdown Import Libraries ###Code import pandas as pd import numpy as np import json from pyspark.ml import Pipeline from pyspark.sql import SparkSession import pyspark.sql.functions as F from sparknlp.annotator import * from sparknlp.base import * import sparknlp from sparknlp.pretrained import PretrainedPipeline from pyspark.sql.types import StringType, IntegerType ###Output _____no_output_____ ###Markdown 2. Start Spark Session ###Code spark = sparknlp.start(spark32=True) print("Spark NLP Version :", sparknlp.version()) spark ###Output Spark NLP Version : 3.4.1 ###Markdown 3. Select the DL model ###Code ### Select Model model_antisemitism = 'bert_sequence_classifier_antisemitism' model_trec_coarse = "bert_sequence_classifier_trec_coarse" model_age_news = "bert_sequence_classifier_age_news" model_hatexplain = "bert_sequence_classifier_hatexplain" model_emotion = "bert_sequence_classifier_emotion" model_banking = "bert_sequence_classifier_banking77" ###Output _____no_output_____ ###Markdown 4. Some sample examples ###Code text_antisemitism = ["""Shylock in Merchant of Venice. Shylock was a Jew and moneylender. Depends on the context it is used but as the antisemitism is hotly debated nowadays if I were a Jew I wouldn't like to hear it. Perhaps I'm wrong but that's my opinion.""", """That Jew gripped yo nuts and you did nothing and you been attacking black people ever since. Probably like that shit""", """They came for the Jews, and I did not speak out Because I was not a Jew.Then they came for me and there was no one left to speak for me""", """David is a sephardic jew huh.... now i have to give him my entire heart i guess""", """I asked a genuine question, she has been smearing @georgegalloway for a while now without any evidence. Am I not allowed to ask her to show me the anti Semitism. Remember @RachelRileyRR is the one who said she ‘doesn’t look like a normal jew’ that to me is anti Semitic""", """I pointed the finger directly at the fascists still in control of Europe the muh jew shills began in earnest. Distraction, anger, insult, lies.""", ] text_trec_coarse = ["""Germany is the largest country in Europe economically.""", """What other prince showed his paintings in a two-prince exhibition with Prince Charles in London?""", """What is the name of the chronic neurological autoimmune disease which attacks the protein sheath that surrounds nerve cells causing a gradual loss of movement in the body?""", """How many hands does Bjorn Borg use when hitting his forehand?""", """CNN is the abbreviation for what?""", """Give a reason for American Indians oftentimes dropping out of school.""", """What was organized as a confederate veterans social club in Pulaski, in Tennessee, in 1866?""", """Who was the first person inducted into the U.S. Swimming Hall of Fame?""", """What did the only repealed amendment to the U.S. 
Constitution deal with?""", ] text_age_news = ["""Carlyle Looks Toward Commercial Aerospace (Reuters) Reuters - Private investment firm Carlyle Group, which has a reputation for making well-timed and occasionally controversial plays in the defense industry, has quietly placed its bets on another part of the market.""", """Juventus and Olympique Lyon joined Chelsea and Inter Milan in the last 16 of the Champions League after both scored late goals to seal their places on Wednesday.""", """Fearing the fate of Italy, the centre-right government has threatened to be merciless with those who flout tough restrictions.""", """Cisco Systems is teaming up with researchers at the Massachusetts Institute of Technology to develop new communications technology that the company hopes to ultimately turn into products.""", """Golf is not a sport renowned for being at ease with gender issues. Many of the games great clubs refuse to admit women, and the occasional inclusion of women in men professional tournaments has caused consternation from Arran to Augusta.""", """The Supreme Council of the United Arab Emirates (UAE) on Wednesday evening unanimously elected Sheikh Khalifa bin Zayed Al Nahyan as the new president of the UAE, the official Emirates news agency WAM reported Wednesday.""", """The Norwegian hacker famed for developing DVD encryption cracking software has apparently struck again; this time breaking the locks on Apple Computer Inc.'s wireless music streaming technology.""", """Utility holding company Ameren Corp. announced plans Wednesday to reduce its payroll by offering 950 union and management employees financial incentives to leave.""" ] text_hatexplain = ["""If English is not imposition then Hindi is also not imposition shut up chutiya retards stop hindi imposition.""", """The uk has threatened to return radioactive waste to the eu if an agreement cannot be reached can not we keep the radioactive waste and send back all the paki migrants instead""", """Afghani immigrant once told me that in Afghanistan we do not believe in fences while our conversation was about livestock guardian dogs it now occurs to me what this really means is that there no belief in protection of persons or property""", """I post a video showing the shocking promotion of violence by the south african eff themselves and jewtube still say i am promoting violence""", """I want to go back to school and get an education but i am poor and colleges are kike rip offs i am not satisfied with where i am in life and honestly have no clue where or how to start pushing forward""", """I am confused how a president blocked stimulus checks for those who married to immigrants while he himself married to an immigrant""", """I love you very much!"""] text_emotion = ["""I've been taking or milligrams or times recommended amount and I've fallen asleep a lot faster but I also feel like so funny.""", """I have been with petronas for years I feel that petronas has performed well and made a huge profit.""", """I feel a bit rude writing to an elderly gentleman to ask for gifts because i feel a bit greedy but what is christmas about if not mild greed.""", """I feel romantic too""", """I now feel compromised and skeptical of the value of every unit of work I put in""", """I started feeling sentimental about dolls I had as a child and so began a collection of vintage barbie dolls from the sixties"""] text_banking = ["""I have been waiting over a week. 
Is the card still coming?""", """I need a transaction reversed from my account.""", """How long does it take for cards to be delivered after ordering them?""", """I've just been married and need to update my name""", """I'm interested in learning more about disposable virtual cards. """, """I tried topping up using my card, but the money is gone?"""] ###Output _____no_output_____ ###Markdown 5. Define Spark NLP pipeline ###Code model_dict = {model_antisemitism: text_antisemitism, model_trec_coarse :text_trec_coarse, model_age_news :text_age_news, model_hatexplain: text_hatexplain, model_emotion: text_emotion, model_banking: text_banking} def run_pipeline(model, text, results): document_assembler = DocumentAssembler() \ .setInputCol('text') \ .setOutputCol('document') tokenizer = Tokenizer() \ .setInputCols(['document']) \ .setOutputCol('token') sequenceClassifier = BertForSequenceClassification\ .pretrained(model, 'en') \ .setInputCols(['token', 'document']) \ .setOutputCol('pred_class') pipeline = Pipeline(stages=[document_assembler, tokenizer, sequenceClassifier]) df = spark.createDataFrame(text, StringType()).toDF("text") results[model]=(pipeline.fit(df).transform(df)) ###Output _____no_output_____ ###Markdown 6. Run the pipeline ###Code results = {} for model, text in zip(model_dict.keys(),model_dict.values()): run_pipeline(model, text, results) ###Output bert_sequence_classifier_antisemitism download started this may take some time. Approximate size to download 390.8 MB [OK!] bert_sequence_classifier_trec_coarse download started this may take some time. Approximate size to download 387.8 MB [OK!] bert_sequence_classifier_age_news download started this may take some time. Approximate size to download 40.4 MB [OK!] bert_sequence_classifier_hatexplain download started this may take some time. Approximate size to download 391.1 MB [OK!] bert_sequence_classifier_emotion download started this may take some time. Approximate size to download 391.1 MB [OK!] bert_sequence_classifier_banking77 download started this may take some time. Approximate size to download 391.3 MB [OK!] ###Markdown 7. Visualize results ###Code for model_name, result in zip(results.keys(),results.values()): res = result.select(F.explode(F.arrays_zip(result.document.result, result.pred_class.result, result.pred_class.metadata)).alias("col"))\ .select(F.expr("col['1']").alias("prediction"), F.expr("col['2']").alias("confidence"), F.expr("col['0']").alias("sentence")) udf_func = F.udf(lambda x,y: x["Some("+str(y)+")"]) print("\n",model_name,"\n") res.withColumn('confidence', udf_func(res.confidence, res.prediction)).show(truncate=False) print("\n**********************************\n") ###Output _____no_output_____
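###Markdown 8. (Added sketch) For quick single-string inference outside Spark DataFrames, one of the pipelines can be refitted on an empty DataFrame and wrapped in a `LightPipeline`. The model choice below is arbitrary, and this cell is an illustration rather than part of the original tutorial: ###Code
# Sketch: single-text inference with LightPipeline (assumes the Spark session above is running)
from sparknlp.base import LightPipeline

document_assembler = DocumentAssembler() \
    .setInputCol('text') \
    .setOutputCol('document')
tokenizer = Tokenizer() \
    .setInputCols(['document']) \
    .setOutputCol('token')
sequenceClassifier = BertForSequenceClassification \
    .pretrained(model_emotion, 'en') \
    .setInputCols(['token', 'document']) \
    .setOutputCol('pred_class')
pipeline = Pipeline(stages=[document_assembler, tokenizer, sequenceClassifier])
# Fit on an empty DataFrame (no training happens; this just builds the PipelineModel)
empty_df = spark.createDataFrame([['']]).toDF('text')
light_pipeline = LightPipeline(pipeline.fit(empty_df))
print(light_pipeline.annotate("I feel romantic too")['pred_class'])
###Output _____no_output_____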
mynote.ipynb
###Markdown Python lambda expressions* A lambda expression is an anonymous function* It is written as "lambda arguments : expression"* It is mainly used as an argument to map and filter (a filter example appears at the end of this notebook)![063.png](attachment:063.png) ###Code
lst = map(lambda t: t**t, [1,2,3,4,5])  # raise each element to its own power
print(list(lst))
%matplotlib inline
import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(111)  # row 1, col 1, 1st subplot
dat = [0,1]
ax.plot(dat)
plt.show() ###Output _____no_output_____
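###Markdown As promised above, the same pattern works with filter, which keeps only the elements for which the lambda returns True: ###Code
# filter keeps the elements for which the lambda returns True
evens = filter(lambda t: t % 2 == 0, [1, 2, 3, 4, 5])
print(list(evens))  # [2, 4]
###Output _____no_output_____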
biochemist-python/chapters/Nonlinear Curve Fitting Part 1.ipynb
###Markdown Non-Linear Curve Fitting, Part 1========================= OverviewQuestions How can I analyze enzyme kinetics data in Python? What is the process for non-linear least squares curve fitting in Python? Objectives: Create a pandas dataframe with enzyme kinetics data from a .csv file Add velocity calculations to the dataframe Perform the non-linear regression calculations In this module, we will calculate initial rates from the raw data (ΔA405) in an enzyme kinetics experiment with alkaline phosphatase. In the process, we will import the raw data into a pandas dataframe, use some pandas tools to reorganize the data, and produce a second pandas dataframe that contains the substrate concentrations and initial rates at each concentration. Finally, we will export this information to a csv file to use in the next module, where you will explore nonlinear curve fitting in Python. Importing the DataWe start by importing data from a csv file as we did earlier with the data for linear regression. These data track the appearance of p-nitrophenol for a series of p-nitrophenyl phosphate concentrations in the presence of alkaline phosphatase. We will import the libraries we need, import the data and set up a pandas dataframe. ###Code
# import the libraries we need
import os  # to create a filehandle for the .csv file
import pandas as pd  # for importing the .csv file and creating a dataframe
from scipy import stats  # for performing non-linear regression

# Create the filehandle for the csv file that contains your data
datafile = os.path.join('data', 'AP_kin.csv')  # filehandle created
print(datafile)  # filehandle confirmed ###Output data/AP_kin.csv ###Markdown Creating the pandas dataframeThe filehandle, `datafile`, points to a csv file that contains the raw kinetics data. As we saw in the `Working with Pandas` module, the pandas library has a tool for creating a dataframe from an existing csv file. Notice that the variable for the dataframe is called AP_kin_df. The `_df` at the end of the variable name is a reminder that this is a pandas dataframe. ###Code
# Creating the pandas dataframe using read_csv
AP_kin_df = pd.read_csv(datafile)
AP_kin_df.head()  # looking at the first five rows of the dataframe ###Output _____no_output_____ ###Markdown When you look at this dataframe, notice that the index (the item at the far left of each row) is an integer. In this case, we want to use the 'Time (min)' values that are found in the first series as the index. There is a simple fix - the set_index function. ```python AP_kin_df.set_index('Time (min)', inplace=True)```By making the time values the index for each row, we can easily omit them from our initial rate calculations. We use the `inplace=True` option to make the change to the dataframe permanent. ###Code
AP_kin_df.set_index('Time (min)', inplace=True)
AP_kin_df.head() ###Output _____no_output_____ ###Markdown DatatypeBefore we calculate the slopes to get initial velocities, we need to check the datatypes of the numbers. We must ensure that the numbers are floats, rather than strings, so we can do calculations on them. ###Code AP_kin_df.index.dtype  # checking to see if the numbers are strings or floats ###Output _____no_output_____ ###Markdown Calculating the initial velocityThe index for each row is the time, which will be the x values to get the slope of each line. The values in each column are the absorbance values at each time point, so those will be our y values.
Now we need to follow these steps to calculate the initial velocity at each substrate concentration. 1. Inspect the data.1. Create a second dataframe with the substrate concentration as the first series.1. Calculate the slopes from the first dataframe and add those as a column to the second dataframe.1. Calculate the initial velocity by dividing the slope by the extinction coefficient for p-nitrophenol under these buffer conditions, 15.0 mM$^{-1}$cm$^{-1}$.1. Export the second dataframe to a csv file that we will use in the next module. Inspect the dataIn an earlier module, we used pyplot from matplotlib to create a well-annotated plot of our linear regression data. We could do that here, but we only want to inspect the data to make sure we are on the right track. To do that we can use the [plot command from pandas](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.plot.html), which builds the plot using tools from matplotlib. The syntax is ```python dataframe.plot()```In our case, the only argument we will pass is `marker='o'` so that the individual data points will appear. ###Code
# Inspect the data using the plot command that is available with the dataframe
# We use the plot function that is built into pandas for this simple data display
AP_kin_df.plot(marker = 'o')

# Create a second dataframe with the substrate concentration as the first series.
# Note the syntax for this pandas function - the D and F are capitalized.
MM_df = pd.DataFrame()
MM_df['pNPP (mM)'] = AP_kin_df.columns ###Output _____no_output_____ ###Markdown Calculate the SlopesThere is some scatter in the data, but generally, the slopes of the curves increase with increasing substrate concentration. Now we need to calculate the slopes for each of the lines in the plot above. To do so, we can use the linregress function from scipy.stats that we used for the least squares linear regression analysis of the protein assay data from an earlier module. We will use the index of our dataframe AP_kin_df, `Time (min)`, as the x values. The y values will be taken from each of the series from the same dataframe. Remember that linregress provides five outputs: slope, intercept, r-value, p-value and standard error. We need only the slope, so we will use this format ```python slope, _, _, _, _ = stats.linregress(xdata, ydata)```where _ is just a placeholder that we will ignore.To get the slopes for each series we will use a for loop. First, we'll create an empty list to contain the slopes that are generated as the for loop cycles through the series in the AP_kin_df dataframe. ###Code
# Create an empty list to hold the slopes
slopes = []

# Calculate the slopes from each column in the AP_kin_df dataframe
for column in AP_kin_df.columns:
    slope, _, _, _, _, = stats.linregress(AP_kin_df.index, AP_kin_df[column])
    slopes.append(slope)

# Did we get a list of 11 slopes for each of the 11 series?
slopes

# Populate the new dataframe with the slopes
MM_df['slopes'] = slopes

# Check the dataframe
MM_df ###Output _____no_output_____ ###Markdown Calculate the initial velocityThe initial velocity can be calculated by dividing the slope by the micromolar extinction coefficient under the experimental conditions, 0.015 $\mu$M$^{-1}$cm$^{-1}$. It is possible to complete the calculation and add it to the dataframe with a single line of code.
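For example, a slope of 0.0006 absorbance units min$^{-1}$ corresponds to an initial velocity of $0.0006 / 0.015 = 0.040\ \mu$M min$^{-1}$ of p-nitrophenol produced (this worked example assumes the standard 1 cm path length).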
###Code # Calculate initial velocities and place those in a new column in the dataframe
MM_df['initial velocities'] = MM_df['slopes'] / 0.015
MM_df ###Output _____no_output_____ ###Markdown We will use this dataframe now to perform the nonlinear regression fit using the SciPy library in part 2 of this lesson. To save these data for part 2, we need to write them to a csv file in our data directory. ###Code
outputfile = os.path.join('data', 'MM_data.csv')
MM_df.to_csv(outputfile) ###Output _____no_output_____ ###Markdown Check your understanding You will find an Excel file in your data folder, chymotrypsin_kinetics.xlsx, with some kinetic data from a chymotrypsin experiment. Apply the principles above to create dataframes and a .csv file for creating a Michaelis-Menten plot with these data. Under these assay conditions the extinction coefficient for p-nitrophenol is 18,320 M$^{-1}$cm$^{-1}$.```{admonition} Hint:class: dropdownYou will need to get the data into a layout and file format that is easily read by pandas. Delete the first seven lines of the Excel file.Delete the first column of the Excel file.Save the file as chymotrypsin_kinetics.csv.Your data should look something like this: ![csv image](images/csv_image.png "csv image")``` ```{admonition} Solution:class: dropdown ```python
import os
import pandas as pd
import numpy as np
from scipy import stats
datafile = os.path.join('data', 'chymotrypsin_kinetics.csv')  # filehandle created
chymo_rates_df = pd.read_csv(datafile)
chymo_rates_df = chymo_rates_df.set_index('Time (sec)')
def linregress_slope(df_series):
    slope, _, _, _, _, = stats.linregress(df_series.index, df_series.values)
    return slope
chymo_MM_df = pd.DataFrame()
chymo_MM_df['slopes'] = chymo_rates_df.apply(linregress_slope)
chymo_MM_df['Initial Velocities'] = chymo_MM_df['slopes'] / 0.01832
chymo_MM_df.to_csv('biochemist-python/chapters/data/chymo_MM_data.csv')
print(chymo_MM_df) ```The solution is in the next cell. We will remove that cell before we publish the final Jupyter book. ###Code
import os
import pandas as pd
import numpy as np
from scipy import stats

datafile = os.path.join('data', 'chymotrypsin_kinetics.csv')  # filehandle created
chymo_rates_df = pd.read_csv(datafile)
chymo_rates_df = chymo_rates_df.set_index('Time (sec)')

def linregress_slope(df_series):
    slope, _, _, _, _, = stats.linregress(df_series.index, df_series.values)
    return slope

chymo_MM_df = pd.DataFrame()
chymo_MM_df['slopes'] = chymo_rates_df.apply(linregress_slope)
chymo_MM_df['Initial Velocities'] = chymo_MM_df['slopes'] / 0.01832
outputfile = os.path.join('data', 'chymo_MM_data.csv')
chymo_MM_df.to_csv(outputfile)
print(chymo_MM_df) ###Output slopes Initial Velocities 1 0.000557 0.030412 0.5 0.000539 0.029437 0.25 0.000500 0.027293 0.125 0.000450 0.024563 0.0625 0.000511 0.027877 0.03125 0.000493 0.026903 0.015625 0.000357 0.019495 0.0078125 0.000229 0.012477 0.00390625 0.000111 0.006043 0.00195313 0.000071 0.003899
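###Markdown As a preview of part 2, the Michaelis-Menten equation $v = V_{max}[S]/(K_m + [S])$ can already be fit to MM_df with `scipy.optimize.curve_fit`. The cell below is a minimal sketch (the starting guesses and the float conversion are illustrative assumptions), not the full treatment of the next module. ###Code
# Preview of part 2 (a sketch): fit v = Vmax*[S]/(Km + [S]) to the data above
from scipy.optimize import curve_fit

def michaelis_menten(s, vmax, km):
    return vmax * s / (km + s)

s_data = MM_df['pNPP (mM)'].astype(float)  # the column labels were read in as strings
v_data = MM_df['initial velocities']
popt, pcov = curve_fit(michaelis_menten, s_data, v_data, p0=[v_data.max(), 1.0])
print("Vmax = {:.4f}, Km = {:.4f} mM".format(popt[0], popt[1]))
###Output _____no_output_____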
IBM_AI_Engineering/Course-4-deep-neural-networks-with-pytorch/Week-4-Softmax-Regression-ShallowNN/6.2lab_predicting _MNIST_using_Softmax_v2.ipynb
###Markdown Softmax Classifier Table of ContentsIn this lab, you will use a single layer Softmax to classify handwritten digits from the MNIST database. Make some Data Softmax Classifier Define Softmax, Criterion Function, Optimizer, and Train the Model Analyze ResultsEstimated Time Needed: 25 min Preparation We'll need the following libraries ###Code
# Import the libraries we need for this lab
# Use the following line of code to install the torchvision library
# !conda install -y torchvision

import torch
import torch.nn as nn
import torchvision.transforms as transforms
import torchvision.datasets as dsets
import matplotlib.pylab as plt
import numpy as np ###Output _____no_output_____ ###Markdown Use the following function to plot out the parameters of the Softmax function: ###Code
# The function to plot parameters

def PlotParameters(model):
    W = model.state_dict()['linear.weight'].data
    w_min = W.min().item()
    w_max = W.max().item()
    fig, axes = plt.subplots(2, 5)
    fig.subplots_adjust(hspace=0.01, wspace=0.1)
    for i, ax in enumerate(axes.flat):
        if i < 10:
            # Set the label for the sub-plot.
            ax.set_xlabel("class: {0}".format(i))
            # Plot the image.
            ax.imshow(W[i, :].view(28, 28), vmin=w_min, vmax=w_max, cmap='seismic')
            ax.set_xticks([])
            ax.set_yticks([])
        # Ensure the plot is shown correctly with multiple plots
        # in a single Notebook cell.
    plt.show() ###Output _____no_output_____ ###Markdown Use the following function to visualize the data: ###Code
# Plot the data

def show_data(data_sample):
    plt.imshow(data_sample[0].numpy().reshape(28, 28), cmap='gray')
    plt.title('y = ' + str(data_sample[1]))  # data_sample[1].item() ###Output _____no_output_____ ###Markdown Make Some Data Load the training dataset by setting the parameters train to True and convert it to a tensor by placing a transform object in the argument transform. ###Code
# Create and print the training dataset

train_dataset = dsets.MNIST(root='./data', train=True, download=True, transform=transforms.ToTensor())
print("Print the training dataset:\n ", train_dataset) ###Output Downloading http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz to ./data\MNIST\raw\train-images-idx3-ubyte.gz ###Markdown Load the testing dataset by setting the parameters train to False and convert it to a tensor by placing a transform object in the argument transform. ###Code
# Create and print the validating dataset

validation_dataset = dsets.MNIST(root='./data', train=False, download=True, transform=transforms.ToTensor())
print("Print the validating dataset:\n ", validation_dataset) ###Output Print the validating dataset: Dataset MNIST Number of datapoints: 10000 Root location: ./data Split: Test StandardTransform Transform: ToTensor() ###Markdown Check the shapes of the images and the labels; note that the labels are stored as a one-dimensional tensor of long integers: ###Code
# Print the shapes of the data and the targets
print("Type of data element: ", train_dataset.data.shape)  # train_dataset[0][1].type
print("Type of data element: ", train_dataset.targets.shape)  # the targets form a 1-D tensor of length N, not N*1 (earlier labs used N*1 because they did linear regression) ###Output Type of data element: torch.Size([60000, 28, 28]) Type of data element: torch.Size([60000]) ###Markdown Each element in the rectangular tensor corresponds to a number that represents a pixel intensity as demonstrated by the following image: In this image, the values are inverted, i.e. black represents white.
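To see this concretely (an added check, not part of the original lab), the shape and pixel-intensity range of one sample can be inspected: ###Code
# Inspect the shape and the pixel-intensity range of one training image
img = train_dataset[0][0]
print(img.shape, img.min().item(), img.max().item())
###Output _____no_output_____ ###Markdown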
Print out the label of the fourth element: ###Code
# Print the label

print("The label: ", train_dataset[3][1]) ###Output The label: 1 ###Markdown The result shows the number in the image is 1 Plot the fourth sample: ###Code
# Plot the image

print("The image: ", show_data(train_dataset[3])) ###Output The image: None ###Markdown You see that it is a 1. Now, plot the third sample: ###Code
# Plot the image

show_data(train_dataset[2]) ###Output _____no_output_____ ###Markdown Build a Softmax Classifier Build a Softmax classifier class: ###Code
# Define softmax classifier class

class SoftMax(nn.Module):

    # Constructor
    def __init__(self, input_size, output_size):  # input size 28*28 = 784, output size 10 classes
        super(SoftMax, self).__init__()
        self.linear = nn.Linear(input_size, output_size)

    # Prediction
    def forward(self, x):
        z = self.linear(x)  # note: a plain linear layer; the softmax is applied inside the loss function
        return z ###Output _____no_output_____ ###Markdown The Softmax model requires vector inputs. Note that each sample currently has the rectangular shape 28x28, so it must be flattened. ###Code
# Print the shape of train dataset

train_dataset[0][0].shape ###Output _____no_output_____ ###Markdown Flatten the tensor as shown in this image: The size of the tensor is now 784. Set the input size and output size: ###Code
# Set input size and output size

input_dim = 28 * 28
output_dim = 10 ###Output _____no_output_____ ###Markdown Define the Softmax Classifier, Criterion Function, Optimizer, and Train the Model ###Code
# Create the model

model = SoftMax(input_dim, output_dim)
print("Print the model:\n ", model) ###Output Print the model: SoftMax( (linear): Linear(in_features=784, out_features=10, bias=True) ) ###Markdown View the size of the model parameters: ###Code
# Print the parameters

print('W: ',list(model.parameters())[0].size())
print('b: ',list(model.parameters())[1].size()) ###Output W: torch.Size([10, 784]) b: torch.Size([10]) ###Markdown You can convert the model parameters for each class to a rectangular grid: Plot the model parameters for each class as a square image: ###Code
# Plot the model parameters for each class
# initial values of the parameters look like noise

PlotParameters(model) ###Output _____no_output_____ ###Markdown Define the learning rate, optimizer, criterion, data loader: ###Code
# Define the learning rate, optimizer, criterion and data loader

learning_rate = 0.1
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
criterion = nn.CrossEntropyLoss()  # used for multi-class classification; applies the softmax internally
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=100)
validation_loader = torch.utils.data.DataLoader(dataset=validation_dataset, batch_size=5000) ###Output _____no_output_____ ###Markdown Train the model and determine validation accuracy **(should take a few minutes)**: ###Code
# Train the model

n_epochs = 10
loss_list = []
accuracy_list = []
N_test = len(validation_dataset)

def train_model(n_epochs):
    for epoch in range(n_epochs):
        for x, y in train_loader:
            optimizer.zero_grad()
            # What this line does is that for each element in the training batch
            # it converts the rectangle tensors in a batch to a row tensor.
            z = model(x.view(-1, 28 * 28))
            loss = criterion(z, y)
            loss.backward()
            optimizer.step()

        correct = 0
        # perform a prediction on the validation data
        for x_test, y_test in validation_loader:
            # These two lines of code will first produce an input.
# Then they will calculate the max of the predicted output on the validation data
            z = model(x_test.view(-1, 28 * 28))
            _, yhat = torch.max(z.data, 1)
            # What this line of code does is that it compares each element of y hat
            # with the corresponding element of “y test”. We get a binary vector,
            # which contains 1 at the index if there's a match at the corresponding
            # elements of y hat and “y test”. Otherwise it contains a zero if there isn't a match.
            # We sum all the values in this binary vector.
            correct += (yhat == y_test).sum().item()
        accuracy = correct / N_test  # compute the validation accuracy
        loss_list.append(loss.data)
        accuracy_list.append(accuracy)

train_model(n_epochs) ###Output _____no_output_____ ###Markdown Analyze Results Plot the loss and accuracy on the validation data: ###Code
# Plot the loss and accuracy

fig, ax1 = plt.subplots()
color = 'tab:red'
ax1.plot(loss_list,color=color)
ax1.set_xlabel('epoch',color=color)
ax1.set_ylabel('total loss',color=color)
ax1.tick_params(axis='y', color=color)

ax2 = ax1.twinx()
color = 'tab:blue'
ax2.set_ylabel('accuracy', color=color)
ax2.plot( accuracy_list, color=color)
ax2.tick_params(axis='y', color=color)
fig.tight_layout() ###Output _____no_output_____ ###Markdown View the results of the parameters for each class after the training. You can see that they look like the corresponding numbers. ###Code
# Plot the parameters

PlotParameters(model) ###Output _____no_output_____ ###Markdown We plot the first five misclassified samples and the probability of that class. ###Code
# Plot the misclassified samples
Softmax_fn=nn.Softmax(dim=-1)
count = 0
for x, y in validation_dataset:
    z = model(x.reshape(-1, 28 * 28))
    _, yhat = torch.max(z, 1)
    if yhat != y:
        show_data((x, y))
        plt.show()
        print("yhat:", yhat)
        print("probability of class ", torch.max(Softmax_fn(z)).item())
        count += 1
    if count >= 5:
        break ###Output _____no_output_____ ###Markdown We plot the first five correctly classified samples and the probability of that class; we see the probability is much larger. ###Code
# Plot the classified samples
Softmax_fn=nn.Softmax(dim=-1)
count = 0
for x, y in validation_dataset:
    z = model(x.reshape(-1, 28 * 28))
    _, yhat = torch.max(z, 1)
    if yhat == y:
        show_data((x, y))
        plt.show()
        print("yhat:", yhat)
        print("probability of class ", torch.max(Softmax_fn(z)).item())
        count += 1
    if count >= 5:
        break ###Output _____no_output_____
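###Markdown Finally, a minimal sketch (added for illustration; the sample index is arbitrary) of single-sample inference with the trained model: ###Code
# Sketch: predict the class of one validation image with the trained model
x, y = validation_dataset[0]
z = model(x.view(-1, 28 * 28))
_, yhat = torch.max(z.data, 1)
print("predicted:", yhat.item(), "actual:", y)
###Output _____no_output_____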
analysis/YT-Metadata-Analysis.ipynb
###Markdown YouTube Metadata AnalysisHere we analyze YouTube metadata as upload behaviour for example. ImportsImport matplotlib package and configure some plot drawing variables. ###Code import matplotlib.pylab as plt import numpy as np import pandas as pd from IPython.display import set_matplotlib_formats set_matplotlib_formats('pdf', 'png') plt.rcParams['savefig.dpi'] = 75 plt.rcParams['figure.autolayout'] = False plt.rcParams['figure.figsize'] = 10, 6 plt.rcParams['axes.labelsize'] = 18 plt.rcParams['axes.titlesize'] = 20 plt.rcParams['axes.axisbelow'] = True plt.rcParams['font.size'] = 16 plt.rcParams['lines.linewidth'] = 2.0 plt.rcParams['lines.markersize'] = 8 plt.rcParams['legend.fontsize'] = 13 plt.rcParams['text.usetex'] = True plt.rcParams['font.family'] = "serif" plt.rcParams['font.serif'] = "cm" plt.rcParams['text.latex.preamble'] = r"\usepackage{type1cm}" ###Output _____no_output_____ ###Markdown Load data ###Code import sqlite3 import pandas as pd import isodate import pytz conn = sqlite3.connect("../rsc/caption_party.db") data = pd.read_sql("SELECT * from tab", conn) cest = pytz.timezone("Europe/Berlin") data['publishedAt'] = pd.to_datetime(data['publishedAt']).dt.tz_convert(cest) data['updated'] = pd.to_datetime(data['updated']).dt.tz_convert(cest) data['viewCount'] = pd.to_numeric(data['viewCount'], downcast='unsigned') data['commentCount'] = pd.to_numeric(data['commentCount'], downcast='unsigned', errors='coerce') data['likeCount'] = pd.to_numeric(data['likeCount'], downcast='unsigned', errors='coerce') data['dislikeCount'] = pd.to_numeric(data['dislikeCount'], downcast='unsigned', errors='coerce') data['tags'] = data['tags'].apply(lambda t: t.split("'")[1::2]) data['duration'] = data['duration'].apply(lambda t: isodate.parse_duration(t).total_seconds()) data['faction'] = data['faction'].apply(lambda x: x == 'True') no_subs_ids = data['subtitle'].apply(lambda x: len(x) <= 10) data.loc[no_subs_ids, 'subtitle'] = data.loc[no_subs_ids, 'description'] data.set_index('videoId', inplace=True) ###Output _____no_output_____ ###Markdown Variables Dates and periods ###Code from datetime import datetime as time, timedelta bt_election = cest.localize(time.fromisoformat('2017-09-24')).replace(hour=8) eu_election = cest.localize(time.fromisoformat('2019-05-26')).replace(hour=8) bt_start = bt_election - timedelta(days=83) eu_start = eu_election - timedelta(days=83) bt_end = bt_election + timedelta(days=84) eu_end = eu_election + timedelta(days=84) bt_period = (bt_start <= data['publishedAt']) & (data['publishedAt'] <= bt_end) eu_period = (eu_start <= data['publishedAt']) & (data['publishedAt'] <= eu_end) pre_bt_period = (bt_start <= data['publishedAt']) & (data['publishedAt'] <= bt_election) pre_eu_period = (eu_start <= data['publishedAt']) & (data['publishedAt'] <= eu_election) pos_bt_period = (bt_election <= data['publishedAt']) & (data['publishedAt'] <= bt_end) pos_eu_period = (eu_election <= data['publishedAt']) & (data['publishedAt'] <= eu_end) corpus = data[bt_period|eu_period] bt_corpus = data[bt_period] eu_corpus = data[eu_period] campaign_corpus = data[pre_bt_period|pre_eu_period] bt_campaign_corpus = data[pre_bt_period] eu_campaign_corpus = data[pre_eu_period] ###Output _____no_output_____ ###Markdown Parties, elections and states ###Code parties = {'union': 'Union', 'spd': 'SPD', 'afd': 'AfD', 'fdp': 'FDP', 'linke': 'Die Linke', 'grüne': 'Die Grünen'} elections = ['bt', 'eu'] new_states = ['BB', 'MV', 'SN', 'ST', 'TH'] old_states = ['BW', 'BY', 'HB', 'HH', 'HE', 'NI', 'NW', 
'RP', 'SL', 'SH']
states = {s:s for s in ['DE'] + new_states + old_states} ###Output _____no_output_____ ###Markdown Colors ###Code
import matplotlib.colors as mc
import colorsys

def scale_color(color, amount=0.5):
    c_hls = colorsys.rgb_to_hls(*mc.to_rgb(color))
    c_rgb = colorsys.hls_to_rgb(c_hls[0], 1 - amount * (1 - c_hls[1]), c_hls[2])
    return mc.to_hex(c_rgb)

colors = {
    'union': '#252422',
    'spd': '#e2001a',
    'afd': '#009ee0',
    'fdp': '#ffec01',
    'linke': '#ffa7b6',
    'grüne': '#42923b'}

colors_light = {key: scale_color(color, 0.35) for key, color in colors.items()} ###Output _____no_output_____ ###Markdown Visualization ###Code
from matplotlib.ticker import MaxNLocator, FuncFormatter
from IPython.display import display_html
import matplotlib.patches as mpatches
import matplotlib.dates as mdates
import matplotlib.transforms as transforms

def plot_bar(plot_data, label):
    """
    Returns a bar chart in which a DataFrame is visualized.
    """
    fig = plt.figure()
    ax = fig.gca()
    ax.yaxis.set_major_locator(MaxNLocator(integer=True))
    ax.ticklabel_format(useOffset=False, style='plain')
    ax.get_yaxis().set_major_formatter(FuncFormatter(lambda x, p: format(int(x), ',')))
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.grid(color='gray', linestyle='dashed', alpha=0.35, axis='y')
    ax.set_ylabel(label);
    color_map = [colors[party] for party in parties]
    ax.bar(parties.values(), plot_data[parties.keys()], color=color_map, width=.8)
    return fig

def plot_compare_bar(bt_data, eu_data, label, index_label_dic=parties, legend_left=False, color=None, max_y=None):
    """
    Returns a bar chart where two DataFrames are compared.
    """
    fig = plt.figure()
    ax = fig.gca()
    ax.yaxis.set_major_locator(MaxNLocator(integer=True))
    ax.ticklabel_format(useOffset=False, style='plain')
    ax.get_yaxis().set_major_formatter(FuncFormatter(lambda x, p: format(int(x), ',')))
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.set_ylabel(label);
    ax.grid(color='gray', linestyle='dashed', alpha=0.3, axis='y')
    if color:
        try:
            color_map_left = colors[color]
            color_map_right = colors_light[color]
        except:
            color_map_left = color
            color_map_right = scale_color(color, 0.35)
    else:
        color_map_left = [colors[party] for party in parties]
        color_map_right = [colors_light[party] for party in parties]
    if max_y:
        plt.ylim(top=max_y)
    ax.bar(index_label_dic.values(), bt_data.reindex(index_label_dic.keys()), color=color_map_left, width=-.4, align='edge')
    ax.bar(index_label_dic.values(), eu_data.reindex(index_label_dic.keys()), color=color_map_right, width=.4, align='edge')
    if color:
        print_legend(ax, legend_left, [color])
    else:
        print_legend(ax, legend_left)
    return fig

def display_side_by_side(*args):
    """
    Displays multiple pandas DataFrames side by side.
    """
    html_str=''
    for df in args:
        html_str+=df.to_html()
    display_html(html_str.replace('table','table style="display:inline"'),raw=True)

def top_n(df, column, n):
    """
    Sorts a pandas DataFrame and returns the first n entries of a specific column.
""" return df[[column]].sort_values(ascending=False, by=column).iloc[:n] def plot_hist(bt, eu, ylabel): fig, ax = plt.subplots(2,1, figsize=(10,5), sharey=True) ind_bt = bt.index ind_eu = eu.index # Find week of election bt_election_idx = np.argmax(ind_bt[ind_bt <= (bt_election - timedelta(hours=8))]) eu_election_idx = np.argmax(ind_eu[ind_eu <= (eu_election - timedelta(hours=8))]) ax[0].axvline(x=bt_election_idx, c='black', linewidth=2, linestyle='dashed') ax[1].axvline(x=eu_election_idx, c='black', linewidth=2, linestyle='dashed') # Election label trans_bt = transforms.blended_transform_factory(ax[0].transData, ax[0].transAxes) trans_eu = transforms.blended_transform_factory(ax[1].transData, ax[1].transAxes) ax[0].text(bt_election_idx, 1.05, '2017 Bundestag election', transform=trans_bt, horizontalalignment='center', size=12) ax[1].text(eu_election_idx, 1.05, '2019 European election', transform=trans_eu, horizontalalignment='center', size=12) # Plot data bt_bottom = [0] * len(ind_bt) eu_bottom = [0] * len(ind_eu) for party in parties: color = colors[party] label = party.upper() bt_values = bt.loc[ind_bt, party].fillna(0) eu_values = eu.loc[ind_eu, party].fillna(0) bt_values.plot.bar(ax=ax[0], bottom=bt_bottom, color=color, label=parties[party]) eu_values.plot.bar(ax=ax[1], bottom=eu_bottom, color=color, label=parties[party]) bt_bottom = [x + y for x, y in zip(bt_bottom, bt_values)] eu_bottom = [x + y for x, y in zip(eu_bottom, eu_values)] ax[0].spines['top'].set_visible(False) ax[0].spines['right'].set_visible(False) ax[1].spines['top'].set_visible(False) ax[1].spines['right'].set_visible(False) ticks = 3 off = 2 ax[0].set_xticks(ax[0].get_xticks()[off::ticks]) ax[1].set_xticks(ax[1].get_xticks()[off::ticks]) ax[0].xaxis.set_major_formatter(plt.FixedFormatter(ind_bt[off::ticks].to_series(keep_tz=True).dt.strftime("%d %b"))) ax[1].xaxis.set_major_formatter(plt.FixedFormatter(ind_eu[off::ticks].to_series(keep_tz=True).dt.strftime("%d %b"))) plt.setp(ax[0].get_xticklabels(), rotation=0) plt.setp(ax[1].get_xticklabels(), rotation=0) ax[0].grid(color='gray', linestyle='dashed', alpha=0.3, axis='y') ax[1].grid(color='gray', linestyle='dashed', alpha=0.3, axis='y') ax[0].axes.get_xaxis().get_label().set_visible(False) ax[1].axes.get_xaxis().get_label().set_visible(False) ax[0].get_yaxis().set_major_formatter(FuncFormatter(lambda x, p: format(int(x), ','))) ax[1].get_yaxis().set_major_formatter(FuncFormatter(lambda x, p: format(int(x), ','))) ax[0].set_ylabel(ylabel) ax[1].set_ylabel(ylabel) ax[0].legend(loc="upper center", prop={'size': 13}, ncol=len(parties), bbox_to_anchor=(0.5, -0.5)) plt.tight_layout() return fig def print_legend(ax, left=False, legend_colors=parties): if left: label = ax.annotate('Bundestag election\n European election', xy=(0, 1), xytext=(57, -10), horizontalalignment='left', verticalalignment='top', xycoords='axes fraction', textcoords='offset points',) else: label = ax.annotate('Bundestag election\n European election', xy=(1, 1), xytext=(-15, -10), horizontalalignment='right', verticalalignment='top', xycoords='axes fraction', textcoords='offset points') ax.figure.canvas.draw() bbox = label.get_window_extent() bbox_data = ax.transData.inverted().transform(bbox) text_x_left = bbox_data[0][0] text_x_right = bbox_data[1][0] text_y_bottom = bbox_data[0][1] text_y_top = bbox_data[1][1] text_width = text_x_right - text_x_left text_height = text_y_top - text_y_bottom patches_width = 0.4 * text_width x_box_border = 0.04 * text_width y_box_border = 0.04 * text_height box_width = 
patches_width + text_width + 2 * x_box_border box_height = text_height + 5 * y_box_border box_position = (text_x_left-patches_width-x_box_border, text_y_bottom - 2 * y_box_border) box = mpatches.Rectangle(box_position, box_width, box_height, alpha=.9, facecolor='white', edgecolor='black', lw=0.5) ax.add_patch(box) num_patches = len(legend_colors) patch_width = (patches_width - 2 * x_box_border)/num_patches patch_height = (box_height/2 - 4.5 * y_box_border) for num, party in enumerate(legend_colors): try: color = colors[party] lcolor = colors_light[party] except: color = party lcolor = scale_color(color, 0.35) patch_x = box_position[0] + x_box_border + num * patch_width patch_y = box_position[1] + 2.5 * y_box_border patch = mpatches.Rectangle((patch_x, patch_y), patch_width, patch_height, color=lcolor) ax.add_patch(patch) patch_x = box_position[0] + x_box_border + num * patch_width patch_y = box_position[1] + box_height - patch_height - 2.5 * y_box_border patch = mpatches.Rectangle((patch_x, patch_y), patch_width, patch_height, color=color) ax.add_patch(patch) def plot_stacked_compare_bar(bt_data, eu_data, label, legend_left=False, max_y=None): """ Returns a bar chart where two DataFrames are compared. """ fig = plt.figure() ax = fig.gca() ax.yaxis.set_major_locator(MaxNLocator(integer=True)) ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) ax.ticklabel_format(useOffset=False, style='plain') ax.get_yaxis().set_major_formatter(FuncFormatter(lambda x, p: format(int(x), ','))) ax.set_ylabel(label); ax.grid(color='gray', linestyle='dashed', alpha=0.3, axis='y') if max_y: plt.ylim(top=max_y) bt_bottom = [0] * len(bt_data.index) eu_bottom = [0] * len(eu_data.index) for party in parties: color_left = colors[party] color_right = colors_light[party] bt_values = bt_data[party].fillna(0) eu_values = eu_data[party].fillna(0) ax.bar(bt_data.index, bt_values, bottom=bt_bottom, color=color_left, width=-.4, align='edge') ax.bar(eu_data.index, eu_values, bottom=eu_bottom, color=color_right, width=.4, align='edge') bt_bottom = [x + y for x, y in zip(bt_bottom, bt_values)] eu_bottom = [x + y for x, y in zip(eu_bottom, eu_values)] print_legend(ax, legend_left) return fig from collections import OrderedDict def plot_negative_hist(bt1, bt2, eu1, eu2, ylabel, sharey=True): fig, ax = plt.subplots(2,1, figsize=(10,5), sharey=sharey) ind_bt = bt1.index ind_eu = eu1.index # Find week of election bt_election_idx = np.argmax(ind_bt[ind_bt <= (bt_election - timedelta(hours=8))]) eu_election_idx = np.argmax(ind_eu[ind_eu <= (eu_election - timedelta(hours=8))]) ax[0].axvline(x=bt_election_idx, c='black', linewidth=2, linestyle='dashed') ax[1].axvline(x=eu_election_idx, c='black', linewidth=2, linestyle='dashed') # Election label trans_bt = transforms.blended_transform_factory(ax[0].transData, ax[0].transAxes) trans_eu = transforms.blended_transform_factory(ax[1].transData, ax[1].transAxes) ax[0].text(bt_election_idx, -.1, '2017 Bundestag election', transform=trans_bt, horizontalalignment='center', size=12) ax[1].text(eu_election_idx, 1.05, '2019 European election', transform=trans_eu, horizontalalignment='center', size=12) # Plot data bt1_bottom = [0] * len(ind_bt) eu1_bottom = [0] * len(ind_eu) bt2_bottom = [0] * len(ind_bt) eu2_bottom = [0] * len(ind_eu) for party in parties: color = colors[party] label = party.upper() # Plot bt1 and eu1 bt1_values = bt1.loc[ind_bt, party].fillna(0) eu1_values = eu1.loc[ind_eu, party].fillna(0) bt1_values.plot.bar(ax=ax[0], bottom=bt1_bottom, color=color, 
label=parties[party])
        eu1_values.plot.bar(ax=ax[1], bottom=eu1_bottom, color=color, label=parties[party])

        # Plot bt2 and eu2
        bt2_values = -1 * bt2.loc[ind_bt, party].fillna(0)
        eu2_values = -1 * eu2.loc[ind_eu, party].fillna(0)
        bt2_values.plot.bar(ax=ax[0], bottom=bt2_bottom, color=color, label=parties[party])
        eu2_values.plot.bar(ax=ax[1], bottom=eu2_bottom, color=color, label=parties[party])

        # Updating bt1 and eu1
        bt1_bottom = [x + y for x, y in zip(bt1_bottom, bt1_values)]
        eu1_bottom = [x + y for x, y in zip(eu1_bottom, eu1_values)]

        # Updating bt2 and eu2
        bt2_bottom = [x + y for x, y in zip(bt2_bottom, bt2_values)]
        eu2_bottom = [x + y for x, y in zip(eu2_bottom, eu2_values)]

    ax[0].xaxis.tick_top()
    ax[0].spines['bottom'].set_visible(False)
    ax[0].spines['right'].set_visible(False)
    ax[1].spines['top'].set_visible(False)
    ax[1].spines['right'].set_visible(False)

    ticks = 3
    off = 2
    ax[0].set_xticks(ax[0].get_xticks()[off::ticks])
    ax[1].set_xticks(ax[1].get_xticks()[off::ticks])
    ax[0].xaxis.set_major_formatter(plt.FixedFormatter(ind_bt[off::ticks].to_series(keep_tz=True).dt.strftime("%d %b")))
    ax[1].xaxis.set_major_formatter(plt.FixedFormatter(ind_eu[off::ticks].to_series(keep_tz=True).dt.strftime("%d %b")))
    ax[0].yaxis.set_major_locator(MaxNLocator(6, integer=True))
    ax[1].yaxis.set_major_locator(MaxNLocator(6, integer=True))

    plt.setp(ax[0].get_xticklabels(), rotation=0)
    plt.setp(ax[1].get_xticklabels(), rotation=0)

    ax[0].grid(color='gray', linestyle='dashed', alpha=0.3, axis='y')
    ax[1].grid(color='gray', linestyle='dashed', alpha=0.3, axis='y')

    ax[0].axes.get_xaxis().get_label().set_visible(False)
    ax[1].axes.get_xaxis().get_label().set_visible(False)

    ax[0].set_ylabel(ylabel)
    ax[1].set_ylabel(ylabel)

    plt.gca()
    handles, labels = plt.gca().get_legend_handles_labels()
    by_label = OrderedDict(zip(labels, handles))
    ax[0].legend(by_label.values(), by_label.keys(), loc="lower center", prop={'size': 13}, ncol=len(parties), bbox_to_anchor=(0.5, -0.5))

    plt.tight_layout()
    return fig ###Output _____no_output_____ ###Markdown Plotting CHANNEL ANALYSIS ###Code
channel_count = corpus.groupby('party').nunique()['channelId']
fig = plot_bar(channel_count, 'Channel')
fig.savefig('outputs/yt-channel-count.pdf', bbox_inches = 'tight', pad_inches = 0)
print("Channel which uploaded videos in investigated periods:", corpus.channelId.nunique())
print("Number of faction-related channels:", sum(corpus.faction)/len(corpus))  # note: this prints the share of videos from faction-related channels, not a channel count ###Output Channel which uploaded videos in investigated periods: 148 Number of faction-related channels: 0.7142684984333575 ###Markdown UPLOAD ANALYSIS General properties Total videos ###Code
print("Total number of videos in both investigated periods:", len(corpus))
print("Total number of words contained in all videos:", corpus['subtitle'].apply(lambda x: len(x.split())).sum()) ###Output Total number of videos in both investigated periods: 8298 Total number of words contained in all videos: 9125283 ###Markdown Median videos per channel ###Code corpus.reset_index().groupby(['channelId', 'party']).count()['videoId'].groupby('party').median() ###Output _____no_output_____ ###Markdown Missing subtitles count ###Code
bt_msubtitle_count = bt_corpus.groupby('party')['subtitle'].apply(lambda df: sum(df.apply(lambda x: len(x) < 10)))
eu_msubtitle_count = eu_corpus.groupby('party')['subtitle'].apply(lambda df: sum(df.apply(lambda x: len(x) < 10)))
fig = plot_compare_bar(bt_msubtitle_count, eu_msubtitle_count, 'Videos without subs', legend_left=True)
fig.savefig('outputs/yt-videos-without-subs-count.pdf',
bbox_inches = 'tight', pad_inches = 0) ###Output _____no_output_____ ###Markdown All videos ###Code bt_video_count = bt_corpus.groupby('party')['title'].count() eu_video_count = eu_corpus.groupby('party')['title'].count() fig = plot_compare_bar(bt_video_count, eu_video_count, 'Number of uploads') plt.savefig('outputs/yt-video-count.pdf', bbox_inches = 'tight', pad_inches = 0) ###Output _____no_output_____ ###Markdown State level ###Code bt = bt_corpus.groupby(['party','state'])['title'].count().unstack().T.drop('DE') eu = eu_corpus.groupby(['party','state'])['title'].count().unstack().T.drop('DE') fig = plot_stacked_compare_bar(bt, eu, 'Number of uploads') plt.savefig('outputs/yt-video-count-all.pdf', bbox_inches = 'tight', pad_inches = 0) ###Output _____no_output_____ ###Markdown Faction-unrelated ###Code bt_video_count = bt_corpus[~bt_corpus.faction].groupby('party')['title'].count() eu_video_count = eu_corpus[~eu_corpus.faction].groupby('party')['title'].count() fig = plot_compare_bar(bt_video_count, eu_video_count, 'Number of uploads') plt.savefig('outputs/yt-no-faction-video-count.pdf', bbox_inches = 'tight', pad_inches = 0) ###Output _____no_output_____ ###Markdown Faction-related ###Code bt_video_count = bt_corpus[bt_corpus.faction].groupby('party')['title'].count() eu_video_count = eu_corpus[eu_corpus.faction].groupby('party')['title'].count() fig = plot_compare_bar(bt_video_count, eu_video_count, 'Number of uploads') plt.savefig('outputs/yt-faction-video-count.pdf', bbox_inches = 'tight', pad_inches = 0) ###Output _____no_output_____ ###Markdown Federal-wide channels Faction-unrelated ###Code bt_video_count = bt_corpus[~bt_corpus.faction & (bt_corpus.state == 'DE')].groupby('party')['title'].count() eu_video_count = eu_corpus[~eu_corpus.faction & (eu_corpus.state == 'DE')].groupby('party')['title'].count() fig = plot_compare_bar(bt_video_count, eu_video_count, 'Number of uploads') plt.savefig('outputs/yt-nfaction-video-count-federalwide.pdf', bbox_inches = 'tight', pad_inches = 0) ###Output _____no_output_____ ###Markdown Faction-related ###Code bt_video_count = bt_corpus[bt_corpus.faction & (bt_corpus.state == 'DE')].groupby('party')['title'].count() eu_video_count = eu_corpus[eu_corpus.faction & (eu_corpus.state == 'DE')].groupby('party')['title'].count() fig = plot_compare_bar(bt_video_count, eu_video_count, 'Number of uploads') plt.savefig('outputs/yt-faction-video-count-federalwide.pdf', bbox_inches = 'tight', pad_inches = 0) ###Output _____no_output_____ ###Markdown Time analysis ###Code bt_upload_count = bt_corpus.groupby([pd.Grouper(key='publishedAt', freq='W', label='right'), 'party'])['title'].count().unstack() eu_upload_count = eu_corpus.groupby([pd.Grouper(key='publishedAt', freq='W', label='right'), 'party'])['title'].count().unstack() fig = plot_hist(bt_upload_count, eu_upload_count, 'Number of uploads') fig.savefig('outputs/yt-upload-count-analysis.pdf', bbox_inches = 'tight', pad_inches = 0) ###Output _____no_output_____ ###Markdown Faction-unrelated ###Code bt_nfaction_upload_count = bt_corpus[~bt_corpus.faction].groupby([pd.Grouper(key='publishedAt', freq='W', label='right'), 'party'])['title'].count().unstack() eu_nfaction_upload_count = eu_corpus[~eu_corpus.faction].groupby([pd.Grouper(key='publishedAt', freq='W', label='right'), 'party'])['title'].count().unstack() fig = plot_hist(bt_nfaction_upload_count, eu_nfaction_upload_count, 'Number of uploads') fig.savefig('outputs/yt-nfaction-upload-count-analysis.pdf', bbox_inches = 'tight', pad_inches = 0) ###Output 
_____no_output_____ ###Markdown Faction-related ###Code bt_faction_upload_count = bt_corpus[bt_corpus.faction].groupby([pd.Grouper(key='publishedAt', freq='W', label='right'), 'party'])['title'].count().unstack() eu_faction_upload_count = eu_corpus[eu_corpus.faction].groupby([pd.Grouper(key='publishedAt', freq='W', label='right'), 'party'])['title'].count().unstack() fig = plot_hist(bt_faction_upload_count, eu_faction_upload_count, 'Number of uploads') fig.savefig('outputs/yt-faction-upload-count-analysis.pdf', bbox_inches = 'tight', pad_inches = 0) ###Output _____no_output_____ ###Markdown VIDEO DURATION ANALYSIS ###Code bt_video_length = bt_corpus.groupby('party')['duration'].median() eu_video_length = eu_corpus.groupby('party')['duration'].median() fig = plot_compare_bar(bt_video_length, eu_video_length, 'Number of seconds', legend_left=True) plt.savefig('outputs/yt-duration-median.pdf', bbox_inches = 'tight', pad_inches = 0) ###Output _____no_output_____ ###Markdown VIEW ANALYSIS General properties Average views per channel ###Code corpus.reset_index().groupby(['channelId', 'party']).sum()['viewCount'].groupby('party').median() ###Output _____no_output_____ ###Markdown Average views per video ###Code corpus.reset_index().groupby(['party'])['viewCount'].mean() ###Output _____no_output_____ ###Markdown Federal-wide channels vs state-level channels ###Code corpus.groupby(['party', corpus.state == 'DE'])['viewCount'].sum()/(corpus.groupby('party')['viewCount'].sum()) corpus.groupby(corpus.state == 'DE')['viewCount'].sum()/(corpus['viewCount'].sum()) ###Output _____no_output_____ ###Markdown All videos ###Code bt_view_count = bt_corpus.groupby('party')['viewCount'].sum() eu_view_count = eu_corpus.groupby('party')['viewCount'].sum() fig = plot_compare_bar(bt_view_count, eu_view_count, 'Number of views') plt.savefig('outputs/yt-view-count.pdf', bbox_inches = 'tight', pad_inches = 0) ###Output _____no_output_____ ###Markdown State level ###Code bt_view_count_all = bt_corpus.groupby('state')['viewCount'].sum() eu_view_count_all = eu_corpus.groupby('state')['viewCount'].sum() fig = plot_compare_bar(bt_view_count_all, eu_view_count_all, 'Number of views', color='union', index_label_dic=states) plt.savefig('outputs/yt-view-count-all.pdf', bbox_inches = 'tight', pad_inches = 0) bt = bt_corpus.groupby(['party','state'])['viewCount'].sum().unstack().T.drop('DE') eu = eu_corpus.groupby(['party','state'])['viewCount'].sum().unstack().T.drop('DE') fig = plot_stacked_compare_bar(bt, eu, "Number of views", legend_left=True) plt.savefig('outputs/yt-view-count-stacked.pdf', bbox_inches = 'tight', pad_inches = 0) ###Output _____no_output_____ ###Markdown Compare old states / new states ###Code data.loc[:, 'is_old_state_video'] = data['state'].apply(lambda x: x in old_states) bt = data[bt_period & (corpus.state != 'DE') & (corpus.state != 'BE')].groupby(['party', 'is_old_state_video'])['viewCount'].sum().unstack() eu = data[eu_period & (corpus.state != 'DE') & (corpus.state != 'BE')].groupby(['party', 'is_old_state_video'])['viewCount'].sum().unstack() fig = plot_compare_bar(bt[False], eu[False], 'Views in "Old States"') fig = plot_compare_bar(bt[True], eu[True], 'Views in "New States"') temp = corpus.copy() temp.loc[:, 'is_old_state_video'] = corpus['state'].apply(lambda x: x in old_states) temp['viewCount'] = temp['viewCount'].fillna(0) total_views = temp[(temp.state != 'DE') & (temp.state != 'BE')].groupby('party')['viewCount'].sum() region_views = temp[(temp.state != 'DE') & (temp.state != 
'BE')].groupby(['party', 'is_old_state_video'])['viewCount'].sum().unstack() print(region_views[True]/total_views) print(region_views[False]/total_views) #fig = plot_compare_bar(eu[False], eu[True], 'Views in "Old States"') ###Output party afd 0.788071 fdp 0.797200 grüne 0.408657 linke 0.345436 spd 0.830745 union 0.919392 dtype: float64 party afd 0.211929 fdp 0.202800 grüne 0.591343 linke 0.654564 spd 0.169255 union 0.080608 dtype: float64 ###Markdown Faction-unrelated ###Code bt_view_count = bt_corpus[~bt_corpus.faction].groupby('party')['viewCount'].sum() eu_view_count = eu_corpus[~eu_corpus.faction].groupby('party')['viewCount'].sum() fig = plot_compare_bar(bt_view_count, eu_view_count, 'Number of views') plt.savefig('outputs/yt-view-count-nfaction.pdf', bbox_inches = 'tight', pad_inches = 0) ###Output _____no_output_____ ###Markdown Faction-related ###Code bt_view_count = bt_corpus[bt_corpus.faction].groupby('party')['viewCount'].sum() eu_view_count = eu_corpus[eu_corpus.faction].groupby('party')['viewCount'].sum() fig = plot_compare_bar(bt_view_count, eu_view_count, 'Number of views') plt.savefig('outputs/yt-view-count-faction.pdf', bbox_inches = 'tight', pad_inches = 0) ###Output _____no_output_____ ###Markdown Federal-wide channels ###Code bt_view_count = bt_corpus[bt_corpus.state == 'DE'].groupby('party')['viewCount'].sum() eu_view_count = eu_corpus[eu_corpus.state == 'DE'].groupby('party')['viewCount'].sum() fig = plot_compare_bar(bt_view_count, eu_view_count, 'Number of views') plt.savefig('outputs/yt-view-count-de.pdf', bbox_inches = 'tight', pad_inches = 0) ###Output _____no_output_____ ###Markdown Faction-unrelated ###Code bt = bt_corpus[~bt_corpus.faction & (bt_corpus.state == 'DE')].groupby('party')['viewCount'].sum() eu = eu_corpus[~eu_corpus.faction & (eu_corpus.state == 'DE')].groupby('party')['viewCount'].sum() fig = plot_compare_bar(bt, eu, 'Number of views') plt.savefig('outputs/yt-view-count-de-nfaction.pdf', bbox_inches = 'tight', pad_inches = 0) ###Output _____no_output_____ ###Markdown Faction-related ###Code bt = bt_corpus[bt_corpus.faction & (bt_corpus.state == 'DE')].groupby('party')['viewCount'].sum() eu = eu_corpus[eu_corpus.faction & (eu_corpus.state == 'DE')].groupby('party')['viewCount'].sum() fig = plot_compare_bar(bt, eu, 'Number of views') plt.savefig('outputs/yt-view-count-de-faction.pdf', bbox_inches = 'tight', pad_inches = 0) ###Output _____no_output_____ ###Markdown Time analysis All videos ###Code bt_view_count = bt_corpus.groupby([pd.Grouper(key='publishedAt', freq='W', label='right'), 'party'])['viewCount'].sum().unstack() eu_view_count = eu_corpus.groupby([pd.Grouper(key='publishedAt', freq='W', label='right'), 'party'])['viewCount'].sum().unstack() fig = plot_hist(bt_view_count, eu_view_count, 'Number of views') fig.savefig('outputs/yt-view-count-analysis.pdf', bbox_inches = 'tight', pad_inches = 0) ###Output _____no_output_____ ###Markdown Faction-unrelated ###Code bt_nfaction_view_count = bt_corpus[~bt_corpus.faction].groupby([pd.Grouper(key='publishedAt', freq='W', label='right'), 'party'])['viewCount'].sum().unstack() eu_nfaction_view_count = eu_corpus[~eu_corpus.faction].groupby([pd.Grouper(key='publishedAt', freq='W', label='right'), 'party'])['viewCount'].sum().unstack() fig = plot_hist(bt_nfaction_view_count, eu_nfaction_view_count, 'Number of views') fig.savefig('outputs/yt-no-faction-view-count-analysis.pdf', bbox_inches = 'tight', pad_inches = 0) ###Output _____no_output_____ ###Markdown Faction-related ###Code bt_faction_view_count = 
bt_corpus[bt_corpus.faction].groupby([pd.Grouper(key='publishedAt', freq='W', label='right'), 'party'])['viewCount'].sum().unstack() eu_faction_view_count = eu_corpus[eu_corpus.faction].groupby([pd.Grouper(key='publishedAt', freq='W', label='right'), 'party'])['viewCount'].sum().unstack() fig = plot_hist(bt_faction_view_count, eu_faction_view_count, 'Number of views') fig.savefig('outputs/yt-faction-view-count-analysis.pdf', bbox_inches = 'tight', pad_inches = 0) ###Output _____no_output_____ ###Markdown COMMENT ANALYSIS ###Code bt_comment_count = bt_corpus.groupby('party')['commentCount'].sum() eu_comment_count = eu_corpus.groupby('party')['commentCount'].sum() fig = plot_compare_bar(bt_comment_count, eu_comment_count, 'Number of comments') plt.savefig('outputs/yt-comment-count.pdf', bbox_inches = 'tight', pad_inches = 0) bt_disabled_comments = bt_corpus[bt_corpus.state == 'DE'].groupby('party')['commentCount'].apply(lambda l: sum(np.isnan(x) for x in l)/len(l)) eu_disabled_comments = eu_corpus[eu_corpus.state == 'DE'].groupby('party')['commentCount'].apply(lambda l: sum(np.isnan(x) for x in l)/len(l)) print("BT: " + str([(party, bt_disabled_comments[party]/len(bt_corpus.query('party == @party'))) for party in parties])) print("EU: " + str([(party, eu_disabled_comments[party]/len(eu_corpus.query('party == @party'))) for party in parties])) disabled_comments = corpus[corpus.state == 'DE'].groupby('party')['commentCount'].apply(lambda l: sum(np.isnan(x) for x in l)/len(l)) disabled_comments #fig = plot_compare_bar(bt_disabled_comments, eu_disabled_comments, 'Percentage of\nClosed Comment Sections', legend_left=True) #plt.savefig('outputs/yt-disabled_comments.pdf', bbox_inches = 'tight', pad_inches = 0) bt = bt_corpus.groupby(['party', 'state'])['commentCount'].sum().unstack().T eu = eu_corpus.groupby(['party', 'state'])['commentCount'].sum().unstack().T bt = bt.loc[['DE'] + [ix for ix in bt.index if ix != 'DE']] eu = eu.loc[['DE'] + [ix for ix in eu.index if ix != 'DE']] fig = plot_stacked_compare_bar(bt, eu, 'Number of comments') plt.savefig('outputs/yt-comment-count-states.pdf', bbox_inches = 'tight', pad_inches = 0) for party in parties: bt = bt_corpus.loc[bt_corpus.party == party].groupby('state')['commentCount'].sum() eu = eu_corpus.loc[eu_corpus.party == party].groupby('state')['commentCount'].sum() fig = plot_compare_bar(bt, eu, parties[party] + ' Number of comments', color=party, index_label_dic=states, max_y=140000) plt.savefig('outputs/yt-comment-count-' + party + '.pdf', bbox_inches = 'tight', pad_inches = 0) ###Output _____no_output_____ ###Markdown All comments in time ###Code bt_comment_count = bt_corpus.groupby([pd.Grouper(key='publishedAt', freq='W', label='right'), 'party'])['commentCount'].sum().unstack() eu_comment_count = eu_corpus.groupby([pd.Grouper(key='publishedAt', freq='W', label='right'), 'party'])['commentCount'].sum().unstack() fig = plot_hist(bt_comment_count, eu_comment_count, 'Number of comments') fig.savefig('outputs/yt-comment-count-analysis.pdf', bbox_inches = 'tight', pad_inches = 0) ###Output _____no_output_____ ###Markdown Federal-wide videos in time ###Code bt_comment_count = bt_corpus[bt_corpus.state == 'DE'].groupby([pd.Grouper(key='publishedAt', freq='W', label='right'), 'party'])['commentCount'].sum().unstack() eu_comment_count = eu_corpus[eu_corpus.state == 'DE'].groupby([pd.Grouper(key='publishedAt', freq='W', label='right'), 'party'])['commentCount'].sum().unstack() fig = plot_hist(bt_comment_count, eu_comment_count, 'Number of comments') 
fig.savefig('outputs/yt-comment-count-analysis-de.pdf', bbox_inches = 'tight', pad_inches = 0) ###Output _____no_output_____ ###Markdown Non-faction videos ###Code bt_nfaction_comment_count = bt_corpus[~bt_corpus.faction].groupby([pd.Grouper(key='publishedAt', freq='W', label='right'), 'party'])['commentCount'].sum().unstack() eu_nfaction_comment_count = eu_corpus[~eu_corpus.faction].groupby([pd.Grouper(key='publishedAt', freq='W', label='right'), 'party'])['commentCount'].sum().unstack() fig = plot_hist(bt_nfaction_comment_count, eu_nfaction_comment_count, 'Number of comments') fig.savefig('outputs/yt-no-faction-comment-count-analysis.pdf', bbox_inches = 'tight', pad_inches = 0) ###Output _____no_output_____ ###Markdown Faction-only videos ###Code bt_faction_comment_count = bt_corpus[bt_corpus.faction].groupby([pd.Grouper(key='publishedAt', freq='W', label='right'), 'party'])['commentCount'].sum().unstack() eu_faction_comment_count = eu_corpus[eu_corpus.faction].groupby([pd.Grouper(key='publishedAt', freq='W', label='right'), 'party'])['commentCount'].sum().unstack() fig = plot_hist(bt_faction_comment_count, eu_faction_comment_count, 'Number of comments') fig.savefig('outputs/yt-faction-comment-analysis.pdf', bbox_inches = 'tight', pad_inches = 0) bt_corpus.reset_index().groupby(['channelId', 'party']).sum()['commentCount'].groupby('party').mean() eu_corpus.reset_index().groupby(['channelId', 'party']).sum()['commentCount'].groupby('party').mean() bt_corpus[bt_corpus.state == 'DE'].reset_index().groupby(['channelId', 'party']).sum()['commentCount'].groupby('party').mean() eu_corpus[eu_corpus.state == 'DE'].reset_index().groupby(['channelId', 'party']).sum()['commentCount'].groupby('party').mean() ###Output _____no_output_____ ###Markdown RATING ANALYSIS ###Code print(bt_corpus[['likeCount', 'dislikeCount']].sum()) print(eu_corpus[['likeCount', 'dislikeCount']].sum()) ###Output likeCount 103061.0 dislikeCount 88281.0 dtype: float64 likeCount 1001289.0 dislikeCount 90216.0 dtype: float64 ###Markdown All videos ###Code bt_like_count = bt_corpus.groupby([pd.Grouper(key='publishedAt', freq='W', label='right'), 'party'])['likeCount'].sum().unstack() eu_like_count = eu_corpus.groupby([pd.Grouper(key='publishedAt', freq='W', label='right'), 'party'])['likeCount'].sum().unstack() bt_dislike_count = bt_corpus.groupby([pd.Grouper(key='publishedAt', freq='W', label='right'), 'party'])['dislikeCount'].sum().unstack() eu_dislike_count = eu_corpus.groupby([pd.Grouper(key='publishedAt', freq='W', label='right'), 'party'])['dislikeCount'].sum().unstack() fig = plot_negative_hist(bt_like_count, bt_dislike_count, eu_like_count, eu_dislike_count, 'Dislikes / Likes') fig.savefig('outputs/yt-rating-analysis.pdf', bbox_inches = 'tight', pad_inches = 0) ###Output _____no_output_____ ###Markdown Non-faction videos ###Code bt_nfaction_like_count = bt_corpus[~bt_corpus.faction].groupby([pd.Grouper(key='publishedAt', freq='W', label='right'), 'party'])['likeCount'].sum().unstack() eu_nfaction_like_count = eu_corpus[~eu_corpus.faction].groupby([pd.Grouper(key='publishedAt', freq='W', label='right'), 'party'])['likeCount'].sum().unstack() bt_nfaction_dislike_count = bt_corpus[~bt_corpus.faction].groupby([pd.Grouper(key='publishedAt', freq='W', label='right'), 'party'])['dislikeCount'].sum().unstack() eu_nfaction_dislike_count = eu_corpus[~eu_corpus.faction].groupby([pd.Grouper(key='publishedAt', freq='W', label='right'), 'party'])['dislikeCount'].sum().unstack() fig = plot_negative_hist(bt_nfaction_like_count, 
bt_nfaction_dislike_count, eu_nfaction_like_count, eu_nfaction_dislike_count, 'Dislikes / Likes') fig.savefig('outputs/yt-rating-analysis-nfaction.pdf', bbox_inches = 'tight', pad_inches = 0) ###Output _____no_output_____ ###Markdown Faction-only videos ###Code bt_faction_like_count = bt_corpus[bt_corpus.faction].groupby([pd.Grouper(key='publishedAt', freq='W', label='right'), 'party'])['likeCount'].sum().unstack() eu_faction_like_count = eu_corpus[eu_corpus.faction].groupby([pd.Grouper(key='publishedAt', freq='W', label='right'), 'party'])['likeCount'].sum().unstack() bt_faction_dislike_count = bt_corpus[bt_corpus.faction].groupby([pd.Grouper(key='publishedAt', freq='W', label='right'), 'party'])['dislikeCount'].sum().unstack() eu_faction_dislike_count = eu_corpus[eu_corpus.faction].groupby([pd.Grouper(key='publishedAt', freq='W', label='right'), 'party'])['dislikeCount'].sum().unstack() fig = plot_negative_hist(bt_faction_like_count, bt_faction_dislike_count, eu_faction_like_count, eu_faction_dislike_count, 'Dislikes / Likes', sharey=False) fig.savefig('outputs/yt-rating-analysis-faction.pdf', bbox_inches = 'tight', pad_inches = 0) data['ratingDifference'] = (data['likeCount'] - data['dislikeCount']) bt_rating_count = data[bt_period].groupby('party')['ratingDifference'].sum() eu_rating_count = data[eu_period].groupby('party')['ratingDifference'].sum() fig = plot_compare_bar(bt_rating_count, eu_rating_count, 'Rating difference') plt.savefig('outputs/yt-rating-difference.pdf', bbox_inches = 'tight', pad_inches = 0) fig = plot_bar(bt_rating_count, 'Rating difference') fig.savefig('outputs/yt-rating-difference_bt.pdf', bbox_inches = 'tight', pad_inches = 0) fig = plot_bar(eu_rating_count, 'Rating difference') fig.savefig('outputs/yt-rating-difference_eu.pdf', bbox_inches = 'tight', pad_inches = 0) ###Output _____no_output_____ ###Markdown CommentCount ###Code data['publishedAt'] = pd.to_datetime(data['publishedAt']) viewCount = data.groupby([data["publishedAt"].dt.month, data['party']]).sum()['viewCount'].unstack() viewCount count = data.groupby([data["publishedAt"].dt.week, data['party']])['title'].count().unstack() count.plot(color=count.apply(lambda x: colors[x.name])) count.sum() viewCount.plot(kind='bar', color=viewCount.apply(lambda x: colors[x.name])) data.columns ###Output _____no_output_____ ###Markdown Tags ###Code from itertools import chain from collections import Counter tags_by_party = {party: [tag.lower() for tag in chain(*data.loc[data['party'] == party, 'tags'])] for party in parties} most_used_tags_by_party = {party: Counter(tags_by_party[party]) for party in parties} most_used_tags_by_party = pd.DataFrame.from_dict(most_used_tags_by_party).fillna(0).astype(int) display_side_by_side(*(top_n(most_used_tags_by_party, party, 15) for party in parties)) ###Output _____no_output_____
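###Markdown A possible extension (a minimal sketch, not part of the original analysis): the absolute tag counts above are hard to compare across parties with very different upload volumes, so normalizing each party's column to relative frequencies makes the top-tag lists comparable. This assumes the notebook's `top_n` and `display_side_by_side` helpers also accept float-valued frames. ###Code
# Relative tag frequencies per party (each column sums to 1).
relative_tags = most_used_tags_by_party / most_used_tags_by_party.sum()
display_side_by_side(*(top_n(relative_tags, party, 15) for party in parties))
###Output _____no_output_____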
MB_Molec/archive/wnt_shh.ipynb
###Markdown Tumors ###Code import numpy as np import pandas as pd from sklearn import svm from sklearn import preprocessing from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.neural_network import MLPClassifier from xgboost import XGBClassifier from sklearn.multiclass import OneVsRestClassifier from sklearn.multiclass import OneVsOneClassifier from sklearn.model_selection import train_test_split from sklearn.model_selection import GridSearchCV from sklearn.model_selection import cross_val_score from sklearn.utils import resample from sklearn.svm import LinearSVC from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_curve, roc_auc_score, plot_confusion_matrix from sklearn.preprocessing import label_binarize import matplotlib.pyplot as plt import matplotlib.pyplot as plt import matplotlib.ticker as mtick import seaborn as sns sns.set() df = pd.read_excel("/Users/michaelzhang/Desktop/peds_tumor/mbpyradiomics/molec_classifier/binary/wnt_shh.xlsx") df.head() reduced_features = pd.read_excel("/Users/michaelzhang/Desktop/peds_tumor/mbpyradiomics/molec_classifier/binary/binary_reduced.xlsx") reduced_features_list = reduced_features.loc[reduced_features['wnt_shh'].isnull() == False, ['wnt_shh']].values.tolist() reduced_features_list = [item for sublist in reduced_features_list for item in sublist] reduced_features_list target = df["molecular"] target.value_counts() #Marking 0 as wnt and 1 shh mapping_dict = {0: 'wnt', 1: 'shh'} ## target = target.map(dict(wnt = 0, shh = 1)) ## features_after_lasso = df[reduced_features_list] X_train, X_test, y_train, y_test = train_test_split(features_after_lasso, target, test_size = 0.25, random_state = 42) y_train.value_counts() y_test.value_counts() NIR = 25/36 NIR ###Output _____no_output_____ ###Markdown Resampling to Correct for Imbalance ###Code X = pd.concat([X_train, y_train], axis = 1) X_0 = X[X['molecular'] == 0] X_1 = X[X['molecular'] == 1] upsampled_0 = resample(X_0, replace = True, n_samples = len(X_1), random_state = 42) upsampled = pd.concat([upsampled_0, X_1]) upsampled = upsampled.sample(frac = 1, random_state = 42) X_train = upsampled.iloc[:, :-1] y_train = upsampled.iloc[:, -1] y_train.value_counts() ###Output _____no_output_____ ###Markdown Standardizing Features ###Code names = X_train.columns scaler = preprocessing.StandardScaler() X_train = scaler.fit_transform(X_train) X_train = pd.DataFrame(X_train, columns = names) X_test = scaler.transform(X_test) X_test = pd.DataFrame(X_test, columns = names) def create_ci(bootstrapped_scores, name): sorted_scores = np.array(bootstrapped_scores) sorted_scores.sort() confidence_lower = sorted_scores[int(0.025 * len(sorted_scores))] confidence_upper = sorted_scores[int(0.975 * len(sorted_scores))] print("95% Confidence interval for the {} score: [{:0.4f} - {:0.4}]".format(name, confidence_lower, confidence_upper)) def specificity_score(y_true, y_pred): TN = 0 FP = 0 for i in range(len(y_true)): if y_true[i] == y_pred[i] == 0: TN += 1 elif y_true[i] == 0 and y_pred[i] == 1: FP += 1 return TN / (TN + FP) def positive_pv_score(y_true, y_pred): TP = 0 FP = 0 for i in range(len(y_true)): if y_true[i] == y_pred[i] == 1: TP += 1 elif y_true[i] == 0 and y_pred[i] == 1: FP += 1 return TP/(TP+FP) def negative_pv_score(y_true, y_pred): TN = 0 FN = 0 for i in range(len(y_true)): if y_true[i] == y_pred[i] == 0: TN += 1 elif y_true[i] == 1 and y_pred[i] == 0: FN += 1 
return TN/(TN+FN) ###Output _____no_output_____ ###Markdown SVM Grid Search for Hyperparameters ###Code svm_model = svm.SVC() grid_param = {'C': [0.1, 1, 10, 100, 1000], 'gamma': [1, 0.1, 0.01, 0.001, 0.0001], 'kernel': ['rbf', 'sigmoid', 'linear'] } svm_grid = GridSearchCV(estimator=svm_model, param_grid=grid_param, scoring='accuracy', cv=5, n_jobs=-1) svm_grid.fit(X_train, y_train) print(svm_grid.best_params_) ###Output {'C': 1, 'gamma': 1, 'kernel': 'rbf'} ###Markdown Model Performance ###Code svm_model = svm.SVC(C = 1, gamma = 1, kernel = 'rbf', probability = True) svm_model.fit(X_train, y_train) y_pred = svm_model.predict(X_test) y_train_pred = svm_model.predict(X_train) print("Accuracy Score: " + str(accuracy_score(y_test, y_pred))) print("Specificity Score: " + str(specificity_score(y_test.to_numpy(), y_pred))) print("Precision: " + str(precision_score(y_test, y_pred))) print("Recall: " + str(recall_score(y_test, y_pred))) print("F1 Score: " + str(f1_score(y_test, y_pred))) print("PPV Score: " + str(positive_pv_score(y_test.to_numpy(), y_pred))) print("NPV Score: " + str(negative_pv_score(y_test.to_numpy(), y_pred))) svm_probs = svm_model.predict_proba(X_test)[:,1] fpr, tpr, thresholds = roc_curve(y_test, svm_probs) auc = roc_auc_score(y_test, svm_probs) print("AUC: " + str(auc)) plt.figure(figsize=(7, 7)) ## ns_preds = [0 for _ in range(len(y_test))] ns_fpr, ns_tpr, _ = roc_curve(y_test, ns_preds) plt.plot(ns_fpr, ns_tpr, linestyle='--', label='No Skill') plt.plot(fpr, tpr, marker='.', label='SVM') plt.xlabel('1 - Specificity (False Positive Rate)',fontsize=16) plt.ylabel('Sensitivity (True Positive Rate)',fontsize=16) plt.legend(loc='lower right') plt.title('SVM: ROC Curve for Test Set', fontsize=20, fontweight="semibold") short_auc = round(auc,4) plt.text(.93,.1, "AUC: " + str(short_auc), horizontalalignment="center", verticalalignment="center", fontsize=14, fontweight="semibold") plt.show() #sns.reset_orig() matrix = plot_confusion_matrix(svm_model, X_test, y_test, cmap=plt.cm.Blues) plt.title('SVM Confusion Matrix') plt.show(matrix) plt.show() #n_bootstraps = 2000 #rng_seed = 42 # control reproducibility #bootstrapped_auc_scores = [] #bootstrapped_acc_scores = [] #bootstrapped_spec_scores = [] #bootstrapped_prec_scores = [] #bootstrapped_rec_scores = [] #bootstrapped_f1_scores = [] #bootstrapped_ppv_scores = [] #bootstrapped_npv_scores = [] #rng = np.random.RandomState(rng_seed) #for i in range(n_bootstraps): # # bootstrap by sampling with replacement on the prediction indices # indices = rng.randint(0, len(svm_probs), len(svm_probs)) # if len(np.unique(y_test.to_numpy()[indices])) < 2: # # We need at least one positive and one negative sample for ROC AUC # # to be defined: reject the sample # continue # auc_score = roc_auc_score(y_test.to_numpy()[indices], svm_probs[indices]) # acc_score = accuracy_score(y_test.to_numpy()[indices], y_pred[indices]) # spec_score = specificity_score(y_test.to_numpy()[indices], y_pred[indices]) # prec_score = precision_score(y_test.to_numpy()[indices], y_pred[indices]) # rec_score = recall_score(y_test.to_numpy()[indices], y_pred[indices]) # f_one_score = f1_score(y_test.to_numpy()[indices].astype('float64'), y_pred[indices].astype('float64')) # ppv_score = positive_pv_score(y_test.to_numpy()[indices], y_pred[indices]) # npv_score = negative_pv_score(y_test.to_numpy()[indices], y_pred[indices]) # bootstrapped_auc_scores.append(auc_score) # bootstrapped_acc_scores.append(acc_score) # bootstrapped_spec_scores.append(spec_score) # 
bootstrapped_prec_scores.append(prec_score) # bootstrapped_rec_scores.append(rec_score) # bootstrapped_f1_scores.append(f_one_score) # bootstrapped_ppv_scores.append(ppv_score) # bootstrapped_npv_scores.append(npv_score) #plt.hist(bootstrapped_auc_scores, bins=50) #plt.title('Histogram of the bootstrapped ROC AUC scores') #plt.show() #create_ci(bootstrapped_auc_scores, "AUC") #create_ci(bootstrapped_acc_scores, "Accuracy") #create_ci(bootstrapped_spec_scores, "Specificity") #create_ci(bootstrapped_prec_scores, "Precision") #create_ci(bootstrapped_rec_scores, "Recall") #create_ci(bootstrapped_f1_scores, "F1") #create_ci(bootstrapped_ppv_scores, "PPV") #create_ci(bootstrapped_npv_scores, "NPV") ###Output _____no_output_____ ###Markdown Logistic Regression Grid Search for Hyperparameters ###Code lr_model = LogisticRegression() grid_param = {'penalty': ['l1', 'l2', 'elasticnet', 'none'], 'C': [10, 1, 0.1, 0.01, 0.001] } lr_grid = GridSearchCV(estimator=lr_model, param_grid=grid_param, scoring='accuracy', cv=5, n_jobs=-1) lr_grid.fit(X_train, y_train) print(lr_grid.best_params_) ###Output {'C': 0.1, 'penalty': 'l2'} ###Markdown Model Performance ###Code lr_model = LogisticRegression(penalty = 'l2', C = 0.01) lr_model.fit(X_train, y_train) y_pred = lr_model.predict(X_test) print("Accuracy Score: " + str(accuracy_score(y_test, y_pred))) print("Specificity Score: " + str(specificity_score(y_test.to_numpy(), y_pred))) print("Precision: " + str(precision_score(y_test, y_pred))) print("Recall: " + str(recall_score(y_test, y_pred))) print("F1 Score: " + str(f1_score(y_test, y_pred))) print("PPV Score: " + str(positive_pv_score(y_test.to_numpy(), y_pred))) print("NPV Score: " + str(negative_pv_score(y_test.to_numpy(), y_pred))) lr_probs = lr_model.predict_proba(X_test)[:,1] fpr, tpr, thresholds = roc_curve(y_test, lr_probs) auc = roc_auc_score(y_test, lr_probs) print("AUC: " + str(auc)) plt.figure(figsize=(7, 7)) ## ns_preds = [0 for _ in range(len(y_test))] ns_fpr, ns_tpr, _ = roc_curve(y_test, ns_preds) plt.plot(ns_fpr, ns_tpr, linestyle='--', label='No Skill') plt.plot(fpr, tpr, marker='.', label='Logistic Regression') plt.xlabel('1 - Specificity (False Positive Rate)',fontsize=16) plt.ylabel('Sensitivity (True Positive Rate)',fontsize=16) plt.legend(loc='lower right') plt.title('LR: ROC Curve for Test Set', fontsize=20, fontweight="semibold") short_auc = round(auc,4) plt.text(.93,.1, "AUC: " + str(short_auc), horizontalalignment="center", verticalalignment="center", fontsize=14, fontweight="semibold") plt.show() matrix = plot_confusion_matrix(lr_model, X_test, y_test, cmap=plt.cm.Blues) plt.title('Logistic Regression Confusion Matrix') plt.show(matrix) plt.show() #n_bootstraps = 2000 #rng_seed = 42 # control reproducibility #bootstrapped_auc_scores = [] #bootstrapped_acc_scores = [] #bootstrapped_spec_scores = [] #bootstrapped_prec_scores = [] #bootstrapped_rec_scores = [] #bootstrapped_f1_scores = [] #bootstrapped_ppv_scores = [] #bootstrapped_npv_scores = [] #rng = np.random.RandomState(rng_seed) #for i in range(n_bootstraps): # # bootstrap by sampling with replacement on the prediction indices # indices = rng.randint(0, len(svm_probs), len(svm_probs)) # if len(np.unique(y_test.to_numpy()[indices])) < 2: # # We need at least one positive and one negative sample for ROC AUC # # to be defined: reject the sample # continue # auc_score = roc_auc_score(y_test.to_numpy()[indices], svm_probs[indices]) # acc_score = accuracy_score(y_test.to_numpy()[indices], y_pred[indices]) # spec_score = 
specificity_score(y_test.to_numpy()[indices], y_pred[indices]) # prec_score = precision_score(y_test.to_numpy()[indices], y_pred[indices]) # rec_score = recall_score(y_test.to_numpy()[indices], y_pred[indices]) # f_one_score = f1_score(y_test.to_numpy()[indices].astype('float64'), y_pred[indices].astype('float64')) # ppv_score = positive_pv_score(y_test.to_numpy()[indices], y_pred[indices]) # npv_score = negative_pv_score(y_test.to_numpy()[indices], y_pred[indices]) # bootstrapped_auc_scores.append(auc_score) # bootstrapped_acc_scores.append(acc_score) # bootstrapped_spec_scores.append(spec_score) # bootstrapped_prec_scores.append(prec_score) # bootstrapped_rec_scores.append(rec_score) # bootstrapped_f1_scores.append(f_one_score) # bootstrapped_ppv_scores.append(ppv_score) # bootstrapped_npv_scores.append(npv_score) #create_ci(bootstrapped_auc_scores, "AUC") #create_ci(bootstrapped_acc_scores, "Accuracy") #create_ci(bootstrapped_spec_scores, "Specificity") #create_ci(bootstrapped_prec_scores, "Precision") #create_ci(bootstrapped_rec_scores, "Recall") #create_ci(bootstrapped_f1_scores, "F1") #create_ci(bootstrapped_ppv_scores, "PPV") #create_ci(bootstrapped_npv_scores, "NPV") #feature_labels_dict = {'t1_original_shape_Elongation': 'T1 Original Shape Elongation', # 't2_log-sigma-5-mm-3D_firstorder_90Percentile': 'T2 Log Sigma 5mm 3D 90th Percentile (1st Order)', # 't2_original_shape_Elongation': 'T2 Original Shape Elongation', # 't2_original_shape_Flatness': 'T2 Original Shape Flatness', # 't2_wavelet-LLL_glcm_Idmn': 'T2 Wavelet LLL Glcm Idmn', # 't2_wavelet-HLL_firstorder_Kurtosis': 'T2 Wavelet HLL Kurtosis (1st Order)',} sns.set() feature_names = X_test.columns.to_list() importances = lr_model.coef_[0] importances = np.abs(importances) / np.sum(np.abs(importances)) indices = np.argsort(importances) plt.figure(figsize=(10,8)) plt.title('Logistic Regression: Feature Importance',fontsize=20, fontweight='semibold') plt.barh(range(len(indices)), importances[indices], color='b', align='center') plt.xlim(0, 0.30) for index, value in enumerate(importances[indices]): plt.text(value, index, " {:.1%}".format(value), fontsize=14) #plt.yticks(range(len(indices)), [feature_labels_dict[feature_names[i]] for i in indices],fontsize=14) plt.yticks(range(len(indices)), [feature_names[i] for i in indices]) plt.xlabel('Relative Importance',fontsize=16) plt.gca().xaxis.set_major_formatter(mtick.PercentFormatter(1)) plt.show() #sns.reset_orig() ###Output _____no_output_____ ###Markdown KNN Grid Search for Hyperparameters ###Code knn_model = KNeighborsClassifier() grid_param = {'n_neighbors': [3, 5, 7, 9] } knn_grid = GridSearchCV(estimator=knn_model, param_grid=grid_param, scoring='accuracy', cv=5, n_jobs=-1) knn_grid.fit(X_train, y_train) print(knn_grid.best_params_) ###Output {'n_neighbors': 3} ###Markdown Model Performance ###Code knn_model = KNeighborsClassifier(n_neighbors = 3) knn_model.fit(X_train, y_train) y_pred = knn_model.predict(X_test) print("Accuracy Score: " + str(accuracy_score(y_test, y_pred))) print("Specificity Score: " + str(specificity_score(y_test.to_numpy(), y_pred))) print("Precision: " + str(precision_score(y_test, y_pred))) print("Recall: " + str(recall_score(y_test, y_pred))) print("F1 Score: " + str(f1_score(y_test, y_pred))) print("PPV Score: " + str(positive_pv_score(y_test.to_numpy(), y_pred))) print("NPV Score: " + str(negative_pv_score(y_test.to_numpy(), y_pred))) knn_probs = knn_model.predict_proba(X_test)[:,1] fpr, tpr, thresholds = roc_curve(y_test, knn_probs) auc = 
roc_auc_score(y_test, knn_probs) print("AUC: " + str(auc)) plt.figure(figsize=(7, 7)) ## ns_preds = [0 for _ in range(len(y_test))] ns_fpr, ns_tpr, _ = roc_curve(y_test, ns_preds) plt.plot(ns_fpr, ns_tpr, linestyle='--', label='No Skill') plt.plot(fpr, tpr, marker='.', label='K Nearest Neighbors') plt.xlabel('1 - Specificity (False Positive Rate)',fontsize=16) plt.ylabel('Sensitivity (True Positive Rate)',fontsize=16) plt.legend(loc='lower right') plt.title('KNN: ROC Curve for Test Set', fontsize=20, fontweight="semibold") short_auc = round(auc,4) plt.text(.93,.1, "AUC: " + str(short_auc), horizontalalignment="center", verticalalignment="center", fontsize=14, fontweight="semibold") plt.show() matrix = plot_confusion_matrix(knn_model, X_test, y_test, cmap=plt.cm.Blues) plt.title('KNN Confusion Matrix') plt.show(matrix) plt.show() #n_bootstraps = 2000 #rng_seed = 42 # control reproducibility #bootstrapped_auc_scores = [] #bootstrapped_acc_scores = [] #bootstrapped_spec_scores = [] #bootstrapped_prec_scores = [] #bootstrapped_rec_scores = [] #bootstrapped_f1_scores = [] #bootstrapped_ppv_scores = [] #bootstrapped_npv_scores = [] #rng = np.random.RandomState(rng_seed) #for i in range(n_bootstraps): # # bootstrap by sampling with replacement on the prediction indices # indices = rng.randint(0, len(svm_probs), len(svm_probs)) # if len(np.unique(y_test.to_numpy()[indices])) < 2: # # We need at least one positive and one negative sample for ROC AUC # # to be defined: reject the sample # continue # auc_score = roc_auc_score(y_test.to_numpy()[indices], svm_probs[indices]) # acc_score = accuracy_score(y_test.to_numpy()[indices], y_pred[indices]) # spec_score = specificity_score(y_test.to_numpy()[indices], y_pred[indices]) # prec_score = precision_score(y_test.to_numpy()[indices], y_pred[indices]) # rec_score = recall_score(y_test.to_numpy()[indices], y_pred[indices]) # f_one_score = f1_score(y_test.to_numpy()[indices].astype('float64'), y_pred[indices].astype('float64')) # ppv_score = positive_pv_score(y_test.to_numpy()[indices], y_pred[indices]) # npv_score = negative_pv_score(y_test.to_numpy()[indices], y_pred[indices]) # bootstrapped_auc_scores.append(auc_score) # bootstrapped_acc_scores.append(acc_score) # bootstrapped_spec_scores.append(spec_score) # bootstrapped_prec_scores.append(prec_score) # bootstrapped_rec_scores.append(rec_score) # bootstrapped_f1_scores.append(f_one_score) # bootstrapped_ppv_scores.append(ppv_score) # bootstrapped_npv_scores.append(npv_score) #create_ci(bootstrapped_auc_scores, "AUC") #create_ci(bootstrapped_acc_scores, "Accuracy") #create_ci(bootstrapped_spec_scores, "Specificity") #create_ci(bootstrapped_prec_scores, "Precision") #create_ci(bootstrapped_rec_scores, "Recall") #create_ci(bootstrapped_f1_scores, "F1") #create_ci(bootstrapped_ppv_scores, "PPV") #create_ci(bootstrapped_npv_scores, "NPV") ###Output _____no_output_____ ###Markdown Random Forest Grid Search for Hyperparameters ###Code rf_model = RandomForestClassifier(random_state = 42) grid_param = {'n_estimators': [None, 50, 100, 200, 300], 'max_depth': [1, 2, 3, 4] } rf_grid = GridSearchCV(estimator=rf_model, param_grid=grid_param, scoring='accuracy', cv=5, n_jobs=-1) rf_grid.fit(X_train, y_train) print(rf_grid.best_params_) ###Output {'max_depth': 4, 'n_estimators': 100} ###Markdown Model Performance ###Code rf_model = RandomForestClassifier(max_depth = 4, n_estimators = 100, random_state = 42) rf_model.fit(X_train, y_train) y_pred = rf_model.predict(X_test) print("Accuracy Score: " + 
str(accuracy_score(y_test, y_pred))) print("Specificity Score: " + str(specificity_score(y_test.to_numpy(), y_pred))) print("Precision: " + str(precision_score(y_test, y_pred))) print("Recall: " + str(recall_score(y_test, y_pred))) print("F1 Score: " + str(f1_score(y_test, y_pred))) print("PPV Score: " + str(positive_pv_score(y_test.to_numpy(), y_pred))) print("NPV Score: " + str(negative_pv_score(y_test.to_numpy(), y_pred))) rf_probs = rf_model.predict_proba(X_test)[:,1] fpr, tpr, thresholds = roc_curve(y_test, rf_probs) auc = roc_auc_score(y_test, rf_probs) print("AUC: " + str(auc)) plt.figure(figsize=(7, 7)) ## ns_preds = [0 for _ in range(len(y_test))] ns_fpr, ns_tpr, _ = roc_curve(y_test, ns_preds) plt.plot(ns_fpr, ns_tpr, linestyle='--', label='No Skill') plt.plot(fpr, tpr, marker='.', label='Random Forest') plt.xlabel('1 - Specificity (False Positive Rate)',fontsize=16) plt.ylabel('Sensitivity (True Positive Rate)',fontsize=16) plt.legend(loc='lower right') plt.title('RF: ROC Curve for Test Set', fontsize=20, fontweight="semibold") short_auc = round(auc,4) plt.text(.93,.1, "AUC: " + str(short_auc), horizontalalignment="center", verticalalignment="center", fontsize=14, fontweight="semibold") plt.show() matrix = plot_confusion_matrix(rf_model, X_test, y_test, cmap=plt.cm.Blues) plt.title('Random Forest Confusion Matrix') plt.show(matrix) plt.show() #n_bootstraps = 2000 #rng_seed = 42 # control reproducibility #bootstrapped_auc_scores = [] #bootstrapped_acc_scores = [] #bootstrapped_spec_scores = [] #bootstrapped_prec_scores = [] #bootstrapped_rec_scores = [] #bootstrapped_f1_scores = [] #bootstrapped_ppv_scores = [] #bootstrapped_npv_scores = [] #rng = np.random.RandomState(rng_seed) #for i in range(n_bootstraps): # # bootstrap by sampling with replacement on the prediction indices # indices = rng.randint(0, len(svm_probs), len(svm_probs)) # if len(np.unique(y_test.to_numpy()[indices])) < 2: # # We need at least one positive and one negative sample for ROC AUC # # to be defined: reject the sample # continue # auc_score = roc_auc_score(y_test.to_numpy()[indices], svm_probs[indices]) # acc_score = accuracy_score(y_test.to_numpy()[indices], y_pred[indices]) # spec_score = specificity_score(y_test.to_numpy()[indices], y_pred[indices]) # prec_score = precision_score(y_test.to_numpy()[indices], y_pred[indices]) # rec_score = recall_score(y_test.to_numpy()[indices], y_pred[indices]) # f_one_score = f1_score(y_test.to_numpy()[indices].astype('float64'), y_pred[indices].astype('float64')) # ppv_score = positive_pv_score(y_test.to_numpy()[indices], y_pred[indices]) # npv_score = negative_pv_score(y_test.to_numpy()[indices], y_pred[indices]) # bootstrapped_auc_scores.append(auc_score) # bootstrapped_acc_scores.append(acc_score) # bootstrapped_spec_scores.append(spec_score) # bootstrapped_prec_scores.append(prec_score) # bootstrapped_rec_scores.append(rec_score) # bootstrapped_f1_scores.append(f_one_score) # bootstrapped_ppv_scores.append(ppv_score) # bootstrapped_npv_scores.append(npv_score) #create_ci(bootstrapped_auc_scores, "AUC") #create_ci(bootstrapped_acc_scores, "Accuracy") #create_ci(bootstrapped_spec_scores, "Specificity") #create_ci(bootstrapped_prec_scores, "Precision") #create_ci(bootstrapped_rec_scores, "Recall") #create_ci(bootstrapped_f1_scores, "F1") #create_ci(bootstrapped_ppv_scores, "PPV") #create_ci(bootstrapped_npv_scores, "NPV") sns.set() feature_names = X_test.columns.to_list() importances = rf_model.feature_importances_ indices = np.argsort(importances) 
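# Optional sketch (an added cross-check, not in the original run):
# impurity-based feature_importances_ can favor high-cardinality features, so
# permutation importance on the held-out set is a common alternative.
# Uncomment to try; assumes scikit-learn >= 0.22 is installed.
# from sklearn.inspection import permutation_importance
# perm = permutation_importance(rf_model, X_test, y_test, n_repeats=10, random_state=42)
# importances = perm.importances_mean
# indices = np.argsort(importances)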
plt.figure(figsize=(10,8)) plt.title('Random Forest: Feature Importance',fontsize=20, fontweight='semibold') plt.barh(range(len(indices)), importances[indices], color='b', align='center') plt.xlim(0, 0.35) for index, value in enumerate(importances[indices]): plt.text(value, index, " {:.1%}".format(value), fontsize=14) #plt.yticks(range(len(indices)), [feature_labels_dict[feature_names[i]] for i in indices],fontsize=14) plt.yticks(range(len(indices)), [feature_names[i] for i in indices]) plt.xlabel('Relative Importance',fontsize=16) plt.gca().xaxis.set_major_formatter(mtick.PercentFormatter(1)) plt.show() #sns.reset_orig() ###Output _____no_output_____ ###Markdown XGBoost Grid Search for Hyperparameters ###Code xgb_model = XGBClassifier(random_state = 42) grid_param = {'learning_rate': [0.1, 0.2, 0.3, 0.4, 0.5], 'max_depth': [3, 4, 5, 6] } xgb_grid = GridSearchCV(estimator=xgb_model, param_grid=grid_param, scoring='accuracy', cv=5, n_jobs=-1) xgb_grid.fit(X_train, y_train) print(xgb_grid.best_params_) ###Output {'learning_rate': 0.4, 'max_depth': 6} ###Markdown Model Performance ###Code xgb_model = XGBClassifier(learning_rate = 0.4, max_depth = 6, random_state = 42) xgb_model.fit(X_train, y_train) y_pred = xgb_model.predict(X_test) print("Accuracy Score: " + str(accuracy_score(y_test, y_pred))) print("Specificity Score: " + str(specificity_score(y_test.to_numpy(), y_pred))) print("Precision: " + str(precision_score(y_test, y_pred))) print("Recall: " + str(recall_score(y_test, y_pred))) print("F1 Score: " + str(f1_score(y_test, y_pred))) print("PPV Score: " + str(positive_pv_score(y_test.to_numpy(), y_pred))) print("NPV Score: " + str(negative_pv_score(y_test.to_numpy(), y_pred))) xgb_probs = xgb_model.predict_proba(X_test)[:,1] fpr, tpr, thresholds = roc_curve(y_test, xgb_probs) auc = roc_auc_score(y_test, xgb_probs) print("AUC: " + str(auc)) plt.figure(figsize=(7, 7)) ## ns_preds = [0 for _ in range(len(y_test))] ns_fpr, ns_tpr, _ = roc_curve(y_test, ns_preds) plt.plot(ns_fpr, ns_tpr, linestyle='--', label='No Skill') plt.plot(fpr, tpr, marker='.', label='XGBoost') plt.xlabel('1 - Specificity (False Positive Rate)',fontsize=16) plt.ylabel('Sensitivity (True Positive Rate)',fontsize=16) plt.legend(loc='lower right') plt.title('XGB: ROC Curve for Test Set', fontsize=20, fontweight="semibold") short_auc = round(auc,4) plt.text(.93,.1, "AUC: " + str(short_auc), horizontalalignment="center", verticalalignment="center", fontsize=14, fontweight="semibold") plt.show() matrix = plot_confusion_matrix(xgb_model, X_test, y_test, cmap=plt.cm.Blues) plt.title('XGBoost Confusion Matrix') plt.show(matrix) plt.show() #n_bootstraps = 2000 #rng_seed = 42 # control reproducibility #bootstrapped_auc_scores = [] #bootstrapped_acc_scores = [] #bootstrapped_spec_scores = [] #bootstrapped_prec_scores = [] #bootstrapped_rec_scores = [] #bootstrapped_f1_scores = [] #bootstrapped_ppv_scores = [] #bootstrapped_npv_scores = [] #rng = np.random.RandomState(rng_seed) #for i in range(n_bootstraps): # # bootstrap by sampling with replacement on the prediction indices # indices = rng.randint(0, len(svm_probs), len(svm_probs)) # if len(np.unique(y_test.to_numpy()[indices])) < 2: # # We need at least one positive and one negative sample for ROC AUC # # to be defined: reject the sample # continue # auc_score = roc_auc_score(y_test.to_numpy()[indices], svm_probs[indices]) # acc_score = accuracy_score(y_test.to_numpy()[indices], y_pred[indices]) # spec_score = specificity_score(y_test.to_numpy()[indices], y_pred[indices]) # 
prec_score = precision_score(y_test.to_numpy()[indices], y_pred[indices]) # rec_score = recall_score(y_test.to_numpy()[indices], y_pred[indices]) # f_one_score = f1_score(y_test.to_numpy()[indices].astype('float64'), y_pred[indices].astype('float64')) # ppv_score = positive_pv_score(y_test.to_numpy()[indices], y_pred[indices]) # npv_score = negative_pv_score(y_test.to_numpy()[indices], y_pred[indices]) # bootstrapped_auc_scores.append(auc_score) # bootstrapped_acc_scores.append(acc_score) # bootstrapped_spec_scores.append(spec_score) # bootstrapped_prec_scores.append(prec_score) # bootstrapped_rec_scores.append(rec_score) # bootstrapped_f1_scores.append(f_one_score) # bootstrapped_ppv_scores.append(ppv_score) # bootstrapped_npv_scores.append(npv_score) #create_ci(bootstrapped_auc_scores, "AUC") #create_ci(bootstrapped_acc_scores, "Accuracy") #create_ci(bootstrapped_spec_scores, "Specificity") #create_ci(bootstrapped_prec_scores, "Precision") #create_ci(bootstrapped_rec_scores, "Recall") #create_ci(bootstrapped_f1_scores, "F1") #create_ci(bootstrapped_ppv_scores, "PPV") #create_ci(bootstrapped_npv_scores, "NPV") sns.set() feature_names = X_test.columns.to_list() importances = xgb_model.feature_importances_ indices = np.argsort(importances) plt.figure(figsize=(10,8)) plt.title('eXtreme Gradient Boosting: Feature Importance',fontsize=20, fontweight='semibold') plt.barh(range(len(indices)), importances[indices], color='b', align='center') plt.xlim(0, 0.5) for index, value in enumerate(importances[indices]): plt.text(value, index, " {:.1%}".format(value), fontsize=14) #plt.yticks(range(len(indices)), [feature_labels_dict[feature_names[i]] for i in indices],fontsize=14) plt.yticks(range(len(indices)), [feature_names[i] for i in indices]) plt.xlabel('Relative Importance',fontsize=16) plt.gca().xaxis.set_major_formatter(mtick.PercentFormatter(1)) plt.show() #sns.reset_orig() ###Output _____no_output_____ ###Markdown Neural Net Grid Search for Hyperparameters ###Code nn_model = MLPClassifier(max_iter = 2000, random_state = 42) grid_param = {'hidden_layer_sizes': [(100, 100, 50), (50, 100, 50), (100, 50, 100)], 'learning_rate': ['constant', 'invscaling', 'adaptive'] } nn_grid = GridSearchCV(estimator=nn_model, param_grid=grid_param, scoring='accuracy', cv=5, n_jobs=-1) nn_grid.fit(X_train, y_train) print(nn_grid.best_params_) ###Output {'hidden_layer_sizes': (100, 50, 100), 'learning_rate': 'constant'} ###Markdown Model Performance ###Code nn_model = MLPClassifier(hidden_layer_sizes = (100, 50, 100), learning_rate = 'constant', max_iter = 2000, random_state = 42) nn_model.fit(X_train, y_train) y_pred = nn_model.predict(X_test) print("Accuracy Score: " + str(accuracy_score(y_test, y_pred))) print("Specificity Score: " + str(specificity_score(y_test.to_numpy(), y_pred))) print("Precision: " + str(precision_score(y_test, y_pred))) print("Recall: " + str(recall_score(y_test, y_pred))) print("F1 Score: " + str(f1_score(y_test, y_pred))) print("PPV Score: " + str(positive_pv_score(y_test.to_numpy(), y_pred))) print("NPV Score: " + str(negative_pv_score(y_test.to_numpy(), y_pred))) nn_probs = nn_model.predict_proba(X_test)[:,1] fpr, tpr, thresholds = roc_curve(y_test, nn_probs) auc = roc_auc_score(y_test, nn_probs) print("AUC: " + str(auc)) plt.figure(figsize=(7, 7)) ## ns_preds = [0 for _ in range(len(y_test))] ns_fpr, ns_tpr, _ = roc_curve(y_test, ns_preds) plt.plot(ns_fpr, ns_tpr, linestyle='--', label='No Skill') plt.plot(fpr, tpr, marker='.', label='Neural Network') plt.xlabel('1 - Specificity (False Positive Rate)',fontsize=16) plt.ylabel('Sensitivity (True Positive 
Rate)',fontsize=16) plt.legend(loc='lower right') plt.title('NN: ROC Curve for Test Set', fontsize=20, fontweight="semibold") short_auc = round(auc,4) plt.text(.93,.1, "AUC: " + str(short_auc), horizontalalignment="center", verticalalignment="center", fontsize=14, fontweight="semibold") plt.show() matrix = plot_confusion_matrix(nn_model, X_test, y_test, cmap=plt.cm.Blues) plt.title('Neural Network Confusion Matrix') plt.show(matrix) plt.show() #n_bootstraps = 2000 #rng_seed = 42 # control reproducibility #bootstrapped_auc_scores = [] #bootstrapped_acc_scores = [] #bootstrapped_spec_scores = [] #bootstrapped_prec_scores = [] #bootstrapped_rec_scores = [] #bootstrapped_f1_scores = [] #bootstrapped_ppv_scores = [] #bootstrapped_npv_scores = [] #rng = np.random.RandomState(rng_seed) #for i in range(n_bootstraps): # # bootstrap by sampling with replacement on the prediction indices # indices = rng.randint(0, len(svm_probs), len(svm_probs)) # if len(np.unique(y_test.to_numpy()[indices])) < 2: # # We need at least one positive and one negative sample for ROC AUC # # to be defined: reject the sample # continue # auc_score = roc_auc_score(y_test.to_numpy()[indices], svm_probs[indices]) # acc_score = accuracy_score(y_test.to_numpy()[indices], y_pred[indices]) # spec_score = specificity_score(y_test.to_numpy()[indices], y_pred[indices]) # prec_score = precision_score(y_test.to_numpy()[indices], y_pred[indices]) # rec_score = recall_score(y_test.to_numpy()[indices], y_pred[indices]) # f_one_score = f1_score(y_test.to_numpy()[indices].astype('float64'), y_pred[indices].astype('float64')) # ppv_score = positive_pv_score(y_test.to_numpy()[indices], y_pred[indices]) # npv_score = negative_pv_score(y_test.to_numpy()[indices], y_pred[indices]) # bootstrapped_auc_scores.append(auc_score) # bootstrapped_acc_scores.append(acc_score) # bootstrapped_spec_scores.append(spec_score) # bootstrapped_prec_scores.append(prec_score) # bootstrapped_rec_scores.append(rec_score) # bootstrapped_f1_scores.append(f_one_score) # bootstrapped_ppv_scores.append(ppv_score) # bootstrapped_npv_scores.append(npv_score) #create_ci(bootstrapped_auc_scores, "AUC") #create_ci(bootstrapped_acc_scores, "Accuracy") #create_ci(bootstrapped_spec_scores, "Specificity") #create_ci(bootstrapped_prec_scores, "Precision") #create_ci(bootstrapped_rec_scores, "Recall") #create_ci(bootstrapped_f1_scores, "F1") #create_ci(bootstrapped_ppv_scores, "PPV") #create_ci(bootstrapped_npv_scores, "NPV") ###Output 95% Confidence interval for the AUC score: [0.5184 - 0.9241] 95% Confidence interval for the Accuracy score: [0.5833 - 0.8889] 95% Confidence interval for the Specificity score: [0.3333 - 0.9231] 95% Confidence interval for the Precision score: [0.6667 - 0.9615] 95% Confidence interval for the Recall score: [0.6364 - 0.9474] 95% Confidence interval for the F1 score: [0.6818 - 0.9259] 95% Confidence interval for the PPV score: [0.6667 - 0.9615] 95% Confidence interval for the NPV score: [0.2857 - 0.8571]
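###Markdown A minimal closing sketch (not part of the original notebook): since all six fitted classifiers are still in memory, their held-out performance can be tabulated side by side for a quick comparison against the no-information rate computed earlier. ###Code
# Side-by-side test-set comparison of the classifiers trained above.
models = {'SVM': svm_model, 'LR': lr_model, 'KNN': knn_model,
          'RF': rf_model, 'XGB': xgb_model, 'NN': nn_model}
summary = pd.DataFrame({
    name: {'accuracy': accuracy_score(y_test, m.predict(X_test)),
           'auc': roc_auc_score(y_test, m.predict_proba(X_test)[:, 1])}
    for name, m in models.items()}).T
summary.sort_values('auc', ascending=False)
###Output _____no_output_____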
contents/k-nearest-neighbor/Iris Dataset.ipynb
###Markdown Understanding K Nearest Neighbor Algorithm with Iris Dataset ###Code from sklearn.datasets import load_iris import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns iris = load_iris() print(iris.DESCR[:500]) def get_iris_df(): iris = load_iris() df = pd.DataFrame(data=iris['data'], columns=iris['feature_names']) df['target'] = pd.Series(data=iris.target).apply(lambda x: iris.target_names[x]) return df X = iris.data y = iris.target df = get_iris_df() df.sample(5) ###Output _____no_output_____ ###Markdown Data Exploration ###Code df.describe() iris.target_names iris.feature_names ###Output _____no_output_____ ###Markdown With Graphs ###Code sns.heatmap(df.corr(), annot = True, cmap = 'flare') plt.show() sns.pairplot(df, hue="target", palette="husl", markers=["o", "s", "D"]) # Plot the training points def plot_scatter(ax, ds1, ds2, target): ax.scatter( ds1, ds2, s=50, c=target, edgecolor='k') ax.set_xlabel(ds1.name) ax.set_ylabel(ds2.name) return ax fig, (ax1, ax2, ax3) = plt.subplots(1, 3) fig.set_size_inches(15, 4) plot_scatter(ax1, df.iloc[:, 0], df.iloc[:, 1], iris.target) plot_scatter(ax2, df.iloc[:, 0], df.iloc[:, 2], iris.target) plot_scatter(ax3, df.iloc[:, 1], df.iloc[:, 2], iris.target) plt.show() def plot_in_3d(dsx, dsy, target, dsz=0, size_array=50): ax.scatter3D(dsx, dsy, dsz, c=target, cmap=plt.cm.Set1, edgecolor='k', s = size_array) ax.set_xlabel(dsx.name) ax.set_ylabel(dsy.name) # ax.set_zlabel(dsz.name) ax.set_title("{} vs {}".format(dsx.name, dsy.name)) return ax fig = plt.figure(1, figsize=(10, 5)) ax = fig.add_subplot(1, 2, 1, projection='3d') ax = plot_in_3d( df.iloc[:, 0], df.iloc[:, 1], iris.target, dsz=df.iloc[:, 2]) fig.add_axes(ax) ax = fig.add_subplot(1, 2, 2, projection='3d') ax = plot_in_3d( df.iloc[:, 0], df.iloc[:, 1], iris.target, size_array=df.iloc[:, 2] *50) fig.add_axes(ax) plt.show() from pandas.plotting import andrews_curves, parallel_coordinates, radviz # Andrews Curves involve using attributes of samples as coefficients for Fourier series # and then plotting these andrews_curves(df, "target") # plots each feature on a separate column & then draws lines # connecting the features for each data sample parallel_coordinates(df, "target") # puts each feature as a point on a 2D plane, and then simulates # having each sample attached to those points through a spring weighted # by the relative value for that feature radviz(df, "target") ###Output _____no_output_____ ###Markdown Modeling and Metrics ###Code from sklearn.neighbors import KNeighborsClassifier from sklearn import metrics model = KNeighborsClassifier(n_neighbors=3) model.fit(X, y) y_pred = model.predict(X) model.score(X, y) metrics.accuracy_score(y_true=y, y_pred=y_pred) conf_matrix = pd.DataFrame( data=metrics.confusion_matrix(y_true=y, y_pred=y_pred), columns=zip(['Predicted'] * 3, iris.target_names), index=zip(['Actual'] * 3, iris.target_names) ) conf_matrix.columns = pd.MultiIndex.from_tuples(conf_matrix.columns) conf_matrix.index = pd.MultiIndex.from_tuples(conf_matrix.index) conf_matrix ###Output _____no_output_____ ###Markdown How to interpret the classification report?- Precision -> what fraction of positive predictions are correct- Recall -> what fraction of all positive instances does the classifier correctly identify as positive- F1 score -> conveys the balance between the precision and the recall- Support -> number of actual occurrences of the class in the specified dataset ###Code print(metrics.classification_report(y_true=y, y_pred=y_pred)) 
# Elbow method scaley = [] error = [] x = list(range(1,20)) for i in x: model = KNeighborsClassifier(n_neighbors=i) model.fit(X, y) scaley.append(model.score(X, y)) error.append(np.mean(model.predict(X) != y)) plt.plot(x, scaley) plt.xticks(x) plt.title('Model performance vs Value of K') plt.show() plt.figure() plt.plot(x, error, color='red', linestyle='dashed', marker='o', markerfacecolor='blue', markersize=10) plt.title('Error Rate K Value') plt.xlabel('K Value') plt.ylabel('Mean Error') plt.show() ###Output _____no_output_____
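###Markdown Note that the scores above are computed on the same data the model was fit on, which is overly optimistic for KNN (K=1 is always perfect on its own training data). A minimal sketch of a fairer elbow curve using 5-fold cross-validation: ###Code
from sklearn.model_selection import cross_val_score

# Cross-validated accuracy for each candidate K.
cv_scores = []
ks = list(range(1, 20))
for k in ks:
    knn = KNeighborsClassifier(n_neighbors=k)
    cv_scores.append(cross_val_score(knn, X, y, cv=5).mean())

plt.plot(ks, cv_scores)
plt.xticks(ks)
plt.title('Cross-validated accuracy vs Value of K')
plt.show()
###Output _____no_output_____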
tsf_examples/Forecasting_w_LSTM.ipynb
###Markdown Forecasting with an LSTM Setup ###Code import numpy as np import matplotlib.pyplot as plt import tensorflow as tf keras = tf.keras def plot_series(time, series, format="-", start=0, end=None, label=None): plt.plot(time[start:end], series[start:end], format, label=label) plt.xlabel("Time") plt.ylabel("Value") if label: plt.legend(fontsize=14) plt.grid(True) def trend(time, slope=0): return slope * time def seasonal_pattern(season_time): """Just an arbitrary pattern, you can change it if you wish""" return np.where(season_time < 0.4, np.cos(season_time * 2 * np.pi), 1 / np.exp(3 * season_time)) def seasonality(time, period, amplitude=1, phase=0): """Repeats the same pattern at each period""" season_time = ((time + phase) % period) / period return amplitude * seasonal_pattern(season_time) def white_noise(time, noise_level=1, seed=None): rnd = np.random.RandomState(seed) return rnd.randn(len(time)) * noise_level def sequential_window_dataset(series, window_size): series = tf.expand_dims(series, axis=-1) ds = tf.data.Dataset.from_tensor_slices(series) ds = ds.window(window_size + 1, shift=window_size, drop_remainder=True) ds = ds.flat_map(lambda window: window.batch(window_size + 1)) ds = ds.map(lambda window: (window[:-1], window[1:])) return ds.batch(1).prefetch(1) time = np.arange(4 * 365 + 1) slope = 0.05 baseline = 10 amplitude = 40 series = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude) noise_level = 5 noise = white_noise(time, noise_level, seed=42) series += noise plt.figure(figsize=(10, 6)) plot_series(time, series) plt.show() split_time = 1000 time_train = time[:split_time] x_train = series[:split_time] time_valid = time[split_time:] x_valid = series[split_time:] class ResetStatesCallback(keras.callbacks.Callback): def on_epoch_begin(self, epoch, logs): self.model.reset_states() ###Output _____no_output_____ ###Markdown LSTM RNN Forecasting ###Code keras.backend.clear_session() tf.random.set_seed(42) np.random.seed(42) window_size = 30 train_set = sequential_window_dataset(x_train, window_size) model = keras.models.Sequential([ keras.layers.LSTM(100, return_sequences=True, stateful=True, batch_input_shape=[1, None, 1]), keras.layers.LSTM(100, return_sequences=True, stateful=True), keras.layers.Dense(1), keras.layers.Lambda(lambda x: x * 200.0) ]) lr_schedule = keras.callbacks.LearningRateScheduler( lambda epoch: 1e-8 * 10**(epoch / 20)) reset_states = ResetStatesCallback() optimizer = keras.optimizers.SGD(lr=1e-8, momentum=0.9) model.compile(loss=keras.losses.Huber(), optimizer=optimizer, metrics=["mae"]) history = model.fit(train_set, epochs=100, callbacks=[lr_schedule, reset_states]) plt.semilogx(history.history["lr"], history.history["loss"]) plt.axis([1e-8, 1e-4, 0, 30]) keras.backend.clear_session() tf.random.set_seed(42) np.random.seed(42) window_size = 30 train_set = sequential_window_dataset(x_train, window_size) valid_set = sequential_window_dataset(x_valid, window_size) model = keras.models.Sequential([ keras.layers.LSTM(100, return_sequences=True, stateful=True, batch_input_shape=[1, None, 1]), keras.layers.LSTM(100, return_sequences=True, stateful=True), keras.layers.Dense(1), keras.layers.Lambda(lambda x: x * 200.0) ]) optimizer = keras.optimizers.SGD(lr=5e-7, momentum=0.9) model.compile(loss=keras.losses.Huber(), optimizer=optimizer, metrics=["mae"]) reset_states = ResetStatesCallback() model_checkpoint = keras.callbacks.ModelCheckpoint( "my_checkpoint.h5", save_best_only=True) early_stopping = 
keras.callbacks.EarlyStopping(patience=50) model.fit(train_set, epochs=500, validation_data=valid_set, callbacks=[early_stopping, model_checkpoint, reset_states]) model = keras.models.load_model("my_checkpoint.h5") rnn_forecast = model.predict(series[np.newaxis, :, np.newaxis]) rnn_forecast = rnn_forecast[0, split_time - 1:-1, 0] plt.figure(figsize=(10, 6)) plot_series(time_valid, x_valid) plot_series(time_valid, rnn_forecast) keras.metrics.mean_absolute_error(x_valid, rnn_forecast).numpy() ###Output _____no_output_____
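###Markdown To put the LSTM's MAE in context, a minimal baseline sketch (not in the original notebook): a naive forecast that simply repeats the previous time step. The series is strongly autocorrelated, so the model should beat this number to justify its complexity. ###Code
# Naive "persistence" baseline: predict each value with its predecessor.
naive_forecast = series[split_time - 1:-1]
keras.metrics.mean_absolute_error(x_valid, naive_forecast).numpy()
###Output _____no_output_____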
week1/6. if statements.ipynb
###Markdown logical operators - if https://docs.python.org/3/tutorial/controlflow.html#if-statements ###Code a = 0 if a==1: print('a equals 1') else: print('a does not equal 1') x = int(input("Please enter an integer: ")) if x < 0: x = 0 print('Negative changed to zero') elif x == 0: print('Zero') elif x == 1: print('Single') else: print('More') # notice that the code outside the if block will be executed no matter what, # just be careful about this x = 0 if x == 0: print('x is 0') print('x is not 0') ###Output x is 0 x is not 0
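###Markdown Conditions can also be combined with the boolean operators `and`, `or`, and `not` (a small added example): ###Code
age = 25
has_ticket = True
if age >= 18 and has_ticket:
    print('entry allowed')
elif age >= 18 or has_ticket:
    print('only one requirement met')
else:
    print('entry denied')
###Output entry allowed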
1_Polynomial_Regression.ipynb
###Markdown Polynomial Regression - ML from the Fundamentals (1)* **Check out the corresponding blog post: https://rickwierenga.com/blog/ml-fundamentals/polynomial-regression.html*** Full series: https://rickwierenga.com/blog/ml-fundamentals/ ###Code %pylab inline ###Output Populating the interactive namespace from numpy and matplotlib ###Markdown Data & normalization ###Code X = np.array([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]).T y = np.array([45000, 50000, 60000, 80000, 110000, 150000, 200000, 300000, 500000, 1000000]) m, n = X.shape plt.plot(X, y, 'rx') X = np.hstack((np.ones((m, 1)), X)) X = np.hstack((X, (X[:, 1] ** 2).reshape((m, 1)), (X[:, 1] ** 3).reshape((m, 1)), (X[:, 1] ** 4).reshape((m, 1)))); X _, n = X.shape X[:, 1:] = (X[:, 1:] - np.mean(X[:, 1:], axis=0)) / np.std(X[:, 1:], axis=0) np.mean(X[:, 1:], axis=0), np.std(X[:, 1:], axis=0) ###Output _____no_output_____ ###Markdown Hypothesis & predictions ###Code def h(X, theta): return X @ theta theta = np.random.random(n) predictions = h(X, theta) predictions h(X, theta), y predictions = h(X, theta) plt.plot(X[:, 1], predictions, label='predictions') plt.plot(X[:, 1], y, 'rx', label='labels') plt.legend() ###Output _____no_output_____ ###Markdown Loss ###Code def J(theta, X, y): return np.mean(np.square(h(X, theta) - y)) J(theta, X, y) ###Output _____no_output_____ ###Markdown Training ###Code alpha = 0.01 losses = [] for _ in range(5000): theta = theta - alpha * (1/m) * (X.T @ ((X @ theta) - y)) losses.append(J(theta, X, y)) predictions = h(X, theta) plt.plot(X[:, 1], predictions, label='predictions') plt.plot(X[:, 1], y, 'rx', label='labels') plt.legend() plt.plot(losses) losses[-1] ###Output _____no_output_____ ###Markdown Normal equation ###Code # recompute theta theta = np.linalg.pinv(X.T@X) @ X.T @ y predictions = h(X, theta) plt.plot(X[:, 1], predictions, label='predictions') plt.plot(X[:, 1], y, 'rx', label='labels') plt.legend() ###Output _____no_output_____
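###Markdown For reference (standard results, stated here for clarity), the gradient descent update implemented in the training loop above is $$\theta \leftarrow \theta - \alpha \, \frac{1}{m} X^\top (X\theta - y),$$ and the normal equation solves the same least-squares problem in closed form: $$\theta = (X^\top X)^{-1} X^\top y.$$ The code uses the pseudoinverse `np.linalg.pinv` in place of the plain inverse, which also handles the case where $X^\top X$ is singular.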
docs/contents/tools/classes/parmed_GromacsTopologyFile/to_mdtraj_Topology.ipynb
###Markdown To mdtraj.Topology ###Code from molsysmt.tools import parmed_GromacsTopologyFile #parmed_GromacsTopologyFile.to_mdtraj_Topology(item) ###Output _____no_output_____
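###Markdown For context, a minimal sketch (added here; not part of the original stub) of what this conversion could look like if done by hand with ParmEd and MDTraj — the file name `'topol.top'` is hypothetical, and routing through an OpenMM topology is an assumption about what the molsysmt helper might wrap: ###Code
import parmed as pmd
import mdtraj as md

# load a GROMACS topology with ParmEd (file name is hypothetical)
gmx_top = pmd.gromacs.GromacsTopologyFile('topol.top')

# ParmEd structures expose an OpenMM topology, which MDTraj can convert
mdtraj_top = md.Topology.from_openmm(gmx_top.topology)
print(mdtraj_top)
###Output _____no_output_____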
d2l-en/mxnet/chapter_natural-language-processing-pretraining/subword-embedding.ipynb
###Markdown Subword Embedding :label:`sec_fasttext` English words usually have internal structures and formation methods. For example, we can deduce the relationship between "dog", "dogs", and "dogcatcher" by their spelling. All these words have the same root, "dog", but they use different suffixes to change the meaning of the word. Moreover, this association can be extended to other words. For example, the relationship between "dog" and "dogs" is just like the relationship between "cat" and "cats". The relationship between "boy" and "boyfriend" is just like the relationship between "girl" and "girlfriend". This characteristic is not unique to English. In French and Spanish, a lot of verbs can have more than 40 different forms depending on the context. In Finnish, a noun may have more than 15 forms. In fact, morphology, which is an important branch of linguistics, studies the internal structure and formation of words. fastText In word2vec, we did not directly use morphology information. In both the skip-gram model and continuous bag-of-words model, we use different vectors to represent words with different forms. For example, "dog" and "dogs" are represented by two different vectors, while the relationship between these two vectors is not directly represented in the model. In view of this, fastText :cite:`Bojanowski.Grave.Joulin.ea.2017` proposes the method of subword embedding, thereby attempting to introduce morphological information into the skip-gram model in word2vec. In fastText, each central word is represented as a collection of subwords. Below we use the word "where" as an example to understand how subwords are formed. First, we add the special characters “<” and “>” at the beginning and end of the word to distinguish the subwords used as prefixes and suffixes. Then, we treat the word as a sequence of characters to extract the $n$-grams. For example, when $n=3$, we can get all subwords with a length of $3$: $$\textrm{"<wh"}, \textrm{"whe"}, \textrm{"her"}, \textrm{"ere"}, \textrm{"re>"},$$ and the special subword $\textrm{"<where>"}$. In fastText, for a word $w$, we record the union of all its subwords with lengths of $3$ to $6$ and special subwords as $\mathcal{G}_w$. Thus, the dictionary is the union of the collection of subwords of all words. Assume the vector of the subword $g$ in the dictionary is $\mathbf{z}_g$. Then, the central word vector $\mathbf{u}_w$ for the word $w$ in the skip-gram model can be expressed as $$\mathbf{u}_w = \sum_{g\in\mathcal{G}_w} \mathbf{z}_g.$$ The rest of the fastText process is consistent with the skip-gram model, so it is not repeated here. As we can see, compared with the skip-gram model, the dictionary in fastText is larger, resulting in more model parameters. Also, the vector of one word requires the summation of all subword vectors, which results in higher computational complexity. However, we can obtain better vectors for more uncommon complex words, even words not existing in the dictionary, by looking at other words with similar structures. Byte Pair Encoding :label:`subsec_Byte_Pair_Encoding` In fastText, all the extracted subwords have to be of the specified lengths, such as $3$ to $6$, thus the vocabulary size cannot be predefined. To allow for variable-length subwords in a fixed-size vocabulary, we can apply a compression algorithm called *byte pair encoding* (BPE) to extract subwords :cite:`Sennrich.Haddow.Birch.2015`. Byte pair encoding performs a statistical analysis of the training dataset to discover common symbols within a word, such as consecutive characters of arbitrary length. Starting from symbols of length $1$, byte pair encoding iteratively merges the most frequent pair of consecutive symbols to produce new longer symbols. Note that for efficiency, pairs crossing word boundaries are not considered. In the end, we can use such symbols as subwords to segment words. Byte pair encoding and its variants have been used for input representations in popular natural language processing pretraining models such as GPT-2 :cite:`Radford.Wu.Child.ea.2019` and RoBERTa :cite:`Liu.Ott.Goyal.ea.2019`. In the following, we will illustrate how byte pair encoding works. First, we initialize the vocabulary of symbols as all the English lowercase characters, a special end-of-word symbol `'_'`, and a special unknown symbol `'[UNK]'`. ###Code import collections symbols = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '_', '[UNK]'] ###Output _____no_output_____
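###Markdown Looking back at the fastText section, a short sketch (added for illustration, not part of the original text) that extracts the length-3 subwords of "where": ###Code
# list the character trigrams of '<where>' (illustrative only)
word = '<where>'
print([word[i:i + 3] for i in range(len(word) - 2)])
###Output ['<wh', 'whe', 'her', 'ere', 're>']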
Byte Pair Encoding:label:`subsec_Byte_Pair_Encoding`In fastText, all the extracted subwords have to be of the specified lengths, such as $3$ to $6$, thus the vocabulary size cannot be predefined.To allow for variable-length subwords in a fixed-size vocabulary,we can apply a compression algorithmcalled *byte pair encoding* (BPE) to extract subwords :cite:`Sennrich.Haddow.Birch.2015`.Byte pair encoding performs a statistical analysis of the training dataset to discover common symbols within a word,such as consecutive characters of arbitrary length.Starting from symbols of length $1$,byte pair encoding iteratively merges the most frequent pair of consecutive symbols to produce new longer symbols.Note that for efficiency, pairs crossing word boundaries are not considered.In the end, we can use such symbols as subwords to segment words.Byte pair encoding and its variants has been used for input representations in popular natural language processing pretraining models such as GPT-2 :cite:`Radford.Wu.Child.ea.2019` and RoBERTa :cite:`Liu.Ott.Goyal.ea.2019`.In the following, we will illustrate how byte pair encoding works.First, we initialize the vocabulary of symbols as all the English lowercase characters, a special end-of-word symbol `'_'`, and a special unknown symbol `'[UNK]'`. ###Code import collections symbols = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '_', '[UNK]'] ###Output _____no_output_____ ###Markdown Since we do not consider symbol pairs that cross boundaries of words,we only need a dictionary `raw_token_freqs` that maps words to their frequencies (number of occurrences)in a dataset.Note that the special symbol `'_'` is appended to each word so thatwe can easily recover a word sequence (e.g., "a taller man")from a sequence of output symbols ( e.g., "a_ tall er_ man").Since we start the merging process from a vocabulary of only single characters and special symbols, space is inserted between every pair of consecutive characters within each word (keys of the dictionary `token_freqs`).In other words, space is the delimiter between symbols within a word. ###Code raw_token_freqs = {'fast_': 4, 'faster_': 3, 'tall_': 5, 'taller_': 4} token_freqs = {} for token, freq in raw_token_freqs.items(): token_freqs[' '.join(list(token))] = raw_token_freqs[token] token_freqs ###Output _____no_output_____ ###Markdown We define the following `get_max_freq_pair` function that returns the most frequent pair of consecutive symbols within a word,where words come from keys of the input dictionary `token_freqs`. ###Code def get_max_freq_pair(token_freqs): pairs = collections.defaultdict(int) for token, freq in token_freqs.items(): symbols = token.split() for i in range(len(symbols) - 1): # Key of `pairs` is a tuple of two consecutive symbols pairs[symbols[i], symbols[i + 1]] += freq return max(pairs, key=pairs.get) # Key of `pairs` with the max value ###Output _____no_output_____ ###Markdown As a greedy approach based on frequency of consecutive symbols,byte pair encoding will use the following `merge_symbols` function to merge the most frequent pair of consecutive symbols to produce new symbols. 
###Code def merge_symbols(max_freq_pair, token_freqs, symbols): symbols.append(''.join(max_freq_pair)) new_token_freqs = dict() for token, freq in token_freqs.items(): new_token = token.replace(' '.join(max_freq_pair), ''.join(max_freq_pair)) new_token_freqs[new_token] = token_freqs[token] return new_token_freqs ###Output _____no_output_____ ###Markdown Now we iteratively perform the byte pair encoding algorithm over the keys of the dictionary `token_freqs`. In the first iteration, the most frequent pair of consecutive symbols are `'t'` and `'a'`, thus byte pair encoding merges them to produce a new symbol `'ta'`. In the second iteration, byte pair encoding continues to merge `'ta'` and `'l'` to result in another new symbol `'tal'`. ###Code num_merges = 10 for i in range(num_merges): max_freq_pair = get_max_freq_pair(token_freqs) token_freqs = merge_symbols(max_freq_pair, token_freqs, symbols) print(f'merge #{i + 1}:', max_freq_pair) ###Output merge #1: ('t', 'a') merge #2: ('ta', 'l') merge #3: ('tal', 'l') merge #4: ('f', 'a') merge #5: ('fa', 's') merge #6: ('fas', 't') merge #7: ('e', 'r') merge #8: ('er', '_') merge #9: ('tall', '_') merge #10: ('fast', '_') ###Markdown After 10 iterations of byte pair encoding, we can see that list `symbols` now contains 10 more symbols that are iteratively merged from other symbols. ###Code print(symbols) ###Output ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '_', '[UNK]', 'ta', 'tal', 'tall', 'fa', 'fas', 'fast', 'er', 'er_', 'tall_', 'fast_'] ###Markdown For the same dataset specified in the keys of the dictionary `raw_token_freqs`,each word in the dataset is now segmented by subwords "fast_", "fast", "er_", "tall_", and "tall"as a result of the byte pair encoding algorithm.For instance, words "faster_" and "taller_" are segmented as "fast er_" and "tall er_", respectively. ###Code print(list(token_freqs.keys())) ###Output ['fast_', 'fast er_', 'tall_', 'tall er_'] ###Markdown Note that the result of byte pair encoding depends on the dataset being used.We can also use the subwords learned from one datasetto segment words of another dataset.As a greedy approach, the following `segment_BPE` function tries to break words into the longest possible subwords from the input argument `symbols`. ###Code def segment_BPE(tokens, symbols): outputs = [] for token in tokens: start, end = 0, len(token) cur_output = [] # Segment token with the longest possible subwords from symbols while start < len(token) and start < end: if token[start: end] in symbols: cur_output.append(token[start: end]) start = end end = len(token) else: end -= 1 if start < len(token): cur_output.append('[UNK]') outputs.append(' '.join(cur_output)) return outputs ###Output _____no_output_____ ###Markdown In the following, we use the subwords in list `symbols`, which is learned from the aforementioned dataset,to segment `tokens` that represent another dataset. ###Code tokens = ['tallest_', 'fatter_'] print(segment_BPE(tokens, symbols)) ###Output ['tall e s t _', 'fa t t er_']
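###Markdown As noted earlier, the end-of-word symbol `'_'` makes the segmentation reversible. A small added sketch (not from the original text) of that recovery step: ###Code
def recover_words(symbol_seq):
    # drop the spaces between subwords, then turn each end-of-word
    # marker '_' back into a space between words
    return symbol_seq.replace(' ', '').replace('_', ' ').strip()

print(recover_words('a_ tall er_ man'))
###Output a taller man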
analysis_jpntbs/Mean_occ_plot.ipynb
###Markdown BSA/TA MR = 32 Mean Occ Load Coordinates and Trajectory ###Code MR32_bsaTA = mda.Universe('BSA_TA_ions/BSA_TA_MR_32/trial_1/mr32TA_nosol.pdb' ,'BSA_TA_ions/BSA_TA_MR_32/trial_1/centnoPBC_MR32TA.xtc') MR32_bsaTA ###Output _____no_output_____ ###Markdown Check that we are on the first frame ###Code MR32_bsaTA.trajectory.frame MR32_bsaTA.trajectory[0] # Each frame was saved with dt being 20 ps (20 ps * 10000 frames = 200,000 ps = 200 ns) # Length of trajectory bTAmr32_len = len(MR32_bsaTA.trajectory) bTAmr32_len # Select protein bsa_bTAmr32 = MR32_bsaTA.select_atoms("protein") bsa_bTAmr32 # Select TA molecules ta_bTAmr32 = MR32_bsaTA.select_atoms("resname TCL") ta_bTAmr32 # Select K+ ions CL_bTAmr32 = MR32_bsaTA.select_atoms("resname CL") CL_bTAmr32 ###Output _____no_output_____ ###Markdown TA occupancy ###Code #dmax = 4.0, DO NOT use surface atom group, you will get the wrong answer dmax = 4.0 start = 0 #end = 1000 end = bTAmr32_len - 1 s_time = timeit.default_timer() bTAmr32_occdict = aa_frmcount(bsa_bTAmr32, ta_bTAmr32, dmax, MR32_bsaTA, start, end) timeit.default_timer() - s_time #import json with open('bTAmr32_occdict.json', 'w') as fp: json.dump(bTAmr32_occdict, fp) #import json with open('bTAmr32_occdict.json', 'r') as fp: bTAmr32_occdict = json.load(fp) a_a = ['ALA','ARG','ASH','ASN','ASP','CYS','GLH','GLN','GLU','GLY','HID','HIE','HIP','ILE','LEU','LYS','MET' ,'PHE','PRO','SER','THR','TRP','TYR','VAL','CYX'] len(bTAmr32_occdict.keys()) TAmr32_occ = {key:bTAmr32_occdict[key][1] for key, value in bTAmr32_occdict.items()} #test_TA bTAmr32_occdict moccg95_bTA32 = [] for key, value in TAmr32_occ.items(): if value > 0.95: moccg95_bTA32.append(key) len(moccg95_bTA32) tt_TAmr32 = [] #cnt_sav = [] ssbTAmr32 = [] for i in range(len(a_a)): count = 0 for j in range(len(moccg95_bTA32)): if (moccg95_bTA32[j].find(a_a[i]) != -1) == True: #print(a_a[i]) #print(moccg95_bTA32[j]) tt_TAmr32.append(moccg95_bTA32[j]) #cnt_sav.append(j) count += 1 ssbTAmr32.append(str(str(a_a[i])+" "+str(count))) ssbTAmr32 #tt_ta # Sorting of AA in prot_polymer_analysis functions # Grouping of residues in Smith et al #aromatic_res = ['PHE', 'TRP', 'TYR', 'HID','HIE'] #hydrophobic_res = ['ALA', 'ILE', 'LEU', 'VAL', 'PRO','PHE', 'TRP','MET','TYR'] #polar_res = ['ASN', 'CYS', 'CYX','ASH', 'GLH','GLN', 'SER', 'THR','GLY','HIE','HID'] #neg_res = ['ASP', 'GLU'] #pos_res = ['ARG', 'HIP', 'LYS'] #all_res = [pos_res, neg_res, polar_res, hydrophobic_res, aromatic_res] # Put the AA count in a pandas dataframe TA32_dg , TA32_ji = AA_list_org(ssbTAmr32) prot95_mocc = pd.DataFrame(data=TA32_dg, index=None, columns=['Amino_acids']) new_lf = pd.Series(data=TA32_ji, index=None) prot95_mocc['BSA/TA MR = 32'] = new_lf prot95_mocc prot95_mocc['BSA/TA MR = 32'][:].sum() len(bTAmr32_occdict.keys()) totres_TAmr32 = prot95_mocc['BSA/TA MR = 32'][:].sum() # Number of positively charged residues (SASA) #pos_res = ['ARG', 'HIP', 'LYS'] pos_TAmr32 = prot95_mocc['BSA/TA MR = 32'][0:3].sum()/totres_TAmr32 # Number of negatively charged residues (SASA) #neg_res = ['ASP', 'GLU'] neg_TAmr32 = prot95_mocc['BSA/TA MR = 32'][3:5].sum()/totres_TAmr32 # Number of polar residues (SASA) #polar_res = ['ASN', 'CYS', 'CYX' 'ASH', 'GLH','GLN', 'SER', 'THR','GLY','HIE','HID'] pol_TAmr32 = prot95_mocc['BSA/TA MR = 32'][5:16].sum()/totres_TAmr32 # Number of hydrophobic residues (SASA) #hydrophobic_res = ['ALA', 'ILE', 'LEU', 'VAL', 'PRO','PHE', 'TRP','MET','TYR'] hb_TAmr32 = prot95_mocc['BSA/TA MR = 32'][16:25].sum()/totres_TAmr32 # Number of aromatic 
residues (SASA) #aromatic_res = ['PHE', 'TRP', 'TYR', 'HID','HIE'] arm_TAmr32 = prot95_mocc['BSA/TA MR = 32'][25:30].sum()/totres_TAmr32 ###Output _____no_output_____ ###Markdown BSA/TA MR = 128 Mean Occ Load Coordinates and Trajectory ###Code MR128_bsaTA = mda.Universe('BSA_TA_ions/BSA_TA_MR_128/trial_1/NC_MR128nosol.pdb' ,'BSA_TA_ions/BSA_TA_MR_128/trial_1/cent_noPBCpH3.7.xtc') MR128_bsaTA ###Output _____no_output_____ ###Markdown Check that we are on the first frame ###Code MR128_bsaTA.trajectory.frame MR128_bsaTA.trajectory[0] # Each frame was saved with dt being 20 ps (20 ps * 10000 frames = 200,000 ps = 200 ns) # Length of trajectory bTAmr128_len = len(MR128_bsaTA.trajectory) bTAmr128_len # Select protein bsa_bTAmr128 = MR128_bsaTA.select_atoms("protein") bsa_bTAmr128 # Select TA molecules ta_bTAmr128 = MR128_bsaTA.select_atoms("resname TCL") ta_bTAmr128 # Select K+ ions K_bTAmr128 = MR128_bsaTA.select_atoms("resname K") K_bTAmr128 ###Output _____no_output_____ ###Markdown TA occupancy ###Code #dmax = 4.0, DO NOT use surface atom group, you will get the wrong answer dmax = 4.0 start = 0 #end = 1000 end = bTAmr128_len - 1 s_time = timeit.default_timer() bTAMR128_occdict = aa_frmcount(bsa_bTAmr128, ta_bTAmr128, dmax, MR128_bsaTA, start, end) timeit.default_timer() - s_time with open('bTAMR128_occdict.json', 'w') as fp: json.dump(bTAMR128_occdict, fp) with open('bTAMR128_occdict.json', 'r') as fp: bTAMR128_occdict = json.load(fp) len(bTAMR128_occdict.keys()) taMR128_occ = {key:bTAMR128_occdict[key][1] for key, value in bTAMR128_occdict.items()} #test_TA bTAMR128_occdict moccg95_bTA128 = [] for key, value in taMR128_occ.items(): if value > 0.95: moccg95_bTA128.append(key) len(moccg95_bTA128) tt_taMR128 = [] #cnt_sav = [] ssbTAmr128 = [] for i in range(len(a_a)): count = 0 for j in range(len(moccg95_bTA128)): if (moccg95_bTA128[j].find(a_a[i]) != -1) == True: #print(a_a[i]) #print(moccg95_bTA128[j]) tt_taMR128.append(moccg95_bTA128[j]) #cnt_sav.append(j) count += 1 ssbTAmr128.append(str(str(a_a[i])+" "+str(count))) ssbTAmr128 #tt_ta moccg95_bTA128[71] len(cnt_sav) # Sorting of AA in prot_polymer_analysis functions # Grouping of residues in Smith et al #aromatic_res = ['PHE', 'TRP', 'TYR', 'HID','HIE'] #hydrophobic_res = ['ALA', 'ILE', 'LEU', 'VAL', 'PRO','PHE', 'TRP','MET','TYR'] #polar_res = ['ASN', 'CYS', 'CYX','ASH', 'GLH','GLN', 'SER', 'THR','GLY','HIE','HID'] #neg_res = ['ASP', 'GLU'] #pos_res = ['ARG', 'HIP', 'LYS'] #all_res = [pos_res, neg_res, polar_res, hydrophobic_res, aromatic_res] # Put the AA count in a pandas dataframe ta128_dg , ta128_ji = AA_list_org(ssbTAmr128) prot95_mocc = pd.DataFrame(data=ta128_dg, index=None, columns=['Amino_acids']) new_lf = pd.Series(data=ta128_ji, index=None) prot95_mocc['BSA/TA MR = 128'] = new_lf prot95_mocc prot95_mocc['BSA/TA MR = 128'][:].sum() len(bTAMR128_occdict.keys()) totres_TAmr128 = prot95_mocc['BSA/TA MR = 128'][:].sum() # Number of positively charged residues (SASA) #pos_res = ['ARG', 'HIP', 'LYS'] pos_taMR128 = prot95_mocc['BSA/TA MR = 128'][0:3].sum()/totres_TAmr128 # Number of negatively charged residues (SASA) #neg_res = ['ASP', 'GLU'] neg_taMR128 = prot95_mocc['BSA/TA MR = 128'][3:5].sum()/totres_TAmr128 # Number of polar residues (SASA) #polar_res = ['ASN', 'CYS', 'CYX' 'ASH', 'GLH','GLN', 'SER', 'THR','GLY','HIE','HID'] pol_taMR128 = prot95_mocc['BSA/TA MR = 128'][5:16].sum()/totres_TAmr128 # Number of hydrophobic residues (SASA) #hydrophobic_res = ['ALA', 'ILE', 'LEU', 'VAL', 'PRO','PHE', 'TRP','MET','TYR'] hb_taMR128 = 
prot95_mocc['BSA/TA MR = 128'][16:25].sum()/totres_TAmr128 # Number of aromatic residues (SASA) #aromatic_res = ['PHE', 'TRP', 'TYR', 'HID','HIE'] arm_taMR128 = prot95_mocc['BSA/TA MR = 128'][25:30].sum()/totres_TAmr128 ###Output _____no_output_____ ###Markdown BSA/SDS MR = 16 Mean Occ Load Coordinates and Trajectory ###Code MR16_bsaSDS = mda.Universe('BSA_SDS_ions/BSA_SDS_MR_16/trial_1/SDSmr16_bsa.pdb' ,'BSA_SDS_ions/BSA_SDS_MR_16/trial_1/centnoPBC_SDSmr16.xtc') MR16_bsaSDS ###Output _____no_output_____ ###Markdown Check that we are on the first frame ###Code MR16_bsaSDS.trajectory.frame # Each frame was saved with dt being 20 ps (20 ps * 10000 frames = 200,000 ps = 200 ns) # Length of trajectory bSDSmr16_len = len(MR16_bsaSDS.trajectory) bSDSmr16_len # Select protein bsa_bSDSmr16 = MR16_bsaSDS.select_atoms("protein") bsa_bSDSmr16 # Select SDS molecules SDS_bSDSmr16 = MR16_bsaSDS.select_atoms("resname SDS") SDS_bSDSmr16 # Select K+ ions CL_bSDSmr16 = MR16_bsaSDS.select_atoms("resname CL") CL_bSDSmr16 ###Output _____no_output_____ ###Markdown SDS occupancy ###Code #dmax = 4.0, DO NOT use surface atom group, you will get the wrong answer dmax = 4.0 start = 0 #end = 1000 end = bSDSmr16_len - 1 s_time = timeit.default_timer() bSDSmr16_occdict = aa_frmcount(bsa_bSDSmr16, SDS_bSDSmr16, dmax, MR16_bsaSDS, start, end) timeit.default_timer() - s_time with open('bSDSmr16_occdict.json', 'w') as fp: json.dump(bSDSmr16_occdict, fp) with open('bSDSmr16_occdict.json', 'r') as fp: bSDSmr16_occdict = json.load(fp) len(bSDSmr16_occdict.keys()) SDSmr16_occ = {key:bSDSmr16_occdict[key][1] for key, value in bSDSmr16_occdict.items()} #test_TA bSDSmr16_occdict moccg95_bSDS16 = [] for key, value in SDSmr16_occ.items(): if value > 0.95: moccg95_bSDS16.append(key) len(moccg95_bSDS16) tt_SDSmr16 = [] #cnt_sav = [] ssbSDSmr16 = [] for i in range(len(a_a)): count = 0 for j in range(len(moccg95_bSDS16)): if (moccg95_bSDS16[j].find(a_a[i]) != -1) == True: #print(a_a[i]) #print(moccg95_bSDS16[j]) tt_SDSmr16.append(moccg95_bSDS16[j]) #cnt_sav.append(j) count += 1 ssbSDSmr16.append(str(str(a_a[i])+" "+str(count))) ssbSDSmr16 #tt_ta # Sorting of AA in prot_polymer_analysis functions # Grouping of residues in Smith et al #aromatic_res = ['PHE', 'TRP', 'TYR', 'HID','HIE'] #hydrophobic_res = ['ALA', 'ILE', 'LEU', 'VAL', 'PRO','PHE', 'TRP','MET','TYR'] #polar_res = ['ASN', 'CYS', 'CYX','ASH', 'GLH','GLN', 'SER', 'THR','GLY','HIE','HID'] #neg_res = ['ASP', 'GLU'] #pos_res = ['ARG', 'HIP', 'LYS'] #all_res = [pos_res, neg_res, polar_res, hydrophobic_res, aromatic_res] # Put the AA count in a pandas dataframe ta128_dg , ta128_ji = AA_list_org(ssbSDSmr16) #prot95_mocc = pd.DataFrame(data=ta128_dg, index=None, columns=['Amino_acids']) new_lfSDS16 = pd.Series(data=ta128_ji, index=None) prot95_mocc['BSA/SDS MR = 16'] = new_lfSDS16 prot95_mocc prot95_mocc['BSA/SDS MR = 16'][:].sum() totres_SDS16 = prot95_mocc['BSA/SDS MR = 16'][:].sum() # Number of positively charged residues (SASA) #pos_res = ['ARG', 'HIP', 'LYS'] pos_SDSmr16 = prot95_mocc['BSA/SDS MR = 16'][0:3].sum()/totres_SDS16 # Number of negatively charged residues (SASA) #neg_res = ['ASP', 'GLU'] neg_SDSmr16 = prot95_mocc['BSA/SDS MR = 16'][3:5].sum()/totres_SDS16 # Number of polar residues (SASA) #polar_res = ['ASN', 'CYS', 'CYX' 'ASH', 'GLH','GLN', 'SER', 'THR','GLY','HIE','HID'] pol_SDSmr16 = prot95_mocc['BSA/SDS MR = 16'][5:16].sum()/totres_SDS16 # Number of hydrophobic residues (SASA) #hydrophobic_res = ['ALA', 'ILE', 'LEU', 'VAL', 'PRO','PHE', 'TRP','MET','TYR'] 
hb_SDSmr16 = prot95_mocc['BSA/SDS MR = 16'][16:25].sum()/totres_SDS16 # Number of aromatic residues (SASA) #aromatic_res = ['PHE', 'TRP', 'TYR', 'HID','HIE'] arm_SDSmr16 = prot95_mocc['BSA/SDS MR = 16'][25:30].sum()/totres_SDS16 ###Output _____no_output_____ ###Markdown BSA/SDS MR = 128 Mean Occ Load Coordinates and Trajectory ###Code MR128_bsaSDS = mda.Universe('BSA_SDS_ions/BSA_SDS_MR_128/trial_1/sds_MR128nosol.pdb' ,'BSA_SDS_ions/BSA_SDS_MR_128/trial_1/centnoPBC_MR128.xtc') MR128_bsaSDS ###Output _____no_output_____ ###Markdown Check that we are on the first frame ###Code MR128_bsaSDS.trajectory.frame # Each frame was saved with dt being 20 ps (20 ps * 10000 frames = 200,000 ps = 200 ns) # Length of trajectory bSDSmr128_len = len(MR128_bsaSDS.trajectory) # Select protein bsa_bSDSmr128 = MR128_bsaSDS.select_atoms("protein") bsa_bSDSmr128 # Select SDS molecules SDS_bSDSmr128 = MR128_bsaSDS.select_atoms("resname SDS") SDS_bSDSmr128 # Select K+ ions K_bSDSmr128 = MR128_bsaSDS.select_atoms("resname K") K_bSDSmr128 ###Output _____no_output_____ ###Markdown SDS occupancy ###Code #dmax = 4.0, DO NOT use surface atom group, you will get the wrong answer dmax = 4.0 start = 0 #end = 1000 end = bSDSmr128_len - 1 s_time = timeit.default_timer() bSDSMR128_occdict = aa_frmcount(bsa_bSDSmr128, SDS_bSDSmr128, dmax, MR128_bsaSDS, start, end) timeit.default_timer() - s_time with open('bSDSMR128_occdict.json', 'w') as fp: json.dump(bSDSMR128_occdict, fp) with open('bSDSMR128_occdict.json', 'r') as fp: bSDSMR128_occdict = json.load(fp) len(bSDSMR128_occdict.keys()) sdsMR128_occ = {key:bSDSMR128_occdict[key][1] for key, value in bSDSMR128_occdict.items()} #test_TA bSDSMR128_occdict moccg95_bSDS128 = [] for key, value in sdsMR128_occ.items(): if value > 0.95: moccg95_bSDS128.append(key) len(moccg95_bSDS128) tt_sdsMR128 = [] #cnt_sav = [] ssbsdsMR128 = [] for i in range(len(a_a)): count = 0 for j in range(len(moccg95_bSDS128)): if (moccg95_bSDS128[j].find(a_a[i]) != -1) == True: #print(a_a[i]) #print(moccg95_bSDS128[j]) tt_sdsMR128.append(moccg95_bSDS128[j]) #cnt_sav.append(j) count += 1 ssbsdsMR128.append(str(str(a_a[i])+" "+str(count))) ssbsdsMR128 #tt_ta moccg95_bSDS128[71] len(cnt_sav) # Sorting of AA in prot_polymer_analysis functions # Grouping of residues in Smith et al #aromatic_res = ['PHE', 'TRP', 'TYR', 'HID','HIE'] #hydrophobic_res = ['ALA', 'ILE', 'LEU', 'VAL', 'PRO','PHE', 'TRP','MET','TYR'] #polar_res = ['ASN', 'CYS', 'CYX','ASH', 'GLH','GLN', 'SER', 'THR','GLY','HIE','HID'] #neg_res = ['ASP', 'GLU'] #pos_res = ['ARG', 'HIP', 'LYS'] #all_res = [pos_res, neg_res, polar_res, hydrophobic_res, aromatic_res] # Put the AA count in a pandas dataframe ta128_dg , ta128_ji = AA_list_org(ssbsdsMR128) #prot95_mocc = pd.DataFrame(data=ta128_dg, index=None, columns=['Amino_acids']) new_lfsds128 = pd.Series(data=ta128_ji, index=None) prot95_mocc['BSA/SDS MR = 128'] = new_lfsds128 prot95_mocc prot95_mocc['BSA/SDS MR = 128'][:].sum() totres_SDS128 = prot95_mocc['BSA/SDS MR = 128'][:].sum() # Number of positively charged residues (SASA) #pos_res = ['ARG', 'HIP', 'LYS'] pos_sdsMR128 = prot95_mocc['BSA/SDS MR = 128'][0:3].sum()/totres_SDS128 # Number of negatively charged residues (SASA) #neg_res = ['ASP', 'GLU'] neg_sdsMR128 = prot95_mocc['BSA/SDS MR = 128'][3:5].sum()/totres_SDS128 # Number of polar residues (SASA) #polar_res = ['ASN', 'CYS', 'CYX' 'ASH', 'GLH','GLN', 'SER', 'THR','GLY','HIE','HID'] pol_sdsMR128 = prot95_mocc['BSA/SDS MR = 128'][5:16].sum()/totres_SDS128 # Number of hydrophobic residues (SASA) 
#hydrophobic_res = ['ALA', 'ILE', 'LEU', 'VAL', 'PRO','PHE', 'TRP','MET','TYR'] hb_sdsMR128 = prot95_mocc['BSA/SDS MR = 128'][16:25].sum()/totres_SDS128 # Number of aromatic residues (SASA) #aromatic_res = ['PHE', 'TRP', 'TYR', 'HID','HIE'] arm_sdsMR128 = prot95_mocc['BSA/SDS MR = 128'][25:30].sum()/totres_SDS128 ###Output _____no_output_____ ###Markdown BSA/DS MR = 1 Mean Occ Load Coordinates and Trajectory ###Code MR1_bsaDS = mda.Universe('BSA_DS_ions/BSA_DS_MR_1/trial_1/DSmr1_bsa.pdb' ,'BSA_DS_ions/BSA_DS_MR_1/trial_1/centnoPBC_DSmr1.xtc') MR1_bsaDS ###Output _____no_output_____ ###Markdown Check that we are on the first frame ###Code MR1_bsaDS.trajectory.frame # Each frame was saved with dt being 20 ps (20 ps * 10000 frames = 200,000 ps = 200 ns) # Length of trajectory bDSmr1_len = len(MR1_bsaDS.trajectory) bDSmr1_len # Select protein bsa_bDSmr1 = MR1_bsaDS.select_atoms("protein") bsa_bDSmr1 # Select DS molecules DS_bDSmr1 = MR1_bsaDS.select_atoms("resname ROH ADN LDN") DS_bDSmr1 # Select K+ ions CL_bDSmr1 = MR1_bsaDS.select_atoms("resname CL") CL_bDSmr1 ###Output _____no_output_____ ###Markdown DS occupancy ###Code #dmax = 4.0, DO NOT use surface atom group, you will get the wrong answer dmax = 4.0 start = 0 #end = 1000 end = bDSmr1_len - 1 s_time = timeit.default_timer() bDSmr1_occdict = aa_frmcount(bsa_bDSmr1, DS_bDSmr1, dmax, MR1_bsaDS, start, end) timeit.default_timer() - s_time with open('bDSmr1_occdict.json', 'w') as fp: json.dump(bDSmr1_occdict, fp) with open('bDSmr1_occdict.json', 'r') as fp: bDSmr1_occdict = json.load(fp) len(bDSmr1_occdict.keys()) DSmr1_occ = {key:bDSmr1_occdict[key][1] for key, value in bDSmr1_occdict.items()} #test_TA bDSmr1_occdict moccg95_bDSmr1 = [] for key, value in DSmr1_occ.items(): if value > 0.95: moccg95_bDSmr1.append(key) len(moccg95_bDSmr1) tt_DSmr1 = [] #cnt_sav = [] ssbDSmr1 = [] for i in range(len(a_a)): count = 0 for j in range(len(moccg95_bDSmr1)): if (moccg95_bDSmr1[j].find(a_a[i]) != -1) == True: #print(a_a[i]) #print(moccg95_bDSmr1[j]) tt_DSmr1.append(moccg95_bDSmr1[j]) #cnt_sav.append(j) count += 1 ssbDSmr1.append(str(str(a_a[i])+" "+str(count))) ssbDSmr1 #tt_ta moccg95_bDSmr1[71] len(cnt_sav) # Sorting of AA in prot_polymer_analysis functions # Grouping of residues in Smith et al #aromatic_res = ['PHE', 'TRP', 'TYR', 'HID','HIE'] #hydrophobic_res = ['ALA', 'ILE', 'LEU', 'VAL', 'PRO','PHE', 'TRP','MET','TYR'] #polar_res = ['ASN', 'CYS', 'CYX','ASH', 'GLH','GLN', 'SER', 'THR','GLY','HIE','HID'] #neg_res = ['ASP', 'GLU'] #pos_res = ['ARG', 'HIP', 'LYS'] #all_res = [pos_res, neg_res, polar_res, hydrophobic_res, aromatic_res] # Put the AA count in a pandas dataframe ta128_dg , ta128_ji = AA_list_org(ssbDSmr1) #prot95_mocc = pd.DataFrame(data=ta128_dg, index=None, columns=['Amino_acids']) new_lfDSmr1 = pd.Series(data=ta128_ji, index=None) prot95_mocc['BSA/DS MR = 1'] = new_lfDSmr1 prot95_mocc prot95_mocc['BSA/DS MR = 1'][:].sum() totres_DSmr1 = prot95_mocc['BSA/DS MR = 1'][:].sum() # Number of positively charged residues (SASA) #pos_res = ['ARG', 'HIP', 'LYS'] pos_DSmr1 = prot95_mocc['BSA/DS MR = 1'][0:3].sum()/totres_DSmr1 # Number of negatively charged residues (SASA) #neg_res = ['ASP', 'GLU'] neg_DSmr1 = prot95_mocc['BSA/DS MR = 1'][3:5].sum()/totres_DSmr1 # Number of polar residues (SASA) #polar_res = ['ASN', 'CYS', 'CYX' 'ASH', 'GLH','GLN', 'SER', 'THR','GLY','HIE','HID'] pol_DSmr1 = prot95_mocc['BSA/DS MR = 1'][5:16].sum()/totres_DSmr1 # Number of hydrophobic residues (SASA) #hydrophobic_res = ['ALA', 'ILE', 'LEU', 'VAL', 'PRO','PHE', 
'TRP','MET','TYR'] hb_DSmr1 = prot95_mocc['BSA/DS MR = 1'][16:25].sum()/totres_DSmr1 # Number of aromatic residues (SASA) #aromatic_res = ['PHE', 'TRP', 'TYR', 'HID','HIE'] arm_DSmr1 = prot95_mocc['BSA/DS MR = 1'][25:30].sum()/totres_DSmr1 ###Output _____no_output_____ ###Markdown BSA/DS MR = 5 Mean Occ Load Coordinates and Trajectory ###Code MR5_bsaDS = mda.Universe('BSA_DS_ions/BSA_DS_MR_5/trial_1/DS_MR5nosol.pdb' ,'BSA_DS_ions/BSA_DS_MR_5/trial_1/centnoPBC_MR5DS.xtc') MR5_bsaDS ###Output _____no_output_____ ###Markdown Check that we are on the first frame ###Code MR5_bsaDS.trajectory.frame # Each frame was saved with dt being 20 ps (20 ps * 10000 frames = 200,000 ps = 200 ns) # Length of trajectory bDSmr5_len = len(MR5_bsaDS.trajectory) bDSmr5_len # Select protein bsa_bDSmr5 = MR5_bsaDS.select_atoms("protein") bsa_bDSmr5 # Select DS molecules DS_bDSmr5 = MR5_bsaDS.select_atoms("resname ROH ADN LDN") DS_bDSmr5 # Select K+ ions K_bDSmr5 = MR5_bsaDS.select_atoms("resname K") K_bDSmr5 ###Output _____no_output_____ ###Markdown DS occupancy ###Code #dmax = 4.0, DO NOT use surface atom group, you will get the wrong answer dmax = 4.0 start = 0 #end = 1000 end = bDSmr5_len - 1 s_time = timeit.default_timer() bDSMR5_occdict = aa_frmcount(bsa_bDSmr5, DS_bDSmr5, dmax, MR5_bsaDS, start, end) timeit.default_timer() - s_time with open('bDSMR5_occdict.json', 'w') as fp: json.dump(bDSMR5_occdict, fp) with open('bDSMR5_occdict.json', 'r') as fp: bDSMR5_occdict = json.load(fp) len(bDSMR5_occdict.keys()) DSmr5_occ = {key:bDSMR5_occdict[key][1] for key, value in bDSMR5_occdict.items()} #test_TA bDSmr5_occdict moccg95_bDSmr5 = [] for key, value in DSmr5_occ.items(): if value > 0.95: moccg95_bDSmr5.append(key) len(moccg95_bDSmr5) tt_DSmr5 = [] #cnt_sav = [] ssbDSmr5 = [] for i in range(len(a_a)): count = 0 for j in range(len(moccg95_bDSmr5)): if (moccg95_bDSmr5[j].find(a_a[i]) != -1) == True: #print(a_a[i]) #print(moccg95_bDSmr5[j]) tt_DSmr5.append(moccg95_bDSmr5[j]) #cnt_sav.append(j) count += 1 ssbDSmr5.append(str(str(a_a[i])+" "+str(count))) ssbDSmr5 #tt_ta moccg95_bDSmr5[71] len(cnt_sav) # Sorting of AA in prot_polymer_analysis functions # Grouping of residues in Smith et al #aromatic_res = ['PHE', 'TRP', 'TYR', 'HID','HIE'] #hydrophobic_res = ['ALA', 'ILE', 'LEU', 'VAL', 'PRO','PHE', 'TRP','MET','TYR'] #polar_res = ['ASN', 'CYS', 'CYX','ASH', 'GLH','GLN', 'SER', 'THR','GLY','HIE','HID'] #neg_res = ['ASP', 'GLU'] #pos_res = ['ARG', 'HIP', 'LYS'] #all_res = [pos_res, neg_res, polar_res, hydrophobic_res, aromatic_res] # Put the AA count in a pandas dataframe ta128_dg , ta128_ji = AA_list_org(ssbDSmr5) #prot95_mocc = pd.DataFrame(data=ta128_dg, index=None, columns=['Amino_acids']) new_lfDSmr5 = pd.Series(data=ta128_ji, index=None) prot95_mocc['BSA/DS MR = 5'] = new_lfDSmr5 prot95_mocc prot95_mocc['BSA/DS MR = 5'][:].sum() totres_DSmr5 = prot95_mocc['BSA/DS MR = 5'][:].sum() # Number of positively charged residues (SASA) #pos_res = ['ARG', 'HIP', 'LYS'] pos_DSmr5 = prot95_mocc['BSA/DS MR = 5'][0:3].sum()/totres_DSmr5 # Number of negatively charged residues (SASA) #neg_res = ['ASP', 'GLU'] neg_DSmr5 = prot95_mocc['BSA/DS MR = 5'][3:5].sum()/totres_DSmr5 # Number of polar residues (SASA) #polar_res = ['ASN', 'CYS', 'CYX' 'ASH', 'GLH','GLN', 'SER', 'THR','GLY','HIE','HID'] pol_DSmr5 = prot95_mocc['BSA/DS MR = 5'][5:16].sum()/totres_DSmr5 # Number of hydrophobic residues (SASA) #hydrophobic_res = ['ALA', 'ILE', 'LEU', 'VAL', 'PRO','PHE', 'TRP','MET','TYR'] hb_DSmr5 = prot95_mocc['BSA/DS MR = 
5'][16:25].sum()/totres_DSmr5 # Number of aromatic residues (SASA) #aromatic_res = ['PHE', 'TRP', 'TYR', 'HID','HIE'] arm_DSmr5 = prot95_mocc['BSA/DS MR = 5'][25:30].sum()/totres_DSmr5 ###Output _____no_output_____ ###Markdown Plotting Low MR ###Code TA128_sAA np.sum(DSmr5_sAA) TA32_sAA = np.array([neg_TAmr32, pos_TAmr32, pol_TAmr32, hb_TAmr32, arm_TAmr32]) SDS16_sAA = np.array([neg_SDSmr16, pos_SDSmr16, pol_SDSmr16, hb_SDSmr16, arm_SDSmr16]) DSmr1_sAA = np.array([neg_DSmr1, pos_DSmr1, pol_DSmr1, hb_DSmr1, arm_DSmr1]) np.sum(DSmr1_sAA) np.sum(SDS16_sAA) np.sum(TA32_sAA) from matplotlib import rcParams from matplotlib.ticker import FixedLocator, FixedFormatter, AutoMinorLocator plt.rcParams["font.weight"] = "bold" plt.rcParams["axes.labelweight"] = "bold" plt.rcParams['font.family'] = "Arial" aa_types = ["Negative", "Positive", "Polar", "Hydrophobic","Aromatic"] fig, ax = plt.subplots(figsize=(9,9)) #x_mr = np.arange(len(frac_totavg)) +1 x_pos = np.arange(5)+1 width = 0.15 # Set position of bar on X axis r1 = np.arange(len(aa_types)) r2 = [x + width for x in r1] r3 = [x + width for x in r2] r4 = [x + width for x in r3] r5 = [x + (0.1*width) for x in r2] ax.bar(r1, TA32_sAA, width, label='BSA/TA MR = 32', color='saddlebrown', capsize=5, edgecolor='black') ax.bar(r2, SDS16_sAA, width, label='BSA/SDS MR = 16', color='yellowgreen', capsize=5, edgecolor='black') ax.bar(r3, DSmr1_sAA, width, label='BSA/DS MR = 1', color='goldenrod', capsize=5, edgecolor='black') #ax.set_xlabel('Catalase/Dextran Sulphate Molar Ratio', fontsize=20, labelpad=7) # Add an x-label to the axes. ax.set_ylabel('Fraction of Surface Residues w/ >95% occupancy', fontsize=20, labelpad=7) # Add a y-label to the axes. #ax.set_title("Simple Plot") # Add a title to the axes font = font_manager.FontProperties(style='normal', size=20) ax.legend(frameon=False, fontsize=20, prop=font) for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(2) ax.xaxis.set_tick_params(direction='out', width=2, length=15, labelsize=18, labelrotation=40) x_formatter = FixedFormatter(aa_types) x_locator = FixedLocator(r5) ax.xaxis.set_major_locator(x_locator) ax.xaxis.set_major_formatter(x_formatter) ax.yaxis.set_minor_locator(AutoMinorLocator()) ax.yaxis.set_tick_params(which='major', direction='inout', width=2, length=15, labelsize=18) ax.yaxis.set_tick_params(which='minor', direction='inout', width=2, length=6, labelsize=18) #ax.xaxis.set_tick_params(which='minor', bottom=False) #ax.set_xticklabels(fontsize=16) #xlims = (0, 6) ylims = (0, 0.8) #ax.set_xlim(xlims) ax.set_ylim(ylims) fig.savefig('FracOcc_AAgrpLowMR.jpg',bbox_inches='tight', dpi=400) ###Output _____no_output_____ ###Markdown Plotting High MR ###Code TA128_sAA np.sum(DSmr5_sAA) TA128_sAA = np.array([neg_taMR128, pos_taMR128, pol_taMR128, hb_taMR128, arm_taMR128]) SDS128_sAA = np.array([neg_sdsMR128, pos_sdsMR128, pol_sdsMR128, hb_sdsMR128, arm_sdsMR128]) DSmr5_sAA = np.array([neg_DSmr5, pos_DSmr5, pol_DSmr5, hb_DSmr5, arm_DSmr5]) from matplotlib import rcParams from matplotlib.ticker import FixedLocator, FixedFormatter, AutoMinorLocator plt.rcParams["font.weight"] = "bold" plt.rcParams["axes.labelweight"] = "bold" plt.rcParams['font.family'] = "Arial" aa_types = ["Negative", "Positive", "Polar", "Hydrophobic","Aromatic"] fig, ax = plt.subplots(figsize=(9,9)) #x_mr = np.arange(len(frac_totavg)) +1 x_pos = np.arange(5)+1 width = 0.15 # Set position of bar on X axis r1 = np.arange(len(aa_types)) r2 = [x + width for x in r1] r3 = [x + width for x in r2] r4 = [x + width for 
x in r3] r5 = [x + (0.1*width) for x in r2] ax.bar(r1, TA128_sAA, width, label='BSA/TA MR = 128', color='seagreen', capsize=5, edgecolor='black') ax.bar(r2, SDS128_sAA, width, label='BSA/SDS MR = 128', color='skyblue', capsize=5, edgecolor='black') ax.bar(r3, DSmr5_sAA, width, label='BSA/DS MR = 5', color='orange', capsize=5, edgecolor='black') #ax.set_xlabel('Catalase/Dextran Sulphate Molar Ratio', fontsize=20, labelpad=7) # Add an x-label to the axes. ax.set_ylabel('Fraction of Surface Residues w/ >95% occupancy', fontsize=20, labelpad=7) # Add a y-label to the axes. #ax.set_title("Simple Plot") # Add a title to the axes font = font_manager.FontProperties(style='normal', size=20) ax.legend(frameon=False, fontsize=20, prop=font) for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(2) ax.xaxis.set_tick_params(direction='out', width=2, length=15, labelsize=18, labelrotation=40) x_formatter = FixedFormatter(aa_types) x_locator = FixedLocator(r5) ax.xaxis.set_major_locator(x_locator) ax.xaxis.set_major_formatter(x_formatter) ax.yaxis.set_minor_locator(AutoMinorLocator()) ax.yaxis.set_tick_params(which='major', direction='inout', width=2, length=15, labelsize=18) ax.yaxis.set_tick_params(which='minor', direction='inout', width=2, length=6, labelsize=18) #ax.xaxis.set_tick_params(which='minor', bottom=False) #ax.set_xticklabels(fontsize=16) #xlims = (0, 6) ylims = (0, 0.6) #ax.set_xlim(xlims) ax.set_ylim(ylims) fig.savefig('FracOcc_AAgrp.jpg',bbox_inches='tight', dpi=400) ###Output _____no_output_____
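###Markdown The helpers `aa_frmcount` and `AA_list_org` used throughout this notebook are defined elsewhere in the project. For readers without that module, here is a minimal sketch of what an occupancy counter along the lines of `aa_frmcount` could look like; the selection string, the `RESNAME+resid` key format, and the `[frame_count, fraction]` return layout are assumptions inferred from how the results are consumed above: ###Code
from collections import defaultdict

def aa_frmcount_sketch(protein, ligand, dmax, universe, start, end):
    # count, per protein residue, the frames in which any ligand atom
    # lies within dmax angstroms of that residue
    counts = defaultdict(int)
    n_frames = 0
    for ts in universe.trajectory[start:end]:
        n_frames += 1
        near = protein.select_atoms(f"around {dmax} group lig", lig=ligand)
        for res in near.residues:
            counts[f"{res.resname}{res.resid}"] += 1
    # mirror the [frame_count, occupancy_fraction] layout used above
    return {key: [n, n / n_frames] for key, n in counts.items()}
###Output _____no_output_____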
StanfordAlgorithmSeries/maximum_weight_IS.ipynb
###Markdown 'mwis.txt' describes the weights of the vertices in a path graph (with the weights listed in the order in which vertices appear in the path). It has the following format:[number_of_vertices][weight of first vertex][weight of second vertex]...For example, the third line of the file is "6395702," indicating that the weight of the second vertex of the graph is 6395702.Your task in this problem is to run the dynamic programming algorithm (and the reconstruction procedure) from lecture on this data set. The question is: of the vertices 1, 2, 3, 4, 17, 117, 517, and 997, which ones belong to the maximum-weight independent set? (By "vertex 1" we mean the first vertex of the graph---there is no vertex 0.) In the box below, enter an 8-bit string, where the ith bit should be 1 if the ith of these 8 vertices is in the maximum-weight independent set, and 0 otherwise. For example, if you think that the vertices 1, 4, 17, and 517 are in the maximum-weight independent set and the other four vertices are not, then you should enter the string 10011010 in the box below. ###Code import numpy as np with open('mwis.txt','r') as f: lines = f.readlines() num = int(lines[0]) weights = list(map(int, lines[1:])) # run the maximum-weight independent set recurrence max_weight = np.zeros(num+1,dtype=int) max_weight[0] = 0 max_weight[1] = weights[0] # weights[0] is the weight of vertex 1 for i in range(2,num+1): max_weight[i] = max(max_weight[i-1], weights[i-1]+max_weight[i-2]) max_weight[-1],max_weight[-2] # backtrack to reconstruct the vertex set v_set = [] i = num while i >= 2: if max_weight[i-1] >= max_weight[i-2] + weights[i-1]: i -= 1 else: v_set.append(i) i -= 2 if i == 1: v_set.append(1) # vertex 1 is included whenever the backtrack ends on it ans = '' for i in [1, 2, 3, 4, 17, 117, 517, 997]: ans += str(int(i in v_set)) ans ###Output _____no_output_____
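###Markdown For completeness, the recurrence implemented above, with $w_i$ the weight of vertex $i$ and $A[i]$ the maximum weight of an independent set on the first $i$ vertices: $$A[0] = 0, \qquad A[1] = w_1, \qquad A[i] = \max\bigl(A[i-1],\; A[i-2] + w_i\bigr) \quad \text{for } i \ge 2.$$ The backtracking pass includes vertex $i$ exactly when the second case is strictly better.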
TensorFlow Advanced Techniques Specialization/Course-3/Advanced Computer Vision with TensorFlow/Week-3/Copy_of_C3W3_Assignment.ipynb
###Markdown Week 3 Assignment: Image Segmentation of Handwritten DigitsIn this week's assignment, you will build a model that predicts the segmentation masks (pixel-wise label map) of handwritten digits. This model will be trained on the [M2NIST dataset](https://www.kaggle.com/farhanhubble/multimnistm2nist), a multi digit MNIST. If you've done the ungraded lab on the CamVid dataset, then many of the steps here will look familiar.You will build a Convolutional Neural Network (CNN) from scratch for the downsampling path and use a Fully Convolutional Network, FCN-8, to upsample and produce the pixel-wise label map. The model will be evaluated using the intersection over union (IOU) and Dice Score. Finally, you will download the model and upload it to the grader in Coursera to get your score for the assignment. ExercisesWe've given you some boilerplate code to work with and these are the 5 exercises you need to fill out before you can successfully get the segmentation masks.* [Exercise 1 - Define the Basic Convolution Block](exercise-1)* [Exercise 2 - Define the Downsampling Path](exercise-2)* [Exercise 3 - Define the FCN-8 decoder](exercise-3)* [Exercise 4 - Compile the Model](exercise-4)* [Exercise 5 - Model Training](exercise-5) ImportsAs usual, let's start by importing the packages you will use in this lab. ###Code try: # %tensorflow_version only exists in Colab. %tensorflow_version 2.x except Exception: pass import os import zipfile import PIL.Image, PIL.ImageFont, PIL.ImageDraw import numpy as np from matplotlib import pyplot as plt import tensorflow as tf import tensorflow_datasets as tfds from sklearn.model_selection import train_test_split print("Tensorflow version " + tf.__version__) ###Output _____no_output_____ ###Markdown Download the dataset [M2NIST](https://www.kaggle.com/farhanhubble/multimnistm2nist) is a **multi digit** [MNIST](http://yann.lecun.com/exdb/mnist/). Each image has up to 3 digits from MNIST digits and the corresponding labels file has the segmentation masks.The dataset is available on [Kaggle](https://www.kaggle.com) and you can find it [here](https://www.kaggle.com/farhanhubble/multimnistm2nist)To make it easier for you, we're hosting it on Google Cloud so you can download without Kaggle credentials. ###Code # download zipped dataset !wget --no-check-certificate \ https://storage.googleapis.com/laurencemoroney-blog.appspot.com/m2nist.zip \ -O /tmp/m2nist.zip # find and extract to a local folder ('/tmp/training') local_zip = '/tmp/m2nist.zip' zip_ref = zipfile.ZipFile(local_zip, 'r') zip_ref.extractall('/tmp/training') zip_ref.close() ###Output _____no_output_____ ###Markdown Load and Preprocess the Dataset This dataset can be easily preprocessed since it is available as **Numpy Array Files (.npy)**1. **combined.npy** has the image files containing the multiple MNIST digits. Each image is of size **64 x 84** (height x width, in pixels).2. **segmented.npy** has the corresponding segmentation masks. Each segmentation mask is also of size **64 x 84**.This dataset has **5000** samples and you can make appropriate training, validation, and test splits as required for the problem.With that, let's define a few utility functions for loading and preprocessing the dataset. 
###Code BATCH_SIZE = 32 def read_image_and_annotation(image, annotation): ''' Casts the image and annotation to their expected data type and normalizes the input image so that each pixel is in the range [-1, 1] Args: image (numpy array) -- input image annotation (numpy array) -- ground truth label map Returns: preprocessed image-annotation pair ''' image = tf.cast(image, dtype=tf.float32) image = tf.reshape(image, (image.shape[0], image.shape[1], 1,)) annotation = tf.cast(annotation, dtype=tf.int32) image = image / 127.5 image -= 1 return image, annotation def get_training_dataset(images, annos): ''' Prepares shuffled batches of the training set. Args: images (list of strings) -- paths to each image file in the train set annos (list of strings) -- paths to each label map in the train set Returns: tf Dataset containing the preprocessed train set ''' training_dataset = tf.data.Dataset.from_tensor_slices((images, annos)) training_dataset = training_dataset.map(read_image_and_annotation) training_dataset = training_dataset.shuffle(512, reshuffle_each_iteration=True) training_dataset = training_dataset.batch(BATCH_SIZE) training_dataset = training_dataset.repeat() training_dataset = training_dataset.prefetch(-1) return training_dataset def get_validation_dataset(images, annos): ''' Prepares batches of the validation set. Args: images (list of strings) -- paths to each image file in the val set annos (list of strings) -- paths to each label map in the val set Returns: tf Dataset containing the preprocessed validation set ''' validation_dataset = tf.data.Dataset.from_tensor_slices((images, annos)) validation_dataset = validation_dataset.map(read_image_and_annotation) validation_dataset = validation_dataset.batch(BATCH_SIZE) validation_dataset = validation_dataset.repeat() return validation_dataset def get_test_dataset(images, annos): ''' Prepares batches of the test set. Args: images (list of strings) -- paths to each image file in the test set annos (list of strings) -- paths to each label map in the test set Returns: tf Dataset containing the preprocessed validation set ''' test_dataset = tf.data.Dataset.from_tensor_slices((images, annos)) test_dataset = test_dataset.map(read_image_and_annotation) test_dataset = test_dataset.batch(BATCH_SIZE, drop_remainder=True) return test_dataset def load_images_and_segments(): ''' Loads the images and segments as numpy arrays from npy files and makes splits for training, validation and test datasets. Returns: 3 tuples containing the train, val, and test splits ''' #Loads images and segmentation masks. images = np.load('/tmp/training/combined.npy') segments = np.load('/tmp/training/segmented.npy') #Makes training, validation, test splits from loaded images and segmentation masks. train_images, val_images, train_annos, val_annos = train_test_split(images, segments, test_size=0.2, shuffle=True) val_images, test_images, val_annos, test_annos = train_test_split(val_images, val_annos, test_size=0.2, shuffle=True) return (train_images, train_annos), (val_images, val_annos), (test_images, test_annos) ###Output _____no_output_____ ###Markdown You can now load the preprocessed dataset and define the training, validation, and test sets. ###Code # Load Dataset train_slices, val_slices, test_slices = load_images_and_segments() # Create training, validation, test datasets. 
training_dataset = get_training_dataset(train_slices[0], train_slices[1]) validation_dataset = get_validation_dataset(val_slices[0], val_slices[1]) test_dataset = get_test_dataset(test_slices[0], test_slices[1]) ###Output _____no_output_____ ###Markdown Let's Take a Look at the DatasetYou may want to visually inspect the dataset before and after training. Like above, we've included utility functions to help show a few images as well as their annotations (i.e. labels). ###Code # Visualization Utilities # there are 11 classes in the dataset: one class for each digit (0 to 9) plus the background class n_classes = 11 # assign a random color for each class colors = [tuple(np.random.randint(256, size=3) / 255.0) for i in range(n_classes)] def fuse_with_pil(images): ''' Creates a blank image and pastes input images Args: images (list of numpy arrays) - numpy array representations of the images to paste Returns: PIL Image object containing the images ''' widths = (image.shape[1] for image in images) heights = (image.shape[0] for image in images) total_width = sum(widths) max_height = max(heights) new_im = PIL.Image.new('RGB', (total_width, max_height)) x_offset = 0 for im in images: pil_image = PIL.Image.fromarray(np.uint8(im)) new_im.paste(pil_image, (x_offset,0)) x_offset += im.shape[1] return new_im def give_color_to_annotation(annotation): ''' Converts a 2-D annotation to a numpy array with shape (height, width, 3) where the third axis represents the color channel. The label values are multiplied by 255 and placed in this axis to give color to the annotation Args: annotation (numpy array) - label map array Returns: the annotation array with an additional color channel/axis ''' seg_img = np.zeros( (annotation.shape[0],annotation.shape[1], 3) ).astype('float') for c in range(n_classes): segc = (annotation == c) seg_img[:,:,0] += segc*( colors[c][0] * 255.0) seg_img[:,:,1] += segc*( colors[c][1] * 255.0) seg_img[:,:,2] += segc*( colors[c][2] * 255.0) return seg_img def show_annotation_and_prediction(image, annotation, prediction, iou_list, dice_score_list): ''' Displays the images with the ground truth and predicted label maps. Also overlays the metrics. 
Args: image (numpy array) -- the input image annotation (numpy array) -- the ground truth label map prediction (numpy array) -- the predicted label map iou_list (list of floats) -- the IOU values for each class dice_score_list (list of floats) -- the Dice Score for each class ''' new_ann = np.argmax(annotation, axis=2) true_img = give_color_to_annotation(new_ann) pred_img = give_color_to_annotation(prediction) image = image + 1 image = image * 127.5 image = np.reshape(image, (image.shape[0], image.shape[1],)) image = np.uint8(image) images = [image, np.uint8(pred_img), np.uint8(true_img)] metrics_by_id = [(idx, iou, dice_score) for idx, (iou, dice_score) in enumerate(zip(iou_list, dice_score_list)) if iou > 0.0 and idx < 10] metrics_by_id.sort(key=lambda tup: tup[1], reverse=True) # sorts in place display_string_list = ["{}: IOU: {} Dice Score: {}".format(idx, iou, dice_score) for idx, iou, dice_score in metrics_by_id] display_string = "\n".join(display_string_list) plt.figure(figsize=(15, 4)) for idx, im in enumerate(images): plt.subplot(1, 3, idx+1) if idx == 1: plt.xlabel(display_string) plt.xticks([]) plt.yticks([]) plt.imshow(im) def show_annotation_and_image(image, annotation): ''' Displays the image and its annotation side by side Args: image (numpy array) -- the input image annotation (numpy array) -- the label map ''' new_ann = np.argmax(annotation, axis=2) seg_img = give_color_to_annotation(new_ann) image = image + 1 image = image * 127.5 image = np.reshape(image, (image.shape[0], image.shape[1],)) image = np.uint8(image) images = [image, seg_img] fused_img = fuse_with_pil(images) plt.imshow(fused_img) def list_show_annotation(dataset, num_images): ''' Displays images and their annotations side by side Args: dataset (tf Dataset) -- batch of images and annotations num_images (int) -- number of images to display ''' ds = dataset.unbatch() plt.figure(figsize=(20, 15)) plt.title("Images And Annotations") plt.subplots_adjust(bottom=0.1, top=0.9, hspace=0.05) for idx, (image, annotation) in enumerate(ds.take(num_images)): plt.subplot(5, 5, idx + 1) plt.yticks([]) plt.xticks([]) show_annotation_and_image(image.numpy(), annotation.numpy()) ###Output _____no_output_____ ###Markdown You can view a subset of the images from the dataset with the `list_show_annotation()` function defined above. Run the cells below to see the image on the left and its pixel-wise ground truth label map on the right. ###Code # get 10 images from the training set list_show_annotation(training_dataset, 10) # get 10 images from the validation set list_show_annotation(validation_dataset, 10) ###Output _____no_output_____ ###Markdown You see from the images above the colors assigned to each class (i.e. 0 to 9 plus the background). If you don't like these colors, feel free to rerun the cell where `colors` is defined to get another set of random colors. Alternatively, you can assign the RGB values for each class instead of relying on random values. Define the Model As discussed in the lectures, the image segmentation model will have two paths:1. **Downsampling Path** - This part of the network extracts the features in the image. This is done through a series of convolution and pooling layers. The final output is a reduced image (because of the pooling layers) with the extracted features. You will build a custom CNN from scratch for this path.2.
**Upsampling Path** - This takes the output of the downsampling path and generates the predictions while also converting the image back to its original size. You will use an FCN-8 decoder for this path. Define the Basic Convolution Block **Exercise 1**Please complete the function below to build the basic convolution block for our CNN. This will have two [Conv2D](https://keras.io/api/layers/convolution_layers/convolution2d/) layers each followed by a [LeakyReLU](https://keras.io/api/layers/activation_layers/leaky_relu/), then [max pooled](https://keras.io/api/layers/pooling_layers/max_pooling2d/) and [batch-normalized](https://keras.io/api/layers/normalization_layers/batch_normalization/). Use the functional syntax to stack these layers.$$Input -> Conv2D -> LeakyReLU -> Conv2D -> LeakyReLU -> MaxPooling2D -> BatchNormalization$$When defining the Conv2D layers, note that our data inputs will have the 'channels' dimension last. You may want to check the `data_format` argument in the [docs](https://keras.io/api/layers/convolution_layers/convolution2d/) regarding this. Take note of the `padding` argument too like you did in the ungraded labs.Lastly, to use the `LeakyReLU` activation, you **do not** need to nest it inside an `Activation` layer (e.g. `x = tf.keras.layers.Activation(tf.keras.layers.LeakyReLU()(x)`). You can simply stack the layer directly instead (e.g. `x = tf.keras.layers.LeakyReLU()(x)`) ###Code # parameter describing where the channel dimension is found in our dataset IMAGE_ORDERING = 'channels_last' def conv_block(input, filters, kernel_size, pooling_size, pool_strides): ''' Args: input (tensor) -- batch of images or features filters (int) -- number of filters of the Conv2D layers kernel_size (int) -- kernel_size setting of the Conv2D layers pooling_size (int) -- pooling size of the MaxPooling2D layers pool_strides (int) -- strides setting of the MaxPooling2D layers Returns: (tensor) max pooled and batch-normalized features of the input ''' ### START CODE HERE ### # use the functional syntax to stack the layers as shown in the diagram above x = tf.keras.layers.Conv2D(filters, kernel_size, padding='same', data_format=IMAGE_ORDERING)(input) x = tf.keras.layers.LeakyReLU()(x) x = tf.keras.layers.Conv2D(filters, kernel_size, padding='same', data_format=IMAGE_ORDERING)(x) x = tf.keras.layers.LeakyReLU()(x) x = tf.keras.layers.MaxPooling2D(pool_size=(pooling_size, pooling_size), strides=pool_strides)(x) x = tf.keras.layers.BatchNormalization()(x) ### END CODE HERE ### return x # TEST CODE: test_input = tf.keras.layers.Input(shape=(64,84, 1)) test_output = conv_block(test_input, 32, 3, 2, 2) test_model = tf.keras.Model(inputs=test_input, outputs=test_output) print(test_model.summary()) # free up test resources del test_input, test_output, test_model ###Output _____no_output_____ ###Markdown **Expected Output**:Please pay attention to the *(type)* and *Output Shape* columns. The *Layer* name beside the type may be different depending on how many times you ran the cell (e.g. 
`input_7` can be `input_1`)```txtModel: "functional_1"_________________________________________________________________Layer (type) Output Shape Param =================================================================input_1 (InputLayer) [(None, 64, 84, 1)] 0 _________________________________________________________________conv2d (Conv2D) (None, 64, 84, 32) 320 _________________________________________________________________leaky_re_lu (LeakyReLU) (None, 64, 84, 32) 0 _________________________________________________________________conv2d_1 (Conv2D) (None, 64, 84, 32) 9248 _________________________________________________________________leaky_re_lu_1 (LeakyReLU) (None, 64, 84, 32) 0 _________________________________________________________________max_pooling2d (MaxPooling2D) (None, 32, 42, 32) 0 _________________________________________________________________batch_normalization (BatchNo (None, 32, 42, 32) 128 =================================================================Total params: 9,696Trainable params: 9,632Non-trainable params: 64_________________________________________________________________None``` Define the Downsampling Path **Exercise 2**Now that we've defined the building block of our encoder, you can now build the downsampling path. Please complete the function below to create the encoder. This should chain together five convolution building blocks to create a feature extraction CNN minus the fully connected layers.*Notes*: 1. To optimize processing or to make the output dimensions of each layer easier to work with, it is sometimes advisable to apply some zero-padding to the input image. With the boilerplate code we have provided below, we have padded the input width to 96 pixels using the [ZeroPadding2D layer](https://keras.io/api/layers/reshaping_layers/zero_padding2d/). This works well if you're going to use the first ungraded lab of this week as reference. This is not required however. You can remove it later and see how it will affect your parameters. For instance, you might need to pass in a non-square kernel size to the decoder in Exercise 3 (e.g. `(4,5)`) to match the output dimensions of Exercise 2. 2. We recommend keeping the pool size and stride parameters constant at 2. ###Code def FCN8(input_height=64, input_width=84): ''' Defines the downsampling path of the image segmentation model. 
Args: input_height (int) -- height of the images width (int) -- width of the images Returns: (tuple of tensors, tensor) tuple of tensors -- features extracted at blocks 3 to 5 tensor -- copy of the input ''' img_input = tf.keras.layers.Input(shape=(input_height,input_width, 1)) ### START CODE HERE ### # pad the input image width to 96 pixels x = tf.keras.layers.ZeroPadding2D(((0, 0), (0, 96-input_width)))(img_input) # Block 1 x = conv_block(x, 32, 5, 2, 2) # Block 2 x = conv_block(x, 64, 5, 2, 2) # Block 3 x = conv_block(x, 128, 5, 2, 2) # save the feature map at this stage f3 = x # Block 4 x = conv_block(x, 256, 5, 2, 2) # save the feature map at this stage f4 = x # Block 5 x = conv_block(x, 256, 5, 2, 2) # save the feature map at this stage f5 = x ### END CODE HERE ### return (f3, f4, f5), img_input # TEST CODE: test_convs, test_img_input = FCN8() test_model = tf.keras.Model(inputs=test_img_input, outputs=[test_convs, test_img_input]) print(test_model.summary()) del test_convs, test_img_input, test_model ###Output _____no_output_____ ###Markdown **Expected Output**:You should see the layers of your `conv_block()` being repeated 5 times like the output below.```txtModel: "functional_3"_________________________________________________________________Layer (type) Output Shape Param =================================================================input_3 (InputLayer) [(None, 64, 84, 1)] 0 _________________________________________________________________zero_padding2d (ZeroPadding2 (None, 64, 96, 1) 0 _________________________________________________________________conv2d_2 (Conv2D) (None, 64, 96, 32) 320 _________________________________________________________________leaky_re_lu_2 (LeakyReLU) (None, 64, 96, 32) 0 _________________________________________________________________conv2d_3 (Conv2D) (None, 64, 96, 32) 9248 _________________________________________________________________leaky_re_lu_3 (LeakyReLU) (None, 64, 96, 32) 0 _________________________________________________________________max_pooling2d_1 (MaxPooling2 (None, 32, 48, 32) 0 _________________________________________________________________batch_normalization_1 (Batch (None, 32, 48, 32) 128 _________________________________________________________________conv2d_4 (Conv2D) (None, 32, 48, 64) 18496 _________________________________________________________________leaky_re_lu_4 (LeakyReLU) (None, 32, 48, 64) 0 _________________________________________________________________conv2d_5 (Conv2D) (None, 32, 48, 64) 36928 _________________________________________________________________leaky_re_lu_5 (LeakyReLU) (None, 32, 48, 64) 0 _________________________________________________________________max_pooling2d_2 (MaxPooling2 (None, 16, 24, 64) 0 _________________________________________________________________batch_normalization_2 (Batch (None, 16, 24, 64) 256 _________________________________________________________________conv2d_6 (Conv2D) (None, 16, 24, 128) 73856 _________________________________________________________________leaky_re_lu_6 (LeakyReLU) (None, 16, 24, 128) 0 _________________________________________________________________conv2d_7 (Conv2D) (None, 16, 24, 128) 147584 _________________________________________________________________leaky_re_lu_7 (LeakyReLU) (None, 16, 24, 128) 0 _________________________________________________________________max_pooling2d_3 (MaxPooling2 (None, 8, 12, 128) 0 _________________________________________________________________batch_normalization_3 (Batch (None, 8, 12, 128) 512 
_________________________________________________________________conv2d_8 (Conv2D) (None, 8, 12, 256) 295168 _________________________________________________________________leaky_re_lu_8 (LeakyReLU) (None, 8, 12, 256) 0 _________________________________________________________________conv2d_9 (Conv2D) (None, 8, 12, 256) 590080 _________________________________________________________________leaky_re_lu_9 (LeakyReLU) (None, 8, 12, 256) 0 _________________________________________________________________max_pooling2d_4 (MaxPooling2 (None, 4, 6, 256) 0 _________________________________________________________________batch_normalization_4 (Batch (None, 4, 6, 256) 1024 _________________________________________________________________conv2d_10 (Conv2D) (None, 4, 6, 256) 590080 _________________________________________________________________leaky_re_lu_10 (LeakyReLU) (None, 4, 6, 256) 0 _________________________________________________________________conv2d_11 (Conv2D) (None, 4, 6, 256) 590080 _________________________________________________________________leaky_re_lu_11 (LeakyReLU) (None, 4, 6, 256) 0 _________________________________________________________________max_pooling2d_5 (MaxPooling2 (None, 2, 3, 256) 0 _________________________________________________________________batch_normalization_5 (Batch (None, 2, 3, 256) 1024 =================================================================Total params: 2,354,784Trainable params: 2,353,312Non-trainable params: 1,472_________________________________________________________________None``` Define the FCN-8 decoder **Exercise 3**Now you can define the upsampling path taking the outputs of convolutions at each stage as arguments. This will be very similar to what you did in the ungraded lab (VGG16-FCN8-CamVid) so you can refer to it if you need a refresher. * Note: remember to set the `data_format` parameter for the Conv2D layers. Here is also the diagram you saw in class on how it should work: ###Code def fcn8_decoder(convs, n_classes): # features from the encoder stage f3, f4, f5 = convs # number of filters n = 512 # add convolutional layers on top of the CNN extractor. 
o = tf.keras.layers.Conv2D(n , (7 , 7) , activation='relu' , padding='same', name="conv6", data_format=IMAGE_ORDERING)(f5) o = tf.keras.layers.Dropout(0.5)(o) o = tf.keras.layers.Conv2D(n , (1 , 1) , activation='relu' , padding='same', name="conv7", data_format=IMAGE_ORDERING)(o) o = tf.keras.layers.Dropout(0.5)(o) o = tf.keras.layers.Conv2D(n_classes, (1, 1), activation='relu' , padding='same', data_format=IMAGE_ORDERING)(o) ### START CODE HERE ### # Upsample `o` above and crop any extra pixels introduced o = tf.keras.layers.Conv2DTranspose(n_classes, kernel_size=(4, 4), strides=(2, 2))(o) o = tf.keras.layers.Cropping2D(cropping=(1, 1))(o) # load the pool 4 prediction and do a 1x1 convolution to reshape it to the same shape of `o` above o2 = f4 o2 = tf.keras.layers.Conv2D(n_classes, (1, 1), activation='relu', padding='same', data_format=IMAGE_ORDERING)(o2) # add the results of the upsampling and pool 4 prediction o = tf.keras.layers.Add()([o, o2]) # upsample the resulting tensor of the operation you just did o = tf.keras.layers.Conv2DTranspose(n_classes, kernel_size=(4, 4), strides=(2, 2))(o) o = tf.keras.layers.Cropping2D(cropping=(1, 1))(o) # load the pool 3 prediction and do a 1x1 convolution to reshape it to the same shape of `o` above o2 = f3 o2 = tf.keras.layers.Conv2D(n_classes , ( 1 , 1 ) , activation='relu' , padding='same', data_format=IMAGE_ORDERING)(o2) # add the results of the upsampling and pool 3 prediction o = tf.keras.layers.Add()([o, o2]) # upsample up to the size of the original image o = tf.keras.layers.Conv2DTranspose(n_classes, kernel_size=(8, 8), strides=(8, 8))(o) o = tf.keras.layers.Cropping2D(((0, 0), (0, 96-84)))(o) # append a sigmoid activation o = (tf.keras.layers.Activation('sigmoid'))(o) ### END CODE HERE ### return o # TEST CODE test_convs, test_img_input = FCN8() test_fcn8_decoder = fcn8_decoder(test_convs, 11) print(test_fcn8_decoder.shape) del test_convs, test_img_input, test_fcn8_decoder ###Output _____no_output_____ ###Markdown **Expected Output:**```txt(None, 64, 84, 11)``` Define the Complete ModelThe downsampling and upsampling paths can now be combined as shown below. ###Code # start the encoder using the default input size 64 x 84 convs, img_input = FCN8() # pass the convolutions obtained in the encoder to the decoder dec_op = fcn8_decoder(convs, n_classes) # define the model specifying the input (batch of images) and output (decoder output) model = tf.keras.Model(inputs = img_input, outputs = dec_op) model.summary() ###Output _____no_output_____ ###Markdown Compile the Model **Exercise 4**Compile the model using an appropriate loss, optimizer, and metric._**Note:** There is a current issue with the grader accepting certain loss functions. We will be upgrading it but while in progress, please use this syntax:_```loss=''```*instead of:*```loss=tf.keras.losses.``` ###Code ### START CODE HERE ### model.compile(loss='categorical_crossentropy', optimizer=tf.keras.optimizers.SGD(learning_rate=0.001, momentum=0.9, nesterov=True), metrics=['accuracy']) ### END CODE HERE ### ###Output _____no_output_____ ###Markdown Model Training **Exercise 5**You can now train the model. Set the number of epochs and observe the metrics returned at each iteration. You can also terminate the cell execution if you think your model is performing well already. 
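A quick sanity check on the steps arithmetic below (an editorial note): the expected output further down shows 125 steps per epoch, and since `steps_per_epoch = 4000//BATCH_SIZE`, that implies `BATCH_SIZE` was set to 32 earlier in the notebook — an inference from the expected output, not a value set in this cell.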
###Code # OTHER THAN SETTING THE EPOCHS NUMBER, DO NOT CHANGE ANY OTHER CODE ### START CODE HERE ### EPOCHS = 350 ### END CODE HERE ### steps_per_epoch = 4000//BATCH_SIZE validation_steps = 800//BATCH_SIZE test_steps = 200//BATCH_SIZE history = model.fit(training_dataset, steps_per_epoch=steps_per_epoch, validation_data=validation_dataset, validation_steps=validation_steps, epochs=EPOCHS) ###Output _____no_output_____ ###Markdown **Expected Output:**The losses should generally be decreasing and the accuracies should generally be increasing. For example, observing the first 4 epochs should output something similar:```txtEpoch 1/70125/125 [==============================] - 6s 50ms/step - loss: 0.5542 - accuracy: 0.8635 - val_loss: 0.5335 - val_accuracy: 0.9427Epoch 2/70125/125 [==============================] - 6s 47ms/step - loss: 0.2315 - accuracy: 0.9425 - val_loss: 0.3362 - val_accuracy: 0.9427Epoch 3/70125/125 [==============================] - 6s 47ms/step - loss: 0.2118 - accuracy: 0.9426 - val_loss: 0.2592 - val_accuracy: 0.9427Epoch 4/70125/125 [==============================] - 6s 47ms/step - loss: 0.1782 - accuracy: 0.9431 - val_loss: 0.1770 - val_accuracy: 0.9432``` ###Code import tensorflow.keras.backend as K K.clear_session() ###Output _____no_output_____ ###Markdown Model Evaluation Make PredictionsLet's get the predictions using our test dataset as input and print the shape. ###Code results = model.predict(test_dataset, steps=test_steps) print(results.shape) ###Output _____no_output_____ ###Markdown As you can see, the resulting shape is `(192, 64, 84, 11)`. This means that for each of the 192 images that we have in our test set, there are 11 predictions generated (i.e. one for each class: 0 to 1 plus background). Thus, if you want to see the *probability* of the upper leftmost pixel of the 1st image belonging to class 0, then you can print something like `results[0,0,0,0]`. If you want the probability of the same pixel at class 10, then do `results[0,0,0,10]`. ###Code print(results[0,0,0,0]) print(results[0,0,0,10]) ###Output _____no_output_____ ###Markdown What we're interested in is to get the *index* of the highest probability of each of these 11 slices and combine them in a single image. We can do that by getting the [argmax](https://numpy.org/doc/stable/reference/generated/numpy.argmax.html) at this axis. ###Code results = np.argmax(results, axis=3) print(results.shape) ###Output _____no_output_____ ###Markdown The new array generated per image now only specifies the indices of the class with the highest probability. Let's see the output class of the upper most left pixel. As you might have observed earlier when you inspected the dataset, the upper left corner is usually just part of the background (class 10). The actual digits are written somewhere in the middle parts of the image. ###Code print(results[0,0,0]) # prediction map for image 0 print(results[0,:,:]) ###Output _____no_output_____ ###Markdown We will use this `results` array when we evaluate our predictions. MetricsWe showed in the lectures two ways to evaluate your predictions. The *intersection over union (IOU)* and the *dice score*. Recall that:$$IOU = \frac{area\_of\_overlap}{area\_of\_union}$$$$Dice Score = 2 * \frac{area\_of\_overlap}{combined\_area}$$The code below does that for you as you've also seen in the ungraded lab. A small smoothing factor is introduced in the denominators to prevent possible division by zero. 
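For intuition, a quick worked example with made-up numbers: if a class has 30 overlapping pixels, a predicted area of 50 pixels and a ground-truth area of 60 pixels, then$$IOU = \frac{30}{50 + 60 - 30} = 0.375$$$$Dice Score = 2 * \frac{30}{50 + 60} \approx 0.545$$The smoothing factor barely changes these values; it only matters when a class is absent from both the prediction and the label map.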
###Code def class_wise_metrics(y_true, y_pred): ''' Computes the class-wise IOU and Dice Score. Args: y_true (tensor) - ground truth label maps y_pred (tensor) - predicted label maps ''' class_wise_iou = [] class_wise_dice_score = [] smoothing_factor = 0.00001 for i in range(n_classes): intersection = np.sum((y_pred == i) * (y_true == i)) y_true_area = np.sum((y_true == i)) y_pred_area = np.sum((y_pred == i)) combined_area = y_true_area + y_pred_area iou = (intersection) / (combined_area - intersection + smoothing_factor) class_wise_iou.append(iou) dice_score = 2 * ((intersection) / (combined_area + smoothing_factor)) class_wise_dice_score.append(dice_score) return class_wise_iou, class_wise_dice_score ###Output _____no_output_____ ###Markdown Visualize Predictions ###Code # place a number here between 0 to 191 to pick an image from the test set integer_slider = 105 ds = test_dataset.unbatch() ds = ds.batch(200) images = [] y_true_segments = [] for image, annotation in ds.take(2): y_true_segments = annotation images = image iou, dice_score = class_wise_metrics(np.argmax(y_true_segments[integer_slider], axis=2), results[integer_slider]) show_annotation_and_prediction(image[integer_slider], annotation[integer_slider], results[integer_slider], iou, dice_score) ###Output _____no_output_____ ###Markdown Compute IOU Score and Dice Score of your model ###Code cls_wise_iou, cls_wise_dice_score = class_wise_metrics(np.argmax(y_true_segments, axis=3), results) average_iou = 0.0 for idx, (iou, dice_score) in enumerate(zip(cls_wise_iou[:-1], cls_wise_dice_score[:-1])): print("Digit {}: IOU: {} Dice Score: {}".format(idx, iou, dice_score)) average_iou += iou grade = average_iou * 10 print("\nGrade is " + str(grade)) PASSING_GRADE = 60 if (grade>PASSING_GRADE): print("You passed!") else: print("You failed. Please check your model and re-train") ###Output _____no_output_____ ###Markdown Save the ModelOnce you're satisfied with the results, you will need to save your model so you can upload it to the grader in the Coursera classroom. After running the cell below, please look for `student_model.h5` in the File Explorer on the left and download it. Then go back to the Coursera classroom and upload it to the Lab item that points to the autograder of Week 3. ###Code model.save("model.h5") # You can also use this cell as a shortcut for downloading your model from google.colab import files files.download("model.h5") ###Output _____no_output_____
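###Markdown As an optional sanity check (an editorial suggestion, not part of the original assignment), you can reload the saved file to confirm that it round-trips: ###Code
# optional: confirm the saved model loads back cleanly
reloaded = tf.keras.models.load_model("model.h5")
reloaded.summary()
###Output _____no_output_____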
Moringa_Data_Science_Prep_W3_Independent_Project_2021_09_Lawrence_Ondieki_Python_Notebook.ipynb
###Markdown ###Code ###Output _____no_output_____ ###Markdown MTN Cote d'Ivoire infrastructure upgrade strategy ###Code ###Output _____no_output_____ ###Markdown OverviewHow will MTN Cote d'Ivoire go about upgrading its infrastructure? Import Numpy and Pandas library ###Code # Numpy and Pandas import numpy as np import pandas as pd ###Output _____no_output_____ ###Markdown Cells DescriptionThe cell data structure is in an excel file, cell_geo_description.xlsx.We import the file into a Pandas DataFrame to understand the data structure. ###Code with pd.ExcelFile("cells_geo_description.xlsx") as xls: cdesc_df = pd.read_excel(xls, "Sheet1") cdesc_df ###Output _____no_output_____ ###Markdown CELLS are hosted in SITES that are located in CITIES in a REGION of Cote d'Ivoire Import Cell details into a dataframe Cells ###Code # Loading our dataframe from the CSV file with open('cells_geo.csv','r') as f: Cells = pd.read_csv(f,sep=';',index_col=0, encoding='utf-8') Cells ###Output _____no_output_____ ###Markdown Call Data Record Description (data structure) ###Code with pd.ExcelFile("CDR_description.xlsx") as xls: cdr_desc_df = pd.read_excel(xls, "Sheet1") cdr_desc_df ###Output _____no_output_____ ###Markdown Call Data Records(CDR) - Analysis CDR for the first day, from late 2012-05-06 night to early 2012-05-07 morning ###Code #import data from csv file and make a dataframe. with open('Telcom_dataset.csv','r') as f: ds1_df = pd.read_csv(f, index_col=False, encoding='utf-8') ds1_df # Most used site in late night 2012-05-06 to early morning 2012-05-07 most_used_site=(ds1_df.groupby(['SITE_ID']).sum().loc[lambda df: df['VALUE'] > 0]).sort_values('VALUE',ascending=False) most_df=pd.DataFrame(most_used_site) most_df.head() Cells.set_index('SITE_CODE') city_df=pd.merge(Cells,most_df,left_on='SITE_CODE', right_on='SITE_ID') most_used_city=(city_df.groupby(['VILLES']).sum().loc[lambda df: df['VALUE'] > 0]).sort_values('VALUE',ascending=False) most_used_city.head() ds1_df.loc[(pd.to_datetime(ds1_df['DATETIME']) > pd.Timestamp('2012-05-07 00:00')) & (pd.to_datetime(ds1_df['DATETIME']) < pd.Timestamp('2012-05-07 17:00'))] ds1_df['SITE_ID'] ###Output _____no_output_____ ###Markdown CDR 20120508 ###Code with open('Telcom_dataset2.csv','r') as f: ds2_df = pd.read_csv(f, index_col=0, encoding='utf-8') ds2_df # Most used site in late night 2012-05-07 to early morning 2012-05-08 most_used_site2=(ds2_df.groupby(['SITE_ID']).sum().loc[lambda df: df['VALUE'] > 0]).sort_values('VALUE',ascending=False) most_df2=pd.DataFrame(most_used_site2) most_df2.head() # bug fix: use the day-2 site totals (most_df2) here, not day 1's most_df city_df2=pd.merge(Cells,most_df2,left_on='SITE_CODE', right_on='SITE_ID') most_used_city2=(city_df2.groupby(['VILLES']).sum().loc[lambda df: df['VALUE'] > 0]).sort_values('VALUE',ascending=False) most_used_city2.head() ds2_df.loc[(pd.to_datetime(ds2_df['DATE_TIME']) > pd.Timestamp('2012-05-08 00:00')) & (pd.to_datetime(ds2_df['DATE_TIME']) < pd.Timestamp('2012-05-08 17:00'))] ###Output _____no_output_____ ###Markdown CDR 20120509 ###Code with open('Telcom_dataset3.csv','r') as f: ds3_df = pd.read_csv(f, index_col=0, encoding='utf-8') ds3_df # Most used site in late night 2012-05-08 to early morning 2012-05-09 # note: this day's file names the site column 'SIET_ID' (sic), so that spelling is kept below most_used_site3=(ds3_df.groupby(['SIET_ID']).sum().loc[lambda df: df['VALUE'] > 0]).sort_values('VALUE',ascending=False) most_df3=pd.DataFrame(most_used_site3) most_df3.head() city_df3=pd.merge(Cells,most_df3,left_on='SITE_CODE', right_on='SIET_ID') most_used_city3=(city_df3.groupby(['VILLES']).sum().loc[lambda df: df['VALUE'] >
0]).sort_values('VALUE',ascending=False) most_used_city3.head() ds3_df.loc[(pd.to_datetime(ds3_df['DATE_TIME']) > pd.Timestamp('2012-05-09 00:00')) & (pd.to_datetime(ds3_df['DATE_TIME']) < pd.Timestamp('2012-05-09 17:00'))] ###Output _____no_output_____
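###Markdown A minimal concluding sketch (an editorial addition; it assumes the three `most_used_city*` frames above are all indexed by `VILLES` with a summed `VALUE` column): combining the three days gives a first ranking of the cities MTN could prioritise for upgrades. ###Code
# stack the three daily city rankings and re-aggregate across days
all_days = pd.concat([most_used_city, most_used_city2, most_used_city3])
# total activity per city over the three days, highest first
upgrade_candidates = all_days.groupby(level=0)['VALUE'].sum().sort_values(ascending=False)
upgrade_candidates.head(10)
###Output _____no_output_____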
Notebooks/3. Model | Cosine Distance | Single Variable Keyword Extraction Method.ipynb
###Markdown Name: Deepak Vadithala Course: MSc Data Science Project Name: MOOC Recommender System Notes:This notebook contains the analysis of the **Cosine Similarity** model. Multiple variables **(Role and Skill Scores)** are used to predict the course category. **Role and skill** are combined together. Noise was removed from the course description field, keeping only the keywords which are important.Skill Score is calculated using the similarity between the skills from LinkedIn and the keyword-filtered course description from Coursera.*Model Source Code Path: /mooc-recommender/Model/Cosine_Distance.py**Github Repo: https://github.com/iamdv/mooc-recommender* ###Code # **************************** IMPORTANT **************************** ''' This cell contains the configuration settings for the Notebook. You can run one role at a time to evaluate the performance of the model. Change the variable names to run for multiple roles. In this model: 1. cosine distance is calculated between the skills and the course description, with a weight of 70%. Each skill has a weighted score based on the popularity of the skill, derived from endorsements of the respective skill by other LinkedIn connections. 2. cosine distance is calculated between the role and the course name, with a weight of 30%. ''' # ******************************************************************* # For each role a list of category names is grouped. # Please don't change these variables label_DataScientist = ['Data Science','Data Analysis','Data Mining','Data Visualization'] label_SoftwareDevelopment = ['Software Development','Computer Science', 'Programming Languages', 'Algorithms and Data Structures', 'Information Technology'] label_DatabaseAdministrator = ['Databases'] label_Cybersecurity = ['Cybersecurity'] label_FinancialAccountant = ['Finance', 'Accounting'] label_MachineLearning = ['Machine Learning', 'Deep Learning'] label_Musician = ['Music'] label_Dietitian = ['Nutrition & Wellness'] label_Psychologist = ['Psychology'] # ******************************************************************* # ******************************************************************* # Environment and Config Variables. Change these variables as required. my_fpath_courses = "../Data/main_coursera.csv" my_fpath_skills_DataScientist = "../Data/Cosine-Distance/Single-Variable/CosDist_DataScientist.csv" my_fpath_skills_SoftwareDevelopment = "../Data/Cosine-Distance/Single-Variable/CosDist_SoftwareDevelopment.csv" my_fpath_skills_DatabaseAdministrator = "../Data/Cosine-Distance/Single-Variable/CosDist_DatabaseAdministrator.csv" my_fpath_skills_Cybersecurity = "../Data/Cosine-Distance/Single-Variable/CosDist_Cybersecurity.csv" my_fpath_skills_FinancialAccountant = "../Data/Cosine-Distance/Single-Variable/CosDist_FinancialAccountant.csv" my_fpath_skills_MachineLearning = "../Data/Cosine-Distance/Single-Variable/CosDist_MachineLearning.csv" my_fpath_skills_Musician = "../Data/Cosine-Distance/Single-Variable/CosDist_Musician.csv" my_fpath_skills_Dietitian = "../Data/Cosine-Distance/Single-Variable/CosDist_Dietitian.csv" my_fpath_skills_Psychologist = "../Data/Cosine-Distance/Single-Variable/CosDist_Psychologist.csv" # ******************************************************************* # ******************************************************************* # Weighting Variables. Change them as per the requirement.
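# NOTE (editorial): the docstring above describes the general 70/30 role/skill weighting;
# this 'single variable' notebook instead sets the role weight to 0 below, so only the
# keyword-based score drives the predictions evaluated here.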
my_role_weight = 0 my_skill_weight = 1 # ******************************************************************* # Importing required modules/packages import pandas as pd import numpy as np from matplotlib import pyplot as plt from sklearn.feature_extraction.text import TfidfVectorizer import nltk, string # Downloading the stopwords like i, me, and, is, the etc. nltk.download('stopwords') # Loading courses and skills data from the CSV files df_courses = pd.read_csv(my_fpath_courses) df_DataScientist = pd.read_csv(my_fpath_skills_DataScientist) df_DataScientist = df_DataScientist.drop('Role', 1) df_DataScientist.columns = ['Course Id', 'DataScientist_Skill_Score', 'DataScientist_Role_Score', 'DataScientist_Keyword_Score'] df_SoftwareDevelopment = pd.read_csv(my_fpath_skills_SoftwareDevelopment) df_SoftwareDevelopment = df_SoftwareDevelopment.drop('Role', 1) df_SoftwareDevelopment.columns = ['Course Id','SoftwareDevelopment_Skill_Score', 'SoftwareDevelopment_Role_Score', 'SoftwareDevelopment_Keyword_Score'] df_DatabaseAdministrator = pd.read_csv(my_fpath_skills_DatabaseAdministrator) df_DatabaseAdministrator = df_DatabaseAdministrator.drop('Role', 1) df_DatabaseAdministrator.columns = ['Course Id','DatabaseAdministrator_Skill_Score', 'DatabaseAdministrator_Role_Score', 'DatabaseAdministrator_Keyword_Score'] df_Cybersecurity = pd.read_csv(my_fpath_skills_Cybersecurity) df_Cybersecurity = df_Cybersecurity.drop('Role', 1) df_Cybersecurity.columns = ['Course Id','Cybersecurity_Skill_Score', 'Cybersecurity_Role_Score', 'Cybersecurity_Keyword_Score'] df_FinancialAccountant = pd.read_csv(my_fpath_skills_FinancialAccountant) df_FinancialAccountant = df_FinancialAccountant.drop('Role', 1) df_FinancialAccountant.columns = ['Course Id','FinancialAccountant_Skill_Score', 'FinancialAccountant_Role_Score', 'FinancialAccountant_Keyword_Score'] df_MachineLearning = pd.read_csv(my_fpath_skills_MachineLearning) df_MachineLearning = df_MachineLearning.drop('Role', 1) df_MachineLearning.columns = ['Course Id','MachineLearning_Skill_Score', 'MachineLearning_Role_Score', 'MachineLearning_Keyword_Score'] df_Musician = pd.read_csv(my_fpath_skills_Musician) df_Musician = df_Musician.drop('Role', 1) df_Musician.columns = ['Course Id','Musician_Skill_Score', 'Musician_Role_Score', 'Musician_Keyword_Score'] df_Dietitian = pd.read_csv(my_fpath_skills_Dietitian) df_Dietitian = df_Dietitian.drop('Role', 1) df_Dietitian.columns = ['Course Id','Dietitian_Skill_Score', 'Dietitian_Role_Score','Dietitian_Keyword_Score'] df_Psychologist = pd.read_csv(my_fpath_skills_Psychologist) df_Psychologist = df_Psychologist.drop('Role', 1) df_Psychologist.columns = ['Course Id','Psychologist_Skill_Score', 'Psychologist_Role_Score', 'Psychologist_Keyword_Score'] # Merging the csv files df_cosdist = df_DataScientist.merge(df_SoftwareDevelopment, on = 'Course Id', how = 'outer') df_cosdist = df_cosdist.merge(df_DatabaseAdministrator, on = 'Course Id', how = 'outer') df_cosdist = df_cosdist.merge(df_Cybersecurity, on = 'Course Id', how = 'outer') df_cosdist = df_cosdist.merge(df_FinancialAccountant, on = 'Course Id', how = 'outer') df_cosdist = df_cosdist.merge(df_MachineLearning, on = 'Course Id', how = 'outer') df_cosdist = df_cosdist.merge(df_Musician, on = 'Course Id', how = 'outer') df_cosdist = df_cosdist.merge(df_Dietitian, on = 'Course Id', how = 'outer') df_cosdist = df_cosdist.merge(df_Psychologist, on = 'Course Id', how = 'outer') # Exploring data dimensionality, feature names, and feature types. 
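# NOTE (editorial): the outer merges above keep every course that appears in any per-role
# score file, so courses missing from one file will carry NaN scores; a quick check such as
# df_cosdist.isna().sum() before scoring can flag this.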
print(df_courses.shape,"\n") print(df_cosdist.shape,"\n") print(df_courses.columns, "\n") print(df_cosdist.shape,"\n") print(df_courses.describe(), "\n") print(df_cosdist.describe(), "\n") # Quick check to see if the dataframe showing the right results df_cosdist.head(20) # Joining two dataframes - Courses and the Cosein Similarity Results based on the 'Course Id' variable. # Inner joins: Joins two tables with the common rows. This is a set operateion. df_courses_score = df_courses.merge(df_cosdist, on ='Course Id', how='inner') print(df_courses_score.shape,"\n") # Tranforming and shaping the data to create the confusion matrix for the ROLE: DATA SCIENTIST y_actu_DataScientist = '' y_pred_DataScientist = '' df_courses_score['DataScientist_Final_Score'] = (df_courses_score['DataScientist_Role_Score'] * my_role_weight) + (df_courses_score['DataScientist_Keyword_Score'] * my_skill_weight) df_courses_score['DataScientist_Predict'] = (df_courses_score['DataScientist_Final_Score'] >= 0.5) df_courses_score['DataScientist_Label'] = df_courses_score.Category.isin(label_DataScientist) y_pred_DataScientist = pd.Series(df_courses_score['DataScientist_Predict'], name='Predicted') y_actu_DataScientist = pd.Series(df_courses_score['DataScientist_Label'], name='Actual') df_confusion_DataScientist = pd.crosstab(y_actu_DataScientist, y_pred_DataScientist , rownames=['Actual'], colnames=['Predicted'], margins=False) # Tranforming and shaping the data to create the confusion matrix for the ROLE: SOFTWARE ENGINEER/DEVELOPER y_actu_SoftwareDevelopment = '' y_pred_SoftwareDevelopment = '' df_courses_score['SoftwareDevelopment_Final_Score'] = (df_courses_score['SoftwareDevelopment_Role_Score'] * my_role_weight) + (df_courses_score['SoftwareDevelopment_Keyword_Score'] * my_skill_weight) df_courses_score['SoftwareDevelopment_Predict'] = (df_courses_score['SoftwareDevelopment_Final_Score'] >= 0.5) df_courses_score['SoftwareDevelopment_Label'] = df_courses_score.Category.isin(label_SoftwareDevelopment) y_pred_SoftwareDevelopment = pd.Series(df_courses_score['SoftwareDevelopment_Predict'], name='Predicted') y_actu_SoftwareDevelopment = pd.Series(df_courses_score['SoftwareDevelopment_Label'], name='Actual') df_confusion_SoftwareDevelopment = pd.crosstab(y_actu_SoftwareDevelopment, y_pred_SoftwareDevelopment , rownames=['Actual'], colnames=['Predicted'], margins=False) # Tranforming and shaping the data to create the confusion matrix for the ROLE: DATABASE DEVELOPER/ADMINISTRATOR y_actu_DatabaseAdministrator = '' y_pred_DatabaseAdministrator = '' df_courses_score['DatabaseAdministrator_Final_Score'] = (df_courses_score['DatabaseAdministrator_Role_Score'] * my_role_weight) + (df_courses_score['DatabaseAdministrator_Keyword_Score'] * my_skill_weight) df_courses_score['DatabaseAdministrator_Predict'] = (df_courses_score['DatabaseAdministrator_Final_Score'] >= 0.5) df_courses_score['DatabaseAdministrator_Label'] = df_courses_score.Category.isin(label_DatabaseAdministrator) y_pred_DatabaseAdministrator = pd.Series(df_courses_score['DatabaseAdministrator_Predict'], name='Predicted') y_actu_DatabaseAdministrator = pd.Series(df_courses_score['DatabaseAdministrator_Label'], name='Actual') df_confusion_DatabaseAdministrator = pd.crosstab(y_actu_DatabaseAdministrator, y_pred_DatabaseAdministrator , rownames=['Actual'], colnames=['Predicted'], margins=False) # Tranforming and shaping the data to create the confusion matrix for the ROLE: CYBERSECURITY CONSULTANT y_actu_Cybersecurity = '' y_pred_Cybersecurity = '' 
df_courses_score['Cybersecurity_Final_Score'] = (df_courses_score['Cybersecurity_Role_Score'] * my_role_weight) + (df_courses_score['Cybersecurity_Keyword_Score'] * my_skill_weight) df_courses_score['Cybersecurity_Predict'] = (df_courses_score['Cybersecurity_Final_Score'] >= 0.5) df_courses_score['Cybersecurity_Label'] = df_courses_score.Category.isin(label_Cybersecurity) y_pred_Cybersecurity = pd.Series(df_courses_score['Cybersecurity_Predict'], name='Predicted') y_actu_Cybersecurity = pd.Series(df_courses_score['Cybersecurity_Label'], name='Actual') df_confusion_Cybersecurity = pd.crosstab(y_actu_Cybersecurity, y_pred_Cybersecurity , rownames=['Actual'], colnames=['Predicted'], margins=False) # Tranforming and shaping the data to create the confusion matrix for the ROLE: FINANCIAL ACCOUNTANT y_actu_FinancialAccountant = '' y_pred_FinancialAccountant = '' df_courses_score['FinancialAccountant_Final_Score'] = (df_courses_score['FinancialAccountant_Role_Score'] * my_role_weight) + (df_courses_score['FinancialAccountant_Keyword_Score'] * my_skill_weight) df_courses_score['FinancialAccountant_Predict'] = (df_courses_score['FinancialAccountant_Final_Score'] >= 0.5) df_courses_score['FinancialAccountant_Label'] = df_courses_score.Category.isin(label_FinancialAccountant) y_pred_FinancialAccountant = pd.Series(df_courses_score['FinancialAccountant_Predict'], name='Predicted') y_actu_FinancialAccountant = pd.Series(df_courses_score['FinancialAccountant_Label'], name='Actual') df_confusion_FinancialAccountant = pd.crosstab(y_actu_FinancialAccountant, y_pred_FinancialAccountant , rownames=['Actual'], colnames=['Predicted'], margins=False) # Tranforming and shaping the data to create the confusion matrix for the ROLE: MACHINE LEARNING ENGINEER y_actu_MachineLearning = '' y_pred_MachineLearning = '' df_courses_score['MachineLearning_Final_Score'] = (df_courses_score['MachineLearning_Role_Score'] * my_role_weight) + (df_courses_score['MachineLearning_Keyword_Score'] * my_skill_weight) df_courses_score['MachineLearning_Predict'] = (df_courses_score['MachineLearning_Final_Score'] >= 0.5) df_courses_score['MachineLearning_Label'] = df_courses_score.Category.isin(label_MachineLearning) y_pred_MachineLearning = pd.Series(df_courses_score['MachineLearning_Predict'], name='Predicted') y_actu_MachineLearning = pd.Series(df_courses_score['MachineLearning_Label'], name='Actual') df_confusion_MachineLearning = pd.crosstab(y_actu_MachineLearning, y_pred_MachineLearning , rownames=['Actual'], colnames=['Predicted'], margins=False) # Tranforming and shaping the data to create the confusion matrix for the ROLE: MUSICIAN y_actu_Musician = '' y_pred_Musician = '' df_courses_score['Musician_Final_Score'] = (df_courses_score['Musician_Role_Score'] * my_role_weight) + (df_courses_score['Musician_Keyword_Score'] * my_skill_weight) df_courses_score['Musician_Predict'] = (df_courses_score['Musician_Final_Score'] >= 0.5) df_courses_score['Musician_Label'] = df_courses_score.Category.isin(label_Musician) y_pred_Musician = pd.Series(df_courses_score['Musician_Predict'], name='Predicted') y_actu_Musician = pd.Series(df_courses_score['Musician_Label'], name='Actual') df_confusion_Musician = pd.crosstab(y_actu_Musician, y_pred_Musician , rownames=['Actual'], colnames=['Predicted'], margins=False) # Tranforming and shaping the data to create the confusion matrix for the ROLE: NUTRITIONIST/DIETITIAN y_actu_Dietitian = '' y_pred_Dietitian = '' df_courses_score['Dietitian_Final_Score'] = (df_courses_score['Dietitian_Role_Score'] * 
my_role_weight) + (df_courses_score['Dietitian_Keyword_Score'] * my_skill_weight) df_courses_score['Dietitian_Predict'] = (df_courses_score['Dietitian_Final_Score'] >= 0.5) df_courses_score['Dietitian_Label'] = df_courses_score.Category.isin(label_Dietitian) y_pred_Dietitian = pd.Series(df_courses_score['Dietitian_Predict'], name='Predicted') y_actu_Dietitian = pd.Series(df_courses_score['Dietitian_Label'], name='Actual') df_confusion_Dietitian = pd.crosstab(y_actu_Dietitian, y_pred_Dietitian , rownames=['Actual'], colnames=['Predicted'], margins=False) df_confusion_DataScientist df_confusion_SoftwareDevelopment df_confusion_DatabaseAdministrator df_confusion_Cybersecurity df_confusion_FinancialAccountant df_confusion_MachineLearning df_confusion_Musician df_confusion_Dietitian # Performance summary for the ROLE: DATA SCIENTIST try: tn_DataScientist = df_confusion_DataScientist.iloc[0][False] except: tn_DataScientist = 0 try: tp_DataScientist = df_confusion_DataScientist.iloc[1][True] except: tp_DataScientist = 0 try: fn_DataScientist = df_confusion_DataScientist.iloc[1][False] except: fn_DataScientist = 0 try: fp_DataScientist = df_confusion_DataScientist.iloc[0][True] except: fp_DataScientist = 0 total_count_DataScientist = tn_DataScientist + tp_DataScientist + fn_DataScientist + fp_DataScientist print('Data Scientist Accuracy Rate : ', '{0:.2f}'.format((tn_DataScientist + tp_DataScientist) / total_count_DataScientist * 100)) print('Data Scientist Misclassifcation Rate : ', '{0:.2f}'.format((fn_DataScientist + fp_DataScientist) / total_count_DataScientist * 100)) print('Data Scientist True Positive Rate : ', '{0:.2f}'.format(tp_DataScientist / (tp_DataScientist + fn_DataScientist) * 100)) print('Data Scientist False Positive Rate : ', '{0:.2f}'.format(fp_DataScientist / (tn_DataScientist + fp_DataScientist) * 100)) # Performance summary for the ROLE: SOFTWARE ENGINEER try: tn_SoftwareDevelopment = df_confusion_SoftwareDevelopment.iloc[0][False] except: tn_SoftwareDevelopment = 0 try: tp_SoftwareDevelopment = df_confusion_SoftwareDevelopment.iloc[1][True] except: tp_SoftwareDevelopment = 0 try: fn_SoftwareDevelopment = df_confusion_SoftwareDevelopment.iloc[1][False] except: fn_SoftwareDevelopment = 0 try: fp_SoftwareDevelopment = df_confusion_SoftwareDevelopment.iloc[0][True] except: fp_SoftwareDevelopment = 0 total_count_SoftwareDevelopment = tn_SoftwareDevelopment + tp_SoftwareDevelopment + fn_SoftwareDevelopment + fp_SoftwareDevelopment print('Software Engineer Accuracy Rate : ', '{0:.2f}'.format((tn_SoftwareDevelopment + tp_SoftwareDevelopment) / total_count_SoftwareDevelopment * 100)) print('Software Engineer Misclassifcation Rate : ', '{0:.2f}'.format((fn_SoftwareDevelopment + fp_SoftwareDevelopment) / total_count_SoftwareDevelopment * 100)) print('Software Engineer True Positive Rate : ', '{0:.2f}'.format(tp_SoftwareDevelopment / (tp_SoftwareDevelopment + fn_SoftwareDevelopment) * 100)) print('Software Engineer False Positive Rate : ', '{0:.2f}'.format(fp_SoftwareDevelopment / (tn_SoftwareDevelopment + fp_SoftwareDevelopment) * 100)) # Performance summary for the ROLE: DATABASE DEVELOPER/ ADMINISTRATOR try: tn_DatabaseAdministrator = df_confusion_DatabaseAdministrator.iloc[0][False] except: tn_DatabaseAdministrator = 0 try: tp_DatabaseAdministrator = df_confusion_DatabaseAdministrator.iloc[1][True] except: tp_DatabaseAdministrator = 0 try: fn_DatabaseAdministrator = df_confusion_DatabaseAdministrator.iloc[1][False] except: fn_DatabaseAdministrator = 0 try: fp_DatabaseAdministrator 
= df_confusion_DatabaseAdministrator.iloc[0][True] except: fp_DatabaseAdministrator = 0 total_count_DatabaseAdministrator = tn_DatabaseAdministrator + tp_DatabaseAdministrator + fn_DatabaseAdministrator + fp_DatabaseAdministrator print('Database Administrator Accuracy Rate : ', '{0:.2f}'.format((tn_DatabaseAdministrator + tp_DatabaseAdministrator) / total_count_DatabaseAdministrator * 100)) print('Database Administrator Misclassifcation Rate : ', '{0:.2f}'.format((fn_DatabaseAdministrator + fp_DatabaseAdministrator) / total_count_DatabaseAdministrator * 100)) print('Database Administrator True Positive Rate : ', '{0:.2f}'.format(tp_DatabaseAdministrator / (tp_DatabaseAdministrator + fn_DatabaseAdministrator) * 100)) print('Database Administrator False Positive Rate : ', '{0:.2f}'.format(fp_DatabaseAdministrator / (tn_DatabaseAdministrator + fp_DatabaseAdministrator) * 100)) # Performance summary for the ROLE: CYBERSECURITY CONSULTANT try: tn_Cybersecurity = df_confusion_Cybersecurity.iloc[0][False] except: tn_Cybersecurity = 0 try: tp_Cybersecurity = df_confusion_Cybersecurity.iloc[1][True] except: tp_Cybersecurity = 0 try: fn_Cybersecurity = df_confusion_Cybersecurity.iloc[1][False] except: fn_Cybersecurity = 0 try: fp_Cybersecurity = df_confusion_Cybersecurity.iloc[0][True] except: fp_Cybersecurity = 0 total_count_Cybersecurity = tn_Cybersecurity + tp_Cybersecurity + fn_Cybersecurity + fp_Cybersecurity print('Cybersecurity Consultant Accuracy Rate : ', '{0:.2f}'.format((tn_Cybersecurity + tp_Cybersecurity) / total_count_Cybersecurity * 100)) print('Cybersecurity Consultant Misclassifcation Rate : ', '{0:.2f}'.format((fn_Cybersecurity + fp_Cybersecurity) / total_count_Cybersecurity * 100)) print('Cybersecurity Consultant True Positive Rate : ', '{0:.2f}'.format(tp_Cybersecurity / (tp_Cybersecurity + fn_Cybersecurity) * 100)) print('Cybersecurity Consultant False Positive Rate : ', '{0:.2f}'.format(fp_Cybersecurity / (tn_Cybersecurity + fp_Cybersecurity) * 100)) # Performance summary for the ROLE: FINANCIAL ACCOUNTANT try: tn_FinancialAccountant = df_confusion_FinancialAccountant.iloc[0][False] except: tn_FinancialAccountant = 0 try: tp_FinancialAccountant = df_confusion_FinancialAccountant.iloc[1][True] except: tp_FinancialAccountant = 0 try: fn_FinancialAccountant = df_confusion_FinancialAccountant.iloc[1][False] except: fn_FinancialAccountant = 0 try: fp_FinancialAccountant = df_confusion_FinancialAccountant.iloc[0][True] except: fp_FinancialAccountant = 0 total_count_FinancialAccountant = tn_FinancialAccountant + tp_FinancialAccountant + fn_FinancialAccountant + fp_FinancialAccountant print('Financial Accountant Consultant Accuracy Rate : ', '{0:.2f}'.format((tn_FinancialAccountant + tp_FinancialAccountant) / total_count_FinancialAccountant * 100)) print('Financial Accountant Consultant Misclassifcation Rate : ', '{0:.2f}'.format((fn_FinancialAccountant + fp_FinancialAccountant) / total_count_FinancialAccountant * 100)) print('Financial Accountant Consultant True Positive Rate : ', '{0:.2f}'.format(tp_FinancialAccountant / (tp_FinancialAccountant + fn_FinancialAccountant) * 100)) print('Financial Accountant Consultant False Positive Rate : ', '{0:.2f}'.format(fp_FinancialAccountant / (tn_FinancialAccountant + fp_FinancialAccountant) * 100)) # Performance summary for the ROLE: MACHINE LEARNING ENGINEER try: tn_MachineLearning = df_confusion_MachineLearning.iloc[0][False] except: tn_MachineLearning = 0 try: tp_MachineLearning = df_confusion_MachineLearning.iloc[1][True] except: 
tp_MachineLearning = 0 try: fn_MachineLearning = df_confusion_MachineLearning.iloc[1][False] except: fn_MachineLearning = 0 try: fp_MachineLearning = df_confusion_MachineLearning.iloc[0][True] except: fp_MachineLearning = 0 total_count_MachineLearning = tn_MachineLearning + tp_MachineLearning + fn_MachineLearning + fp_MachineLearning print('Machine Learning Engineer Accuracy Rate : ', '{0:.2f}'.format((tn_MachineLearning + tp_MachineLearning) / total_count_MachineLearning * 100)) print('Machine Learning Engineer Misclassifcation Rate : ', '{0:.2f}'.format((fn_MachineLearning + fp_MachineLearning) / total_count_MachineLearning * 100)) print('Machine Learning Engineer True Positive Rate : ', '{0:.2f}'.format(tp_MachineLearning / (tp_MachineLearning + fn_MachineLearning) * 100)) print('Machine Learning Engineer False Positive Rate : ', '{0:.2f}'.format(fp_MachineLearning / (tn_MachineLearning + fp_MachineLearning) * 100)) # Performance summary for the ROLE: MUSICIAN try: tn_Musician = df_confusion_Musician.iloc[0][False] except: tn_Musician = 0 try: tp_Musician = df_confusion_Musician.iloc[1][True] except: tp_Musician = 0 try: fn_Musician = df_confusion_Musician.iloc[1][False] except: fn_Musician = 0 try: fp_Musician = df_confusion_Musician.iloc[0][True] except: fp_Musician = 0 total_count_Musician = tn_Musician + tp_Musician + fn_Musician + fp_Musician print('Musician Accuracy Rate : ', '{0:.2f}'.format((tn_Musician + tp_Musician) / total_count_Musician * 100)) print('Musician Misclassifcation Rate : ', '{0:.2f}'.format((fn_Musician + fp_Musician) / total_count_Musician * 100)) print('Musician True Positive Rate : ', '{0:.2f}'.format(tp_Musician / (tp_Musician + fn_Musician) * 100)) print('Musician False Positive Rate : ', '{0:.2f}'.format(fp_Musician / (tn_Musician + fp_Musician) * 100)) # Performance summary for the ROLE: DIETITIAN try: tn_Dietitian = df_confusion_Dietitian.iloc[0][False] except: tn_Dietitian = 0 try: tp_Dietitian = df_confusion_Dietitian.iloc[1][True] except: tp_Dietitian = 0 try: fn_Dietitian = df_confusion_Dietitian.iloc[1][False] except: fn_Dietitian = 0 try: fp_Dietitian = df_confusion_Dietitian.iloc[0][True] except: fp_Dietitian = 0 total_count_Dietitian = tn_Dietitian + tp_Dietitian + fn_Dietitian + fp_Dietitian print('Dietitian Accuracy Rate : ', '{0:.2f}'.format((tn_Dietitian + tp_Dietitian) / total_count_Dietitian * 100)) print('Dietitian Misclassifcation Rate : ', '{0:.2f}'.format((fn_Dietitian + fp_Dietitian) / total_count_Dietitian * 100)) print('Dietitian True Positive Rate : ', '{0:.2f}'.format(tp_Dietitian / (tp_Dietitian + fn_Dietitian) * 100)) print('Dietitian False Positive Rate : ', '{0:.2f}'.format(fp_Dietitian / (tn_Dietitian + fp_Dietitian) * 100)) ###Output Dietitian Accuracy Rate : 47.72 Dietitian Misclassifcation Rate : 52.28 Dietitian True Positive Rate : 100.00 Dietitian False Positive Rate : 52.90
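###Markdown The nine per-role summary blocks above repeat the same logic. A minimal refactoring sketch (an editorial suggestion, not part of the original analysis; it assumes each `df_confusion_*` is a crosstab with boolean index and columns, as built above): ###Code
def performance_summary(df_confusion, role):
    # read a crosstab cell, defaulting to 0 when a class is absent
    # (this mirrors the try/except blocks above)
    def cell(actual, predicted):
        try:
            return df_confusion.loc[actual, predicted]
        except KeyError:
            return 0
    tn, tp = cell(False, False), cell(True, True)
    fn, fp = cell(True, False), cell(False, True)
    total = tn + tp + fn + fp
    print(f'{role} Accuracy Rate : {(tn + tp) / total * 100:.2f}')
    print(f'{role} Misclassification Rate : {(fn + fp) / total * 100:.2f}')
    print(f'{role} True Positive Rate : {tp / (tp + fn) * 100:.2f}')
    print(f'{role} False Positive Rate : {fp / (tn + fp) * 100:.2f}')

# example: reproduces the Musician block above in one call
performance_summary(df_confusion_Musician, 'Musician')
###Output _____no_output_____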
FF3 test.ipynb
###Markdown The factor rows follow the ticker order, so the matching real values have to be extracted separately. ###Code start8=0 start9=0 end9=0 real = np.array([]) cnt = ind_tv for i in tqdm(range(12)): test_ind = dates[cnt:min(cnt+12, length)] temp = factors[factors.date.apply(lambda x: x in test_ind)].ret.values if i < 8: start8+= len(temp) if i ==8: start9 = start8+len(temp) if i==9: end9 = start9+len(temp) real = np.append(real, temp) cnt += 12 ###Output 100%|██████████| 12/12 [00:22<00:00, 1.86s/it] ###Markdown R_squared ###Code res = pd.DataFrame(index = periods) for i in model_ml: temp = MLrs[MLrs.model == i].r_squared.rename(i) temp.index = periods res = pd.concat([res, temp], axis=1) for j in model_nn: temp = NNrs[NNrs.model == j].r_squared.rename(j) temp.index = periods res = pd.concat([res, temp], axis=1) # res.to_csv("[Results]R_sqared_time varies.csv") # out-of-sample R^2, with the segment between start9 and end9 excised from both series def ros(predicted): predicted = np.append(predicted[:start9], predicted[end9:]) real2 = np.append(real[:start9], real[end9:]) ros = 1- ((predicted - real2 )**2).mean()/(real2 **2).mean() return ros for i in names: temp = preds[i].values print(ros(temp)) factors.columns factors.describe() factors.corr() ###Output _____no_output_____ ###Markdown complexity ###Code def comp(name): res = MLrs[MLrs.model == name] return res.complexity fig = plt.figure(figsize =(12,9), dpi = 200) # plt.subplots_adjust(left=0.125, bottom=0.1, right=0.9, top=0.9, wspace=0.2, hspace=0.2) ax1 = fig.add_subplot(3,2,1) ax1.set(title='Elastic Net', ylabel='Features selected' ,xlabel = 'periods') ax1.plot(periods, comp('en')) ax2 = fig.add_subplot(3,2,3) ax2.set(title='PLS', ylabel='Number of components' ,xlabel = 'periods') ax2.plot(periods, comp('pls')) ax3 = fig.add_subplot(3,2,4) ax3.set(title='PCR', ylabel='Number of components' ,xlabel = 'periods') ax3.plot(periods, comp('pcr')) ax4 = fig.add_subplot(3,2,5) ax4.set(title='Random Forest', ylabel='Maximum Depth' ,xlabel = 'periods') ax4.plot(periods, comp('rf')) ax5 = fig.add_subplot(3,2,6) ax5.set(title='Gradient Boost Regression', ylabel='max features' ,xlabel = 'periods') ax5.plot(periods, comp('gbr')) fig.tight_layout() fig.show() ###Output _____no_output_____ ###Markdown DM test ###Code len(preds), len(real) names = preds.columns def dm_test(actual_lst, pred1_lst, pred2_lst, h = 1, crit="MSE", power = 2): # Routine for checking errors def error_check(): rt = 0 msg = "" # Check if h is an integer if (not isinstance(h, int)): rt = -1 msg = "The type of the number of steps ahead (h) is not an integer." return (rt,msg) # Check the range of h if (h < 1): rt = -1 msg = "The number of steps ahead (h) is not large enough." return (rt,msg) len_act = len(actual_lst) len_p1 = len(pred1_lst) len_p2 = len(pred2_lst) # Check if lengths of actual values and predicted values are equal if (len_act != len_p1 or len_p1 != len_p2 or len_act != len_p2): rt = -1 msg = "Lengths of actual_lst, pred1_lst and pred2_lst do not match." return (rt,msg) # Check range of h if (h >= len_act): rt = -1 msg = "The number of steps ahead is too large." return (rt,msg) # Check if criterion supported if (crit != "MSE" and crit != "MAPE" and crit != "MAD" and crit != "poly"): rt = -1 msg = "The criterion is not supported." return (rt,msg) # Check if every value of the input lists is numerical from re import compile as re_compile comp = re_compile("^\d+?\.\d+?$") def compiled_regex(s): """ Returns True if string is a number.
""" if comp.match(s) is None: return s.isdigit() return True for actual, pred1, pred2 in zip(actual_lst, pred1_lst, pred2_lst): is_actual_ok = compiled_regex(str(abs(actual))) is_pred1_ok = compiled_regex(str(abs(pred1))) is_pred2_ok = compiled_regex(str(abs(pred2))) if (not (is_actual_ok and is_pred1_ok and is_pred2_ok)): msg = "An element in the actual_lst, pred1_lst or pred2_lst is not numeric." rt = -1 return (rt,msg) return (rt,msg) from scipy.stats import t import collections import pandas as pd import numpy as np # Initialise lists e1_lst = [] e2_lst = [] d_lst = [] # convert every value of the lists into real values actual_lst = pd.Series(actual_lst).apply(lambda x: float(x)).tolist() pred1_lst = pd.Series(pred1_lst).apply(lambda x: float(x)).tolist() pred2_lst = pd.Series(pred2_lst).apply(lambda x: float(x)).tolist() # Length of lists (as real numbers) T = float(len(actual_lst)) # construct d according to crit if (crit == "MSE"): for actual,p1,p2 in zip(actual_lst,pred1_lst,pred2_lst): e1_lst.append((actual - p1)**2) e2_lst.append((actual - p2)**2) for e1, e2 in zip(e1_lst, e2_lst): d_lst.append(e1 - e2) elif (crit == "MAD"): for actual,p1,p2 in zip(actual_lst,pred1_lst,pred2_lst): e1_lst.append(abs(actual - p1)) e2_lst.append(abs(actual - p2)) for e1, e2 in zip(e1_lst, e2_lst): d_lst.append(e1 - e2) elif (crit == "MAPE"): for actual,p1,p2 in zip(actual_lst,pred1_lst,pred2_lst): e1_lst.append(abs((actual - p1)/actual)) e2_lst.append(abs((actual - p2)/actual)) for e1, e2 in zip(e1_lst, e2_lst): d_lst.append(e1 - e2) elif (crit == "poly"): for actual,p1,p2 in zip(actual_lst,pred1_lst,pred2_lst): e1_lst.append(((actual - p1))**(power)) e2_lst.append(((actual - p2))**(power)) for e1, e2 in zip(e1_lst, e2_lst): d_lst.append(e1 - e2) # Mean of d mean_d = pd.Series(d_lst).mean() # Find autocovariance and construct DM test statistics def autocovariance(Xi, N, k, Xs): autoCov = 0 T = float(N) for i in np.arange(0, N-k): autoCov += ((Xi[i+k])-Xs)*(Xi[i]-Xs) return (1/(T))*autoCov gamma = [] for lag in range(0,h): gamma.append(autocovariance(d_lst,len(d_lst),lag,mean_d)) # 0, 1, 2 V_d = (gamma[0] + 2*sum(gamma[1:]))/T DM_stat=V_d**(-0.5)*mean_d harvey_adj=((T+1-2*h+h*(h-1)/T)/T)**(0.5) DM_stat = harvey_adj*DM_stat # Find p-value p_value = 2*t.cdf(-abs(DM_stat), df = T - 1) # Construct named tuple for return dm_return = collections.namedtuple('dm_return', 'DM p_value') rt = dm_return(DM = DM_stat, p_value = p_value) return rt dm_res = pd.DataFrame(columns = names, index= names) p_res = pd.DataFrame(columns = names, index= names) for i in range(len(names)): name1 = names[i] pred1 = preds[name1] for j in range(i+1, len(names)): name2 = names[j] pred2 = preds[name2] dts = dm_test(pool_test2, pred1.values, pred2.values, h=12) dm_res.loc[name1, name2] = dts[0] p_res.loc[name1, name2] = dts[1] dm_res.to_csv("[results]dm_stat2.csv") p_res.to_csv("[results]p_value2.csv") ###Output _____no_output_____ ###Markdown 포트폴리오 만들어서 분석 ###Code date_order = np.array([]) ticker_order = np.array([]) name_order = np.array([]) returns = np.array([]) beta = np.array([]) cnt = ind_tv for i in tqdm(range(12)): test_ind = dates[cnt:min(cnt+12, length)] temp = factors[factors.date.apply(lambda x: x in test_ind)] date_order = np.append(date_order, temp.date.values) ticker_order = np.append(ticker_order, temp.ticker.values) name_order = np.append(name_order, temp.name.values) returns = np.append(returns, temp.ret.values) beta = np.append(beta, temp.beta.values) cnt += 12 real_date2 = pd.DataFrame({'date':date_order, 
'ticker':ticker_order, 'name':name_order, 'ret':returns, 'beta':beta}) real_date2 = pd.merge(real_date2, prices[['date','ticker','ret','smb*hml']], how='left', on =['date','ticker']) len(real_date) non = real_date[real_date['smb*hml'].isna()] ind_non = non.index def find_miss(dateticker): return real_date2[(real_date2.ticker == dateticker.ticker ) & (real_date2.date == dateticker.date)]['smb*hml'].values[0] real_date.loc[ind_non, 'smb*hml'] = non.apply(find_miss, axis=1) real_date.to_csv('[result]portfolio_file.csv', index=False) ports = pd.concat([real_date, preds], axis =1) ports ross = pd.DataFrame(columns = names) ress = [] for i in range(1,7): stock = ports[ports['smb*hml']==i] r = [rs(stock.ret, stock[name]) for name in names] ress.append(r) pd.DataFrame(ress, columns = names).to_csv('FF3_portfolios.csv') names def rs(pre, re): ros = 1- ((pre - re )**2).mean()/(re **2).mean() return ros r2 = [rs(real_date.ret, preds[name]) for name in names] r2 d_temp = ports.date.unique() ports = pd.concat([ports, nn3], axis=1) names = np.append(np.array(names), ['net6']) names rnk= pd.DataFrame() for i in names: size_rnk = ports[i].rank(pct=True) ports[i+'bin'] = pd.cut(size_rnk, bins=[0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1], include_lowest=True, labels=[1,2,3,4,5,6,7,8,9,10]).astype('int') mlport = pd.DataFrame() for i in names: temp = [] for j in range(1,11): po = ports[ports[i+'bin']==j] po_pred = po[i].mean() po_avg = po.ret.mean() po_std = po.ret.std() po_sr = po_avg/po_std temp.append([po_pred, po_avg, po_std, po_sr]) temp2 = pd.DataFrame(temp,columns = [i+'_pred',i+'_avg',i+'_std',i+'_SR']) mlport = pd.concat([mlport, temp2], axis=1) mlport.to_csv('ml_portfolios.csv') mlport2_short = pd.DataFrame() for i in names: temp = [] po1 = ports[ports[i+'bin']==1][['ret',i]] po2 = ports[ports[i+'bin']==10][['ret',i]] po = pd.concat([-po1,po2]) po_pred = po[i].mean() po_avg = po.ret.mean() po_std = po.ret.std() po_sr = po_avg/po_std temp.append([po_pred, po_avg, po_std, po_sr]) temp2 = pd.DataFrame(temp,columns = [i+'_pred',i+'_avg',i+'_std',i+'_SR']) mlport2_short = pd.concat([mlport2_short, temp2], axis=1) mlport2_short.to_csv('ml_ports_short.csv') po.std() nn2= pd.read_csv("NN_test_res_rolling0_11.csv") nn3= pd.read_csv("NN_test_pred_net6.csv") ros(nn3) res1 temp = [] size_rnk = nn3['net6'].rank(pct=True) nn3['net6'+'bin'] = pd.cut(size_rnk, bins=[0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1], include_lowest=True, labels=[1,2,3,4,5,6,7,8,9,10]).astype('int') for j in range(1,11): po = nn3[nn3['net6'+'bin']==j] po_pred = po['net6'].mean() po_avg = po.ret.mean() po_std = po.ret.std() po_sr = po_avg/po_std temp.append([po_pred, po_avg, po_std, po_sr]) temp2 = pd.DataFrame(temp,columns = [i+'_pred',i+'_avg',i+'_std',i+'_SR']) nn3 mlport ###Output _____no_output_____
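###Markdown The Sharpe ratios above are per period. A minimal sketch to annualize them (an editorial addition; it assumes the returns are monthly, which the 12-month test windows suggest): ###Code
# assumption: monthly returns, hence the sqrt(12) scaling
sr_cols = [c for c in mlport.columns if c.endswith('_SR')]
mlport[sr_cols] * np.sqrt(12)
###Output _____no_output_____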
Section3_VariableCharacteristics/03.3_RareLabels.ipynb
###Markdown Rare Labels Labels that occur rarelyCategorical variables are those whose values are selected from a group of categories, also called labels. Different labels appear in the dataset with different frequencies. Some categories appear a lot in the dataset, whereas some other categories appear in only a small number of observations.For example, in a dataset with information about loan applicants where one of the variables is "city" where the applicant lives, cities like 'New York' may appear a lot in the data because New York has a huge population, whereas smaller towns like 'Leavenworth' will appear only on a few occasions (if at all, population < 2000 people), because the population there is very small. A borrower is more likely to live in New York, because far more people live in New York.In fact, categorical variables often contain a few dominant labels that account for the majority of the observations and a large number of labels that appear only seldom. Are Rare Labels in a categorical variable a problem?Rare values can add a lot of information or none at all. For example, consider a stockholder meeting where each person can vote in proportion to their number of shares. One of the shareholders owns 50% of the stock, and the other 999 shareholders own the remaining 50%. The outcome of the vote is largely influenced by that single large shareholder. The remaining shareholders may have an impact collectively, but they have almost no impact individually. The same occurs in real life datasets. The label that is over-represented in the dataset tends to dominate the outcome, and those that are under-represented may have no impact individually, but could have an impact if considered collectively.More specifically,- Rare values in categorical variables tend to cause over-fitting, particularly in tree based methods.- A large number of infrequent labels adds noise, with little information, therefore causing over-fitting.- Rare labels may be present in the training set, but not in the test set, therefore causing over-fitting to the train set.- Rare labels may appear in the test set, and not in the train set. Thus, the machine learning model will not know how to evaluate them. **Note** Sometimes rare values are indeed important. For example, if we are building a model to predict fraudulent loan applications, which are by nature rare, then a rare value in a certain variable may be very predictive. This rare value could be telling us that the observation is most likely a fraudulent application, and therefore we would choose not to ignore it. Real Life example: The Mercedes-Benz Greener Manufacturing challenge in KaggleDaimler’s Mercedes-Benz cars are leaders in the premium car industry. With a huge selection of car features and options, customers can choose the customized Mercedes-Benz of their dreams.To ensure the safety and reliability of each and every unique car configuration before they hit the road, Daimler’s engineers have developed a robust testing system. Testing time varies depending on the different car features and options. The task is to predict the time it takes for a car with certain features to pass the testing. Therefore it is a regression problem: we need to estimate a continuous variable.The dataset contains a set of car features; the variable names are masked, so it is impossible to find out what each one of them means. The variable to predict is _y_: time to pass the testing.
====================================================================================================To download the Mercedes-Benz Greener Manufacturing dataset from Kaggle, go to this [website](https://www.kaggle.com/c/mercedes-benz-greener-manufacturing/data)Scroll down to the bottom of the page, click on the link 'train.csv.zip', and then click the blue 'download' button towards the right of the screen to download the dataset.Unzip it, and save it to a directory of your choice.**Note that you need to be logged in to Kaggle in order to download the datasets**.==================================================================================================== In this Demo:We will:- Learn to identify rare labels in a dataset- Understand how difficult it is to derive reliable information from them.- Visualise the uneven distribution of rare labels between train and test sets ###Code import pandas as pd import numpy as np import matplotlib.pyplot as plt # to separate data into train and test sets from sklearn.model_selection import train_test_split # let's load the dataset with the variables # we need for this demo # 'X1', 'X2', 'X3', 'X6' are the categorical # variables in this dataset # "y" is the target: time to pass the quality tests data = pd.read_csv('../mercedesbenz.csv', usecols=['X1', 'X2', 'X3', 'X6', 'y']) data.head() ###Output _____no_output_____ ###Markdown Variable names and variable values are masked intentionally by Mercedes Benz. This is common practice; it is done to protect intellectual property and / or personal information. ###Code # let's look at the different number of labels # in each variable (cardinality) cols_to_use = ['X1', 'X2', 'X3', 'X6'] for col in cols_to_use: print('variable: ', col, ' number of labels: ', len(data[col].unique())) print('total cars: ', len(data)) ###Output variable: X1 number of labels: 27 variable: X2 number of labels: 44 variable: X3 number of labels: 7 variable: X6 number of labels: 12 total cars: 4209 ###Markdown X1 shows 27 different values, X2 shows 44 different categories, and so on. ###Code # let's plot how frequently each label # appears in the dataset # in other words, the percentage of cars that # show each label total_cars = len(data) # for each categorical variable for col in cols_to_use: # count the number of cars per category # and divide by total cars # aka percentage of cars per category temp_df = pd.Series(data[col].value_counts() / total_cars) # make plot with the above percentages fig = temp_df.sort_values(ascending=False).plot.bar() fig.set_xlabel(col) # add a line at 5 % fig.axhline(y=0.05, color='red') fig.set_ylabel('percentage of cars') plt.show() ###Output _____no_output_____ ###Markdown For each of the above categorical variables, some labels appear in more than 10% of the cars and many appear in less than 10% or even 5% of the cars. These are infrequent labels, or **Rare Values**, and could cause over-fitting. How is the target, "time to pass testing", related to these categories?In the following cells, we will look at the mean time to pass the test shown, on average, by the cars that display each of the different categories. Keep on reading, it will become clearer.
###Code
# the following function calculates:

# 1) the percentage of cars per category
# 2) the mean time to pass testing per category

def calculate_perc_and_passtime(df, var):

    # total number of cars
    total_cars = len(df)

    # percentage of cars per category
    temp_df = pd.Series(df[var].value_counts() / total_cars).reset_index()
    temp_df.columns = [var, 'perc_cars']

    # add the mean to pass testing time
    # the target in this dataset is called 'y'
    temp_df = temp_df.merge(df.groupby([var])['y'].mean().reset_index(),
                            on=var,
                            how='left')

    return temp_df

# now we use the function for the variable 'X3'
temp_df = calculate_perc_and_passtime(data, 'X3')
temp_df
###Output
_____no_output_____
###Markdown
The above dataframe contains the percentage of cars that show each one of the labels in X3, and the mean time to pass the testing for those cars. In other words, ~46% of cars in the dataset show the label c for X3, and a mean time to pass the test of 101 seconds.
###Code
# Now I create a function to plot the
# label frequency and mean time to pass testing.

# This will help us visualise the relationship between the
# target and the labels

def plot_categories(df, var):

    fig, ax = plt.subplots(figsize=(8, 4))
    plt.xticks(df.index, df[var], rotation=0)

    ax2 = ax.twinx()
    ax.bar(df.index, df["perc_cars"], color='lightgrey')
    ax2.plot(df.index, df["y"], color='green', label='Seconds')
    ax.axhline(y=0.05, color='red')
    ax.set_ylabel('percentage of cars per category')
    ax.set_xlabel(var)
    ax2.set_ylabel('Time to pass testing, in seconds')
    plt.show()

plot_categories(temp_df, 'X3')
###Output
_____no_output_____
###Markdown
Cars where X3 is f pass the test quicker, whereas cars with the category d take a longer time to pass the test.
Cars where X3 is b take around 100 seconds to pass the test. However, b is present in less than 5% of the cars. Why is this important? Because if we do not have a lot of cars to learn from, we could be under or over-estimating the effect of b on the time to pass the test.
In other words, how confident am I to generalise that cars with the label b for X3 take around 100 seconds to pass the test, when I only have a few cars to learn from?
###Code
# let's plot the remaining categorical variables

for col in cols_to_use:

    if col !='X3':

        # re using the functions I created
        temp_df = calculate_perc_and_passtime(data, col)
        plot_categories(temp_df, col)
###Output
_____no_output_____
###Markdown
Let's look at variable X2: Most of the categories in X2 are present in less than 5% of cars. In addition, "y" varies a lot across those rare categories. The mean value of y goes up and down over the infrequent categories. In fact, it looks quite noisy.
These rare labels could indeed be very predictive, or they could be introducing noise rather than information. And because the labels are under-represented, we can't be sure whether they have a true impact on the time to pass the test. We could be under or over-estimating their impact due to the fact that we have information for few cars.
**Note:** This plot would bring more value if we plotted the errors of the mean time to pass the test. It would give us an idea of how much the mean value of the target varies within each label. Why don't you go ahead and add the standard deviation to the plot?
Rare labels: grouping under a new label
One common way of working with rare or infrequent values is to group them under an umbrella category called 'Rare' or 'Other'. In this way, we are able to understand the "collective" effect of the infrequent labels on the target.
See below.
###Code
# I will replace all the labels that appear in less than 10%
# of the cars by the label 'rare'

def group_rare_labels(df, var):

    total_cars = len(df)

    # first I calculate the percentage of cars per category
    temp_df = pd.Series(df[var].value_counts() / total_cars)

    # now I create a dictionary to replace the rare labels with the
    # string 'rare'
    grouping_dict = {
        k: ('rare' if k not in temp_df[temp_df >= 0.1].index else k)
        for k in temp_df.index
    }

    # now I replace the rare categories
    tmp = df[var].map(grouping_dict)

    return tmp

# group rare labels in X1
data['X1_grouped'] = group_rare_labels(data, 'X1')

data[['X1', 'X1_grouped']].head(10)

# let's plot X1 with the grouped categories
# re-using the functions I created above

temp_df = calculate_perc_and_passtime(data, 'X1_grouped')
plot_categories(temp_df, 'X1_grouped')
###Output
_____no_output_____
###Markdown
"Rare" now contains the overall influence of all the infrequent categories on the time to pass the test.
###Code
# let's plot the original X1 for comparison
temp_df = calculate_perc_and_passtime(data, 'X1')
plot_categories(temp_df, 'X1')
###Output
_____no_output_____
###Markdown
Only 4 categories of X1 are relatively common across the different cars. The remaining are now grouped into 'rare', which captures the average time to pass the test for all the infrequent labels.
###Code
# let's group and plot the remaining categorical variables

for col in cols_to_use[1:]:

    # re using the functions I created
    data[col+'_grouped'] = group_rare_labels(data, col)
    temp_df = calculate_perc_and_passtime(data, col+'_grouped')
    plot_categories(temp_df, col+'_grouped')
###Output
_____no_output_____
###Markdown
Here we can see, for example, that cars with the label f for variable X3 tend to spend less time in testing, and all the infrequent labels together tend to behave overall like the categories c and a, in terms of time to pass the test.
**Note:** Ideally, we would also like to have the standard deviation / interquartile range for the time to pass the test, to get an idea of how variable the time to pass is for each category.
Rare labels lead to uneven distribution of categories in train and test sets
Similarly to highly cardinal variables, rare or infrequent labels often land only on the training set, or only on the testing set. If present only in the training set, they may lead to over-fitting. If present only on the testing set, the machine learning algorithm will not know how to handle them, as it has not seen the rare labels during training. Let's explore this further.
###Code
# let's separate into training and testing set
X_train, X_test, y_train, y_test = train_test_split(data[cols_to_use],
                                                    data.y,
                                                    test_size=0.3,
                                                    random_state=0)
X_train.shape, X_test.shape

# Let's find labels present only in the training set
# I will use X2 as example

unique_to_train_set = [
    x for x in X_train['X2'].unique() if x not in X_test['X2'].unique()
]

print(unique_to_train_set)
###Output
['l', 'aa', 'au', 'o', 'af', 'c', 'am']
###Markdown
There are 7 categories present in the train set that are not present in the test set.
###Code
# Let's find labels present only in the test set

unique_to_test_set = [
    x for x in X_test['X2'].unique() if x not in X_train['X2'].unique()
]

print(unique_to_test_set)
###Output
['ar', 'j']
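###Markdown
One practical refinement, sketched below: derive the set of frequent labels from the training set only, and map everything else (including labels that appear only in the test set) to 'rare'. The helper name and the 10% threshold are illustrative assumptions, not part of the original demo.
###Code
# A minimal sketch, assuming X_train / X_test from the split above.
# Frequencies are learned on the train set only, to avoid leakage;
# test-only labels automatically fall into 'rare'.
def group_rare_labels_train_test(train_df, test_df, var, tol=0.1):
    freqs = train_df[var].value_counts(normalize=True)
    frequent = set(freqs[freqs >= tol].index)
    # keep a value only if it belongs to the frequent set, else 'rare'
    train_grouped = train_df[var].where(train_df[var].isin(frequent), 'rare')
    test_grouped = test_df[var].where(test_df[var].isin(frequent), 'rare')
    return train_grouped, test_grouped

X_train_X2, X_test_X2 = group_rare_labels_train_test(X_train, X_test, 'X2')
X_test_X2.value_counts()
###Output
_____no_output_____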
pytorch_ipynb/mechanics/transferlearning-vgg16.ipynb
###Markdown
Deep Learning Models -- A collection of various deep learning architectures, models, and tips for TensorFlow and PyTorch in Jupyter Notebooks.
- Author: Sebastian Raschka
- GitHub Repository: https://github.com/rasbt/deeplearning-models
Model Zoo -- Transfer Learning Example (VGG-16)
###Code
%load_ext watermark
%watermark -a 'Sebastian Raschka' -v -p torch,torchvision

import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

from torchvision import datasets
from torchvision import transforms
from torch.utils.data import DataLoader

#######################################
### PRE-TRAINED MODELS AVAILABLE HERE
## https://pytorch.org/docs/stable/torchvision/models.html
from torchvision import models
#######################################

if torch.cuda.is_available():
    torch.backends.cudnn.deterministic = True
###Output
_____no_output_____
###Markdown
Loading an Example Dataset
In this example, we are going to work with CIFAR-10, because you are familiar with it and it is easier (smaller) than ImageNet. However, note that in a "real-world application", images with dimensions of at least 224x224 are recommended. Here, we resize the images as a workaround.
- Note that due to the average pooling in the final layer, it is also possible to feed in 32x32-pixel images directly. However, I noticed that the performance is rather low (~65% test accuracy after 10 and 100 epochs).
###Code
##########################
### SETTINGS
##########################

# Device
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print('Device:', DEVICE)

NUM_CLASSES = 10

# Hyperparameters
random_seed = 1
learning_rate = 0.0001
num_epochs = 10
batch_size = 128

##########################
### CIFAR-10 DATASET
##########################

custom_transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
])

## Note that this particular normalization scheme is
## necessary since it was used for pre-training
## the network on ImageNet.
## These are the channel-means and standard deviations
## for z-score normalization.
train_dataset = datasets.CIFAR10(root='data', train=True, transform=custom_transform, download=True) test_dataset = datasets.CIFAR10(root='data', train=False, transform=custom_transform) train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, num_workers=8, shuffle=True) test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, num_workers=8, shuffle=False) # Checking the dataset for images, labels in train_loader: print('Image batch dimensions:', images.shape) print('Image label dimensions:', labels.shape) break ###Output Device: cuda:0 Files already downloaded and verified Image batch dimensions: torch.Size([128, 3, 224, 224]) Image label dimensions: torch.Size([128]) ###Markdown Loading the Pre-Trained Model ###Code model = models.vgg16(pretrained=True) model ###Output _____no_output_____ ###Markdown Freezing the Model ###Code for param in model.parameters(): param.requires_grad = False ###Output _____no_output_____ ###Markdown Assume we want to train the penultimate layer: ###Code model.classifier[3].requires_grad = True ###Output _____no_output_____ ###Markdown Now, replace the output layer with your own output layer (here, we actually add two more output layers): ###Code model.classifier[6] = nn.Sequential( nn.Linear(4096, 512), nn.ReLU(), nn.Dropout(0.5), nn.Linear(512, NUM_CLASSES)) ###Output _____no_output_____ ###Markdown Training (as usual) ###Code model = model.to(DEVICE) optimizer = torch.optim.Adam(model.parameters()) def compute_accuracy(model, data_loader): model.eval() correct_pred, num_examples = 0, 0 for i, (features, targets) in enumerate(data_loader): features = features.to(DEVICE) targets = targets.to(DEVICE) logits = model(features) _, predicted_labels = torch.max(logits, 1) num_examples += targets.size(0) correct_pred += (predicted_labels == targets).sum() return correct_pred.float()/num_examples * 100 def compute_epoch_loss(model, data_loader): model.eval() curr_loss, num_examples = 0., 0 with torch.no_grad(): for features, targets in data_loader: features = features.to(DEVICE) targets = targets.to(DEVICE) logits = model(features) loss = F.cross_entropy(logits, targets, reduction='sum') num_examples += targets.size(0) curr_loss += loss curr_loss = curr_loss / num_examples return curr_loss start_time = time.time() for epoch in range(num_epochs): model.train() for batch_idx, (features, targets) in enumerate(train_loader): features = features.to(DEVICE) targets = targets.to(DEVICE) ### FORWARD AND BACK PROP logits = model(features) cost = F.cross_entropy(logits, targets) optimizer.zero_grad() cost.backward() ### UPDATE MODEL PARAMETERS optimizer.step() ### LOGGING if not batch_idx % 50: print ('Epoch: %03d/%03d | Batch %04d/%04d | Cost: %.4f' %(epoch+1, num_epochs, batch_idx, len(train_loader), cost)) model.eval() with torch.set_grad_enabled(False): # save memory during inference print('Epoch: %03d/%03d | Train: %.3f%% | Loss: %.3f' % ( epoch+1, num_epochs, compute_accuracy(model, train_loader), compute_epoch_loss(model, train_loader))) print('Time elapsed: %.2f min' % ((time.time() - start_time)/60)) print('Total Training Time: %.2f min' % ((time.time() - start_time)/60)) with torch.set_grad_enabled(False): # save memory during inference print('Test accuracy: %.2f%%' % (compute_accuracy(model, test_loader))) %matplotlib inline import matplotlib.pyplot as plt classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck') for batch_idx, (features, targets) in enumerate(test_loader): features = features targets = 
targets break logits = model(features.to(DEVICE)) _, predicted_labels = torch.max(logits, 1) def unnormalize(tensor, mean, std): for t, m, s in zip(tensor, mean, std): t.mul_(s).add_(m) return tensor n_images = 10 fig, axes = plt.subplots(nrows=1, ncols=n_images, sharex=True, sharey=True, figsize=(20, 2.5)) orig_images = features[:n_images] for i in range(n_images): curr_img = orig_images[i].detach().to(torch.device('cpu')) curr_img = unnormalize(curr_img, torch.tensor([0.485, 0.456, 0.406]), torch.tensor([0.229, 0.224, 0.225])) curr_img = curr_img.permute((1, 2, 0)) axes[i].imshow(curr_img) axes[i].set_title(classes[predicted_labels[i]]) ###Output _____no_output_____
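###Markdown
A caveat on the freezing step above: `model.classifier[3].requires_grad = True` sets a plain Python attribute on the `nn.Module` object; autograd only looks at the `requires_grad` flag of the parameter tensors themselves. A minimal sketch of how to actually unfreeze that layer's weights:
###Code
# unfreeze the penultimate classifier layer by flipping the flag on its
# parameter tensors (not on the module object)
for param in model.classifier[3].parameters():
    param.requires_grad = True

# sanity check: count the parameter tensors that will receive gradients
n_trainable = sum(p.requires_grad for p in model.parameters())
print('trainable parameter tensors:', n_trainable)
###Output
_____no_output_____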
notebooks/benchmark/Benchmark.ipynb
###Markdown
Load the [StructuralVariantAnnotation](https://bioconductor.org/packages/devel/bioc/vignettes/StructuralVariantAnnotation/inst/doc/vignettes.html) package
###Code
suppressPackageStartupMessages(require(StructuralVariantAnnotation))
suppressPackageStartupMessages(require(dplyr))
suppressPackageStartupMessages(require(ggplot2))
###Output
_____no_output_____
###Markdown
[SV type inference](https://github.com/PapenfussLab/gridss/blob/7b1fedfed32af9e03ed5c6863d368a821a4c699f/example/simple-event-annotation.R#L9)
###Code
infer_svtype <- function(gr) {
    gr$svtype <- ifelse(
        seqnames(gr) != seqnames(partner(gr)),
        "TRA", # Using TRA instead of ITX or BP
        ifelse(
            gr$insLen >= abs(gr$svLen) * 0.7,
            "INS",
            ifelse(
                strand(gr) == strand(partner(gr)),
                "INV",
                ifelse(xor(
                    start(gr) < start(partner(gr)),
                    strand(gr) == "-"
                ), "DEL", "DUP")
            )
        )
    )
    return(gr)
}

load_bed <- function(bed_file) {
    bed_regions <- rtracklayer::import(bed_file)
    # set NCBI seqlevels
    seqlevelsStyle(bed_regions) <- "NCBI"
    return(bed_regions)
}

load_bedpe <- function(bedpe_file, filter_regions) {
    bedpe_gr <- pairs2breakpointgr(rtracklayer::import(bedpe_file))
    bedpe_gr <- filter_regions(bedpe_gr, load_bed(filter_regions), mode='remove')
    return(bedpe_gr)
}

load_vcf <- function(vcf_file, svtype, caller, filter_regions) {
    # Load VCF file
    vcf_gr <- VariantAnnotation::readVcf(vcf_file)
    # set NCBI seqlevels
    seqlevelsStyle(vcf_gr) <- 'NCBI'

    if(caller=='survivor') {
        # update info header
        info(header(vcf_gr)) <- rbind(info(header(vcf_gr)),
                                      data.frame(Number = '4', Type = 'String',
                                                 Description = 'DELLY CT'))
        # update info
        info(vcf_gr) <- cbind(info(vcf_gr),
                              data.frame(CT=factor(rep('3to5',nrow(info(vcf_gr))),
                                                   levels=c('5to5', '3to3', '3to5', '5to3'))))
        # TRA
        idx <- which(info(vcf_gr)$SVTYPE=='TRA')
        info(vcf_gr)$CT[idx[seq(1, length(idx), by=2)]] <- '3to3'
        info(vcf_gr)$CT[idx[seq(2, length(idx), by=2)]] <- '5to5'
        # INV
        idx <- which(info(vcf_gr)[['SVTYPE']]=='INV')
        info(vcf_gr)$CT[idx[seq(1, length(idx), by=2)]] <- '3to5'
        info(vcf_gr)$CT[idx[seq(2, length(idx), by=2)]] <- '5to3'
        # other SVTYPEs
        idx <- which(!info(vcf_gr)[['SVTYPE']]%in%c('INV','TRA'))
        info(vcf_gr)$CT[idx] <- '3to5'

        # SURVIVOR simSV assigns LowQual to all artificial SVs
        vcf_gr <- vcf_gr[rowRanges(vcf_gr)$FILTER%in%c("LowQual")]

        if(svtype == 'INS') {
            info(vcf_gr)$END <- end(ranges(rowRanges(vcf_gr)))
        }

    }else{
        # Keep only SVs that passed the filtering (PASS or .)
        vcf_gr <- vcf_gr[rowRanges(vcf_gr)$FILTER%in%c("PASS",".")]
    }

    if (caller == 'lumpy') {
        # Read evidence support as a proxy for QUAL
        support <- unlist(info(vcf_gr)$SU)
        fixed(vcf_gr)$QUAL <- support
    } else if (caller == 'delly') {
        # Split-read support plus Paired-end read support as a proxy for QUAL
        sr_support <- info(vcf_gr)$SR
        # replace missing split-read support with zero (is.na is applied to the
        # support vector itself, not to the VCF object)
        sr_support[is.na(sr_support)] <- 0
        fixed(vcf_gr)$QUAL <- sr_support + info(vcf_gr)$PE
    }

    vcf_gr <- breakpointRanges(vcf_gr)
    vcf_gr <- infer_svtype(vcf_gr)

    # Select only one SV type
    vcf_gr <- vcf_gr[which(vcf_gr$svtype == svtype)]

    # Select SVs >= 50 bp
    if(!svtype %in% c('TRA','INS')) {
        vcf_gr <- vcf_gr[abs(vcf_gr$svLen) >= 50]
    }

    # Filter regions
    vcf_gr <- filter_regions(vcf_gr, load_bed(filter_regions), mode='remove')

    return(vcf_gr)
}

filter_regions <- function(regions_to_filter, ref_regions, mode='remove') {
    print(length(regions_to_filter))
    if (mode == 'keep') {
        result <- regions_to_filter[overlapsAny(regions_to_filter, ref_regions) &
                                    overlapsAny(partner(regions_to_filter), ref_regions), ]
    } else if (mode == 'remove'){
        result <- regions_to_filter[!(
            overlapsAny(regions_to_filter, ref_regions) |
            overlapsAny(partner(regions_to_filter), ref_regions)
        ), ]
    }
    print(length(result))
    return(result)
}
###Output
_____no_output_____
###Markdown
Load the truth set
###Code
truth_set <- load_vcf(truth_set_file, svtype, 'survivor', regions_for_filtering)
# truth_set <- truth_set[truth_set$sourceId==svtype]
length(truth_set)
###Output
Warning message:
“info fields with no header: CHR2,END,SVLEN,SVMETHOD,SVTYPE,AF”
Warning message:
“info fields with no header: CHR2,END,SVLEN,SVMETHOD,SVTYPE,AF,CT”
Warning message:
“info fields with no header: CHR2,END,SVLEN,SVMETHOD,SVTYPE,AF,CT”
Warning message:
“info fields with no header: CHR2,END,SVLEN,SVMETHOD,SVTYPE,AF,CT”
Warning message:
“info fields with no header: CHR2,END,SVLEN,SVMETHOD,SVTYPE,AF,CT”
Warning message:
“info fields with no header: CHR2,END,SVLEN,SVMETHOD,SVTYPE,AF,CT”
Warning message:
“info fields with no header: CHR2,END,SVLEN,SVMETHOD,SVTYPE,AF,CT”
Warning message in .breakpointRanges(x, ...):
“Found 1000 duplicate row names (duplicates renamed).”
###Markdown
Load the SV callsets
###Code
sv_regions <- list()

sv_regions[['CNN']] <- load_bedpe(callsets[['CNN']], regions_for_filtering)
sv_regions[['GRIDSS']] <- load_vcf(callsets[['GRIDSS']], svtype, 'gridss', regions_for_filtering)
sv_regions[['Manta']] <- load_vcf(callsets[['Manta']], svtype, 'manta', regions_for_filtering)
sv_regions[['Lumpy']] <- load_vcf(callsets[['Lumpy']], svtype, 'lumpy', regions_for_filtering)
sv_regions[['DELLY']] <- load_vcf(callsets[['DELLY']], svtype, 'delly', regions_for_filtering)

# exclude callsets with zero calls
sv_regions <- sv_regions[sapply(sv_regions, length)!=0]

# rename NA. column of CNN into svLen
names(mcols(sv_regions[['CNN']]))[names(mcols(sv_regions[['CNN']]))=='NA.'] <- 'svLen'
###Output
_____no_output_____
###Markdown
Add SV caller name
###Code
for (c in names(sv_regions)) {
    sv_regions[[c]]$caller <- c
}

seqlengths(truth_set) <- c("12" = 2000000, "22" = 2000000)
for (c in names(sv_regions)) {
    seqlengths(sv_regions[[c]]) <- c("12" = 2000000, "22" = 2000000)
}

hits <- findBreakpointOverlaps(
    sv_regions[['CNN']],
    sv_regions[['CNN']],
    # read pair based callers make imprecise calls.
    # A margin around the call position is required when matching with the truth set
    maxgap = 100,
    # Since we added a maxgap, we also need to restrict the mismatch between the
    # size of the events. We don't want to match a 100bp deletion with a
    # 5bp duplication. 
This will happen if we have a 100bp margin but don't also # require an approximate size match as well sizemargin = 0.25, ignore.strand = TRUE, # We also don't want to match a 20bp deletion with a 20bp deletion 80bp away # by restricting the margin based on the size of the event, we can make sure # that simple events actually do overlap restrictMarginToSizeMultiple = 0.5 ) data <- as.data.frame(hits) getfirst <- function(x){x[1]} get_best_qual <- function(x){x[which.max(sv_regions[['CNN']]$QUAL[x])]} aggdata <- aggregate(data$queryHits, by=list(data$subjectHits), FUN=get_best_qual) sv_regions[['CNN']] <- sv_regions[['CNN']][unique(aggdata$x)] sv_regions[['CNN']] <- sv_regions[['CNN']][sv_regions[['CNN']]$partner %in% names(sv_regions[['CNN']])] ###Output _____no_output_____ ###Markdown Compute overlap ###Code for (c in names(sv_regions)) { if(svtype == 'INS'){ sv_regions[[c]]$truth_matches <- countBreakpointOverlaps( sv_regions[[c]], truth_set, # using a smaller margin for insertions, insertion location should be precise maxgap = 5, # sizemargin cannot be used for insertions # sizemargin = 0.25, ignore.strand = TRUE, restrictMarginToSizeMultiple = 0.5, # countOnlyBest cannot be used for insertions # countOnlyBest = TRUE ) }else{ sv_regions[[c]]$truth_matches <- countBreakpointOverlaps( sv_regions[[c]], truth_set, # read pair based callers make imprecise calls. # A margin around the call position is required when matching with the truth set maxgap = 200, # Since we added a maxgap, we also need to restrict the mismatch between the # size of the events. We don't want to match a 100bp deletion with a # 5bp duplication. This will happen if we have a 100bp margin but don't also # require an approximate size match as well sizemargin = 0.25, ignore.strand = TRUE, # We also don't want to match a 20bp deletion with a 20bp deletion 80bp away # by restricting the margin based on the size of the event, we can make sure # that simple events actually do overlap restrictMarginToSizeMultiple = 0.5, # Some callers make duplicate calls and will sometimes report a variant multiple # times with slightly different bounds. countOnlyBest prevents these being # double-counted as multiple true positives. countOnlyBest = TRUE ) } } sv_regions_unlisted <- unlist(GRangesList(sv_regions)) ###Output _____no_output_____ ###Markdown Plotting Precision and Recall as in [StructuralVariantAnnotation vignette](https://bioconductor.org/packages/devel/bioc/vignettes/StructuralVariantAnnotation/inst/doc/vignettes.html) ###Code main.title <- paste("SURVIVOR test data", mode, svtype, sep='-') names(main.title) <- c(sample) ggplot( as.data.frame(sv_regions_unlisted) %>% dplyr::select(QUAL, caller, truth_matches) %>% dplyr::group_by(caller, QUAL) %>% dplyr::summarise(calls = n(), tp = sum(truth_matches > 0)) %>% dplyr::group_by(caller) %>% dplyr::arrange(dplyr::desc(QUAL)) %>% dplyr::mutate( cum_tp = cumsum(tp), cum_n = cumsum(calls), cum_fp = cum_n - cum_tp, precision = cum_tp / cum_n, recall = cum_tp / length(truth_set) ) ) + aes(x = recall, y = precision, colour = caller) + geom_point() + geom_line() + scale_y_continuous(labels = scales::percent) + scale_x_continuous( labels = scales::percent, sec.axis = sec_axis( ~ (.) 
* length(truth_set), name = "true positives") ) + labs(title = main.title[sample]) filename <- paste("SURVIVOR", mode, svtype, sep='_') ggsave(file=paste(filename, 'png', sep='.')) make_percent <- function(x){ signif(x*100,digits = 4) } ###Output _____no_output_____ ###Markdown Summarize results ###Code res.df <- as.data.frame(sv_regions_unlisted) %>% dplyr::select(caller, truth_matches) %>% dplyr::group_by(caller) %>% dplyr::summarise(calls = n(), TP = sum(truth_matches > 0)) %>% dplyr::group_by(caller) %>% dplyr::mutate( FP = calls - TP, precision = signif(TP / calls, digits = 4), recall = signif(TP / length(truth_set), digits = 4) ) res.df$F1_score = with(res.df, 2 * (precision * recall) / (precision + recall)) res.df$precision <- make_percent(res.df$precision) res.df$recall <- make_percent(res.df$recall) res.df$F1_score <- make_percent(res.df$F1_score) res.df write.csv(file=paste(filename, 'csv', sep='.'), res.df, quote=FALSE, row.names=FALSE) ###Output _____no_output_____
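###Markdown
For reference, the columns computed in the summary above follow the usual definitions: $precision = \frac{TP}{TP + FP}$, $recall = \frac{TP}{|truth\ set|}$ and $F_1 = \frac{2 \cdot precision \cdot recall}{precision + recall}$, where a call counts as a TP when it has at least one truth match (`truth_matches > 0`) and $FP = calls - TP$, exactly as in the dplyr pipeline.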
Identifying_Medical_Diagnoses.ipynb
###Markdown
Read Images and Labels
###Code
# NOTE: the setup cell is not shown in this excerpt; the imports, image size
# and empty containers below are assumed so that the cells are runnable
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
from random import randint
from keras.utils import to_categorical

img_rows, img_cols = 224, 224  # matches the input_shape used by the model below
train_images, train_labels = [], []
val_images, val_labels = [], []
test_images, test_labels = [], []

for dirname, _, filenames in os.walk('/content/drive/My Drive/Kaggle'):
    for filename in filenames:
        image = cv2.imread(os.path.join(dirname, filename))
        image = cv2.resize(image, (img_rows, img_cols))
        splitted_dir = dirname.split('/')
        image_dir = splitted_dir[-2]
        label = splitted_dir[-1]
        if image_dir == 'train':
            train_images.append(image)
            if label == 'NORMAL':
                train_labels.append(0)
            else:
                train_labels.append(1)
        elif image_dir == 'val':
            val_images.append(image)
            if label == 'NORMAL':
                val_labels.append(0)
            else:
                val_labels.append(1)
        else:
            test_images.append(image)
            # keep the labels consistent with train/val: NORMAL -> 0, else -> 1
            if label == 'NORMAL':
                test_labels.append(0)
            else:
                test_labels.append(1)

train_images = np.array(train_images)
train_labels = np.array(train_labels)
val_images = np.array(val_images)
val_labels = np.array(val_labels)
test_images = np.array(test_images)
test_labels = np.array(test_labels)

train_labels = to_categorical(train_labels, num_classes = 2)
val_labels = to_categorical(val_labels, num_classes = 2)
test_labels = to_categorical(test_labels, num_classes = 2)
###Output
_____no_output_____
###Markdown
Data Normalization
###Code
# scale all three splits the same way, so that validation and test images
# are preprocessed consistently with the training images
train_images = train_images / 255.0
val_images = val_images / 255.0
test_images = test_images / 255.0
###Output
_____no_output_____
###Markdown
Data Visualization
###Code
f,ax = plt.subplots(3,3)
f.subplots_adjust(0,0,2,2)
for i in range(0,3,1):
    for j in range(0,3,1):
        rnd_number = randint(0,len(train_images))
        ax[i,j].imshow(train_images[rnd_number])
        ax[i,j].axis('off')
###Output
_____no_output_____
###Markdown
Building CNN Architecture
###Code
from keras.models import Sequential
from keras.layers import Conv2D, MaxPool2D, BatchNormalization, Dropout, Flatten, Dense
from keras import regularizers

model = Sequential()
model.add(Conv2D(filters = 64, kernel_size=(5,5), padding = 'Same', activation='relu', input_shape=(224,224,3)))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(BatchNormalization())
model.add(Conv2D(filters = 64, kernel_size=(5,5), activation='relu', input_shape=(224,224,3)))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(filters = 64, kernel_size=(5,5), activation='relu', input_shape=(224,224,3)))
model.add(BatchNormalization())
model.add(Conv2D(filters = 16, kernel_size=(5,5), activation='relu', input_shape=(224,224,3)))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(filters = 16, kernel_size=(5,5), activation='relu', input_shape=(224,224,3)))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(filters = 32, kernel_size=(5,5), activation='relu', input_shape=(224,224,3)))
model.add(MaxPool2D(pool_size=(2,2),strides=(1,1)))
model.add(BatchNormalization())
model.add(Conv2D(filters = 16, kernel_size=(5,5), padding = 'Same' ,activation='relu', input_shape=(224,224,3)))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(filters = 32, kernel_size=(5,5), padding = 'Same', activation='relu', input_shape=(224,224,3)))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation = "relu",kernel_regularizer=regularizers.l2(0.01),
                activity_regularizer=regularizers.l1(0.01)))
model.add(Dropout(0.5))
model.add(Dense(2,activation='softmax',kernel_regularizer=regularizers.l2(0.01),
                activity_regularizer=regularizers.l1(0.01)))
###Output
_____no_output_____
###Markdown
Summary of Architecture
###Code
model.summary()
###Output
Model: "sequential_19"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d_123 (Conv2D)          (None, 224, 224, 64)      4864      
_________________________________________________________________ 
max_pooling2d_97 (MaxPooling (None, 112, 112, 64) 0 _________________________________________________________________ batch_normalization_50 (Batc (None, 112, 112, 64) 256 _________________________________________________________________ conv2d_124 (Conv2D) (None, 108, 108, 64) 102464 _________________________________________________________________ max_pooling2d_98 (MaxPooling (None, 54, 54, 64) 0 _________________________________________________________________ conv2d_125 (Conv2D) (None, 50, 50, 64) 102464 _________________________________________________________________ batch_normalization_51 (Batc (None, 50, 50, 64) 256 _________________________________________________________________ conv2d_126 (Conv2D) (None, 46, 46, 16) 25616 _________________________________________________________________ max_pooling2d_99 (MaxPooling (None, 23, 23, 16) 0 _________________________________________________________________ conv2d_127 (Conv2D) (None, 19, 19, 16) 6416 _________________________________________________________________ max_pooling2d_100 (MaxPoolin (None, 9, 9, 16) 0 _________________________________________________________________ conv2d_128 (Conv2D) (None, 5, 5, 32) 12832 _________________________________________________________________ max_pooling2d_101 (MaxPoolin (None, 4, 4, 32) 0 _________________________________________________________________ batch_normalization_52 (Batc (None, 4, 4, 32) 128 _________________________________________________________________ conv2d_129 (Conv2D) (None, 4, 4, 16) 12816 _________________________________________________________________ max_pooling2d_102 (MaxPoolin (None, 2, 2, 16) 0 _________________________________________________________________ conv2d_130 (Conv2D) (None, 2, 2, 32) 12832 _________________________________________________________________ max_pooling2d_103 (MaxPoolin (None, 1, 1, 32) 0 _________________________________________________________________ dropout_5 (Dropout) (None, 1, 1, 32) 0 _________________________________________________________________ flatten_4 (Flatten) (None, 32) 0 _________________________________________________________________ dense_9 (Dense) (None, 256) 8448 _________________________________________________________________ dropout_6 (Dropout) (None, 256) 0 _________________________________________________________________ dense_10 (Dense) (None, 2) 514 ================================================================= Total params: 289,906 Trainable params: 289,586 Non-trainable params: 320 _________________________________________________________________ ###Markdown Adam Optimizer with lr = 0.001 ###Code optimizer = Adam(lr=0.001, beta_1=0.9, beta_2=0.999) model.compile(optimizer = optimizer , loss = "binary_crossentropy", metrics=["accuracy"]) ###Output WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/optimizers.py:793: The name tf.train.Optimizer is deprecated. Please use tf.compat.v1.train.Optimizer instead. WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:3657: The name tf.log is deprecated. Please use tf.math.log instead. WARNING:tensorflow:From /tensorflow-1.15.0/python3.6/tensorflow_core/python/ops/nn_impl.py:183: where (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version. 
Instructions for updating:
Use tf.where in 2.0, which has the same broadcast rule as np.where
###Markdown
Save the model if validation accuracy improved
###Code
# imports assumed here; the original setup cells are not shown
from keras.callbacks import ModelCheckpoint, EarlyStopping

filepath="/content/drive/My Drive/Kaggle/epochs:{epoch:03d}-val_acc:{val_accuracy:.3f}.h5"
checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
earlystopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=0, verbose=0, mode='auto',
                              baseline=None, restore_best_weights=False)
callbacks_list = [checkpoint,earlystopping]
###Output
_____no_output_____
###Markdown
Fit the model
###Code
# pass the validation data and the callbacks defined above, so that
# ModelCheckpoint and EarlyStopping can actually monitor the validation metrics
model.fit(train_images, train_labels,
          batch_size=128,
          epochs=5,
          validation_data=(val_images, val_labels),
          callbacks=callbacks_list)

model.save("IdentifyingMedicalDiagnosesCNN.h5")

score = model.evaluate(val_images, val_labels, verbose=0)
###Output
_____no_output_____
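###Markdown
Since `ModelCheckpoint` writes a file whenever validation accuracy improves, the best weights can be reloaded for a final test-set evaluation. A sketch, assuming a checkpoint file was actually written during training; the filename below is hypothetical, so substitute the best-scoring epoch's file:
###Code
from keras.models import load_model

# hypothetical checkpoint path produced by the callback above
best_model = load_model('/content/drive/My Drive/Kaggle/epochs:003-val_acc:0.875.h5')
test_score = best_model.evaluate(test_images, test_labels, verbose=0)
print('test loss: %.4f | test accuracy: %.4f' % (test_score[0], test_score[1]))
###Output
_____no_output_____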
12/code.ipynb
###Markdown
12. Implementing a Multilayer Artificial Neural Network from Scratch
###Code
%run -i 'watermark.py'
###Output
2020-01-02 12:07:22
----------------------
python 3.6.7
----------------------
numpy 1.16.2
scipy 1.1.0
pandas 0.25.1
matplotlib 3.1.1
imageio 2.5.0
----------------------
ipython 7.8.0
----------------------
sklearn 0.20.4
tensorflow 1.13.1
nltk 3.2.4
###Markdown
12.01. Modeling complex functions with artificial neural networks
12.01.01. Single-layer neural network recap
12.01.02. Introducing the multilayer neural network architecture
12.01.03. Activating a neural network via forward propagation
###Code
def phi(z):
    return 1 / (1 + np.exp(-z))

z0, z1 = -8, 8
z = np.linspace(z0, z1, 100)

fig, ax = plt.subplots(figsize=(5, 5), facecolor='w')
ax.set_facecolor('w')
ax.plot(z, phi(z), color=c['b'], label=r'$\phi (z) = \frac{1}{1 + e^{-z}}$')
ax.grid(True)
ax.legend(loc='lower right')
ax.set(title='Sigmoid function', xlabel=r'$z$', ylabel=r'$\phi(z)$')
ax.set_xlim(z0, z1)
plt.tight_layout()
# plt.savefig('fig/1204.png')
plt.show()
###Output
_____no_output_____
###Markdown
12.02. Classifying handwritten digits
12.02.01. Obtaining and preparing the MNIST dataset
###Code
"""
import os
import struct
import numpy as np

def load_mnist(path, kind='train'):
    '''Load MNIST data from "path"'''
    labels_path = os.path.join(path, '%s-labels.idx1-ubyte' % kind)
    images_path = os.path.join(path, '%s-images.idx3-ubyte' % kind)

    with open(labels_path, 'rb') as lbpath:
        magic, n = struct.unpack('>II', lbpath.read(8))
        labels = np.fromfile(lbpath, dtype=np.uint8)

    with open(images_path, 'rb') as imgpath:
        magic, num, rows, cols = struct.unpack('>IIII', imgpath.read(16))
        images = np.fromfile(imgpath, dtype=np.uint8).reshape(len(labels), 784)
        images = ((images / 255.) - .5) * 2

    return images, labels
"""

"""
X_train, y_train = load_mnist('data', kind='train')
print('Rows: %d, columns: %d' % (X_train.shape[0], X_train.shape[1]))

X_test, y_test = load_mnist('data', kind='t10k')
print('Rows: %d, columns: %d' % (X_test.shape[0], X_test.shape[1]))
"""

"""
import matplotlib.pyplot as plt

fig, ax = plt.subplots(nrows=2, ncols=5, sharex=True, sharey=True, facecolor='w')
ax = ax.flatten()
for i in range(10):
    img = X_train[y_train == i][0].reshape(28, 28)
    ax[i].imshow(img, cmap='Greys')

ax[0].set_xticks([])
ax[0].set_yticks([])
plt.tight_layout()
plt.show()
"""

"""
fig, ax = plt.subplots(nrows=5, ncols=5, sharex=True, sharey=True, facecolor='w')
ax = ax.flatten()
for i in range(25):
    img = X_train[y_train == 7][i].reshape(28, 28)
    ax[i].imshow(img, cmap='Greys')

ax[0].set_xticks([])
ax[0].set_yticks([])
plt.tight_layout()
plt.show()
"""

"""
import numpy as np

np.savez_compressed('data/mnist_scaled.npz',
                    X_train=X_train,
                    y_train=y_train,
                    X_test=X_test,
                    y_test=y_test)
"""

mnist = np.load('data/mnist_scaled.npz')
mnist.files

X_train, y_train, X_test, y_test = [mnist[f] for f in mnist.files]
###Output
_____no_output_____
###Markdown
12.02.02. Implementing a multilayer perceptron
###Code
import numpy as np
import sys

class NeuralNetMLP(object):
    """ Feedforward neural network / Multi-layer perceptron classifier.

    Parameters
    ------------
    n_hidden : int (default: 30)
        Number of hidden units.
    l2 : float (default: 0.)
        Lambda value for L2-regularization.
        No regularization if l2=0. (default)
    epochs : int (default: 100)
        Number of passes over the training set.
    eta : float (default: 0.001)
        Learning rate.
    shuffle : bool (default: True)
        Shuffles training data every epoch if True to prevent circles.
    
minibatch_size : int (default: 1) Number of training examples per minibatch. seed : int (default: None) Random seed for initializing weights and shuffling. Attributes ----------- eval_ : dict Dictionary collecting the cost, training accuracy, and validation accuracy for each epoch during training. """ def __init__(self, n_hidden=30, l2=0., epochs=100, eta=0.001, shuffle=True, minibatch_size=1, seed=None): self.random = np.random.RandomState(seed) self.n_hidden = n_hidden self.l2 = l2 self.epochs = epochs self.eta = eta self.shuffle = shuffle self.minibatch_size = minibatch_size def _onehot(self, y, n_classes): """Encode labels into one-hot representation Parameters ------------ y : array, shape = [n_examples] Target values. Returns ----------- onehot : array, shape = (n_examples, n_labels) """ onehot = np.zeros((n_classes, y.shape[0])) for idx, val in enumerate(y.astype(int)): onehot[val, idx] = 1. return onehot.T def _sigmoid(self, z): """Compute logistic function (sigmoid)""" return 1. / (1. + np.exp(-np.clip(z, -250, 250))) def _forward(self, X): """Compute forward propagation step""" # step 1: net input of hidden layer # [n_examples, n_features] dot [n_features, n_hidden] # -> [n_examples, n_hidden] z_h = np.dot(X, self.w_h) + self.b_h # step 2: activation of hidden layer a_h = self._sigmoid(z_h) # step 3: net input of output layer # [n_examples, n_hidden] dot [n_hidden, n_classlabels] # -> [n_examples, n_classlabels] z_out = np.dot(a_h, self.w_out) + self.b_out # step 4: activation output layer a_out = self._sigmoid(z_out) return z_h, a_h, z_out, a_out def _compute_cost(self, y_enc, output): """Compute cost function. Parameters ---------- y_enc : array, shape = (n_examples, n_labels) one-hot encoded class labels. output : array, shape = [n_examples, n_output_units] Activation of the output layer (forward propagation) Returns --------- cost : float Regularized cost """ L2_term = (self.l2 * (np.sum(self.w_h ** 2.) + np.sum(self.w_out ** 2.))) term1 = -y_enc * (np.log(output)) term2 = (1. - y_enc) * np.log(1. - output) cost = np.sum(term1 - term2) + L2_term # If you are applying this cost function to other # datasets where activation # values maybe become more extreme (closer to zero or 1) # you may encounter "ZeroDivisionError"s due to numerical # instabilities in Python & NumPy for the current implementation. # I.e., the code tries to evaluate log(0), which is undefined. # To address this issue, you could add a small constant to the # activation values that are passed to the log function. # # For example: # # term1 = -y_enc * (np.log(output + 1e-5)) # term2 = (1. - y_enc) * np.log(1. - output + 1e-5) return cost def predict(self, X): """Predict class labels Parameters ----------- X : array, shape = [n_examples, n_features] Input layer with original features. Returns: ---------- y_pred : array, shape = [n_examples] Predicted class labels. """ z_h, a_h, z_out, a_out = self._forward(X) y_pred = np.argmax(z_out, axis=1) return y_pred def fit(self, X_train, y_train, X_valid, y_valid): """ Learn weights from training data. Parameters ----------- X_train : array, shape = [n_examples, n_features] Input layer with original features. y_train : array, shape = [n_examples] Target class labels. 
X_valid : array, shape = [n_examples, n_features] Sample features for validation during training y_valid : array, shape = [n_examples] Sample labels for validation during training Returns: ---------- self """ n_output = np.unique(y_train).shape[0] # number of class labels n_features = X_train.shape[1] ######################## # Weight initialization ######################## # weights for input -> hidden self.b_h = np.zeros(self.n_hidden) self.w_h = self.random.normal(loc=0.0, scale=0.1, size=(n_features, self.n_hidden)) # weights for hidden -> output self.b_out = np.zeros(n_output) self.w_out = self.random.normal(loc=0.0, scale=0.1, size=(self.n_hidden, n_output)) epoch_strlen = len(str(self.epochs)) # for progress formatting self.eval_ = {'cost': [], 'train_acc': [], 'valid_acc': []} y_train_enc = self._onehot(y_train, n_output) # iterate over training epochs for i in range(self.epochs): # iterate over minibatches indices = np.arange(X_train.shape[0]) if self.shuffle: self.random.shuffle(indices) for start_idx in range(0, indices.shape[0] - self.minibatch_size + 1, self.minibatch_size): batch_idx = indices[start_idx:start_idx + self.minibatch_size] # forward propagation z_h, a_h, z_out, a_out = self._forward(X_train[batch_idx]) ################## # Backpropagation ################## # [n_examples, n_classlabels] delta_out = a_out - y_train_enc[batch_idx] # [n_examples, n_hidden] sigmoid_derivative_h = a_h * (1. - a_h) # [n_examples, n_classlabels] dot [n_classlabels, n_hidden] # -> [n_examples, n_hidden] delta_h = (np.dot(delta_out, self.w_out.T) * sigmoid_derivative_h) # [n_features, n_examples] dot [n_examples, n_hidden] # -> [n_features, n_hidden] grad_w_h = np.dot(X_train[batch_idx].T, delta_h) grad_b_h = np.sum(delta_h, axis=0) # [n_hidden, n_examples] dot [n_examples, n_classlabels] # -> [n_hidden, n_classlabels] grad_w_out = np.dot(a_h.T, delta_out) grad_b_out = np.sum(delta_out, axis=0) # Regularization and weight updates delta_w_h = (grad_w_h + self.l2*self.w_h) delta_b_h = grad_b_h # bias is not regularized self.w_h -= self.eta * delta_w_h self.b_h -= self.eta * delta_b_h delta_w_out = (grad_w_out + self.l2*self.w_out) delta_b_out = grad_b_out # bias is not regularized self.w_out -= self.eta * delta_w_out self.b_out -= self.eta * delta_b_out ############# # Evaluation ############# # Evaluation after each epoch during training z_h, a_h, z_out, a_out = self._forward(X_train) cost = self._compute_cost(y_enc=y_train_enc, output=a_out) y_train_pred = self.predict(X_train) y_valid_pred = self.predict(X_valid) train_acc = ((np.sum(y_train == y_train_pred)).astype(np.float) / X_train.shape[0]) valid_acc = ((np.sum(y_valid == y_valid_pred)).astype(np.float) / X_valid.shape[0]) sys.stderr.write('\r%0*d/%d | Cost: %.2f ' '| Train/Valid Acc.: %.2f%%/%.2f%% ' % (epoch_strlen, i+1, self.epochs, cost, train_acc*100, valid_acc*100)) sys.stderr.flush() self.eval_['cost'].append(cost) self.eval_['train_acc'].append(train_acc) self.eval_['valid_acc'].append(valid_acc) return self nn = NeuralNetMLP(n_hidden=100, l2=.01, epochs=200, eta=.0005, minibatch_size=100, shuffle=True, seed=1) nn.fit(X_train=X_train[:55000], y_train=y_train[:55000], X_valid=X_train[55000:], y_valid=y_train[55000:]) fig, ax = plt.subplots(facecolor='w') ax.plot(range(nn.epochs), nn.eval_['cost']) ax.set_ylabel('Cost') ax.set_xlabel('Epochs') ax.grid(True) plt.tight_layout() plt.show() fig, ax = plt.subplots(facecolor='w') ax.plot(range(nn.epochs), nn.eval_['train_acc'], label='training') ax.plot(range(nn.epochs), 
nn.eval_['valid_acc'], label='validation', linestyle='--') ax.set_ylabel('Accuracy') ax.set_xlabel('Epochs') ax.legend(loc='lower right') ax.grid(True) plt.tight_layout() plt.show() y_test_pred = nn.predict(X_test) acc = (np.sum(y_test == y_test_pred).astype(np.float) / X_test.shape[0]) print('Test accuracy: %.2f%%' % (acc * 100)) # l2 = .1 nn01 = NeuralNetMLP(n_hidden=100, l2=.1, epochs=200, eta=.0005, minibatch_size=100, shuffle=True, seed=1) nn01.fit(X_train=X_train[:55000], y_train=y_train[:55000], X_valid=X_train[55000:], y_valid=y_train[55000:]) fig, ax = plt.subplots(facecolor='w') ax.plot(range(nn01.epochs), nn01.eval_['train_acc'], label='training') ax.plot(range(nn01.epochs), nn01.eval_['valid_acc'], label='validation', linestyle='--') ax.set_ylabel('Accuracy') ax.set_xlabel('Epochs') ax.legend(loc='lower right') ax.grid(True) plt.tight_layout() plt.show() miscl_img = X_test[y_test != y_test_pred][:25] correct_lab = y_test[y_test != y_test_pred][:25] miscl_lab = y_test_pred[y_test != y_test_pred][:25] fig, ax = plt.subplots(nrows=5, ncols=5, sharex=True, sharey=True, facecolor='w', figsize=(10, 10)) ax = ax.flatten() for i in range(25): img = miscl_img[i].reshape(28, 28) ax[i].imshow(img, cmap='Greys', interpolation='nearest') ax[i].set_title('%d) t: %d p: %d' % (i+1, correct_lab[i], miscl_lab[i])) ax[0].set_xticks([]) ax[0].set_yticks([]) plt.tight_layout() plt.show() ###Output _____no_output_____
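###Markdown
To make the shape bookkeeping in `_forward` concrete, here is a tiny standalone walkthrough with made-up dimensions (3 examples, 4 features, 5 hidden units, 2 classes); the numbers are arbitrary and only the shapes matter:
###Code
rng = np.random.RandomState(1)
X_tiny = rng.normal(size=(3, 4))      # [n_examples, n_features]
w_h_tiny = rng.normal(size=(4, 5))    # [n_features, n_hidden]
w_out_tiny = rng.normal(size=(5, 2))  # [n_hidden, n_classlabels]

z_h_tiny = np.dot(X_tiny, w_h_tiny)        # net input of hidden layer -> (3, 5)
a_h_tiny = 1. / (1. + np.exp(-z_h_tiny))   # sigmoid activation
z_out_tiny = np.dot(a_h_tiny, w_out_tiny)  # net input of output layer -> (3, 2)

print(z_h_tiny.shape, z_out_tiny.shape)
###Output
_____no_output_____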
numeric-types/.ipynb_checkpoints/ex 2.4 hexadecimal output-checkpoint.ipynb
###Markdown
**Hexadecimal output:** Exercise 2.4
Hexadecimal numbers are fairly common in the world of computers. Actually, that’s not entirely true: some programmers use them all of the time. Other programmers, typically using high-level languages and doing things such as Web development, barely ever remember how to use them.
Now, the fact is that I barely use hexadecimal numbers in my day-to-day work. And even if I were to need them, I could use Python’s built-in hex function and 0x prefix. The former takes an integer and returns a hex string; the latter allows me to enter a number using hexadecimal notation, which can be more convenient. Thus, 0x50 is 80, and hex(80) will return the string 0x50.
For this exercise, you need to write a program that takes a hex number and returns the decimal equivalent. That is, if the user enters 50, then we will assume that it is a hex number (equal to 0x50), and will print the value 80 on the screen.
###Code
conversion_table = {
    'A': 10,
    'B': 11,
    'C': 12,
    'D': 13,
    'E': 14,
    'F': 15
}

def convert_hex_char_to_dec_num(hex_char):
    _hex_char = hex_char.upper()
    if '0' <= _hex_char <= '9':
        dec = int(_hex_char)
    elif 'A' <= _hex_char <= 'F':
        dec = conversion_table[_hex_char]
    else:
        raise ValueError("invalid hex char {0}".format(hex_char))
    return dec

def hex_to_dec(hex_str):
    result = None
    for hex_char in hex_str:
        dec_num = convert_hex_char_to_dec_num(hex_char)
        if result is None:
            result = dec_num
        else:
            result = result * 16 + dec_num
    return result

print(hex_to_dec('ff'))

dir('A')
#'A'.__hash__()
###Output
_____no_output_____
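###Markdown
A quick way to sanity-check the implementation is to compare it against Python's built-in conversion, which the exercise text itself mentions:
###Code
# cross-check against int(s, 16), which parses hexadecimal strings directly
for s in ['50', 'ff', '1A3', '0']:
    assert hex_to_dec(s) == int(s, 16), s
print('all checks passed')
###Output
_____no_output_____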
in-class/week-13-regex-part-3-actual-demo.ipynb
###Markdown
Regular Expressions – Part 3 Capturing into DF
Pages we need to visit:
- Live Testing: https://regex101.com/
- Sandeep's REGEX Tip sheet
Some dummy text:
12534 127 ab aba abba [email protected] abbba, abbbbba, (518) 469-4581 abcde.The dog is a not a hog. ABA ABBA ABBBA.Ab_CD123 123456 and 12456 tor 12531245134562. 123867584789. $40.44 or $3 or $52,583.08 or $610,235.11The cat sat down and called 514-957-3453 while the other caaaaaat purred. This cat is in California while this caaaat is in Iraq, but none are in ct. My dog prefers cat food to dog food but hates fish food.My food tastes yummy!AB_cd -.,!@ $%^&*();\/|_^@1 (917) 488-5410*!dsar2d1I told him to search the thesaurus where [email protected] he'd be able to [email protected] find words like them.abcdefgczhijklmnopqrstuvwxyz ABCDEFGCZHIJKLMNOPQRSTUVWXYZA h0g is a hog.Dog and dog and DOG. His number is 415.458.9163.&^%@ 129
###Code
## place dummy text into a variable called mydoc
mydoc = '''
12534 127 ab aba abba [email protected] abbba, abbbbba, (518) 469-4581 abcde.The dog is a not a hog. ABA ABBA ABBBA.
Ab_CD123 123456 and 12456 tor 12531245134562. 123867584789. $40.44 or $3 or $52,583.08 or $610,235.11
The cat sat down and called 514-957-3453 while the other caaaaaat purred. This cat is in California while this caaaat is in Iraq, but none are in ct. My dog prefers cat food to dog food but hates fish food.My food tastes yummy!
AB_cd <+>-.,!@# $%^&*();\/|_^@1# (917) 488-5410
*!dsar2d1
I told him to search the thesaurus where [email protected] he'd be able to [email protected] find words like them.
abcdefgczhijklmnopqrstuvwxyz ABCDEFGCZHIJKLMNOPQRSTUVWXYZ
A h0g is a hog.
Dog and dog and DOG. His number is 415.458.9163.
&^%@ 129
abba
'''

## call mydoc
mydoc

## import libraries
import re
import pandas as pd
###Output
_____no_output_____
###Markdown
Is a value present?
###Code
## example list - run it
mylist = [1, 2, 3, 4, 5, 6, 8, 7, 9]

## is the number 1 in the list?
1 in mylist

## is the number 12 in the list?
12 in mylist

### is "12534" in mydoc
"12534" in mydoc

## is "x34" in mydoc
"x34" in mydoc
###Output
_____no_output_____
###Markdown
Let's actually use a regex pattern
###Code
## find all words and digits (excluding symbols)
pat = re.compile("\w+")

## let's run it on our mydoc string
re.search(pat, mydoc)

## put out the actual match
re.search(pat, mydoc).group()
###Markdown
```findall```
Returns a list of all found items
```
result = your_pattern.findall(doc_to_search)
```
###Code
## use findall to find all matching patterns
x = pat.findall(mydoc)
x

## what object is it?
type(x)
###Output
_____no_output_____
###Markdown
Import document(s) to scan
###Code
## access external document
## store text into a variable
with open("demo.txt", "r") as text_obj:
    text = text_obj.read()

text

## print text
print(text)

## call text
text

## find all phone numbers
tel_pat = re.compile("\(?\d{3}\)?[-\s\.]?\d{3}[-\s\.]?\d{4}")
tel_numbers = tel_pat.findall(text)
tel_numbers

## call our tel_pat to see something interesting
tel_pat
###Output
_____no_output_____
###Markdown
Flags
- ```re.IGNORECASE``` or ```re.I``` for ignore case
- ```re.MULTILINE``` or ```re.M``` for multiline
- ```re.DOTALL``` or ```re.S``` for period includes new lines
- ```re.VERBOSE``` or ```re.X``` for breaking up the regex
More on flags. 
###Code
## find all words at the beginning of a line
pat1 = re.compile(r"^[a-z]+\b")

## print all matches
pat1.findall(text)

## find all words at the beginning of a line
## ^ or $ need multiline flag
pat2 = re.compile(r"^[a-z]+\b", re.M)

## print all matches
pat2.findall(text)

## find all words at the beginning of a line regardless of case
pat3 = re.compile(r"^[a-z]+\b", re.M | re.I)

## find all matches for pat3
pat3.findall(text)
###Output
_____no_output_____
###Markdown
Capture Mock Table data
Place animals, ID and color and place into dataframe
###Code
## call text
text

## print it
print(text)

## MATCH ALL animal data
mock_pat = re.compile("^[a-z]+\s+\d+\s\w+$", re.MULTILINE)
mock_t = mock_pat.findall(text)
mock_t

df = pd.DataFrame(mock_t)
df

## Make it easier to read with VERBOSE
mock_pat = re.compile("""(^[a-z]+)  # capture animal name
                      \s+           # exclude capture of space
                      (\d+)         # capture ID number
                      \s            # exclude capture of space
                      (\w+)$        # capture animal color
                      """, re.M | re.VERBOSE)

mock_table = mock_pat.findall(text)
mock_table

## type of object
for item in mock_table:
    print(type(item))

## export to df
df1 = pd.DataFrame(mock_table)
df1.columns = ["Animal", "ID", "Color"]
df1
###Output
_____no_output_____
###Markdown
Capture Weapons data
Place weapons dataframe
###Code
## pull in weapons.txt
with open("weapons.txt", "r") as w:
    wtext = w.read()

wtext

print(wtext)

## regex to find pattern
w_pat = re.compile("""(?P<weapon_type>^\D+)  ## capture weapons type
                   \s                        # exclude space
                   (?P<y_2000>[,\d]+)        # capture 2000
                   \s                        # exclude space
                   (?P<y_2001>[,\d]+)        # capture 2001
                   \s
                   (?P<y_2002>[,\d]+)        # capture 2002
                   \s
                   (?P<y_2003>[,\d]+)        # capture 2003
                   \s
                   (?P<y_2004>[,\d]+)        # capture 2004
                   \s
                   (?P<totals>[,\w]+)        # capture totals
                   """, re.X | re.M | re.I)

## test with search
re.search(w_pat, wtext)

## CAPTURE into weapons table
w_table = re.findall(w_pat, wtext)
w_table

df = pd.DataFrame(w_table)
df

## how do we capture the first tuple item as a list?
list(w_table[0])

list(w_table[1:])

## convert into df
df = pd.DataFrame(w_table[1:])
df.columns = list(w_table[0])
df

## export as CSV
df.to_csv("weapons.csv", index=False)

wtext
###Output
_____no_output_____
###Markdown
Find key data points embedded in text
demo text:
###Code
some_text = '''
Name: Sulekha Ram-Junnarkar
Proin elementum, ipsum id ullamcorper euismod, diam diam laoreet urna.
Date: January 5, 2011
Eu tempus eros augue quis nulla.
Location: Berkeley, Ca

Name: Peter Smith
Proin elementum, ipsum id ullamcorper euismod, diam diam laoreet urna.
Date: March 24, 1988
Eu tempus eros augue quis nulla.
Location: New York, NY

Name: American Artist
Proin elementum, ipsum id ullamcorper euismod, diam diam laoreet urna.
Date: July 1, 2022
Eu tempus eros augue quis nulla.
Location: Long Island City, NY

Name: Jill F. Doe
Proin elementum, ipsum id ullamcorper euismod, diam diam laoreet urna.
Date: December 16, 2010
Eu tempus eros augue quis nulla.
Location: Miami, Florida

Eu tempus eros augue quis nulla.
Eu tempus eros augue quis nulla.
Eu tempus eros augue quis nulla.
Eu tempus eros augue quis nulla. 
'''

## patterns:
name_pat = re.compile("Name:\s((?P<First_name>\w+)\s?(?P<Middle_name>\w+\.)?\s(?P<Last_name>\w+(-\w+)?))", re.I | re.M)
date_pat = re.compile("Date:\s(\w+\s\d{1,2},\s\d{4})", re.I | re.M)
loc_pat = re.compile("Location:\s([\sa-z]+,\s\w+)", re.I | re.M)

## find dates
dates_list = date_pat.findall(some_text)
dates_list

## find names
names = name_pat.findall(some_text)
names

## pulling in multiple capture groups
names_list = [name[0] for name in names]
names_list

## location
loc_list = loc_pat.findall(some_text)
loc_list

## export
df = pd.DataFrame(list(zip(names_list, loc_list, dates_list)),
                  columns = ["name", "location", "date"])
df

## note: match() only anchors at the very start of the string, so it returns
## None here; use search() to scan for the first occurrence instead
name_pat.search(some_text)
###Output
_____no_output_____
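###Markdown
An alternative worth knowing: `re.finditer` returns match objects, so the three patterns above can be walked in parallel and fed straight into a DataFrame without building separate lists first. A sketch using the patterns already compiled above:
###Code
## same capture as above, but via finditer on the compiled patterns
records = []
for name_m, date_m, loc_m in zip(name_pat.finditer(some_text),
                                 date_pat.finditer(some_text),
                                 loc_pat.finditer(some_text)):
    records.append({"name": name_m.group(1),
                    "location": loc_m.group(1),
                    "date": date_m.group(1)})
pd.DataFrame(records)
###Output
_____no_output_____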
Keras_fashion_mnist.ipynb
###Markdown
###Code
# Importing the Fashion-MNIST dataset
!pip install keras
from keras.datasets import fashion_mnist
data=fashion_mnist.load_data()

(X_train,y_train), (X_test, y_test)=fashion_mnist.load_data()

import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Input, Dense
import numpy as np

# warm-up example: a single sigmoid unit learning the AND truth table
x=np.array([[0,0], [0,1],[1,0], [1,1]],"uint8")
y=np.array([[0],[0],[0],[1]],"uint8")

model=Sequential()
model.add(Dense(units=1,activation="sigmoid",input_dim=2))
model.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])
model.summary()

model.fit(x,y,epochs=1000)

X_train[0].shape

# flatten the 28x28 images into 784-dimensional vectors
X_train=X_train.reshape((X_train.shape[0],28*28)).astype('float32')
X_test=X_test.reshape((X_test.shape[0], 28*28)).astype('float32')

# Normalizing values from 0-255 to 0-1
X_train=X_train/255
X_test=X_test/255

from keras.utils import np_utils
y_train=np_utils.to_categorical(y_train)
y_test=np_utils.to_categorical(y_test)
num_classes=y_test.shape[1]
num_classes

from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout

model=Sequential()
model.add(Dense(32,input_dim=28*28,activation='relu')) #hidden layer 1
model.add(Dense(64,activation='relu')) #hidden layer 2
model.add(Dense(128,activation='relu')) #hidden layer 3
model.add(Dense(256,activation='relu')) #hidden layer 4
model.add(Dense(512,activation='relu')) #hidden layer 5
model.add(Dense(10,activation='softmax')) #output layer

model.compile(loss="categorical_crossentropy",optimizer='adam',metrics=['accuracy'])
model.summary()

model.fit(X_train,y_train,epochs=10,batch_size=200)

scores=model.evaluate(X_test,y_test)
scores
###Output
313/313 [==============================] - 1s 2ms/step - loss: 0.3654 - accuracy: 0.8769
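###Markdown
The integer labels can be mapped back to garment names when inspecting predictions; the list below follows the standard Fashion-MNIST class ordering:
###Code
# standard Fashion-MNIST label names, index-aligned with the integer labels
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']

pred = model.predict(X_test[:5])       # class probabilities, shape (5, 10)
pred_labels = np.argmax(pred, axis=1)  # most likely class per example
print([class_names[i] for i in pred_labels])
###Output
_____no_output_____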
Appendix/Models/Multi linear.ipynb
###Markdown
Multiple linear regression
###Code
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split

x = df_dummies.iloc[:,:-1]
y = df_dummies.iloc[:,-1]

from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
x.iloc[:,:3] = sc.fit_transform(x.iloc[:,:3])
x

X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, shuffle=False)

X_test

from sklearn.linear_model import Ridge
ridge = Ridge()
ridge.fit(X_train, y_train)

train_ridge = ridge.predict(X_train)
test_ridge = ridge.predict(X_test)

from math import sqrt
from sklearn import metrics

print('Ridge train RMSE :', sqrt(metrics.mean_squared_error(y_train, train_ridge)))
print('Ridge test RMSE :', sqrt(metrics.mean_squared_error(y_test, test_ridge)))

r = metrics.r2_score(y_train, train_ridge)
r

test3

df_test = pd.get_dummies(test3,columns=['Platform_Type','Rider_Id'], drop_first=True)
df_test.columns = [col.replace(" ", "_") for col in df_test.columns]
df_test

x = df_test

# predictions on the separate test file; use a new name so the
# training predictions above are not overwritten
test_pred = ridge.predict(df_test)
###Output
_____no_output_____
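###Markdown
Ridge is used above with its default penalty; a small, hedged extension is to let cross-validation choose the regularization strength. RidgeCV is part of scikit-learn, and the alpha grid below is an arbitrary illustration:
###Code
from sklearn.linear_model import RidgeCV

# let cross-validation pick alpha from a coarse grid
ridge_cv = RidgeCV(alphas=[0.01, 0.1, 1.0, 10.0, 100.0])
ridge_cv.fit(X_train, y_train)
print('chosen alpha :', ridge_cv.alpha_)
print('test RMSE :', sqrt(metrics.mean_squared_error(y_test, ridge_cv.predict(X_test))))
###Output
_____no_output_____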