path (stringlengths 7-265) | concatenated_notebook (stringlengths 46-17M) |
---|---|
interpolation/Bilinear interpolation plots.ipynb | ###Markdown
Bilinear interpolation
###Code
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
%matplotlib inline
plt.style.use("seaborn-notebook")
def rect_grid(Lx, Ly, nx, ny):
"""Generate a structured mesh for a rectangle
The rectangle has dimensions Lx by Ly, and nx nodes in x
and ny nodes in y.
"""
y, x = np.mgrid[-Ly/2:Ly/2:ny*1j, -Lx/2:Lx/2:nx*1j]
els = np.zeros(((nx - 1)*(ny - 1), 4), dtype=int)
for row in range(ny - 1):
for col in range(nx - 1):
cont = row*(nx - 1) + col
els[cont, :] = [cont + row, cont + row + 1,
cont + row + nx + 1, cont + row + nx]
return x.flatten(), y.flatten(), els
def interp_bilinear(coords, f_vals, grid=(10, 10)):
"""Bilinear interpolation for rectangular domains"""
x_min, y_min = np.min(coords, axis=0)
x_max, y_max = np.max(coords, axis=0)
x, y = np.mgrid[-1:1:grid[0]*1j,-1:1:grid[1]*1j]
N0 = (1 - x) * (1 - y)
N1 = (1 + x) * (1 - y)
N2 = (1 + x) * (1 + y)
N3 = (1 - x) * (1 + y)
interp_fun = N0 * f_vals[0] + N1 * f_vals[1] + N2 * f_vals[2] + N3 * f_vals[3]
interp_fun = 0.25*interp_fun
x, y = np.mgrid[x_min:x_max:grid[0]*1j, y_min:y_max:grid[1]*1j]
return x, y, interp_fun
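# Quick illustrative check of interp_bilinear (added example): at the element
# center (x = y = 0) all four shape functions equal 1/4, so the interpolated
# value is simply the average of the four corner values.
_, _, z_demo = interp_bilinear(np.array([[-1., -1.], [1., -1.], [1., 1.], [-1., 1.]]),
                               np.array([1., 2., 3., 4.]), grid=(3, 3))
print(z_demo[1, 1])  # expected: 2.5 = (1 + 2 + 3 + 4)/4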
def fun(x, y):
return y**3 + 3*y*x**2
x_coords, y_coords, els = rect_grid(2, 2, 4, 4)
nels = els.shape[0]
z_coords = fun(x_coords, y_coords)
z_min = np.min(z_coords)
z_max = np.max(z_coords)
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111, projection='3d')
x, y = np.mgrid[-1:1:51j,-1:1:51j]
z = fun(x, y)
surf =ax.plot_surface(x, y, z, rstride=1, cstride=1, linewidth=0, alpha=0.6,
cmap="viridis")
plt.colorbar(surf, shrink=0.5, aspect=10)
ax.plot(x_coords, y_coords, z_coords, 'ok')
for k in range(nels):
x_vals = x_coords[els[k, :]]
y_vals = y_coords[els[k, :]]
coords = np.column_stack([x_vals, y_vals])
f_vals = fun(x_vals, y_vals)
x, y, z = interp_bilinear(coords, f_vals, grid=[4, 4])
inter = ax.plot_wireframe(x, y, z, color="black")
plt.xlabel(r"$x$", fontsize=18)
plt.ylabel(r"$y$", fontsize=18)
ax.legend([inter], ["Interpolation"])
plt.show()
from IPython.core.display import HTML
def css_styling():
styles = open('./styles/custom_barba.css', 'r').read()
return HTML(styles)
css_styling()
###Output
_____no_output_____ |
Topic 1 System Fundamentals/4) System Design Basics (1.2.1 - 1.2.3).ipynb | ###Markdown
Topic 1.2 System design basics Components of a computer system 1.2.1 Define the terms: hardware, software, peripheral, network, human resources. Task Write a program that asks the user what these things are, and reports whether or not specific words appear in the answer. For example, the definition of hardware needs to include "CPU" and "transistor." To implement this program, you'll need to know how to do the following:- The `str` object has a method `.lower()` which returns the lowercase version of the string- The `in` keyword can be used to check for membership```python's' in 'Science'.lower()  # True'p' in 'Psychology'.lower()  # True```
###Code
possibilities = [
["science", "Computer Science"],
["science", "Physical Sciences"],
["science", "Psychology"]
]
# lowercase them first: this is called "normalizing" data
for possibility in possibilities:
possibility[0] = possibility[0].lower()
possibility[1] = possibility[1].lower()
for key, phrase in possibilities:
if key in phrase:
print(f"{key} is in {phrase}")
else:
print(f"No {key} in {phrase}!")
# ^---- joke!
###Output
_____no_output_____
###Markdown
The below demonstrates how you could write this program. You are asked to improve it.
###Code
test_answers = [
"Hardware blah blah CPU blah blah transistor", # should get full marks
"Hardware blah blah CPU", # should get 1 mark
"Hardware blah blah blah blah transistor", # should get 1 mark
"Hardare blah blah" # should get 0 marks
]
keys = ["CPU", "transistor"]
# iterate over test answers
for answer in test_answers:
# assume it's good, looking for reasons why it's not
good_answer = True
for key in keys:
if not key in answer:
good_answer = False
print(f'"{answer}"', end=": ")
if good_answer:
print("Yes, full marks!")
else:
print("No marks")
###Output
"Hardware blah blah CPU blah blah transistor": Yes, full marks!
"Hardware blah blah CPU": No marks
"Hardware blah blah blah blah transistor": No marks
"Hardare blah blah": No marks
|
Games/Monty_hall_Problem.ipynb | ###Markdown
Monty Hall Problem _____ The Monty Hall problem is named for its similarity to the Let's Make a Deal television game show hosted by Monty Hall. The problem is stated as follows. Assume that a room is equipped with three doors. Behind two are goats, and behind the third is a shiny new car. You are asked to pick a door, and will win whatever is behind it. Let's say you pick door 1. Before the door is opened, however, someone who knows what's behind the doors (Monty Hall) opens one of the other two doors, revealing a goat, and asks you if you wish to change your selection to the third door (i.e., the door which neither you picked nor he opened). The Monty Hall problem is deciding whether you should. The correct answer is that you do want to switch. If you do not switch, you have the expected 1/3 chance of winning the car, since no matter whether you initially picked the correct door, Monty will show you a door with a goat. But after Monty has eliminated one of the doors for you, you obviously do not improve your chances of winning to better than 1/3 by sticking with your original choice. If you now switch doors, however, there is a 2/3 chance you will win the car (counterintuitive though it seems). Now let us check these probabilities using Python code.
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
Goats = np.array(['Goat: 1', 'Goat: 2'])
Goats
def get_other_goat(Goat):
if Goat == 'Goat: 1':
return 'Goat: 2'
elif Goat == 'Goat: 2':
return 'Goat: 1'
shift_1 = get_other_goat('Goat: 1')
shift_2 = get_other_goat('Goat: 2')
print('If the door corresponding to Goat: 1 is selected, then open -', shift_1)
print('If the door corresponding to Goat: 2 is selected, then open -', shift_2)
Hidden_behind_doors = np.array(['Car', 'Goat: 1', 'Goat: 2'])
Hidden_behind_doors
def monty_hall_game():
contestant_guess = np.random.choice(Hidden_behind_doors)
if contestant_guess == 'Goat: 1':
return [contestant_guess, 'Goat: 2', 'Car']
if contestant_guess == 'Goat: 2':
return [contestant_guess, 'Goat: 1', 'Car']
if contestant_guess == 'Car':
revealed = np.random.choice(Goats)
return [contestant_guess, revealed, get_other_goat(revealed)]
monty_hall_game()
play = []
for i in np.arange(10000):
play.append(monty_hall_game())
Games = pd.DataFrame(play, columns = ['Guess', 'Revealed', 'Remaining'])
Games
Guess_count = Games.pivot_table(index = ['Guess'], aggfunc ='size')
Guess_count
Remaining_count = pd.pivot_table(Games, index = ['Remaining'], aggfunc ='size')
Remaining_count
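# Empirical check (illustrative): staying wins only when the first guess was
# already the car, while switching wins whenever the remaining door hides the car.
stay_win_rate = (Games['Guess'] == 'Car').mean()
switch_win_rate = (Games['Remaining'] == 'Car').mean()
print('P(win | stay)   ~', stay_win_rate)    # close to 1/3
print('P(win | switch) ~', switch_win_rate)  # close to 2/3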
Data = pd.DataFrame([['Car', Guess_count[0], Remaining_count[0]], ['Goat: 1', Guess_count[1], Remaining_count[1]], ['Goat: 2', Guess_count[2], Remaining_count[2]]], columns=['Item', 'Original Door', 'Remaining Door'])
Data
ax = Data.plot.barh(0)
ax.set_xlabel('Count')
###Output
_____no_output_____ |
challenge-September/load_data_ibm.ipynb | ###Markdown
How to start on the IBM Data Science Platform?1. Create your account and log in2. Click on the 'Create New' tab seen on the right corner.3. Click on Notebook4. Choose your Notebook: Python 3.5 and add a relevant description if you wish to.5. Done
###Code
# you can install any package on this platform like this:
! pip install opencv-python
# you can install any package on this platform like this:
! pip install tqdm
## Load Libraries
import os
import requests, zipfile, io
# this is the current directory where files will get downloaded
os.getcwd()
# load data into platform
url = requests.get('https://he-s3.s3.amazonaws.com/media/hackathon/deep-learning-challenge-1/identify-the-objects/a0409a00-8-dataset_dp.zip')
data = zipfile.ZipFile(io.BytesIO(url.content))
data.extractall()
# check if the files have been downloaded in the current directory
os.listdir()
## load files
import pandas as pd  # needed for read_csv below; pandas was not imported earlier in this notebook
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
train.head()
###Output
_____no_output_____ |
4. Decision trees RF and XGBoost/XGBoost/Hyperparameter Optimization For Xgboost.ipynb | ###Markdown
Hyperparameter Optimization For Xgboost using RandomizedSearchCV. Original version taken from [https://github.com/krishnaik06](https://github.com/krishnaik06). The data can be downloaded from [Kaggle](https://www.kaggle.com/shrutimechlearn/churn-modelling)
###Code
import pandas as pd
## Read the Dataset
df=pd.read_csv('Churn_Modelling.csv')
df.head()
## Correlation
import seaborn as sns
import matplotlib.pyplot as plt
#get correlations of each features in dataset
corrmat = df.corr()
top_corr_features = corrmat.index
plt.figure(figsize=(20,20))
#plot heat map
g=sns.heatmap(df[top_corr_features].corr(),annot=True,cmap="RdYlGn")
#Get the Independent and Dependent Features
X=df.iloc[:,3:13]
Y=df.iloc[:,13]
geography=pd.get_dummies(X['Geography'],drop_first=True)
geography.head()
gender=pd.get_dummies(X['Gender'],drop_first=True)
gender.head()
## Drop Categorical Features
X=X.drop(['Geography','Gender'],axis=1)
X.head()
X=pd.concat([X,geography,gender],axis=1)
X.head()
###Output
_____no_output_____
###Markdown
**n_estimators** With boosted tree models, models are trained sequentially, where each subsequent tree tries to correct the errors made by the previous sequence of trees. **max_depth** The max_depth parameter determines how deep each estimator is permitted to build a tree. Typically, increasing tree depth can lead to overfitting if other mitigating steps aren't taken to prevent it. Like all algorithms, these parameters need to be viewed holistically. For datasets with complex structure, a deep tree might be required; other parameters like min_child_weight can be increased to mitigate the chances of overfitting. **learning_rate** The learning_rate parameter (also referenced in the XGBoost documentation as eta) controls the magnitude of change that is permitted from one tree to the next. To conceptualize this, you can think of it like learning a golf swing. If you slice the ball after your first shot at your golf lesson, it doesn't mean you need to dramatically change the way you're hitting the ball. Typically you want to make small, purposeful adjustments after each shot until you finally get the desired flight path. **gamma** The gamma is an unbounded parameter from 0 to infinity that is used to control the model's tendency to overfit. This parameter is also called min_split_loss in the reference documents. Think of gamma as a complexity controller that prevents other, less conservative parameters from fitting the trees to noise (overfitting). **subsample** The subsample parameter determines how much of the initial dataset is fair game for random sampling during each iteration of the boosting process. The default is set to 1.0, which means each iteration of the training process can sample 100% of the data. **min_child_weight** The minimum sum of instance weights (roughly, the number of samples) required to form a leaf node (the end of a branch). A leaf node is the termination of a branch and therefore the decision node for what class a sample belongs to.
###Code
## Hyper Parameter Optimization
params={
"learning_rate" : [0.05, 0.10, 0.15, 0.20, 0.25, 0.30 ] ,
"max_depth" : [ 3, 4, 5, 6, 8, 10, 12, 15],
"min_child_weight" : [ 1, 3, 5, 7 ],
"gamma" : [ 0.0, 0.1, 0.2 , 0.3, 0.4 ],
"colsample_bytree" : [ 0.3, 0.4, 0.5 , 0.7 ]
}
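# For reference (illustrative): the grid above spans 6 * 8 * 4 * 5 * 4 = 3840
# combinations, of which the RandomizedSearchCV below samples only n_iter at random.
n_combinations = 1
for values in params.values():
    n_combinations *= len(values)
print('Total combinations in the grid:', n_combinations)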
## Hyperparameter optimization using RandomizedSearchCV
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
import xgboost
def timer(start_time=None):
if not start_time:
start_time = datetime.now()
return start_time
elif start_time:
thour, temp_sec = divmod((datetime.now() - start_time).total_seconds(), 3600)
tmin, tsec = divmod(temp_sec, 60)
print('\n Time taken: %i hours %i minutes and %s seconds.' % (thour, tmin, round(tsec, 2)))
classifier=xgboost.XGBClassifier()
random_search=RandomizedSearchCV(classifier,param_distributions=params,n_iter=5,scoring='roc_auc',n_jobs=-1,cv=5,verbose=3)
from datetime import datetime
# Here we go
start_time = timer(None) # timing starts from this point for "start_time" variable
random_search.fit(X,Y)
timer(start_time) # timing ends here for "start_time" variable
X.head()
random_search.best_estimator_
random_search.best_params_
classifier=xgboost.XGBClassifier(base_score=0.5, booster='gbtree', colsample_bylevel=1,
colsample_bytree=0.5, gamma=0.4, learning_rate=0.1,
max_delta_step=0, max_depth=6, min_child_weight=7, missing=None,
n_estimators=100, n_jobs=1, nthread=None,
objective='binary:logistic', random_state=0, reg_alpha=0,
reg_lambda=1, scale_pos_weight=1, seed=None, silent=True,
subsample=1)
from sklearn.model_selection import cross_val_score
score=cross_val_score(classifier,X,Y,cv=10)
score
score.mean()
###Output
_____no_output_____ |
property_prediction/demo.ipynb | ###Markdown
Property Prediction and Inverse Design--- Hands-on demo--- Goals:* Visualize and manipulate data using `pandas`* Fit and tune models using `sklearn`* Inverse design using `pyswarm` Module imports and global options
###Code
import numpy as np
import pandas as pd
import utils # we define some useful shortcuts here
np.random.seed(0)
pd.options.display.max_rows = 10
###Output
_____no_output_____
###Markdown
0. Importing the datasetWe use the `pandas` library built on top of `numpy` for easy import and manipulation of datasets from a variety of formats (excel, csv, etc). Only the simplest functions are used here. Complete documentation is found [here](https://pandas.pydata.org/). Dataset[Concrete Compressive Strength Data Set]()The input values $X$ are columns 1-8, representing the various compositions of concrete. The target values $y$ are the compressive strengths in the last column, which is a function of the input compositions.$$ y = f(X) $$Our goal is to **approximate** this function $f(.)$ by some function $\hat{f}(\cdot,\theta)$, and then learn $\theta$ using data. NoteMore datasets can be found at [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets.html)
###Code
df = pd.read_excel('./data/Concrete_Data.xls', sheet_name='Sheet1')
df
###Output
_____no_output_____
###Markdown
Train-test splitLet us now split the dataset into training and test sets. We use the `train_test_split()` function from `sklearn.model_selection`, where we simply need to specify the ratio of the test set.
###Code
from sklearn.model_selection import train_test_split
X, y = df[df.columns[:-1]], df[df.columns[-1]]
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2
)
###Output
_____no_output_____
###Markdown
1. Linear Regression BaselineWe first consider a linear regression baseline, where we fit a linear model$$ \hat{f}(X,M,c) = X M + c $$ and then minimize the $L^2$ error$$ \min_{M,c} \frac{1}{2} \Vert y - X M - c \Vert^2 $$To do this we simply import the function from `sklearn`.
###Code
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)
y_hat_train = regressor.predict(X_train) # Training set predictions
y_hat_test = regressor.predict(X_test) # Test set predictions
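# Optional quick metric (illustrative): the regressor's built-in score method
# reports the R^2 coefficient of determination on each split.
print("R^2 train:", regressor.score(X_train, y_train))
print("R^2 test :", regressor.score(X_test, y_test))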
###Output
_____no_output_____
###Markdown
Plot predictionsHereafter, we use simple functions defined in `utils` to do the plots.
###Code
utils.plot_predictions(
y=[y_train, y_test], y_hat=[y_hat_train, y_hat_test],
labels=['Train', 'Test']
)
###Output
_____no_output_____
###Markdown
2. Gradient Boosting RegressionLet us now use a more robust regressor for non-linear regression. Again, we use canned implementations from `sklearn`.
###Code
from sklearn.ensemble import GradientBoostingRegressor
regressor = GradientBoostingRegressor()
regressor.fit(X_train, y_train)
y_hat_train = regressor.predict(X_train)
y_hat_test = regressor.predict(X_test)
###Output
_____no_output_____
###Markdown
Plot predictions
###Code
utils.plot_predictions(
y=[y_train, y_test], y_hat=[y_hat_train, y_hat_test],
labels=['Train', 'Test']
)
###Output
_____no_output_____
###Markdown
Feature importanceDecision tree based ensemble models can also tell us how sensitive (in a very loose sense) the output is to each input parameter.
###Code
utils.plot_feature_importances(
importances=regressor.feature_importances_,
columns=df.columns[:-1])
###Output
_____no_output_____
###Markdown
2.1 OverfittingHere, let us demonstrate overfitting for the gradient boosting regressor. This can be done by drastically increasing the model complexity. One simple way to increase the model complexity is by increasing the `max_depth` parameter in `GradientBoostingRegressor()`
###Code
from sklearn.ensemble import GradientBoostingRegressor
regressor = GradientBoostingRegressor(max_depth=10)
regressor.fit(X_train, y_train)
y_hat_train = regressor.predict(X_train)
y_hat_test = regressor.predict(X_test)
###Output
_____no_output_____
###Markdown
Plot predictionsNotice that although the training error has decreased drastically, the test error actually got a little worse. This is a classic case of *over-fitting*.
###Code
utils.plot_predictions(
y=[y_train, y_test], y_hat=[y_hat_train, y_hat_test],
labels=['Train', 'Test']
)
###Output
_____no_output_____
###Markdown
2.2 Hyper-parameter TuningObserve that `GradientBoostingRegressor()` performed much better than `LinearRegression()`. Can we improve it further?Let us take a quick look at the documentation of `GradientBoostingRegressor()`
###Code
print(GradientBoostingRegressor.__doc__)
###Output
_____no_output_____
###Markdown
Tuning via Random Search CVAs seen above, there are many parameters one can tune (e.g. learning_rate, n_estimators etc.). We call these *hyper-parameters*, in the sense that they are parameters controlling the properties of the regressor, and are not the *trainable* parameters during model fitting. To maximize performance, we have to tune these parameters. To do this, we use *random search cross-validation tuning*. Let us briefly explain each term1. Cross-validation: This refers to scoring the performance of a model under a set of hyper-parameters given the training set. The idea is to further split the training set into two * train set (to be used for training, e.g. 2/3 of original training data) * validation set (to be used for evaluation of accuracy, 1/3 of original training data)By averaging over the 3 possible splits, we can have an average score of this particular selection of hyper-parameters. The goal is maximize this score over the hyper-parameter space. This is called *3-fold* cross-validation.1. Random search: Instead of performing a grid search over all hyper-parameters, it is usually more efficient to randomly sample them from some distributions and at each CV run, we pick a random hyper-parameter combination. Define search spaceThe `scipy.stats` module allows us to specify probabily distributions.
###Code
from scipy import stats
param_distributions = {
'n_estimators': stats.randint(low=10, high=1000),
'max_depth': stats.randint(low=2, high=6),
'min_samples_split': stats.randint(low=2, high=5),
'learning_rate': [1, 0.5, 0.25, 0.1, 0.05, 0.01]
}
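# Illustrative note: RandomizedSearchCV draws candidates from these scipy.stats
# distributions through their .rvs() method, e.g. three random n_estimators draws:
print(stats.randint(low=10, high=1000).rvs(3))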
###Output
_____no_output_____
###Markdown
Fit a CV-tuned regressor
###Code
from sklearn.model_selection import RandomizedSearchCV
regressor_cv = RandomizedSearchCV(
regressor, param_distributions=param_distributions,
n_iter=50, verbose=1)
regressor_cv.fit(X_train, y_train)
print('Best params: \n', regressor_cv.best_params_)
y_hat_train = regressor_cv.predict(X_train)
y_hat_test = regressor_cv.predict(X_test)
###Output
_____no_output_____
###Markdown
Plot predictions and feature importances
###Code
# Plot predictions and feature importances
utils.plot_predictions(
y=[y_train, y_test],
y_hat=[y_hat_train, y_hat_test],
labels=['Train', 'Test']
)
utils.plot_feature_importances(
importances=regressor_cv.best_estimator_.feature_importances_,
columns=df.columns[:-1])
###Output
_____no_output_____
###Markdown
3. Inverse DesignAfter fitting our model, we perform inverse design. In this demo, we do this using the `pyswarm` module, which is an implementation of the *particle swarm optimization* method. Refit using tuned hyper-parameters
###Code
best_params = regressor_cv.best_params_
regressor = GradientBoostingRegressor()
regressor.set_params(**best_params)
regressor.fit(X, y)
###Output
_____no_output_____
###Markdown
Bounds, objectives and constraintsNext, we define some bounds, objectives and constraints to be used for inverse design.1. Upper bounds for all compositions are $1.5\times$ the 75th percentile of the training data.1. Lower bounds for all compositions are $0.5\times$ the 25th percentile of the training data.1. Objective: minimize *Blast Furnace Slag, Fly Ash, Superplasticizer* compositions1. Constraints: * Compressive strength >= 70 MPa * Water <= 150 kg / m^3 * Age <= 30 days
###Code
upper_bounds = np.percentile(X, 75, axis=0) * 1.5
lower_bounds = np.percentile(X, 25, axis=0) * 0.5
def objective(X):
"""
We want to minimize
Blast Furnace Slag, Fly Ash, Superplasticizer
"""
return X[1]**2 + X[2]**2 + X[4]**2
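# (Column order in this concrete dataset, matching the indices used here and in
#  the constraints below: 0 Cement, 1 Blast Furnace Slag, 2 Fly Ash, 3 Water,
#  4 Superplasticizer, 5 Coarse Aggregate, 6 Fine Aggregate, 7 Age.)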
def constraints(X):
"""
We want to following constraints:
1. Compressive strength >= 70 MPa
2. Water <= 150 kg / m^3
3. Age <= 30 days
"""
predicted_strength = regressor.predict(X.reshape(1, -1))
cons_str_lower = predicted_strength - 70
cons_water_upper = 150 - X[3]
cons_age_upper = 30 - X[-1]
return [cons_str_lower, cons_water_upper, cons_age_upper]
###Output
_____no_output_____
###Markdown
Design via particle swarm optimization
###Code
from pyswarm import pso
X_opts = []
n_runs = 5
for n in range(n_runs):
X_opt, _ = pso(
objective, lower_bounds, upper_bounds, f_ieqcons=constraints,
swarmsize=100, maxiter=200)
X_opts.append(X_opt)
X_opts = np.asarray(X_opts)
y_hat_opts = regressor.predict(X_opts).reshape(-1, 1)
data_opt = np.concatenate([X_opts, y_hat_opts], axis=1)
df_predict = pd.DataFrame(columns=df.columns, data=data_opt)
###Output
_____no_output_____
###Markdown
Compare with unseen dataIn fact, our dataset used in this demo is not the full dataset. We took out one sample that satisfies the constraints above and minimizes the objective. Let us now check how close our inverse-design results are to this unseen data point (colored red).
###Code
df_unseen = pd.read_excel('./data/Concrete_Data_unseen.xls', sheet_name='Sheet1')
df_combined = pd.concat([df_predict, df_unseen], ignore_index=True)
df_combined.style.applymap(lambda x: 'color: red', subset=5)
###Output
_____no_output_____ |
Decision-Trees-For-Knowledge-Discovery-With-Scikit-Learn.ipynb | ###Markdown
Discovering structure behind dataLet's understand and model the hidden structure behind data with Decision Trees. In this tutorial, we'll explore and inspect how a model makes its decisions on a car evaluation data set. Decision trees work with simple "if" clauses dichotomously chained together, splitting the data flow recursively on those "if"s until they reach a leaf where we can categorize the data. Such data inspection could be used to reverse engineer the behavior of any function. Since [decision trees](http://www.r2d3.us/visual-intro-to-machine-learning-part-1/) are good algorithms for discovering the structure hidden behind data, we'll use and model the car evaluation data set, for which the prediction problem is a (deterministic) surjective function. This means that the inputs of the examples in the data set cover all the possibilities, and that for each possible input value, there is only one answer to predict (thus, two examples with the same input values would never have a different expected prediction). From the point of view of Data Science, because of the properties of our dataset, we won't need a test set nor cross-validation. Thus, the error we will obtain below when modeling our dataset would be equal to the true test error if we had a test set. The attribute to predict in the data set could have been, for example, created from a programmatic function, and we will basically reverse engineer the logic mapping the inputs to the outputs to recreate the function and to be able to explain it visually. About the Car Evaluation Data SetFor more information: http://archive.ics.uci.edu/ml/datasets/Car+Evaluation OverviewThe Car Evaluation Database was derived from a simple hierarchical decision model originally developed for the demonstration of DEX (M. Bohanec, V. Rajkovic: Expert system for decision making. Sistemica 1(1), pp. 145-157, 1990). The model evaluates cars according to the following concept structure: - CAR car acceptability: - PRICE overall price: - **buying** buying price - **maint** price of the maintenance - TECH technical characteristics: - COMFORT comfort: - **doors** number of doors - **persons** capacity in terms of persons to carry - **lug_boot** the size of luggage boot - **safety** estimated safety of the carInput attributes are printed in lowercase. Besides the target concept (CAR), the model includes three intermediate concepts: PRICE, TECH, COMFORT. Every concept is in the original model related to its lower level descendants by a set of examples. The Car Evaluation Database contains examples with the structural information removed, i.e., directly relates CAR to the six input attributes: buying, maint, doors, persons, lug_boot, safety. Because of the known underlying concept structure, this database may be particularly useful for testing constructive induction and structure discovery methods. Attributes, instances, and Class DistributionNumber of Attributes: 6Missing Attribute Values: none| Attribute | Values ||------------|--------|| buying | v-high, high, med, low || maint | v-high, high, med, low || doors | 2, 3, 4, 5-more || persons | 2, 4, more || lug_boot | small, med, big || safety | low, med, high |Number of Instances: 1728 (Instances completely cover the attribute space.)| class | N | N[%] ||---|---|---|| unacc | 1210 | 70.023 % || acc | 384 | 22.222 % || good | 69 | 3.993 % || v-good | 65 | 3.762 % | We'll now load the car evaluation data set in Python and then train decision trees with Scikit-Learn
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn import tree
import pydot
from io import StringIO
import os
###Output
_____no_output_____
###Markdown
Define the features and preprocess the car evaluation data setWe'll preprocess the attributes into redundant features, such as using an integer index (linear) to represent a value for an attribute, as well as a one-hot encoding for each attribute's possible values as new features. Although this is redundant, it will help make the tree smaller since it gives the tree more choices for how to split the data at each branch.
###Code
# The integer values for features will take
# a range from 0 to n-1 in the lists of possible values:
input_labels = [
["buying", ["vhigh", "high", "med", "low"]],
["maint", ["vhigh", "high", "med", "low"]],
["doors", ["2", "3", "4", "5more"]], # Here indexes are not real values
["persons", ["2", "4", "more"]],
["lug_boot", ["small", "med", "big"]],
["safety", ["low", "med", "high"]],
]
class_names = ["unacc", "acc", "good", "vgood"]
# Load data set
data = np.genfromtxt(os.path.join('data', 'car.data'), delimiter=',', dtype="U")
data_inputs = data[:, :-1]
data_outputs = data[:, -1]
def str_data_to_one_hot(data, input_labels):
"""Convert each feature's string to a flattened one-hot array. """
X_int = LabelEncoder().fit_transform(data.ravel()).reshape(*data.shape)
X_bin = OneHotEncoder().fit_transform(X_int).toarray()
feature_names = []
for a in input_labels:
key = a[0]
for b in a[1]:
value = b
feature_names.append("{}_is_{}".format(key, value))
return X_bin, feature_names
def str_data_to_linear(data, input_labels):
"""Convert each feature's string to an integer index"""
X_lin = np.array([[
input_labels[a][1].index(j) for a, j in enumerate(i)
] for i in data])
# Integer feature indexes will range
# from 0 to n-1 from indexes in the label list:
feature_names = [i[0] + "_index" for i in input_labels]
return X_lin, feature_names
# Take both one-hot and linear versions of input features:
X_one_hot, feature_names_one_hot = str_data_to_one_hot(data_inputs, input_labels)
X_linear_int, feature_names_linear_int = str_data_to_linear(data_inputs, input_labels)
# Put that together:
X = np.concatenate([X_one_hot, X_linear_int], axis=-1)
feature_names = feature_names_one_hot + feature_names_linear_int
# Outputs use indexes, this is not one-hot:
integer_y = np.array([class_names.index(i) for i in data_outputs])
print("Data set's shape,")
print("X.shape, integer_y.shape, len(feature_names), len(class_names):")
print(X.shape, integer_y.shape, len(feature_names), len(class_names))
###Output
Data set's shape,
X.shape, integer_y.shape, len(feature_names), len(class_names):
(1728, 27) (1728,) 27 4
###Markdown
Train a simple decision tree to fit the data set:First, let's define some hyperparameters, such as the depth of the tree.
###Code
max_depth = 6
clf = tree.DecisionTreeClassifier(max_depth=max_depth)
clf = clf.fit(X, integer_y)
print("Decision tree trained!")
accuracy = clf.score(X, integer_y)
print("Errors:", 100 - accuracy * 100, "%")
print("Accuracy:", accuracy * 100, "%")
###Output
Decision tree trained!
Errors: 6.53935185185 %
Accuracy: 93.4606481481 %
###Markdown
Plot and save the tree
###Code
def plot_first_tree(clf, class_names, tree_name):
"""
Plot and save our scikit-learn tree.
"""
graph_save_path = os.path.join(
"exported_sklearn_trees",
"{}".format(tree_name)
)
tree.export_graphviz(clf, out_file="{}.dot".format(graph_save_path))
dotfile = StringIO()
tree.export_graphviz(
clf, out_file=dotfile,
feature_names=feature_names, class_names=class_names,
filled=True, rotate=True
)
pydot.graph_from_dot_data(dotfile.getvalue())[0].write_png("{}.png".format(graph_save_path))
# Plot our simple tree:
plot_first_tree(clf, class_names, tree_name="simple_tree")
###Output
_____no_output_____
###Markdown
Note that [the tree below can also be viewed here online](https://github.com/Vooban/Decision-Trees-For-Knowledge-Discovery/tree/master/exported_sklearn_trees). Plot the importance of each input features of the simple tree:Note here that it is the feature importance according to our simple, shallow tree. A fully complex trees would surely include more of the features/attributes, and with different proportions.
###Code
def feature_importance_chart(clf, classifier_name, feature_names):
sorted_feature_importances, sorted_feature_names = (
zip(*sorted(zip(clf.feature_importances_, feature_names)))
)
plt.figure(figsize=(16, 9))
plt.barh(range(len(sorted_feature_importances)), sorted_feature_importances)
plt.yticks(
range(len(sorted_feature_importances)),
["{}: {:.3}".format(a, b) for a, b in zip(sorted_feature_names, sorted_feature_importances)]
)
plt.title("The Gini feature importance for the {} \n"
"(total decrease in node impurity, weighted by the "
"probability of reaching that node)".format(classifier_name))
plt.show()
feature_importance_chart(clf, "simple tree", feature_names)
###Output
_____no_output_____
###Markdown
Let's now generate a fully perfect (complex) tree Let's [go deeper](http://theinceptionbutton.com/). Let's build a deeper tree. At least, a simple tree like the one above is interesting for having a simplified view of the true logic behind our data.
###Code
max_depth = None # Full depth
clf = tree.DecisionTreeClassifier(max_depth=max_depth)
clf = clf.fit(X, integer_y)
print("Decision tree trained!")
accuracy = clf.score(X, integer_y)
print("Errors:", 100 - accuracy * 100, "%")
print("Accuracy:", accuracy * 100, "%")
###Output
Decision tree trained!
Errors: 0.0 %
Accuracy: 100.0 %
###Markdown
A plot of the full tree
###Code
plot_first_tree(clf, class_names, tree_name="complex_tree")
###Output
_____no_output_____
###Markdown
Note that [the tree below can also be viewed here online](https://github.com/Vooban/Decision-Trees-For-Knowledge-Discovery/tree/master/exported_sklearn_trees). It would also be possible to [extract the tree as true code and create a function](https://stackoverflow.com/questions/20224526/how-to-extract-the-decision-rules-from-scikit-learn-decision-tree). Finally, the full feature importance:
###Code
feature_importance_chart(clf, "complex tree", feature_names)
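# Example of extracting the fitted tree as readable if/else rules, as mentioned in
# the note above (this assumes scikit-learn >= 0.21, where export_text is available):
from sklearn.tree import export_text
print(export_text(clf, feature_names=feature_names, max_depth=3))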
###Output
_____no_output_____
###Markdown
ConclusionTo sum up, we managed to get good classification results and to be able to explain those results visually and automatically. Note that it would have been possible to solve a regression problem with the same algorithm, such as predicting a price rather than a category. Such a technique can be useful in reverse engineering an existing system, such as an old one that has been coded in a peculiar programming language and for which the employees who coded it have left. This technique can also be used for data mining, gaining business intelligence, and extracting insights from data. In the case that your data does not represent a pure function like we have here, such as if for two of your input examples it is possible to have two different predictions, then a tree cannot model the data set with 100% accuracy. Fortunately, if you are in that situation where the logic behind the data is not perfect, it is possible to [repeat the experiment by using XGBoost](https://github.com/Vooban/Decision-Trees-For-Knowledge-Discovery/blob/master/Decision-Trees-For-Knowledge-Discovery-With-XGBoost.ipynb), which can help by incrementally training many trees to reduce the error and training them in an optimized fashion. The only disadvantage is that those boosted forests would be harder to explain, because you would have many trees.
###Code
# Let's convert this notebook to a README for the GitHub project's title page:
!jupyter nbconvert --to markdown Decision-Trees-For-Knowledge-Discovery-With-Scikit-Learn.ipynb
!mv Decision-Trees-For-Knowledge-Discovery-With-Scikit-Learn.md README.md
###Output
[NbConvertApp] Converting notebook Decision-Trees-For-Knowledge-Discovery-With-Scikit-Learn.ipynb to markdown
[NbConvertApp] Support files will be in Decision-Trees-For-Knowledge-Discovery-With-Scikit-Learn_files/
[NbConvertApp] Making directory Decision-Trees-For-Knowledge-Discovery-With-Scikit-Learn_files
[NbConvertApp] Making directory Decision-Trees-For-Knowledge-Discovery-With-Scikit-Learn_files
[NbConvertApp] Writing 12276 bytes to Decision-Trees-For-Knowledge-Discovery-With-Scikit-Learn.md
|
assignments/assignment 2/submissions/nathan_greenstein/nathan_greenstein.ipynb | ###Markdown
Bikeshare Data WranglingThis assignment loads data from the Citibike Bikeshare, cleans it up, and carries out several basic visualization / analysis operations.
###Code
import pandas as pd
import matplotlib.pyplot as plt
from math import sin, cos, sqrt, atan2, radians
# Load the data
bikeTrips = pd.read_csv("https://s3.amazonaws.com/tripdata/JC-201902-citibike-tripdata.csv.zip")
# Clean up
bikeTrips.drop(['starttime', 'stoptime', 'start station id', 'start station name', 'end station id', 'end station name', 'bikeid', 'usertype'], inplace = True, axis = 1)
bikeTrips.loc[:, 'tripduration':'birth year'] = bikeTrips.loc[:, 'tripduration':'birth year'].apply(lambda x: pd.to_numeric(x, errors='coerce'))
bikeTrips.fillna(-1, inplace = True, axis = 1)
# We'll focus on bike trips 2 hours long or less
shortTrips = bikeTrips.loc[bikeTrips['tripduration'] <= 60*60*2]
# Plot a historgram of short trip durations (log scale)
plt.hist(shortTrips.tripduration.values, log = True)
# Get a numpy array of short trips
shortVals = shortTrips.values
# Calculate the distance of each trip in km, based on https://stackoverflow.com/a/19412565
R = 6373.0 # approximate radius of earth in km
distances = []
for trip in shortVals:
lat1 = radians(trip[1])
lon1 = radians(trip[2])
lat2 = radians(trip[3])
lon2 = radians(trip[4])
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
distance = R * c
distances.append(distance)
plt.hist(distances)
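# An equivalent vectorized computation (illustrative alternative to the loop above;
# columns 1-4 of shortVals are the start/end latitudes and longitudes, as in the loop):
import numpy as np  # numpy is not imported elsewhere in this notebook
lat1v, lon1v, lat2v, lon2v = np.radians(shortVals[:, 1:5].astype(float)).T
a_v = np.sin((lat2v - lat1v) / 2)**2 + np.cos(lat1v) * np.cos(lat2v) * np.sin((lon2v - lon1v) / 2)**2
dist_v = R * 2 * np.arctan2(np.sqrt(a_v), np.sqrt(1 - a_v))
# dist_v matches the `distances` list above up to floating-point error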
# Make a scatter plot of trips, where the x axis is trip distance in km, the y axis is trip length in seconds,
# and the color of the dot is the rider's gender
plt.scatter(x = distances, y = shortTrips.tripduration.values, c = shortTrips.gender.values)
###Output
_____no_output_____ |
notebooks/Train Unet Model.ipynb | ###Markdown
Create custom ItemList and LabelList classes to define data loading and display
###Code
class SegmentationPklLabelList(SegmentationLabelList):
def open(self, fn):
x = pkl.load(builtins.open(str(fn),'rb'))[None,...].astype(np.float32)
return ImageSegment(tensor(x))
class SegmentationPklList(SegmentationItemList):
_label_cls,_square_show_res = SegmentationPklLabelList,False
def open(self, fn):
x = pkl.load(builtins.open(str(fn),'rb'))
x = x.transpose([0,3,1,2]).reshape([-1,64,64]).astype(np.float32)
return Image(tensor(x))
def show_xys(self, xs, ys, imgsize:int=4, figsize:Optional[Tuple[int,int]]=None, **kwargs):
"Show the `xs` (inputs) and `ys` (targets) on a figure of `figsize`."
rows = int(np.ceil(math.sqrt(len(xs))))
axs = subplots(rows, rows, imgsize=imgsize, figsize=figsize)
for x,y,ax in zip(xs, ys, axs.flatten()): Image(torch.clamp(x.data[0:3,:,:]*3.5,0,1)).show(ax=ax, y=y, alpha=0.4,**kwargs)
for ax in axs.flatten()[len(xs):]: ax.axis('off')
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Define validation function Validation patchlets all come from a separate patch
###Code
def valid_patch(fn, i=6):
return f'patch_{i}' in str(fn)
def get_mask(fn):
return str(fn).replace('feat','targ')
def exclude_masks(fn):
return not('targ' in str(fn.name))
bs = 32
classes=['No Data',
'Cotton',
'Dates',
'Grass',
'Lucern',
'Maize',
'Pecan',
'Vacant',
'Vineyard',
'Vineyard & Pecan']
src = (SegmentationPklList.from_folder(train_path, extensions=['.pkl'],
recurse=True, convert_mode='L')
.filter_by_func(exclude_masks)
.split_by_rand_pct(0.1)
#.split_by_valid_func(valid_patch)
.label_from_func(get_mask, classes=classes)
)
test_set = (SegmentationPklList.from_folder(test_path, extensions=['.pkl'],
recurse=True, convert_mode='L')
.filter_by_func(exclude_masks)
)
stats_data = src.databunch(bs=128)
x,y = stats_data.one_batch()
means = x.mean(dim=[0,2,3])
stds = x.std(dim=[0,2,3])
###Output
_____no_output_____
###Markdown
Define focal loss function
###Code
from torch import nn
import torch.nn.functional as F
class FocalLoss(nn.Module):
def __init__(self, crit, alpha=1, gamma=2):
super(FocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.crit = crit
def forward(self, inputs, targets, reduction):
loss = self.crit(inputs, targets)
pt = torch.exp(-loss)
F_loss = self.alpha * (1-pt)**self.gamma * loss
if reduction is None:
return F_loss
else:
return torch.mean(F_loss)
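# Quick illustration of the focal weighting term (1 - pt)**gamma with gamma = 2:
# well-classified pixels (high pt) are strongly down-weighted, so training focuses
# on hard examples. (torch is assumed to be imported; it is already used above.)
pt_demo = torch.tensor([0.5, 0.9, 0.99])
print((1 - pt_demo) ** 2)  # approximately tensor([0.2500, 0.0100, 0.0001])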
###Output
_____no_output_____
###Markdown
Define data augmentation and get databunch
###Code
tfms = get_transforms(
do_flip = True,
flip_vert = True,
max_rotate = 20,
max_zoom = 1.1,
max_lighting = 0.,
max_warp = 0.2,
p_affine = 0.75,
p_lighting = 0.,
xtra_tfms = [cutout(n_holes=(5,10), length=(3, 8), p=0.75, use_on_y=False)]
)
tfms = [tfms[0][1:],[]]# gets rid of resize transformations - they don't work with the target mask
data = (src
.transform(tfms,
tfm_y=True)
.add_test(test_set,tfms=None)
.databunch(bs=bs, num_workers = 0)
.normalize(stats=(means,stds)))
###Output
_____no_output_____
###Markdown
View a batch to check data augmentation (target masks are overlaid with transparency)
###Code
data.show_batch()
###Output
_____no_output_____
###Markdown
Define a custom ResNet that takes the appropriate number of input channels
###Code
in_ch = 8*6 # 8 timepoints x 6 channels (R + G + B + NIR + NDVI + NORM)
def myresnet_func(*args):
myresnet = models.resnet50(pretrained=True)
myresnet.conv1 = torch.nn.Conv2d(in_ch, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
return myresnet
###Output
_____no_output_____
###Markdown
Define some metrics
###Code
def pixel_acc(inputs, targs):
inputs = inputs.argmax(dim=1)[:,None,...]
return (targs[targs!=0]==inputs[targs!=0]).float().mean()
def pixel_acc_per_class(inputs, targs, class_id=None):
inputs = inputs.argmax(dim=1)[:,None,...]
inputs = inputs[targs==class_id]
targs = targs[targs==class_id]
pixels = len(targs)
if pixels > 0:
score = ((targs[targs!=0]==inputs[targs!=0])).float().mean()
else:
score = -1
return pixels, score
###Output
_____no_output_____
###Markdown
Modify the fastai mixup callback to work with image segmentation
###Code
class myMixUpCallback(LearnerCallback):
"Callback that creates the mixed-up input and target."
def __init__(self, learn:Learner, alpha:float=0.4, stack_x:bool=False, stack_y:bool=True):
super().__init__(learn)
self.alpha,self.stack_x,self.stack_y = alpha,stack_x,stack_y
def on_train_begin(self, **kwargs):
if self.stack_y: self.learn.loss_func = myMixUpLoss(self.learn.loss_func)
def on_batch_begin(self, last_input, last_target, train, **kwargs):
"Applies mixup to `last_input` and `last_target` if `train`."
if not train: return
lambd = np.random.beta(self.alpha, self.alpha, last_target.size(0))
lambd = np.concatenate([lambd[:,None], 1-lambd[:,None]], 1).max(1)
lambd = last_input.new(lambd)
shuffle = torch.randperm(last_target.size(0)).to(last_input.device)
x1, y1 = last_input[shuffle], last_target[shuffle]
if self.stack_x:
new_input = [last_input, last_input[shuffle], lambd]
else:
out_shape = [lambd.size(0)] + [1 for _ in range(len(x1.shape) - 1)]
new_input = (last_input * lambd.view(out_shape) + x1 * (1-lambd).view(out_shape))
if self.stack_y:
new_lambd = torch.distributions.utils.broadcast_all(lambd[:,None,None,None], last_target)[0]
#new_target = torch.cat([last_target[:,None].float(), y1[:,None].float(), new_lambd[:,None].float()], 1)
new_target = torch.stack([last_target.float(), y1.float(), new_lambd.float()], 1)
else:
if len(last_target.shape) == 2:
lambd = lambd.unsqueeze(1).float()
new_target = last_target.float() * lambd + y1.float() * (1-lambd)
return {'last_input': new_input, 'last_target': new_target}
def on_train_end(self, **kwargs):
if self.stack_y: self.learn.loss_func = self.learn.loss_func.get_old()
class myMixUpLoss(Module):
"Adapt the loss function `crit` to go with mixup."
def __init__(self, crit, reduction='mean'):
super().__init__()
if hasattr(crit, 'reduction'):
self.crit = crit
self.old_red = crit.reduction
setattr(self.crit, 'reduction', 'none')
else:
self.crit = partial(crit, reduction='none')
self.old_crit = crit
self.reduction = reduction
def forward(self, output, target):
if len(target.size()) >= 5:
loss1, loss2 = self.crit(output,target[:,0].long()), self.crit(output,target[:,1].long())
lambd = target[:,2].contiguous().view(-1)
d = (loss1 * lambd + loss2 * (1-lambd)).mean()
else: d = self.crit(output, target)
if self.reduction == 'mean': return d.mean()
elif self.reduction == 'sum': return d.sum()
return d
def get_old(self):
if hasattr(self, 'old_crit'): return self.old_crit
elif hasattr(self, 'old_red'):
setattr(self.crit, 'reduction', self.old_red)
return self.crit
###Output
_____no_output_____
###Markdown
Create class-weighted focal loss
###Code
train_df = pd.read_csv('../data/Farmpin_training.csv')
inv_freq = np.array(1/(train_df.crop_id.value_counts()/2437))
inv_freq = [0.,*inv_freq]
inv_prop = tensor(inv_freq/sum(inv_freq)).float().cuda()
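# inv_prop gives each crop class a weight inversely proportional to its frequency in
# the training labels (rare crops get larger weights); class 0 ("No Data") is prepended
# with weight 0 and is additionally excluded from the loss via ignore_index=0 below.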
focal_loss = FocalLoss(crit=CrossEntropyFlat(axis=1,weight=inv_prop,ignore_index=0)) # ignore the no-data class
learn = unet_learner(data,
myresnet_func,
loss_func=focal_loss,
metrics=[pixel_acc],
callback_fns=[partial(myMixUpCallback,alpha=0.4, stack_y=True)])
lr_find(learn)
learn.recorder.plot()
learn.fit_one_cycle(5,max_lr=1e-3, wd=0.3)
learn.save('resnet_50_5_frozen_epochs_balanced_focal_loss_mixup')
learn.unfreeze()
learn.fit_one_cycle(10,max_lr=1e-4, wd=0.3)
learn.save('resnet_50_5_frozen+10+10_unfrozen_epochs_balanced_focal_loss_mixup')
preds, targs = learn.get_preds(DatasetType.Valid)
for c in range(10):
print(f'{c}.{classes[c]:18} {pixel_acc_per_class(preds, targs, class_id=c+1)[1]:0.2f} '
+f' on {pixel_acc_per_class(preds, targs, class_id=c+1)[0]:10} px')
rows = 3
idxs = np.random.randint(0, len(preds), [rows])
fig = plt.figure(figsize=(15,4*rows))
nc = 5
for i,j in enumerate(idxs):
pred = preds.argmax(dim=1)[j]
targ = targs[j].squeeze()
pred[targ==0]=0
ax = plt.subplot(rows,nc,nc*i+1)
plt.imshow(np.clip(data.valid_ds[j][0].data[0:3].permute([1,2,0])*3.5,0,1))
ax.set_title('satellite')
plt.xticks([])
plt.yticks([])
ax = plt.subplot(rows,nc,nc*i+2)
plt.xticks([])
plt.yticks([])
ax.set_title('actual')
plt.imshow(targ, vmin=0, vmax=9)
plt.xticks([])
plt.yticks([])
ax = plt.subplot(rows,nc,nc*i+3)
ax.set_title('predicted')
plt.imshow(pred, vmin=0, vmax=9)
plt.xticks([])
plt.yticks([])
ax = plt.subplot(rows,nc,nc*i+4)
ax.set_title('where they match')
plt.imshow((targ==pred)&(targ!=0), vmin=0, vmax=1)
plt.xticks([])
plt.yticks([])
ax = plt.subplot(rows,nc,nc*i+5)
ax.set_title('where they are different')
plt.imshow((targ!=pred)&(targ!=0), vmin=0, vmax=1)
plt.xticks([])
plt.yticks([])
fig.subplots_adjust(wspace=0.05, hspace=0.05)
###Output
_____no_output_____
###Markdown
Inference on the test set
###Code
field_ids_list = []
for f in data.test_ds.items:
field_id = pkl.load(open(str(f).replace('feat','targ'),'rb'))
field_ids_list.append(field_id)
field_ids_arr = np.stack(field_ids_list).squeeze()
preds, _ = learn.get_preds(DatasetType.Test)  # predictions for the test set (the preds computed earlier were for the validation set)
preds_no_zero = (preds[:, 1:10, ...]).numpy()
field_ids = np.unique(field_ids_arr)
preds_list = list()
for fid in field_ids[1:]: #exclude 0
prob_dic = {'Field_Id': fid}
preds = [np.median(preds_no_zero[:,c,...][field_ids_arr==fid]) for c in range(9)]
probs = np.exp(preds)/sum(np.exp(preds)) # take the softmax over the 9 crop classes
for i, p in enumerate(probs):
prob_dic[f'crop_id_{i+1}'] = p
preds_list.append(prob_dic)
preds_df = pd.DataFrame(preds_list)
preds_df.to_csv(output_path/'submission.csv',index=False)
###Output
_____no_output_____ |
notebooks/LGBM_usage.ipynb | ###Markdown
Hyperparameters Tuning
###Code
### HYPERPARAM TUNING WITH GRID-SEARCH ###
model = BoostSearch(clf_lgbm, param_grid=param_grid)
model.fit(X_clf_train, y_clf_train, eval_set=[(X_clf_valid, y_clf_valid)], early_stopping_rounds=6, verbose=0)
model.estimator_, model.best_params_, model.best_score_
(model.score(X_clf_valid, y_clf_valid),
model.predict(X_clf_valid).shape,
model.predict_proba(X_clf_valid).shape)
### HYPERPARAM TUNING WITH RANDOM-SEARCH ###
model = BoostSearch(
regr_lgbm, param_grid=param_dist,
n_iter=8, sampling_seed=0
)
model.fit(X_regr_train, y_regr_train, eval_set=[(X_regr_valid, y_regr_valid)], early_stopping_rounds=6, verbose=0)
model.estimator_, model.best_params_, model.best_score_
(model.score(X_regr_valid, y_regr_valid),
model.predict(X_regr_valid).shape,
model.predict(X_regr_valid, pred_contrib=True).shape)
### HYPERPARAM TUNING WITH HYPEROPT ###
model = BoostSearch(
regr_lgbm, param_grid=param_dist_hyperopt,
n_iter=8, sampling_seed=0
)
model.fit(
X_regr_train, y_regr_train, trials=Trials(),
eval_set=[(X_regr_valid, y_regr_valid)], early_stopping_rounds=6, verbose=0
)
model.estimator_, model.best_params_, model.best_score_
(model.score(X_regr_valid, y_regr_valid),
model.predict(X_regr_valid).shape,
model.predict(X_regr_valid, pred_contrib=True).shape)
###Output
_____no_output_____
###Markdown
Features Selection
###Code
### BORUTA ###
model = BoostBoruta(clf_lgbm, max_iter=200, perc=100)
model.fit(X_clf_train, y_clf_train, eval_set=[(X_clf_valid, y_clf_valid)], early_stopping_rounds=6, verbose=0)
model.estimator_, model.n_features_
(model.score(X_clf_valid, y_clf_valid),
model.predict(X_clf_valid).shape,
model.transform(X_clf_valid).shape,
model.predict_proba(X_clf_valid).shape)
### RECURSIVE FEATURE ELIMINATION (RFE) ###
model = BoostRFE(regr_lgbm, min_features_to_select=1, step=1)
model.fit(X_regr_train, y_regr_train, eval_set=[(X_regr_valid, y_regr_valid)], early_stopping_rounds=6, verbose=0)
model.estimator_, model.n_features_
(model.score(X_regr_valid, y_regr_valid),
model.predict(X_regr_valid).shape,
model.transform(X_regr_valid).shape,
model.predict(X_regr_valid, pred_contrib=True).shape)
### RECURSIVE FEATURE ADDITION (RFA) ###
model = BoostRFA(regr_lgbm, min_features_to_select=1, step=1)
model.fit(X_regr_train, y_regr_train, eval_set=[(X_regr_valid, y_regr_valid)], early_stopping_rounds=6, verbose=0)
model.estimator_, model.n_features_
(model.score(X_regr_valid, y_regr_valid),
model.predict(X_regr_valid).shape,
model.transform(X_regr_valid).shape,
model.predict(X_regr_valid, pred_contrib=True).shape)
###Output
_____no_output_____
###Markdown
Features Selection with SHAP
###Code
### BORUTA SHAP ###
model = BoostBoruta(
clf_lgbm, max_iter=200, perc=100,
importance_type='shap_importances', train_importance=False
)
model.fit(X_clf_train, y_clf_train, eval_set=[(X_clf_valid, y_clf_valid)], early_stopping_rounds=6, verbose=0)
model.estimator_, model.n_features_
(model.score(X_clf_valid, y_clf_valid),
model.predict(X_clf_valid).shape,
model.transform(X_clf_valid).shape,
model.predict_proba(X_clf_valid).shape)
### RECURSIVE FEATURE ELIMINATION (RFE) SHAP ###
model = BoostRFE(
regr_lgbm, min_features_to_select=1, step=1,
importance_type='shap_importances', train_importance=False
)
model.fit(X_regr_train, y_regr_train, eval_set=[(X_regr_valid, y_regr_valid)], early_stopping_rounds=6, verbose=0)
model.estimator_, model.n_features_
(model.score(X_regr_valid, y_regr_valid),
model.predict(X_regr_valid).shape,
model.transform(X_regr_valid).shape,
model.predict(X_regr_valid, pred_contrib=True).shape)
### RECURSIVE FEATURE ADDITION (RFA) SHAP ###
model = BoostRFA(
regr_lgbm, min_features_to_select=1, step=1,
importance_type='shap_importances', train_importance=False
)
model.fit(X_regr_train, y_regr_train, eval_set=[(X_regr_valid, y_regr_valid)], early_stopping_rounds=6, verbose=0)
model.estimator_, model.n_features_
(model.score(X_regr_valid, y_regr_valid),
model.predict(X_regr_valid).shape,
model.transform(X_regr_valid).shape,
model.predict(X_regr_valid, pred_contrib=True).shape)
###Output
_____no_output_____
###Markdown
Hyperparameters Tuning + Features Selection
###Code
### HYPERPARAM TUNING WITH GRID-SEARCH + BORUTA ###
model = BoostBoruta(clf_lgbm, param_grid=param_grid, max_iter=200, perc=100)
model.fit(X_clf_train, y_clf_train, eval_set=[(X_clf_valid, y_clf_valid)], early_stopping_rounds=6, verbose=0)
model.estimator_, model.best_params_, model.best_score_, model.n_features_
(model.score(X_clf_valid, y_clf_valid),
model.predict(X_clf_valid).shape,
model.transform(X_clf_valid).shape,
model.predict_proba(X_clf_valid).shape)
### HYPERPARAM TUNING WITH RANDOM-SEARCH + RECURSIVE FEATURE ELIMINATION (RFE) ###
model = BoostRFE(
regr_lgbm, param_grid=param_dist, min_features_to_select=1, step=1,
n_iter=8, sampling_seed=0
)
model.fit(X_regr_train, y_regr_train, eval_set=[(X_regr_valid, y_regr_valid)], early_stopping_rounds=6, verbose=0)
model.estimator_, model.best_params_, model.best_score_, model.n_features_
(model.score(X_regr_valid, y_regr_valid),
model.predict(X_regr_valid).shape,
model.transform(X_regr_valid).shape,
model.predict(X_regr_valid, pred_contrib=True).shape)
### HYPERPARAM TUNING WITH HYPEROPT + RECURSIVE FEATURE ADDITION (RFA) ###
model = BoostRFA(
regr_lgbm, param_grid=param_dist_hyperopt, min_features_to_select=1, step=1,
n_iter=8, sampling_seed=0
)
model.fit(
X_regr_train, y_regr_train, trials=Trials(),
eval_set=[(X_regr_valid, y_regr_valid)], early_stopping_rounds=6, verbose=0
)
model.estimator_, model.best_params_, model.best_score_, model.n_features_
(model.score(X_regr_valid, y_regr_valid),
model.predict(X_regr_valid).shape,
model.transform(X_regr_valid).shape,
model.predict(X_regr_valid, pred_contrib=True).shape)
###Output
_____no_output_____
###Markdown
Hyperparameters Tuning + Features Selection with SHAP
###Code
### HYPERPARAM TUNING WITH GRID-SEARCH + BORUTA SHAP ###
model = BoostBoruta(
clf_lgbm, param_grid=param_grid, max_iter=200, perc=100,
importance_type='shap_importances', train_importance=False
)
model.fit(X_clf_train, y_clf_train, eval_set=[(X_clf_valid, y_clf_valid)], early_stopping_rounds=6, verbose=0)
model.estimator_, model.best_params_, model.best_score_, model.n_features_
(model.score(X_clf_valid, y_clf_valid),
model.predict(X_clf_valid).shape,
model.transform(X_clf_valid).shape,
model.predict_proba(X_clf_valid).shape)
### HYPERPARAM TUNING WITH RANDOM-SEARCH + RECURSIVE FEATURE ELIMINATION (RFE) SHAP ###
model = BoostRFE(
regr_lgbm, param_grid=param_dist, min_features_to_select=1, step=1,
n_iter=8, sampling_seed=0,
importance_type='shap_importances', train_importance=False
)
model.fit(X_regr_train, y_regr_train, eval_set=[(X_regr_valid, y_regr_valid)], early_stopping_rounds=6, verbose=0)
model.estimator_, model.best_params_, model.best_score_, model.n_features_
(model.score(X_regr_valid, y_regr_valid),
model.predict(X_regr_valid).shape,
model.transform(X_regr_valid).shape,
model.predict(X_regr_valid, pred_contrib=True).shape)
### HYPERPARAM TUNING WITH HYPEROPT + RECURSIVE FEATURE ADDITION (RFA) SHAP ###
model = BoostRFA(
regr_lgbm, param_grid=param_dist_hyperopt, min_features_to_select=1, step=1,
n_iter=8, sampling_seed=0,
importance_type='shap_importances', train_importance=False
)
model.fit(
X_regr_train, y_regr_train, trials=Trials(),
eval_set=[(X_regr_valid, y_regr_valid)], early_stopping_rounds=6, verbose=0
)
model.estimator_, model.best_params_, model.best_score_, model.n_features_
(model.score(X_regr_valid, y_regr_valid),
model.predict(X_regr_valid).shape,
model.transform(X_regr_valid).shape,
model.predict(X_regr_valid, pred_contrib=True).shape)
###Output
_____no_output_____
###Markdown
CUSTOM EVAL METRIC SUPPORT
###Code
from sklearn.metrics import roc_auc_score
def AUC(y_true, y_hat):
return 'auc', roc_auc_score(y_true, y_hat), True
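# LightGBM custom eval metrics return a 3-tuple (metric_name, metric_value,
# is_higher_better); the final True tells LightGBM that a larger AUC is better,
# which pairs with greater_is_better=True passed to BoostRFE below.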
model = BoostRFE(
LGBMClassifier(n_estimators=150, random_state=0, metric="custom"),
param_grid=param_grid, min_features_to_select=1, step=1,
greater_is_better=True
)
model.fit(
X_clf_train, y_clf_train,
eval_set=[(X_clf_valid, y_clf_valid)], early_stopping_rounds=6, verbose=0,
eval_metric=AUC
)
###Output
8 trials detected for ('learning_rate', 'num_leaves', 'max_depth')
trial: 0001 ### iterations: 00028 ### eval_score: 0.97581
trial: 0002 ### iterations: 00016 ### eval_score: 0.97514
trial: 0003 ### iterations: 00015 ### eval_score: 0.97574
trial: 0004 ### iterations: 00032 ### eval_score: 0.97549
trial: 0005 ### iterations: 00075 ### eval_score: 0.97551
trial: 0006 ### iterations: 00041 ### eval_score: 0.97597
trial: 0007 ### iterations: 00076 ### eval_score: 0.97592
trial: 0008 ### iterations: 00060 ### eval_score: 0.97539
###Markdown
CATEGORICAL FEATURE SUPPORT
###Code
categorical_feature = [0,1,2]
X_clf_train[:,categorical_feature] = (X_clf_train[:,categorical_feature]+100).clip(0).astype(int)
X_clf_valid[:,categorical_feature] = (X_clf_valid[:,categorical_feature]+100).clip(0).astype(int)
### MANUAL PASS categorical_feature WITH NUMPY ARRAYS ###
model = BoostRFE(clf_lgbm, param_grid=param_grid, min_features_to_select=1, step=1)
model.fit(
X_clf_train, y_clf_train,
eval_set=[(X_clf_valid, y_clf_valid)], early_stopping_rounds=6, verbose=0,
categorical_feature=categorical_feature
)
X_clf_train = pd.DataFrame(X_clf_train)
X_clf_train[categorical_feature] = X_clf_train[categorical_feature].astype('category')
X_clf_valid = pd.DataFrame(X_clf_valid)
X_clf_valid[categorical_feature] = X_clf_valid[categorical_feature].astype('category')
### PASS category COLUMNS IN PANDAS DF ###
model = BoostRFE(clf_lgbm, param_grid=param_grid, min_features_to_select=1, step=1)
model.fit(X_clf_train, y_clf_train, eval_set=[(X_clf_valid, y_clf_valid)], early_stopping_rounds=6, verbose=0)
###Output
8 trials detected for ('learning_rate', 'num_leaves', 'max_depth')
trial: 0001 ### iterations: 00029 ### eval_score: 0.2036
trial: 0002 ### iterations: 00030 ### eval_score: 0.2034
trial: 0003 ### iterations: 00027 ### eval_score: 0.20617
trial: 0004 ### iterations: 00024 ### eval_score: 0.20003
trial: 0005 ### iterations: 00060 ### eval_score: 0.20332
trial: 0006 ### iterations: 00063 ### eval_score: 0.20329
trial: 0007 ### iterations: 00054 ### eval_score: 0.20136
trial: 0008 ### iterations: 00052 ### eval_score: 0.19959
|
macro-hw3-2.ipynb | ###Markdown
Note: I've increased the length of the process to 1000 to more clearly show the trend.
###Code
N = 1000
Β = 0.99
α = 1/3
ρ = 0.9
Χ = 1.
ϵ = np.random.randn(N)
_ = simulate(α, Β, Χ, ρ, ϵ, N)
# The time series follows the random shocks but remains stable
ρ = 0.1
_ = simulate(α, Β, Χ, ρ, ϵ, N)
# This time series follows the random shocks and also remains stable,
# but is in a tighter range than the previous time series
ρ = 1.01
_ = simulate(α, Β, Χ, ρ, ϵ, N)
# Here we see the time series diverge
ρ = 1.
_ = simulate(α, Β, Χ, ρ, ϵ, N)
#_ = simulate(α, Β, Χ, ρ, np.random.randn(N), N)
# even with t=1000, we don't always see that this time series remains stable, but it should
# the standard deviation of the time series has once again increased.
ρ = 1.
max_iter = 1000
N_loop = 100
plt.hist([np.std(simulate(α, Β, Χ, ρ, np.random.randn(N_loop), N_loop, show_plot=False)) for i in range(max_iter)], bins=25);
plt.show()
N_loop = 1000
plt.hist([np.std(simulate(α, Β, Χ, ρ, np.random.randn(N_loop), N_loop, show_plot=False)) for i in range(max_iter)], bins=25);
plt.show()
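# Note: gamma below is the truncated geometric sum alpha * (1 + Beta + ... + Beta**(n-1)),
# which for Beta < 1 and large n approaches alpha / (1 - Beta)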
n = 10000
γ = α*((1.-Β**n)/(1.-Β))
print("I_t / C_t converges to {}".format(γ))
ρ = 1.
N = 1000
Y = simulate(α, Β, Χ, ρ, ϵ, N)
C = [Y[2:][t]/(1+γ) for t in range(N)]
I = [Y[2:][t] - C[t] for t in range(N)]
plt.plot(C)
plt.show()
plt.plot(I)
plt.show()
print("σ_c: {}\nσ_i: {}\nσ_y: {}\n".format(np.std(C), np.std(I), np.std(Y[2:])))
print("The ratio of the standard deviations is {} and {} respectively".format(np.std(C)/np.std(Y[2:]), np.std(I)/np.std(Y[2:])))
print("This sums to 1 ({} + {} = {}).".format(np.std(C)/np.std(Y[2:]),
np.std(I)/np.std(Y[2:]),
np.round(np.std(C)/np.std(Y[2:])+np.std(I)/np.std(Y[2:]), 2)))
ρ = 1.01
_ = simulate(α, Β, Χ, ρ, ϵ, N)
# Here we see the time series diverge
###Output
_____no_output_____ |
examples/AutoML-basics-example.ipynb | ###Markdown
Onepanel AutoML 0.1.5 Onepanel AutoML is a framework that allows automated machine learning pipelines to be built easily and declaratively, and run locally (current implementation) or on a cluster (TBD). The framework can easily be extended with new features. Currently AutoML is integrated with the popular open-source machine learning libraries Scikit-learn and Hyperopt.
###Code
import sys
# AutoML uses Python's logging module
import logging
# Various sklearn models and metrics
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import roc_auc_score
from xgboost.sklearn import XGBClassifier
# AutoML Classes
from automl.pipeline import LocalExecutor, Pipeline, PipelineStep, PipelineData
from automl.data.dataset import Dataset
from automl.model import ModelSpace, CV, Validate, ChooseBest
from automl.hyperparam.templates import (random_forest_hp_space,
knn_hp_space, svc_kernel_hp_space,
grad_boosting_hp_space,
xgboost_hp_space)
from automl.feature.generators import FormulaFeatureGenerator
from automl.feature.selector import FeatureSelector
from automl.hyperparam.optimization import Hyperopt
from automl.combinators import RandomChoice
logging.basicConfig(level=logging.INFO)
# Create logger
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# Create STDERR handler
handler = logging.StreamHandler(sys.stderr)
# ch.setLevel(logging.DEBUG)
# Create formatter and add it to the handler
formatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# Set STDERR handler as the only handler
logger.handlers = [handler]
###Output
_____no_output_____
###Markdown
Core concepts AutoML follows a code-is-data and data-is-code philosophy: you define automated machine learning pipelines as data structures that can be executed later. The key concepts in AutoML are: * `Pipeline` - a machine learning pipeline. It executes the steps it contains, passing each step's output as the input to the next step * `PipelineStep` - all `Pipeline`s consist of steps. AutoML provides many different steps out of the box * `Executor` - executes a pipeline. Currently AutoML provides `LocalExecutor`, which runs the pipeline locally. Future versions will have a `DistributedExecutor` built in. AutoML can easily be extended by implementing new `PipelineStep`s. Next, we will use various built-in `PipelineStep`s to create an automated classification pipeline.
###Code
# Let's create a dataset first
x, y = make_classification(
n_samples=1000,
n_features=40,
n_informative=2,
n_redundant=10,
flip_y=0.05)
# We will use AutoML Dataset class to wrap our data
# into structure that can be understood by AutoML
data = Dataset(x, y)
# Next, we define our ModelSpace. ModelSpace is initialized by a list of tuples.
# First element of each tuple should be an sklearn-like estimator with fit method
# The second one is model parameter dictionary. Here we do not define parameters
# explicitly, but use hyperparameter templates from AutoML. Those templates can be
# used later by Hyperopt step to find best model parameters automatically
model_list = [
(RandomForestClassifier, random_forest_hp_space()),
(KNeighborsClassifier, knn_hp_space(lambda key: key)),
(XGBClassifier, xgboost_hp_space())
]
# Create executor, initialize it with our classification dataset
# and set total number of epochs to 2 (the pipeline will be run two times in a row).
# We can load any pipeline into executor using << operator like below:
context, pipeline_data = LocalExecutor(data, epochs=2) << \
(Pipeline() # Here we define the pipeline. Steps can be added to pipeline using >> operator
# First we define our ModelSpace. We wrap it with PipelineStep class
# and set initializer=True so that ModelSpace step will be run only at the first epoch
>> PipelineStep('model space', ModelSpace(model_list), initializer=True)
# But we are not obliged to wrap all steps with PipelineStep.
# This will be done automatically if we do not need to set any special parameters
# We use FormulaFeatureGenerator to create arithmetic combinations of features from the dataset
>> FormulaFeatureGenerator(['+', '-', '*'])
# Next we use Hyperopt to find the best combination of hyperparameters for each model
# We use test set validation with ROC AUC metric as a score function.
# CV could be used instead of Validate to perform cross-validation
>> Hyperopt(Validate(test_size=0.1, metrics=roc_auc_score), max_evals=5)
# Then we choose the best performing model we found
>> ChooseBest(1)
# And select 10 best features
>> FeatureSelector(10))
for result in pipeline_data.return_val:
print(result.model, result.score)
print(pipeline_data.dataset.data.shape)
###Output
LocalExecutor - INFO - Framework version: v0.1.5
LocalExecutor - INFO - Starting AutoML Epoch #1
LocalExecutor - INFO - Dataset columns: ['base_feature_0', 'base_feature_1', 'base_feature_2', 'base_feature_3', 'base_feature_4', 'base_feature_5', 'base_feature_6', 'base_feature_7', 'base_feature_8', 'base_feature_9', 'base_feature_10', 'base_feature_11', 'base_feature_12', 'base_feature_13', 'base_feature_14', 'base_feature_15', 'base_feature_16', 'base_feature_17', 'base_feature_18', 'base_feature_19', 'base_feature_20', 'base_feature_21', 'base_feature_22', 'base_feature_23', 'base_feature_24', 'base_feature_25', 'base_feature_26', 'base_feature_27', 'base_feature_28', 'base_feature_29', 'base_feature_30', 'base_feature_31', 'base_feature_32', 'base_feature_33', 'base_feature_34', 'base_feature_35', 'base_feature_36', 'base_feature_37', 'base_feature_38', 'base_feature_39']
0%| | 0/5 [00:00<?, ?it/s]LocalExecutor - INFO - Running step 'model space'
LocalExecutor - INFO - Running step 'FormulaFeatureGenerator'
FormulaFeatureGenerator - INFO - Generated new features. Old feature number - 40, new feature number - 41
LocalExecutor - INFO - Running step 'Hyperopt'
Hyperopt - INFO - {'n_estimators': <hyperopt.pyll.base.Apply object at 0x10ca16b70>, 'max_features': <hyperopt.pyll.base.Apply object at 0x10ca16f28>, 'max_depth': <hyperopt.pyll.base.Apply object at 0x10ca17278>, 'min_samples_split': 2, 'min_samples_leaf': <hyperopt.pyll.base.Apply object at 0x10ca17630>, 'bootstrap': <hyperopt.pyll.base.Apply object at 0x10ca17780>, 'oob_score': False, 'n_jobs': 1, 'random_state': <hyperopt.pyll.base.Apply object at 0x10ca17898>, 'verbose': False, 'criterion': 'gini'}
Hyperopt - INFO - Running hyperparameter optimization for <class 'sklearn.ensemble.forest.RandomForestClassifier'>
hyperopt.tpe - INFO - tpe_transform took 0.004116 seconds
hyperopt.tpe - INFO - TPE using 0 trials
###Markdown
Extending AutoMLFirst, let's look at how `PipelineStep`s can be created by creating a simple hello world pipeline.
###Code
# Let's create a simple pipeline
pipeline = Pipeline() >> PipelineStep('hello_step', lambda inp, context: print("Hello!"))
# And execute it locally
LocalExecutor() << pipeline
###Output
LocalExecutor - INFO - Framework version: v0.1.5
LocalExecutor - INFO - Starting AutoML Epoch #1
0%| | 0/1 [00:00<?, ?it/s]LocalExecutor - INFO - Running step 'hello_step'
100%|██████████| 1/1 [00:00<00:00, 301.92it/s]
###Markdown
As you can see, steps can be added to a pipeline using the `>>` operator. A pipeline may contain any number of steps. Any `PipelineStep` is constructed by passing a step name and a `callable` which will be executed when the `Pipeline` is run by an `Executor`. It's important to mention that all `Pipeline`s are lazy: the steps inside will be executed only when the `Pipeline` is loaded into an `Executor`. The `PipelineStep` syntax is fairly verbose, but it can be simplified: you can pass any `callable` to a pipeline and it will be wrapped into a `PipelineStep` automatically. The step function should take two arguments: `input` and `context`. `input` must be loaded through the executor parameters, while `context` contains global variables available to every step. If a `PipelineStep` returns a value, it should wrap it into the `PipelineData` class. The `input` passed to an `Executor` is wrapped into `PipelineData` automatically.
###Code
# We create two steps that add 1 and 2 to input data
plus_one = PipelineStep('plus_one', lambda inp, context: inp.dataset + 1)
plus_two = PipelineStep('plus_two', lambda inp, context: inp.dataset + 2)
LocalExecutor(0) << \
(Pipeline()
# We use RandomChoice combinator to choose randomly between two steps while executing the pipeline
>> RandomChoice([plus_one, plus_two]))
###Output
LocalExecutor - INFO - Framework version: v0.1.5
LocalExecutor - INFO - Starting AutoML Epoch #1
0%| | 0/1 [00:00<?, ?it/s]LocalExecutor - INFO - Running step 'RandomChoice'
100%|██████████| 1/1 [00:00<00:00, 996.98it/s]
###Markdown
It is recommended to create complex callables for `PipelineStep`s as classes:
###Code
class ComplexStep:
def __init__(self):
print("Initializing ComplexStep")
def __call__(self, inp, context):
print(inp)
return inp
LocalExecutor() << (Pipeline() >> ComplexStep())
###Output
LocalExecutor - INFO - Framework version: v0.1.5
LocalExecutor - INFO - Starting AutoML Epoch #1
0%| | 0/1 [00:00<?, ?it/s]LocalExecutor - INFO - Running step 'ComplexStep'
100%|██████████| 1/1 [00:00<00:00, 701.39it/s] |
EDA_joy/SARC/sarc.ipynb | ###Markdown
The JSON file is quite large, so we use ijson to stream it rather than load it all at once.
###Code
import ijson  # stream-parse the large file instead of loading it into memory at once

with open('comments.json', 'r') as f:
    comments = next(ijson.items(f, ''))
###Output
_____no_output_____
###Markdown
Take a look at the variables.
###Code
[i for i in comments['7u4r6']]
###Output
_____no_output_____
###Markdown
There are a total of 12,704,751 comments.
###Code
len(comments)
###Output
_____no_output_____
###Markdown
Take a look at the first comment
###Code
comments['7u4r6']
###Output
_____no_output_____
###Markdown
Partition the data into smaller JSON files. Each file holds around 1,000,000 comments, which is easier to manipulate.
###Code
import itertools
import json

# partition the comments into chunks of 1,000,000 and write each chunk to its own file,
# e.g. for the final chunk (comments 11,000,000 onwards):
d = dict(itertools.islice(comments.items(), 11000000, len(comments)))
with open('comments' + str(12) + '.json', 'w') as f:
    json.dump(d, f)
###Output
_____no_output_____
###Markdown
Take the first 1M comments to conduct EDA.
###Code
import pandas as pd

d = dict(itertools.islice(comments.items(), 1000000))
df = pd.DataFrame.from_dict(d).T
df.head()
df.describe()
###Output
_____no_output_____
###Markdown
Lowercase
###Code
df['text'] = df['text'].apply(lambda x: x.lower())
df.head()
###Output
_____no_output_____
###Markdown
Text length
###Code
df['len'] = df['text'].apply(lambda x: len(x.split(" ")))
df.head()
###Output
_____no_output_____
###Markdown
Remove punctuation.
###Code
import string
for i in string.punctuation:
df['text'] = df['text'].apply(lambda x: x.replace(i, ""))
df
df.sort_values('ups')
df.sort_values('downs')
###Output
_____no_output_____
###Markdown
Scores are calculated as ups - downs.
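A quick sanity check of that claim (a one-liner sketch; it assumes the 'ups', 'downs' and 'score' columns are numeric):
###Code
# fraction of comments for which score == ups - downs (should be 1.0 if the claim holds)
(df['ups'] - df['downs'] == df['score']).mean()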
###Code
df.sort_values('score')
df.sort_values('score')['text'][2]
###Output
_____no_output_____
###Markdown
A column to indicate positive/negative score
###Code
df['sign'] = df['score'].apply(lambda x: 1 if x >= 0 else -1)
###Output
_____no_output_____
###Markdown
Import the VADER sentiment lexicon.
###Code
sen = pd.read_csv('vader_lexicon.txt',
sep='\t',
usecols=[0, 1],
header=None,
names=['token', 'polarity'],
index_col='token'
)
sen.head()
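# Next, explode each comment into one row per word ("tidy" format),
# so that each word can be joined to its polarity in the lexicon above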
tidy_format = (
df['text']
.str.split(expand=True)
.stack()
.reset_index(level=1)
.rename(columns={'level_1': 'num', 0: 'word'})
)
tidy_format.head()
###Output
_____no_output_____
###Markdown
Calculate the sentiment score (sum of word polarities) for each comment.
###Code
df['polarity'] = (
tidy_format
.merge(sen, how='left', left_on='word', right_index=True)
.reset_index()
.loc[:, ['index', 'polarity']]
.groupby('index')
.sum()
.fillna(0)
)
df.head()
df.groupby('sign').describe()
df.sort_values('score')
df.to_csv('sample_df.csv')
###Output
_____no_output_____ |
Programming for Data Analytics Project.ipynb | ###Markdown
Problem statement For this project you must create a data set by simulating a real-world phenomenon of your choosing. You may pick any phenomenon you wish – you might pick one that is of interest to you in your personal or professional life. Then, rather than collect data related to the phenomenon, you should model and synthesise such data using Python. We suggest you use the numpy.random package for this purpose. Specifically, in this project you should: * Choose a real-world phenomenon that can be measured and for which you could collect at least one-hundred data points across at least four different variables. * Investigate the types of variables involved, their likely distributions, and their relationships with each other. * Synthesise/simulate a data set as closely matching their properties as possible. * Detail your research and implement the simulation in a Jupyter notebook – the data set itself can simply be displayed in an output cell within the notebook. Note that this project is about simulation – you must synthesise a data set. Some students may already have some real-world data sets in their own files. It is okay to base your synthesised data set on these should you wish (please reference it if you do), but the main task in this project is to create a synthesised data set. Choosing the Phenomenon to Simulate Data for Having spent some time trying to think of a phenomenon that could be convincingly modelled by using only one's own judgement, and having considered, for example, creating a dataset similar to [Verizon's data breach investigation report datasets](https://enterprise.verizon.com/resources/reports/dbir/), I concluded that it would be best to lessen as much as possible the severity of the suspension of disbelief required to take the dataset seriously. In this vein, I decided to model a very uncomplicated phenomenon, namely, student satisfaction for a module in such a course as GMIT's Higher Diploma in Data Analytics (at least I think that's what it's still called - it might have been changed to 'Higher Diploma in Data Fabrication' considering the nature of this assessment). Approach to Simulating the Data To create the dataset, I imagined that a thousand students had completed a survey with seven questions: 1. What is your age? 2. How satisfied were you with your lecturer's engagement with you and the course? 3. How satisfied were you with the structure and pace of the module? 4. Did you find the module-content interesting? 5. How would you rate the difficulty of the module? 6. Were the assessments of the module appropriate to the module content? 7. How satisfied were you with the module overall? In the case of the first question, the options available to the respondent were: * (20-25) * (25-30) * (30-35) * (35-40) * (40-50) * (50-65). For the other six questions, the respondent was given five possible answers to choose from, namely: * (very unsatisfied/easy/inappropriate) * (unsatisfied/easy/inappropriate) * (neutral) * (satisfied/difficult/appropriate) * (very satisfied/difficult/appropriate). Because we are in a sense 'reverse engineering' a dataset, the easiest way to create the dataset would likely be to first determine the distribution for the variable that could be said to be the target variable, i.e. a variable that is more determined than determining in relation to the other variables in the dataset. Of course, there is not necessarily just one target variable in every dataset, and the lines between target and non-target (i.e. dependent) variables can often blur.
However, in our dataset, there happens to be one main target variable: the answer to the seventh question, 'how satisfied were you with the module overall'. I assumed a distribution approximating the normal distribution for this variable. I say 'approximate' here because of course all the variables in this dataset are discrete, and the actual normal distribution is continuous. Thus, to create the values for the 'overall satisfaction' variable, I used numpy.random.normal() with a mean of three and a standard deviation of 1, rounding each result to the nearest integer, and replacing any values less than one with one, and any greater than five with five. The fact that we are removing the tails of the distribution of course takes away from the 'normal-ness' of the distribution, but that is acceptable for our purpose, as this is a discrete variable in any case. Once the target variable's distribution has been determined, its dependent variables' distributions and values can more easily be determined, particularly in our case where all the dependent variables have the same scale (very un-, un-, neutral, affirmative, very affirmative). The dependent variables (the only justification here is my own judgement - these are simulated, hypothetical relationships) are the answers to questions 2 ('engagement'), 3 ('structure'), 4 ('interesting') and 6 ('assessment'). If we assume that those values will also be approximately normally distributed, then for each datapoint we can add more or less noise to the target variable value and take that 'noisened' value as the dependent value; the dependent variables more closely correlated to the target variable get less noise. We can create the noise by generating a normal distribution with only one datapoint, taking the datapoint's target variable value ('overall satisfaction') as the mean, and the standard deviation as the amount of noise. This accounts for the relationships between the target variable and each of its dependent variables. The other variables in this dataset are 'age' and 'difficulty', and they actually comprise a target and dependent variable set themselves, with age determining difficulty, i.e. older age brackets finding the module more difficult. This time we set probabilities for each age bracket, and select an age bracket based on those probabilities for each data point, using numpy.random.choice. Then the value for 'difficulty' for each datapoint is determined using the age bracket. Each difficulty value is taken from a normal distribution, with the mean of that distribution *depending* on the age bracket, i.e. higher means for higher age brackets. That concludes how I simulated the data. Below is the code for performing the simulation.
###Code
import numpy as np
import pandas as pd
from collections import Counter
# age brackets
ages = ["20-25", "25-30", "30-35", "35-40", "40-50", "50-65"]
# corresponding probabilities
ageProbs = [0.2,0.25,0.3,0.15, 0.06, 0.04]
# select 1000 age brackets according to probabilities
age = np.random.choice(ages, 1000, replace=True, p=ageProbs)
print(Counter(age))
# generate approximately normal distribution of satisfaction answers using list comprehension
# good overview of list comprehensions here: https://appdividend.com/2020/05/13/python-list-replace-replace-string-integer-in-list/
satisfaction = [1 if x < 1 else x for x in [5 if x > 5 else x for x in [int(x.round()) for x in np.random.normal(3, 1, 1000)]]]
# I have incorporated the below list comprehensions into the above
#removeOverFives = [5 if x > 5 else x for x in overallSatisfaction]
#removeUnderZeros = [1 if x < 1 else x for x in overallSatisfaction]
difficulty = []
# greater mean difficulty score for higher age brackets
# difficult is dependent on age, but nothing else
agesDiffMeans = {ages[0]:3, ages[1]:3, ages[2]:3, ages[3]:3.5, ages[4]:3.75, ages[5]:4}
for y in age:
difficulty.append([1 if x < 1 else x for x in [5 if x > 5 else x for x in [int(x.round()) for x in np.random.normal(agesDiffMeans[y], 1, 1)]]][0])
# create a dictionary to store the data and column names
# for now we will only include the remaining data to be simuluated
# this way we can loop through the dictionary to simulate the data
# we will include standard deviation as an extra key value pair for each item
# as this will needed to created the data
data = {}
data.update({"engagement":{"data":np.array([]), "std": 0.8}})
data.update({"structure":{"data":np.array([]), "std": 1}})
data.update({"content":{"data":np.array([]), "std": 0.3}})
data.update({"assessment":{"data":np.array([]), "std": 0.6}})
for key, value in data.items():
value['data'] = np.array([1 if x < 1 else x for x in [5 if x > 5 else x for x in [int(x.round()) for x in [x + np.random.normal(0, value['std'], 1) for x in satisfaction]]]])
# we don't need the standard deviation anymore, so we get rid of it
# we change the dictionary to a simpler format: 'column name':data
# this will allow us to use the dict to create a Pandas DataFrame
data[key] = [x for x in value['data']]
# now add in the data we had already calculated
data.update({"difficulty":difficulty})
data.update({"age":age})
data.update({"satisfaction":satisfaction})
# print out the frequencies for each column, using collections.Counter
for key, value in data.items():
print(f"{key} counts are: {Counter(data[key])}")
# create a DataFrame for easy display
df = pd.DataFrame.from_dict(data)
# display at least 30 rows
pd.set_option('display.min_rows', 30)
df
###Output
Counter({'30-35': 299, '25-30': 264, '20-25': 195, '35-40': 150, '40-50': 55, '50-65': 37})
engagement counts are: Counter({3: 321, 2: 216, 4: 215, 1: 134, 5: 114})
structure counts are: Counter({3: 282, 4: 221, 2: 213, 1: 155, 5: 129})
content counts are: Counter({3: 352, 2: 275, 4: 232, 5: 72, 1: 69})
assessment counts are: Counter({3: 331, 4: 240, 2: 228, 1: 117, 5: 84})
difficulty counts are: Counter({3: 371, 4: 277, 2: 221, 5: 90, 1: 41})
age counts are: Counter({'30-35': 299, '25-30': 264, '20-25': 195, '35-40': 150, '40-50': 55, '50-65': 37})
satisfaction counts are: Counter({3: 389, 2: 263, 4: 229, 1: 62, 5: 57})
###Markdown
Analyzing the Data When analyzing any dataset, one doesn't begin 'blind.' One almost always has a certain understanding of what variables one is working with, and likely which variables are going to be most interesting. In this dataset, it is of course the 'satisfaction' variable values that are going to be the most important to look at, or rather, how those values are related to the other variables. However, before analyzing the relationships, the first thing to do would be to look at each variable individually, i.e. plot histograms for each variable to see how their values are distributed. Step 1: Use Histograms to Observe Variables' Frequency Distributions
###Code
import matplotlib.pyplot as plt
%matplotlib inline
fig, ((ax1, ax2, ax3), (ax4, ax5, ax6), (ax7, ax8, ax9)) = plt.subplots(nrows=3, ncols=3, figsize=(15,9))
axes = [ax4, ax5, ax6, ax7, ax8, ax9, ax2]
for column, axis in zip(df.columns, axes):
labels, counts = np.unique(df[column], return_counts=True)
axis.bar(labels, counts, align='center', edgecolor='k')
axis.set_xlabel(column)
fig.delaxes(ax1)
fig.delaxes(ax3)
fig.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
The following is immediately obvious from looking at the histograms: * Except for age, all of the variables have a bell-shaped distribution with 3 being most frequent, followed by 2 or 4, and lastly 1 or 5. They appear to be symmetric without significant skew. * The Age variable is not bell shaped, but is heavily skewed towards the left. It still has one reasonably central upper limit, however. * The Satisfaction and Content histograms are very similar in shape, perhaps indicating a strong correlation between the two. * The Difficulty histogram appears to be skewed somewhat toward the right. Apart from age, it is the most skewed. * Engagement and Structure have higher frequencies for 1 and 5, i.e. low kurtosis, or 'fatter' tails. Once we have an initial understanding of the distribution of each of the variables, we should move on to examining the relationships between the variables, particularly where relationships have been suggested by the histograms, such as between Satisfaction and Content. Of course, one could analyze each distribution further, for example by performing kernel density estimates and running tests to check whether the variable samples are likely to have been drawn from particular distributions, but the relative simplicity of the survey from which these data points are drawn renders such in-depth analysis superfluous. Step 2: Visual Analysis of Relationships between Variables When performing visual analysis of variables' relationships, Seaborn's pairplot is usually the first port of call. However, in our case, because there are only five possible values for each variable (apart from age), most of the datapoints will be overlaid on top of each other, and as the density of each point will not be displayed, we will likely be faced with a five by five grid of twenty-five points, hardly revealing anything. For the sake of showing this, I will call pairplot() on the dataframe, using Age as the hue. One thing to note, however, is that there are still some discernible suggestions of relationships, such as between Content and Satisfaction, as above. Note also the large differences in the shapes of the kernel density estimates of the Difficulty variable for each Age bracket.
###Code
import seaborn as sb
sb.pairplot(df, hue='age')
plt.show()
###Output
_____no_output_____
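###Markdown
As an optional numerical check on the skew and kurtosis observations from Step 1 (a sketch; it assumes scipy is available and skips the categorical Age column):
###Code
# quantify skewness and excess kurtosis for each rating column
from scipy import stats

for column in df.columns:
    if column == 'age':
        continue
    print(f"{column}: skew={stats.skew(df[column]):.3f}, excess kurtosis={stats.kurtosis(df[column]):.3f}")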
###Markdown
Step 3: Linear Regression to Analyze Relationships Between Variables Because the relationships between our discrete distributions do not lend themselves to easy visual analysis through plotting, the next step would be to analyze the relationships non-visually, i.e. by calculating statistics for the relationships, such as the accuracy and error rates of linear regression models used to predict one variable based on another. This can easily be done using sklearn. A reasonable approach would be to first create a multiple linear regression model by using all the variables except Satisfaction and Age as our inputs, and having Satisfaction as the output to be predicted based on those inputs. We prioritize this model, as intuition suggests that the value of a datapoint's Satisfaction is determined by those other variables. As a measure of whether Satisfaction can accurately be predicted/determined by those inputs, we could calculate the accuracy of the model (how often the prediction was correct), and two common measures of a predictive model's error rate: mean absolute error, which is the mean of the absolute value of the error for each datapoint, and root mean squared error, which is to error measurement what the standard deviation is to variance measurement. All of these values can be calculated with sklearn's metrics module once the model is fitted. As to why I calculated two measures of error - there are advantages to either measure. Mean absolute error is intuitive, while root mean squared error is more amenable to mathematics and penalizes severe errors heavily. There is a very clear introduction to the reasons why one might choose to evaluate a model's mean absolute error versus its root mean squared error [here](https://medium.com/human-in-a-machine-world/mae-and-rmse-which-metric-is-better-e60ac3bde13d). To quote from the insightful conclusions to this article:> Taking the square root of the average squared errors has some interesting implications for RMSE. Since the errors are squared before they are averaged, the RMSE gives a relatively high weight to large errors. This means the RMSE should be more useful when large errors are particularly undesirable.>From an interpretation standpoint, MAE is clearly the winner. RMSE does not describe average error alone and has other implications that are more difficult to tease out and understand.On the other hand, one distinct advantage of RMSE over MAE is that RMSE avoids the use of taking the absolute value, which is undesirable in many mathematical calculations (not discussed in this article, another time…).There is a clear introduction to using Linear Regression and calculating error values with sklearn [here](https://www.freecodecamp.org/news/how-to-build-and-train-linear-and-logistic-regression-ml-models-in-python/). There is a 'relentlessly pruned' example of how to use sklearn to calculate accuracy [here](https://mahata.github.io/machine%20learning/2014/12/31/sklearn-accuracy_score/). Once we have calculated the above multiple linear regression model, another reasonable step would be to calculate the single linear regression models between each pair of variables, which in our case of 6 variables (discounting Age) is fifteen pairs. We can then list them in order of accuracy and error measures to understand which variables are most and least correlated.
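As a quick aside illustrating the difference between the two error measures (a toy example, separate from the survey data): two sets of predictions can share the same MAE while having very different RMSEs when one of them contains a single large error.
###Code
# toy illustration of MAE vs RMSE
import numpy as np
from sklearn import metrics

y_true = np.array([3, 3, 3, 3])
pred_small_errors = np.array([2, 4, 2, 4])    # every prediction off by 1
pred_one_big_error = np.array([3, 3, 3, 7])   # a single prediction off by 4

for name, pred in [("small errors", pred_small_errors), ("one big error", pred_one_big_error)]:
    mae = metrics.mean_absolute_error(y_true, pred)
    rmse = np.sqrt(metrics.mean_squared_error(y_true, pred))
    print(f"{name}: MAE={mae:.2f}, RMSE={rmse:.2f}")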
###Code
# some code adapted from here:
# https://www.freecodecamp.org/news/how-to-build-and-train-linear-and-logistic-regression-ml-models-in-python/
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
# our dependent variables
x = df[['engagement', 'content', 'structure', 'assessment', 'difficulty']]
# our target variable
y = df['satisfaction']
# train our model on 0.7 of our data
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.3)
model = LinearRegression()
model.fit(x_train, y_train)
predictions = model.predict(x_test)
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(15,7))
axes[0].scatter(y_test, predictions)
axes[0].set_ylabel("Predicted Satisfaction Data")
axes[0].set_xlabel("Actual Satisfaction Data")
axes[1].scatter(y_test, predictions.round())
axes[1].set_xlabel("Actual Satisfaction Data")
plt.show()
print(f'The mean absolute error is {metrics.mean_absolute_error(y_test, predictions)}')
print(f'The root mean squared error is {np.sqrt(metrics.mean_squared_error(y_test, predictions))}')
print(f'The accuracy of our model is {metrics.accuracy_score(y_test, predictions.round())}')
variables = ['engagement', 'content', 'structure', 'assessment', 'difficulty', 'satisfaction']
rmseResults = {}
meaResults = {}
accResults = {}
for x in variables:
for y in variables:
if x == y:
continue
if variables.index(y) < variables.index(x):
continue
x_train, x_test, y_train, y_test = train_test_split(df[[x]], df[[y]], test_size = 0.3)
model = LinearRegression()
model.fit(x_train, y_train)
predictions = model.predict(x_test)
mea = metrics.mean_absolute_error(y_test, predictions)
rmse = np.sqrt(metrics.mean_squared_error(y_test, predictions))
accuracy = metrics.accuracy_score(y_test, predictions.round())
print(f'The model for "{x}" as the predictor of "{y}" resulted in the following measures of error:')
print(f'\tThe mean absolute error is {mea}')
print(f'\tThe root mean squared error is {rmse}')
print(f'\tThe accuracy is {accuracy}\n')
rmseResults.update({f"{x}-{y}": rmse})
meaResults.update({f"{x}-{y}": mea})
accResults.update({f"{x}-{y}": accuracy})
sortedResults = dict(sorted(rmseResults.items(), key=lambda item: item[1]))
print('The rankings for variable relationships according to the root mean squared error is as follows:')
for i, result in enumerate(sortedResults):
print(f'{i+1} - {result} - {sortedResults[result]}')
sortedResults = dict(sorted(meaResults.items(), key=lambda item: item[1]))
print('\nThe rankings for variable relationships according to the mean absolute error is as follows:')
for i, result in enumerate(sortedResults):
print(f'{i+1} - {result} - {sortedResults[result]}')
sortedResults = dict(sorted(accResults.items(), key=lambda item: item[1], reverse=True))
print('\nThe rankings for variable relationships according to their accuracy is as follows:')
for i, result in enumerate(sortedResults):
print(f'{i+1} - {result} - {sortedResults[result]}')
###Output
The model for "engagement" as the predictor of "content" resulted in the following measures of error:
The mean absolute error is 0.552433030836295
The root mean squared error is 0.6906652623985692
The accuracy is 0.5133333333333333
The model for "engagement" as the predictor of "structure" resulted in the following measures of error:
The mean absolute error is 0.9610907500026357
The root mean squared error is 1.161069691603746
The accuracy is 0.27666666666666667
The model for "engagement" as the predictor of "assessment" resulted in the following measures of error:
The mean absolute error is 0.6899435741957217
The root mean squared error is 0.8455513500990229
The accuracy is 0.38666666666666666
The model for "engagement" as the predictor of "difficulty" resulted in the following measures of error:
The mean absolute error is 0.7945956308081544
The root mean squared error is 0.9945554727158672
The accuracy is 0.38333333333333336
The model for "engagement" as the predictor of "satisfaction" resulted in the following measures of error:
The mean absolute error is 0.5019886417578182
The root mean squared error is 0.6258786734600016
The accuracy is 0.59
The model for "content" as the predictor of "structure" resulted in the following measures of error:
The mean absolute error is 0.724106294340977
The root mean squared error is 0.9243269768222441
The accuracy is 0.45666666666666667
The model for "content" as the predictor of "assessment" resulted in the following measures of error:
The mean absolute error is 0.4930673049442703
The root mean squared error is 0.6885853629183797
The accuracy is 0.5866666666666667
The model for "content" as the predictor of "difficulty" resulted in the following measures of error:
The mean absolute error is 0.796594912780658
The root mean squared error is 1.0075154652165135
The accuracy is 0.36666666666666664
The model for "content" as the predictor of "satisfaction" resulted in the following measures of error:
The mean absolute error is 0.1358780742828569
The root mean squared error is 0.24933397148075015
The accuracy is 0.9366666666666666
The model for "structure" as the predictor of "assessment" resulted in the following measures of error:
The mean absolute error is 0.7461815771622679
The root mean squared error is 0.9331577218342764
The accuracy is 0.37333333333333335
The model for "structure" as the predictor of "difficulty" resulted in the following measures of error:
The mean absolute error is 0.826592563659636
The root mean squared error is 1.0334242286941733
The accuracy is 0.39
The model for "structure" as the predictor of "satisfaction" resulted in the following measures of error:
The mean absolute error is 0.5521884393703052
The root mean squared error is 0.7250724861925315
The accuracy is 0.5233333333333333
The model for "assessment" as the predictor of "difficulty" resulted in the following measures of error:
The mean absolute error is 0.8120625165138716
The root mean squared error is 1.017438682073009
The accuracy is 0.37
The model for "assessment" as the predictor of "satisfaction" resulted in the following measures of error:
The mean absolute error is 0.42849660451310856
The root mean squared error is 0.5624964889851665
The accuracy is 0.62
The model for "difficulty" as the predictor of "satisfaction" resulted in the following measures of error:
The mean absolute error is 0.6988896229011836
The root mean squared error is 0.9335465623659756
The accuracy is 0.42
The rankings for variable relationships according to the root mean squared error is as follows:
1 - content-satisfaction - 0.24933397148075015
2 - assessment-satisfaction - 0.5624964889851665
3 - engagement-satisfaction - 0.6258786734600016
4 - content-assessment - 0.6885853629183797
5 - engagement-content - 0.6906652623985692
6 - structure-satisfaction - 0.7250724861925315
7 - engagement-assessment - 0.8455513500990229
8 - content-structure - 0.9243269768222441
9 - structure-assessment - 0.9331577218342764
10 - difficulty-satisfaction - 0.9335465623659756
11 - engagement-difficulty - 0.9945554727158672
12 - content-difficulty - 1.0075154652165135
13 - assessment-difficulty - 1.017438682073009
14 - structure-difficulty - 1.0334242286941733
15 - engagement-structure - 1.161069691603746
The rankings for variable relationships according to the mean absolute error is as follows:
1 - content-satisfaction - 0.1358780742828569
2 - assessment-satisfaction - 0.42849660451310856
3 - content-assessment - 0.4930673049442703
4 - engagement-satisfaction - 0.5019886417578182
5 - structure-satisfaction - 0.5521884393703052
6 - engagement-content - 0.552433030836295
7 - engagement-assessment - 0.6899435741957217
8 - difficulty-satisfaction - 0.6988896229011836
9 - content-structure - 0.724106294340977
10 - structure-assessment - 0.7461815771622679
11 - engagement-difficulty - 0.7945956308081544
12 - content-difficulty - 0.796594912780658
13 - assessment-difficulty - 0.8120625165138716
14 - structure-difficulty - 0.826592563659636
15 - engagement-structure - 0.9610907500026357
The rankings for variable relationships according to their accuracy is as follows:
1 - content-satisfaction - 0.9366666666666666
2 - assessment-satisfaction - 0.62
3 - engagement-satisfaction - 0.59
4 - content-assessment - 0.5866666666666667
5 - structure-satisfaction - 0.5233333333333333
6 - engagement-content - 0.5133333333333333
7 - content-structure - 0.45666666666666667
8 - difficulty-satisfaction - 0.42
9 - structure-difficulty - 0.39
10 - engagement-assessment - 0.38666666666666666
11 - engagement-difficulty - 0.38333333333333336
12 - structure-assessment - 0.37333333333333335
13 - assessment-difficulty - 0.37
14 - content-difficulty - 0.36666666666666664
15 - engagement-structure - 0.27666666666666667
|
FeatureExtractionModule/src/autoencoder_approach/Per task approach/NC/autoencoder_classifiers-NC-low-vs-high-no-TFv1.ipynb | ###Markdown
Classifiers - NC - low vs high complexity - no TFv1 Exploring different classifiers with different autoencoders for the NC task. No contractive autoencoder because it needs TFv1 compatibility. Table of contents: autoencoders: [Undercomplete Autoencoder](Undercomplete-Autoencoder) [Sparse Autoencoder](Sparse-Autoencoder) [Deep Autoencoder](Deep-Autoencoder) classifiers: [Simple dense classifier](Simple-dense-classifier) [LSTM-based classifier](LSTM-based-classifier) [kNN](kNN) [SVC](SVC) [Random Forest](Random-Forest) [XGBoost](XGBoost)
###Code
import datareader # made by the previous author for reading the collected data
import dataextractor # same as above
import pandas
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, regularizers
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Dropout, Activation, Input
from tensorflow.keras.layers import LSTM
from tensorflow.keras.layers import Conv1D, MaxPooling1D
from tensorflow.keras.optimizers import Adam, Nadam
import tensorflow.keras.backend as K
tf.keras.backend.set_floatx('float32') # call this, to set keras to use float32 to avoid a warning message
metrics = ['accuracy']
from sklearn.preprocessing import StandardScaler, MinMaxScaler
import json
from datetime import datetime
import warnings
import matplotlib.pyplot as plt
import random
random.seed(1)
np.random.seed(4)
tf.random.set_seed(2)
# Start the notebook in the terminal with "PYTHONHASHSEED=0 jupyter notebook"
# or in anaconda "set PYTHONHASHSEED=0" then start jupyter notebook
import os
if os.environ.get("PYTHONHASHSEED") != "0":
raise Exception("You must set PYTHONHASHSEED=0 before starting the Jupyter server to get reproducible results.")
###Output
_____no_output_____
###Markdown
This is the modified original author's code for reading data:
###Code
def model_train(model, x_train, y_train, batch_size, epochs, x_valid, y_valid, x_test, y_test):
"""Train model with the given training, validation, and test set, with appropriate batch size and # epochs."""
epoch_data = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(x_valid, y_valid), verbose=0)
score = model.evaluate(x_test, y_test, batch_size=batch_size, verbose=0)
acc = score[1]
score = score[0]
return score, acc, epoch_data
def get_task_complexities_timeframes_br_hb(path, ident, seconds, checkIfValid=True):
"""Returns raw data along with task complexity class.
TODO: join functions. Add parameter to choose different task types and complexities"""
dataread = datareader.DataReader(path, ident) # initialize path to data
data = dataread.read_grc_data() # read from files
samp_rate = int(round(len(data[1]) / max(data[0])))
cog_res = dataread.read_cognitive_load_study(str(ident) + '-primary-extract.txt')
tasks_data = np.empty((0, seconds*samp_rate))
tasks_y = np.empty((0, 1))
breathing = np.empty((0,12))
heartbeat = np.empty((0,10))
busy_n = dataread.get_data_task_timestamps(return_indexes=True)
for i in cog_res['task_number']:
task_num_table = i - 225 # 0 - 17
tmp_tasks_data = np.empty((0, seconds*samp_rate))
tmp_tasks_y = np.empty((0, 1))
tmp_breathing = np.empty((0,12))
tmp_heartbeat = np.empty((0,10))
### task complexity classification
if cog_res['task_complexity'][task_num_table] == 'medium':
continue
if cog_res['task_label'][task_num_table] != 'NC':
continue
map_compl = {
'low': 0,
'medium': 2,
'high': 1
}
for j in range(10):
new_end = int(busy_n[task_num_table][1] - j * samp_rate)
new_start = int(new_end - samp_rate*30)
dataextract = dataextractor.DataExtractor(data[0][new_start:new_end],
data[1][new_start:new_end], samp_rate)
# get extracted features for breathing
tmpBR = dataextract.extract_from_breathing_time(data[0][new_start:new_end],
data[1][new_start:new_end])
#get extracted features for heartbeat
tmpHB = dataextract.extract_from_heartbeat_time(data[0][new_start:new_end],
data[1][new_start:new_end])
if checkIfValid and not(tmpBR['br_ok'][0]):
continue
try:
tmp_tasks_data = np.vstack((tmp_tasks_data, dataextract.y[-samp_rate * seconds:]))
tmp_tasks_y = np.vstack((tmp_tasks_y, map_compl.get(cog_res['task_complexity'][task_num_table])))
tmp_breathing = np.vstack((tmp_breathing, tmpBR.to_numpy(dtype='float64', na_value=0)[0][:-1]))
tmp_heartbeat = np.vstack((tmp_heartbeat, tmpHB.to_numpy(dtype='float64', na_value=0)[0][:-1]))
except ValueError:
# print(ident)
continue
tasks_data = np.vstack((tasks_data, dataextract.y))
tasks_y = np.vstack((tasks_y, map_compl.get(cog_res['task_complexity'][task_num_table])))
breathing = np.vstack((breathing, tmpBR.to_numpy(dtype='float64', na_value=0)[0][:-1]))
heartbeat = np.vstack((heartbeat, tmpHB.to_numpy(dtype='float64', na_value=0)[0][:-1]))
return tasks_data, tasks_y, breathing, heartbeat
def get_data_from_idents_br_hb(path, idents, seconds):
"""Go through all user data and take out windows of only <seconds> long time frames,
along with the given class (from 'divide_each_task' function).
"""
samp_rate = 43 # hard-coded sample rate
data, ys = np.empty((0, samp_rate*seconds)), np.empty((0, 1))
brs = np.empty((0,12))
hbs = np.empty((0,10))
combined = np.empty((0,22))
    # was getting some weird warnings; Stack Overflow said to ignore them
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
for i in idents:
#x, y, br, hb = get_busy_vs_relax_timeframes_br_hb(path, i, seconds) # either 'get_busy_vs_relax_timeframes',
# get_engagement_increase_vs_decrease_timeframes, get_task_complexities_timeframes or get_TLX_timeframes
x, y, br, hb = get_task_complexities_timeframes_br_hb(path, i, seconds)
data = np.vstack((data, x))
ys = np.vstack((ys, y))
brs = np.vstack((brs, br))
hbs = np.vstack((hbs, hb))
combined = np.hstack((brs,hbs))
return data, ys, brs, hbs, combined
# Accs is a dictionary which holds 1d arrays of accuracies in each key
# except the key 'test id' which holds strings of the id which yielded the coresponding accuracies
def print_accs_stats(accs):
printDict = {}
# loop over each key
for key in accs:
if (key == 'test id'):
# skip calculating ids
continue
printDict[key] = {}
tmpDict = printDict[key]
# calculate and print some statistics
tmpDict['min'] = np.min(accs[key])
tmpDict['max'] = np.max(accs[key])
tmpDict['mean'] = np.mean(accs[key])
tmpDict['median'] = np.median(accs[key])
print(pandas.DataFrame.from_dict(printDict).to_string())
def clear_session_and_set_seeds():
# clear session and set seeds again
K.clear_session()
random.seed(1)
np.random.seed(4)
tf.random.set_seed(2)
###Output
_____no_output_____
###Markdown
Prepare data Initialize variables:
###Code
# initialize a dictionary to store accuracies for comparison
accuracies = {}
# used for reading the data into an array
seconds = 30 # time window length
samp_rate = 43 # hard-coded sample rate
phase_shape = np.empty((0, samp_rate*seconds))
y_shape = np.empty((0, 1))
breathing_shape = np.empty((0,12))
heartbeat_shape = np.empty((0,10))
combined_shape = np.empty((0,22))
idents = ['2gu87', 'iz2ps', '1mpau', '7dwjy', '7swyk', '94mnx', 'bd47a', 'c24ur', 'ctsax', 'dkhty', 'e4gay',
'ef5rq', 'f1gjp', 'hpbxa', 'pmyfl', 'r89k1', 'tn4vl', 'td5pr', 'gyqu9', 'fzchw', 'l53hg', '3n2f9',
'62i9y']
path = '../../../../../StudyData/'
# change to len(idents) at the end to use all the data
n = len(idents)
# Holds all the data so it doesnt have to be read from file each time
data_dict = {}
###Output
_____no_output_____
###Markdown
Fill the data dictionary:
###Code
for ident in idents.copy():
# read data
phase, y, breathing, heartbeat, combined = get_data_from_idents_br_hb(path, [ident], seconds)
if (y.shape[0] <= 0):
idents.remove(ident)
print(ident)
continue
# initialize ident in
data_dict[ident] = {}
tmpDataDict = data_dict[ident]
# load data into dictionary
tmpDataDict['phase'] = phase
tmpDataDict['y'] = y
tmpDataDict['breathing'] = breathing
tmpDataDict['heartbeat'] = heartbeat
tmpDataDict['combined'] = combined
print(n)
n = len(idents)
print(n)
# load all phase data to use for training autoencoders
phase_all_train = get_data_from_idents_br_hb(path, idents[:-2], seconds)[0]
# Scale each row with MinMax to range [0,1]
phase_all_train = MinMaxScaler().fit_transform(phase_all_train.T).T
# load all validation phase data to use for training autoencoders
phase_all_valid = get_data_from_idents_br_hb(path, idents[-2:], seconds)[0]
# Scale each row with MinMax to range [0,1]
phase_all_valid = MinMaxScaler().fit_transform(phase_all_valid.T).T
###Output
_____no_output_____
###Markdown
Autoencoders Train autoencoders to save their encoded representations in the data dictionary:
###Code
# AE Training params
batch_size = 128
epochs = 1000
encoding_dim = 30
ae_encoded_shape = np.empty((0,encoding_dim))
def compare_plot_n(data1, data2, data3, plot_n=3):
#plot data1 values
plt.figure()
plt.figure(figsize=(20, 4))
for i in range(plot_n):
plt.subplot(1, 5, i+1)
plt.plot(data1[i])
#plot data2 values
plt.figure()
plt.figure(figsize=(20, 4))
for i in range(plot_n):
plt.subplot(1, 5, i+1)
plt.plot(data2[i])
#plot data3 values
plt.figure()
plt.figure(figsize=(20, 4))
for i in range(plot_n):
plt.subplot(1, 5, i+1)
plt.plot(data3[i])
###Output
_____no_output_____
###Markdown
Undercomplete Autoencoder from https://blog.keras.io/building-autoencoders-in-keras.html
###Code
def undercomplete_ae(x, encoding_dim=64, encoded_as_model=False):
# Simplest possible autoencoder from https://blog.keras.io/building-autoencoders-in-keras.html
# this is our input placeholder
input_data = Input(shape=x[0].shape, name="input")
dropout = Dropout(0.125, name="dropout", seed=42)(input_data)
# "encoded" is the encoded representation of the input
encoded = Dense(encoding_dim, activation='relu', name="encoded")(dropout)
# "decoded" is the lossy reconstruction of the input
decoded = Dense(x[0].shape[0], activation='sigmoid', name="decoded")(encoded)
autoencoder = Model(input_data, decoded)
# compile the model
autoencoder.compile(optimizer='adam', loss='binary_crossentropy', metrics=metrics)
# if return encoder in the encoded variable
if encoded_as_model:
encoded = Model(input_data, encoded)
return autoencoder, encoded
###Output
_____no_output_____
###Markdown
Train autoencoder on data:
###Code
clear_session_and_set_seeds()
uc_ae, uc_enc = undercomplete_ae(phase_all_train, encoding_dim=encoding_dim, encoded_as_model=True)
uc_ae.fit(phase_all_train, phase_all_train,
validation_data=(phase_all_valid, phase_all_valid),
batch_size=batch_size,
shuffle=True,
epochs=epochs,
verbose=0)
###Output
_____no_output_____
###Markdown
Plot signal, reconstruction and encoded representation:
###Code
data2 = uc_ae.predict(phase_all_valid)
data3 = uc_enc.predict(phase_all_valid)
compare_plot_n(phase_all_valid, data2, data3)
###Output
_____no_output_____
###Markdown
Store the encoded representations in the data dictionary:
###Code
for ident in data_dict:
tmpDataDict = data_dict[ident]
# read data
phase = tmpDataDict['phase']
uc_data = uc_enc.predict(phase)
# load data into dictionary
tmpDataDict['undercomplete_encoded'] = uc_data
###Output
_____no_output_____
###Markdown
Sparse Autoencoder from https://blog.keras.io/building-autoencoders-in-keras.html
###Code
def sparse_ae(x, encoding_dim=64, encoded_as_model=False):
# Simplest possible autoencoder from https://blog.keras.io/building-autoencoders-in-keras.html
# this is our input placeholder
input_data = Input(shape=x[0].shape, name="input")
dropout = Dropout(0.125, name="dropout", seed=42) (input_data)
# "encoded" is the encoded representation of the input
# add a sparsity constraint
encoded = Dense(encoding_dim, activation='relu', name="encoded",
activity_regularizer=regularizers.l1(10e-5))(dropout)
# "decoded" is the lossy reconstruction of the input
decoded = Dense(x[0].shape[0], activation='sigmoid', name="decoded")(encoded)
# this model maps an input to its reconstruction
autoencoder = Model(input_data, decoded, name="sparse_ae")
# compile the model
autoencoder.compile(optimizer='adam', loss='binary_crossentropy', metrics=metrics)
# if return encoder in the encoded variable
if encoded_as_model:
encoded = Model(input_data, encoded)
return autoencoder, encoded
###Output
_____no_output_____
###Markdown
Train autoencoder on data:
###Code
clear_session_and_set_seeds()
sp_ae, sp_enc = sparse_ae(phase_all_train, encoding_dim=encoding_dim, encoded_as_model=True)
sp_ae.fit(phase_all_train, phase_all_train,
validation_data=(phase_all_valid, phase_all_valid),
batch_size=batch_size,
shuffle=True,
epochs=epochs,
verbose=0)
###Output
_____no_output_____
###Markdown
Plot signal, reconstruction and encoded representation:
###Code
data2 = sp_ae.predict(phase_all_valid)
data3 = sp_enc.predict(phase_all_valid)
compare_plot_n(phase_all_valid, data2, data3)
###Output
_____no_output_____
###Markdown
Store the encoded representations in the data dictionary:
###Code
for ident in data_dict:
tmpDataDict = data_dict[ident]
# read data
phase = tmpDataDict['phase']
sp_data = sp_enc.predict(phase)
# load data into dictionary
tmpDataDict['sparse_encoded'] = sp_data
###Output
_____no_output_____
###Markdown
Deep Autoencoder from https://blog.keras.io/building-autoencoders-in-keras.html
###Code
def deep_ae(x, enc_layers=[512,128], encoding_dim=64, dec_layers=[128,512], encoded_as_model=False):
# From https://www.tensorflow.org/guide/keras/functional#use_the_same_graph_of_layers_to_define_multiple_models
input_data = keras.Input(shape=x[0].shape, name="normalized_signal")
model = Dropout(0.125, name="dropout", autocast=False, seed=42)(input_data)
for i in enumerate(enc_layers):
model = Dense(i[1], activation="relu", name="dense_enc_" + str(i[0]+1))(model)
encoded_output = Dense(encoding_dim, activation="relu", name="encoded_signal")(model)
encoded = encoded_output
model = layers.Dense(dec_layers[0], activation="sigmoid", name="dense_dec_1")(encoded_output)
for i in enumerate(dec_layers[1:]):
model = Dense(i[1], activation="sigmoid", name="dense_dec_" + str(i[0]+2))(model)
decoded_output = Dense(x[0].shape[0], activation="sigmoid", name="reconstructed_signal")(model)
autoencoder = Model(input_data, decoded_output, name="autoencoder")
# compile the model
autoencoder.compile(optimizer='adam', loss='binary_crossentropy', metrics=metrics)
# if return encoder in the encoded variable
if encoded_as_model:
encoded = Model(input_data, encoded)
return autoencoder, encoded
###Output
_____no_output_____
###Markdown
Train autoencoder on data:
###Code
clear_session_and_set_seeds()
de_ae, de_enc = deep_ae(phase_all_train, encoding_dim=encoding_dim, encoded_as_model=True)
de_ae.fit(phase_all_train, phase_all_train,
validation_data=(phase_all_valid, phase_all_valid),
batch_size=batch_size,
shuffle=True,
epochs=epochs,
verbose=0)
###Output
_____no_output_____
###Markdown
Plot signal, reconstruction and encoded representation:
###Code
data2 = de_ae.predict(phase_all_valid)
data3 = de_enc.predict(phase_all_valid)
compare_plot_n(phase_all_valid, data2, data3)
###Output
_____no_output_____
###Markdown
Store the encoded representations in the data dictionary:
###Code
for ident in data_dict:
tmpDataDict = data_dict[ident]
# read data
phase = tmpDataDict['phase']
de_data = de_enc.predict(phase)
# load data into dictionary
tmpDataDict['deep_encoded'] = de_data
###Output
_____no_output_____
###Markdown
Helper function to get data from the dictionary:
###Code
def get_ident_data_from_dict(idents, data_dict):
# Initialize data variables
y = y_shape.copy()
phase = phase_shape.copy()
breathing = breathing_shape.copy()
heartbeat = heartbeat_shape.copy()
combined = combined_shape.copy()
undercomplete_encoded = ae_encoded_shape.copy()
sparse_encoded = ae_encoded_shape.copy()
deep_encoded = ae_encoded_shape.copy()
# Stack data form each ident into the variables
for tmp_id in idents:
y = np.vstack((y, data_dict[tmp_id]['y']))
phase = np.vstack((phase, data_dict[tmp_id]['phase']))
breathing = np.vstack((breathing, data_dict[tmp_id]['breathing']))
heartbeat = np.vstack((heartbeat, data_dict[tmp_id]['heartbeat']))
combined = np.vstack((combined, data_dict[tmp_id]['combined']))
undercomplete_encoded = np.vstack((undercomplete_encoded, data_dict[tmp_id]['undercomplete_encoded']))
sparse_encoded = np.vstack((sparse_encoded, data_dict[tmp_id]['sparse_encoded']))
deep_encoded = np.vstack((deep_encoded, data_dict[tmp_id]['deep_encoded']))
return y, phase, breathing, heartbeat, combined, undercomplete_encoded, sparse_encoded, deep_encoded
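# Example usage (e.g. for a single participant id from the idents list); the tuple unpacks in the order above:
# y, phase, br, hb, comb, uc_enc, sp_enc, de_enc = get_ident_data_from_dict(['2gu87'], data_dict)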
###Output
_____no_output_____
###Markdown
Classifiers Helper loop function definition A function that loops over all the data and calls the classifiers with it then stores the returned accuracies.
###Code
def helper_loop(classifier_function_train, idents, n=5, num_loops_to_average_over=1, should_scale_data=True):
#returns a dictionary with accuracies
# set the variables in the dictionary
accs = {}
accs['phase'] = []
accs['breathing'] = []
accs['heartbeat'] = []
accs['combined br hb'] = []
accs['undercomplete'] = []
accs['sparse'] = []
accs['deep'] = []
accs['test id'] = []
start_time = datetime.now()
# leave out person out validation
for i in range(n):
# print current iteration and time elapsed from start
print("iteration:", i+1, "of", n, "; time elapsed:", datetime.now()-start_time)
## ----- Data preparation:
validation_idents = [idents[i]]
test_idents = [idents[i-1]]
train_idents = []
for ident in idents:
if (ident not in test_idents) and (ident not in validation_idents):
train_idents.append(ident)
# save test id to see which id yielded which accuracies
accs['test id'].append(test_idents[0])
# Load train data
train_data = get_ident_data_from_dict(train_idents, data_dict)
y_train = train_data[0]
# Load validation data
valid_data = get_ident_data_from_dict(validation_idents, data_dict)
y_valid = valid_data[0]
# Load test data
test_data = get_ident_data_from_dict(test_idents, data_dict)
y_test = test_data[0]
data_names_by_index = ['y', 'phase', 'breathing', 'heartbeat',
'combined br hb', 'undercomplete', 'sparse', 'deep']
# Loop over all data that will be used for classification and send it to the classifier
# index 0 is y so we skip it
for index in range(1, len(test_data)):
clear_session_and_set_seeds()
train_x = train_data[index]
valid_x = valid_data[index]
test_x = test_data[index]
# Scale data
if should_scale_data:
# Scale with standard scaler
sscaler = StandardScaler()
sscaler.fit(train_x)
train_x = sscaler.transform(train_x)
# Scale valid and test with train's scaler
valid_x = sscaler.transform(valid_x)
test_x = sscaler.transform(test_x)
# Initialize variables
tmp_acc = []
data_name = data_names_by_index[index]
for tmp_index in range(num_loops_to_average_over):
curr_acc = classifier_function_train(train_x, y_train, valid_x, y_valid, test_x, y_test, data_name)
tmp_acc.append(curr_acc)
# Store accuracy
curr_acc = np.mean(tmp_acc)
accs[data_name].append(curr_acc)
# Print total time required to run this
end_time = datetime.now()
elapsed_time = end_time - start_time
print("Completed!", "Time elapsed:", elapsed_time)
return accs
###Output
_____no_output_____
###Markdown
Simple dense classifier Define the classifier:
###Code
params_dense_phase = {
'dropout': 0.3,
'hidden_size': 28,
'activation': 'sigmoid',
'loss': 'binary_crossentropy',
'optimizer': Adam,
'batch_size': 128,
'learning_rate': 0.001,
'epochs': 300
}
params_dense_br_hb = {
'dropout': 0.05,
'hidden_size': 24,
'activation': 'sigmoid',
'loss': 'poisson',
'optimizer': Nadam,
'learning_rate': 0.05,
'batch_size': 128,
'epochs': 200
}
params_dense_ae_enc = {
'dropout': 0.1,
'hidden_size': 30,
'activation': 'relu',
'loss': 'binary_crossentropy',
'optimizer': Adam,
'learning_rate': 0.01,
'batch_size': 106,
'epochs': 300
}
def dense_train(x_train, y_train, x_valid, y_valid, x_test, y_test, data_name):
params = params_dense_br_hb
if (data_name == 'phase'):
params = params_dense_phase
if (data_name == 'undercomplete' or data_name == 'sparse' or data_name == 'deep'):
params = params_dense_ae_enc
# Define the model
model = Sequential()
model.add(Dropout(params['dropout']))
model.add(Dense(params['hidden_size']))
model.add(Activation(params['activation']))
model.add(Dense(1))
model.add(Activation('sigmoid'))
# Compile the model
model.compile(loss=params['loss'],
optimizer=params['optimizer'](learning_rate=params['learning_rate']),
metrics=metrics)
# Train the model and return the accuracy
sc, curr_acc, epoch_data = model_train(model, x_train, y_train, params['batch_size'], params['epochs'],
x_valid, y_valid, x_test, y_test)
return curr_acc
###Output
_____no_output_____
###Markdown
Combine the autoencoders with the classifier:
###Code
accs = helper_loop(dense_train, idents, n)
accuracies['simple_dense'] = accs
# print accuracies of each method and corresponding id which yielded that accuracy (same row)
pandas.DataFrame.from_dict(accs)
# print some statistics for each method
print_accs_stats(accs)
###Output
phase breathing heartbeat combined br hb undercomplete sparse deep
min 0.000000 0.000000 0.000000 0.000000 0.00000 0.000000 0.000000
max 1.000000 1.000000 1.000000 1.000000 1.00000 1.000000 1.000000
mean 0.454751 0.516553 0.564991 0.485751 0.45051 0.355647 0.399878
median 0.500000 0.500000 0.571429 0.500000 0.50000 0.416667 0.333333
###Markdown
LSTM-based classifier based on the original author's code
###Code
params_lstm_phase = {
'kernel_size': 4,
'filters': 32,
'strides': 2,
'pool_size': 4,
'dropout': 0.01,
'lstm_output_size': 22,
'activation': 'relu',
'last_activation': 'sigmoid',
'loss': 'poisson',
'optimizer': Nadam,
'learning_rate': 0.005,
'batch_size': 186,
'epochs': 200
}
params_lstm_br_hb = {
'kernel_size': 2,
'filters': 12,
'strides': 2,
'pool_size': 1,
'dropout': 0.01,
'lstm_output_size': 64,
'activation': 'relu',
'last_activation': 'sigmoid',
'loss': 'poisson',
'optimizer': Nadam,
'learning_rate': 0.001,
'batch_size': 256,
'epochs': 100
}
params_lstm_ae_enc = {
'kernel_size': 2,
'filters': 6,
'strides': 2,
'pool_size': 2,
'dropout': 0.01,
'lstm_output_size': 32,
'activation': 'relu',
'last_activation': 'sigmoid',
'loss': 'poisson',
'optimizer': Nadam,
'learning_rate': 0.001,
'batch_size': 64,
'epochs': 100
}
def LSTM_train(x_train, y_train, x_valid, y_valid, x_test, y_test, data_name):
params = params_lstm_br_hb
if (data_name == 'phase'):
params = params_lstm_phase
if (data_name == 'undercomplete' or data_name == 'sparse' or data_name == 'deep'):
params = params_lstm_ae_enc
# Reshape data to fit some layers
xt_train = x_train.reshape(-1, x_train[0].shape[0], 1)
xt_valid = x_valid.reshape(-1, x_valid[0].shape[0], 1)
xt_test = x_test.reshape(-1, x_test[0].shape[0], 1)
# Define the model
model = Sequential()
model.add(Dropout(params['dropout']))
model.add(Conv1D(params['filters'],
params['kernel_size'],
padding='valid',
activation=params['activation'],
strides=params['strides']))
model.add(MaxPooling1D(pool_size=params['pool_size']))
if (data_name == 'phase'):
model.add(Conv1D(params['filters'],
params['kernel_size'],
padding='valid',
activation=params['activation'],
strides=params['strides']))
model.add(MaxPooling1D(pool_size=params['pool_size']))
model.add(Dropout(params['dropout']))
model.add(LSTM(params['lstm_output_size']))
model.add(Dense(1))
model.add(Activation(params['last_activation']))
# Compile the model
model.compile(loss=params['loss'],
optimizer=params['optimizer'](learning_rate=params['learning_rate']),
metrics=['acc'])
# Train the model and return the accuracy
sc, curr_acc, epoch_data = model_train(model, xt_train, y_train, params['batch_size'], params['epochs'],
xt_valid, y_valid, xt_test, y_test)
return curr_acc
###Output
_____no_output_____
###Markdown
Combine the autoencoders with the classifier:
###Code
accs = helper_loop(LSTM_train, idents, n=n)
accuracies['LSTM'] = accs
# print accuracies of each method and corresponding id which yielded that accuracy (same row)
pandas.DataFrame.from_dict(accs)
# print some statistics for each method
print_accs_stats(accs)
###Output
phase breathing heartbeat combined br hb undercomplete sparse deep
min 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
max 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
mean 0.384138 0.548134 0.514848 0.513065 0.364427 0.357955 0.392747
median 0.411765 0.500000 0.500000 0.500000 0.500000 0.500000 0.411765
###Markdown
kNN
###Code
params_knn_phase = {
'n_neighbors': 3,
'metric': 'l2'
}
params_knn_br_hb = {
'n_neighbors': 15,
'metric': 'cosine'
}
params_knn_ae_enc = {
'n_neighbors': 5,
'metric': 'manhattan'
}
from sklearn.neighbors import KNeighborsClassifier
def KNN_classifier(params):
model = KNeighborsClassifier(n_neighbors=params['n_neighbors'], metric=params['metric'])
return model
def KNN_train(x_train, y_train, x_valid, y_valid, x_test, y_test, data_name):
params = params_knn_br_hb
if (data_name == 'phase'):
params = params_knn_phase
if (data_name == 'undercomplete' or data_name == 'sparse' or data_name == 'deep'):
params = params_knn_ae_enc
model = KNN_classifier(params)
model.fit(x_train, y_train.ravel())
curr_acc = np.sum(model.predict(x_test) == y_test.ravel()) / len(y_test.ravel())
return curr_acc
###Output
_____no_output_____
###Markdown
Combine the autoencoders with the classifier:
###Code
accs = helper_loop(KNN_train, idents, n)
accuracies['kNN'] = accs
# print accuracies of each method and corresponding id which yielded that accuracy (same row)
pandas.DataFrame.from_dict(accs)
# print some statistics for each method
print_accs_stats(accs)
###Output
phase breathing heartbeat combined br hb undercomplete sparse deep
min 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
max 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
mean 0.446749 0.417928 0.513782 0.478352 0.391145 0.426932 0.358101
median 0.500000 0.428571 0.500000 0.450000 0.411765 0.473684 0.350000
###Markdown
SVC
###Code
params_svc_phase = {
'C': 3,
'kernel': 'rbf',
'gamma': 'scale'
}
params_svc_br_hb = {
'C': 5,
'kernel': 'poly',
'gamma': 'scale'
}
params_svc_ae_enc = {
'C': 5,
'kernel': 'rbf',
'gamma': 'scale'
}
from sklearn.svm import SVC
def SVC_classifier(params):
model = SVC(random_state=42, C=params['C'], kernel=params['kernel'], gamma=params['gamma'])
return model
def SVC_train(x_train, y_train, x_valid, y_valid, x_test, y_test, data_name):
params = params_svc_br_hb
if (data_name == 'phase'):
params = params_svc_phase
if (data_name == 'undercomplete' or data_name == 'sparse' or data_name == 'deep'):
params = params_svc_ae_enc
model = SVC_classifier(params)
model.fit(x_train, y_train.ravel())
curr_acc = np.sum(model.predict(x_test) == y_test.ravel()) / len(y_test.ravel())
return curr_acc
###Output
_____no_output_____
###Markdown
Combine the autoencoders with the classifier:
###Code
accs = helper_loop(SVC_train, idents, n)
accuracies['SVC'] = accs
# print accuracies of each method and corresponding id which yielded that accuracy (same row)
pandas.DataFrame.from_dict(accs)
# print some statistics for each method
print_accs_stats(accs)
###Output
phase breathing heartbeat combined br hb undercomplete sparse deep
min 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
max 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
mean 0.428034 0.505187 0.416067 0.486185 0.448346 0.375208 0.434258
median 0.500000 0.500000 0.400000 0.500000 0.500000 0.411765 0.450000
###Markdown
Random Forest
###Code
params_rf_phase = {
'n_estimators': 190,
'max_depth': 50,
'min_samples_split': 4,
'min_samples_leaf': 2,
'oob_score': False,
'ccp_alpha': 0.005
}
params_rf_br_hb = {
'n_estimators': 190,
'max_depth': 20,
'min_samples_split': 3,
'min_samples_leaf': 3,
'oob_score': True,
'ccp_alpha': 0.015
}
params_rf_ae_enc = {
'n_estimators': 130,
'max_depth': 100,
'min_samples_split': 5,
'min_samples_leaf': 5,
'oob_score': True,
'ccp_alpha': 0.005
}
from sklearn.ensemble import RandomForestClassifier
def random_forest_classifier(params):
model = RandomForestClassifier(random_state=42,
n_estimators = params['n_estimators'],
criterion = 'entropy',
max_depth = params['max_depth'],
min_samples_split = params['min_samples_split'],
min_samples_leaf = params['min_samples_leaf'],
oob_score = params['oob_score'],
ccp_alpha = params['ccp_alpha'],
max_features = 'log2',
bootstrap = True)
return model
def random_forest_train(x_train, y_train, x_valid, y_valid, x_test, y_test, data_name):
params = params_rf_br_hb
if (data_name == 'phase'):
params = params_rf_phase
if (data_name == 'undercomplete' or data_name == 'sparse' or data_name == 'deep'):
params = params_rf_ae_enc
model = random_forest_classifier(params)
model.fit(x_train, y_train.ravel())
curr_acc = np.sum(model.predict(x_test) == y_test.ravel()) / len(y_test.ravel())
return curr_acc
###Output
_____no_output_____
###Markdown
Combine the autoencoders with the classifier:
###Code
accs = helper_loop(random_forest_train, idents, n, should_scale_data=False)
accuracies['random_forest'] = accs
# print accuracies of each method and corresponding id which yielded that accuracy (same row)
pandas.DataFrame.from_dict(accs)
# print some statistics for each method
print_accs_stats(accs)
###Output
phase breathing heartbeat combined br hb undercomplete sparse deep
min 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
max 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
mean 0.311804 0.450447 0.478354 0.424564 0.437201 0.386870 0.380356
median 0.350000 0.500000 0.500000 0.450000 0.500000 0.411765 0.400000
###Markdown
Naive Bayesian
###Code
from sklearn.naive_bayes import GaussianNB
def naive_bayesian_classifier():
model = GaussianNB()
return model
def naive_bayesian_train(x_train, y_train, x_valid, y_valid, x_test, y_test, data_name):
model = naive_bayesian_classifier()
model.fit(x_train, y_train.ravel())
curr_acc = np.sum(model.predict(x_test) == y_test.ravel()) / len(y_test.ravel())
return curr_acc
###Output
_____no_output_____
###Markdown
Combine the autoencoders with the classifier:
###Code
accs = helper_loop(naive_bayesian_train, idents, n)
accuracies['naive_bayesian'] = accs
# print accuracies of each method and corresponding id which yielded that accuracy (same row)
pandas.DataFrame.from_dict(accs)
# print some statistics for each method
print_accs_stats(accs)
###Output
phase breathing heartbeat combined br hb undercomplete sparse deep
min 0.000000 0.00000 0.000000 0.000000 0.000000 0.000000 0.000000
max 1.000000 1.00000 1.000000 1.000000 1.000000 1.000000 1.000000
mean 0.442435 0.49271 0.545921 0.575899 0.362728 0.358151 0.430943
median 0.500000 0.50000 0.500000 0.500000 0.428571 0.428571 0.500000
###Markdown
XGBoost
###Code
params_xgb_phase = {
'n_estimators': 50,
'max_depth': 50,
'booster': 'gbtree'
}
params_xgb_br_hb = {
'n_estimators': 50,
'max_depth': 4,
'booster': 'gbtree'
}
params_xgb_ae_enc = {
'n_estimators': 130,
'max_depth': 4,
'booster': 'gbtree'
}
from xgboost import XGBClassifier
def XGBoost_classifier(params):
model = XGBClassifier(random_state=42,
n_estimators=params['n_estimators'],
max_depth=params['max_depth'])
return model
def XGBoost_train(x_train, y_train, x_valid, y_valid, x_test, y_test, data_name):
params = params_xgb_br_hb
if (data_name == 'phase'):
params = params_xgb_phase
if (data_name == 'undercomplete' or data_name == 'sparse' or data_name == 'deep'):
params = params_xgb_ae_enc
model = XGBoost_classifier(params)
model.fit(x_train, y_train.ravel())
curr_acc = np.sum(model.predict(x_test) == y_test.ravel()) / len(y_test.ravel())
return curr_acc
###Output
_____no_output_____
###Markdown
Combine the autoencoders with the classifier:
###Code
accs = helper_loop(XGBoost_train, idents, n, should_scale_data=False)
accuracies['XGBoost'] = accs
# print accuracies of each method and corresponding id which yielded that accuracy (same row)
pandas.DataFrame.from_dict(accs)
# print some statistics for each method
print_accs_stats(accs)
###Output
phase breathing heartbeat combined br hb undercomplete sparse deep
min 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
max 0.950000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
mean 0.363422 0.489298 0.476912 0.524146 0.466902 0.392696 0.433894
median 0.416667 0.473684 0.500000 0.500000 0.500000 0.411765 0.450000
###Markdown
Compare Accuracies Save all accuracies to the results CSV file:
###Code
results_path = "../../results/LvH/LvH-NC.csv"
# Make a dataframe from the accuracies
accs_dataframe = pandas.DataFrame(accuracies).T
# Save dataframe to file
accs_dataframe.to_csv(results_path, mode='w')
###Output
_____no_output_____
###Markdown
Print min, max, mean, median for each classifier/autoencoder combination:
###Code
for classifier in accuracies:
print("-----------", classifier + ":", "-----------")
accs = accuracies[classifier]
print_accs_stats(accs)
print("\n")
###Output
----------- simple_dense: -----------
phase breathing heartbeat combined br hb undercomplete sparse deep
min 0.000000 0.000000 0.000000 0.000000 0.00000 0.000000 0.000000
max 1.000000 1.000000 1.000000 1.000000 1.00000 1.000000 1.000000
mean 0.454751 0.516553 0.564991 0.485751 0.45051 0.355647 0.399878
median 0.500000 0.500000 0.571429 0.500000 0.50000 0.416667 0.333333
----------- LSTM: -----------
phase breathing heartbeat combined br hb undercomplete sparse deep
min 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
max 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
mean 0.384138 0.548134 0.514848 0.513065 0.364427 0.357955 0.392747
median 0.411765 0.500000 0.500000 0.500000 0.500000 0.500000 0.411765
----------- kNN: -----------
phase breathing heartbeat combined br hb undercomplete sparse deep
min 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
max 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
mean 0.446749 0.417928 0.513782 0.478352 0.391145 0.426932 0.358101
median 0.500000 0.428571 0.500000 0.450000 0.411765 0.473684 0.350000
----------- SVC: -----------
phase breathing heartbeat combined br hb undercomplete sparse deep
min 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
max 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
mean 0.428034 0.505187 0.416067 0.486185 0.448346 0.375208 0.434258
median 0.500000 0.500000 0.400000 0.500000 0.500000 0.411765 0.450000
----------- random_forest: -----------
phase breathing heartbeat combined br hb undercomplete sparse deep
min 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
max 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
mean 0.311804 0.450447 0.478354 0.424564 0.437201 0.386870 0.380356
median 0.350000 0.500000 0.500000 0.450000 0.500000 0.411765 0.400000
----------- naive_bayesian: -----------
phase breathing heartbeat combined br hb undercomplete sparse deep
min 0.000000 0.00000 0.000000 0.000000 0.000000 0.000000 0.000000
max 1.000000 1.00000 1.000000 1.000000 1.000000 1.000000 1.000000
mean 0.442435 0.49271 0.545921 0.575899 0.362728 0.358151 0.430943
median 0.500000 0.50000 0.500000 0.500000 0.428571 0.428571 0.500000
----------- XGBoost: -----------
phase breathing heartbeat combined br hb undercomplete sparse deep
min 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
max 0.950000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
mean 0.363422 0.489298 0.476912 0.524146 0.466902 0.392696 0.433894
median 0.416667 0.473684 0.500000 0.500000 0.500000 0.411765 0.450000
###Markdown
Print all accuracies in table form:
###Code
for classifier in accuracies:
print(classifier + ":")
# print(pandas.DataFrame.from_dict(accuracies[classifier]))
    # Using .to_string() gives nicer looking results (doesn't wrap rows onto new lines)
print(pandas.DataFrame.from_dict(accuracies[classifier]).to_string())
print("\n")
###Output
simple_dense:
phase breathing heartbeat combined br hb undercomplete sparse deep test id
0 1.000000 1.000000 1.000000 1.000000 1.000000 0.000000 0.333333 62i9y
1 0.000000 0.000000 0.100000 0.000000 0.000000 0.000000 0.000000 2gu87
2 0.285714 1.000000 0.571429 1.000000 0.714286 0.428571 0.285714 iz2ps
3 0.500000 0.375000 0.000000 0.250000 0.000000 0.000000 0.375000 1mpau
4 0.800000 0.650000 0.450000 0.750000 0.450000 0.450000 0.200000 7dwjy
5 0.000000 0.500000 0.300000 0.600000 0.500000 0.500000 0.250000 7swyk
6 1.000000 1.000000 1.000000 0.500000 1.000000 1.000000 1.000000 94mnx
7 0.117647 0.411765 0.352941 0.529412 0.352941 0.411765 0.294118 bd47a
8 0.722222 0.555556 1.000000 0.555556 0.555556 0.611111 0.500000 c24ur
9 0.000000 0.700000 0.300000 0.100000 0.000000 0.000000 0.400000 ctsax
10 0.750000 0.150000 0.750000 1.000000 0.500000 0.800000 0.250000 dkhty
11 0.416667 0.750000 0.750000 0.500000 0.666667 0.416667 0.333333 e4gay
12 0.500000 0.100000 0.550000 0.500000 0.350000 0.700000 1.000000 ef5rq
13 0.500000 0.600000 0.600000 0.850000 0.000000 0.050000 0.450000 f1gjp
14 0.300000 0.500000 0.650000 0.500000 0.350000 0.200000 0.500000 hpbxa
15 0.500000 0.500000 0.750000 0.500000 0.500000 0.500000 0.300000 pmyfl
16 0.500000 0.500000 0.300000 0.200000 0.000000 0.000000 0.000000 r89k1
17 0.600000 0.000000 0.900000 0.000000 1.000000 0.700000 0.900000 tn4vl
18 0.411765 0.764706 0.294118 0.529412 0.411765 0.411765 0.294118 td5pr
19 0.500000 0.550000 0.650000 0.350000 0.500000 0.500000 0.450000 gyqu9
20 0.350000 0.500000 1.000000 0.500000 0.500000 0.500000 0.450000 fzchw
21 0.105263 0.473684 0.526316 0.157895 0.210526 0.000000 0.631579 l53hg
22 0.600000 0.300000 0.200000 0.300000 0.800000 0.000000 0.000000 3n2f9
LSTM:
phase breathing heartbeat combined br hb undercomplete sparse deep test id
0 0.000000 1.000000 1.000000 1.000000 0.000000 0.000000 0.166667 62i9y
1 0.000000 0.000000 0.100000 0.000000 0.000000 0.000000 0.000000 2gu87
2 0.071429 0.500000 0.571429 1.000000 0.500000 0.571429 0.000000 iz2ps
3 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 1mpau
4 0.500000 0.650000 0.850000 0.850000 0.500000 0.500000 0.250000 7dwjy
5 1.000000 0.500000 0.300000 0.500000 0.500000 0.500000 1.000000 7swyk
6 0.000000 1.000000 0.500000 1.000000 0.000000 0.000000 0.000000 94mnx
7 0.588235 0.705882 0.529412 0.823529 0.529412 0.411765 0.411765 bd47a
8 0.444444 0.611111 0.833333 0.555556 0.555556 0.555556 1.000000 c24ur
9 0.000000 1.000000 0.300000 0.000000 0.000000 0.000000 0.000000 ctsax
10 1.000000 0.550000 0.900000 0.500000 0.550000 1.000000 1.000000 dkhty
11 0.166667 0.500000 0.333333 0.333333 0.166667 0.166667 0.166667 e4gay
12 0.500000 0.500000 0.600000 0.800000 0.500000 0.500000 1.000000 ef5rq
13 0.100000 0.500000 0.450000 0.500000 0.000000 0.000000 0.000000 f1gjp
14 0.500000 0.600000 0.350000 0.700000 0.500000 0.500000 0.500000 hpbxa
15 0.400000 0.500000 1.000000 0.500000 0.500000 0.500000 0.500000 pmyfl
16 0.300000 0.800000 0.000000 0.300000 0.000000 0.000000 0.000000 r89k1
17 0.800000 0.300000 0.800000 0.000000 0.800000 0.500000 0.100000 tn4vl
18 0.411765 0.705882 0.352941 0.411765 0.411765 0.411765 0.411765 td5pr
19 0.500000 0.500000 0.900000 0.500000 0.500000 0.500000 0.500000 gyqu9
20 0.500000 0.500000 0.650000 0.000000 0.500000 0.500000 0.500000 fzchw
21 0.052632 0.684211 0.421053 0.526316 0.368421 0.315789 0.526316 l53hg
22 1.000000 0.000000 0.100000 1.000000 1.000000 0.800000 1.000000 3n2f9
kNN:
phase breathing heartbeat combined br hb undercomplete sparse deep test id
0 1.000000 1.000000 1.000000 1.000000 0.500000 0.500000 0.166667 62i9y
1 0.000000 0.000000 0.100000 0.700000 0.000000 0.200000 0.000000 2gu87
2 0.428571 0.428571 0.571429 0.500000 0.785714 0.500000 0.142857 iz2ps
3 0.000000 0.000000 0.000000 0.000000 0.250000 0.500000 0.125000 1mpau
4 0.500000 0.350000 0.800000 0.650000 0.350000 0.350000 0.100000 7dwjy
5 1.000000 0.500000 0.350000 0.600000 0.500000 0.600000 0.350000 7swyk
6 0.000000 1.000000 0.500000 1.000000 0.000000 1.000000 0.500000 94mnx
7 0.764706 0.470588 0.529412 0.647059 0.352941 0.411765 0.294118 bd47a
8 1.000000 0.722222 0.666667 0.555556 0.555556 0.722222 0.888889 c24ur
9 0.000000 0.100000 0.300000 0.300000 0.000000 0.000000 0.200000 ctsax
10 0.500000 0.050000 0.850000 0.650000 0.500000 0.500000 0.350000 dkhty
11 0.583333 0.416667 0.416667 0.416667 0.666667 0.500000 0.583333 e4gay
12 1.000000 0.400000 0.400000 0.450000 1.000000 1.000000 1.000000 ef5rq
13 0.000000 0.500000 0.500000 0.500000 0.000000 0.000000 0.000000 f1gjp
14 0.350000 0.450000 0.500000 0.400000 0.350000 0.350000 0.500000 hpbxa
15 0.000000 0.500000 0.700000 0.500000 0.000000 0.000000 0.150000 pmyfl
16 0.000000 0.200000 0.000000 0.300000 0.500000 0.400000 0.100000 r89k1
17 1.000000 0.300000 0.100000 0.000000 0.400000 0.400000 0.900000 tn4vl
18 0.411765 0.705882 0.411765 0.411765 0.411765 0.411765 0.411765 td5pr
19 0.500000 0.450000 0.900000 0.450000 0.500000 0.500000 0.500000 gyqu9
20 0.500000 0.500000 0.900000 0.250000 0.500000 0.500000 0.500000 fzchw
21 0.736842 0.368421 0.421053 0.421053 0.473684 0.473684 0.473684 l53hg
22 0.000000 0.200000 0.900000 0.300000 0.400000 0.000000 0.000000 3n2f9
SVC:
phase breathing heartbeat combined br hb undercomplete sparse deep test id
0 0.000000 1.000000 1.000000 0.333333 0.000000 0.000000 1.000000 62i9y
1 0.000000 0.000000 0.000000 0.100000 0.000000 0.000000 0.000000 2gu87
2 0.500000 0.714286 0.428571 1.000000 0.571429 0.642857 0.571429 iz2ps
3 0.000000 0.000000 0.250000 0.375000 0.000000 0.000000 0.000000 1mpau
4 0.500000 0.450000 0.950000 0.650000 0.500000 0.500000 0.050000 7dwjy
5 0.500000 0.500000 0.000000 0.550000 1.000000 1.000000 0.500000 7swyk
6 1.000000 1.000000 0.000000 1.000000 1.000000 0.000000 0.000000 94mnx
7 0.294118 0.294118 0.294118 0.588235 0.470588 0.352941 0.411765 bd47a
8 0.555556 1.000000 0.500000 0.555556 0.555556 0.555556 1.000000 c24ur
9 0.000000 0.700000 0.300000 0.800000 0.000000 0.000000 0.000000 ctsax
10 1.000000 0.550000 0.500000 0.650000 0.500000 1.000000 0.900000 dkhty
11 0.083333 0.333333 0.166667 0.166667 0.750000 0.166667 0.166667 e4gay
12 0.500000 0.500000 0.450000 0.900000 0.500000 0.300000 1.000000 ef5rq
13 0.500000 0.500000 0.250000 0.700000 0.000000 0.000000 0.450000 f1gjp
14 0.500000 0.200000 0.400000 0.500000 0.500000 0.500000 0.500000 hpbxa
15 0.500000 1.000000 0.750000 0.900000 0.500000 0.500000 0.400000 pmyfl
16 0.000000 0.600000 0.000000 0.100000 0.000000 0.000000 0.100000 r89k1
17 1.000000 1.000000 0.800000 0.000000 1.000000 0.700000 1.000000 tn4vl
18 0.411765 0.411765 0.411765 0.352941 0.411765 0.411765 0.411765 td5pr
19 0.500000 0.500000 0.750000 0.500000 0.500000 0.500000 0.500000 gyqu9
20 0.500000 0.050000 0.700000 0.250000 0.500000 0.500000 0.500000 fzchw
21 0.000000 0.315789 0.368421 0.210526 0.052632 0.000000 0.526316 l53hg
22 1.000000 0.000000 0.300000 0.000000 1.000000 1.000000 0.000000 3n2f9
random_forest:
phase breathing heartbeat combined br hb undercomplete sparse deep test id
0 0.833333 1.000000 0.166667 0.500000 0.166667 0.500000 0.000000 62i9y
1 0.000000 0.000000 0.100000 0.000000 0.600000 0.100000 0.000000 2gu87
2 0.428571 0.714286 0.500000 0.428571 0.571429 0.428571 0.785714 iz2ps
3 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 1mpau
4 0.500000 0.450000 0.750000 0.800000 0.500000 0.350000 0.050000 7dwjy
5 1.000000 0.550000 0.250000 0.450000 0.500000 0.500000 0.400000 7swyk
6 0.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 94mnx
7 0.176471 0.470588 0.411765 0.470588 0.235294 0.411765 0.470588 bd47a
8 0.555556 0.444444 0.944444 0.777778 0.555556 0.555556 0.555556 c24ur
9 0.000000 0.200000 0.000000 0.500000 0.000000 0.000000 0.000000 ctsax
10 0.500000 0.000000 0.900000 0.600000 0.750000 0.850000 0.400000 dkhty
11 0.250000 0.500000 1.000000 0.666667 0.333333 0.166667 0.416667 e4gay
12 0.500000 0.650000 0.600000 0.500000 0.500000 0.900000 0.950000 ef5rq
13 0.000000 0.500000 0.000000 0.500000 0.000000 0.000000 0.000000 f1gjp
14 0.350000 0.500000 0.550000 0.450000 0.350000 0.350000 0.500000 hpbxa
15 0.350000 0.500000 0.450000 0.500000 0.000000 0.000000 0.250000 pmyfl
16 0.000000 1.000000 0.100000 0.400000 0.800000 0.600000 0.400000 r89k1
17 0.000000 0.200000 0.800000 0.000000 0.800000 0.500000 1.000000 tn4vl
18 0.411765 0.294118 0.352941 0.352941 0.411765 0.411765 0.411765 td5pr
19 0.500000 0.150000 0.700000 0.450000 0.350000 0.300000 0.500000 gyqu9
20 0.500000 0.500000 0.600000 0.050000 0.500000 0.500000 0.500000 fzchw
21 0.315789 0.736842 0.526316 0.368421 0.631579 0.473684 0.157895 l53hg
22 0.000000 0.000000 0.300000 0.000000 0.500000 0.000000 0.000000 3n2f9
naive_bayesian:
phase breathing heartbeat combined br hb undercomplete sparse deep test id
0 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 0.000000 62i9y
1 0.000000 0.000000 1.000000 0.900000 0.000000 0.000000 0.000000 2gu87
2 0.571429 0.428571 0.428571 0.428571 0.428571 0.428571 0.500000 iz2ps
3 1.000000 0.000000 1.000000 1.000000 0.000000 0.000000 0.000000 1mpau
4 0.500000 1.000000 0.500000 0.500000 0.500000 0.500000 0.800000 7dwjy
5 0.500000 0.500000 0.500000 0.500000 0.500000 0.500000 0.500000 7swyk
6 0.000000 0.000000 1.000000 1.000000 0.000000 0.000000 0.000000 94mnx
7 0.470588 0.529412 0.411765 0.411765 0.411765 0.411765 0.588235 bd47a
8 0.555556 0.555556 0.444444 0.444444 0.555556 0.555556 0.555556 c24ur
9 0.000000 1.000000 0.000000 0.000000 0.000000 0.000000 0.000000 ctsax
10 1.000000 0.500000 0.500000 0.500000 1.000000 1.000000 0.600000 dkhty
11 0.166667 0.083333 0.833333 0.833333 0.166667 0.166667 0.166667 e4gay
12 1.000000 0.500000 0.500000 0.500000 0.500000 0.500000 0.500000 ef5rq
13 0.500000 0.500000 0.500000 0.500000 0.500000 0.500000 0.500000 f1gjp
14 0.500000 0.650000 0.500000 0.500000 0.500000 0.500000 0.500000 hpbxa
15 1.000000 0.500000 0.500000 0.500000 0.500000 0.500000 0.500000 pmyfl
16 0.000000 0.200000 1.000000 1.000000 0.000000 0.000000 0.000000 r89k1
17 0.000000 1.000000 0.000000 1.000000 0.000000 0.000000 1.000000 tn4vl
18 0.411765 0.411765 0.411765 0.411765 0.411765 0.411765 0.411765 td5pr
19 0.500000 0.500000 0.500000 0.500000 0.500000 0.500000 0.500000 gyqu9
20 0.500000 0.000000 0.500000 0.500000 0.500000 0.500000 0.500000 fzchw
21 0.000000 0.473684 0.526316 0.315789 0.368421 0.263158 0.789474 l53hg
22 0.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1.000000 3n2f9
XGBoost:
phase breathing heartbeat combined br hb undercomplete sparse deep test id
0 0.666667 1.000000 0.166667 0.333333 0.666667 0.500000 0.333333 62i9y
1 0.100000 0.200000 0.100000 0.000000 0.700000 0.200000 0.600000 2gu87
2 0.500000 0.785714 0.571429 0.500000 0.428571 0.500000 0.785714 iz2ps
3 0.250000 0.000000 0.000000 0.000000 0.375000 0.375000 0.250000 1mpau
4 0.500000 0.100000 0.600000 0.750000 0.400000 0.300000 0.150000 7dwjy
5 0.950000 0.450000 0.400000 0.600000 0.450000 0.250000 0.450000 7swyk
6 0.000000 1.000000 0.500000 1.000000 1.000000 1.000000 0.500000 94mnx
7 0.176471 0.470588 0.411765 0.411765 0.352941 0.235294 0.235294 bd47a
8 0.555556 0.444444 0.722222 1.000000 0.555556 0.555556 0.555556 c24ur
9 0.100000 0.700000 0.400000 0.300000 0.000000 0.000000 0.300000 ctsax
10 0.700000 0.100000 0.900000 1.000000 0.500000 0.750000 0.600000 dkhty
11 0.416667 1.000000 1.000000 0.833333 0.416667 0.333333 0.500000 e4gay
12 0.500000 0.550000 0.700000 0.500000 0.500000 0.550000 0.900000 ef5rq
13 0.000000 0.850000 0.100000 0.800000 0.000000 0.000000 0.000000 f1gjp
14 0.150000 0.500000 0.550000 0.400000 0.300000 0.300000 0.500000 hpbxa
15 0.550000 0.550000 0.500000 0.550000 0.500000 0.050000 0.400000 pmyfl
16 0.000000 1.000000 0.000000 0.900000 0.500000 0.600000 0.400000 r89k1
17 0.200000 0.000000 0.800000 0.000000 0.700000 0.700000 1.000000 tn4vl
18 0.411765 0.529412 0.470588 0.705882 0.411765 0.411765 0.411765 td5pr
19 0.550000 0.300000 0.750000 0.500000 0.350000 0.500000 0.500000 gyqu9
20 0.450000 0.250000 0.600000 0.550000 0.500000 0.500000 0.450000 fzchw
21 0.631579 0.473684 0.526316 0.421053 0.631579 0.421053 0.157895 l53hg
22 0.000000 0.000000 0.200000 0.000000 0.500000 0.000000 0.000000 3n2f9
|
exercises/conventional_RL/monte-carlo/Monte_Carlo_Solution.ipynb | ###Markdown
Monte Carlo MethodsIn this notebook, you will write your own implementations of many Monte Carlo (MC) algorithms. While we have provided some starter code, you are welcome to erase these hints and write your code from scratch. Part 0: Explore BlackjackEnvWe begin by importing the necessary packages.
###Code
import sys
import gym
import numpy as np
from collections import defaultdict
from plot_utils import plot_blackjack_values, plot_policy
###Output
_____no_output_____
###Markdown
Use the code cell below to create an instance of the [Blackjack](https://github.com/openai/gym/blob/master/gym/envs/toy_text/blackjack.py) environment.
###Code
env = gym.make('Blackjack-v0')
###Output
_____no_output_____
###Markdown
Each state is a 3-tuple of:- the player's current sum $\in \{0, 1, \ldots, 31\}$,- the dealer's face up card $\in \{1, \ldots, 10\}$, and- whether or not the player has a usable ace (`no` $=0$, `yes` $=1$).The agent has two potential actions:``` STICK = 0 HIT = 1```Verify this by running the code cell below.
###Code
print(env.observation_space)
print(env.action_space)
###Output
Tuple(Discrete(32), Discrete(11), Discrete(2))
Discrete(2)
###Markdown
Execute the code cell below to play Blackjack with a random policy. (_The code currently plays Blackjack three times - feel free to change this number, or to run the cell multiple times. The cell is designed for you to get some experience with the output that is returned as the agent interacts with the environment._)
###Code
for i_episode in range(3):
state = env.reset()
while True:
print(state)
action = env.action_space.sample()
state, reward, done, info = env.step(action)
if done:
print('End game! Reward: ', reward)
print('You won :)\n') if reward > 0 else print('You lost :(\n')
break
###Output
(11, 4, False)
(21, 4, False)
End game! Reward: -1.0
You lost :(
(10, 10, False)
End game! Reward: 1.0
You won :)
(17, 4, False)
End game! Reward: -1.0
You lost :(
###Markdown
Part 1: MC PredictionIn this section, you will write your own implementation of MC prediction (for estimating the action-value function). We will begin by investigating a policy where the player _almost_ always sticks if the sum of her cards exceeds 18. In particular, she selects action `STICK` with 80% probability if the sum is greater than 18; and, if the sum is 18 or below, she selects action `HIT` with 80% probability. The function `generate_episode_from_limit_stochastic` samples an episode using this policy. The function accepts as **input**:- `bj_env`: This is an instance of OpenAI Gym's Blackjack environment.It returns as **output**:- `episode`: This is a list of (state, action, reward) tuples (of tuples) and corresponds to $(S_0, A_0, R_1, \ldots, S_{T-1}, A_{T-1}, R_{T})$, where $T$ is the final time step. In particular, `episode[i]` returns $(S_i, A_i, R_{i+1})$, and `episode[i][0]`, `episode[i][1]`, and `episode[i][2]` return $S_i$, $A_i$, and $R_{i+1}$, respectively.
###Code
def generate_episode_from_limit_stochastic(bj_env):
episode = []
state = bj_env.reset()
while True:
probs = [0.8, 0.2] if state[0] > 18 else [0.2, 0.8]
action = np.random.choice(np.arange(2), p=probs)
next_state, reward, done, info = bj_env.step(action)
episode.append((state, action, reward))
state = next_state
if done:
break
return episode
###Output
_____no_output_____
###Markdown
Execute the code cell below to play Blackjack with the policy. (*The code currently plays Blackjack three times - feel free to change this number, or to run the cell multiple times. The cell is designed for you to gain some familiarity with the output of the `generate_episode_from_limit_stochastic` function.*)
###Code
for i in range(3):
print(generate_episode_from_limit_stochastic(env))
###Output
[((17, 2, False), 0, 1.0)]
[((18, 9, True), 1, 0.0), ((16, 9, False), 1, -1.0)]
[((17, 1, False), 0, -1.0)]
###Markdown
Now, you are ready to write your own implementation of MC prediction. Feel free to implement either first-visit or every-visit MC prediction; in the case of the Blackjack environment, the techniques are equivalent.Your algorithm has three arguments:- `env`: This is an instance of an OpenAI Gym environment.- `num_episodes`: This is the number of episodes that are generated through agent-environment interaction.- `generate_episode`: This is a function that returns an episode of interaction.- `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`).The algorithm returns as output:- `Q`: This is a dictionary (of one-dimensional arrays) where `Q[s][a]` is the estimated action value corresponding to state `s` and action `a`.
###Code
def mc_prediction_q(env, num_episodes, generate_episode, gamma=1.0):
# initialize empty dictionaries of arrays
returns_sum = defaultdict(lambda: np.zeros(env.action_space.n))
N = defaultdict(lambda: np.zeros(env.action_space.n))
Q = defaultdict(lambda: np.zeros(env.action_space.n))
# loop over episodes
for i_episode in range(1, num_episodes+1):
# monitor progress
if i_episode % 1000 == 0:
print("\rEpisode {}/{}.".format(i_episode, num_episodes), end="")
sys.stdout.flush()
# generate an episode
episode = generate_episode(env)
# obtain the states, actions, and rewards
states, actions, rewards = zip(*episode)
# prepare for discounting
discounts = np.array([gamma**i for i in range(len(rewards)+1)])
# update the sum of the returns, number of visits, and action-value
# function estimates for each state-action pair in the episode
for i, state in enumerate(states):
returns_sum[state][actions[i]] += sum(rewards[i:]*discounts[:-(1+i)])
N[state][actions[i]] += 1.0
Q[state][actions[i]] = returns_sum[state][actions[i]] / N[state][actions[i]]
return Q
###Output
_____no_output_____
###Markdown
Use the cell below to obtain the action-value function estimate $Q$. We have also plotted the corresponding state-value function.To check the accuracy of your implementation, compare the plot below to the corresponding plot in the solutions notebook **Monte_Carlo_Solution.ipynb**.
###Code
# obtain the action-value function
Q = mc_prediction_q(env, 500000, generate_episode_from_limit_stochastic)
# obtain the corresponding state-value function
V_to_plot = dict((k,(k[0]>18)*(np.dot([0.8, 0.2],v)) + (k[0]<=18)*(np.dot([0.2, 0.8],v))) \
for k, v in Q.items())
# plot the state-value function
plot_blackjack_values(V_to_plot)
###Output
Episode 500000/500000.
###Markdown
Part 2: MC ControlIn this section, you will write your own implementation of constant-$\alpha$ MC control. Your algorithm has four arguments:- `env`: This is an instance of an OpenAI Gym environment.- `num_episodes`: This is the number of episodes that are generated through agent-environment interaction.- `alpha`: This is the step-size parameter for the update step.- `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`).The algorithm returns as output:- `Q`: This is a dictionary (of one-dimensional arrays) where `Q[s][a]` is the estimated action value corresponding to state `s` and action `a`.- `policy`: This is a dictionary where `policy[s]` returns the action that the agent chooses after observing state `s`.(_Feel free to define additional functions to help you to organize your code._)
###Code
def generate_episode_from_Q(env, Q, epsilon, nA):
""" generates an episode from following the epsilon-greedy policy """
episode = []
state = env.reset()
while True:
action = np.random.choice(np.arange(nA), p=get_probs(Q[state], epsilon, nA)) \
if state in Q else env.action_space.sample()
next_state, reward, done, info = env.step(action)
episode.append((state, action, reward))
state = next_state
if done:
break
return episode
def get_probs(Q_s, epsilon, nA):
""" obtains the action probabilities corresponding to epsilon-greedy policy """
policy_s = np.ones(nA) * epsilon / nA
best_a = np.argmax(Q_s)
policy_s[best_a] = 1 - epsilon + (epsilon / nA)
return policy_s
def update_Q(env, episode, Q, alpha, gamma):
""" updates the action-value function estimate using the most recent episode """
states, actions, rewards = zip(*episode)
# prepare for discounting
discounts = np.array([gamma**i for i in range(len(rewards)+1)])
for i, state in enumerate(states):
old_Q = Q[state][actions[i]]
Q[state][actions[i]] = old_Q + alpha*(sum(rewards[i:]*discounts[:-(1+i)]) - old_Q)
return Q
def mc_control(env, num_episodes, alpha, gamma=1.0, eps_start=1.0, eps_decay=.99999, eps_min=0.05):
nA = env.action_space.n
# initialize empty dictionary of arrays
Q = defaultdict(lambda: np.zeros(nA))
epsilon = eps_start
# loop over episodes
for i_episode in range(1, num_episodes+1):
# monitor progress
if i_episode % 1000 == 0:
print("\rEpisode {}/{}.".format(i_episode, num_episodes), end="")
sys.stdout.flush()
# set the value of epsilon
epsilon = max(epsilon*eps_decay, eps_min)
# generate an episode by following epsilon-greedy policy
episode = generate_episode_from_Q(env, Q, epsilon, nA)
# update the action-value function estimate using the episode
Q = update_Q(env, episode, Q, alpha, gamma)
# determine the policy corresponding to the final action-value function estimate
policy = dict((k,np.argmax(v)) for k, v in Q.items())
return policy, Q
###Output
_____no_output_____
###Markdown
Use the cell below to obtain the estimated optimal policy and action-value function. Note that you should fill in your own values for the `num_episodes` and `alpha` parameters.
###Code
# obtain the estimated optimal policy and action-value function
policy, Q = mc_control(env, 500000, 0.02)
###Output
Episode 500000/500000.
###Markdown
Next, we plot the corresponding state-value function.
###Code
# obtain the corresponding state-value function
V = dict((k,np.max(v)) for k, v in Q.items())
# plot the state-value function
plot_blackjack_values(V)
###Output
_____no_output_____
###Markdown
Finally, we visualize the policy that is estimated to be optimal.
###Code
# plot the policy
plot_policy(policy)
###Output
_____no_output_____ |
beer_volume_volume.ipynb | ###Markdown
Import the text-to-vector transformer. [An approximate explanation of how it works](http://zabaykin.ru/?p=463) [Documentation](https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html)
###Code
from sklearn.feature_extraction.text import CountVectorizer
###Output
_____no_output_____
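###Markdown
A minimal illustrative sketch (an addition, not part of the original notebook): `CountVectorizer` learns a vocabulary of tokens and turns each input string into a vector of token counts. The `demo_*` names and the two sample SKU strings below are made up for the illustration.
###Code
demo_vectorizer = CountVectorizer(lowercase=True)
demo_matrix = demo_vectorizer.fit_transform(["Пиво BAGBIER светлое 1.5л", "Пиво BAGBIER ст/бут 0.5л"])
# vocabulary of learned tokens (use get_feature_names() on older scikit-learn versions)
print(demo_vectorizer.get_feature_names_out())
# one row of token counts per input string
print(demo_matrix.toarray())
###Output
_____no_output_____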
###Markdown
Import the label-to-number transformer. It assigns a unique number to every new label it encounters. Suppose we have a set of labels like this:```pythonlabels = ['hare', 'wolf', 'duck', 'squirrel', 'hare', 'duck']```LabelEncoder builds roughly the following mapping:| Label | Number ||:-----:|:-----:|| wolf | 0 || squirrel | 1 || hare | 2 || duck | 3 |So if we are given a set of labels such as```python['duck', 'duck', 'hare', 'duck', 'squirrel', 'wolf', 'hare', 'wolf', 'wolf', 'squirrel']```we can convert it into a list of numbers```python[3, 3, 2, 3, 1, 0, 2, 0, 0, 1]```This is needed because many algorithms cannot work with strings, and numbers also take up less memory. [Documentation](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelEncoder.html)
###Code
from sklearn.preprocessing import LabelEncoder
###Output
_____no_output_____
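###Markdown
A small illustrative sketch (an addition, not from the original notebook) of the mapping described above: `fit` learns the label set, `transform` maps labels to integers, and `inverse_transform` maps them back. The `demo_le` name and the animal labels are made up for the illustration; note that the real LabelEncoder assigns numbers in sorted label order.
###Code
demo_le = LabelEncoder()
demo_le.fit(['hare', 'wolf', 'duck', 'squirrel', 'hare', 'duck'])
codes = demo_le.transform(['duck', 'duck', 'hare', 'wolf'])
# integers assigned in sorted label order: duck=0, hare=1, squirrel=2, wolf=3
print(codes)
# and back to the original string labels
print(demo_le.inverse_transform(codes))
###Output
_____no_output_____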
###Markdown
Import the function for splitting the dataset into training and test sets at random. Why this is needed: as a model trains, it can start memorizing "question"-"answer" pairs instead of trying to make sense of the input data, so the model has to be evaluated on data it has never seen before. [A bit more detail](http://robotosha.ru/algorithm/training-set-and-test-data.html) [Documentation](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html)
###Code
from sklearn.model_selection import train_test_split
###Output
_____no_output_____
###Markdown
Import the classifier from Yandex's CatBoost library.
###Code
from catboost import CatBoostClassifier
###Output
_____no_output_____
###Markdown
Pull the raw strings out of the **SKU_NAME** column. In the table the strings are stored as byte lists; the `.flatten()` method converts them into plain strings.
###Code
data = [item for item in beer_dataset[['SKU_NAME']].values.flatten()]
###Output
_____no_output_____
###Markdown
Pull the labels out of the **Объем** column. The initial set has too many distinct labels, so we cluster the labels, keeping the set that covers 95% of the most popular values.
###Code
from sklearn.cluster import KMeans
from collections import Counter
from itertools import accumulate
raw_labels = beer_dataset['Объем'].values.flatten()
p = list(accumulate(count/len(raw_labels) for _, count in Counter(raw_labels).items()))
top_labels = np.sort([item for i, item in enumerate(Counter(raw_labels))
if p[i] <= 0.95])
cluster_centers = top_labels.reshape(-1, 1)
kmeans = KMeans(n_clusters=top_labels.shape[0], n_init=1, init=cluster_centers)
kmeans.fit(raw_labels.reshape(-1, 1))
labels = [kmeans.cluster_centers_[c][0] for c in kmeans.predict(raw_labels.reshape(-1, 1))]
pd.DataFrame({'volume':raw_labels, 'class':labels}, columns=['volume', 'class']).head(10)
###Output
_____no_output_____
###Markdown
Split the dataset at random into training and test data at roughly a 2-to-1 ratio. `random_state` is the initial state of the random number generator used for the split; we set it to 42 so that the split is the same every time.
###Code
train_data, test_data, train_labels, test_labels = train_test_split(data, labels, test_size=0.33, random_state=42)
###Output
_____no_output_____
###Markdown
Print statistics of the resulting datasets: the total size (Total), the size of the training set (train) and of the test set (test).
###Code
print(f"Total: {len(data)} samples\n"
f"\ttrain: {len(train_data)} data, {len(train_labels)} labels\n"
f"\ttest: {len(test_data)} data, {len(test_labels)} labels")
###Output
Total: 5824 samples
train: 3902 data, 3902 labels
test: 1922 data, 1922 labels
###Markdown
A helper function. CountVectorizer returns compressed (sparse) vectors, but we need ordinary ones. This function takes a list of compressed vectors and converts them into arrays of numbers.
###Code
def dense_vectors(vectors):
return [np.asarray(item.todense())[0] for item in vectors]
###Output
_____no_output_____
###Markdown
The model-training function. Our model consists of three parts: - `CountVectorizer` to turn the input data into a vector representation - `LabelEncoder` to turn the labels into numbers - `CatBoostClassifier` – the actual classifier. Ideally all three parts should be saved into one or several files; for now, for testing, we save only the last one. The input data, given as a list of strings, is lowercased and split into tokens with the regular expression `(?u)\b\w\w+\b|[0-9\.,]+[\%\w]|\w+`. You can see how this expression works [here](https://regex101.com/r/Puyk9J/1). It splits strings into lists of substrings roughly like this |Строка| Токены ||:------------------------------|:-----------------------------------:|| Пиво БагБир 0.5л ст/бут | ['Пиво', 'БагБир', '0.5л', 'ст', 'бут'] || Пиво БАГ-БИР св.ст/б 0.5л | ['Пиво', 'БАГ', 'БИР', 'св', '.с', 'т', 'б', '0.5л'] || Пиво BAGBIER светлое 4,9% 1.5л | ['Пиво', 'BAGBIER', 'светлое', '4,9%', '1.5л'] || Пиво БАГ-БИР св.ПЭТ 2.5л | ['Пиво', 'БАГ', 'БИР', 'св', '.П', 'ЭТ', '2.5л'] || Пиво БАГ БИР ГОЛЬДЕН светлое ПЭТ 4% 1,5л | ['Пиво', 'БАГ', 'БИР', 'ГОЛЬДЕН', 'светлое', 'ПЭТ', '4%', '1,5л'] || Пиво БАГ-БИР ГОЛЬДЕН св.4% ПЭТ 2.5л | ['Пиво', 'БАГ', 'БИР', 'ГОЛЬДЕН', 'св', '.4%', 'ПЭТ', '2.5л'] || Пиво БАГ БИР БОК темное 4% 1.5л | ['Пиво', 'БАГ', 'БИР', 'БОК', 'темное', '4%', '1.5л'] || Нап.пивн.BAGBIER 4,6% ст/б 0.5л | ['Нап', '.п', 'ивн', '.B', 'AGBIER', '4,6%', 'ст', 'б', '0.5л'] || Нап.пивн.БАГ БИР 4,2% ст/б 0.5л | ['Нап', '.п', 'ивн', '.Б', 'АГ', 'БИР', '4,2%', 'ст', 'б', '0.5л'] || Пиво BRAHMA 4.6% св.ст/б 0.33л | ['Пиво', 'BRAHMA', '4.6%', 'св', '.с', 'т', 'б', '0.33л'] |Then we convert the data into compressed vectors and get plain vectors from them with the `dense_vectors` function. After that we create a LabelEncoder() and make it memorize our labels. Now we can move on to the main part: training the classifier. We create a CatBoostClassifier with a configurable iterations parameter (the number of iterations) and train it by calling its `fit` method ([documentation](https://catboost.ai/docs/concepts/python-reference_catboostclassifier_fit.html)). This method takes the training data as a list of vectors (`vectorized_data`) and the list of encoded labels. Once training is done, we return a tuple of the form `(CountVectorizer, LabelEncoder, CatBoostClassifier)`
###Code
import re
import random
import time
random.seed(time.time())
analyzer = 'word'
token_pattern = r"(?u)\b\w\w+\b|[0-9\.,]+[\%\w]|\w+"
def tokenize(items, token_pattern):
pat = re.compile(token_pattern)
tokens = []
for item in items:
tokens.append([match.group() for match in re.finditer(pat, item)])
data = zip(items, tokens)
return pd.DataFrame(data, columns=['Строка', 'Токены'])
tokenize(random.sample(data, 10), r"(?m)[a-zA-Zа-яА-Я]+")
def build_model(data, labels, iterations=200):
vectorizer = CountVectorizer(lowercase=True)
compressed_data = vectorizer.fit_transform(data)
vectorized_data = dense_vectors(compressed_data)
le = LabelEncoder()
encoded_labels = le.fit_transform(labels)
classifier = CatBoostClassifier(iterations=iterations, task_type = "GPU")
classifier.fit(vectorized_data, encoded_labels, silent=False)
return (vectorizer, le, classifier)
###Output
_____no_output_____
###Markdown
Train our model.
###Code
model = build_model(train_data, train_labels, iterations=NUM_OF_ITERATIONS)
###Output
0: learn: -5.9044050 total: 844ms remaining: 41.4s
1: learn: -5.7587951 total: 1.8s remaining: 43.3s
2: learn: -5.6491558 total: 2.68s remaining: 42s
3: learn: -5.5603306 total: 3.55s remaining: 40.9s
4: learn: -5.4850888 total: 4.4s remaining: 39.6s
5: learn: -5.4207348 total: 5.24s remaining: 38.4s
6: learn: -5.3619938 total: 6.1s remaining: 37.5s
7: learn: -5.3102355 total: 6.96s remaining: 36.6s
8: learn: -5.2630427 total: 7.8s remaining: 35.6s
9: learn: -5.2185368 total: 8.64s remaining: 34.6s
10: learn: -5.1758879 total: 9.49s remaining: 33.6s
11: learn: -5.1347391 total: 10.3s remaining: 32.7s
12: learn: -5.0962162 total: 11.2s remaining: 31.8s
13: learn: -5.0641718 total: 12.1s remaining: 31.1s
14: learn: -5.0316920 total: 12.9s remaining: 30.1s
15: learn: -5.0002873 total: 13.8s remaining: 29.2s
16: learn: -4.9741068 total: 14.6s remaining: 28.4s
17: learn: -4.9451884 total: 15.4s remaining: 27.5s
18: learn: -4.9158104 total: 16.3s remaining: 26.6s
19: learn: -4.8894863 total: 17.2s remaining: 25.7s
20: learn: -4.8628453 total: 18.1s remaining: 25s
21: learn: -4.8378366 total: 18.9s remaining: 24.1s
22: learn: -4.8117056 total: 19.8s remaining: 23.3s
23: learn: -4.7893568 total: 20.7s remaining: 22.4s
24: learn: -4.7681858 total: 21.5s remaining: 21.5s
25: learn: -4.7500210 total: 22.4s remaining: 20.6s
26: learn: -4.7260534 total: 23.2s remaining: 19.8s
27: learn: -4.7055707 total: 24.1s remaining: 18.9s
28: learn: -4.6911710 total: 24.9s remaining: 18s
29: learn: -4.6698788 total: 25.8s remaining: 17.2s
30: learn: -4.6536487 total: 26.7s remaining: 16.3s
31: learn: -4.6368579 total: 27.5s remaining: 15.5s
32: learn: -4.6202884 total: 28.3s remaining: 14.6s
33: learn: -4.5983125 total: 29.2s remaining: 13.7s
34: learn: -4.5841761 total: 30.1s remaining: 12.9s
35: learn: -4.5699606 total: 30.9s remaining: 12s
36: learn: -4.5551570 total: 31.8s remaining: 11.2s
37: learn: -4.5396006 total: 32.6s remaining: 10.3s
38: learn: -4.5254667 total: 33.6s remaining: 9.47s
39: learn: -4.5133225 total: 34.5s remaining: 8.62s
40: learn: -4.5002363 total: 35.4s remaining: 7.78s
41: learn: -4.4880310 total: 36.4s remaining: 6.92s
42: learn: -4.4751725 total: 37.2s remaining: 6.06s
43: learn: -4.4611002 total: 38.1s remaining: 5.2s
44: learn: -4.4485340 total: 39s remaining: 4.33s
45: learn: -4.4367972 total: 39.8s remaining: 3.46s
46: learn: -4.4198953 total: 40.7s remaining: 2.6s
47: learn: -4.4071555 total: 41.6s remaining: 1.73s
48: learn: -4.3950468 total: 42.5s remaining: 866ms
49: learn: -4.3829837 total: 43.3s remaining: 0us
###Markdown
A report generator for our model. We feed it the model, the test data, and the labels. The `.predict` method ([documentation](https://catboost.ai/docs/concepts/python-reference_catboostclassifier_predict.html)) of the `CatBoostClassifier` takes a list of vectors (the input data) and returns the most likely answers for them as floats, which have to be converted to integers. The `.predict_proba` method ([documentation](https://catboost.ai/docs/concepts/python-reference_catboostclassifier_predict_proba.html)) of the `CatBoostClassifier` takes a list of vectors (the input data) and returns the probabilities of those answers. We multiply the probabilities by 100 and format them as strings like `95%, 80%, 91%`. After that we take the answers returned by `.predict` and decode them with the `.inverse_transform` method of the `LabelEncoder`, which turns the list of numbers back into a list of strings. From these answers, the input data, and the correct answers from the test dataset we build a table (`table`) and convert it into a convenient `pandas.DataFrame`.
###Code
def validate_model(model, valid_data, valid_labels):
vectorizer, le, classifier = model
columns = ["Запись", "Предсказание", "Уверенность" ,"Правильный результат"]
compressed_data = vectorizer.transform(valid_data)
vectorized_data = dense_vectors(compressed_data)
prediction = classifier.predict(vectorized_data).flatten().astype('int64')
proba = np.rint(100*classifier.predict_proba(vectorized_data).max(axis=1))
proba_column = (f"{int(item)}%" for item in proba)
results = le.inverse_transform(prediction).flatten()
table = zip(valid_data, results, proba_column, valid_labels)
return pd.DataFrame(table, columns=columns)
###Output
_____no_output_____
###Markdown
Run validation of our model and display a comparison table of the results.
###Code
validation = validate_model(model, test_data, test_labels)
validation.head(20)
###Output
_____no_output_____
###Markdown
It would be good to compute the percentage of correct answers. We filter all rows where the value in the **Предсказание** (prediction) column matches the value in the **Правильный результат** (correct result) column and count them. Then we divide the number of correct answers by the size of the test dataset and print the result.
###Code
valid = len(validation[validation['Предсказание'] == validation['Правильный результат']])
total = len(validation)
print(f"Valid: {valid} from {total} ({100*valid/total}%)")
validation[validation['Предсказание'] != validation['Правильный результат']].head(20)
###Output
Valid: 563 from 1922 (29.292403746097815%)
###Markdown
Don't forget to save the model to a file!
###Code
vectorizer, le, classifier = model
model_name = f"beer_volume_catboost_{NUM_OF_ITERATIONS}"
classifier.save_model(f"{model_name}.cbm")
from joblib import dump, load
dump(le, f"{model_name}_le.job")
dump(vectorizer, f"{model_name}_vect.job")
###Output
_____no_output_____ |
codes/final-modelling.ipynb | ###Markdown
Problem Statement: Can we predict the likelihood of an offence being prosecuted, given information about the crime such as its type, time of year, and location?
###Code
import pandas as pd
import numpy as np
from numpy import linspace
import seaborn as sns
import tensorflow as tf
from tensorflow import keras
from tensorflow_addons.metrics import F1Score
import matplotlib.pyplot as plt # visualization
from termcolor import colored as cl # text customization
from sklearn.preprocessing import StandardScaler # data normalization
from sklearn.model_selection import train_test_split # data split
from sklearn.tree import DecisionTreeClassifier # Decision tree algorithm
from sklearn.metrics import classification_report, roc_auc_score, roc_curve, auc, confusion_matrix, accuracy_score, f1_score
from sklearn.metrics import plot_confusion_matrix
from sklearn.neighbors import KNeighborsClassifier # KNN algorithm
from sklearn.linear_model import LogisticRegression # Logistic regression algorithm
from sklearn.svm import SVC # SVM algorithm
from sklearn.naive_bayes import CategoricalNB
from sklearn.ensemble import RandomForestClassifier # Random forest tree algorithm
from xgboost import XGBClassifier # XGBoost algorithm
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import GridSearchCV
#from imblearn.combine import SMOTETomek
from collections import Counter
###Output
c:\Users\saqui\anaconda3\envs\ml\lib\site-packages\tensorflow_addons\utils\ensure_tf_install.py:53: UserWarning: Tensorflow Addons supports using Python ops for all Tensorflow versions above or equal to 2.6.0 and strictly below 2.9.0 (nightly versions are not supported).
The versions of TensorFlow you are currently using is 2.5.0 and is not supported.
Some things might work, some things might not.
If you were to encounter a bug, do not file an issue.
If you want to make sure you're using a tested and supported configuration, either change the TensorFlow version or the TensorFlow Addons's version.
You can find the compatibility matrix in TensorFlow Addon's readme:
https://github.com/tensorflow/addons
warnings.warn(
###Markdown
Clean Data Ingestion
###Code
df_train = pd.read_csv("D:/ADSP/Hertfordshire-Constabulary/data/df_train_final.csv")
df_train
df_test = pd.read_csv("D:/ADSP/Hertfordshire-Constabulary/data/df_test_final.csv")
df_test
def x_var(df):
df = df.iloc[:,:-1]
return df
def y_var(df):
df = df["outcome_type"]
return df
x_train = x_var(df_train)
x_test = x_var(df_test)
y_train = y_var(df_train)
y_test = y_var(df_test)
y_train.value_counts()
y_test.value_counts()
###Output
_____no_output_____
###Markdown
Modelling Using Grid Search CV to identify best model and parameters
###Code
model_params = {
'random_forest': {
'model': RandomForestClassifier(),
'params' : {
'n_estimators': [1,5,10,100],
'min_samples_leaf': [10,50,100]
}
},
'KNN': {
'model': KNeighborsClassifier(),
'params': {
'n_neighbors': list(range(1,10)),
'p': [1,2],
}
},
'decision_tree': {
'model': DecisionTreeClassifier(),
'params': {
'criterion': ['gini','entropy'],
'max_depth':[3,5,10]
}
},
"XGBClassifier": {
"model": XGBClassifier(use_label_encoder=False, booster='gbtree',
eval_metric = "logloss"),
"params": {'n_estimators': range(6, 10),
'max_depth': range(3, 8),
'learning_rate': [.01, .2, .3, .4, .5],
'colsample_bytree': [.7, .8, .9, 1]}
}
}
scores = []
for model_name, mp in model_params.items():
clf = GridSearchCV(mp['model'], mp['params'], cv = 5,
scoring = "f1", return_train_score=False)
clf.fit(x_train, y_train)
scores.append({
'model': model_name,
'best_score': clf.best_score_,
'best_params': clf.best_params_
})
df_metrics = pd.DataFrame(scores, columns=['model','best_score','best_params'])
df_metrics
###Output
_____no_output_____
###Markdown
KNN
###Code
clf_knn = KNeighborsClassifier(n_neighbors=1, p=1)
clf_knn.fit(x_train, y_train)
y_pred = clf_knn.predict(x_test)
cf_matrix = confusion_matrix(y_test, y_pred)
print(cf_matrix)
print(f"The F1 Score is: {f1_score(y_test, y_pred)}")
print(f"The AUC Score is: {roc_auc_score(y_test, y_pred)}")
ax = sns.heatmap(cf_matrix/np.sum(cf_matrix), annot=True,
fmt='.2%', cmap='Blues')
ax.set_title('Seaborn Confusion Matrix with labels\n\n');
ax.set_xlabel('\nPredicted Values')
ax.set_ylabel('Actual Values ');
## Tick labels - List must be in alphabetical order
ax.xaxis.set_ticklabels(['False','True'])
ax.yaxis.set_ticklabels(['False','True'])
## Display the visualization of the Confusion Matrix.
plt.show()
fig = plt.figure(figsize=(10, 7))
fpr, tpr, threshold = roc_curve(y_test, y_pred)
roc_auc = auc(fpr, tpr)
import matplotlib.pyplot as plt
plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
###Output
_____no_output_____
###Markdown
Random Forest
###Code
clf_rf = RandomForestClassifier(min_samples_leaf=10, n_estimators=1)
clf_rf.fit(x_train, y_train)
y_pred = clf_rf.predict(x_test)
cf_matrix = confusion_matrix(y_test, y_pred)
print(cf_matrix)
print(f"The F1 Score is: {f1_score(y_test, y_pred)}")
print(f"The AUC Score is: {roc_auc_score(y_test, y_pred)}")
# Using Grid Search CV to find optimum weights
class_weight = np.linspace(0.05, 1.5, 20) # creating evenly spaced numbers
grid_para = {'class_weight' : [{0: x, 1: 1.0-x} for x in class_weight]}
gridsearch = GridSearchCV(estimator = RandomForestClassifier(n_estimators=1, min_samples_leaf=10),
param_grid = grid_para,
scoring = 'f1',
cv = 5)
gridsearch.fit(x_train, y_train)
print(gridsearch.best_params_)
clf_rf = RandomForestClassifier(min_samples_leaf=10, n_estimators=1,
class_weight={0: 0.20263157894736844, 1: 0.7973684210526315})
clf_rf.fit(x_train, y_train)
y_pred = clf_rf.predict(x_test)
cf_matrix = confusion_matrix(y_test, y_pred)
print(cf_matrix)
print(f"The F1 Score is: {f1_score(y_test, y_pred)}")
print(f"The AUC Score is: {roc_auc_score(y_test, y_pred)}")
ax = sns.heatmap(cf_matrix/np.sum(cf_matrix), annot=True,
fmt='.2%', cmap='Blues')
ax.set_title('Seaborn Confusion Matrix with labels\n\n');
ax.set_xlabel('\nPredicted Values')
ax.set_ylabel('Actual Values ');
## Tick labels - List must be in alphabetical order
ax.xaxis.set_ticklabels(['False','True'])
ax.yaxis.set_ticklabels(['False','True'])
## Display the visualization of the Confusion Matrix.
plt.show()
fig = plt.figure(figsize=(10, 7))
fpr, tpr, threshold = roc_curve(y_test, y_pred)
roc_auc = auc(fpr, tpr)
import matplotlib.pyplot as plt
plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
###Output
_____no_output_____
###Markdown
Voting Ensemble Classifier
###Code
from sklearn.ensemble import VotingClassifier
from sklearn.model_selection import KFold, cross_val_score
kfold = KFold(n_splits=10)
# create the sub models
estimator = []
model1 = RandomForestClassifier(min_samples_leaf=10, n_estimators=1, class_weight={0: 0.20263157894736844, 1: 0.7973684210526315})
estimator.append(('RandomForest', model1))
model2 = KNeighborsClassifier(n_neighbors=1, p=1)
estimator.append(('KNN', model2))
model3 = XGBClassifier(use_label_encoder=False, booster='gbtree',
eval_metric = "logloss")
estimator.append(('XGBoost', model3))
# create the ensemble model
ensemble = VotingClassifier(estimators = estimator)
results = cross_val_score(ensemble, x_train, y_train, cv=kfold, scoring="f1")
print(results.mean())
ensemble_hard = VotingClassifier(estimators = estimator, voting = "hard")
ensemble_hard.fit(x_train, y_train)
y_pred = ensemble_hard.predict(x_test)
print(f"The F1 Score for Hard voting is: {f1_score(y_test, y_pred)}")
print(f"The AUC Score for hard voting is: {roc_auc_score(y_test, y_pred)}")
ensemble_soft = VotingClassifier(estimators = estimator, voting = "soft")
ensemble_soft.fit(x_train, y_train)
y_pred = ensemble_soft.predict(x_test)
print(f"The F1 Score for soft voting is: {f1_score(y_test, y_pred)}")
print(f"The AUC Score for soft voting is: {roc_auc_score(y_test, y_pred)}")
###Output
The F1 Score for Hard voting is: 0.3390583012034116
The AUC Score for soft voting is: 0.5929699508716794
###Markdown
Artificial Neural Networks
###Code
# x_train, x_test, y_train, y_test = train_test_split(x_input, y, test_size=0.20, random_state=4, stratify=y)
# # using stratify to ensure no class disparity
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.20, random_state=4, stratify=y_train)
# using stratify to ensure no class disparity
from sklearn.utils import class_weight
class_weights = class_weight.compute_class_weight('balanced',
classes = np.unique(y_train),
y = y_train)
class_weights
class_weight_dict = dict(enumerate(class_weights))
class_weight_dict
## https://datascience.stackexchange.com/questions/48246/how-to-compute-f1-in-tensorflow
from keras import backend as K
def f1_metric(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
recall = true_positives / (possible_positives + K.epsilon())
f1_val = 2*(precision*recall)/(precision+recall+K.epsilon())
return f1_val
metric = ['accuracy',
f1_metric,
#tensorflow_addons.metrics.F1Score(name = "F1", num_classes = 2),
tf.keras.metrics.Precision(name='precision'),
tf.keras.metrics.Recall(name='recall'),
tf.keras.metrics.AUC(name='auc')]
def my_model():
model = keras.Sequential([
keras.layers.Dense(48, input_dim=24, activation='selu',
kernel_initializer='he_uniform'), # initialising weights
keras.layers.Dense(96, activation="selu"),
keras.layers.Dense(96, activation='selu'),
keras.layers.Dense(96, activation='selu'),
keras.layers.Dense(48, activation='selu'),
keras.layers.Dense(48, activation='selu'),
keras.layers.Dense(32, activation='selu'),
keras.layers.Dense(32, activation='selu'),
keras.layers.Dense(24, activation='selu'),
keras.layers.Dense(24, activation='selu'),
keras.layers.Dense(12, activation='selu'),
keras.layers.Dense(6, activation='selu'),
keras.layers.Dense(4, activation='selu'),
keras.layers.Dense(2, activation='selu'),
keras.layers.Dense(1, activation='sigmoid')
])
# The optimiser is Adam with a learning rate of 0.001:
optim = tf.keras.optimizers.Adam(learning_rate=0.001, beta_1 = 0.9, beta_2 = 0.999,
epsilon = 10e-8, decay = 0.1, amsgrad = True)
# The model optimises cross entropy as its loss function and will monitor classification accuracy:
model.compile(optimizer=optim,
loss=tf.keras.losses.BinaryCrossentropy(from_logits=False),
metrics=metric)
# Printing model summary:
print(model.summary())
return model
print('Done!')
model = my_model()
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
import datetime, os
es = EarlyStopping(monitor = "val_f1_metric", mode = "max", min_delta= 0.0001, patience = 5, verbose=1)
mc = ModelCheckpoint(filepath="D:/ADSP/Hertfordshire-Constabulary/model/checkpoint",
monitor="val_f1_metric", verbose=1, save_best_only= True, mode="max")
logdir = os.path.join("logs", datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
tensorboard_callback = tf.keras.callbacks.TensorBoard(logdir, histogram_freq=1)
history = model.fit(x_train, y_train, epochs= 200, validation_data = (x_val, y_val),
callbacks = [es,mc, tensorboard_callback], class_weight = class_weight_dict)
fig = plt.figure(figsize=(16, 5))
# subplot #1
plt.subplot(121)
import seaborn as sns
sns.set_style("whitegrid")
plt.plot(history.history["accuracy"], 'r', label = "training accuracy")
plt.plot(history.history["val_accuracy"], label = "validation accuracy")
plt.plot(history.history["loss"], 'r', label = "training loss")
plt.plot(history.history["val_loss"], label = "validation loss")
plt.xlabel("epochs")
plt.ylabel("accuracy and loss")
plt.title("Accuracy & loss \n", fontsize = 14)
plt.legend()
# subplot #2
plt.subplot(122)
import seaborn as sns
sns.set_style("whitegrid")
plt.plot(history.history["f1_metric"], 'r', label = "training f1_metric")
plt.plot(history.history["val_f1_metric"], label = "validation f1_metric")
plt.plot(history.history["auc"], 'r', label = "training AUC")
plt.plot(history.history["val_auc"], label = "validation AUC")
plt.xlabel("epochs")
plt.ylabel("AUC and F1")
plt.title("F1 Score & AUC\n", fontsize = 14)
plt.legend()
plt.show()
acc = model.evaluate(x_test, y_test, verbose = 1, )
print(f"The accuracy for Test Data is: {acc[1] * 100} %")
model.save("D:/ADSP/Hertfordshire-Constabulary/model/ann-best-model")
y_pred_prob = model.predict(x_test)
y_pred_prob
y_pred = np.round(y_pred_prob)
y_pred
print("Classification Report: \n", classification_report(y_test, y_pred))
cf_matrix = confusion_matrix(y_test, y_pred)
print(cf_matrix)
ax = sns.heatmap(cf_matrix/np.sum(cf_matrix), annot=True,
fmt='.2%', cmap='Blues')
ax.set_title('Seaborn Confusion Matrix with labels\n\n');
ax.set_xlabel('\nPredicted Values')
ax.set_ylabel('Actual Values ');
## Tick labels - List must be in alphabetical order
ax.xaxis.set_ticklabels(['False','True'])
ax.yaxis.set_ticklabels(['False','True'])
## Display the visualization of the Confusion Matrix.
plt.show()
fig = plt.figure(figsize=(10, 7))
fpr, tpr, threshold = roc_curve(y_test, y_pred)
roc_auc = auc(fpr, tpr)
import matplotlib.pyplot as plt
plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
df_pred = pd.DataFrame(y_pred, columns=["class_predicted"])
df_prob = pd.DataFrame(y_pred_prob, columns=["class_prob"])
df_test = pd.concat([df_test, df_pred, df_prob], axis=1)
df_test.head()
###Output
_____no_output_____
###Markdown
Likelihood of the Crime Outcome
###Code
df_test # class_prob represents likelihood of each outcome based on inputs
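# Added sketch (not in the original notebook): rank the test records by the
# predicted likelihood in class_prob to surface the cases most likely to be positive.
df_test.sort_values("class_prob", ascending=False).head(10)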
###Output
_____no_output_____ |
day-1/python-datatypes.ipynb | ###Markdown
Jupyter We'll be using Jupyter for all of our examples -- this allows us to run python in a web-based notebook, keeping a history of input and output, along with text and images.For Jupyter help, visit:https://jupyter.readthedocs.io/en/latest/content-quickstart.html We interact with python by typing into _cells_ in the notebook. By default, a cell is a _code_ cell, which means that you can enter any valid python code into it and run it. Another important type of cell is a _markdown_ cell. This lets you put text, with different formatting (italics, bold, etc) that describes what the notebook is doing.You can change the cell type via the menu at the top, or using the shortcuts: * ctrl-m m : mark down cell * ctrl-m y : code cell Some useful short-cuts: * shift+enter = run cell and jump to the next (creating a new cell if there is no other new one) * ctrl+enter = run cell-in place * alt+enter = run cell and insert a new one belowctrl+m h lists other commands A "markdown cell" enables you to typeset LaTeX equations right in your notebook. Just put them in $ or $$:$$\frac{\partial \rho}{\partial t} + \nabla \cdot (\rho U) = 0$$ **Important**: when you work through a notebook, everything you did in previous cells is still in memory and _known_ by python, so you can refer to functions and variables that were previously defined. Even if you go up to the top of a notebook and insert a cell, all the information done earlier in your notebook session is still defined -- it doesn't matter where physically you are in the notebook. If you want to reset things, you can use the options under the _Kernel_ menu. Quick Exercise:Create a new cell below this one. Make sure that it is a _code_ cell, and enter the following code and run it:```print("Hello, World")``` `print()` is a _function_ in python that takes arguments (in the `()`) and outputs to the screen. You can print multiple quantities at once like:
###Code
print(1, 2, 3)
###Output
_____no_output_____
###Markdown
Basic Datatypes Now we'll look at some of the basic datatypes in python -- these are analogous to what you will find in most programming languages, including numbers (integers and floating point), and strings. Some examples come from the python tutorial: http://docs.python.org/3/tutorial/ integers Integers are numbers without a decimal point. They can be positive or negative. Most programming languages use a finite amount of memory to store a single integer, but python will expand the amount of memory as necessary to store large integers. The basic operators, `+`, `-`, `*`, and `/` work with integers
###Code
2+2+3
2*-4
###Output
_____no_output_____
###Markdown
Note: integer division is one place where python 2 and python 3 differ. In python 3.x, dividing 2 integers results in a float. In python 2.x, dividing 2 integers results in an integer. The latter is consistent with many strongly-typed programming languages (like Fortran or C), since the data-type of the result is the same as the inputs, but the former is more in line with our expectations
###Code
1/2
###Output
_____no_output_____
###Markdown
To get an integer result, we can use the // operator.
###Code
1//2
###Output
_____no_output_____
###Markdown
Python is a _dynamically-typed language_—this means that we do not need to declare the datatype of a variable before initializing it. Here we'll create a variable (think of it as a descriptive label that can refer to some piece of data). The `=` operator assigns a value to a variable.
###Code
a = 1
b = 2
###Output
_____no_output_____
###Markdown
Functions operate on variables and return a result. Here, `print()` will output to the screen.
###Code
print(a+b)
print(a*b)
###Output
_____no_output_____
###Markdown
Note that variable names are case sensitive, so a and A are different
###Code
A = 2048
print(a, A)
###Output
_____no_output_____
###Markdown
Here we initialize 3 variable all to `0`, but these are still distinct variables, so we can change one without affecting the others.
###Code
x = y = z = 0
print(x, y, z)
z = 1
print(x, y, z)
###Output
_____no_output_____
###Markdown
Python has some built in help (and Jupyter/ipython has even more)
###Code
help(x)
x?
###Output
_____no_output_____
###Markdown
Another function, `type()` returns the data type of a variable
###Code
print(type(x))
###Output
_____no_output_____
###Markdown
Note in languages like Fortran and C, you specify the amount of memory an integer can take (usually 2 or 4 bytes). This puts a restriction on the largest size integer that can be represented. Python will adapt the size of the integer so you don't *overflow*
###Code
a = 12345678901234567890123456789012345123456789012345678901234567890
print(a)
print(a.bit_length())
print(type(a))
###Output
_____no_output_____
###Markdown
floating point when operating with both floating point and integers, the result is promoted to a float. This is true of both python 2.x and 3.x
###Code
1./2
###Output
_____no_output_____
###Markdown
but note the special integer division operator
###Code
1.//2
###Output
_____no_output_____
###Markdown
It is important to understand that since there are infinitely many real numbers between any two bounds, on a computer we have to approximate this by a finite number. There is an IEEE standard for floating point that pretty much all languages and processors follow. This means two things* not every real number will have an exact representation in floating point* there is a finite precision to numbers -- below this we lose track of differences (this is usually called *roundoff* error). On our course website, I posted a link to a paper, _What every computer scientist should know about floating-point arithmetic_ -- this is a great reference on understanding how a computer stores numbers. Consider the following expression, for example:
###Code
0.3/0.1 - 3
###Output
_____no_output_____
###Markdown
Here's another example: The number 0.1 cannot be exactly represented on a computer. In our print, we use a format specifier (the stuff inside of the {}) to ask for more precision to be shown:
###Code
a = 0.1
print("{:30.20}".format(a))
###Output
_____no_output_____
###Markdown
we can ask python to report the limits on floating point
###Code
import sys
print(sys.float_info)
###Output
_____no_output_____
###Markdown
Note that this says that we can only store numbers between 2.2250738585072014e-308 and 1.7976931348623157e+308. We also see that the precision is 2.220446049250313e-16 (this is commonly called _machine epsilon_). To see this, consider adding a small number to 1.0. We'll use the equality operator (`==`) to test if two numbers are equal: Quick Exercise:Define two variables, $a = 1$, and $e = 10^{-16}$.Now define a third variable, `b = a + e`We can use the python `==` operator to test for equality. What do you expect `b == a` to return? Run it and see if it agrees with your guess. modules The core python language is extended by a standard library that provides additional functionality. These added pieces are in the form of modules that we can _import_ into our python session (or program).The `math` module provides functions that do the basic mathematical operations as well as provide constants (note there is a separate `cmath` module for complex numbers).In python, you `import` a module. The functions are then defined in a separate _namespace_—this is a separate region that defines names and variables, etc. A variable in one namespace can have the same name as a variable in a different namespace, and they don't clash. You use the "`.`" operator to access a member of a namespace.By default, when you type stuff into the python interpreter or here in the Jupyter notebook, or in a script, it is in its own default namespace, and you don't need to prefix any of the variables with a namespace indicator.
###Code
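# Quick Exercise sketch (added, not in the original notebook): adding a value
# smaller than machine epsilon to 1.0 leaves it unchanged.
a = 1.0
e = 1.e-16
b = a + e
print(b == a)   # True: e is below machine epsilon, so a + e rounds back to exactly 1.0
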
import math
###Output
_____no_output_____
###Markdown
`math` provides the value of pi
###Code
print(math.pi)
###Output
_____no_output_____
###Markdown
This is distinct from any variable `pi` we might define here
###Code
pi = 3
print(pi, math.pi)
###Output
_____no_output_____
###Markdown
Note here that `pi` and `math.pi` are distinct from one another—they are in different namespaces. floating point operations The same operators, `+`, `-`, `*`, `/` work are usual for floating point numbers. To raise an number to a power, we use the `**` operator (this is the same as Fortran)
###Code
R = 2.0
print(math.pi*R**2)
###Output
_____no_output_____
###Markdown
operator precedence follows that of most languages. See https://docs.python.org/3/reference/expressions.html#operator-precedence in order of precedence:* quantities in `()`* slicing, calls, subscripts* exponentiation (`**`)* `+x`, `-x`, `~x`* `*`, `@`, `/`, `//`, `%`* `+`, `-`(after this are bitwise operations and comparisons)Parentheses can be used to override the precedence. Quick Exercise:Consider the following expressions. Using the ideas of precedence, think about what value will result, then try it out in the cell below to see if you were right. * `1 + 3*2**2` * `1 + (3*2)**2` * `2**3**2`
###Code
2**(3**2)
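# Added sketch: checking the other expressions from the exercise above
print(1 + 3*2**2)    # ** binds tighter than *, which binds tighter than + -> 13
print(1 + (3*2)**2)  # parentheses are evaluated first -> 37
print(2**3**2)       # ** is right-associative, i.e. 2**(3**2) -> 512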
###Output
_____no_output_____
###Markdown
The math module provides a lot of the standard math functions we might want to use.For the trig functions, the expectation is that the argument to the function is in radians—you can use `math.radians()` to convert from degrees to radians, ex:
###Code
print(math.cos(math.radians(45)))
###Output
_____no_output_____
###Markdown
Notice that in that statement we are feeding the output of one function (`math.radians()`) into a second function, `math.cos()`. When in doubt, ask for help to discover all of the things a module provides:
###Code
help(math.sin)
###Output
_____no_output_____
###Markdown
complex numbers python uses '`j`' to denote the imaginary unit
###Code
print(1.0 + 2j)
a = 1j
b = 3.0 + 2.0j
print(a + b)
print(a*b)
###Output
_____no_output_____
###Markdown
we can use `abs()` to get the magnitude and separately get the real or imaginary parts
###Code
print(abs(b))
print(a.real)
print(a.imag)
###Output
_____no_output_____
###Markdown
strings python doesn't care if you use single or double quotes for strings:
###Code
a = "this is my string"
b = 'another string'
print(a)
print(b)
###Output
_____no_output_____
###Markdown
Many of the usual mathematical operators are defined for strings as well. For example to concatenate or duplicate:
###Code
print(a+b)
print(a + ". " + b)
print(a*2)
###Output
_____no_output_____
###Markdown
There are several escape codes that are interpreted in strings. These start with a backwards-slash, `\`. E.g., you can use `\n` for new line
###Code
a = a + "\n"
print(a)
###Output
_____no_output_____
###Markdown
Quick Exercise:The `input()` function can be used to ask the user for input. * Use `help(input)` to see how it works. * Write code to ask for input and store the result in a variable. `input()` will return a string. * Use the `float()` function to convert a number entered as input to a floating point variable. * Check to see if the conversion worked using the `type()` function.
###Code
a = input("enter a string")
print(type(float(a)))
a
###Output
_____no_output_____
###Markdown
""" can enclose multiline strings. This is useful for docstrings at the start of functions (more on that later...)
###Code
c = """
Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor
incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis
nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.
Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore
eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt
in culpa qui officia deserunt mollit anim id est laborum."""
print(c)
###Output
_____no_output_____
###Markdown
a raw string does not replace escape sequences (like \n). Just put a `r` before the first quote:
###Code
d = r"this is a raw string\n"
print(d)
###Output
_____no_output_____
###Markdown
slicing is used to access a portion of a string. Slicing a string can seem a bit counterintuitive if you are coming from Fortran. The trick is to think of the index as representing the left edge of a character in the string. When we do arrays later, the same will apply. Also note that python (like C) uses 0-based indexing. Negative indices count from the right.
###Code
a = "this is my string"
print(a)
print(a[5:7])
print(a[0])
print(d)
print(d[-2])
###Output
_____no_output_____
###Markdown
Quick Exercise:Strings have a lot of _methods_ (functions that know how to work with a particular datatype, in this case strings). A useful method is `.find()`. For a string `a`,`a.find(s)` will return the index of the first occurrence of `s`.For our string `c` above, find the first `.` (identifying the first full sentence), and print out just the first sentence in `c` using this result
###Code
c
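# Added sketch answering the quick exercise above: locate the first "." with
# .find() and slice up to (and including) it to print the first sentence of c.
first_period = c.find(".")
print(c[:first_period + 1])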
###Output
_____no_output_____
###Markdown
there are also a number of methods and functions that work with strings. Here are some examples:
###Code
print(a.replace("this", "that"))
print(len(a))
print(a.strip()) # Also notice that strip removes the \n
print(a.strip()[-1])
###Output
_____no_output_____
###Markdown
Note that our original string, `a`, has not changed. In python, strings are *immutable*. Operations on strings return a new string.
###Code
print(a)
print(type(a))
###Output
_____no_output_____
###Markdown
As usual, ask for help to learn more:
###Code
help(str)
###Output
_____no_output_____
###Markdown
We can format strings when we are printing to insert quantities in particular places in the string. A `{}` serves as a placeholder for a quantity and is replaced using the `.format()` method:
###Code
a = 1
b = 2.0
c = "test"
print("a = {}; b = {}; c = {}".format(a, b, c))
###Output
_____no_output_____ |
notebooks/pokemon/adversarial/basic/generative_inference_adversarial/convolutional/AE/pokemonAAEssmi Convolutional.ipynb | ###Markdown
Settings
###Code
%load_ext autoreload
%autoreload 2
%env TF_KERAS = 1
import os
sep_local = os.path.sep
import sys
sys.path.append('..'+sep_local+'..')
print(sep_local)
import tensorflow as tf
print(tf.__version__)
###Output
2.1.0
###Markdown
Dataset loading
###Code
dataset_name='pokemon'
images_dir = 'C:\\Users\\Khalid\\Documents\\projects\\pokemon\\DS06\\'
validation_percentage = 20
valid_format = 'png'
os.chdir('..'+sep_local+'..'+sep_local+'..'+sep_local+'..'+sep_local+'..'+sep_local+'..'+sep_local+'..'+sep_local+'..')
print(os.getcwd())
from training.generators.file_image_generator import create_image_lists, get_generators
imgs_list = create_image_lists(
image_dir=images_dir,
validation_pct=validation_percentage,
valid_imgae_formats=valid_format
)
inputs_shape= image_size=(200, 200, 3)
batch_size = 32
latents_dim = 32
intermediate_dim = 50
training_generator, testing_generator = get_generators(
images_list=imgs_list,
image_dir=images_dir,
image_size=image_size,
batch_size=batch_size,
class_mode=None
)
import tensorflow as tf
train_ds = tf.data.Dataset.from_generator(
lambda: training_generator,
output_types=tf.float32 ,
output_shapes=tf.TensorShape((batch_size, ) + image_size)
)
test_ds = tf.data.Dataset.from_generator(
lambda: testing_generator,
output_types=tf.float32 ,
output_shapes=tf.TensorShape((batch_size, ) + image_size)
)
_instance_scale=1.0
for data in train_ds:
_instance_scale = float(data[0].numpy().max())
break
_instance_scale
import numpy as np
from collections.abc import Iterable
if isinstance(inputs_shape, Iterable):
_outputs_shape = np.prod(inputs_shape)
_outputs_shape
###Output
_____no_output_____
###Markdown
Model's Layers definition
###Code
units=20
c=50
enc_lays = [
tf.keras.layers.Conv2D(filters=units, kernel_size=3, strides=(2, 2), activation='relu'),
tf.keras.layers.Conv2D(filters=units*9, kernel_size=3, strides=(2, 2), activation='relu'),
tf.keras.layers.Flatten(),
# No activation
tf.keras.layers.Dense(latents_dim)
]
dec_lays = [
tf.keras.layers.Dense(units=c*c*units, activation=tf.nn.relu),
tf.keras.layers.Reshape(target_shape=(c , c, units)),
tf.keras.layers.Conv2DTranspose(filters=units, kernel_size=3, strides=(2, 2), padding="SAME", activation='relu'),
tf.keras.layers.Conv2DTranspose(filters=units*3, kernel_size=3, strides=(2, 2), padding="SAME", activation='relu'),
# No activation
tf.keras.layers.Conv2DTranspose(filters=3, kernel_size=3, strides=(1, 1), padding="SAME")
]
###Output
_____no_output_____
###Markdown
Model definition
###Code
model_name = dataset_name+'ConvILTAAEssmi'
experiments_dir='experiments'+sep_local+model_name
from training.adversarial_basic.generative_adversarial.autoencoders.AAE import AAE as AE
inputs_shape=image_size
variables_params = \
[
{
'name': 'inference',
'inputs_shape':inputs_shape,
'outputs_shape':latents_dim,
'layers': enc_lays
}
,
{
'name': 'generative',
'inputs_shape':latents_dim,
'outputs_shape':inputs_shape,
'layers':dec_lays
}
]
from utils.data_and_files.file_utils import create_if_not_exist
_restore = os.path.join(experiments_dir, 'var_save_dir')
create_if_not_exist(_restore)
_restore
#to restore trained model, set filepath=_restore
from statistical.basic_adversarial_losses import \
create_inference_discriminator_real_losses, \
create_inference_discriminator_fake_losses, \
create_inference_generator_fake_losses, \
create_generative_discriminator_real_losses, \
create_generative_discriminator_fake_losses, \
    create_generative_generator_fake_losses
generative_inference_discriminator_losses = {
'inference_discriminator_real_outputs': create_inference_discriminator_real_losses,
'inference_discriminator_fake_outputs': create_inference_discriminator_fake_losses,
'inference_generator_fake_outputs': create_inference_generator_fake_losses,
    'generative_discriminator_real_outputs': create_generative_discriminator_real_losses,
'generative_discriminator_fake_outputs': create_generative_discriminator_fake_losses,
'generative_generator_fake_outputs': create_generative_generator_fake_losses,
}
ae = AE(
name= model_name,
latents_dim=latents_dim,
batch_size=batch_size,
variables_params=variables_params,
filepath=None
)
from evaluation.quantitive_metrics.structural_similarity import prepare_ssim_multiscale
from statistical.losses_utilities import similarity_to_distance
ae.compile(loss={'x_logits': similarity_to_distance(prepare_ssim_multiscale([ae.batch_size]+ae.get_inputs_shape()))})
from training.callbacks.sample_generation import SampleGeneration
from training.callbacks.save_model import ModelSaver
es = tf.keras.callbacks.EarlyStopping(
monitor='loss',
min_delta=1e-12,
patience=12,
verbose=1,
restore_best_weights=False
)
ms = ModelSaver(filepath=_restore)
csv_dir = os.path.join(experiments_dir, 'csv_dir')
create_if_not_exist(csv_dir)
csv_dir = os.path.join(csv_dir, ae.name+'.csv')
csv_log = tf.keras.callbacks.CSVLogger(csv_dir, append=True)
csv_dir
image_gen_dir = os.path.join(experiments_dir, 'image_gen_dir')
create_if_not_exist(image_gen_dir)
sg = SampleGeneration(latents_shape=latents_dim, filepath=image_gen_dir, gen_freq=5, save_img=True, gray_plot=False)
import numpy as np
ae.fit(
x=train_ds,
input_kw=None,
steps_per_epoch=int(1e4),
epochs=int(1e6),
verbose=2,
callbacks=[ es, ms, csv_log, sg],
workers=-1,
use_multiprocessing=True,
validation_data=test_ds,
validation_steps=int(1e4)
)
###Output
_____no_output_____
###Markdown
Model Evaluation inception_score
###Code
from evaluation.generativity_metrics.inception_metrics import inception_score
is_mean, is_sigma = inception_score(ae, tolerance_threshold=1e-6, max_iteration=200)
print(f'inception_score mean: {is_mean}, sigma: {is_sigma}')
###Output
_____no_output_____
###Markdown
Frechet_inception_distance
###Code
from evaluation.generativity_metrics.inception_metrics import frechet_inception_distance
fis_score = frechet_inception_distance(ae, training_generator, tolerance_threshold=1e-6, max_iteration=10, batch_size=32)
print(f'frechet inception distance: {fis_score}')
###Output
_____no_output_____
###Markdown
perceptual_path_length_score
###Code
from evaluation.generativity_metrics.perceptual_path_length import perceptual_path_length_score
ppl_mean_score = perceptual_path_length_score(ae, training_generator, tolerance_threshold=1e-6, max_iteration=200, batch_size=32)
print(f'perceptual path length score: {ppl_mean_score}')
###Output
_____no_output_____
###Markdown
precision score
###Code
from evaluation.generativity_metrics.precision_recall import precision_score
_precision_score = precision_score(ae, training_generator, tolerance_threshold=1e-6, max_iteration=200)
print(f'precision score: {_precision_score}')
###Output
_____no_output_____
###Markdown
recall score
###Code
from evaluation.generativity_metrics.precision_recall import recall_score
_recall_score = recall_score(ae, training_generator, tolerance_threshold=1e-6, max_iteration=200)
print(f'recall score: {_recall_score}')
###Output
_____no_output_____
###Markdown
Image Generation image reconstruction Training dataset
###Code
%load_ext autoreload
%autoreload 2
from training.generators.image_generation_testing import reconstruct_from_a_batch
from utils.data_and_files.file_utils import create_if_not_exist
save_dir = os.path.join(experiments_dir, 'reconstruct_training_images_like_a_batch_dir')
create_if_not_exist(save_dir)
reconstruct_from_a_batch(ae, training_generator, save_dir)
from utils.data_and_files.file_utils import create_if_not_exist
save_dir = os.path.join(experiments_dir, 'reconstruct_testing_images_like_a_batch_dir')
create_if_not_exist(save_dir)
reconstruct_from_a_batch(ae, testing_generator, save_dir)
###Output
_____no_output_____
###Markdown
with Randomness
###Code
from training.generators.image_generation_testing import generate_images_like_a_batch
from utils.data_and_files.file_utils import create_if_not_exist
save_dir = os.path.join(experiments_dir, 'generate_training_images_like_a_batch_dir')
create_if_not_exist(save_dir)
generate_images_like_a_batch(ae, training_generator, save_dir)
from utils.data_and_files.file_utils import create_if_not_exist
save_dir = os.path.join(experiments_dir, 'generate_testing_images_like_a_batch_dir')
create_if_not_exist(save_dir)
generate_images_like_a_batch(ae, testing_generator, save_dir)
###Output
_____no_output_____
###Markdown
Complete Randomness
###Code
from training.generators.image_generation_testing import generate_images_randomly
from utils.data_and_files.file_utils import create_if_not_exist
save_dir = os.path.join(experiments_dir, 'random_synthetic_dir')
create_if_not_exist(save_dir)
generate_images_randomly(ae, save_dir)
from training.generators.image_generation_testing import interpolate_a_batch
from utils.data_and_files.file_utils import create_if_not_exist
save_dir = os.path.join(experiments_dir, 'interpolate_dir')
create_if_not_exist(save_dir)
interpolate_a_batch(ae, testing_generator, save_dir)
###Output
100%|██████████| 15/15 [00:00<00:00, 19.90it/s]
|
contrib/1.口罩检测模型直播开发指导文档/mask_detect.ipynb | ###Markdown
Mask Detection Model Development Guide 1. Introduction Duration: 1 min 1.1 Goals of this document - Guide developers to use an already-labeled mask detection dataset and the one-click model deployment feature of Huawei Cloud [ModelArts](https://support.huaweicloud.com/modelarts/index.html) to train a mask detection model - Guide developers to deploy and test the model in a ModelArts Notebook with the [ModelArts SDK](https://support.huaweicloud.com/sdkreference-modelarts/modelarts_04_0002.html), for both image and video testing 1.2 What do you need? - A computer with internet access (Windows, Mac or Linux) - The Google Chrome browser 2. Preparation Duration: 10 mins To develop the mask detection model, complete the following preparation work. 2.1 ModelArts preparation Follow [this document](https://github.com/huaweicloud/ModelArts-Lab/tree/master/docs/ModelArts%E5%87%86%E5%A4%87%E5%B7%A5%E4%BD%9C) to complete the ModelArts preparation work. Note: running this case consumes cloud resources. Cloud resources are used first and billed afterwards; billing is not real-time but settled periodically, and account balance, resource packages and vouchers are all within the deduction scope. Check your account status promptly while using ModelArts so that your resources are not frozen because the account is in arrears or frozen, which would affect your use. 2.2 Download the mask detection dataset [Click this link](https://modelarts-labs-bj4.obs.cn-north-4.myhuaweicloud.com/case_zoo/mask_detect/datasets/mask_detect_datasets.zip) to download the mask detection dataset. You will get mask_detect_datasets.zip; unzip it to obtain mask_detect_datasets. Its train directory is the training set, containing images together with already-labeled object-detection xml label files; test is the test set, containing test images and a video. 2.3 Upload the dataset to OBS (1) Create an OBS bucket as shown in the figure below: click Create Bucket -> enter a bucket name (make sure it follows the naming rules) -> OK (2) Upload the dataset folder as shown below: click Upload -> Select Folder, choose the mask_detect_datasets directory unzipped earlier -> OK **At this point, the preparation work is complete.** 3. ModelArts one-click model deployment Once the dataset is ready, we can use the one-click model deployment feature of ModelArts to train the mask detection model. The training process has two main steps: dataset import and job parameter configuration. [Click this link](https://console.huaweicloud.com/modelarts/?region=cn-north-4/manage/dashboard) to open the ModelArts console for the Beijing-4 region, then follow the steps below to train the model. 3.1 Dataset import (1) Create a dataset as shown in the figure below (2) Specify the data input location following the steps in the figure below (3) Specify the data output location following the steps in the figure below (4) Select object detection as shown below and click Create (5) Wait for the dataset to be imported automatically. After you click Create, ModelArts automatically imports the labeled dataset from OBS. The import time depends on the dataset size; you can refresh the page to see the latest progress, as shown below, and wait for it to reach 100%. (6) Upload unlabeled images for labeling (optional) This step is optional and skipping it does not affect completing the case. The case provides an already-labeled dataset for training, so how do you add new, unlabeled images to the training? Only two steps are needed: upload the images to OBS and label them manually. In step (2) above we chose the OBS path "obs://mask-detect-0211/mask_detect_datasets/train/" as the data input location, so unlabeled images must be uploaded to the same location, following the steps in the figure below; note that at most 500 images can be uploaded at a time. After the upload finishes, go back to ModelArts, click the name of the dataset created in step (5) to enter the dataset details page as shown below, and click Start Labeling. Then click Unlabeled -> Synchronize Data Source and wait for the synchronization to complete; the images will then be displayed. Click an image to enter the data labeling page shown below; for detailed usage of the labeling tool, [click this link](https://support.huaweicloud.com/engineers-modelarts/modelarts_23_0012.html). After all images are labeled, click Back to Data Labeling Preview -> Back to Dataset Preview -> Back to Dataset List. The labeling of the new images is now finished, and you can click One-click Model Deployment -> Create Task again to start a new training task. 3.2 Job parameter configuration (1) Create a one-click model deployment task: click One-click Model Deployment -> Create Task as shown below (2) Configure the job parameters: modify the job name, select a preset algorithm and set the algorithm parameters as shown below. This case simply uses the default preset algorithm and default parameters, which already give good training results. (3) Specify the training output location as shown below and create a new train_output directory following the steps in the figure (4) Specify the job log path as shown below and create a new train_log directory following the steps in the figure (5) The parameter configuration is complete, as shown below; click Next, then Submit (6) Wait for the training job to finish; the total time is expected to be about 30 minutes. If you leave the page while waiting, you can re-enter the task page via Dataset -> One-click Model Deployment -> Task History, as shown below, to view the task details: **At this point, the model training task is complete.** Looking back at the whole training process, we only had to prepare a labeled training dataset, use the one-click model deployment feature of ModelArts and configure a few parameters; the backend then trains the model automatically, saves it to OBS and deploys it as an online service. The whole process requires zero code, which is very convenient. 4. Model testing After the one-click model deployment task finishes, an online service is created. This online service is the deployed model, and its prediction capability can be accessed through an online API. As shown in the figure below, find the service whose name starts with the "mask_detect_demo" prefix; this prefix is the name we gave when creating the one-click deployment task. Click the service name to enter the online service details page, then click Predict -> Upload -> Predict to run prediction on a single image. Because the online service is deployed on CPU by default, prediction is relatively slow; deploying on GPU would speed it up. 5.
Notebook interactive development and debugging To test the model further, we can use an advanced ModelArts feature, the interactive Notebook development and debugging tool, to run predictions on images and videos. For an introduction to Notebook, [click this link](https://support.huaweicloud.com/engineers-modelarts/modelarts_23_0033.html). 5.1 Create a Notebook We first need to create a Notebook; create it as shown in the figure below, then configure it as shown below, selecting GPU -> P100, click Next and Submit; it will be ready in about two minutes. Wait for the Notebook status to become "Running", then click the Notebook name to open it; you will see an empty workspace, as shown below: 5.2 Create an ipynb script We need to create an interactive development script (an ipynb script). Following the steps in the figure below, click "New" in the upper-right corner and choose TensorFlow 1.8. TensorFlow 1.8 is chosen because the preset algorithm selected in the earlier one-click model deployment step uses the TensorFlow 1.8 AI engine; if you chose a different preset algorithm, switch to the corresponding engine. The engine type used by each preset algorithm is listed [here](https://support.huaweicloud.com/engineers-modelarts/modelarts_23_0158.html). An empty ipynb script has now been created; following the steps in the figure below, click the file name "Untitled" in the upper-left corner and enter a name related to this case, such as "mask_detect". 5.3 Learn the basics of ipynb For the basic usage of ipynb, refer to the figure below. You can type print('Hello, ModelArts') and then click the "Run" button in the toolbar or press Ctrl+Enter to run it. From left to right, the toolbar buttons are: "save the current script", "create a new cell", "cut a cell" (also used to delete a cell), "copy a cell", "paste a cell", "move the current cell up", "move the current cell down", "stop executing the current cell", "restart the kernel of the current script" (the kernel is the runtime environment, i.e. the AI engine shown in the upper-right corner), "switch the type of the current cell" (Code, Markdown, and other types are supported), "open the command palette" (rarely used), and "convert the ipynb script into a python script". Good, at this point you have mastered the basics of ipynb, so let's start testing the mask detection model with code.
###Code
print('Hello, ModelArts!')
###Output
Hello, ModelArts!
###Markdown
5.4 Run the ipynb script The ipynb script of this case consists of 6 functional modules: (1) get the model id (2) download the test data (3) initialize the deployment environment (4) initialize the model (5) image testing (6) video testing. Now let's start executing the script. (1) Get the model id Before deploying the model, we need to specify which model to deploy. Following the steps in the figure below, find the service whose name starts with the "mask_detect_demo" prefix; this prefix is the name we gave when creating the one-click model deployment task. Click the model name to enter the model details page, find the id as shown below (this is the model id), copy it, and fill it into the test_model_id parameter below.
###Code
test_model_id = "92b7dfab-ce58-4aba-995c-778eac825d80"
###Output
_____no_output_____
###Markdown
(2) Download the test data This case provides some test data; simply run the cell below to download it into the Notebook
###Code
import os
import shutil
import moxing as mox
if not os.path.exists('mask_detect_datasets'):
mox.file.copy('s3://modelarts-labs-bj4/case_zoo/mask_detect/datasets/mask_detect_datasets.zip', './mask_detect_datasets.zip')
os.system("unzip mask_detect_datasets.zip")
os.system("rm mask_detect_datasets.zip")
###Output
INFO:root:Using MoXing-v1.14.1-ddfd6c9a
INFO:root:Using OBS-Python-SDK-3.1.2
###Markdown
(3) Initialize the deployment environment a) Note: after opening the ipynb, this step only needs to be run once, not multiple times b) How long this step takes depends on your network; watch the output of the current cell, and if you see "Successfully configure tensorflow local inference environment", the environment initialization succeeded
###Code
from modelarts.session import Session
from modelarts.model import Model
from modelarts.config.model_config import ServiceConfig
session = Session()
Model.configure_tf_infer_environ(device_type='GPU') # if the model was not trained with TensorFlow, comment out this line
###Output
Configuring tensorflow local inference environment ...
Successfully configure tensorflow local inference environment
###Markdown
(4) Initialize the model a) Note: after opening the ipynb, this step only needs to be run once, not multiple times b) How long this step takes depends on your network; watch the output of the current cell, and if you see "Successfully deployed the local service.", the model initialization succeeded
###Code
model_instance = Model(session, model_id=test_model_id)
configs = [ServiceConfig(model_id=model_instance.model_id, weight="100", specification="local", instance_count=1)]
predictor_instance = model_instance.deploy_predictor(configs=configs)
###Output
Service name is service-0224-181528
###Markdown
(5) Image testing
###Code
import cv2
import json
import numpy as np
import PIL.Image as pil_Image
def detect_img_and_show(img):
predict_result = predictor_instance.predict(data=img, data_type='images')
predict_result = json.loads(predict_result, encoding='utf8')
classes = predict_result.get('detection_classes', None)
boxes = predict_result.get('detection_boxes', None)
scores = predict_result.get('detection_scores', None)
if classes is not None:
img_copy = np.array(img.convert('RGB')).copy()
        for i in range(len(classes)): # draw a horizontal bounding box for each detection
box = boxes[i]
y1, x1, y2, x2 = [int(float(v)) for v in box]
cv2.rectangle(img_copy, (x1, y1), (x2, y2), (0, 255, 0), thickness=2)
text = classes[i] + '-%s' % str(float(scores[i]) * 100)[:4] + '%'
            cv2.putText(img_copy, text, (x1, y1 - 3), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 255, 0), 1) # draw the label text
return classes, img_copy
###Output
_____no_output_____
###Markdown
Start the image test
###Code
file_path = 'mask_detect_datasets/test/no_1.jpg'
classes, img_show = detect_img_and_show(pil_Image.open(file_path))
print(classes)
pil_Image.fromarray(img_show) # 显示图片
###Output
['no_mask']
###Markdown
Change the image path to test a different image
###Code
file_path = 'mask_detect_datasets/test/yes_no_5.jpg'
classes, img_show = detect_img_and_show(pil_Image.open(file_path))
print(classes)
pil_Image.fromarray(img_show) # 显示图片
###Output
['yes_mask', 'no_mask']
###Markdown
At this point you have completed the image testing; you can also upload your own images to test. Next, we will test on video. (6) Video testing
###Code
import ipywidgets
from IPython.display import clear_output, Image, display
# Define the video reading function
def read_video(input_video_path, video_start_time, video_end_time):
    cap = cv2.VideoCapture(input_video_path) # open the video
    total_frame_num = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) # total number of frames in the video
    fps = cap.get(cv2.CAP_PROP_FPS) # video frame rate
s_time_split = video_start_time.split(':')
start_time = int(s_time_split[0]) * 3600 + int(s_time_split[1]) * 60 + int(s_time_split[2])
e_time_split = video_end_time.split(':')
end_time = int(e_time_split[0]) * 3600 + int(e_time_split[1]) * 60 + int(e_time_split[2])
    start_frame_id = int(start_time * fps) # first frame to process
    end_frame_id = int(end_time * fps) # last frame to process
if end_frame_id > total_frame_num:
end_frame_id = total_frame_num - 1
    cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame_id) # seek to the start frame
return cap, start_frame_id, end_frame_id
input_video_path = 'mask_detect_datasets/test/yes_mask.mp4' # path to the input video
video_start_time = '00:00:00' # start time of the segment to process, in two-digit 'hh:mm:ss' format
video_end_time = '00:00:06' # end time of the segment to process, in two-digit 'hh:mm:ss' format
cap, start_frame_id, end_frame_id = read_video(input_video_path, video_start_time, video_end_time)
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) # video frame width
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) # video frame height
for frame_id in range(start_frame_id + 1, end_frame_id + 1):
clear_output(wait=True)
ret, frame = cap.read()
pil_frame = pil_Image.fromarray(frame[:, :, ::-1])
classes, img_show = detect_img_and_show(pil_frame)
    cv2.putText(img_show, 'id: ' + str(frame_id), (30, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2) # draw the frame id
img_show = img_show[:, :, ::-1]
display(Image(data=cv2.imencode('.jpg', img_show)[1]))
print('end')
###Output
_____no_output_____ |
module1-rnn-and-lstm/LS_DS_441_RNN_and_LSTM_Assignment.ipynb | ###Markdown
Assignment It is said that [infinite monkeys typing for an infinite amount of time](https://en.wikipedia.org/wiki/Infinite_monkey_theorem) will eventually type, among other things, the complete works of William Shakespeare. Let's see if we can get there a bit faster, with the power of Recurrent Neural Networks and LSTM. This text file contains the complete works of Shakespeare: https://www.gutenberg.org/files/100/100-0.txt Use it as training data for an RNN - you can keep it simple and train character level, and that is suggested as an initial approach. Then, use that trained RNN to generate Shakespearean-ish text. Your goal - a function that can take, as an argument, the size of text (e.g. number of characters or lines) to generate, and returns generated text of that size. Note - Shakespeare wrote an awful lot. It's OK, especially initially, to sample/use smaller data and parameters, so you can have a tighter feedback loop when you're trying to get things running. Then, once you've got a proof of concept - start pushing it more!
###Code
import pandas as pd
import numpy as np
text_file = open('/Users/ksmith/Documents/Code/DS1/Unit4/DS-Unit-4-Sprint-4-Deep-Learning/module1-rnn-and-lstm/www.gutenberg.org/files/100/100-0.txt')
lines = text_file.read().split('\n')
df = pd.DataFrame(lines)
import string
import re
table = str.maketrans('', '', string.punctuation)
# df[0] = df[0].str.lower() #Text is lowercase
# df[0] = df[0].str.translate(table) #Remove punctuation
df[0] = df[0].str.split(r'(\s+)')
# df = df[135:156826]
df = df[135:500]
flat_list = [item for sublist in df[0] for item in sublist]
# Based on "The Unreasonable Effectiveness of RNN" implementation
chars = list(set(flat_list)) # split and remove duplicate characters. convert to list.
num_chars = len(chars) # the number of unique characters
txt_data_size = len(flat_list)
print("unique characters : ", num_chars)
print("txt_data_size : ", txt_data_size)
# one hot encode
char_to_int = dict((c, i) for i, c in enumerate(chars)) # "enumerate" returns index and value. Convert it to a dictionary
int_to_char = dict((i, c) for i, c in enumerate(chars))
print(char_to_int)
print("----------------------------------------------------")
print(int_to_char)
print("----------------------------------------------------")
# integer encode input data
integer_encoded = [char_to_int[i] for i in flat_list] # "integer_encoded" is a list which has a sequence converted from an original data to integers.
print(integer_encoded)
print("----------------------------------------------------")
print("data length : ", len(integer_encoded))
# hyperparameters
iteration = 100
sequence_length = 40
batch_size = round((txt_data_size /sequence_length)+0.5) # = math.ceil
hidden_size = 50 # size of hidden layer of neurons.
learning_rate = 1e-1
# model parameters
W_xh = np.random.randn(hidden_size, num_chars)*0.01 # weight input -> hidden.
W_hh = np.random.randn(hidden_size, hidden_size)*0.01 # weight hidden -> hidden
W_hy = np.random.randn(num_chars, hidden_size)*0.01 # weight hidden -> output
b_h = np.zeros((hidden_size, 1)) # hidden bias
b_y = np.zeros((num_chars, 1)) # output bias
h_prev = np.zeros((hidden_size,1)) # h_(t-1)
def forwardprop(inputs, targets, h_prev):
# Since the RNN receives the sequence, the weights are not updated during one sequence.
xs, hs, ys, ps = {}, {}, {}, {} # dictionary
hs[-1] = np.copy(h_prev) # Copy previous hidden state vector to -1 key value.
loss = 0 # loss initialization
for t in range(len(inputs)): # t is a "time step" and is used as a key(dic).
xs[t] = np.zeros((num_chars,1))
xs[t][inputs[t]] = 1
hs[t] = np.tanh(np.dot(W_xh, xs[t]) + np.dot(W_hh, hs[t-1]) + b_h) # hidden state.
ys[t] = np.dot(W_hy, hs[t]) + b_y # unnormalized log probabilities for next chars
ps[t] = np.exp(ys[t]) / np.sum(np.exp(ys[t])) # probabilities for next chars.
# Softmax. -> The sum of probabilities is 1 even without the exp() function, but all of the elements are positive through the exp() function.
loss += -np.log(ps[t][targets[t],0]) # softmax (cross-entropy loss). Efficient and simple code
# y_class = np.zeros((num_chars, 1))
# y_class[targets[t]] =1
# loss += np.sum(y_class*(-np.log(ps[t]))) # softmax (cross-entropy loss)
return loss, ps, hs, xs
def backprop(ps, inputs, hs, xs):
dWxh, dWhh, dWhy = np.zeros_like(W_xh), np.zeros_like(W_hh), np.zeros_like(W_hy) # make all zero matrices.
dbh, dby = np.zeros_like(b_h), np.zeros_like(b_y)
dhnext = np.zeros_like(hs[0]) # (hidden_size,1)
# reversed
for t in reversed(range(len(inputs))):
dy = np.copy(ps[t]) # shape (num_chars,1). "dy" means "dloss/dy"
dy[targets[t]] -= 1 # backprop into y. After taking the soft max in the input vector, subtract 1 from the value of the element corresponding to the correct label.
dWhy += np.dot(dy, hs[t].T)
dby += dy
dh = np.dot(W_hy.T, dy) + dhnext # backprop into h.
dhraw = (1 - hs[t] * hs[t]) * dh # backprop through tanh nonlinearity #tanh'(x) = 1-tanh^2(x)
dbh += dhraw
dWxh += np.dot(dhraw, xs[t].T)
dWhh += np.dot(dhraw, hs[t-1].T)
dhnext = np.dot(W_hh.T, dhraw)
for dparam in [dWxh, dWhh, dWhy, dbh, dby]:
np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients.
return dWxh, dWhh, dWhy, dbh, dby
%%time
data_pointer = 0
# memory variables for Adagrad
mWxh, mWhh, mWhy = np.zeros_like(W_xh), np.zeros_like(W_hh), np.zeros_like(W_hy)
mbh, mby = np.zeros_like(b_h), np.zeros_like(b_y)
for i in range(iteration):
h_prev = np.zeros((hidden_size,1)) # reset RNN memory
data_pointer = 0 # go from start of data
for b in range(batch_size):
inputs = [char_to_int[ch] for ch in flat_list[data_pointer:data_pointer+sequence_length]]
targets = [char_to_int[ch] for ch in flat_list[data_pointer+1:data_pointer+sequence_length+1]] # t+1
if (data_pointer+sequence_length+1 >= len(flat_list) and b == batch_size-1): # processing of the last part of the input data.
# targets.append(char_to_int[df[0]]) # When the data doesn't fit, add the first char to the back.
targets.append(char_to_int[" "]) # When the data doesn't fit, add space(" ") to the back.
# forward
loss, ps, hs, xs = forwardprop(inputs, targets, h_prev)
# print(loss)
# backward
dWxh, dWhh, dWhy, dbh, dby = backprop(ps, inputs, hs, xs)
# perform parameter update with Adagrad
for param, dparam, mem in zip([W_xh, W_hh, W_hy, b_h, b_y],
[dWxh, dWhh, dWhy, dbh, dby],
[mWxh, mWhh, mWhy, mbh, mby]):
mem += dparam * dparam # elementwise
param += -learning_rate * dparam / np.sqrt(mem + 1e-8) # adagrad update
data_pointer += sequence_length # move data pointer
if i % 100 == 0:
print ('iter %d, loss: %f' % (i, loss)) # print progress
def predict(test_char, length):
x = np.zeros((num_chars, 1))
x[char_to_int[test_char]] = 1
ixes = []
h = np.zeros((hidden_size,1))
for t in range(length):
h = np.tanh(np.dot(W_xh, x) + np.dot(W_hh, h) + b_h)
y = np.dot(W_hy, h) + b_y
p = np.exp(y) / np.sum(np.exp(y))
ix = np.random.choice(range(num_chars), p=p.ravel()) # ravel -> rank0
# "ix" is a list of indexes selected according to the soft max probability.
x = np.zeros((num_chars, 1)) # init
x[ix] = 1
ixes.append(ix) # list
txt = test_char + ''.join(int_to_char[i] for i in ixes)
    print ('----\n %s \n----' % (txt, ))
    return txt  # return the generated text so it can also be used programmatically
predict('The', 100)
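# Added sketch: a small wrapper matching the assignment goal of a function that
# takes the amount of text to generate and returns text of that size. It assumes
# predict() returns the sampled string (see the return statement above) and that
# the seed is a token present in the training vocabulary.
def generate_text(num_tokens, seed='The'):
    return predict(seed, num_tokens)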
###Output
----
The worst churl, livery this too find.But from my earth tiger’s her young.But thou repair should live paws,And make thy age)Be long from from date. this, repair my nature’s changing gazeth, crime,forbid be succeeding breathe your sweet one keen false women’s souls away: flowers,Much truth with show, lease
----
|
Sentiment Analysis/SentimentAnalysis.ipynb | ###Markdown
Sentiment Analysis with Python
###Code
#Loading the data to the variable as DataFrame
#importing pandas library
import pandas as pd
#import CountVectorizer from sklearn
from sklearn.feature_extraction.text import CountVectorizer
count=CountVectorizer()
data=pd.read_csv("Train.csv")
data.head()
#finding the positive and negative text in Data
pos=data[data['label']==1]
neg=data[data['label']==0]
print("Positive text \n",pos.head())
print("\nNegative text \n",neg.head())
#Plotting the Postive vs Negative in piechart.
#Importing matplotlib library to plot pie chart.
import matplotlib.pyplot as plt
fig=plt.figure(figsize=(5,5))
temp=[pos['label'].count(),neg['label'].count()]
plt.pie(temp,labels=["Positive","Negative"],autopct ='%2.1f%%',shadow = True,startangle = 50,explode=(0, 0.3))
plt.title('Positive vs Negative')
#importing re library
import re
#Defining preprocessing function to process the data
def preprocess(text):
text=re.sub('<[^>]*>','',text)
emoji=re.findall('(?::|;|=)(?:-)?(?:\)|\(|D|P)',text)
text=re.sub('[\W]+',' ',text.lower()) +' '.join(emoji).replace('-','')
return text
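# Added illustration: preprocess() strips HTML tags, lowercases the text, collapses
# punctuation, and re-appends any emoticons that were found.
print(preprocess("I <b>loved</b> this movie!!! :)"))   # -> "i loved this movie :)"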
#Applying the function preprocess on the data
data['text']=data['text'].apply(preprocess)
#Displaying the dataframe after applying the preprocessing.
data.head()
#Defining a function called tokenizer which splits the sentence
def tokenizer(text):
return text.split()
tokenizer("He was joyful as he was working in good environment")
#Importing stemmer function from NLTK library
from nltk.stem.porter import PorterStemmer
porter=PorterStemmer()
#Defining function for Tokenizer porter
def tokenizer_porter(text):
return [porter.stem(word) for word in text.split()]
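# Added illustration: the Porter stemmer reduces words to their stems, e.g.
# roughly ['runner', 'like', 'run', 'and', 'thu', 'they', 'run'] for the text below.
tokenizer_porter("runners like running and thus they run")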
#Importing NLTK library.
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
stop=stopwords.words('english')
#Importing Word cloud
from wordcloud import WordCloud
#getting positive and negative data
positive_data = data[ data['label'] == 1]
positive_data = positive_data['text']
negative_data = data[data['label'] == 0]
negative_data= negative_data['text']
#Defining the function to plot the data in wordcloud
def plot_wordcloud(data, color = 'white'):
words = ' '.join(data)
clean_word = " ".join([word for word in words.split() if(word!='movie' and word!='film')])
wordcloud = WordCloud(stopwords=stop,background_color=color,width=2500,height=2000).generate(clean_word)
plt.figure(1,figsize=(10, 7))
plt.imshow(wordcloud)
plt.axis('off')
plt.show()
#Printing the positive data in wordcloud
print("Positive words")
plot_wordcloud(positive_data,'white')
#Printing the negative data in wordcloud
print("Negative words")
plot_wordcloud(negative_data,'black')
#importing TfidfVectorizer from sklearn for feature extraction.
from sklearn.feature_extraction.text import TfidfVectorizer
tfid=TfidfVectorizer(strip_accents=None,preprocessor=None,lowercase=False,use_idf=True,norm='l2',tokenizer=tokenizer_porter,smooth_idf=True)
y=data.label.values
#scaling the data
x=tfid.fit_transform(data.text)
#splitting the train and test split using train_test_split function of sklearn
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test=train_test_split(x,y,random_state=1,test_size=0.5,shuffle=False)
#Importing LogisticRegressionCV from sklearn library
from sklearn.linear_model import LogisticRegressionCV
model=LogisticRegressionCV(cv=6,scoring='accuracy',random_state=0,n_jobs=-1,verbose=3,max_iter=500).fit(X_train,y_train)
y_pred = model.predict(X_test)
#Importing metrics from sklearn to calculate accuracy
from sklearn import metrics
# Accuracy of our built model
print("Accuracy of our model:",metrics.accuracy_score(y_test, y_pred))
###Output
_____no_output_____
###Markdown
Import Libraries
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
###Output
_____no_output_____
###Markdown
Load Data
###Code
csv = 'clean_tweet.csv'
my_df = pd.read_csv(csv,index_col=0)
my_df.head()
my_df.dropna(inplace=True)
my_df.reset_index(drop=True,inplace=True)
my_df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1596041 entries, 0 to 1596040
Data columns (total 2 columns):
text 1596041 non-null object
target 1596041 non-null int64
dtypes: int64(1), object(1)
memory usage: 24.4+ MB
###Markdown
Defining Input and Output for the Model
###Code
x = my_df.text
y = my_df.target
###Output
_____no_output_____
###Markdown
Train, Validation and Test Split (the code below holds out 2% of the data and splits it evenly into validation and test sets, i.e. roughly 98% / 1% / 1%)
###Code
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.metrics import accuracy_score
from time import time
SEED = 2000
x_train, x_validation_and_test, y_train, y_validation_and_test = train_test_split(x, y, test_size=.02, random_state=SEED)
x_validation, x_test, y_validation, y_test = train_test_split(x_validation_and_test, y_validation_and_test, test_size=.5, random_state=SEED)
def accuracy_summary(pipeline, x_train, y_train, x_test, y_test):
t0 = time()
sentiment_fit = pipeline.fit(x_train, y_train)
y_pred = sentiment_fit.predict(x_test)
train_test_time = time() - t0
accuracy = accuracy_score(y_test, y_pred)
print("accuracy score: {0:.2f}%".format(accuracy*100))
print("train and test time: {0:.2f}s".format(train_test_time))
print("-"*80)
return accuracy, train_test_time, sentiment_fit
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
cvec = CountVectorizer()
lr = LogisticRegression()
n_features = np.arange(10000,100001,10000)
def nfeature_accuracy_checker(vectorizer=cvec, n_features=n_features, stop_words=None, ngram_range=(1, 1), classifier=lr):
result = []
print (classifier)
print ("\n")
for n in n_features:
vectorizer.set_params(stop_words=stop_words, max_features=n, ngram_range=ngram_range)
checker_pipeline = Pipeline([
('vectorizer', vectorizer),
('classifier', classifier)
])
print("Validation result for {} features".format(n))
nfeature_accuracy,tt_time,_ = accuracy_summary(checker_pipeline, x_train, y_train, x_validation, y_validation)
result.append((n,nfeature_accuracy,tt_time))
return result
###Output
_____no_output_____
###Markdown
TFIDF Vectorizer TF-IDF is another way to convert textual data to a numeric form and is short for Term Frequency-Inverse Document Frequency. The vector value it yields is the product of these two terms: TF and IDF. Let's first look at Term Frequency. We have already looked at term frequency above with the count vectorizer, but this time, we need one more step to calculate the relative frequency. Let's say we have two documents in total as below. 1. I love dogs 2. I hate dogs and knitting Relative term frequency is calculated for each term within each document as below.$${TF(t,d)} = \frac {number\ of\ times\ term(t)\ appears\ in\ document(d)}{total\ number\ of\ terms\ in\ document(d)}$$ For example, if we calculate relative term frequency for 'I' in both document 1 and document 2, it will be as below.$${TF('I',d1)} = \frac {1}{3} \approx {0.33}$$$${TF('I',d2)} = \frac {1}{5} = {0.2}$$ Next, we need to get Inverse Document Frequency, which measures how important a word is to differentiate each document, by following the calculation below.$${IDF(t,D)} = \log \Big(\frac {total\ number\ of\ documents(D)}{number\ of\ documents\ with\ the\ term(t)\ in\ it}\Big)$$ If we calculate inverse document frequency for 'I',$${IDF('I',D)} = \log \Big(\frac {2}{2}\Big) = {0}$$ Once we have the values for TF and IDF, we can calculate TFIDF as below.$${TFIDF(t,d,D)} = {TF(t,d)}\cdot{IDF(t,D)}$$ Following the case of our example, TFIDF for the term 'I' in both documents will be as below.$${TFIDF('I',d1,D)} = {TF('I',d1)}\cdot{IDF('I',D)} = {0.33}\times{0} = {0}$$$${TFIDF('I',d2,D)} = {TF('I',d2)}\cdot{IDF('I',D)} = {0.2}\times{0} = {0}$$ As you can see, the term 'I' appeared equally in both documents, and its TFIDF score is 0, which means the term is not really informative in differentiating the documents. The rest is the same as the count vectorizer: the TFIDF vectorizer will calculate these scores for the terms in the documents and convert the textual data into a numeric form. How do Uni-Gram, Bi-Gram, and Tri-Gram tokens work as the vocabulary of the dataset?
###Code
from sklearn.feature_extraction.text import TfidfVectorizer
tvec = TfidfVectorizer()
ttvec = TfidfVectorizer(ngram_range=(1,3))
ttvec.fit(["an apple a day keeps the doctor away"])
print(ttvec.vocabulary_)
###Output
{'away': 6, 'day': 7, 'keeps': 12, 'the': 15, 'doctor': 10, 'the doctor': 16, 'apple day keeps': 5, 'an apple day': 2, 'apple day': 4, 'an': 0, 'keeps the doctor': 14, 'keeps the': 13, 'apple': 3, 'doctor away': 11, 'day keeps': 8, 'an apple': 1, 'day keeps the': 9, 'the doctor away': 17}
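###Markdown
To double-check the TF and IDF formulas above, the scores for the two toy documents can be computed directly in a few lines of plain Python. This is a small illustrative sketch: note that scikit-learn's TfidfVectorizer applies a smoothed IDF and L2 normalization by default, so its values differ slightly from this textbook definition.
###Code
import math
# The two toy documents from the explanation above
docs = ["I love dogs".lower().split(),
        "I hate dogs and knitting".lower().split()]
def tf(term, doc):
    # relative term frequency within one document
    return doc.count(term) / len(doc)
def idf(term, docs):
    # log of (number of documents / number of documents containing the term)
    return math.log(len(docs) / sum(1 for d in docs if term in d))
for term in ["i", "dogs", "love"]:
    for i, doc in enumerate(docs, start=1):
        print("TFIDF(%r, d%d) = %.3f" % (term, i, tf(term, doc) * idf(term, docs)))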
###Markdown
Evaluating what Feature Size (Vocabulary Size) and Token Method (Uni-Gram, Bi-Gram, Tri-Gram) work best on the dataset
###Code
%%time
print("RESULT FOR UNIGRAM WITH STOP WORDS (Tfidf)\n")
feature_result_ugt = nfeature_accuracy_checker(vectorizer=tvec)
%%time
print("RESULT FOR BIGRAM WITH STOP WORDS (Tfidf)\n")
feature_result_bgt = nfeature_accuracy_checker(vectorizer=tvec,ngram_range=(1, 2))
%%time
print("RESULT FOR TRIGRAM WITH STOP WORDS (Tfidf)\n")
feature_result_tgt = nfeature_accuracy_checker(vectorizer=tvec,ngram_range=(1, 3))
###Output
RESULT FOR TRIGRAM WITH STOP WORDS (Tfidf)
LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True,
intercept_scaling=1, max_iter=100, multi_class='warn',
n_jobs=None, penalty='l2', random_state=None, solver='warn',
tol=0.0001, verbose=0, warm_start=False)
Validation result for 10000 features
###Markdown
It seems like the TFIDF vectorizer yields better results when fed to logistic regression. Let's plot the unigram, bigram and trigram TFIDF results together.
###Code
nfeatures_plot_tgt = pd.DataFrame(feature_result_tgt,columns=['nfeatures','validation_accuracy','train_test_time'])
nfeatures_plot_bgt = pd.DataFrame(feature_result_bgt,columns=['nfeatures','validation_accuracy','train_test_time'])
nfeatures_plot_ugt = pd.DataFrame(feature_result_ugt,columns=['nfeatures','validation_accuracy','train_test_time'])
plt.figure(figsize=(8,6))
plt.plot(nfeatures_plot_tgt.nfeatures, nfeatures_plot_tgt.validation_accuracy,label='trigram tfidf vectorizer',color='royalblue')
plt.plot(nfeatures_plot_bgt.nfeatures, nfeatures_plot_bgt.validation_accuracy,label='bigram tfidf vectorizer',color='orangered')
plt.plot(nfeatures_plot_ugt.nfeatures, nfeatures_plot_ugt.validation_accuracy, label='unigram tfidf vectorizer',color='gold')
plt.title("N-gram(1~3) test result : Accuracy")
plt.xlabel("Number of features")
plt.ylabel("Validation set accuracy")
plt.legend()
###Output
_____no_output_____
###Markdown
From the chart above, we can see that including bigrams and trigrams boosts model performance for both the count vectorizer and the TFIDF vectorizer. And in every case, from unigram to trigram, TFIDF yields better results than the count vectorizer. Model Training
###Code
names = ["Logistic Regression"]
classifiers = [
LogisticRegression()
]
zipped_clf = zip(names,classifiers)
tvec = TfidfVectorizer()
def classifier_comparator(vectorizer=tvec, n_features=10000, stop_words=None, ngram_range=(1, 1), classifier=zipped_clf):
result = []
model= None
vectorizer.set_params(stop_words=stop_words, max_features=n_features, ngram_range=ngram_range)
for n,c in classifier:
checker_pipeline = Pipeline([
('vectorizer', vectorizer),
('classifier', c)
])
print("Validation result for {}".format(n))
print(c)
clf_accuracy,tt_time, model = accuracy_summary(checker_pipeline, x_train, y_train, x_validation, y_validation)
result.append((n,clf_accuracy,tt_time))
return result, model
%%time
trigram_result, model = classifier_comparator(n_features=100000,ngram_range=(1,3))
print(trigram_result)
###Output
[('Logistic Regression', 0.8281954887218045, 233.4237082004547)]
###Markdown
Testing the trained model on some other tweets as a sanity check
###Code
sentences = ["The weather is not very well today. i wish it was raining",
"a real bad example of customer engagement",
"glad to be part of the venture"]
predictions = []
predictions_conf = []
for s in sentences:
predictions.append(model.predict(pd.Series(s)))
predictions_conf.append(model.predict_proba(pd.Series(s)))
print(model.classes_)
#print(predictions, predictions_conf)
# print("%s %s %s %s")
i = 1
for p, pc in zip(predictions, predictions_conf):
print("Tweet {} is classified as {} with confidence (0,1) {}".format(i, p, pc))
i = i+1
###Output
[0 1]
Tweet 1 is classified as [0] with confidence (0,1) [[0.98841197 0.01158803]]
Tweet 2 is classified as [0] with confidence (0,1) [[0.57675026 0.42324974]]
Tweet 3 is classified as [1] with confidence (0,1) [[0.03883742 0.96116258]]
|
Classification/2) Titanic/Titanic Dataset Solution.ipynb | ###Markdown
Importing data from csv files.
###Code
# The dataset is taken from Kaggle, so it is split across 3 files.
import pandas as pd
import numpy as np
train=pd.read_csv(r'train.csv')
test=pd.read_csv(r'test.csv')
gender_submission=pd.read_csv(r'gender_submission.csv')
###Output
_____no_output_____
###Markdown
Pre-Processing testing dataset.
###Code
# The gender_submission file contains the labels for the test set, so merge that data into the test DataFrame.
test.insert(1, "Survived", gender_submission['Survived'], True)
test.head()
train.head()
#Taking the columns which are needed for classification and ignoring the columns like PassengerID,Name,Ticket,Fare and Cabin
train=train[['Survived','Pclass','Sex','SibSp','Parch','Embarked']]
test=test[['Survived','Pclass','Sex','SibSp','Parch','Embarked']]
#Dropping the Null value rows and performing one hot encoding
train=train.dropna()
train=pd.get_dummies(train)
test=pd.get_dummies(test)
###Output
_____no_output_____
###Markdown
Pre-Processed Dataset.
###Code
train.head()
test.head()
###Output
_____no_output_____
###Markdown
Making y_train and y_test from train and test DataFrame
###Code
y_train=train['Survived']
train=train.drop(['Survived'], axis=1)  # reassign so the label is not kept as a feature
y_test=test['Survived']
test=test.drop(['Survived'], axis=1)
print(len(y_train))
print(len(y_test))
###Output
891
418
###Markdown
Accuracy Function
###Code
def accuracy(y_pred,y_test):
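    # Report accuracy, the confusion matrix and the macro-averaged F1 score, then plot a
    # heatmap of the correlation between predicted and true labels.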
from sklearn.metrics import accuracy_score,confusion_matrix, f1_score
import seaborn as sns
import matplotlib.pyplot as plt
print("accuracy score:",accuracy_score(y_test, y_pred))
print("confusion matrix:\n",confusion_matrix(y_test, y_pred))
print("f1 score:",f1_score(y_test, y_pred, average='macro'))
# using heatmat to plot accuracy
a=np.array(y_pred).reshape(-1,1)
b=np.array(y_test).reshape(-1,1)
df=pd.DataFrame(np.append(a,b,axis=1))
df.columns=["predicted_vals","true_vals"]
cor = df.corr()
sns.heatmap(cor)
#to use scatter plot uncomment the below given code
#plt.scatter(y_test,y_pred)
plt.show()
###Output
_____no_output_____
###Markdown
1) Using RandomForestClassifier from sklearn.ensemble to generate, fit the model and predict the output.
###Code
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier(n_estimators=100, bootstrap = True, max_features = 'sqrt')
model.fit(train,y_train)
y_pred_randF= model.predict(test)
y_pred_randF=y_pred_randF.tolist()
###Output
_____no_output_____
###Markdown
2) Using Naive Bayes (GaussianNB) from sklearn.naive_bayes to generate, fit the model and predict the output.
###Code
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
y_pred_naiveBayes = gnb.fit(train, y_train).predict(test)
###Output
_____no_output_____
###Markdown
3) Using Support Vector Machine from sklearn.svm to generate, fit the model and predict the output.
###Code
from sklearn import svm
clf = svm.SVC()
clf.fit(train, y_train)
y_pred_SVM=clf.predict(test)
###Output
_____no_output_____
###Markdown
4) Using Stochastic Gradient Descent from sklearn.linear_model to generate, fit the model and predict the output.
###Code
from sklearn.linear_model import SGDClassifier
clf = SGDClassifier(loss="hinge", penalty="l2", max_iter=5)
clf.fit(train, y_train)
SGDClassifier(max_iter=5)
y_pred_SGD=clf.predict(test)
###Output
C:\Users\Dell\Anaconda3\lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:557: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit.
ConvergenceWarning)
###Markdown
5) Using K-Nearest Neighbours from sklearn.neighbors to generate, fit the model and predict the output.
###Code
from sklearn.neighbors import KNeighborsClassifier
neigh = KNeighborsClassifier(n_neighbors=2)
neigh.fit(train,y_train)
y_pred_KNN=neigh.predict(test)
print("Random Forest Accuracy")
accuracy(y_pred_randF,y_test)
print("\nNaive Bayes Accuracy")
accuracy(y_pred_naiveBayes,y_test)
print("\nSupport Vector Machine Accuracy")
accuracy(y_pred_SVM,y_test)
print("\nStochastic Gradient Decent Accuracy")
accuracy(y_pred_SGD,y_test)
print("\n KNN Accuracy")
accuracy(y_pred_KNN,y_test)
###Output
Random Forest Accuracy
accuracy score: 1.0
confusion matrix:
[[266 0]
[ 0 152]]
f1 score: 1.0
|
notebooks/03-oscars_gender_viz.ipynb | ###Markdown
The Oscars - Dalton Hahn (2762306) Shakespearean Play Data: https://www.kaggle.com/kingburrito666/shakespeare-plays/download Data Visualization and Storytelling: "What is the ratio/trend in 'airtime' that Shakespeare gives to men vs. women?"
###Code
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
import numpy as np
import datetime as dt
import seaborn as sns
import matplotlib.pyplot as plt
import math
from statistics import mean, stdev
df = pd.read_csv("../data/processed/genders.csv")
df.head()
###Output
_____no_output_____
###Markdown
Important Notes* Gender column (0 = Male, 1 = Female)
###Code
unique_plays = df["Play"].unique()
print(unique_plays)
# NOTE, in my gender_word_counts dictionary, the tuple of counts will be (MALE, FEMALE)
gender_word_counts = dict.fromkeys(unique_plays, (0,0))
for index,row in df.iterrows():
if row["Gender"] == 0:
gen_tuple = gender_word_counts.get(row["Play"])
new_male_val = gen_tuple[0] + len(row["PlayerLine"].split())
fin_tuple = (new_male_val, gen_tuple[1])
else:
gen_tuple = gender_word_counts.get(row["Play"])
new_female_val = gen_tuple[1] + len(row["PlayerLine"].split())
fin_tuple = (gen_tuple[0], new_female_val)
gender_word_counts[row["Play"]] = fin_tuple
print(gender_word_counts)
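# Optional vectorized sketch of the same aggregation (assumes the same df columns as above):
# count the words in each line once, then sum them per (Play, Gender) with groupby.
words_per_play = (df.assign(n_words=df["PlayerLine"].str.split().str.len())
                    .groupby(["Play", "Gender"])["n_words"]
                    .sum()
                    .unstack(fill_value=0))
print(words_per_play.head())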
# INSPIRATION: https://python-graph-gallery.com/11-grouped-barplot/
# set width of bar
barWidth = 0.4
# set height of bar
male_bars = []
female_bars = []
for key,val in gender_word_counts.items():
male_bars.append(val[0])
female_bars.append(val[1])
print(male_bars)
print(female_bars)
# Set position of bar on X axis
r1 = np.arange(len(male_bars))
r2 = [x + barWidth for x in r1]
# Make the plot
plt.figure(figsize=(25,8))
plt.bar(r1, male_bars, color='#886bff', width=barWidth, edgecolor='white', label='Male Words')
plt.bar(r2, female_bars, color='#ffb0fe', width=barWidth, edgecolor='white', label='Female Words')
# Add xticks on the middle of the group bars
plt.xlabel('Play', fontweight='bold')
plt.xticks([r + barWidth for r in range(len(male_bars))], list(gender_word_counts.keys()), rotation=90)
# Create legend & Show graphic
plt.legend()
plt.show()
###Output
_____no_output_____ |
notebook/L7-regularizedRegression.06-Linear-Regression.ipynb | ###Markdown
Regularized (Linear) Regression Adapted from: * https://colab.research.google.com/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/05.06-Linear-Regression.ipynb#scrollTo=TNA3vumSulUH
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline  # needed for the make_pipeline(...) calls below
rng = np.random.RandomState(1)
x = 10 * rng.rand(50)
y = np.sin(x) + 0.1 * rng.randn(50)
plt.scatter(x, y)
xfit = np.linspace(0, 10, 1000)
from sklearn.base import BaseEstimator, TransformerMixin
class GaussianFeatures(BaseEstimator, TransformerMixin):
"""Uniformly spaced Gaussian features for one-dimensional input"""
def __init__(self, N, width_factor=2.0):
self.N = N
self.width_factor = width_factor
@staticmethod
def _gauss_basis(x, y, width, axis=None):
arg = (x - y) / width
return np.exp(-0.5 * np.sum(arg ** 2, axis))
def fit(self, X, y=None):
# create N centers spread along the data range
self.centers_ = np.linspace(X.min(), X.max(), self.N)
self.width_ = self.width_factor * (self.centers_[1] - self.centers_[0])
return self
def transform(self, X):
return self._gauss_basis(X[:, :, np.newaxis], self.centers_,
self.width_, axis=1)
gauss_model = make_pipeline(GaussianFeatures(20),
LinearRegression())
gauss_model.fit(x[:, np.newaxis], y)
yfit = gauss_model.predict(xfit[:, np.newaxis])
plt.scatter(x, y)
plt.plot(xfit, yfit)
plt.xlim(0, 10);
model = make_pipeline(GaussianFeatures(40),
LinearRegression())
model.fit(x[:, np.newaxis], y)
plt.scatter(x, y)
plt.plot(xfit, model.predict(xfit[:, np.newaxis]))
plt.xlim(0, 10)
plt.ylim(-1.5, 1.5);
###Output
_____no_output_____
###Markdown
With the data projected onto this larger Gaussian basis, the model has far too much flexibility and goes to extreme values between locations where it is constrained by data. We can see the reason for this if we plot the coefficients of the Gaussian bases with respect to their locations:
###Code
def basis_plot(model, title=None):
fig, ax = plt.subplots(2, sharex=True)
model.fit(x[:, np.newaxis], y)
ax[0].scatter(x, y)
ax[0].plot(xfit, model.predict(xfit[:, np.newaxis]))
ax[0].set(xlabel='x', ylabel='y', ylim=(-1.5, 1.5))
if title:
ax[0].set_title(title)
ax[1].plot(model.steps[0][1].centers_,
model.steps[1][1].coef_)
ax[1].set(xlabel='basis location',
ylabel='coefficient',
xlim=(0, 10))
model = make_pipeline(GaussianFeatures(40), LinearRegression())
basis_plot(model)
###Output
_____no_output_____
###Markdown
The lower panel of this figure shows the amplitude of the basis function at each location.This is typical over-fitting behavior when basis functions overlap: the coefficients of adjacent basis functions blow up and cancel each other out.We know that such behavior is problematic, and it would be nice if we could limit such spikes expliticly in the model by penalizing large values of the model parameters.Such a penalty is known as *regularization*, and comes in several forms. Ridge regression ($L_2$ Regularization)Perhaps the most common form of regularization is known as *ridge regression* or $L_2$ *regularization*, sometimes also called *Tikhonov regularization*.This proceeds by penalizing the sum of squares (2-norms) of the model coefficients; in this case, the penalty on the model fit would be $$P = \alpha\sum_{n=1}^N \theta_n^2$$where $\alpha$ is a free parameter that controls the strength of the penalty.This type of penalized model is built into Scikit-Learn with the ``Ridge`` estimator:
###Code
from sklearn.linear_model import Ridge
model = make_pipeline(GaussianFeatures(40), Ridge(alpha=0.1))
basis_plot(model, title='Ridge Regression')
###Output
_____no_output_____
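###Markdown
As a side note, the penalty strength $\alpha$ does not have to be picked by hand: it can be chosen by cross-validation. A minimal sketch using scikit-learn's ``RidgeCV`` with the same Gaussian basis as above:
###Code
# Sketch: choose alpha by cross-validation over a log-spaced grid of candidates.
from sklearn.linear_model import RidgeCV
cv_alphas = np.logspace(-4, 1, 20)
cv_model = make_pipeline(GaussianFeatures(40), RidgeCV(alphas=cv_alphas))
cv_model.fit(x[:, np.newaxis], y)
print("alpha selected by cross-validation:", cv_model.steps[1][1].alpha_)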
###Markdown
The $\alpha$ parameter is essentially a knob controlling the complexity of the resulting model.In the limit $\alpha \to 0$, we recover the standard linear regression result; in the limit $\alpha \to \infty$, all model responses will be suppressed.One advantage of ridge regression in particular is that it can be computed very efficiently—at hardly more computational cost than the original linear regression model. Lasso regression ($L_1$ regularization)Another very common type of regularization is known as lasso, and involves penalizing the sum of absolute values (1-norms) of regression coefficients:$$P = \alpha\sum_{n=1}^N |\theta_n|$$Though this is conceptually very similar to ridge regression, the results can differ surprisingly: for example, due to geometric reasons lasso regression tends to favor *sparse models* where possible: that is, it preferentially sets model coefficients to exactly zero.We can see this behavior in duplicating the ridge regression figure, but using L1-normalized coefficients:
###Code
from sklearn.linear_model import Lasso
model = make_pipeline(GaussianFeatures(40), Lasso(alpha=0.001))
basis_plot(model, title='Lasso Regression')
###Output
_____no_output_____ |
notebooks/sparse-gemm/MCMM-simple.ipynb | ###Markdown
Initialize Sliders & default parameters
###Code
# Initial values
# Most cells below are modified from spMspM_pruned
M = 6 #defines M rank
N = 6 #defines N rank
K = 6 #defines K rank
density = [0.5, 1.0] #defines portion NNZ for [A, B]
interval = 5 #defines max value in A or B
seed = 10 #defines random seed
sample_rate = 0.4 # dictates sample threshold for portion of A
def set_params(rank_M, rank_N, rank_K, tensor_density, uniform_sample_rate, max_value, rand_seed):
global M
global N
global K
global density
global seed
global sample_rate
global interval
M = rank_M
N = rank_N
K = rank_K
density = tensor_density[::-1]
seed = rand_seed
sample_rate = uniform_sample_rate
interval = max_value
w = interactive(set_params,
rank_M=widgets.IntSlider(min=2, max=10, step=1, value=M),
rank_N=widgets.IntSlider(min=2, max=10, step=1, value=N),
rank_K=widgets.IntSlider(min=2, max=10, step=1, value=K),
tensor_density=widgets.FloatRangeSlider(min=0.0, max=1.0, step=0.05, value=[0.5, 1.0]),
uniform_sample_rate=widgets.FloatSlider(min=0, max=1.0, step=0.05, value=sample_rate),
max_value=widgets.IntSlider(min=0, max=20, step=1, value=interval),
rand_seed=widgets.IntSlider(min=0, max=100, step=1, value=seed))
display(w)
###Output
_____no_output_____
###Markdown
Input Tensors
###Code
a = Tensor.fromRandom(["M", "K"], [M, K], density, interval, seed=seed)
#a.setName("A")
a.setColor("blue")
displayTensor(a)
# Create swapped rank version of a
a_swapped = a.swapRanks()
#a_swapped.setName("A_swapped")
displayTensor(a_swapped)
b = Tensor.fromRandom(["N", "K"], [N, K], density, interval, seed=2*seed)
#b.setName("B")
b.setColor("green")
displayTensor(b)
# Create swapped rank version of b
b_swapped = b.swapRanks()
#b_swapped.setName("B_swapped")
displayTensor(b_swapped)
###Output
_____no_output_____
###Markdown
Reference Output
###Code
z_validate = Tensor(rank_ids=["M", "N"], shape=[M, N])
a_m = a.getRoot()
b_n = b.getRoot()
z_m = z_validate.getRoot()
for m, (z_n, a_k) in z_m << a_m:
for n, (z_ref, b_k) in z_n << b_n:
for k, (a_val, b_val) in a_k & b_k:
z_ref += a_val * b_val
displayTensor(z_validate)
def compareZ(z):
n = 0
total = 0
z1 = z_validate.getRoot()
z2 = z.getRoot()
for m, (ab_n, z1_n, z2_n) in z1 | z2:
for n, (ab_val, z1_val, z2_val) in z1_n | z2_n:
# Unpack the values to use abs (arggh)
z1_val = Payload.get(z1_val)
z2_val = Payload.get(z2_val)
n += 1
total += abs(z1_val-z2_val)
return total/n
###Output
_____no_output_____
###Markdown
Prune Functions and helper functions
###Code
# MCMM uniform random sampling
class UniformRandomPrune():
def __init__(self, sample_rate=0.5):
self.sample_rate = sample_rate
def __call__(self, n, c, p):
sample = random.uniform(0,1)
result = (sample < self.sample_rate)
print(f"Preserve = {result}")
return result
# MCMM sample against number of elements
class RandomSizePrune():
def __init__(self, max_size=4):
self.max_size = max_size
def __call__(self, n, c, p):
size = p.countValues()
sample = random.uniform(0, 1)
result = (sample < (size / self.max_size))
print(f"Preserve = {result}")
return result
# a cute recursive helper for getting total absolute magnitude of Fiber of arbitrary rank
# this is modeled after countValues(), but I haven't tested it super thoroughly
# is this a helpful thing to add as a Fiber method? useful for computing matrix norms and stuff
def get_magnitude(p):
mag = 0
for el in p.payloads:
if Payload.contains(el, Fiber):
mag += get_magnitude(el)
else:
mag += np.absolute(el.v()) if not Payload.isEmpty(el) else 0
return mag
# MCMM weight sample by sum of elements
class RandomMagnitudePrune():
def __init__(self, max_mag=6):
#as-written, max_mag and mag should be ints, not payloads
self.max_mag = max_mag
def __call__(self, n, c, p):
magnitude = get_magnitude(p)
sample = random.uniform(0, 1)
result = (sample < (magnitude / self.max_mag))
print(f"Preserve = {result}")
return result
###Output
_____no_output_____
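###Markdown
Aside: the same sampling idea can be sketched in plain NumPy, independent of the fibertree API. Here each rank-1 term of the outer-product sum is kept with a probability tied to its magnitude; this is only a simplified illustration of the Monte Carlo idea, not the toolflow used in the cells below.
###Code
# Plain-NumPy sketch (illustrative only): approximate Z = A @ B.T by keeping each
# rank-1 term (column k of A with column k of B) with probability proportional
# to its magnitude.
import numpy as np
A = np.random.randint(0, interval + 1, (M, K)).astype(float)
B = np.random.randint(0, interval + 1, (N, K)).astype(float)
weights = np.abs(A).sum(axis=0) * np.abs(B).sum(axis=0)
keep_prob = weights / weights.max()
Z = np.zeros((M, N))
for k in range(K):
    if np.random.uniform() < keep_prob[k]:
        Z += np.outer(A[:, k], B[:, k])
print("max abs error vs. exact product:", np.abs(Z - A @ B.T).max())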
###Markdown
Outer Product w/ MCMM; uniform sampling A
###Code
z = Tensor(rank_ids=["M", "N"], shape=[M, N])
sample_tensor = UniformRandomPrune(sample_rate)
a_k = a_swapped.getRoot()
b_k = b_swapped.getRoot()
z_m = z.getRoot()
canvas = createCanvas(a_swapped, b_swapped, z)
for k, (a_m, b_n) in a_k.prune(sample_tensor) & b_k:
for m, (z_n, a_val) in z_m << a_m:
for n, (z_ref, b_val) in z_n << b_n:
z_ref += a_val * b_val
canvas.addFrame((k, m), (k, n), (m, n))
print(f"Error = {compareZ(z)}")
displayTensor(z)
displayCanvas(canvas)
###Output
_____no_output_____
###Markdown
Outer product, sample with threshold by num elements
###Code
z = Tensor(rank_ids=["M", "N"], shape=[M, N])
a_k = a_swapped.getRoot()
b_k = b_swapped.getRoot()
z_m = z.getRoot()
#traverse to get max elements per a_k
max_size = 0
for k, a_m in a_k:
size = a_m.countValues()
if size > max_size:
max_size = size
print(max_size)
sample_tensor = RandomSizePrune(max_size)
canvas = createCanvas(a_swapped, b_swapped, z)
for k, (a_m, b_n) in a_k.prune(sample_tensor) & b_k:
for m, (z_n, a_val) in z_m << a_m:
for n, (z_ref, b_val) in z_n << b_n:
z_ref += a_val * b_val
canvas.addFrame((k, m), (k, n), (m, n))
print(f"Error = {compareZ(z)}")
displayTensor(z)
displayCanvas(canvas)
###Output
_____no_output_____
###Markdown
Outer Product, sample with threshold by magnitude
###Code
z = Tensor(rank_ids=["M", "N"], shape=[M, N])
a_k = a_swapped.getRoot()
b_k = b_swapped.getRoot()
z_m = z.getRoot()
#traverse to get max magnitude per a_k
max_mag = 0
for k, a_m in a_k:
mag = 0
for m, a_val in a_m:
mag += a_val
if mag > max_mag:
max_mag = mag
sample_tensor = RandomMagnitudePrune(max_mag.v()) #convert payload to value
canvas = createCanvas(a_swapped, b_swapped, z)
for k, (a_m, b_n) in a_k.prune(sample_tensor) & b_k:
for m, (z_n, a_val) in z_m << a_m:
for n, (z_ref, b_val) in z_n << b_n:
z_ref += a_val * b_val
canvas.addFrame((k, m), (k, n), (m, n))
print(f"Error = {compareZ(z)}")
displayTensor(z)
displayCanvas(canvas)
###Output
_____no_output_____ |
kaggle_location/location_knn.ipynb | ###Markdown
Data
###Code
train = pd.read_csv("location_train.csv")
test = pd.read_csv("location_test.csv")
train.info()
train.head()
test.info()
test.head()
X_train = train.drop(["ID", "class"], axis=1)
y_train = train["class"]
X_test = test.drop(["ID"], axis=1)
###Output
_____no_output_____
###Markdown
Pre-processing
###Code
# not necessary
###Output
_____no_output_____
###Markdown
Exploratory data analysis
###Code
X_train.isnull().sum()
###Output
_____no_output_____
###Markdown
Model selection
###Code
# Grid Search
cv = 10 # number of folds
verbose = 1 # information shown during training
###Output
_____no_output_____
###Markdown
KNN
###Code
parameters = {
"n_neighbors":[1, 5, 10, 20],
"weights":["uniform", "distance"],
"metric":["euclidean", "manhattan", "chebyshev", "minkowski", "wminkowski", "seuclidean", "mahalanobis"]}
model = GridSearchCV(neighbors.KNeighborsClassifier(), parameters, cv=cv, verbose=verbose, scoring="f1_weighted")
model.fit(X_train, y_train)
results = pd.DataFrame(model.cv_results_)
results= results[["param_n_neighbors", "param_weights", "param_metric", "mean_test_score"]]
results.sort_values(["mean_test_score"], ascending=False).head(10)
###Output
Fitting 10 folds for each of 56 candidates, totalling 560 fits
###Markdown
Final model
###Code
best_model = model.best_estimator_
best_model.fit(X_train, y_train)
predictions = pd.DataFrame(test["ID"])
predictions["class"] = best_model.predict(X_test)
predictions.to_csv("submission.csv", index=False)
###Output
_____no_output_____ |
0.12/_downloads/plot_ica_from_raw.ipynb | ###Markdown
.. _tut_preprocessing_ica:Compute ICA on MEG data and remove artifacts============================================ICA is fit to MEG raw data.The sources matching the ECG and EOG are automatically found and displayed.Subsequently, artifact detection and rejection quality are assessed.
###Code
# Authors: Denis Engemann <[email protected]>
# Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import mne
from mne.preprocessing import ICA
from mne.preprocessing import create_ecg_epochs, create_eog_epochs
from mne.datasets import sample
###Output
_____no_output_____
###Markdown
Setup paths and prepare raw data
###Code
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = mne.io.read_raw_fif(raw_fname, preload=True)
raw.filter(1, 45, n_jobs=1)
###Output
_____no_output_____
###Markdown
1) Fit ICA model using the FastICA algorithm
###Code
# Other available choices are `infomax` or `extended-infomax`
# We pass a float value between 0 and 1 to select n_components based on the
# percentage of variance explained by the PCA components.
ica = ICA(n_components=0.95, method='fastica')
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=False,
stim=False, exclude='bads')
ica.fit(raw, picks=picks, decim=3, reject=dict(mag=4e-12, grad=4000e-13))
# maximum number of components to reject
n_max_ecg, n_max_eog = 3, 1 # here we don't expect horizontal EOG components
###Output
_____no_output_____
###Markdown
2) identify bad components by analyzing latent sources.
###Code
title = 'Sources related to %s artifacts (red)'
# generate ECG epochs use detection via phase statistics
ecg_epochs = create_ecg_epochs(raw, tmin=-.5, tmax=.5, picks=picks)
ecg_inds, scores = ica.find_bads_ecg(ecg_epochs, method='ctps')
ica.plot_scores(scores, exclude=ecg_inds, title=title % 'ecg', labels='ecg')
show_picks = np.abs(scores).argsort()[::-1][:5]
ica.plot_sources(raw, show_picks, exclude=ecg_inds, title=title % 'ecg')
ica.plot_components(ecg_inds, title=title % 'ecg', colorbar=True)
ecg_inds = ecg_inds[:n_max_ecg]
ica.exclude += ecg_inds
# detect EOG by correlation
eog_inds, scores = ica.find_bads_eog(raw)
ica.plot_scores(scores, exclude=eog_inds, title=title % 'eog', labels='eog')
show_picks = np.abs(scores).argsort()[::-1][:5]
ica.plot_sources(raw, show_picks, exclude=eog_inds, title=title % 'eog')
ica.plot_components(eog_inds, title=title % 'eog', colorbar=True)
eog_inds = eog_inds[:n_max_eog]
ica.exclude += eog_inds
###Output
_____no_output_____
###Markdown
3) Assess component selection and unmixing quality
###Code
# estimate average artifact
ecg_evoked = ecg_epochs.average()
ica.plot_sources(ecg_evoked, exclude=ecg_inds) # plot ECG sources + selection
ica.plot_overlay(ecg_evoked, exclude=ecg_inds) # plot ECG cleaning
eog_evoked = create_eog_epochs(raw, tmin=-.5, tmax=.5, picks=picks).average()
ica.plot_sources(eog_evoked, exclude=eog_inds) # plot EOG sources + selection
ica.plot_overlay(eog_evoked, exclude=eog_inds) # plot EOG cleaning
# check the amplitudes do not change
ica.plot_overlay(raw) # EOG artifacts remain
# To save an ICA solution you can say:
# ica.save('my_ica.fif')
# You can later load the solution by saying:
# from mne.preprocessing import read_ica
# read_ica('my_ica.fif')
# Apply the solution to Raw, Epochs or Evoked like this:
# ica.apply(epochs)
###Output
_____no_output_____ |
Model backlog/VGG19/85 - VGG19 - Regression.ipynb | ###Markdown
Dependencies
###Code
import os
import cv2
import shutil
import random
import warnings
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.utils import class_weight
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, cohen_kappa_score
from keras import backend as K
from keras.models import Model
from keras.utils import to_categorical
from keras import optimizers, applications
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, Callback, LearningRateScheduler
from keras.layers import Dense, Dropout, GlobalAveragePooling2D, Input
# Set seeds to make the experiment more reproducible.
from tensorflow import set_random_seed
def seed_everything(seed=0):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
set_random_seed(0)
seed = 0
seed_everything(seed)
%matplotlib inline
sns.set(style="whitegrid")
warnings.filterwarnings("ignore")
###Output
Using TensorFlow backend.
###Markdown
Load data
###Code
hold_out_set = pd.read_csv('../input/aptos-data-split/hold-out.csv')
X_train = hold_out_set[hold_out_set['set'] == 'train']
X_val = hold_out_set[hold_out_set['set'] == 'validation']
test = pd.read_csv('../input/aptos2019-blindness-detection/test.csv')
print('Number of train samples: ', X_train.shape[0])
print('Number of validation samples: ', X_val.shape[0])
print('Number of test samples: ', test.shape[0])
# Preprocess data
X_train["id_code"] = X_train["id_code"].apply(lambda x: x + ".png")
X_val["id_code"] = X_val["id_code"].apply(lambda x: x + ".png")
test["id_code"] = test["id_code"].apply(lambda x: x + ".png")
X_train['diagnosis'] = X_train['diagnosis']
X_val['diagnosis'] = X_val['diagnosis']
display(X_train.head())
###Output
Number of train samples: 2929
Number of validation samples: 733
Number of test samples: 1928
###Markdown
Model parameters
###Code
# Model parameters
BATCH_SIZE = 32
EPOCHS = 40
WARMUP_EPOCHS = 5
LEARNING_RATE = 1e-4
WARMUP_LEARNING_RATE = 1e-3
HEIGHT = 320
WIDTH = 320
CHANNELS = 3
ES_PATIENCE = 5
RLROP_PATIENCE = 3
DECAY_DROP = 0.5
###Output
_____no_output_____
###Markdown
Pre-procecess images
###Code
train_base_path = '../input/aptos2019-blindness-detection/train_images/'
test_base_path = '../input/aptos2019-blindness-detection/test_images/'
train_dest_path = 'base_dir/train_images/'
validation_dest_path = 'base_dir/validation_images/'
test_dest_path = 'base_dir/test_images/'
# Making sure directories don't exist
if os.path.exists(train_dest_path):
shutil.rmtree(train_dest_path)
if os.path.exists(validation_dest_path):
shutil.rmtree(validation_dest_path)
if os.path.exists(test_dest_path):
shutil.rmtree(test_dest_path)
# Creating train, validation and test directories
os.makedirs(train_dest_path)
os.makedirs(validation_dest_path)
os.makedirs(test_dest_path)
def crop_image(img, tol=7):
if img.ndim ==2:
mask = img>tol
return img[np.ix_(mask.any(1),mask.any(0))]
elif img.ndim==3:
gray_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
mask = gray_img>tol
check_shape = img[:,:,0][np.ix_(mask.any(1),mask.any(0))].shape[0]
if (check_shape == 0): # image is too dark so that we crop out everything,
return img # return original image
else:
img1=img[:,:,0][np.ix_(mask.any(1),mask.any(0))]
img2=img[:,:,1][np.ix_(mask.any(1),mask.any(0))]
img3=img[:,:,2][np.ix_(mask.any(1),mask.any(0))]
img = np.stack([img1,img2,img3],axis=-1)
return img
def circle_crop(img):
img = crop_image(img)
height, width, depth = img.shape
largest_side = np.max((height, width))
img = cv2.resize(img, (largest_side, largest_side))
height, width, depth = img.shape
x = width//2
y = height//2
r = np.amin((x, y))
circle_img = np.zeros((height, width), np.uint8)
cv2.circle(circle_img, (x, y), int(r), 1, thickness=-1)
img = cv2.bitwise_and(img, img, mask=circle_img)
img = crop_image(img)
return img
def preprocess_image(base_path, save_path, image_id, HEIGHT, WIDTH, sigmaX=10):
image = cv2.imread(base_path + image_id)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = circle_crop(image)
image = cv2.resize(image, (HEIGHT, WIDTH))
image = cv2.addWeighted(image, 4, cv2.GaussianBlur(image, (0,0), sigmaX), -4 , 128)
cv2.imwrite(save_path + image_id, image)
# Pre-process train set
for i, image_id in enumerate(X_train['id_code']):
preprocess_image(train_base_path, train_dest_path, image_id, HEIGHT, WIDTH)
# Pre-process validation set
for i, image_id in enumerate(X_val['id_code']):
preprocess_image(train_base_path, validation_dest_path, image_id, HEIGHT, WIDTH)
# Pre-process test set
for i, image_id in enumerate(test['id_code']):
preprocess_image(test_base_path, test_dest_path, image_id, HEIGHT, WIDTH)
###Output
_____no_output_____
###Markdown
Data generator
###Code
datagen=ImageDataGenerator(rescale=1./255,
rotation_range=360,
horizontal_flip=True,
vertical_flip=True)
train_generator=datagen.flow_from_dataframe(
dataframe=X_train,
directory=train_dest_path,
x_col="id_code",
y_col="diagnosis",
class_mode="raw",
batch_size=BATCH_SIZE,
target_size=(HEIGHT, WIDTH),
seed=seed)
valid_generator=datagen.flow_from_dataframe(
dataframe=X_val,
directory=validation_dest_path,
x_col="id_code",
y_col="diagnosis",
class_mode="raw",
batch_size=BATCH_SIZE,
target_size=(HEIGHT, WIDTH),
seed=seed)
test_generator=datagen.flow_from_dataframe(
dataframe=test,
directory=test_dest_path,
x_col="id_code",
batch_size=1,
class_mode=None,
shuffle=False,
target_size=(HEIGHT, WIDTH),
seed=seed)
###Output
Found 2929 validated image filenames.
Found 733 validated image filenames.
Found 1928 validated image filenames.
###Markdown
Model
###Code
def create_model(input_shape):
input_tensor = Input(shape=input_shape)
base_model = applications.VGG19(weights=None,
include_top=False,
input_tensor=input_tensor)
base_model.load_weights('../input/vgg19/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5')
x = GlobalAveragePooling2D()(base_model.output)
x = Dropout(0.5)(x)
x = Dense(2048, activation='relu')(x)
x = Dropout(0.5)(x)
final_output = Dense(1, activation='linear', name='final_output')(x)
model = Model(input_tensor, final_output)
return model
###Output
_____no_output_____
###Markdown
Train top layers
###Code
model = create_model(input_shape=(HEIGHT, WIDTH, CHANNELS))
for layer in model.layers:
layer.trainable = False
for i in range(-5, 0):
model.layers[i].trainable = True
metric_list = ["accuracy"]
optimizer = optimizers.Adam(lr=WARMUP_LEARNING_RATE)
model.compile(optimizer=optimizer, loss='mean_squared_error', metrics=metric_list)
model.summary()
STEP_SIZE_TRAIN = train_generator.n//train_generator.batch_size
STEP_SIZE_VALID = valid_generator.n//valid_generator.batch_size
history_warmup = model.fit_generator(generator=train_generator,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=valid_generator,
validation_steps=STEP_SIZE_VALID,
epochs=WARMUP_EPOCHS,
verbose=1).history
###Output
Epoch 1/5
91/91 [==============================] - 88s 966ms/step - loss: 1.5448 - acc: 0.3207 - val_loss: 0.9763 - val_acc: 0.3310
Epoch 2/5
91/91 [==============================] - 79s 869ms/step - loss: 1.1898 - acc: 0.3667 - val_loss: 1.1785 - val_acc: 0.2482
Epoch 3/5
91/91 [==============================] - 81s 895ms/step - loss: 1.1431 - acc: 0.3694 - val_loss: 0.9626 - val_acc: 0.3138
Epoch 4/5
91/91 [==============================] - 83s 914ms/step - loss: 1.0991 - acc: 0.3940 - val_loss: 0.9180 - val_acc: 0.3638
Epoch 5/5
91/91 [==============================] - 82s 900ms/step - loss: 1.0675 - acc: 0.3859 - val_loss: 0.9352 - val_acc: 0.4123
###Markdown
Fine-tune the complete model (1st step)
###Code
for layer in model.layers:
layer.trainable = True
es = EarlyStopping(monitor='val_loss', mode='min', patience=ES_PATIENCE, restore_best_weights=True, verbose=1)
rlrop = ReduceLROnPlateau(monitor='val_loss', mode='min', patience=RLROP_PATIENCE, factor=DECAY_DROP, min_lr=1e-6, verbose=1)
callback_list = [es, rlrop]
optimizer = optimizers.Adam(lr=LEARNING_RATE)
model.compile(optimizer=optimizer, loss='mean_squared_error', metrics=metric_list)
model.summary()
history_finetunning = model.fit_generator(generator=train_generator,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=valid_generator,
validation_steps=STEP_SIZE_VALID,
epochs=int(EPOCHS*0.8),
callbacks=callback_list,
verbose=1).history
###Output
Epoch 1/32
91/91 [==============================] - 101s 1s/step - loss: 2.0027 - acc: 0.1915 - val_loss: 2.0304 - val_acc: 0.0970
Epoch 2/32
91/91 [==============================] - 89s 974ms/step - loss: 1.7543 - acc: 0.1201 - val_loss: 2.0605 - val_acc: 0.0984
Epoch 3/32
91/91 [==============================] - 91s 998ms/step - loss: 1.5116 - acc: 0.2542 - val_loss: 1.3516 - val_acc: 0.3138
Epoch 4/32
91/91 [==============================] - 91s 1s/step - loss: 1.2124 - acc: 0.4462 - val_loss: 1.2708 - val_acc: 0.4579
Epoch 5/32
91/91 [==============================] - 92s 1s/step - loss: 0.9931 - acc: 0.4783 - val_loss: 1.1380 - val_acc: 0.5264
Epoch 6/32
91/91 [==============================] - 95s 1s/step - loss: 0.8494 - acc: 0.5058 - val_loss: 0.7498 - val_acc: 0.5621
Epoch 7/32
91/91 [==============================] - 94s 1s/step - loss: 0.6728 - acc: 0.5790 - val_loss: 1.0496 - val_acc: 0.5749
Epoch 8/32
91/91 [==============================] - 92s 1s/step - loss: 0.5663 - acc: 0.6431 - val_loss: 0.8774 - val_acc: 0.6248
Epoch 9/32
91/91 [==============================] - 93s 1s/step - loss: 0.4944 - acc: 0.6685 - val_loss: 0.7060 - val_acc: 0.6748
Epoch 10/32
91/91 [==============================] - 91s 1s/step - loss: 0.4459 - acc: 0.6871 - val_loss: 0.6290 - val_acc: 0.6904
Epoch 11/32
91/91 [==============================] - 90s 994ms/step - loss: 0.4450 - acc: 0.6936 - val_loss: 0.8826 - val_acc: 0.6220
Epoch 12/32
91/91 [==============================] - 90s 994ms/step - loss: 0.4000 - acc: 0.7068 - val_loss: 0.6778 - val_acc: 0.7004
Epoch 13/32
91/91 [==============================] - 92s 1s/step - loss: 0.4227 - acc: 0.7021 - val_loss: 0.7095 - val_acc: 0.6748
Epoch 00013: ReduceLROnPlateau reducing learning rate to 4.999999873689376e-05.
Epoch 14/32
91/91 [==============================] - 92s 1s/step - loss: 0.3714 - acc: 0.7216 - val_loss: 0.6205 - val_acc: 0.6847
Epoch 15/32
91/91 [==============================] - 91s 998ms/step - loss: 0.3753 - acc: 0.7210 - val_loss: 0.7742 - val_acc: 0.6576
Epoch 16/32
91/91 [==============================] - 92s 1s/step - loss: 0.3704 - acc: 0.7197 - val_loss: 0.8105 - val_acc: 0.6106
Epoch 17/32
91/91 [==============================] - 91s 999ms/step - loss: 0.3584 - acc: 0.7341 - val_loss: 0.7237 - val_acc: 0.6391
Epoch 00017: ReduceLROnPlateau reducing learning rate to 2.499999936844688e-05.
Epoch 18/32
91/91 [==============================] - 92s 1s/step - loss: 0.3538 - acc: 0.7204 - val_loss: 0.6614 - val_acc: 0.6633
Epoch 19/32
91/91 [==============================] - 94s 1s/step - loss: 0.3492 - acc: 0.7334 - val_loss: 0.7316 - val_acc: 0.6462
Restoring model weights from the end of the best epoch
Epoch 00019: early stopping
###Markdown
Fine-tune the complete model (2nd step)
###Code
optimizer = optimizers.SGD(lr=LEARNING_RATE, momentum=0.9, nesterov=True)
model.compile(optimizer=optimizer, loss='mean_squared_error', metrics=metric_list)
history_finetunning_2 = model.fit_generator(generator=train_generator,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=valid_generator,
validation_steps=STEP_SIZE_VALID,
epochs=int(EPOCHS*0.2),
callbacks=callback_list,
verbose=1).history
###Output
Epoch 1/8
91/91 [==============================] - 98s 1s/step - loss: 0.3742 - acc: 0.7304 - val_loss: 0.7979 - val_acc: 0.6277
Epoch 2/8
91/91 [==============================] - 89s 973ms/step - loss: 0.3628 - acc: 0.7368 - val_loss: 0.7061 - val_acc: 0.6605
Epoch 3/8
91/91 [==============================] - 88s 972ms/step - loss: 0.3596 - acc: 0.7260 - val_loss: 0.7846 - val_acc: 0.6519
Epoch 4/8
91/91 [==============================] - 92s 1s/step - loss: 0.3469 - acc: 0.7375 - val_loss: 0.7027 - val_acc: 0.6633
Epoch 5/8
91/91 [==============================] - 89s 982ms/step - loss: 0.3655 - acc: 0.7267 - val_loss: 0.8436 - val_acc: 0.6348
Epoch 6/8
91/91 [==============================] - 88s 971ms/step - loss: 0.3523 - acc: 0.7264 - val_loss: 0.7461 - val_acc: 0.6377
Epoch 7/8
91/91 [==============================] - 90s 985ms/step - loss: 0.3572 - acc: 0.7318 - val_loss: 0.7068 - val_acc: 0.6619
Epoch 00007: ReduceLROnPlateau reducing learning rate to 4.999999873689376e-05.
Epoch 8/8
91/91 [==============================] - 88s 972ms/step - loss: 0.3488 - acc: 0.7251 - val_loss: 0.7359 - val_acc: 0.6391
###Markdown
Model loss graph
###Code
history = {'loss': history_finetunning['loss'] + history_finetunning_2['loss'],
'val_loss': history_finetunning['val_loss'] + history_finetunning_2['val_loss'],
'acc': history_finetunning['acc'] + history_finetunning_2['acc'],
'val_acc': history_finetunning['val_acc'] + history_finetunning_2['val_acc']}
sns.set_style("whitegrid")
fig, (ax1, ax2) = plt.subplots(2, 1, sharex='col', figsize=(20, 14))
ax1.plot(history['loss'], label='Train loss')
ax1.plot(history['val_loss'], label='Validation loss')
ax1.legend(loc='best')
ax1.set_title('Loss')
ax2.plot(history['acc'], label='Train accuracy')
ax2.plot(history['val_acc'], label='Validation accuracy')
ax2.legend(loc='best')
ax2.set_title('Accuracy')
plt.xlabel('Epochs')
sns.despine()
plt.show()
# Create empty arays to keep the predictions and labels
df_preds = pd.DataFrame(columns=['label', 'pred', 'set'])
train_generator.reset()
valid_generator.reset()
# Add train predictions and labels
for i in range(STEP_SIZE_TRAIN + 1):
im, lbl = next(train_generator)
preds = model.predict(im, batch_size=train_generator.batch_size)
for index in range(len(preds)):
df_preds.loc[len(df_preds)] = [lbl[index], preds[index][0], 'train']
# Add validation predictions and labels
for i in range(STEP_SIZE_VALID + 1):
im, lbl = next(valid_generator)
preds = model.predict(im, batch_size=valid_generator.batch_size)
for index in range(len(preds)):
df_preds.loc[len(df_preds)] = [lbl[index], preds[index][0], 'validation']
df_preds['label'] = df_preds['label'].astype('int')
###Output
_____no_output_____
###Markdown
Threshold optimization
###Code
def classify(x):
if x < 0.5:
return 0
elif x < 1.5:
return 1
elif x < 2.5:
return 2
elif x < 3.5:
return 3
return 4
def classify_opt(x):
if x <= (0 + best_thr_0):
return 0
elif x <= (1 + best_thr_1):
return 1
elif x <= (2 + best_thr_2):
return 2
elif x <= (3 + best_thr_3):
return 3
return 4
def find_best_threshold(df, label, label_col='label', pred_col='pred', do_plot=True):
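    # For the given label, sweep offsets in [0, 1) for that label's upper decision
    # boundary, score each candidate with Cohen's kappa against the true labels,
    # and plot the sweep before returning the best offset.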
score = []
thrs = np.arange(0, 1, 0.01)
for thr in thrs:
preds_thr = [label if ((pred >= label and pred < label+1) and (pred < (label+thr))) else classify(pred) for pred in df[pred_col]]
score.append(cohen_kappa_score(df[label_col].astype('int'), preds_thr))
score = np.array(score)
pm = score.argmax()
best_thr, best_score = thrs[pm], score[pm].item()
print('Label %s: thr=%.2f, Kappa=%.3f' % (label, best_thr, best_score))
plt.rcParams["figure.figsize"] = (20, 5)
plt.plot(thrs, score)
plt.vlines(x=best_thr, ymin=score.min(), ymax=score.max())
plt.show()
return best_thr
# Best threshold for label 3
best_thr_3 = find_best_threshold(df_preds, 3)
# Best threshold for label 2
best_thr_2 = find_best_threshold(df_preds, 2)
# Best threshold for label 1
best_thr_1 = find_best_threshold(df_preds, 1)
# Best threshold for label 0
best_thr_0 = find_best_threshold(df_preds, 0)
# Classify predictions
df_preds['predictions'] = df_preds['pred'].apply(lambda x: classify(x))
# Apply optimized thresholds to the predictions
df_preds['predictions_opt'] = df_preds['pred'].apply(lambda x: classify_opt(x))
train_preds = df_preds[df_preds['set'] == 'train']
validation_preds = df_preds[df_preds['set'] == 'validation']
###Output
_____no_output_____
###Markdown
Model Evaluation Confusion Matrix Original thresholds
###Code
labels = ['0 - No DR', '1 - Mild', '2 - Moderate', '3 - Severe', '4 - Proliferative DR']
def plot_confusion_matrix(train, validation, labels=labels):
train_labels, train_preds = train
validation_labels, validation_preds = validation
fig, (ax1, ax2) = plt.subplots(1, 2, sharex='col', figsize=(24, 7))
train_cnf_matrix = confusion_matrix(train_labels, train_preds)
validation_cnf_matrix = confusion_matrix(validation_labels, validation_preds)
train_cnf_matrix_norm = train_cnf_matrix.astype('float') / train_cnf_matrix.sum(axis=1)[:, np.newaxis]
validation_cnf_matrix_norm = validation_cnf_matrix.astype('float') / validation_cnf_matrix.sum(axis=1)[:, np.newaxis]
train_df_cm = pd.DataFrame(train_cnf_matrix_norm, index=labels, columns=labels)
validation_df_cm = pd.DataFrame(validation_cnf_matrix_norm, index=labels, columns=labels)
sns.heatmap(train_df_cm, annot=True, fmt='.2f', cmap="Blues",ax=ax1).set_title('Train')
sns.heatmap(validation_df_cm, annot=True, fmt='.2f', cmap=sns.cubehelix_palette(8),ax=ax2).set_title('Validation')
plt.show()
plot_confusion_matrix((train_preds['label'], train_preds['predictions']), (validation_preds['label'], validation_preds['predictions']))
###Output
_____no_output_____
###Markdown
Optimized thresholds
###Code
plot_confusion_matrix((train_preds['label'], train_preds['predictions_opt']), (validation_preds['label'], validation_preds['predictions_opt']))
###Output
_____no_output_____
###Markdown
Quadratic Weighted Kappa
###Code
def evaluate_model(train, validation):
train_labels, train_preds = train
validation_labels, validation_preds = validation
print("Train Cohen Kappa score: %.3f" % cohen_kappa_score(train_preds, train_labels, weights='quadratic'))
print("Validation Cohen Kappa score: %.3f" % cohen_kappa_score(validation_preds, validation_labels, weights='quadratic'))
print("Complete set Cohen Kappa score: %.3f" % cohen_kappa_score(np.append(train_preds, validation_preds), np.append(train_labels, validation_labels), weights='quadratic'))
print(" Original thresholds")
evaluate_model((train_preds['label'], train_preds['predictions']), (validation_preds['label'], validation_preds['predictions']))
print(" Optimized thresholds")
evaluate_model((train_preds['label'], train_preds['predictions_opt']), (validation_preds['label'], validation_preds['predictions_opt']))
###Output
Original thresholds
Train Cohen Kappa score: 0.736
Validation Cohen Kappa score: 0.693
Complete set Cohen Kappa score: 0.727
Optimized thresholds
Train Cohen Kappa score: 0.753
Validation Cohen Kappa score: 0.725
Complete set Cohen Kappa score: 0.747
###Markdown
Apply model to test set and output predictions
###Code
def apply_tta(model, generator, steps=5):
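    # Test-time augmentation: run the test generator several times (augmentations differ
    # between passes) and average the predicted scores across passes.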
step_size = generator.n//generator.batch_size
preds_tta = []
for i in range(steps):
generator.reset()
preds = model.predict_generator(generator, steps=step_size)
preds_tta.append(preds)
return np.mean(preds_tta, axis=0)
preds = apply_tta(model, test_generator)
predictions = [classify(x) for x in preds]
predictions_opt = [classify_opt(x) for x in preds]
results = pd.DataFrame({'id_code':test['id_code'], 'diagnosis':predictions})
results['id_code'] = results['id_code'].map(lambda x: str(x)[:-4])
results_opt = pd.DataFrame({'id_code':test['id_code'], 'diagnosis':predictions_opt})
results_opt['id_code'] = results_opt['id_code'].map(lambda x: str(x)[:-4])
# Cleaning created directories
if os.path.exists(train_dest_path):
shutil.rmtree(train_dest_path)
if os.path.exists(validation_dest_path):
shutil.rmtree(validation_dest_path)
if os.path.exists(test_dest_path):
shutil.rmtree(test_dest_path)
###Output
_____no_output_____
###Markdown
Predictions class distribution
###Code
fig, (ax1, ax2) = plt.subplots(1, 2, sharex='col', figsize=(24, 8.7))
sns.countplot(x="diagnosis", data=results, palette="GnBu_d", ax=ax1).set_title('Test')
sns.countplot(x="diagnosis", data=results_opt, palette="GnBu_d", ax=ax2).set_title('Test optimized')
sns.despine()
plt.show()
val_kappa = cohen_kappa_score(validation_preds['label'], validation_preds['predictions'], weights='quadratic')
val_opt_kappa = cohen_kappa_score(validation_preds['label'], validation_preds['predictions_opt'], weights='quadratic')
results_name = 'submission.csv'
results_opt_name = 'submission_opt.csv'
# if val_kappa > val_opt_kappa:
# results_name = 'submission.csv'
# results_opt_name = 'submission_opt.csv'
# else:
# results_name = 'submission_norm.csv'
# results_opt_name = 'submission.csv'
results.to_csv(results_name, index=False)
display(results.head())
results_opt.to_csv(results_opt_name, index=False)
display(results_opt.head())
###Output
_____no_output_____ |
_build/jupyter_execute/curriculum-notebooks/Science/LightTransmission/transmission-of-light.ipynb | ###Markdown

###Code
from IPython.display import HTML
HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide();
} else {
$('div.input').show();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
The raw code for this IPython notebook is by default hidden for easier reading.
To toggle on/off the raw code, click <a href="javascript:code_toggle()">here</a>.''')
# Modules
import string
import numpy as np
import pandas as pd
import qgrid as q
import matplotlib.pyplot as plt
# Widgets & Display modules, etc..
from ipywidgets import widgets as w
from ipywidgets import Button, Layout, widgets
from IPython.display import display, Javascript, Markdown
# grid features for interactive grids
grid_features = { 'fullWidthRows': True,
'syncColumnCellResize': True,
'forceFitColumns': True,
'rowHeight': 40,
'enableColumnReorder': True,
'enableTextSelectionOnCells': True,
'editable': True,
'filterable': False,
'sortable': False,
'highlightSelectedRow': True}
from ipywidgets import Button , Layout , interact
from IPython.display import Javascript, display
# Function: executes previous cell on button widget click event and hides achievement indicators message
def run_current(ev):
display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()+0,IPython.notebook.get_selected_index()+1)'))
# Counter for toggling achievement indicator on/off
button_ctr = 0
### Goal # 1
# Achievement Indicators
line_1 = "#### Achievement Indicators"
line_2 = "**General Outcome: **"
line_3 = "Learning about light and its interaction with different surface"
line_4 = "*Specific Outcome 1 *"
line_5 = "Investigate how light is reflected, transmitted and absorbed by different materials; and describe differences in the optical properties of various materials (e.g., compare light absorption of different materials; identify materials that transmit light; distinguish between clear and translucent materials; identify materials that will reflect a beam of light as a coherent beam"
line_6 = "*Specific Outcome 2*"
line_7 = "Investigate the transmission of light, and describe its behaviour using a geometric ray model."
line_8 = "*Specific Outcome 3*"
line_9 = "Investigate, measure and describe the refraction of light through different materials (e.g., measure differences in light refraction through pure water, salt water and different oils)"
line_10 = "*Specific Outcome 4*"
line_11 = "Investigate materials used in optical technologies; and predict the effects of changes in their design, alignment or composition"
# Use to print lines, then save in lines_list
def print_lines(n):
lines_str = ""
for i in range(1,n+1):
lines_str = lines_str + "line_"+str(i)+","
lines_str = lines_str[:-1]
print(lines_str)
lines_list = [line_1,line_2,line_3,line_4,line_5,line_6,line_7,line_8,line_9,line_10,line_11]
ai_button_show = widgets.Button(button_style='info',description="Show Achievement Indicators", layout=Layout(width='25%', height='30px') )
ai_button_hide = widgets.Button(button_style='info',description="Hide Achievement Indicators", layout=Layout(width='25%', height='30px') )
display(Markdown("For instructors:"))
button_ctr += 1
if(button_ctr % 2 == 0):
for line in lines_list:
display(Markdown(line))
display(ai_button_hide)
ai_button_hide.on_click( run_current )
else:
display(ai_button_show)
ai_button_show.on_click( run_current )
from ipywidgets import widgets, Button
from IPython.display import display, Javascript, Markdown
# How to use this function:
# Modify start and end cells to control which cells to execute given an event (i.e. button clicked)
def run_cells( event ):
# Start and end cells to execute from current cell this function is called
start = 1
end = 2
# Javascript input
ipy_js_string = 'IPython.notebook.execute_cell_range' + '(IPython.notebook.get_selected_index()+' + str(start) + ',IPython.notebook.get_selected_index()+' + str(end) + ')'
display(Javascript(ipy_js_string))
###Output
_____no_output_____
###Markdown
Transmission of Light Grade 8 General Science IntroductionIn this notebook we will learn about light and visible light. We will learn about the physical properties of visible light and how it interacts with different surfaces. We will learn about opaque, transparent and translucent materials and the subtle differences between them. We will also cover the difference between coherent and incoherent light and objects that reflect a beam of light as coherent light. Through the use of a geometric ray model, we will interact with an implementation of the Law of Reflection as well as the Law of Refraction. We will learn the definitions of incidence ray, reflected ray, angle of incidence, angle of reflection, refracted ray, angle of refraction as well as index of refraction. We will explore what happens when we point a light ray through different materials, such as water, oil, salt, diamond and sugar solution, and observe the differences in angles of reflection and refraction that are produced. We will learn about an interesting phenomenon called "total internal reflection" as well as the notion of critical angle and the conditions that create such phenomenon between two materials. We begin with a few definitions and interactives to aid our understanding of what light and visible light are. Light and Visible Light**Light** is a form of energy that can be detected by the human eye. Light is composed of **photons**, and is propagated in the form of **waves** . By wave, we mean something like this:
###Code
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import animation
# First set up the figure, the axis, and the plot element we want to animate
fig = plt.figure(figsize=(12,1))
ax = plt.axes(xlim=(0, 2), ylim=(-2, 2))
line, = ax.plot([], [], lw=2)
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.axis("Off")
# initialization function: plot the background of each frame
def init():
line.set_data([], [])
return line,
# animation function. This is called sequentially
def animate(i):
x = np.linspace(0, 2, 1000)
y = np.sin(2 * np.pi * (x - 0.01 * i))
line.set_data(x, y)
return line,
# call the animator. blit=True means only re-draw the parts that have changed.
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=200, interval=20, blit=True)
# equivalent to rcParams['animation.html'] = 'html5'
%matplotlib inline
HTML(anim.to_html5_video())
###Output
_____no_output_____
###Markdown
**Wavelength** is the distance between successive crests of a wave. Light is measured by its wavelength (in nanometres, nm) or frequency (in Hertz). The photons at each wavelength have different energies: the shorter the wavelength (nm) the higher the energy. **Visible light** consists of wavelengths within a spectrum that the human eye can perceive. Certain wavelengths within the spectrum correspond to a specific colour based upon how humans typically perceive light of that wavelength. Interact with the following widget to see the relationship between wavelength and visible light.
###Code
import ipywidgets
from ipywidgets import widgets, interact
style = {'description_width': 'initial'}
%matplotlib inline
def wv_color(wv):
fig = plt.figure(figsize=(20,8))
ax = plt.axes(xlim=(0, 2), ylim=(-2, 2))
ax.set_title("Visible Spectrum",fontsize=45)
ax.set_xticklabels([])
ax.set_yticklabels([])
#ax.axis("Off")
if 700<wv:
ax.set_facecolor("black")
ax.set_title("Infra Red",fontsize=45)
elif 670<=wv and wv<=700 :
ax.set_facecolor("#610000")
elif 640<=wv and wv<670:
ax.set_facecolor("#FF0000")
elif 610<=wv and wv<640:
ax.set_facecolor("#FF6900")
elif 580<=wv and wv<610:
ax.set_facecolor("#FFC000")
elif 550<=wv and wv<580:
ax.set_facecolor("#5BFF00")
elif 520<=wv and wv<550:
ax.set_facecolor("#00FFAB")
elif 480<=wv and wv<520:
ax.set_facecolor("#0082FF")
elif 450<=wv and wv<480:
ax.set_facecolor("#0008FF")
elif 420<=wv and wv<450:
ax.set_facecolor("#8F00FF")
elif 400<=wv and wv<420:
ax.set_facecolor("#610061")
elif wv<400:
ax.set_facecolor("black")
ax.set_title("Ultra Violet",fontsize=45)
plt.show()
interact(wv_color,wv=widgets.IntSlider(
value=300,
min=300,
max=800,
step=30,
description='Wavelength (nm)',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d',
style =style
));
###Output
_____no_output_____
###Markdown
Properties of Visible Light Below we illustrate intuitively the properties of visible light: **Rectilinear Propagation**: Light travels in a straight line. Light can also travel through vacuum, i.e. it does not require a medium to propagate. This property allows the light from the Sun to travel through space and reach the Earth. When light meets a surface, such as a mirror, it bounces off. This is known as **reflection**. Light can also pass through certain materials and change direction in doing so. This is known as **refraction**. **Travels through mediums to different degrees**: Depending on the material, light will pass through a material either partially, completely or not at all. Such materials are known, respectively, as translucent, transparent and opaque. We will learn more about these materials in the next section. ------ Reflection, Transmission and Absorption of Light When light strikes an object, a number of things could happen. For instance, light could be absorbed, in which case it is converted into heat. Light could also be reflected by the object. It could also be that it is transmitted through the object itself. Different objects have distinct tendencies to absorb, reflect or transmit light of varying wavelengths. That is, one object might reflect the frequency that the human eye sees as green light while absorbing all other frequencies of visible light. A different object might transmit frequencies that the human eye sees as blue light, while absorbing all other frequencies of visible light. From this we can see that the manner in which visible light interacts with an object depends on: 1. The wavelength of the light, 2. The nature of the atoms of the object. Visible spectrum. Retrieved from http://www.funscience.in/study-zone/Physics/RefractionOfLight/ColoursOfObjects.php on June 26, 2018. Differences in Optical Properties of Various Materials We learned that, depending on the nature of the atoms of the object and the wavelength of light, light can either be transmitted, reflected or absorbed. It is then natural to wonder what types of materials will yield each scenario. Light absorption of different materials Light absorption is the process through which light is absorbed and converted into energy (mostly heat). Recall that light absorption depends on the wavelength of light and the object's atoms and how they behave. If light's wavelength and the object's nature of atoms are complementary, light is absorbed. Otherwise it is either reflected or it goes through the object. Examples of materials that absorb light include living organisms such as plants, animals and people. Materials that transmit light Materials that do not absorb light, such as transparent materials, have the property of allowing light to pass through. Some materials are partially transparent, absorbing part of the light and transmitting the rest. This property is what makes such materials look tinted, since it only allows certain colours of light to pass through. Transparent vs translucent vs opaque materials To summarize the information above, materials that absorb light, that is they **do not** allow light to pass through them, are *opaque*. Materials that allow light to pass through them are either *transparent* or *translucent*. It is important to notice the distinction between the two. 
While transparent materials allow light to pass through them **completely**, translucent materials allow light to pass through them **partially**. A phenomenon known as "refraction" occurs when light passes through an object. We will learn more about this in the experiment section of this notebook. Press the button below to demonstrate the differences between transparent, translucent and opaque objects.
###Code
import matplotlib.pyplot as plt
from matplotlib import patches
import ipywidgets
from ipywidgets import interact_manual,interact,widgets
%matplotlib inline
style = {'description_width': 'initial'}
def switch(switch_value):
fg_color = 'white'
fig = plt.figure(facecolor=fg_color, edgecolor=fg_color)
plt.subplots_adjust(left=5, bottom=None, right=11, top=3,
wspace=0.1, hspace=0.1)
ax1 = fig.add_subplot(1, 3, 1)
ax2 = fig.add_subplot(1, 3, 2)
ax3 = fig.add_subplot(1,3,3)
switch=True
if switch_value==True:
bg_color = "#FFD340"
ax1.patch.set_facecolor(bg_color)
ax2.patch.set_facecolor(bg_color)
ax3.patch.set_facecolor(bg_color)
rect_1= patches.Rectangle((0.25,0.25),0.5,0.5,0,edgecolor="black",\
facecolor="#FFD340")
rect_2= patches.Rectangle((0.25,0.25),0.5,0.5,0,edgecolor="black",\
facecolor="#D7B74D")
rect_3= patches.Rectangle((0.25,0.25),0.5,0.5,0,edgecolor="white",\
facecolor="black")
else:
bg_color = 'black'
ax1.patch.set_facecolor(bg_color)
ax2.patch.set_facecolor(bg_color)
ax3.patch.set_facecolor(bg_color)
rect_1= patches.Rectangle((0.25,0.25),0.5,0.5,0,edgecolor="white",\
facecolor="black")
rect_2= patches.Rectangle((0.25,0.25),0.5,0.5,0,edgecolor="white",\
facecolor="black")
rect_3= patches.Rectangle((0.25,0.25),0.5,0.5,0,edgecolor="white",\
facecolor="black")
ax1.add_patch(rect_1)
ax2.add_patch(rect_2)
ax3.add_patch(rect_3)
ax1.set_xticklabels([])
ax2.set_xticklabels([])
ax3.set_xticklabels([])
ax1.set_yticklabels([])
ax2.set_yticklabels([])
ax3.set_yticklabels([])
ax1.set_title("Transparent object",fontsize=45)
ax2.set_title("Translucent object",fontsize=45)
ax3.set_title("Opaque object",fontsize=45)
ax1.set_autoscalex_on(b=1)
ax2.set_autoscalex_on(b=2)
ax3.set_autoscalex_on(b=2)
ax1.grid("Off")
ax2.grid("Off")
ax3.grid("Off")
plt.show()
interact(switch,switch_value = widgets.ToggleButton(
value=False,
description='Press Me',
disabled=False,
button_style='warning', # 'success', 'info', 'warning', 'danger' or ''
tooltip='Description',
icon='check'
));
###Output
_____no_output_____
###Markdown
Materials that reflect a beam of light as a coherent beamNow that we have learned about the different ways light behaves when in contact with different objects, we will pause and make a distinction between two different types of light: *coherent* and *incoherent* light.**Incoherent Light**Light emitted by means such as a light bulb or a flashlight is called "incoherent". Incoherence is the property of light in which photons in the wave frequencies of light oscillate in different directions. **Coherent Light**Coherent light, on the other hand, is characterized by the fact that its photons all have the same frequency. A good example of an object that emits coherent light is a laser. Coherent vs Incoherent Light. Retrieved from https://www.norwegiancreations.com/2018/04/laser-101-pt-1-the-basics/ on June 29, 2018.---Further reading[Visible light](http://www.physicsclassroom.com/class/light/Lesson-2/Light-Absorption,-Reflection,-and-Transmission);[Light absorption](https://www.wikilectures.eu/w/Light_absorption);[Light transmission](https://sciencing.com/light-transmitted-5127127.html);[Coherent and Incoherent Light](https://www.msnucleus.org/membership/html/k-6/as/technology/5/ast5_1a.html) The Law of ReflectionNow that we have a better understanding of what visible light is, and all the different scenarios that take place when light comes into contact with an object, we can learn in more detail what happens to light when it is reflected. We begin by defining the following concepts.**Incident light ray**: This is the incoming light ray.**Reflected light ray**: The ray that bounces off the surface.**Normal to the surface**: an imaginary line that is perpendicular to the surface. **Angle of incidence**: the angle formed by the incident ray and the normal.**Angle of reflection**: the angle formed by the reflected ray and the normal. We are now ready to explore what is known as the "Law of Reflection".Let us take a flat smooth surface, such as a mirror, and let us point a ray of light at the mirror. The Law of reflection states that the angle formed between the incident ray and the normal is equal to the angle between the reflected ray and the normal.This means that no matter what direction of the incident ray is, we can always determine the angle that the reflective ray will have with respect to the normal to the surface. Experiment: Law of ReflectionThe interactive below is designed to model what would happen if we reflect a ray of light on a flat smooth surface that reflects light. In this diagram, the red arrow represents the incidence ray while the blue arrow represents the reflected ray. We denote the surface as Surface and the normal to the surface as N. Imagine we place a protractor to help us measure the angles of incidence and reflection. We place the normal at the $0 ^{\circ}$ mark. Using the widget below, choose an initial angle for the incidence ray. Use the degrees on the diagram to help you measure the incidence and reflected angles. What is the angle formed between the reflected ray and the normal?
###Code
from __future__ import division
from ipywidgets import widgets,interact_manual,interact
import matplotlib
import numpy as np
import math
from math import ceil
import matplotlib.pyplot as plt
%matplotlib inline
style = {'description_width': 'initial'}
@interact(
theta1_refle = widgets.IntSlider(
value=40,
min=0,
max=90,
step=10,
description='Angle of Incidence',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d',
style =style
)
)
def plot_reflection_diagram(theta1_refle):
# radar green, solid grid lines
plt.rc('grid', color='#316931', linewidth=1, linestyle='--')
plt.rc('xtick', labelsize=15)
plt.rc('ytick', labelsize=15)
# force square figure and square axes looks better for polar, IMO
width, height = matplotlib.rcParams['figure.figsize']
size = min(width, height)
# make a square figure
fig = plt.figure(figsize=(10, 18))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], polar=True, facecolor='white')
ax.set_ylim(0,1)
ax.set_yticks(np.arange(0,1,0.5))
ax.set_theta_zero_location('N')
ax.set_yticklabels([])
ax.set_rmax(2.0)
plt.grid(True)
ax.set_thetamin(-90)
ax.set_thetamax(90)
ax.axhline()
#ax.set_title("And there was much rejoicing!", fontsize=20)
#This is the line I added:
ax.arrow((theta1_refle)/180.*np.pi, 0, 0, 1, alpha = 1.5, width = 0.015,
edgecolor = 'red', facecolor = 'red', lw = 2, zorder = 5)
# arrow at 45 degree
ax.arrow((-theta1_refle)/180.*np.pi, 0.0, 0, 1, alpha = 0.5, width = 0.015,
edgecolor = 'blue', facecolor = 'blue', lw = 2, zorder = 5)
x_s = [-2,2]#10*cos(90/180*np.pi)
y_s = [0,0]#10*sin(90/180*np.pi)
ax.plot(x_s,y_s,color='black',linestyle='solid',transform=ax.transData._b)
x_n = [0,0]#10*cos(90/180*np.pi)
y_n = [0,2]#10*sin(90/180*np.pi)
ax.plot(x_n,y_n,color='black',linestyle='solid',transform=ax.transData._b)
matplotlib.pyplot.text(0, 2.2, "N", fontsize=20,transform=ax.transData._b)
matplotlib.pyplot.text(2.2, 0, "Surface", fontsize=20,transform=ax.transData._b)
#ax.legend()
#arr3 = plt.plot([0,-1],[0,-1])
plt.show()
###Output
_____no_output_____
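###Markdown
As a small complement to the interactive above (not part of the original widgets), the cell below checks the Law of Reflection numerically: a ray direction $\vec{d}$ is reflected about the unit normal $\hat{n}$ using $\vec{r} = \vec{d} - 2(\vec{d}\cdot\hat{n})\hat{n}$, and the angles that the incident and reflected rays make with the normal come out equal.
###Code
import numpy as np
def reflect(d, n):
    """Reflect the direction vector d about the unit normal n."""
    d = np.asarray(d, dtype=float)
    n = np.asarray(n, dtype=float)
    n = n / np.linalg.norm(n)  # make sure the normal has unit length
    return d - 2 * np.dot(d, n) * n
def angle_with_normal(v, n):
    """Angle (in degrees) between a ray direction v and the normal n."""
    cos_angle = abs(np.dot(v, n)) / (np.linalg.norm(v) * np.linalg.norm(n))
    return np.degrees(np.arccos(cos_angle))
# incident ray travelling down towards a horizontal surface whose normal points up
incident = np.array([np.sin(np.radians(40)), -np.cos(np.radians(40))])
normal = np.array([0.0, 1.0])
reflected = reflect(incident, normal)
print("angle of incidence :", round(angle_with_normal(incident, normal), 1), "degrees")
print("angle of reflection:", round(angle_with_normal(reflected, normal), 1), "degrees")
###Output
_____no_output_____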
###Markdown
Questions1.What is the angle of reflection when we assume an initial angle of incidence of $20 ^{\circ}$? Assume you are measuring degrees as positive quantities, i.e. the sign does not matter in this case.
###Code
from ipywidgets import interact_manual,widgets
s = {'description_width': 'initial'}
@interact_manual(answer =widgets.Select(
options=["Select option","90 \N{DEGREE SIGN}",\
"0 \N{DEGREE SIGN}","20 \N{DEGREE SIGN}",\
"-20 \N{DEGREE SIGN}"],
value='Select option',
description="Angle of reflection",
disabled=False,
style=s
))
def reflective_angle_question(answer):
if answer=="Select option":
print("Click on the correct reflective angle.\nPress the 'Run Interact' button when you consider you have found the answer.")
elif answer=="20 \N{DEGREE SIGN}":
print("Correct!\nBy the law of reflection, the reflective angle is equal to the incidence angle.")
elif answer != "20 \N{DEGREE SIGN}" or answer != "Select Option":
print("Recall that the law of reflection states that the reflective angle is equal to the incidence angle. We are assuming positive quantities and also we assume that the incidence angle is equal to 20 \N{DEGREE SIGN}.")
def show_angle_true_answer(event):
print("The correct answer is: 20\N{DEGREE SIGN}.\nIndeed, the Law of Reflection states that the angle between the incident ray and the normal is equal to the angle between the reflected ray and the normal. So, if the angle of incidence is equal to 20\N{DEGREE SIGN}, then the Law of Reflection implies the angle of reflection is equal to 20\N{DEGREE SIGN}.")
your_button = widgets.Button(button_style='info',description="Show True Answer")
display(your_button)
your_button.on_click(show_angle_true_answer)
###Output
_____no_output_____
###Markdown
2. What happens when the angle of incidence (i.e. the angle between the incidence ray and the normal) is equal to $0^{\circ}$? Enter your answer and press "Save Answer" to save your response.
###Code
from ipywidgets import interact_manual,widgets
from IPython.display import display, Markdown
# Note: run_cells is assumed to be defined in an earlier cell of this notebook.
your_text_box = widgets.Textarea( value='', placeholder='', description='', disabled=False )
your_button = widgets.Button(button_style='info',description="Save Answer")
display(your_text_box)
display(your_button)
your_button.on_click( run_cells )
user_input = your_text_box.value
if(user_input != ''):
your_text_box.close()
your_button.close()
display(Markdown(user_input))
###Output
_____no_output_____
###Markdown
3.What happens when the angle of incidence (i.e. the angle between the incidence ray and the normal) is equal to $90^{\circ}$?
###Code
from ipywidgets import interact_manual,widgets
your_text_box = widgets.Textarea( value='', placeholder='', description='', disabled=False )
your_button = widgets.Button(button_style='info',description="Save Answer")
display(your_text_box)
display(your_button)
your_button.on_click( run_cells )
user_input = your_text_box.value
if(user_input != ''):
your_text_box.close()
your_button.close()
display(Markdown(user_input))
###Output
_____no_output_____
###Markdown
Refraction of LightWe learned that the Law of Reflection states that for a given flat smooth surface that reflects light and an incident ray, the angle of incidence is equal to the angle of reflection. This is, of course, assuming that all light is reflected.A natural question to ask is: what happens when a portion of the light ray is reflected while another passes through the surface? We will explore the answer to this question in this section. We begin by defining the following concepts.**Refraction**: this is, intuitively, the bending of light as it passes from one medium to another. If an incident ray hits an opaque illuminated object, then no light passes through the object, however if the ray hits a transparent medium (such as glass), then a portion of the light is reflected while another portion passes through the material. **Refracted ray**: as the light ray passes through the material, it changes direction. This new ray is what we call the refracted ray. **Angle of refraction**: this is the angle formed between the normal and the refracted ray. **Refractive index** of a material is the value calculated from the ratio of the speed of light in a vacuum to that in a second medium of greater density. What happens when light is refracted?As a light ray travels from a less dense material to a more dense material it slows down, making the refraction ray bend towards the normal. If on the other hand, a light ray travels from a more dense material to a less dense material, it speeds up, making the ray bend away from the normal. In this diagram, $n_1,n_2$ denote, respectively, the refractive index of the first and second medium while $\theta _1, \theta _2$ denote, respectively, the angle of incidence and the angle of refraction. Just like with reflection, there exists a Law of Refraction, also known as [Snells Law](https://www.britannica.com/science/Snells-law). Although we will not explore in detail the mathematics behind Snell's law, we will demonstrate it via the following interactive. Experiment: Law of RefractionIn this table you can find the index of refraction for different materials. | Material | Index of Refraction ||----------|---------------------||Vacuum | 1.000 ||Water at $20 ^{\circ}C$| 1.330 ||Sugar solution(30%) |1.380||Sugar solution (80%) |1.490||Oil, vegetable $50 ^{\circ}C$ |1.470|Salt | 1.520 ||Diamond | 2.417 |Indices of refraction obtained from [HyperPhysics](http://hyperphysics.phy-astr.gsu.edu/hbase/index.html) and [The Engineering ToolBox](https://www.engineeringtoolbox.com/refractive-index-d_1264.html)The widget below models what would happen if you point a light ray through one of materials on the table. As before, we denote a red arrow as the incidence ray and a blue arrow as the reflected ray. We will use a green arrow to denote the refracted ray. Use the widget below to model what would happen if you pointed a ray of light through two different materials. We assume the "top material" is vacuum. You can select the bottom material from the drop down menu. We assume there is a surface between the top material and the bottom material that reflects a portion of the light and refracts another portion. As before, we denote the normal to the surface as N. Once you have chosen a bottom material, select an angle of incidence. Press "Run Interact" afterwards. Observe the differences between the angle of refraction and the angle of incidence on the different materials provided.
###Code
from __future__ import division
from ipywidgets import widgets,interact_manual,interact
import matplotlib
import numpy as np
import math
from matplotlib import patches
import matplotlib.pyplot as plt
from math import ceil, cos, sin
%matplotlib inline
style = {'description_width': 'initial'}
@interact(
n2 = widgets.Dropdown(
options={'Vacuum':1.000,\
'Water at 20C':1.330,\
'Sugar Solution (30%)':1.380,\
'Sugar Solution (80%)':1.490,\
'Oil, vegetable': 1.470,\
'Salt': 1.520,\
'Diamond': 2.417},
value = 1.000,
description = "Bottom material",
style =style
),
theta1 = widgets.IntSlider(
value=45,
min=0,
max=90,
step=5,
description='Angle of Incidence',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d',
style =style
)
)
def plot_refraction_diagram(n2,theta1):
index_dictionary = {1.000:"#FFFFFF",1.330:"#64D5FF",1.380:"#8BA5AE",\
1.490:"#80A4B0",1.470:"#F4D41E",1.520:"#F9F3D6",\
2.417:"#DAFCF7"}
n1 = 1.000
# force square figure and square axes looks better for polar, IMO
width, height = matplotlib.rcParams['figure.figsize']
size = min(width, height)
fig = plt.figure(figsize=(10, 18))
ax = fig.add_subplot(111,projection="polar", facecolor='white')
ax.set_theta_zero_location('N')
ax.set_ylim(0,1)
ax.set_yticks(np.arange(0,1,0.5))
ax.set_yticklabels([])
ax.set_rmax(2.0)
plt.grid(True)
ax.axhline()
pat_col_2 = index_dictionary[n2]
pat_col_1 = index_dictionary[n1]
ax.add_patch(
patches.Rectangle(
(0, 0), width=-1.5*math.pi, height=3, facecolor=pat_col_2
)
)
ax.add_patch(
patches.Rectangle(
(0, 0), width=0.5*math.pi, height=3, facecolor=pat_col_1
)
)
ax.add_patch(
patches.Rectangle(
(0, 0), width=-0.5*math.pi, height=3, facecolor=pat_col_1
)
)
ax.bar(0, 1).remove()
ax.arrow((theta1)/180.*np.pi, 0, 0, 1, alpha = 1.5, width = 0.015,label="Incidence Ray",
edgecolor = 'red', facecolor = 'red', lw = 2, zorder = 5)
x_s = [-2,2]#10*cos(90/180*np.pi)
y_s = [0,0]#10*sin(90/180*np.pi)
ax.plot(x_s,y_s,color='black',linestyle='solid',transform=ax.transData._b)
x_n = [0,0]#10*cos(90/180*np.pi)
y_n = [-2,2]#10*sin(90/180*np.pi)
ax.plot(x_n,y_n,color='black',linestyle='solid',transform=ax.transData._b)
#ax.axhline(y=0,xmin=0,xmax=10)
# arrow at 45 degree
ax.arrow((-theta1)/180.*np.pi, 0.0, 0, 1, alpha = 0.5, width = 0.015,label="Reflection Ray",
edgecolor = 'blue', facecolor = 'blue', lw = 2, zorder = 5)
matplotlib.pyplot.text(0, 2.2, "N", fontsize=20,transform=ax.transData._b)
matplotlib.pyplot.text(2.2, 0, "Surface", fontsize=20,transform=ax.transData._b)
if ((n1*math.sin(theta1*math.pi/180)/n2)<-1) or ((n1*math.sin(theta1*math.pi/180))/n2>1):
print("Angle of incidence: %i" %ceil(theta1) + "\N{DEGREE SIGN}")
print("Angle of reflection: %i" %ceil(theta1)+ "\N{DEGREE SIGN}")
print("\033[1mTotal Internal Reflection. You are at or past the critical angle.\033[0m\nRead more below to learn what this means.")
exit
else:
theta2=(math.asin(n1*math.sin(theta1*math.pi/180)/n2))*180/math.pi
print("Angle of incidence: %i" %ceil(theta1) + "\N{DEGREE SIGN}")
print("Angle of reflection: %i" %ceil(theta1)+ "\N{DEGREE SIGN}")
print("Angle of refraction: %i" %ceil(theta2) + "\N{DEGREE SIGN}")
ax.arrow((theta2 + 180)/180.*np.pi, 0.0, 0, 1, alpha = 0.5, width = 0.015,label="Refraction Ray",
edgecolor = 'green', facecolor = 'green', lw = 2, zorder = 5)
ax.legend()
plt.show()
###Output
_____no_output_____
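###Markdown
For reference, the cell below is a minimal sketch of the calculation the widget above performs internally, namely Snell's law $n_1 \sin\theta_1 = n_2 \sin\theta_2$, using indices of refraction from the table; the example angles chosen here are illustrative only.
###Code
import math
def refraction_angle(n1, n2, theta1_deg):
    """Refraction angle in degrees, or None when total internal reflection occurs."""
    ratio = n1 * math.sin(math.radians(theta1_deg)) / n2
    if abs(ratio) > 1:
        return None  # no refracted ray: total internal reflection
    return math.degrees(math.asin(ratio))
# example: light travelling from vacuum (n = 1.000) into water at 20 C (n = 1.330)
for theta1 in [15, 45, 75]:
    theta2 = refraction_angle(1.000, 1.330, theta1)
    print("incidence %2d degrees -> refraction %4.1f degrees" % (theta1, theta2))
###Output
_____no_output_____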
###Markdown
Questions 1. What is the angle of refraction obtained when we pass a ray of light through oil, if we assume an angle of incidence of $15 ^{\circ}$? The answer will be printed by our model. Copy and paste the answer into the box below and press "Run Interact" when you are done.
###Code
from ipywidgets import interact_manual,widgets
s = {'description_width': 'initial'}
@interact_manual(answer =widgets.Textarea(
value=' ',
placeholder='Type something',
description='Your Answer:',
disabled=False,
style=s))
def get_answer_one(answer):
if "11" in answer:
print("Correct!")
else:
print("That angle seems off. The correct model's parameters are as follows:\Bottom material: Oil, vegetable\nAngle of incidence:15\N{DEGREE SIGN}")
def show_answer_one_true_answer(event):
print("The correct answer is: 11\N{DEGREE SIGN}.\nIn this widget, we assume first material: Vacuum, second material: oil, and incidence angle: 15\N{DEGREE SIGN}.\nAfter pressing Run Interact, we find\nAngle of incidence: 15\N{DEGREE SIGN}\nAngle of reflection: 15\N{DEGREE SIGN}\nAngle of refraction: 11\N{DEGREE SIGN}")
your_button = widgets.Button(button_style='info',description="Show True Answer")
display(your_button)
your_button.on_click(show_answer_one_true_answer)
###Output
_____no_output_____
###Markdown
2. What is the angle of refraction obtained if we keep oil as the bottom material, but change the angle of incidence to $90 ^{\circ}$? Copy and paste the answer into the box below and press "Run Interact" when you are done.
###Code
from ipywidgets import interact_manual,widgets
s = {'description_width': 'initial'}
@interact_manual(answer =widgets.Textarea(
value=' ',
placeholder='Type something',
description='Your Answer:',
disabled=False,
style=s))
def get_answer_one(answer):
if "43" in answer:
print("Correct!")
else:
print("That angle seems off. The new model's parameters are as follows:\Bottom material: Oil, vegetable\nAngle of incidence:15\N{DEGREE SIGN}")
def show_answer_two_true_answer(event):
print("The correct answer is: 43\N{DEGREE SIGN}.\nIn this widget, we assume first material is vacuum, the bottom material is oil, vegetable, and incidence angle: 90\N{DEGREE SIGN}.\nAfter pressing Run Interact, we find\nAngle of incidence: 90\N{DEGREE SIGN}\nAngle of reflection: 90\N{DEGREE SIGN} \nAngle of refraction: 43\N{DEGREE SIGN}")
your_button = widgets.Button(button_style='info',description="Show True Answer")
display(your_button)
your_button.on_click(show_answer_two_true_answer)
###Output
_____no_output_____
###Markdown
Total Internal ReflectionThere is an interesting phenomenon that occurs if the angle of incidence is greater than a certain "critical angle". The critical angle is the angle of incidence for which the angle of refraction is $90 ^{\circ}$ with respect to the normal to the surface. In general, this phenomenon takes place at the boundary between two transparent media when a ray of light in a medium of higher index of refraction approaches the other medium at an angle of incidence greater than the critical angle. We can observe this phenomenon in our model. Consider, for instance, diamonds. They possess an index of refraction of 2.417. Using the widget below we find that if the second material is a vacuum, then the critical angle is $25 ^{\circ}$. QuestionAssume the material on top of a surface is a diamond. Choose a bottom material from the drop-down menu. Use the slider widget to find what the critical angle is for each of the materials in the drop-down menu.An example has been found when the bottom material is vacuum. We find that the critical angle occurs when the angle of incidence is equal to $25 ^{\circ}$.
###Code
from __future__ import division
from ipywidgets import widgets,interact_manual,interact
import matplotlib
import numpy as np
import math
from matplotlib import patches
import matplotlib.pyplot as plt
from math import ceil, cos, sin
%matplotlib inline
style = {'description_width': 'initial'}
@interact(
n2 = widgets.Dropdown(
options={'Vacuum':1.000,\
'Water at 20C':1.330,\
'Sugar Solution (30%)':1.380,\
'Sugar Solution (80%)':1.490,\
'Oil, vegetable': 1.470,\
'Salt': 1.520,\
'Diamond': 2.417},
value = 1.000,
description = "Bottom material",
style =style
),
theta1 = widgets.IntSlider(
value=25,
min=0,
max=90,
step=1,
description='Angle of Incidence',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d',
style =style
)
)
def plot_refraction_diagram_issue(n2,theta1):
index_dictionary = {1.000:"#FFFFFF",1.330:"#64D5FF",1.380:"#8BA5AE",\
1.490:"#80A4B0",1.470:"#F4D41E",1.520:"#F9F3D6",\
2.417:"#DAFCF7"}
n1= 2.417
# force square figure and square axes looks better for polar, IMO
width, height = matplotlib.rcParams['figure.figsize']
size = min(width, height)
fig = plt.figure(figsize=(10, 18))
ax = fig.add_subplot(111,projection="polar", facecolor='white')
ax.set_theta_zero_location('N')
ax.set_ylim(0,1)
ax.set_yticks(np.arange(0,1,0.5))
ax.set_yticklabels([])
ax.set_rmax(2.0)
plt.grid(True)
ax.axhline()
pat_col_2 = index_dictionary[n2]
pat_col_1 = index_dictionary[n1]
ax.add_patch(
patches.Rectangle(
(0, 0), width=-1.5*math.pi, height=3, facecolor=pat_col_2
)
)
ax.add_patch(
patches.Rectangle(
(0, 0), width=0.5*math.pi, height=3, facecolor=pat_col_1
)
)
ax.add_patch(
patches.Rectangle(
(0, 0), width=-0.5*math.pi, height=3, facecolor=pat_col_1
)
)
ax.bar(0, 1).remove()
ax.arrow((theta1)/180.*np.pi, 0, 0, 1, alpha = 1.5, width = 0.015,label="Incidence Ray",
edgecolor = 'red', facecolor = 'red', lw = 2, zorder = 5)
x_s = [-2,2]#10*cos(90/180*np.pi)
y_s = [0,0]#10*sin(90/180*np.pi)
ax.plot(x_s,y_s,color='black',linestyle='solid',transform=ax.transData._b)
x_n = [0,0]#10*cos(90/180*np.pi)
y_n = [-2,2]#10*sin(90/180*np.pi)
ax.plot(x_n,y_n,color='black',linestyle='solid',transform=ax.transData._b)
#ax.axhline(y=0,xmin=0,xmax=10)
# arrow at 45 degree
ax.arrow((-theta1)/180.*np.pi, 0.0, 0, 1, alpha = 0.5, width = 0.015,label="Reflection Ray",
edgecolor = 'blue', facecolor = 'blue', lw = 2, zorder = 5)
matplotlib.pyplot.text(0, 2.2, "N", fontsize=20,transform=ax.transData._b)
matplotlib.pyplot.text(2.2, 0, "Surface", fontsize=20,transform=ax.transData._b)
if ((n1*math.sin(theta1*math.pi/180)/n2)<-1) or ((n1*math.sin(theta1*math.pi/180))/n2>1):
print("Angle of incidence: %i" %ceil(theta1) + "\N{DEGREE SIGN}")
print("Angle of reflection: %i" %ceil(theta1)+ "\N{DEGREE SIGN}")
print("\033[1mTotal Internal Reflection. You are at or past the critical angle.\033[0m")
exit
else:
theta2=(math.asin(n1*math.sin(theta1*math.pi/180)/n2))*180/math.pi
print("Angle of incidence: %i" %ceil(theta1) + "\N{DEGREE SIGN}")
print("Angle of reflection: %i" %ceil(theta1)+ "\N{DEGREE SIGN}")
print("Angle of refraction: %i" %ceil(theta2) + "\N{DEGREE SIGN}")
ax.arrow((theta2 + 180)/180.*np.pi, 0.0, 0, 1, alpha = 0.5, width = 0.015,label="Refraction Ray",
edgecolor = 'green', facecolor = 'green', lw = 2, zorder = 5)
ax.legend()
plt.show()
###Output
_____no_output_____
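###Markdown
The critical angle can also be computed directly: setting the refraction angle in Snell's law to $90 ^{\circ}$ gives $\theta_c = \arcsin(n_2 / n_1)$, which is only defined when light travels from the denser medium ($n_1 > n_2$). The small sketch below uses the indices from the table and agrees with the widget, e.g. roughly $25 ^{\circ}$ for diamond into vacuum.
###Code
import math
def critical_angle(n1, n2):
    """Critical angle in degrees for light going from medium 1 (denser) into medium 2."""
    if n1 <= n2:
        return None  # total internal reflection cannot happen in this direction
    return math.degrees(math.asin(n2 / n1))
n_diamond = 2.417
for name, n2 in [("vacuum", 1.000), ("water at 20 C", 1.330)]:
    print("diamond -> %-14s critical angle = %.1f degrees" % (name, critical_angle(n_diamond, n2)))
###Output
_____no_output_____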
###Markdown
3.Assuming diamond is still the top material we picked and using the widget above, pick salt as the bottom material. What is the critical angle for these two materials?
###Code
from ipywidgets import interact_manual,widgets
s = {'description_width': 'initial'}
@interact_manual(answer =widgets.Textarea(
value=' ',
placeholder='Type something',
description='Your Answer:',
disabled=False,
style=s))
def get_answer_one(answer):
if "39" in answer:
print("Correct! Any angle below 39\N{DEGREE SIGN} will yield a refraction ray, while any angle greater than or equal to 39\N{DEGREE SIGN} will yield total internal reflection.")
else:
print("That angle seems off.\nIf Salt is the second material, the correct answer lies between 30 \N{DEGREE SIGN} and 40\N{DEGREE SIGN}.")
def show_answer_thr_true_answer(event):
print("The correct answer is: 39\N{DEGREE SIGN}.\nIn this widget, we assume first material: diamond, second material: salt.\nOur goal is to find the smallest angle for which we see a legend on the screen 'Total Internal Reflection. You are at or past the critical angle.'\nSuch legend appears first when the angle of incidence is equal to 39\N{DEGREE SIGN}")
your_button = widgets.Button(button_style='info',description="Show True Answer")
display(your_button)
your_button.on_click(show_answer_thr_true_answer)
###Output
_____no_output_____ |
PreFall2018/07-Exceptions.ipynb | ###Markdown
When Things Go Wrong: Exceptions and Errors Today we'll cover perhaps one of the most important aspects of using Python: dealing with errors and bugs in code. Three Classes of ErrorsTypes of bugs/errors in code, from the easiest to the most difficult to diagnose:1. **Syntax Errors:** Errors where the code is not valid Python (generally easy to fix)2. **Runtime Errors:** Errors where syntactically valid code fails to execute (sometimes easy to fix)3. **Semantic Errors:** Errors in logic (often very difficult to fix) Syntax ErrorsSyntax errors are when you write code which is not valid Python. For example:
###Code
X = [1, 2, 3
a = 4
y = 4*x + 3
def f():
return GARBAGE
###Output
_____no_output_____
###Markdown
No error is generated by defining this function. The error doesn't occur until the function is called.
###Code
f()
###Output
_____no_output_____
###Markdown
Note that if your code contains even a *single* syntax error, none of it will run:
###Code
a = 4
something == is wrong
print(a)
###Output
_____no_output_____
###Markdown
Even though the syntax error appears below the (valid) variable definition, the valid code is not executed. Runtime ErrorsRuntime errors occur when the code is **valid python code**, but are errors within the context of the program execution. For example:
###Code
print(Q)
a = 'aaa'
# blah blah blah
x = 1 + int(a)
print (x)
X = 1 / 0
import numpy as np
np.sum([1, 2, 3, 4])
np.sum?
x = [1, 2, 3]
print(x[100])
###Output
_____no_output_____
###Markdown
Unlike Syntax errors, Runtime errors occur **during code execution**, which means that valid code occurring before the runtime error *will* execute:
###Code
spam = "my all-time favorite"
eggs = 1 / 0
print(spam)
###Output
_____no_output_____
###Markdown
Semantic ErrorsSemantic errors are perhaps the most insidious errors, and are by far the ones that will take most of your time. Semantic errors occur when the code is **syntactically correct**, but produces the wrong result.By way of example, imagine you want to write a simple script to approximate the value of $\pi$ according to the following formula:$$\pi = \sqrt{12} \sum_{k = 0}^{\infty} \frac{(-3)^{-k}}{2k + 1}$$You might write a function something like this, using numpy's vectorized syntax:
###Code
total = 0
for k in ks:
total += (-3.0) ** -k / (2 * k + 1)
from math import sqrt
def approx_pi(nterms=100):
ks = np.arange(nterms)
ans = sqrt(12) * np.sum([-3.0 ** -k / (2 * k + 1) for k in ks])
if ans < 1:
import pdb; pdb.set_trace()
return ans
approx_pi(1000)
###Output
_____no_output_____
###Markdown
Looks OK, yes? Let's try it out:
###Code
approx_pi(1000)
###Output
_____no_output_____
###Markdown
Huh. That doesn't look like $\pi$. Maybe we need more terms?
###Code
k = 2
(-3.0) ** -k / (2 * k + 1)
approx_pi(1000)
###Output
_____no_output_____
###Markdown
Nope... it looks like the algorithm simply gives the wrong result. This is a classic example of a semantic error.**Question: can you spot the problem?** Runtime Errors and Exception HandlingNow we'll talk about how to handle RunTime errors (we skip Syntax Errors because they're pretty self-explanatory).Runtime errors can be handled through "exception catching" using ``try...except`` statements. Here's a basic example:
###Code
try:
print("this block gets executed first")
GARBAGE
except (ValueError, RuntimeError) as err:
print("this block gets executed if there's an error")
print (err)
print ("I am done!")
def f(x):
if isinstance(x, int) or isinstance(x, float):
return 1.0/x
else:
raise ValueError("argument must be an int.")
f(0)
def f(x):
try:
return 1.0/x
except (TypeError, ZeroDivisionError):
raise ValueError("Argument must be a non-zero number.")
f(0)
f('aa')
try:
print("this block gets executed first")
x = 1 / 0 # ZeroDivisionError
print("we never get here")
except:
print("this block gets executed if there's an error")
###Output
_____no_output_____
###Markdown
Notice that the first block executes **up until the point** of the Runtime error.Once the error is hit, the ``except`` block is executed.One important note: the above clause catches **any and all** exceptions. It is not generally a good idea to catch-all. Better is to name the precise exception you expect:
###Code
def safe_divide(a, b):
try:
return a / b
except:
# print("oops, dividing by zero. Returning None.")
return None
print(safe_divide(15, 3))
print(safe_divide(1, 0))
###Output
5.0
None
###Markdown
But there's a problem here: this is a **catch-all** exception, and will sometimes give us misleading information. For example:
###Code
safe_divide(15, 'three')
###Output
_____no_output_____
###Markdown
Our program tells us we're dividing by zero, but we aren't! This is one reason you should **almost never** use a catch-all ``try..except`` statement, but instead specify the errors you're trying to catch:
###Code
def better_safe_divide(a, b):
try:
return a / b
except ZeroDivisionError:
print("oops, dividing by zero. Returning None.")
return None
better_safe_divide(15, 0)
better_safe_divide(15, 'three')
###Output
_____no_output_____
###Markdown
This also allows you to specify different behaviors for different exceptions:
###Code
def even_better_safe_divide(a, b):
try:
return a / b
except ZeroDivisionError:
print("oops, dividing by zero. Returning None.")
return None
except TypeError:
print("incompatible types. Returning None")
return None
even_better_safe_divide(15, 3)
even_better_safe_divide(15, 0)
even_better_safe_divide(15, 'three')
###Output
_____no_output_____
###Markdown
Remember this lesson, and **always specify your except statements!** I once spent an entire day tracing down a bug in my code which amounted to this. Raising Your Own ExceptionsWhen you write your own code, it's good practice to use the ``raise`` keyword to create your own exceptionswhen the situation calls for it:
###Code
import os # the "os" module has useful operating system stuff
def read_file(filename):
if not os.path.exists(filename):
raise ValueError("'{0}' does not exist".format(filename))
f = open(filename)
result = f.read()
f.close()
return result
###Output
_____no_output_____
###Markdown
We'll use IPython's ``%%file`` magic to quickly create a text file
###Code
%%file tmp.txt
this is the contents of the file
read_file('tmp.txt')
read_file('file.which.does.not.exist')
###Output
_____no_output_____
###Markdown
It is sometimes useful to define your own custom exceptions, which you can do easily via class inheritance:
###Code
class NonExistentFile(RuntimeError):
# you can customize exception behavior by defining class methods.
# we won't discuss that here.
pass
def read_file(filename):
if not os.path.exists(filename):
raise NonExistentFile(filename)
f = open(filename)
result = f.read()
f.close()
    return result
read_file('tmp.txt')
read_file('file.which.does.not.exist')
###Output
_____no_output_____
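###Markdown
Because `NonExistentFile` is just a subclass of `RuntimeError`, calling code can catch it like any other exception. The cell below is a small added example (not part of the original lecture) of how a caller might handle it.
###Code
try:
    contents = read_file('file.which.does.not.exist')
except NonExistentFile as err:
    print("Could not read file:", err)
    contents = None
###Output
_____no_output_____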
###Markdown
**Get used to throwing appropriate — and meaningful — exceptions in your code!** It makes reading and debugging your code much, much easier. More Advanced Exception HandlingThere is also the possibility of adding ``else`` and ``finally`` clauses to your try statements.You'll probably not need these often, but in case you encounter them some time, it's good to know what they do.The behavior looks like this:
###Code
try:
print("doing something")
except:
print("this only happens if it fails")
else:
print("this only happens if it succeeds")
try:
print("doing something")
raise ValueError()
except:
print("this only happens if it fails")
else:
print("this only happens if it succeeds")
###Output
_____no_output_____
###Markdown
Why would you ever want to do this?Mainly, it prevents the code within the ``else`` block from being caught by the ``try`` block.Accidentally catching an exception you don't mean to catch can lead to confusing results. The last statement you might use is the ``finally`` statement, which looks like this:
###Code
try:
print("do something")
except:
print("this only happens if it fails")
else:
print("this only happens if it succeeds")
finally:
print("this happens no matter what.")
try:
print("do something")
raise ValueError()
except:
print("this only happens if it fails")
else:
print("this only happens if it succeeds")
finally:
print("this happens no matter what.")
###Output
_____no_output_____
###Markdown
``finally`` is generally used for some sort of cleanup (closing a file, etc.) It might seem a bit redundant, though. Why not write the following?
###Code
try:
print("do something")
except:
print("this only happens if it fails")
else:
print("this only happens if it succeeds")
print("this happens no matter what.")
###Output
_____no_output_____
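###Markdown
Before the exercise below, here is one added illustration (not from the original lecture) of the cleanup use mentioned above: a ``finally`` clause runs whether or not an exception is raised, and even if the exception is not caught, so it is a safe place to close a file or release a resource.
###Code
f = open('tmp.txt')  # created earlier in this notebook with %%file
try:
    word = f.read().split()[100]  # IndexError: the file has far fewer words
except IndexError:
    print("not enough words in the file")
finally:
    f.close()
    print("file closed either way:", f.closed)
###Output
_____no_output_____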
###Markdown
Write exception code that handles all exceptions except ZeroDivisionError
###Code
x = 0
excpt = None
try:
1.0/x
except Exception as err:
if isinstance(err, ZeroDivisionError):
pass
else:
raise Exception("Got error.")
type(excpt)
###Output
_____no_output_____
###Markdown
The main difference is when the clause is used within a function:
###Code
def divide(x, y):
try:
result = x / y
except ZeroDivisionError:
print("division by zero!")
return None
else:
print("result is", result)
return result
finally:
print("some sort of cleanup")
divide(15, 3)
divide(15, 0)
###Output
_____no_output_____ |
examples/parallel_train/plot_parallel.ipynb | ###Markdown
Import relevant modules
###Code
import numpy as np
import os
import matplotlib.pyplot as plt
%matplotlib inline
from cycler import cycler
from pylab import rcParams
rcParams['figure.figsize'] = 8, 6
rcParams.update({'font.size': 15})
# color and linestyle cycle
#colors = [x['color'] for x in list(rcParams['axes.prop_cycle'])]
colors_base = ['b', 'g', 'r', 'c', 'm', 'y', 'k', '0.3', '0.5', '0.75', 'chartreuse']
print 'colors_base', colors_base
colors = [item for sublist in [colors_base]*len(colors_base) for item in sublist] # replicate and flatten
print 'colors', colors, len(list(rcParams['axes.prop_cycle']))
lnstyl = [[l] * len(colors_base) for l in ['-', '--', ':', '.', '-.', '*', 'x']] # replicate per color
print 'lnstyl', lnstyl
lnstyl = [item for sublist in lnstyl for item in sublist] # flatten
plt.rc('axes', prop_cycle=(cycler('color', colors) + cycler('linestyle', lnstyl))) # define cycler
from nideep.eval.learning_curve import LearningCurve
from nideep.eval.eval_utils import Phase
import nideep.eval.log_utils as lu
def moving_avg(x, window_size):
window = np.ones(int(window_size)) / float(window_size)
return np.convolve(x, window, 'valid')
classnames = ['alarm', 'baby', 'crash', 'dog', 'engine', 'femaleSpeech', 'fire', 'footsteps',\
'knock', 'phone', 'piano']
classnames_scalar = ['alarm', 'baby', 'crash', 'dog', 'engine', 'femaleSpeech', 'fire', 'footsteps', 'general',\
'knock', 'phone', 'piano']
print("Done importing")
###Output
colors_base ['b', 'g', 'r', 'c', 'm', 'y', 'k']
colors ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'b', 'g', 'r', 'c', 'm', 'y', 'k', 'b', 'g', 'r', 'c', 'm', 'y', 'k', 'b', 'g', 'r', 'c', 'm', 'y', 'k', 'b', 'g', 'r', 'c', 'm', 'y', 'k', 'b', 'g', 'r', 'c', 'm', 'y', 'k', 'b', 'g', 'r', 'c', 'm', 'y', 'k'] 7
lnstyl [['-', '-', '-', '-', '-', '-', '-'], ['--', '--', '--', '--', '--', '--', '--'], [':', ':', ':', ':', ':', ':', ':'], ['.', '.', '.', '.', '.', '.', '.'], ['-.', '-.', '-.', '-.', '-.', '-.', '-.'], ['*', '*', '*', '*', '*', '*', '*'], ['x', 'x', 'x', 'x', 'x', 'x', 'x']]
Done importing
###Markdown
Merge multiple network definitions that share the same data layers into a single definition to train within the same single process:
###Code
from nideep.proto.proto_utils import Parser
from nideep.nets.net_merge import merge_indep_net_spec
# select network definitions to merge into a single prototxt
# You can also just repeat the same network over and over if you want to train the same network with different random initializations
p0 = './train_val_00.prototxt'
p1 = './train_val_01.prototxt'
p2 = './train_val_02.prototxt'
# load each network definition from file
nets = [Parser().from_net_params_file(p) for p in [p0,p1,p2]]
# merge and save merged prototxt to file
p_dst = './train_val_00_01_02.prototxt'
with open(p_dst, 'w') as f:
f.write(merge_indep_net_spec(nets))
# use p_dst file in your solver and train this 'network ensemble' like you would any single network.
###Output
_____no_output_____
###Markdown
After training, we look at the learning curves of the individual sub-networks
###Code
logs = [\
'./xD/caffe.eltanin.kashefy.log.INFO.20160818-105955.20804',
'./xE_03/caffe.eltanin.kashefy.log.INFO.20160818-145600.31621',
'./xE_04/caffe.eltanin.kashefy.log.INFO.20160818-150354.710',
]
print("Found %d logs" % (len(logs),))
for phase in [Phase.TRAIN, Phase.TEST]:
print phase
plt.figure()
for p in logs:
e = LearningCurve(p)
lc_keys = e.parse()[phase == Phase.TEST]
num_iter = e.list('NumIters', phase)
print('%s: %d %s iterations' % (os.path.basename(os.path.dirname(p)), num_iter.size, phase))
for lck_idx, lck in enumerate(lc_keys):
if 'nidx' in lck or ('NumIters' not in lck and 'rate' not in lck.lower() and 'seconds' not in lck.lower()):
try:
loss = e.list(lck, phase)
plt.plot(num_iter, loss, label='%s %s' % (os.path.basename(os.path.dirname(p)), lck))
except KeyError as kerr:
print("Inavlid values for %s %s" % (phase, lck))
ticks, _ = plt.xticks()
plt.xticks(ticks, ["%dK" % int(t/1000) for t in ticks])
plt.title(phase)
plt.xlabel('iterations')
plt.ylabel(' '.join([phase, 'cross entropy loss']))
#plt.xlim([0,20e3])
#plt.xlim([0,300e3])
plt.ylim([1,20])
plt.title('on %s set' % phase)
plt.legend(loc='upper right')
plt.grid()
###Output
Found 3 logs
Train
xD: 538 Train iterations
xE_03: 599 Train iterations
xE_04: 32 Train iterations
Test
xD: 5 Test iterations
xE_03: 5 Test iterations
xE_04: 0 Test iterations
|
label_new_dataset.ipynb | ###Markdown
Autolabel images with PyLabel, YOLOv5, and jupyter-bbox-widgetThis notebook is a labeling tool that can be used to annotate image datasets with bounding boxes, automatically suggest bounding boxes using an object detection model, and save the annotations in YOLO, COCO, or VOC format. The annotation interface uses the [jupyter-bbox-widget](https://github.com/gereleth/jupyter-bbox-widget). The bounding box detection uses PyTorch and a [YOLOv5](https://github.com/ultralytics/yolov5) model.
###Code
import logging
logging.getLogger().setLevel(logging.CRITICAL)
%pip install pylabel > /dev/null
from pylabel import importer
###Output
_____no_output_____
###Markdown
Import Images to Create a New DatasetIn this example there are no annotations created yet. The path should be the path to a directory with the images that you want to annotate. For this demonstration we will download a subset of the coco dataset.
###Code
import os, zipfile
#Download sample yolo dataset
os.makedirs("data", exist_ok=True)
!wget "https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip" -O data/coco128.zip
with zipfile.ZipFile("data/coco128.zip", 'r') as zip_ref:
zip_ref.extractall("data")
path_to_images = "data/coco128/images/train2017"
dataset = importer.ImportImagesOnly(path=path_to_images, name="coco128")
dataset.df.head(3)
###Output
_____no_output_____
###Markdown
Predict and Edit AnnotationsUse the jupyter_bbox_widget to inspect, edit, and save annotations without leaving the Jupyter notebook. Press Predict to autolabel images using a pretrained model. For instructions and keyboard shortcuts for using this widget, see the Usage section at https://github.com/gereleth/jupyter-bbox-widget.
###Code
classes = ['person','boat', 'bear', "car"]
dataset.labeler.StartPyLaber(new_classes=classes)
###Output
_____no_output_____
###Markdown
Instructions - The first image (000000000612.jpg) should show some bears. Select the bear class, draw some boxes around the bears, and then save.- The next image (000000000404.jpg) should show a boat. Select the boat class, draw boxes around the boats, and save.- When you see an image with an object that is not in the current list of classes, add it as a new class, draw boxes on the image using that class, and save. At any time, run the cell below to see how many classes you have labeled in the dataset.
###Code
dataset.analyze.class_counts
#Export the annotations in Yolo format
dataset.path_to_annotations = 'data/coco128/labels/newlabels/'
os.makedirs(dataset.path_to_annotations, exist_ok=True)
dataset.export.ExportToYoloV5()
###Output
_____no_output_____ |
calspread/Part6-Scheduling.ipynb | ###Markdown
Disclaimer SchedulingAn example crontab for scheduling data collection and paper trading is provided in [quantrocket.countdown.crontab.sh](quantrocket.countdown.crontab.sh). Code highlights The following line executes the custom script each morning at 8 AM to initiate real-time data collection for the combo:```shell0 8 * * mon-fri quantrocket satellite exec 'codeload.calspread.collect_combo.collect_combo' --params 'universe:cl-fut' 'contract_months:[1,2]' 'tick_db:cl-combo-tick' 'until:16:30:00 America/New_York'```The trading strategy runs every minute from 9 AM to 3:59 PM, with orders simply logged to flightlog:```shell* 9-15 * * mon-fri quantrocket moonshot trade 'calspread-native-cl' | quantrocket flightlog log -n 'quantrocket.moonshot'``` Install the crontab> This section assumes that you're not already using your `countdown` service for any scheduled tasks and that you haven't yet set its timezone. If you're already using `countdown`, you can edit your existing crontab, or add a new countdown service for New York tasks. See the usage guide for help. All the commands on the provided crontab are intended to be run in New York time. By default, the countdown timezone is UTC:
###Code
from quantrocket.countdown import get_timezone, set_timezone
get_timezone()
###Output
_____no_output_____
###Markdown
So first, set the countdown timezone to New York time:
###Code
set_timezone("America/New_York")
###Output
_____no_output_____
###Markdown
Install the crontab by moving it to the `/codeload` directory. (First open a flightlog terminal so you can see if it loads successfully.)
###Code
# move file over unless it already exists
![ -e /codeload/quantrocket.countdown.crontab* ] && echo 'oops, the file already exists!' || mv quantrocket.countdown.crontab.sh /codeload/
###Output
_____no_output_____ |
2020/lecture-code/lecture-21-cross-validation.ipynb | ###Markdown
Cross Validation[J. Nathan Matias](https://natematias.com), April 21, 2020This notebook is part of [COMM 4940: The Design & Governance of Field Experiments](https://natematias.com/courses/comm4940/)In today's class, we're discussing the idea of cross-validation, where we confirm the reproducibility of statistical tests by holding back data. The reading for today is here:* Koul, A., Becchio, C., & Cavallo, A. (2018). [Cross-validation approaches for replicability in psychology](https://www.frontiersin.org/articles/10.3389/fpsyg.2018.01117/full). Frontiers in Psychology, 9, 1117.Note that for this class, we're going to be thinking about cross-validation differently from prediction tasks (which is how it's often discussed in machine learning). Here, we're focused on meta-analysis, leading into analysis of the Upworthy Research Archive:* With prediction, you test the error in the overall model* With meta-analysis, it's fine if the model predicts things badly, so long as we learn the average treatment effect. In fact, adding multiple predictors might lead us to mis-estimate the average treatment effect.
###Code
options("scipen"=9, "digits"=4)
library(MASS)
library(ggplot2)
library(rlang)
library(gmodels) #includes CrossTable
library(corrplot)
library(stringr)
library(plm) #Fixed effects models
## DOCUMENTATION AT: https://cran.r-project.org/web/packages/DeclareDesign/DeclareDesign.pdf
cbPalette <- c("#999999", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
options(repr.plot.width=7, repr.plot.height=3.5)
## Set Random Seed:
set.seed(35985)
###Output
_____no_output_____
###Markdown
Minimizing False Positives with Cross-ValidationCross-validation can help us minimize false positives in research.In the following example, we create a dataset of ten normally-distributed variables (1,000 observations each) that aren't correlated in any way. We then see whether any of the apparent correlations survive cross-validation.
###Code
# total sample size
n.size = 1000
## create dataframe
nocorr.df <- data.frame(
id = seq(1, n.size),
a = rnorm(n.size),
b = rnorm(n.size),
c = rnorm(n.size),
d = rnorm(n.size),
e = rnorm(n.size),
f = rnorm(n.size),
g = rnorm(n.size),
h = rnorm(n.size),
i = rnorm(n.size),
j = rnorm(n.size)
)
###Output
_____no_output_____
###Markdown
Creating the Exploratory and Confirmatory (holdout) DatasetsHere, I create two separate, independent datasets from the larger data:* `nocorr.exploratory.df` is a 30% dataset for exploratory analysis* `nocorr.confirmatory.df` is a 70% holdout dataset for confirmatory analysisThere's no hard and fast rule for how large the datasets should be, though there are some tradeoffs, as I illustrate at the end of the notebook.
###Code
## the proportion of observations
## to include in the exploratory dataset
exp.proportion = 0.3
nocorr.exploratory.df <- nocorr.df[
sample(1:nrow(nocorr.df), n.size*exp.proportion, replace=FALSE),]
nocorr.confirmatory.df <- subset(nocorr.df, (id %in% nocorr.exploratory.df$id)!=TRUE)
print(paste(nrow(nocorr.exploratory.df), "in the exploratory dataset"))
print(paste(nrow(nocorr.confirmatory.df), "in the confirmatory dataset"))
###Output
[1] "300 in the exploratory dataset"
[1] "700 in the confirmatory dataset"
###Markdown
Developing a Model with the Exploratory Dataset
###Code
# Let's imagine I was looking for just any correlation in the dataset, with the goal of
# getting some kind of statistically-significant result. Here, we see that both b and d
# are associated with g
ndt <- cor.mtest(nocorr.exploratory.df, conf.level = .95)
ndc <- cor(nocorr.exploratory.df)
corrplot(ndc, p.mat=ndt$p, sig.level=0.05)
# in this linear regression model, we see that both b and d are associated with g
summary(lm(g ~ b + d, data=nocorr.exploratory.df))
###Output
_____no_output_____
###Markdown
Testing the spurious correlations with the confirmatory datasetWhen we test the correlations on a (larger) independent sample from the population, we fail to reject the null hypothesis: the estimates are small enough to have plausibly arisen from chance.
###Code
summary(lm(g ~ b + d, data=nocorr.confirmatory.df))
###Output
_____no_output_____
###Markdown
Handling Overfitting with Cross-ValidationSometimes the best model that fits the data is so specific that it doesn't apply to anything beyond the data. That's what researchers call overfitting (and it's what happened in the above example too).By giving researchers a chance to test their model on more than one dataset, cross-validation enables researchers to confirm whether their model is overfit.For this example, we are using the [titanic dataset](http://biostat.mc.vanderbilt.edu/wiki/pub/Main/DataSets/titanic.html), a record of who survived a profound tragedy in 1912 when 1,500 out of 2,208 people [died when the H.M.S. Titanic sunk in the Atlantic](https://en.wikipedia.org/wiki/Passengers_of_the_RMS_Titanic). This dataset includes information about 1309 passengers ([description here](http://biostat.mc.vanderbilt.edu/wiki/pub/Main/DataSets/titanic3info.txt)).In this case, I want to know if people who paid more money for their ticket (`fare`) had a higher chance of survival, holding all else equal.
###Code
## Reset Random Seed:
set.seed(69982)
titanic.df <- read.csv("http://biostat.mc.vanderbilt.edu/wiki/pub/Main/DataSets/titanic3.csv")
titanic.df$is.minor <- as.numeric(titanic.df$age < 18)
t.exploratory.df <- titanic.df[
sample(1:nrow(titanic.df), nrow(titanic.df)*exp.proportion, replace=FALSE),]
t.confirmatory.df <- subset(titanic.df, (name %in% t.exploratory.df$name)!=TRUE)
print(paste(nrow(t.exploratory.df), "in the exploratory dataset"))
print(paste(nrow(t.confirmatory.df), "in the confirmatory dataset"))
colnames(t.exploratory.df)
###Output
[1] "392 in the exploratory dataset"
[1] "917 in the confirmatory dataset"
###Markdown
First, Develop an Exploratory ModelUsing the exploratory dataset, I can create a model that tests for the correlation I'm curious about. Here, I use the classic exploratory technique of fitting a sequence of models until I find the one that has the highest R2.The key hypothesis test will be whether `fare` is a statistically-significant predictor of survival, accounting for all other variables.
###Code
summary(lm(survived ~ fare, data=t.exploratory.df))
summary(lm(survived ~ fare + pclass, data=t.exploratory.df))
summary(lm(survived ~ fare + pclass + sex, data=t.exploratory.df))
summary(lm(survived ~ fare + pclass + sex + age, data=t.exploratory.df))
###Output
_____no_output_____
###Markdown
Here's the final model, which adjusts for passenger class, sex, age, and the number of siblings or spouses aboard (`sibsp`).
###Code
summary(lm(survived ~ fare + pclass + sex + age + sibsp, data=t.exploratory.df))
###Output
_____no_output_____
###Markdown
Next test the hypothesis in the confirmatory datasetAt this point, there are two approaches that you could take when doing hypothesis tests (in contrast with prediction tasks, where we are testing the overall error of the model). * Test the hypotheses on the confirmatory dataset alone* Test the hypotheses on the combined confirmatory + exploratory dataset (the full dataset) (this is the approach we will take here)Here, when we look for a correlation within the full dataset, we fail to reject the null hypothesis. Accounting for other factors, in this model, there was no relationship between chance of survival and the fare that someone paid.
###Code
summary(lm(survived ~ fare + pclass + sex + age + sibsp, data=titanic.df))
###Output
_____no_output_____
###Markdown
So which is the case? Is there a relationship or not? In one sample, I found support for my hypothesis. In the other, I did not. Which is right? My failure to reject the null hypothesis in the confirmatory dataset might be because there's no true relationship in the population, or it might be the luck of the draw. Cross-validation can also yield a false negative, where you miss an important relationship that is actually there. Imagine that the exploratory dataset didn't yield a statistically-significant relationship but there was genuinely a relationship in the population of Titanic passengers. And imagine that I hadn't gone on to test that hypothesis because it wasn't statistically-significant in the exploratory dataset. I might have missed an important relationship.In the case of the Titanic and the Upworthy Research Archive, we can't go back and increase the sample. If prediction were our goal, we could calculate root mean square error using the following techniques:* K-fold cross validation, where we create a series of exploratory sets and then test them against the confirmatory sets* Leave-one-out cross-validation, where we iterate through all possible samples and test the prediction against the left-out observationIn all of these cases, we would use measures like root mean square error to evaluate the goodness of fit of the model. Cross-Validation of a Hypothesis from the Upworthy Research ArchiveIn this example, I use the upper bound exploratory and confirmatory datasets of experiments naming notable people to:* develop a hypothesis with the exploratory dataset* test it with the full dataset
###Code
data_dir="../assignments/upworthy-archive-project/"
max.exp.df <- read.csv(paste(data_dir,"upworthy_archive_exploratory_max_effect_size_dataset.csv", sep=""))
max.conf.df <- read.csv(paste(data_dir,"upworthy_archive_confirmatory_max_effect_size_dataset.csv", sep=""))
headlines.df <- read.csv(paste(data_dir,"headlines.csv", sep=""))
###Output
_____no_output_____
###Markdown
Fit Exploratory ModelHere, I fit a fixed effects model on the hypothesis that including a notable person's name in a headline increases the chance that someone will click on the headline. In the upper bound dataset, across 40 tests, including a notable person's name in the headline increases the chance of someone clicking by 3 tenths of a percentage point.
###Code
summary(plm(clicked ~ has_treatment,
index="clickability_test_id",
model="within",
data=max.exp.df))
###Output
_____no_output_____
###Markdown
What does exploration look like for the Upworthy Research Archive?In this case, how might we adjust our model based on this exploratory data analysis, since we only have one predictor?* Maybe headline selection isn't precise enough: * If we don't have enough names in our list, maybe we're inadvertently comparing two headlines that both have notable names * Maybe the comparison headlines (without notable names) are very different in some other way that skews the results, for example have a different length or more positive/negative phrasing* Maybe there's a `mediating` factor, such as how famous the person is. Maybe we need to add another variable for how popular they were at the timeThese are all areas where further adjustment and exploration could improve the model or could lead to overfitting. Testing the Confirmatory HypothesisIn the next step, it's important to be very precise about what we are confirming. Options include:* a hypothesis about the existence of an effect (the hypothesis that has_treatment is statistically-significant)* a hypothesis about the sign of the effect (the hypothesis that has_treatment is greater than zero and statistically-significant)* a prediction about magnitude of the estimate (the hypothesis that has_treatment is at least X and statistically-significant)In this case, I'm choosing the following hypothesis test (which I did actually decide on before testing against the confirmatory dataset):**Hypothesis:** including a notable person's name in a headline as defined in `selecting_upworthy_archive_packages.py`, has an upper bound effect that is positive and statistically significant.Here, since we're doing meta-analysis and want to include all possible information, I'm *combining* the exploratory and confirmatory datasets into one final model.**Result:** the hypothesis is confirmed. On average across the upper-bound dataset of 297 A/B tests (and 2.1 million viewers), including a notable person's name in a headline increased the chance that someone clicked by 4 tenths of a percentage point, an effect that is positive and statistically-significant.
###Code
max.df <- rbind(max.exp.df, max.conf.df)
summary(plm(clicked ~ has_treatment,
index="clickability_test_id",
model="within",
data=max.df))
###Output
_____no_output_____
###Markdown
Other ways to cross-validateI used this technique because it is relatively straightforward and involves just expanding the available data. This is what I will be doing for the final.For meta-analysis, I'll be honest that I'm not (yet) sure how to conduct the other techniques, since the exploratory process is focused on defining variables and selecting the tests that go into the dataset.I'm not currently aware of anyone else who has used cross-validation to test hypotheses from a large dataset of experiments (though I bet this has been done in medicine).For correlational analyses like the Titanic example, k-fold and leave-one-subject-out are simpler because you can compare the Root Mean Square Error of a model with the key predictor (`fare`) to one without the predictor. If your preferred model is a better fit, then you uphold the finding from that model. Software EnvironmentThis example was generated using the following R configurations and libraries.
###Code
sessionInfo()
###Output
_____no_output_____ |
doc/source/auto_examples/02_general/plot_encode.ipynb | ###Markdown
Encode text into image An example of decode.encode. Start with the necessary imports--------------------------------
###Code
from os.path import join
import matplotlib.pyplot as plt
from nilearn import plotting
from gclda.model import Model
from gclda.decode import encode
from gclda.utils import get_resource_path
###Output
_____no_output_____
###Markdown
Load model----------------------------------
###Code
model_file = join(get_resource_path(), 'models/Neurosynth2015Filtered2',
'model_200topics_2015Filtered2_10000iters.pklz')
model = Model.load(model_file)
###Output
_____no_output_____
###Markdown
Encode text into image----------------------
###Code
text = 'painful stimulation during a language task'
text_img, topic_weights = encode(model, text)
###Output
_____no_output_____
###Markdown
Show encoded image---------------------
###Code
fig = plotting.plot_stat_map(text_img, display_mode='z',
threshold=0.00001,
cut_coords=[-2, 22, 44, 66])
###Output
_____no_output_____
###Markdown
Plot topic weights------------------
###Code
fig2, ax2 = plt.subplots()
ax2.plot(topic_weights)
ax2.set_xlabel('Topic #')
ax2.set_ylabel('Weight')
fig2.show()
###Output
_____no_output_____ |
notebooks/.ipynb_checkpoints/Programming Exercise 8 - Anomaly Detection and Recommender Systems-checkpoint.ipynb | ###Markdown
Programming Exercise 8 - Anomaly Detection and Recommender Systems - [Anomaly Detection](Anomaly-Detection)- [Recommender Systems](Recommender-Systems)
###Code
# %load ../../../standard_import.txt
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat
from sklearn.svm import OneClassSVM
from sklearn.covariance import EllipticEnvelope
pd.set_option('display.notebook_repr_html', False)
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 150)
pd.set_option('display.max_seq_items', None)
#%config InlineBackend.figure_formats = {'pdf',}
%matplotlib inline
import seaborn as sns
sns.set_context('notebook')
sns.set_style('white')
###Output
_____no_output_____
###Markdown
Anomaly Detection
###Code
data1 = loadmat('data/ex8data1.mat')
data1.keys()
X1 = data1['X']
print('X1:', X1.shape)
plt.scatter(X1[:,0], X1[:,1], c='b', marker='x')
plt.title("Outlier detection")
plt.xlabel('Latency (ms)')
plt.ylabel('Throughput (mb/s)');
clf = EllipticEnvelope()
clf.fit(X1)
# Create the grid for plotting
xx, yy = np.meshgrid(np.linspace(0, 25, 200), np.linspace(0, 30, 200))
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# Calculate the decision function and use threshold to determine outliers
y_pred = clf.decision_function(X1).ravel()
percentile = 1.9
threshold = np.percentile(y_pred, percentile)
outliers = y_pred < threshold
fig, (ax1, ax2) = plt.subplots(1,2, figsize=(14,5))
# Left plot
# Plot the decision function values
sns.distplot(y_pred, rug=True, ax=ax1)
# Plot the decision function values for the outliers in red
sns.distplot(y_pred[outliers], rug=True, hist=False, kde=False, norm_hist=True, color='r', ax=ax1)
ax1.vlines(threshold, 0, 0.9, colors='r', linestyles='dotted',
label='Threshold for {} percentile = {}'.format(percentile, np.round(threshold, 2)))
ax1.set_title('Distribution of Elliptic Envelope decision function values');
ax1.legend(loc='best')
# Right plot
# Plot the observations
ax2.scatter(X1[:,0], X1[:,1], c='b', marker='x')
# Plot outliers
ax2.scatter(X1[outliers][:,0], X1[outliers][:,1], c='r', marker='x', linewidths=2)
# Plot decision boundary based on threshold
ax2.contour(xx, yy, Z, levels=[threshold], linewidths=2, colors='red', linestyles='dotted')
ax2.set_title("Outlier detection")
ax2.set_xlabel('Latency (ms)')
ax2.set_ylabel('Throughput (mb/s)');
###Output
/home/ubuntu/anaconda3/lib/python3.6/site-packages/statsmodels/nonparametric/kdetools.py:20: VisibleDeprecationWarning: using a non-integer number instead of an integer will result in an error in the future
y = X[:m/2+1] + np.r_[0,X[m/2+1:],0]*1j
/home/ubuntu/anaconda3/lib/python3.6/site-packages/matplotlib/font_manager.py:1297: UserWarning: findfont: Font family ['sans-serif'] not found. Falling back to DejaVu Sans
(prop.get_family(), self.defaultFamily[fontext]))
###Markdown
Recommender Systems
###Code
data2 = loadmat('data/ex8_movies.mat')
data2.keys()
Y = data2['Y']
R = data2['R']
print('Y:', Y.shape)
print('R:', R.shape)
Y
R
sns.heatmap(Y, yticklabels=False, xticklabels=False);
###Output
/home/ubuntu/anaconda3/lib/python3.6/site-packages/matplotlib/font_manager.py:1297: UserWarning: findfont: Font family ['sans-serif'] not found. Falling back to DejaVu Sans
(prop.get_family(), self.defaultFamily[fontext]))
|
KMC_run_check.ipynb | ###Markdown
Check for rectangle overlaps
###Code
box = simulation_box(10,10)
constant = np.ones((50,))*10
constant2 = np.zeros((50,))
y = np.linspace(0,10)
x = np.linspace(0,10)
fig = plt.figure(figsize=(5,5))
ax = fig.add_subplot(111)
ax.plot(x, constant)
ax.plot(constant, y)
ax.plot(x, constant2)
ax.plot(constant2, y)
pos1 = np.array([0,0])
pos2 = np.array([5,9])
rect1 = rectangle(pos1, 3,2, 60)
rect2 = rectangle(pos2, 3,2, 120)
shifted_rect = shift_rectangle(rect1, rect2, box)
ax.scatter(rect1.vertices[:,0],rect1.vertices[:,1],c='r')
ax.scatter(rect2.vertices[:,0],rect2.vertices[:,1],c='b')
#ax.scatter(shifted_rect.vertices[:,0], shifted_rect.vertices[:,1])
check_overlap_rect(rect1, rect2, box)
###Output
_____no_output_____
###Markdown
Check for overlaps of points on a square lattice
###Code
a = 5
Lx = 100
Ly = 100
sl = AFP_square_lattice(a, Lx, Ly)
Nx = sl.Nx
Ny = sl.Ny
grids = sl.grids.reshape((Nx*Ny, 2))
pos_idx = np.array([5,5])
rect = rectangle(sl.grids[pos_idx[0],pos_idx[1]],20,30, 120)
overlap_indices = sl.square_offset(20,30,120)
nearby_idx = pos_idx + overlap_indices
nearby_pts = sl.grids[nearby_idx[:,0], nearby_idx[:,1]]
fig = plt.figure(figsize=(5,5))
ax = fig.add_subplot(111)
ax.scatter(rect.vertices[:,0], rect.vertices[:,1])
ax.scatter(grids[:,0],grids[:,1])
ax.scatter(nearby_pts[:,0], nearby_pts[:,1])
###Output
_____no_output_____
###Markdown
Check for events
###Code
a = 5
Lx = 300
Ly = 500
sl = AFP_square_lattice(a, Lx, Ly)
sys = AFP_system(sl, 30,50)
unbind = AFP_unbind(0.0002)
bind0 = AFP_bind0(2)
bind60 = AFP_bind60(2)
bind120 = AFP_bind120(2)
din = diffuse_in(0.001)
dout = diffuse_away(3.333)
events_list = [unbind, bind0, bind60, bind120, din, dout]
sim = AFP_sim(events_list, sys, verbose=True)
f = open("AFP.out","w")
f2 = open("rect.out","w")
f.close()
f2.close()
for i in range(100):
sim.update()
sim.step()
with open('AFP.out','a') as f:
f.write("{:.3f}\t".format(sim.t))
for i in range(len(sim.events)):
f.write("{:.3f}\t".format(sim.ev_R[i]))
for key in sim.system.num_dict:
f.write("{}\t".format(sim.system.num_dict[key]))
f.write("\n")
with open('rect.out','a') as f:
where0 = np.argwhere(sim.system.lattice.order_ == 2)
where60 = np.argwhere(sim.system.lattice.order_ == 3)
where120 = np.argwhere(sim.system.lattice.order_ == 4)
num_sites = len(where0) + len(where60) + len(where120)
grids = sim.system.lattice.grids
f.write("# TimeStep \t NumSites\n")
f.write("{:.3f}\t{}\n".format(sim.t, num_sites))
f.write("# pos.x \t pos.y \t Angle")
for i in range(len(where0)):
pos = grids[where0[i,0],where0[i,1]]
f.write("{:.3f}\t{:.3f}\t{:.3f}\n".format(pos[0],pos[1],0.0))
for i in range(len(where60)):
pos = grids[where60[i,0],where60[i,1]]
f.write("{:.3f}\t{:.3f}\t{:.3f}\n".format(pos[0],pos[1],60.0))
for i in range(len(where120)):
pos = grids[where120[i,0],where120[i,1]]
f.write("{:.3f}\t{:.3f}\t{:.3f}\n".format(pos[0],pos[1],120))
sim.system.lattice.order_.sum()
###Output
_____no_output_____
###Markdown
Check pymp
###Code
dat = read_dat_gen("AFP.out")
dat2 = open("rect.out")
dat2.readlines()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(dat[:,0],dat[:,-1],label='120')
ax.plot(dat[:,0],dat[:,-2],label='60')
#ax.plot(dat[:,0],(dat[:,-2] + dat[:,-1] + dat[:,-3])/100,label='60')
ax.plot(dat[:,0],dat[:,-3],label='0')
ax.legend()
ax.set_xlabel("ms")
ax.set_ylabel("# of proteins")
###Output
_____no_output_____ |
notebooks/01-PythonIntro/05. Modules.ipynb | ###Markdown
Modules One of the strengths of Python is that there are many built-in add-ons - or*modules* - which contain existing functions, classes, and variables which allow you to do complex tasks in only a few lines of code. In addition, there are many other third-party modules (e.g. Numpy, Scipy, Matplotlib, Astropy) that can be installed, and you can also develop your own modules that include functionalities you commonly use. The built-in modules are referred to as the *Standard Library*, and you canfind a full list of the available functionality in the [Python Documentation](http://docs.python.org/3/library/index.html). To use modules in your Python session or script, you need to **import** them. Thefollowing example shows how to import the built-in ``math`` module, whichcontains a number of useful mathematical functions:
###Code
import math
###Output
_____no_output_____
###Markdown
You can then access functions and other objects in the module with ``math.``, for example:
###Code
math.sin(2.3)
math.factorial(20)
math.pi
###Output
_____no_output_____
###Markdown
Because these modules exist, if what you want to do is very common, it probably already exists, and you won't need to write it yourself (making your code easier to read). For example, the ``numpy`` module contains useful functions for finding e.g. the mean, median, and standard deviation of a sequence of numbers:
###Code
import numpy as np
li = [1,2,7,3,1,3]
np.mean(li)
np.median(li)
np.std(li)
###Output
_____no_output_____
###Markdown
Notice that in the above case, we used: import numpy as np instead of: import numpy which shows that we can rename the module so that it's not as long to type in the program. Finally, it's also possible to simply import the functions needed directly:
###Code
from math import sin, cos
sin(3.4)
cos(3.4)
###Output
_____no_output_____
###Markdown
You may find examples on the internet that use e.g. from module import * but this is **not** recommended, because it makes programs difficult to debug: tools (and readers) that rely on just looking at the source cannot tell which functions are being imported or where a given name comes from. If you are not sure which module an object is coming from, you can inspect it.
###Code
import inspect
inspect.getmodule(sin)
###Output
_____no_output_____
###Markdown
Where to find modules and functions How do you know which modules exist in the first place? The Python documentation contains a [list of modules in the Standard Library](http://docs.python.org/3/library), but you can also simply search the web. Once you have a module that you think should contain the right kind of function, you can either look at the documentation for that module, or you can use the tab-completion in IPython: In [2]: math. math.acos math.degrees math.fsum math.pi math.acosh math.e math.gamma math.pow math.asin math.erf math.hypot math.radians math.asinh math.erfc math.isinf math.sin math.atan math.exp math.isnan math.sinh math.atan2 math.expm1 math.ldexp math.sqrt math.atanh math.fabs math.lgamma math.tan math.ceil math.factorial math.log math.tanh math.copysign math.floor math.log10 math.trunc math.cos math.fmod math.log1p math.cosh math.frexp math.modf Commonly used modules outside standard library - NumPy and Matplotlib There are many modules that are frequently used in astronomical data analysis. One of these modules, which has already been mentioned in this tutorial, is NumPy. NumPy provides an n-dimensional array object and routines for these objects (sorting, selecting, basic linear algebra and stats, among many others). The NumPy array is similar to the list data type in the sense that it acts as a container to store Python objects, but there are several reasons that you would want to use a numpy array over a list in scientific computing. 1. NumPy arrays allow quick mathematical and other types of operations on large amounts of data. These operations are vectorized - absent of any explicit looping - in pre-compiled C code. For example, image convolution using 2D numpy ndarrays is significantly faster than looping over pixel values to do the computation. 2. The NumPy module has a large number of built-in methods that operate on NumPy arrays. This makes code more concise and readable. For example, calculating the standard deviation of a list of numbers in the absence of NumPy requires a block of code, while with NumPy it can be done in one line by calling the numpy.std() function (a short illustration of points 1 and 2 appears after the next code cell). 3. Many existing python modules use NumPy arrays - it seems to be, logically, the default method of storing Python objects, particularly numerical data, in scientific computing. Another commonly used module is matplotlib, which both allows for the creation of plots (histograms, scatter, etc.) quickly with single function calls, as well as the option for a high level of customization. Let's use NumPy and matplotlib to show what can be done with a 2D image. First create a 2D image. The numpy `arange` function will give a 1D array of numbers between the upper and lower value specified. The `reshape` method on the array will reshape this 1D array into a 10x10 2D array.
###Code
import numpy as np
array_2d = np.array(np.arange(0,100).reshape(10,10))
print(array_2d)
###Output
_____no_output_____
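###Markdown
As a quick aside illustrating points 1 and 2 above, here is the same standard-deviation calculation written both as an explicit block of plain-Python code and as a single vectorized NumPy call (the numbers are arbitrary and chosen only so the result, 2.0, is easy to check by hand):
```python
import numpy as np

data = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]

# Without NumPy: an explicit block of code
mean = sum(data) / len(data)
variance = sum((x - mean) ** 2 for x in data) / len(data)
std_manual = variance ** 0.5

# With NumPy: one call, vectorized in compiled code
std_numpy = np.std(data)

print(std_manual, std_numpy)  # both print 2.0
```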
###Markdown
We can visualize this array (or any other 2D image) with matplotlib. Let's show this array as a greyscale image, and add a colorbar and a title. Ignore the line beginning with '%' - this controls how output plots are displayed and will be discussed later in the tutorial.
###Code
%matplotlib inline
import matplotlib.pyplot as plt
plt.imshow(array_2d, cmap = 'Greys')
plt.colorbar()
plt.title('Test Image')
###Output
_____no_output_____
###Markdown
Let's say we'd like to edit a 3x3 box at the top-left corner of this image. We can do this by indexing the 2D array and assigning that portion a new value (here, 100). The convention for indexing a 2D NumPy array is [row, column], i.e. y,x.
###Code
array_2d[0:3, 0:3] = 100  # rows 0 through 2, columns 0 through 2 set to 100
plt.imshow(array_2d, cmap = 'Greys')
plt.colorbar()
plt.title('Test Image')
###Output
_____no_output_____ |
SOM_Color_Palette.ipynb | ###Markdown
Introduction A simple colour-palette problem is used to demonstrate dimensionality reduction with a SOM. Here each input is a 3D (RGB) vector that is compressed onto a 2D grid (SOMs are generally used to compress data to two or three dimensions). Each 3D input is represented by a colour and associated with the neuron on the 2D grid that it activates the most (its best-matching unit). This should become clearer by comparing with the MNIST handwritten-digit example or the contextual-map problem on SOMs in Simon Haykin's Neural Networks textbook.
###Code
# Import Required Libraries
import numpy
from matplotlib import pyplot as plt
from copy import deepcopy
from matplotlib import patches as patches
###Output
_____no_output_____
###Markdown
Data Generate the colour data that will be used for the SOM. The raw data is a matrix $X$ of dimension 100x3, with each row holding RGB values. The SOM network is a 2D 10x10 grid, i.e. 100 neurons; the weight matrix $W$ has dimension 100x3 and the bias terms are zero. A matrix $Index$ of dimension 100x2 associates each neuron with its position on the 2D 10x10 grid. The activation function is linear. The SOM grid and raw-data shapes can be changed through the Raw_Data_Shape and SOM_Network_Shape variables. SOMs train much better when the data is normalised. Also, the best-matching criterion based on maximizing the inner product $\textbf{w}_{j}^{T}\textbf{x}$ is mathematically equivalent to minimizing the Euclidean distance between the vectors $\textbf{x}$ and $\textbf{w}_{j}$, provided that $\textbf{w}_{j}$ has unit length for all $j$. Hence normalise both the raw data and the initial-guess weights to unit length.
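A one-line check of that equivalence, using the same symbols:\begin{align*} \|\textbf{x} - \textbf{w}_{j}\|^{2} = \|\textbf{x}\|^{2} - 2\,\textbf{w}_{j}^{T}\textbf{x} + \|\textbf{w}_{j}\|^{2} \end{align*}so when every $\textbf{w}_{j}$ has unit length (and $\|\textbf{x}\|$ does not depend on $j$), minimizing the Euclidean distance over $j$ is the same as maximizing the inner product $\textbf{w}_{j}^{T}\textbf{x}$.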
###Code
# Generate Data
Raw_Data_Shape = numpy.array([100, 3])
SOM_Network_Shape = numpy.array([20, 20])
X = numpy.random.randint(0, 256, (Raw_Data_Shape[0], Raw_Data_Shape[1]))
X_Norm = X/numpy.linalg.norm(X, axis=1).reshape(Raw_Data_Shape[0], 1)
W_Initial_Guess = numpy.random.uniform(0, 1, (SOM_Network_Shape[0]*SOM_Network_Shape[1], Raw_Data_Shape[1]))
W_Initial_Guess_Norm = W_Initial_Guess/numpy.linalg.norm(W_Initial_Guess, axis=1).reshape(SOM_Network_Shape[0]*SOM_Network_Shape[1], 1)
Index = numpy.mgrid[0:SOM_Network_Shape[0],0:SOM_Network_Shape[1]].reshape(2, SOM_Network_Shape[0]*SOM_Network_Shape[1]).T
###Output
_____no_output_____
###Markdown
Parameter Selection 1 - Learning Rate: must not be allowed to decrease to zero; otherwise, it is possible for the network to get stuck in a metastable state. A metastable state belongs to a configuration of the feature map with a topological defect. The exponential decay of the learning rate guards against the possibility of metastable states. Learning Rate Decay:\begin{align*} \eta(epochs) = \eta_{0} \exp\left(\frac{-epochs}{\tau}\right) \end{align*}These desirable properties are satisfied by the choices $\eta_{0}$ = 0.1 and $\tau$ = 1000. 2 - Variance of the Gaussian neighbourhood function: Assuming the use of a two-dimensional lattice of neurons for the discrete map, set the initial size $\sigma_{0}$ of the neighborhood function equal to the “radius” of the lattice. Correspondingly, set the time constant $\tau$ as $\frac{1000}{log(\sigma_{0})}$. Variance Decay:\begin{align*} \sigma(epochs) = \sigma_{0} \exp\left(\frac{-epochs}{\tau}\right) \end{align*}3 - Maximum Epochs: Adaptation of the synaptic weights in the SOM network can be decomposed into two phases: an ordering/self-ordering phase followed by a convergence phase. It is during this first phase of the adaptive process that the topological ordering of the weight vectors takes place. The ordering phase may take as many as 1,000 epochs of the SOM algorithm, and possibly even more. The second phase of the adaptive process is needed to fine-tune the feature map and therefore provide an accurate statistical quantification of the input space. Moreover, the number of iterations needed for convergence depends strongly on the dimensionality of the input space. As a general rule, the number of iterations constituting the convergence phase must be at least 500 times the number of neurons in the network. Thus, the convergence phase may have to go on for thousands, and possibly even tens of thousands, of iterations.
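As a quick worked example of that rule of thumb: a 10x10 map has 100 neurons, so the convergence phase should run for at least $500 \times 100 = 50{,}000$ iterations, and a 20x20 map (400 neurons, as configured earlier in this notebook) would need at least $500 \times 400 = 200{,}000$.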
###Code
# Parameters
Epoch = 0
Max_Epoch = 55000
eta_0 = 0.1
eta_time_const = 1000
sigma_0 = numpy.max(SOM_Network_Shape) * 0.5
sigma_time_const = 1000/numpy.log10(sigma_0)
# Required Functions
def winning_neuron(x, W):
# Also called as Best Matching Neuron/Best Matching Unit (BMU)
return numpy.argmin(numpy.linalg.norm(x - W, axis=1))
def update_weights(lr, var, x, W, Grid):
i = winning_neuron(x, W)
d = numpy.square(numpy.linalg.norm(Grid - Grid[i], axis=1))
# Topological Neighbourhood Function
h = numpy.exp(-d/(2 * var * var))
W = W + lr * h[:, numpy.newaxis] * (x - W)
return W
def decay_learning_rate(eta_initial, epoch, time_const):
return eta_initial * numpy.exp(-epoch/time_const)
def decay_variance(sigma_initial, epoch, time_const):
return sigma_initial * numpy.exp(-epoch/time_const)
# Main Loop
W_new = deepcopy(W_Initial_Guess_Norm)
eta = deepcopy(eta_0)
sigma = deepcopy(sigma_0)
while Epoch <= Max_Epoch:
# Update Weights
i = numpy.random.randint(0, Raw_Data_Shape[0])
W_new = update_weights(eta, sigma, X_Norm[i], W_new, Index)
# Print
# print('Epoch: ', Epoch, ' Learning Rate: ', eta, ' Varinance: ', sigma, '\n')
# Next...
eta = decay_learning_rate(eta_0, Epoch, eta_time_const)
sigma = decay_variance(sigma_0, Epoch, sigma_time_const)
Epoch += 1
print('Optimal Weights Reached!!!')
###Output
Optimal Weights Reached!!!
###Markdown
Test Here we present every input to the trained SOM, find the neuron it activates most strongly (its best-matching unit), and place the input's colour at that neuron's grid cell. Several inputs can map to the same neuron, so some grid cells end up with no input; for those cells we find the input whose (normalised) vector is closest to the cell's weight vector and use its colour instead.
###Code
# Test
W_final = deepcopy(W_new)
Colour = numpy.zeros((SOM_Network_Shape[0]*SOM_Network_Shape[1], 3))
for i in range(0, Raw_Data_Shape[0]):
bmu = winning_neuron(X_Norm[i], W_final)
Colour[bmu] = X_Norm[i]
Zero_Pos = numpy.where(~Colour.any(axis=1))[0] # numpy.where(Colour[:, 0] == 0)[0]
for i in range(0, Zero_Pos.size):
temp = numpy.array([])
for j in range(0, Raw_Data_Shape[0]):
a = numpy.linalg.norm(X_Norm[j] - W_final[Zero_Pos[i]])
temp = numpy.concatenate((temp, [a]), axis=0)
bmu = numpy.argmin(temp)
Colour[Zero_Pos[i]] = X_Norm[bmu]
# Plot
fig = plt.figure()
# setup axes
ax = fig.add_subplot(111, aspect='equal')
ax.set_xlim((0, SOM_Network_Shape[0]))
ax.set_ylim((0, SOM_Network_Shape[1]))
ax.set_title('Self-Organising Map after %d iterations' % Max_Epoch)
# plot the rectangles
i = 0
for x in range(0, SOM_Network_Shape[0]):
for y in range(0, SOM_Network_Shape[1]):
ax.add_patch(patches.Rectangle((x, y), 1, 1, facecolor=Colour[i], edgecolor='none'))
i += 1
plt.savefig('Self-Organising Map.pdf')
plt.show()
###Output
_____no_output_____ |
4 - Rational Approximations to Quasiperiodically forced pendulum.ipynb | ###Markdown
4 - Rational Approximations to Quasiperiodically forced pendulum I want to understand the bifurcation diagram of the system $$ \frac{dX}{dt} + \sin(X) = a + b_1 \sin(\omega t) + b_2 \sin(\frac{p}{q} \omega t + \phi_0) $$ Specifically, I want to understand the nature of attractors with everything fixed except the initial phase shift.
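For orientation, the forcing can be written autonomously with two drive phases, which is what the Poincaré-map and voltage cells below implicitly use (the symbols $\theta_1$ and $\theta_2$ are introduced only for this note and do not appear in the code): $$ \frac{dX}{dt} = -\sin(X) + a + b_1 \sin(\theta_1) + b_2 \sin(\theta_2), \qquad \frac{d\theta_1}{dt} = \omega, \quad \frac{d\theta_2}{dt} = \frac{p}{q}\,\omega, \quad \theta_2(0) = \phi_0 . $$ Sampling once per drive period, at $t_n = 2\pi n / \omega$, gives section points $\left( X(t_n) \bmod 2\pi,\ \theta_2(t_n) \bmod 2\pi \right)$, with $\theta_2$ advancing by $2\pi p/q$ each period; for a rational ratio $p/q$ the second phase visits only $q$ distinct values, while for an irrational ratio it fills the circle densely. The voltage reported later is the average winding rate $V = \lim_{T \to \infty} \left( X(T) - X(0) \right) / (\omega T)$, normalized so that phase-locked steps are integers.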
###Code
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import scipy.integrate
from numba import jit,njit
###Output
_____no_output_____
###Markdown
Integrator
###Code
@njit
def f(phi):
'''
Current-phase relationship for the junction
'''
return np.sin(phi)
@njit
def current( t, a, b1, b2, Omega, eta, phi_0):
'''
Current applied to the junction
'''
return a + b1 * np.sin(Omega*t) + b2 * np.sin(eta*Omega*t + phi_0)
@njit
def dy_dt(y, t, a, b1, b2, Omega, eta, phi_0):
'''
y = phi
dphi_dt = -sin(phi) + a + b1 * sin(Omega*t) + b2 * sin(eta*Omega*t + phi_0)
'''
return -f(y) + current(t,a,b1,b2,Omega,eta,phi_0)
def integrate(params):
periods = params['periods']
points_per_period = params['points_per_period']
num_points = points_per_period*periods
t_vec = np.linspace(0,periods*2*np.pi/params['Omega'],num_points)
y_0 = params['y_0']
y_vec = scipy.integrate.odeint(dy_dt,y_0,t_vec,args=(params['a'],params['b1'],params['b2'],params['Omega'],params['eta'],params['phi_0']))
return y_vec[:,0],t_vec
def calc_voltage(y_vec,t_vec,params):
'''
Calculate voltage by averaging the phase velocity
Normalized so that steps are integers.
'''
dy_dt_vec = np.gradient(y_vec,t_vec)
T = t_vec[-1] - t_vec[0]
voltage = scipy.integrate.simps(dy_dt_vec,t_vec)/(T*params['Omega'])
return voltage
###Output
_____no_output_____
###Markdown
Solution at a single point
###Code
params = {
'Omega' : 1,
'periods' : 1000,
'points_per_period' : 500,
'y_0' : 0,
'a' : 0.5,
'b1' : 2,
'b2' : 2,
'eta' : 2,
'phi_0' : 0,
}
y_vec,t_vec = integrate(params)
dy_dt_vec = np.gradient(y_vec,t_vec)
voltage = calc_voltage(y_vec,t_vec,params)
fig,ax = plt.subplots(1,2,figsize=(12,4))
ax[0].plot(t_vec/(2*np.pi/params['Omega']),y_vec/(2*np.pi),'b')
ax[0].set_xlabel(r"Time $\left( \frac{2\pi}{\Omega} \right)$",fontsize=14)
ax[0].set_ylabel(r"$\frac{\phi}{2\pi}$",fontsize=20)
ax[0].grid(True)
ax[1].plot(t_vec/(2*np.pi/params['Omega']),dy_dt_vec,'r')
ax[1].set_xlabel(r"Time $\left( \frac{2\pi}{\Omega} \right)$",fontsize=14)
ax[1].set_ylabel(r"$\frac{d\phi}{dt}$",fontsize=20)
ax[1].grid(True)
plt.tight_layout()
print('Voltage',voltage)
###Output
Voltage 0.6185285055848794
###Markdown
Poincare Map
###Code
points_per_period = params['points_per_period']
eta = params['eta']
omega = params['Omega']
phi1_samp = y_vec[::points_per_period]
phi2_samp = eta*omega*t_vec[::points_per_period]
phi1_wrap = np.arctan2(np.sin(phi1_samp),np.cos(phi1_samp))
phi2_wrap = np.arctan2(np.sin(phi2_samp),np.cos(phi2_samp))
%matplotlib inline
plt.figure(dpi=100)
# number of points to skip to avoid seeing the transient
init_skip = 10
plt.scatter(phi1_wrap[init_skip:]/np.pi,phi2_wrap[init_skip:]/np.pi,color='goldenrod',s=10,marker='.')
plt.xlabel(r"$\phi_1/\pi$",fontsize=16)
plt.ylabel(r"$\phi_2/\pi$",fontsize=16)
ax = plt.gca()
ax.xaxis.set_major_locator(plt.MaxNLocator(5))
ax.yaxis.set_major_locator(plt.MaxNLocator(5))
plt.ylim([-1,1])
plt.xlim([-1,1])
###Output
_____no_output_____
###Markdown
Attractor for different $\phi_0$
###Code
phi_0_vec = np.linspace(0,np.pi,10)
params = {
'Omega' : 1,
'periods' : 1000,
'points_per_period' : 500,
'y_0' : 0,
'a' : 0.0,
'b1' : 2.7,
'b2' : 2.7,
'eta' : 1.618,
}
points_per_period = params['points_per_period']
eta = params['eta']
omega = params['Omega']
%matplotlib inline
plt.figure(dpi=100)
for ind,phi_0 in enumerate(phi_0_vec):
params['phi_0'] = phi_0
y_vec,t_vec = integrate(params)
dy_dt_vec = np.gradient(y_vec,t_vec)
phi1_samp = y_vec[::points_per_period]
phi2_samp = eta*omega*t_vec[::points_per_period]
phi1_wrap = np.arctan2(np.sin(phi1_samp),np.cos(phi1_samp))
phi2_wrap = np.arctan2(np.sin(phi2_samp),np.cos(phi2_samp))
# number of points to skip to avoid seeing the transient
init_skip = 10
plt.scatter(phi1_wrap[init_skip:]/np.pi,phi2_wrap[init_skip:]/np.pi, s=10,marker='.')
plt.xlabel(r"$\phi_1/\pi$",fontsize=16)
plt.ylabel(r"$\phi_2/\pi$",fontsize=16)
ax = plt.gca()
ax.xaxis.set_major_locator(plt.MaxNLocator(5))
ax.yaxis.set_major_locator(plt.MaxNLocator(5))
plt.ylim([-1,1])
plt.xlim([-1,1])
###Output
_____no_output_____
###Markdown
Voltage vs $\phi_0$
###Code
phi_0_vec = np.linspace(-np.pi,np.pi,250)
params = {
'Omega' : 1,
'periods' : 250,
'points_per_period' : 200,
'y_0' : 0,
'a' : 0.0,
'b1' : 2.8,
'b2' : 2.8,
'eta' :1.5,
}
voltage_vec = np.zeros(len(phi_0_vec))
for ind,phi_0 in enumerate(phi_0_vec):
params['phi_0'] = phi_0
y_vec,t_vec = integrate(params)
dy_dt_vec = np.gradient(y_vec,t_vec)
voltage = calc_voltage(y_vec,t_vec,params)
voltage_vec[ind] = voltage
%matplotlib inline
plt.figure(dpi=100)
plt.plot(phi_0_vec,voltage_vec,color='b')
plt.xlabel(r"$\phi_0$",fontsize=16)
plt.ylabel(r"$V$",fontsize=16)
ax = plt.gca()
ax.xaxis.set_major_locator(plt.MaxNLocator(5))
ax.yaxis.set_major_locator(plt.MaxNLocator(5))
#plt.ylim([-0.7,0.7])
###Output
_____no_output_____ |
process_video_low_frequency_frame.ipynb | ###Markdown
Process Videos
###Code
# From Python
# It requires OpenCV installed for Python
import sys
import csv
import cv2
import os
from sys import platform
import argparse
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import pandas as pd
import numpy as np
import math
from scipy.stats import mode
import time
import pdb
from IPython.core.debugger import Tracer
# Remember to add your installation path here
# Option b
# If you run `make install` (default path is `/usr/local/python` for Ubuntu), you can also access the OpenPose/python module from there. This will install OpenPose and the python library at your desired installation path. Ensure that this is in your python path in order to use it.
sys.path.insert(0,r'/home/lingheng/openpose_python_lib/python/openpose')
# Parameters for OpenPose. Take a look at C++ OpenPose example for meaning of components. Ensure all below are filled
try:
from openpose import *
except:
raise Exception('Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?')
NET_RESOLUTION = 736#368
CALCULATE_EVERY_X_FRAME = 3
MODEL_POSE = "COCO"#"COCO" #"MPI" #"BODY_25" #"MPI_4_layers"
params = dict()
params["logging_level"] = 3
params["output_resolution"] = "-1x-1"
params["net_resolution"] = "-1x{}".format(NET_RESOLUTION) # if crop video, this should be changged and must be mutplies of 16.
params["model_pose"] = MODEL_POSE
params["alpha_pose"] = 0.6
params["scale_gap"] = 0.25
params["scale_number"] = 4
params["render_threshold"] = 0.05
# If GPU version is built, and multiple GPUs are available, set the ID here
params["num_gpu_start"] = 0
params["disable_blending"] = False
# Ensure you point to the correct path where models are located
params["default_model_folder"] = "/home/lingheng/openpose/models/"
# Construct OpenPose object allocates GPU memory
openpose = OpenPose(params)
args = dict()
args['video']='/home/lingheng/project/lingheng/ROM_Video_Process/ROM_raw_videos_clips/Sep_12/Camera1_Sep_12_1300_1400_Parameterized_Learning_Agent_Lingheng_0.mp4'
camera_index = args['video'].split('Camera')[1].split('_')[0]
camera_index == 1
# if __name__ == "__main__":
# # construct the argument parser and parse the arguments
# ap = argparse.ArgumentParser()
# ap.add_argument("-v", "--video", default='/home/lingheng/project/lingheng/ROM_raw_videos/Camera1_test.mp4', help="path to the video file")
# ap.add_argument("-o", "--output_directory", default='/home/lingheng/project/lingheng/ROM_processed_videos', help="directory to save processed video")
# args = vars(ap.parse_args())
# construct the argument parser and parse the arguments
args = dict()
args['video']='/home/lingheng/project/lingheng/ROM_Video_Process/ROM_raw_videos_clips/Sep_12/Camera1_Sep_12_1300_1400_Parameterized_Learning_Agent_Lingheng_0.mp4'
args['output_directory']='/home/lingheng/project/lingheng/ROM_Video_Process/ROM_raw_videos_clips_processed/Sep_12'
if args.get("video", None) is None:
raise Error("No input video!!")
# otherwise, we are reading from a video file
else:
camera = cv2.VideoCapture(args["video"])
########################################################################
# Estimate Occupancy #
########################################################################
# frames per second (fps) in the raw video
fps = camera.get(cv2.CAP_PROP_FPS)
frame_count = 1
print("Raw frames per second: {0}".format(fps))
# prepare to save video
(grabbed, frame) = camera.read()
## downsample frame
#downsample_rate = 0.5
#frame = cv2.resize(frame,None,fx=downsample_rate, fy=downsample_rate, interpolation = cv2.INTER_LINEAR)
# Crop videos from Camera1 or Camera2
camera_index = int(args['video'].split('Camera')[1].split('_')[0])
original_h, original_w, channels= frame.shape
if camera_index == 1:
# crop frame: Camera1
top_edge = int(original_h*(1/10))
down_edge = int(original_h*1)
left_edge = int(original_w*(1/5))
right_edge = int(original_w*(4/5))
elif camera_index == 2:
# TODO: crop frame: Camera2
top_edge = int(original_h*(1/10))
down_edge = int(original_h*(4/5))
left_edge = int(original_w*(2.5/5))
right_edge = int(original_w*(1))
else:
# crop frame: test video
top_edge = int(original_h*(1/10))
down_edge = int(original_h*1)
left_edge = int(original_w*(1/5))
right_edge = int(original_w*(4/5))
print('Crop: Video not from Camera1 or Camera2!')
frame_cropped = frame[top_edge:down_edge,left_edge:right_edge,:].copy() # must use copy(), otherwise slice only return address i.e. not hard copy
cropped_h, cropped_w, channels = frame_cropped.shape
fwidth = cropped_w
fheight = cropped_h
print("Frame width:{}, Frame height:{}.".format(cropped_w , cropped_h))
# Define the polygon of Core Interest Area for videos from Camera1 or Camera2
if camera_index == 1:
# polygon for Camera1
point_1 = [int(0.17 * cropped_w), int(0.20 * cropped_h)]
point_2 = [int(0.17 * cropped_w), int(0.62 * cropped_h)]
point_3 = [int(0.44 * cropped_w), int(0.82 * cropped_h)]
point_4 = [int(0.61 * cropped_w), int(0.72 * cropped_h)]
point_5 = [int(0.61 * cropped_w), int(0.20 * cropped_h)]
core_interest_area_polygon = np.array([point_1,point_2,point_3,point_4,point_5])
elif camera_index == 2:
# polygon for Camera2
point_1 = [int(0.15 * cropped_w), int(0.05 * cropped_h)]
point_2 = [int(0.15 * cropped_w), int(0.65 * cropped_h)]
point_3 = [int(0.95 * cropped_w), int(0.75 * cropped_h)]
point_4 = [int(0.95 * cropped_w), int(0.05 * cropped_h)]
core_interest_area_polygon = np.array([point_1,point_2,point_3,point_4])
else:
# polygon for test video
point_1 = [int(0.17 * cropped_w), int(0.20 * cropped_h)]
point_2 = [int(0.17 * cropped_w), int(0.62 * cropped_h)]
point_3 = [int(0.44 * cropped_w), int(0.82 * cropped_h)]
point_4 = [int(0.61 * cropped_w), int(0.72 * cropped_h)]
point_5 = [int(0.61 * cropped_w), int(0.20 * cropped_h)]
print('Polygon: Video not from Camera1 or Camera2!')
core_interest_area_polygon = np.array([point_1,point_2,point_3,point_4,point_5])
# get output video file name
file_path = args["video"].split('/')
file_name, _= file_path[-1].split('.')
fourcc = cv2.VideoWriter_fourcc(*'XVID')
if not os.path.exists(args['output_directory']):
os.makedirs(args['output_directory'])
output_video_filename = os.path.join(args['output_directory'],'{}_processed_{}_{}_{}.avi'.format(file_name,params["model_pose"],NET_RESOLUTION,CALCULATE_EVERY_X_FRAME))
out_camera_frame_whole = cv2.VideoWriter(output_video_filename,fourcc, fps, (fwidth,fheight))
# get output estimated occupancy file name
out_occupancy_whole = os.path.join(args['output_directory'],'{}_processed_occupancy_whole_{}_{}_{}.csv'.format(file_name,params["model_pose"],NET_RESOLUTION,CALCULATE_EVERY_X_FRAME))
out_occupancy_core = os.path.join(args['output_directory'],'{}_processed_occupancy_core_{}_{}_{}.csv'.format(file_name,params["model_pose"],NET_RESOLUTION,CALCULATE_EVERY_X_FRAME))
out_occupancy_margin = os.path.join(args['output_directory'],'{}_processed_occupancy_margin_{}_{}_{}.csv'.format(file_name,params["model_pose"],NET_RESOLUTION,CALCULATE_EVERY_X_FRAME))
with open(out_occupancy_whole, 'a') as csv_datafile:
fieldnames = ['Time', 'Occupancy']
writer = csv.DictWriter(csv_datafile, fieldnames = fieldnames)
writer.writeheader()
with open(out_occupancy_core, 'a') as csv_datafile:
fieldnames = ['Time', 'Occupancy']
writer = csv.DictWriter(csv_datafile, fieldnames = fieldnames)
writer.writeheader()
with open(out_occupancy_margin, 'a') as csv_datafile:
fieldnames = ['Time', 'Occupancy']
writer = csv.DictWriter(csv_datafile, fieldnames = fieldnames)
writer.writeheader()
# loop over the frames of the video
total_frame_number = camera.get(cv2.CAP_PROP_FRAME_COUNT)
print('Total frame number: {}'.format(total_frame_number))
start_time = time.time()
ignore_frame_count = CALCULATE_EVERY_X_FRAME
for frame_count in range(int(total_frame_number)):
if frame_count % 200 == 0:
print('Processing frame: {}'.format(frame_count))
print('Elapsed time: {}s'.format(time.time() - start_time))
(grabbed, frame) = camera.read()
# TODO: it's not necessary to process every frame.
# Observation is received in 10hz i.e. each observation takes 100millisecond.
# Each frame take 33millisecond, so we could estimate occupancy every 3 frame.
if ignore_frame_count == CALCULATE_EVERY_X_FRAME:
ignore_frame_count = 1
else:
ignore_frame_count += 1
continue
if grabbed == True:
frame_time = camera.get(cv2.CAP_PROP_POS_MSEC) #Current position of the video file in milliseconds.
## downsample frame
#frame = cv2.resize(frame,None,fx=downsample_rate, fy=downsample_rate, interpolation = cv2.INTER_LINEAR)
# crop frame
frame_cropped = frame[top_edge:down_edge,left_edge:right_edge,:].copy() # must use copy()
# 1. Whole Interest Area
# Output keypoints and the image with the human skeleton blended on it
# (num_people, 25_keypoints, x_y_confidence) = keypoints_whole_interest_area.shape
keypoints_whole_interest_area, output_image_whole_interest_area = openpose.forward(frame_cropped, True)
# 2. Core Interest Area
core_interest_area_mask = np.zeros(frame_cropped.shape[:2], np.uint8)
cv2.drawContours(core_interest_area_mask, [core_interest_area_polygon], -1, (255, 255, 255), -1, cv2.LINE_AA)
core_interest_area = cv2.bitwise_and(output_image_whole_interest_area, frame_cropped, mask=core_interest_area_mask)
# 3. Margin Interest Area
margin_interest_area = cv2.bitwise_xor(output_image_whole_interest_area, core_interest_area)
# TODO: infer occupancy from "keypoints_whole_interest_area"
# draw the text and timestamp on the frame
occupancy_whole = keypoints_whole_interest_area.shape[0]
occupancy_core = 0
occupancy_margin = 0
for people in keypoints_whole_interest_area:
# Sort all keypoints and pick up the one with the highest confidence
# Meaning of keypoints (https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/doc/output.md)
ordered_keypoints = people[people[:,2].argsort(),:] # increasing order
x, y = ordered_keypoints[-1][:2]
#pdb.set_trace()
# Choose the one with higher confidence to calculatate occupancy and location
if cv2.pointPolygonTest(core_interest_area_polygon, (x, y), False) == 1:
occupancy_core += 1
else:
occupancy_margin += 1
cv2.drawContours(output_image_whole_interest_area, [core_interest_area_polygon], -1, (255, 255, 0), 2, cv2.LINE_AA)
cv2.putText(output_image_whole_interest_area, "Whole Occupancy: {}, Core Occupancy: {}, Margin Occupancy: {}".format(occupancy_whole, occupancy_core, occupancy_margin), (10, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
cv2.putText(core_interest_area, "Core Occupancy: {}".format(occupancy_core), (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
cv2.putText(margin_interest_area, "Margin Occupancy: {}".format(occupancy_margin), (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
# save estimated occupancy data
fieldnames = ['Time', 'Occupancy']
with open(out_occupancy_whole, 'a') as csv_datafile:
writer = csv.DictWriter(csv_datafile, fieldnames = fieldnames)
writer.writerow({'Time':frame_time, 'Occupancy': occupancy_whole})
with open(out_occupancy_core, 'a') as csv_datafile:
writer = csv.DictWriter(csv_datafile, fieldnames = fieldnames)
writer.writerow({'Time':frame_time, 'Occupancy': occupancy_core})
with open(out_occupancy_margin, 'a') as csv_datafile:
writer = csv.DictWriter(csv_datafile, fieldnames = fieldnames)
writer.writerow({'Time':frame_time, 'Occupancy': occupancy_margin})
# save processed videos
out_camera_frame_whole.write(output_image_whole_interest_area)
else:
# Pass this frame if cannot grab an image.
print('Frame: {}, grabbed={} and frame={}'.format(frame_count, grabbed, frame))
def subplot_estimated_occupancy(occupancy_whole, occupancy_core, occupancy_margin, fig_filename, smooth_flag = False):
"""
Plot and save estimated occupancy in Three Interest Area.
Args:
occupancy_whole (pd.DataFrame): occupancy in Whole Interest Area
occupancy_core (pd.DataFrame): occupancy in Core Interest Area
occupancy_margin (pd.DataFrame): occupancy in Margin Interest Area
fig_filename (string): filename of the saved figure
smooth_flag (bool): indicates whether the occupancy is smoothened
"""
ymin = 0
ymax = 20
ystep = 4
lw=1.5
plt.figure()
# Whole Interest Area
plt.subplot(3, 1, 1)
    plt.plot(occupancy_whole['Time']/1000, occupancy_whole['Occupancy'], 'b-', lw=lw, alpha=0.6)
plt.xlabel('time/second')
plt.ylabel('# of visitors')
plt.ylim(ymin, ymax)
plt.yticks(np.arange(ymin,ymax,ystep))
if smooth_flag == False:
plt.title('Estimated # of visitors in Whole Interest Area')
else:
plt.title('Smooth Estimated # of visitors in Whole Interest Area')
plt.grid(True, linestyle=':')
# Core Interest Area
plt.subplot(3, 1, 2)
    plt.plot(occupancy_core['Time']/1000, occupancy_core['Occupancy'], 'r-', lw=lw, alpha=0.6)
plt.xlabel('time/second')
plt.ylabel('# of visitors')
plt.ylim(ymin, ymax)
plt.yticks(np.arange(ymin,ymax,ystep))
plt.title('Estimated # of visitors in Core Interest Area')
if smooth_flag == False:
plt.title('Estimated # of visitors in Core Interest Area')
else:
plt.title('Smooth Estimated # of visitors in Core Interest Area')
plt.grid(True, linestyle=':')
# Margin Interest Area
plt.subplot(3, 1, 3)
    plt.plot(occupancy_margin['Time']/1000, occupancy_margin['Occupancy'], 'g-', lw=lw, alpha=0.6)
plt.xlabel('time/second')
plt.ylabel('# of visitors')
plt.ylim(ymin, ymax)
plt.yticks(np.arange(ymin,ymax,ystep))
if smooth_flag == False:
plt.title('Estimated # of visitors in Margin Interest Area')
else:
plt.title('Smooth Estimated # of visitors in Margin Interest Area')
plt.grid(True, linestyle=':')
plt.tight_layout()
plt.savefig(fig_filename, dpi = 300)
def plot_estimated_occupancy(occupancy_whole, occupancy_core, occupancy_margin, fig_filename, smooth_flag = False):
"""
Args:
smooth_flag (bool): indicates whether the occupancy is smoothened
"""
ymin=0
ymax=20
ystep=4
plt.figure()
# Whole Interest Area
plt.plot(occupancy_whole['Time']/1000, occupancy_whole['Occupancy'], 'r-', lw=1.5, alpha=0.6)
# Core Interest Area
plt.plot(occupancy_core['Time']/1000, occupancy_core['Occupancy'], 'g-', lw=1.5, alpha=0.6)
# Margin Interest Area
plt.plot(occupancy_margin['Time']/1000, occupancy_margin['Occupancy'], 'b-', lw=1.5, alpha=0.6)
plt.legend(('Whole Interest Area','Core Interest Area','Margin Interest Area'))
plt.xlabel('time/second')
plt.ylabel('# of visitors')
plt.ylim(ymin, ymax, ystep)
if smooth_flag == False:
plt.title('Estimated # of visitors in Three Interest Areas')
else:
plt.title('Smooth Estimated # of visitors in Three Interest Areas')
plt.grid(True, linestyle=':')
plt.tight_layout()
plt.savefig(fig_filename, dpi = 300)
def moving_smoothing(values, window_size, smooth_type='mode', stride = 1):
"""
Smoothen estimated occupancy.
Args:
values (pandas.DataFrame):
values['Time']: time in millisecond
values['Occupancy']: estimated # of visitors
window_size(int): the size of sliding window
smooth_type (string):
1. 'mode'
2. 'mean'
3. 'min'
4. 'median'
stride (int): the stride between two consecutive windows
Returns:
        smooth_time (np.array): smooth time i.e. the start (minimum) time of each window
smooth_occupancy (np.array): smooth occupancy i.e. the mode occupancy in each window
"""
group_time = []
group_occupancy = []
for i in range(0, math.ceil((len(values['Time'])-window_size+1)/stride)):
group_time.append(values['Time'][i:i+window_size])
group_occupancy.append(values['Occupancy'][i:i+window_size])
smooth_time = []
smooth_occupancy = []
for i in range(len(group_time)):
        smooth_time.append(min(group_time[i])) # start (minimum) time of the window
if smooth_type == 'mode':
smooth_occupancy.append(mode(group_occupancy[i])[0][0]) # mode occupancy in the group
elif smooth_type == 'mean':
smooth_occupancy.append(np.round(np.mean(group_occupancy[i])))
#smooth_occupancy.append(np.mean(group_occupancy[i]))
elif smooth_type == 'min':
smooth_occupancy.append(np.round(np.min(group_occupancy[i])))
#smooth_occupancy.append(np.min(group_occupancy[i]))
elif smooth_type == 'median':
smooth_occupancy.append(np.round(np.median(group_occupancy[i])))
#smooth_occupancy.append(np.median(group_occupancy[i]))
else:
print('Please choose a proper smooth_type.')
smooth_values = pd.DataFrame(data={'Time': np.array(smooth_time),
'Occupancy': np.array(smooth_occupancy,dtype=int)})
return smooth_values#np.array(smooth_time), np.array(smooth_occupancy)
def interpret_senario(occupancy_whole, occupancy_core, occupancy_margin, senarios_truth_table):
"""
Args:
occupancy_whole (pd.DataFrame): estimation of coccupancy in whole intrest area
occupancy_core (pd.DataFrame): estimation of coccupancy in core intrest area
occupancy_margin (pd.DataFrame): estimation of coccupancy in margin intrest area
senarios_truth_table (pandas.DataFrame): senarios truth table which has information on
how to interpret senario.
Returns:
senario_sequence (np.array): sequnce of interpreted senario discription according to "Senario Truth Value Table"
event_sequence (np.array): sequence of interpreted senario code according to "Senario Truth Value Table"
Note: Different from "Senario Truth Value Table", in this sequence we convert all impossible cases into 0 rather than their original senario code.
event_time (np.array): the time of each event in millisecond.
"""
senario_sequence = []
event_sequence = []
event_time = []
for i in range(len(occupancy_whole['Occupancy'])-1):
change_x = occupancy_core['Occupancy'][i+1] - occupancy_core['Occupancy'][i]
change_y = occupancy_margin['Occupancy'][i+1] - occupancy_margin['Occupancy'][i]
change_z = occupancy_whole['Occupancy'][i+1] - occupancy_whole['Occupancy'][i]
# code:
# 0: hold
# 1: increase
# 2: decrease
if change_x == 0:
x = 0
elif change_x > 0:
x = 1
elif change_x < 0:
x = 2
if change_y == 0:
y = 0
elif change_y > 0:
y = 1
elif change_y < 0:
y = 2
if change_z == 0:
z = 0
elif change_z > 0:
z = 1
elif change_z < 0:
z = 2
# convert ternary to decimal
        senario_index = z + y*3 + x*9  # x*3**2 (note: '^' is bitwise XOR in Python, not exponentiation)
senario_sequence.append(senarios_truth_table['Explanation'][senario_index])
if senarios_truth_table['Truth value'][senario_index] == 0:
# convert all impossible cases into 0
event_sequence.append(0)
#event_sequence.append(senario_index)
else:
event_sequence.append(senario_index)
event_time.append(occupancy_whole['Time'][i])
return np.array(senario_sequence), np.array(event_sequence), np.array(event_time)
def plot_detected_interesting_event(senario_sequence, event_sequence, event_time, fig_filename):
ymin = 0
ymax = 26.0005
ystep = 1
plt.figure(figsize=(10, 6))
plt.scatter(event_time/1000, event_sequence)
plt.xlabel('time/second')
plt.ylabel('Event Description')
plt.ylim(ymin, ymax)
plt.yticks(np.arange(ymin,ymax,ystep), senarios_truth_table['Explanation'],
rotation=45, fontsize = 6)
ax2 = plt.twinx()
plt.ylabel('Event Code')
plt.yticks(np.arange(ymin,ymax,ystep), np.arange(ymin,ymax,ystep))
plt.title('Detected Interesting Events')
plt.grid(True, linestyle=':')
plt.tight_layout()
plt.savefig(fig_filename, dpi = 300)
def tag_interesting_event_description_on_video(video_filename,
smooth_type, window_size, stride,
senario_sequence, event_sequence, event_time):
"""
Args:
video_filename (string): filename of video
smooth_type (string): smooth type (hyper-parameter of smooth method)
window_size (int): size of smooth window (hyper-parameter of smooth method)
stride (int): stride size (hyper-parameter of smooth method)
senario_sequence (np.array): sequnce of interpreted senario discription according to "Senario Truth Value Table"
event_sequence (np.array): sequence of interpreted senario code according to "Senario Truth Value Table"
Note: Different from "Senario Truth Value Table", in this sequence we convert all impossible cases into 0 rather than their original senario code.
event_time (np.array): the time of each event in millisecond.
"""
camera = cv2.VideoCapture(video_filename)
(grabbed, frame) = camera.read()
fheight, fwidth, channels= frame.shape
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out_tagged_camera_frame = cv2.VideoWriter(video_filename.split('.avi')[0]+'_tagged_smooth_type_{}_window_size_{}_stride_{}.avi'.format(smooth_type,window_size,stride),fourcc, camera.get(cv2.CAP_PROP_FPS), (fwidth,fheight))
# loop over the frames of the video
total_frame_number = camera.get(cv2.CAP_PROP_FRAME_COUNT)
max_line_character_num = 60 # 60 characters each line
detected_event_time = 0
detected_event_senario = ''
line_num = 1
for frame_count in range(len(event_time)):
if frame_count % 200 == 0:
print('Processing frame: {}'.format(frame_count))
(grabbed, frame) = camera.read()
if grabbed == True:
cv2.putText(frame, "smooth_type: {}, window_size: {}, stride: {}.".format(smooth_type,window_size,stride), (10, 120), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
time = camera.get(cv2.CAP_PROP_POS_MSEC) #Current position of the video file in milliseconds.
event_index = frame_count
if event_sequence[event_index] != 0: # 0 means 'impossible event'
detected_event_time = time
detected_event_senario = senario_sequence[event_index]
cv2.putText(frame, "Detect Interesting Event at: {}s.".format(int(detected_event_time/1000)), (10, 150), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
line_num = np.ceil(len(detected_event_senario)/max_line_character_num)
for i in range(int(line_num)):
if i < line_num:
cv2.putText(frame, "{}".format(detected_event_senario[i*max_line_character_num:(i+1)*max_line_character_num]), (10, 180+30*(i)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
else:
cv2.putText(frame, "{}".format(detected_event_senario[i*max_line_character_num:end]), (10, 180+30*(i)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
else: # repeat text from last detected event
cv2.putText(frame, "Detect Interesting Event at:{}s".format(int(detected_event_time/1000)), (10, 150), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
for i in range(int(line_num)):
if i < line_num:
cv2.putText(frame, "{}".format(detected_event_senario[i*max_line_character_num:(i+1)*max_line_character_num]), (10, 180+30*(i)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
else:
cv2.putText(frame, "{}".format(detected_event_senario[i*max_line_character_num:end]), (10, 180+30*(i)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
# save processed videos
out_tagged_camera_frame.write(frame)
else:
# Pass this frame if cannot grab an image.
print('Frame: {}, grabbed={} and frame={}'.format(frame_count, grabbed, frame))
########################################################################
# Smoothen Estimated Occupancy, then detect interesting event #
########################################################################
# read estimated occupancy in Three Interest Areas
occupancy_whole = pd.read_csv(out_occupancy_whole)
occupancy_core = pd.read_csv(out_occupancy_core)
occupancy_margin = pd.read_csv(out_occupancy_margin)
# save plot of estimated occupancy in Three Interest Areas
fig_filename = os.path.join(args['output_directory'], '{}_Subplot_Estimated_Occupancy.png'.format(file_name))
subplot_estimated_occupancy(occupancy_whole, occupancy_core, occupancy_margin, fig_filename)
fig_filename = os.path.join(args['output_directory'], '{}_Plot_Estimated_Occupancy.png'.format(file_name))
plot_estimated_occupancy(occupancy_whole, occupancy_core, occupancy_margin, fig_filename)
# smoothen
window_size = 1
smooth_type='mean'
stride = 1
smooth_occupancy_whole = moving_smoothing(occupancy_whole, window_size, smooth_type)
smooth_occupancy_core = moving_smoothing(occupancy_core, window_size, smooth_type)
smooth_occupancy_margin = moving_smoothing(occupancy_margin, window_size, smooth_type)
fig_filename = os.path.join(args['output_directory'], '{}_Subplot_Smooth_Estimated_Occupancy.png'.format(file_name))
subplot_estimated_occupancy(smooth_occupancy_whole,
smooth_occupancy_core,
smooth_occupancy_margin,
fig_filename,
smooth_flag = True)
fig_filename = os.path.join(args['output_directory'], '{}_Plot_Smooth_Estimated_Occupancy.png'.format(file_name))
plot_estimated_occupancy(smooth_occupancy_whole,
smooth_occupancy_core,
smooth_occupancy_margin,
fig_filename,
smooth_flag = True)
# load Senario Truth Table
senarios_truth_table = pd.read_csv('analize_visitor_in_and_out_senario_truth_table.csv')
# Interpret
senario_sequence, event_sequence, event_time = interpret_senario(smooth_occupancy_core,
smooth_occupancy_margin,
smooth_occupancy_whole,
senarios_truth_table)
# Plot interesting events
fig_filename = os.path.join(args['output_directory'], '{}_Plot_Interesting_Event_smooth_type_{}_window_size_{}_stride{}'.format(file_name, smooth_type, window_size, stride))
plot_detected_interesting_event(senario_sequence, event_sequence, event_time, fig_filename)
# Tag
tag_interesting_event_description_on_video(output_video_filename,
smooth_type, window_size, stride,
senario_sequence, event_sequence, event_time)
###Output
_____no_output_____ |
Sparks1.ipynb | ###Markdown
TSF GRIP TASK 1 :-
###Code
#importing all libraries
import pandas as pd
from pandas import DataFrame
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
#reading data
link="http://bit.ly/w-data"
data = pd.read_csv(link)
data.head()
data.describe()
X = DataFrame(data, columns=['Hours'])
Y = DataFrame(data, columns=['Scores'])
X
Y
plt.figure(figsize=(10,6))
plt.scatter(X, Y, alpha=0.8)
plt.title('Hours vs Percentage')
plt.xlabel('Hours')
plt.ylabel('Percentage')
plt.ylim(15, 100)
plt.xlim(1, 10)
plt.show()
regression = LinearRegression()
regression.fit(X, Y)
###Output
_____no_output_____
###Markdown
Slope coefficient :-
###Code
regression.coef_
###Output
_____no_output_____
###Markdown
Intercept :-
###Code
regression.intercept_
plt.figure(figsize=(10,6))
plt.scatter(X, Y, alpha=0.8)
# Adding the regression line here:
plt.plot(X, regression.predict(X), color='red', linewidth=3)
plt.title('Hours vs Percentage')
plt.xlabel('Hours')
plt.ylabel('Percentage')
plt.ylim(15, 100)
plt.xlim(1, 10)
plt.show()
###Output
_____no_output_____
###Markdown
Getting R-square from Regression :-
###Code
regression.score(X, Y)
###Output
_____no_output_____
###Markdown
What will be predicted score if a student studies for 9.25 hrs/ day ?
###Code
hours = 9.25
own_pred = regression.predict([[hours]])
print("No of Hours = {}".format(hours))
print("Predicted Score = {}".format(own_pred[0]))
###Output
No of Hours = 9.25
Predicted Score = [92.90985477]
###Markdown
Checking it using regression coefficient and regression intercept :-
###Code
value = (regression.coef_ *hours) + regression.intercept_
value
###Output
_____no_output_____ |
ml/ensemble/bagging-pasting.ipynb | ###Markdown
Bagging Roughly 37% of the samples are never drawn: 1. With m samples, the probability that a particular sample is picked in one draw is $\frac{1}{m}$, so the probability of not being picked in one draw is $1- \frac{1}{m}$. 2. After m draws with replacement, the probability of never being picked is $(1- \frac{1}{m})^m$; as m tends to $\infty$, $(1- \frac{1}{m})^m \to \frac{1}{e} \approx 37\%$.
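A quick numerical check of that limit (the values of m are arbitrary):
```python
import numpy as np

# Probability that a given sample is never drawn in m draws with replacement
for m in [10, 100, 1_000, 10_000, 100_000]:
    print(m, round((1 - 1 / m) ** m, 4))

print("1/e =", round(np.exp(-1), 4))  # ~0.3679, i.e. roughly 37%
```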
###Code
# Use decision trees: being non-parametric learners, they produce relatively diverse (high-variance) base estimators
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import BaggingClassifier
# bootstrap: sample with replacement
# max_samples: how many samples each base estimator sees
# n_estimators: how many base estimators to train
bagging_clf = BaggingClassifier(DecisionTreeClassifier(),
n_estimators=500, max_samples=100,
bootstrap=True)
bagging_clf.fit(X_train, y_train)
bagging_clf.score(X_test, y_test)
bagging_clf2 = BaggingClassifier(DecisionTreeClassifier(),
n_estimators=5000, max_samples=100,
bootstrap=True)
bagging_clf2.fit(X_train, y_train)
bagging_clf2.score(X_test, y_test)
###Output
_____no_output_____
###Markdown
oob (out of bag)
###Code
# bootstrap: sample with replacement
# max_samples: how many samples each base estimator sees
# n_estimators: how many base estimators to train
# oob_score: track which samples were left out of each bootstrap (out-of-bag)
bagging_clf3 = BaggingClassifier(DecisionTreeClassifier(),
n_estimators=500, max_samples=100,
bootstrap=True, oob_score=True)
bagging_clf3.fit(X_train, y_train)
bagging_clf3.oob_score_
###Output
_____no_output_____
###Markdown
n_jobs
###Code
%%time
bagging_clf4 = BaggingClassifier(DecisionTreeClassifier(),
n_estimators=5000, max_samples=100,
bootstrap=True)
bagging_clf4.fit(X_train, y_train)
%%time
bagging_clf4 = BaggingClassifier(DecisionTreeClassifier(),
n_estimators=5000, max_samples=100,
bootstrap=True, n_jobs=-1)
bagging_clf4.fit(X_train, y_train)
###Output
Wall time: 3.24 s
###Markdown
bootstrap_features
###Code
# bootstrap_features: randomly sample the features, with replacement
# max_features: how many features to draw for each base estimator
random_subspaces_clf = BaggingClassifier(DecisionTreeClassifier(),
n_estimators=500, max_samples=100,
bootstrap=True, n_jobs=-1, oob_score=True,
max_features=1, bootstrap_features=True)
random_subspaces_clf.fit(X_train, y_train)
random_subspaces_clf.score(X_test, y_test)
###Output
_____no_output_____ |
Exercícios de Lista - Feito.ipynb | ###Markdown
Write a program that reads a vector of 5 integers and displays them.
###Code
vetor = []
for i in range(5):
vetor.append(input("Digite um valor "))
print("O vetor é {0}".format(vetor))
###Output
_____no_output_____
###Markdown
Write a program that reads a vector of 10 real numbers and displays them in reverse order.
###Code
vetor = []
for i in range(10):
vetor.append(float(input("Digite um valor ")))
vetor.reverse()
print("O vetor é {0}".format(vetor))
###Output
_____no_output_____
###Markdown
Write a program that reads 4 grades, then displays the grades and their average on the screen.
###Code
vetor = []
vetor.append(float(input("Digite o valor da nota 1 ")))
vetor.append(float(input("Digite o valor da nota 2 ")))
vetor.append(float(input("Digite o valor da nota 3 ")))
vetor.append(float(input("Digite o valor da nota 4 ")))
media = (vetor[0] + vetor[1] + vetor[2] + vetor[3])/4
print("Sua media é {0}".format(media))
###Output
_____no_output_____
###Markdown
Write a program that reads a vector of 10 characters and reports how many consonants were read. Print the consonants.
###Code
vetor = []
soma = 0
for i in range(10):
vetor.append(str(input("Digite um caracter ").upper()))
print(vetor[i])
if vetor[i] == "A" or vetor[i] == "E" or vetor[i] == "I" or vetor[i] == "O" or vetor[i] == "U" :
print("Vogal")
else:
soma += 1
print("O valor de consoantes são {0}".format(soma))
###Output
_____no_output_____
###Markdown
Write a program that reads 20 integers and stores them in a vector. Store the even numbers in the vector PAR and the odd numbers in the vector IMPAR. Print the three vectors.
###Code
vetor = []
vetorPar = []
vetorImp = []
for i in range(20):
vetor.append(int(input("Digite um valor ")))
if vetor[i] % 2 == 0:
vetorPar.append(vetor[i])
print("Par")
else:
vetorImp.append(vetor[i])
print("Impar")
print("O valos par são {0} é os valores impares são {1}".format(vetorPar,vetorImp))
###Output
_____no_output_____
###Markdown
Write a program that asks for the four grades of 10 students, computes each student's average and stores it in a vector, then prints the number of students whose average is greater than or equal to 7.0.
###Code
nome = []
media = []
aprovados = 0
for i in range(10):
nome.append(str(input("Digite o nome do aluno ")))
p1 = float(input("Digite a p1 do aluno em questâo "))
p2 = float(input("Digite a p2 do aluno em questâo "))
p3 = float(input("Digite a p3 do aluno em questâo "))
p4 = float(input("Digite a p4 do aluno em questâo "))
media.append( (p1 + p2 + p3 + p4) / 4)
print(media)
    if media[i] >= 7:
        aprovados += 1
print("A quantidade de alunos aprovados é " + str(aprovados))
###Output
_____no_output_____
###Markdown
Write a program that reads a vector of 5 integers and shows the numbers, their sum and their product.
###Code
num = []
soma = 0
mult = 1
for i in range(5):
num.append(int(input("Digite um numero ")))
soma += num[i]
mult *= num[i]
for i in range(5):
print("O valor digitado é {0} sua soma e {1} e sua multiplicação é {2}".format(num[i], soma,mult))
###Output
Digite um numero10
Digite um numero20
Digite um numero30
Digite um numero40
Digite um numero50
O valor digitado é 10 sua soma e 150 e sua multiplicação é 12000000
O valor digitado é 20 sua soma e 150 e sua multiplicação é 12000000
O valor digitado é 30 sua soma e 150 e sua multiplicação é 12000000
O valor digitado é 40 sua soma e 150 e sua multiplicação é 12000000
O valor digitado é 50 sua soma e 150 e sua multiplicação é 12000000
###Markdown
Write a program that asks for the age and height of 5 people, storing each piece of information in its own vector. Print the ages and heights in the reverse of the order in which they were read.
###Code
idade = []
altura = []
for i in range(5):
idade.append(int(input("Digite sua idade")))
altura.append(float(input("Digite sua altura")))
idade.reverse()
altura.reverse()
print("Suas idades são {0} e suas alturas são {1}".format(idade,altura))
###Output
_____no_output_____
###Markdown
Write a program that reads a vector A of 10 integers, then computes and shows the sum of the squares of its elements.
###Code
num = []
potencia = []
soma = 0
for i in range(10):
    num.append(int(input("Digite um valor inteiro")))
    potencia.append(float(pow(num[i], 2)))
    soma += potencia[i]
print("Seus numeros são {0}, seus quadrados são {1} e a soma dos quadrados é {2}".format(num, potencia, soma))
vetor = []
###Output
_____no_output_____
###Markdown
Write a program that reads two vectors with 10 elements each. Build a third vector with 20 elements whose values are the elements of the other two vectors interleaved.
###Code
vetor1, vetor2, vetor3 = [], [], []
for i in range(10):
    vetor1.append(float(input("Digite um valor do primeiro vetor ")))
    vetor2.append(float(input("Digite um valor do segundo vetor ")))
for i in range(10):
    vetor3.extend([vetor1[i], vetor2[i]])
print(vetor3)
###Output
_____no_output_____
###Markdown
Modify the previous program so that it interleaves 3 vectors of 10 elements each.
###Code
nome = []
idade = []
altura = []
soma = 0
for i in range(30):
nome.append(str(input("Digite seu nome ")))
idade.append(int(input("Digite sua idade ")))
altura.append(float(input("Digite sua altura ")))
soma += altura[i]
media = soma / 30
for j in range(30):
if idade[j] > 13 and altura[j] < media:
print("O aluno {0} possui idade maior que 13 anos é sua sua altura é {1}".format(nome[j], altura[j]))
###Output
_____no_output_____
###Markdown
The ages and heights of 30 students were recorded. Write a program that determines how many students older than 13 have a height below the average height of the group.
###Code
# Sketch using pandas; assumes a spreadsheet "altura_por_idade.xls" with
# columns "nome", "idade" and "altura" (these column names are assumptions).
import pandas as pd
excel = pd.read_excel("altura_por_idade.xls")
alt_media = excel["altura"].mean()
for i in range(len(excel)):
    if excel.loc[i, "idade"] > 13 and excel.loc[i, "altura"] < alt_media:
        print("O/A {0} tem altura inferior a media {1}, sua altura é {2}".format(
            excel.loc[i, "nome"], alt_media, excel.loc[i, "altura"]))
###Output
_____no_output_____
###Markdown
Write a program that receives the average temperature of each month of the year and stores them in a list. Then compute the yearly average and show every temperature above that average together with the month in which it occurred (spell the month out: 1 – January, 2 – February, ...).
###Code
ano = []
media_ano = []
media = 0
for i in range (12):
ano.append(str(input("Digite o mês de referencia ")))
media_ano.append(float(input("Digite a temperatura media de referencia ")))
media = media_ano[i] + media
print("A Media anual é {0}".format(media / 12))
###Output
Digite o mês de referencia jan
Digite a temperatura media de referencia 10
Digite o mês de referencia fev
Digite a temperatura media de referencia 20
Digite o mês de referencia mar
Digite a temperatura media de referencia 30
Digite o mês de referencia abril
Digite a temperatura media de referencia 40
Digite o mês de referencia maio
Digite a temperatura media de referencia 50
Digite o mês de referencia jun
Digite a temperatura media de referencia 60
Digite o mês de referencia jul
Digite a temperatura media de referencia 70
Digite o mês de referencia ago
Digite a temperatura media de referencia 80
Digite o mês de referencia set
Digite a temperatura media de referencia 90
Digite o mês de referencia out
Digite a temperatura media de referencia 100
Digite o mês de referencia nov
Digite a temperatura media de referencia 110
Digite o mês de referencia dez
Digite a temperatura media de referencia 120
A Media anual é 65.0
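###Markdown
The cell above only computes the yearly average. A minimal sketch of the remaining requirement (assuming the `ano` and `media_ano` lists filled in the previous cell): list every month whose temperature is above that average.
###Code
media_anual = sum(media_ano) / 12
for mes, temperatura in zip(ano, media_ano):
    if temperatura > media_anual:
        print("{0}: {1} (acima da media anual de {2})".format(mes, temperatura, media_anual))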
###Markdown
Using lists, write a program that asks a person 5 questions about a crime. The questions are:
"Did you phone the victim?"
"Were you at the crime scene?"
"Do you live near the victim?"
"Did you owe the victim money?"
"Have you ever worked with the victim?" At the end the program must classify the person's involvement in the crime: answering yes to exactly 2 questions classifies the person as a "Suspect", 3 or 4 as an "Accomplice", and 5 as the "Killer". Otherwise, the person is classified as "Innocent".
###Code
resp = []
soma = 0
print("Responda as perguntas abaixo com sim ou não, por favor")
resp.append(str(input("Telefonou para a vítima?")).upper())
resp.append(str(input("Esteve no local do crime?")).upper())
resp.append(str(input("Mora perto da vítima?")).upper())
resp.append(str(input("Devia para a vítima?")).upper())
resp.append(str(input("Já trabalhou com a vítima?")).upper())
for i in range (len(resp)):
if resp[i] == "SIM":
soma += 1
if soma == 2:
print("suspeito")
elif soma == 3 or soma == 4:
print("Cúmplice")
elif soma == 5:
print("Assassino")
else:
print("Inocente")
###Output
Responda as perguntas abaixo com sim ou não, por favor
Telefonou para a vítima?não
Esteve no local do crime?Nao
Mora perto da vítima?NÂO
Devia para a vítima?NÃo
Já trabalhou com a vítima?NãO
Inocente
###Markdown

###Code
valor,acima_valor, maiorquesete = [], [], []
i = 0
soma = 0
while True:
valor.append(float(input("Digite qualquer valor - Para sair digite -1 ")))
if valor [i] == -1:
valor.pop(i)
break;
soma += valor[i]
i += 1
media = soma / len(valor)
i = 0
for i in range(len(valor)):
if valor[i] > media:
acima_valor.append(valor[i])
if valor[i] < 7:
maiorquesete.append(valor[i])
print("A quantidade digitada foi: {0}".format(len(valor)))
print("Valores digitados na ordem foi: {0}".format(valor))
print("Valores digitados na ordem reversa que foi: {0}".format(valor.reverse()))
print("Valores total somados: {0}".format(soma))
print("Valor da media: {0}".format(media))
print("Valores digitados acima da media: {0}".format(acima_valor))
print("Valores digitados abaixo de sete: {0}".format(maiorquesete))
print("Programa terminou com sucesso")
###Output
Digite qualquer valor - Para sair digite -1 10
Digite qualquer valor - Para sair digite -1 20
Digite qualquer valor - Para sair digite -1 30
Digite qualquer valor - Para sair digite -1 40
Digite qualquer valor - Para sair digite -1 50
Digite qualquer valor - Para sair digite -1 60
Digite qualquer valor - Para sair digite -1 70
Digite qualquer valor - Para sair digite -1 80
Digite qualquer valor - Para sair digite -1 90
Digite qualquer valor - Para sair digite -1 100
Digite qualquer valor - Para sair digite -1 1
Digite qualquer valor - Para sair digite -1 2
Digite qualquer valor - Para sair digite -1 3
Digite qualquer valor - Para sair digite -1 4
Digite qualquer valor - Para sair digite -1 -1
A quantidade digitada foi: 14
Valores digitados na ordem foi: [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0, 1.0, 2.0, 3.0, 4.0]
Valores digitados na ordem reversa que foi: None
Valores total somados: 560.0
Valor da media: 40.0
Valores digitados acima da media: [50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
Valores digitados abaixo de sete: [1.0, 2.0, 3.0, 4.0]
Programa terminou com sucesso
###Markdown

###Code
salarioBase = 200
vendedores = [0, 0, 0, 0, 0, 0, 0, 0, 0]
for i in range(0, 10):
valorVendas = float(input('Informe o valor das vendas do vendedor: '))
salario = valorVendas * 0.09 + salarioBase
indice = int(salario / 100) - 1
if (indice > 9):
indice = 9
elif (indice < 1):
indice = 1
    vendedores[indice - 1] += 1
for i in range(0, 9):
print ('{0} - {1} : {2}'.format(i * 100 + 200, (i + 1) * 100 + 199, vendedores[i]))
###Output
Informe o valor das vendas do vendedor: 1000
Informe o valor das vendas do vendedor: 2000
Informe o valor das vendas do vendedor: 3000
Informe o valor das vendas do vendedor: 4000
Informe o valor das vendas do vendedor: 500
Informe o valor das vendas do vendedor: 6000
Informe o valor das vendas do vendedor: 9000
Informe o valor das vendas do vendedor: 70000
Informe o valor das vendas do vendedor: 80000
Informe o valor das vendas do vendedor: 50000
200 - 299 : 2
300 - 399 : 1
400 - 499 : 1
500 - 599 : 1
600 - 699 : 0
700 - 799 : 1
800 - 899 : 0
900 - 999 : 0
1000 - 1099 : 4
###Markdown

###Code
nome = input("Digite o seu nome ")
frase = ["Primeiro","Segundo","Terceiro","Quarto","Quinto"]
i = calc = 0
distancia = []
while i < 5:
distancia.append(float(input("Digite o valor do seu {0} Salto: ".format(frase[i]))))
calc += distancia[i]
i += 1
media = calc / 5
print("""Resultado Final:
Atleta: {0}
Saltos: {1}
Média dos saltos: {2}""".format(nome,distancia,media))
###Output
Digite o seu nomevitor
Digite o valor do seu Primeiro Salto:10
Digite o valor do seu Segundo Salto:20
Digite o valor do seu Terceiro Salto:30
Digite o valor do seu Quarto Salto:40
Digite o valor do seu Quinto Salto:50
Resultado Final:
Atleta: vitor
Saltos: [10.0, 20.0, 30.0, 40.0, 50.0]
Média dos saltos: 30.0
###Markdown

###Code
print("Quem Foi o melhor jogador")
votosAtletas = [0] * 23
numeroAtleta = -1
totalVotos = 0
while numeroAtleta != 0:
numeroAtleta = int(input("Informe um valor entre 1 e 23 ou 0 para sair! "))
    if numeroAtleta < 0 or numeroAtleta > 23:
print("Digite um valor valido entre 1 e 23")
continue
if numeroAtleta != 0:
votosAtletas[numeroAtleta - 1] += 1
totalVotos += 1
print ('Resultado da votacao: ')
print ('Foram computados {0} votos'.format(totalVotos))
print ('Jogador Votos %')
contador = 1
melhorJogador = 0
for votosAtleta in votosAtletas:
if (votosAtleta > 0):
        print ('{0} {1} {2}%'.format(contador, votosAtleta, round(votosAtleta / float(totalVotos) * 100, 2)))
if (votosAtleta > votosAtletas[melhorJogador]):
melhorJogador = contador - 1
contador += 1
print ('O melhor jogador foi o numero {0}, com {1} votos, correspondendo a {2}% do total de votos'
       .format(melhorJogador + 1, votosAtletas[melhorJogador], round(votosAtletas[melhorJogador] / float(totalVotos) * 100, 2)))
###Output
Quem Foi o melhor jogador
Informe um valor entre 1 e 5 ou 0 para sair! 1
Informe um valor entre 1 e 5 ou 0 para sair! 1
Informe um valor entre 1 e 5 ou 0 para sair! 1
Informe um valor entre 1 e 5 ou 0 para sair! 2
Informe um valor entre 1 e 5 ou 0 para sair! 2
Informe um valor entre 1 e 5 ou 0 para sair! 5
Informe um valor entre 1 e 5 ou 0 para sair! 20
Informe um valor entre 1 e 5 ou 0 para sair! 0
Resultado da votacao:
Foram computados 7 votos
Jogador Votos %
1 3 43%
2 2 29%
5 1 14%
20 1 14%
O melhor jogador foi o numero 1, com 3 votos, correspondendo a 0.004285714285714286 do total de votos
###Markdown

###Code
win = unix = linux = net = mac = out = total = 0
num = 1
while num != 0:
print("Qual o melhor sistema operacional?")
print("----------------------------------------")
print("As possiveis respostas são:")
print("")
print("0 - Sair do programa")
print("1 - Windows Server")
print("2 - Unix")
print("3 - Linux")
print("4 - Netware")
print("5 - Mac Os")
print("6 - Outros")
num = int(input("Digite em quem você deseja votar"))
    if 1 <= num <= 6:
total += 1
if num == 1:
win += 1
elif num == 2:
unix += 1
elif num == 3:
linux += 1
elif num == 4:
net += 1
elif num == 5:
mac += 1
elif num == 6:
out += 1
    elif num != 0:
print("Digite um valor valido o programa será reiniciado")
print("Qual o melhor sistema operacional?")
print("Sistema Operacional Votos %")
print("------------------- ----- ---")
print("Windows Server {0} {1}%".format(win, round((win/total)*100,2)))
print("Unix {0} {1}%".format(unix,round((unix/total)*100,2)))
print("Linux {0} {1}%".format(linux,round((linux/total)*100,2)))
print("Netware {0} {1}%".format(net,round((net/total)*100,2)))
print("Mac Os {0} {1}%".format(mac,round((mac/total)*100,2)))
print("Outros {0} {1}%".format(out,round((out/total)*100,2)))
###Output
Qual o melhor sistema operacional?
----------------------------------------
As possiveis respostas são:
0 - Sair do programa
1 - Windows Server
2 - Unix
3 - Linux
4 - Netware
5 - Mac Os
6 - Outros
Digite em quem você deseja votar1
Qual o melhor sistema operacional?
----------------------------------------
As possiveis respostas são:
0 - Sair do programa
1 - Windows Server
2 - Unix
3 - Linux
4 - Netware
5 - Mac Os
6 - Outros
Digite em quem você deseja votar1
Qual o melhor sistema operacional?
----------------------------------------
As possiveis respostas são:
0 - Sair do programa
1 - Windows Server
2 - Unix
3 - Linux
4 - Netware
5 - Mac Os
6 - Outros
Digite em quem você deseja votar1
Qual o melhor sistema operacional?
----------------------------------------
As possiveis respostas são:
0 - Sair do programa
1 - Windows Server
2 - Unix
3 - Linux
4 - Netware
5 - Mac Os
6 - Outros
Digite em quem você deseja votar1
Qual o melhor sistema operacional?
----------------------------------------
As possiveis respostas são:
0 - Sair do programa
1 - Windows Server
2 - Unix
3 - Linux
4 - Netware
5 - Mac Os
6 - Outros
Digite em quem você deseja votar1
Qual o melhor sistema operacional?
----------------------------------------
As possiveis respostas são:
0 - Sair do programa
1 - Windows Server
2 - Unix
3 - Linux
4 - Netware
5 - Mac Os
6 - Outros
Digite em quem você deseja votar2
Qual o melhor sistema operacional?
----------------------------------------
As possiveis respostas são:
0 - Sair do programa
1 - Windows Server
2 - Unix
3 - Linux
4 - Netware
5 - Mac Os
6 - Outros
Digite em quem você deseja votar2
Qual o melhor sistema operacional?
----------------------------------------
As possiveis respostas são:
0 - Sair do programa
1 - Windows Server
2 - Unix
3 - Linux
4 - Netware
5 - Mac Os
6 - Outros
Digite em quem você deseja votar3
Qual o melhor sistema operacional?
----------------------------------------
As possiveis respostas são:
0 - Sair do programa
1 - Windows Server
2 - Unix
3 - Linux
4 - Netware
5 - Mac Os
6 - Outros
Digite em quem você deseja votar4
Qual o melhor sistema operacional?
----------------------------------------
As possiveis respostas são:
0 - Sair do programa
1 - Windows Server
2 - Unix
3 - Linux
4 - Netware
5 - Mac Os
6 - Outros
Digite em quem você deseja votar5
Qual o melhor sistema operacional?
----------------------------------------
As possiveis respostas são:
0 - Sair do programa
1 - Windows Server
2 - Unix
3 - Linux
4 - Netware
5 - Mac Os
6 - Outros
Digite em quem você deseja votar6
Qual o melhor sistema operacional?
----------------------------------------
As possiveis respostas são:
0 - Sair do programa
1 - Windows Server
2 - Unix
3 - Linux
4 - Netware
5 - Mac Os
6 - Outros
Digite em quem você deseja votar0
Qual o melhor sistema operacional?
Sistema Operacional Votos %
------------------- ----- ---
Windows Server 5 41.67%
Unix 2 16.67%
Linux 1 8.33%
Netware 1 8.33%
Mac Os 1 8.33%
Outros 1 8.33%
###Markdown

###Code
print ('Comparativo de Consumo de Combustivel')
veiculos = []
consumo = []
preco = 2.25
for i in range(1, 6):
veiculos.append(input('Veiculo %d: ' % i))
consumo.append(float(input('Km por litro: ')))
print ('Relatorio Final')
for i in range(0, 5):
custo = 1000 / consumo[i]
gasto = custo * preco
print ('%d - %s - %.2f - %.1f litros - R$ %.2f' % (i + 1, veiculos[i], consumo[i], custo, gasto))
if ('menorConsumo' not in vars()) or (consumo[i] > consumo[menorConsumo]):
menorConsumo = i
print ('O menor consumo eh do %s' % veiculos[menorConsumo])
###Output
Comparativo de Consumo de Combustivel
Veiculo 1: Unix
Km por litro: 10
Veiculo 2: i30
Km por litro: 20
Veiculo 3: ix35
Km por litro: 20
Veiculo 4: hb
Km por litro: 20
Veiculo 5: fiesta
Km por litro: 40
Relatorio Final
1 - Unix - 10.00 - 100.0 litros - R$ 225.00
2 - i30 - 20.00 - 50.0 litros - R$ 112.50
3 - ix35 - 20.00 - 50.0 litros - R$ 112.50
4 - hb - 20.00 - 50.0 litros - R$ 112.50
5 - fiesta - 40.00 - 25.0 litros - R$ 56.25
O menor consumo eh do fiesta
###Markdown
Write a program that simulates rolling a die. Roll the die 100 times and store the results in a vector. Then show how many times each value was obtained. Hint: use a vector of counters (1–6) and a random-number function to simulate the rolls.
###Code
import random
dado = []
soma_dado1, soma_dado2, soma_dado3, soma_dado4, soma_dado5, soma_dado6 = 0, 0, 0, 0, 0, 0
for i in range(100):
dado.append(random.randint(1, 6))
if dado[i] == 1:
soma_dado1 += 1
elif dado[i] == 2:
soma_dado2 += 1
elif dado[i] == 3:
soma_dado3 += 1
elif dado[i] == 4:
soma_dado4 += 1
elif dado[i] == 5:
soma_dado5 += 1
elif dado[i] == 6:
soma_dado6 += 1
print("O dado na face 1 foi visto {0}".format(soma_dado1))
print("O dado na face 2 foi visto {0}".format(soma_dado2))
print("O dado na face 3 foi visto {0}".format(soma_dado3))
print("O dado na face 4 foi visto {0}".format(soma_dado4))
print("O dado na face 5 foi visto {0}".format(soma_dado5))
print("O dado na face 6 foi visto {0}".format(soma_dado6))
print("teste {0}".format(len(dado)))
###Output
O dado na face 1 foi visto 17
O dado na face 2 foi visto 16
O dado na face 3 foi visto 14
O dado na face 4 foi visto 18
O dado na face 5 foi visto 15
O dado na face 6 foi visto 20
teste 100
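###Markdown
An equivalent, more compact sketch (an addition, not part of the original exercise list): `collections.Counter` tallies the 100 rolls without needing one counter variable per face.
###Code
import random
from collections import Counter
lancamentos = [random.randint(1, 6) for _ in range(100)]
contagem = Counter(lancamentos)
for face in range(1, 7):
    print("O dado na face {0} foi visto {1}".format(face, contagem[face]))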
|
kaggle/titanic/titanic_sample.ipynb | ###Markdown
Data exploration
###Code
train = pd.read_csv("D:/shareddir/PythonCode/PycharmProjects/ml/kaggle/titanic/all/train.csv")
test = pd.read_csv("D:/shareddir/PythonCode/PycharmProjects/ml/kaggle/titanic/all/test.csv")
#dataset sizes
print train.shape
print test.shape
#basic descriptive statistics: min, max, median, variance, standard deviation, etc.
train.describe()
#check how many values are missing in each feature
train.isnull().sum()
train.head()
train.tail()
test.describe()
test.isnull().sum()
test.head()
###Output
_____no_output_____
###Markdown
Visualize the features to compare their distributions in the training and test sets
###Code
plt.rc('font', size=13)  # matplotlib.rcParams holds the global configuration; plt.rc modifies it directly -- here the font size is set to 13
fig = plt.figure(figsize=(18, 8))
alpha = 0.6
ax1 = plt.subplot2grid((2,3), (0,0))
train.Age.fillna(train.Age.median()).plot(kind='kde', color='#FA2379', label='train', alpha=alpha)
test.Age.fillna(test.Age.median()).plot(kind='kde', label='test', alpha=alpha)
ax1.set_xlabel('Age')
ax1.set_title("What's the distribution of age?" )
plt.legend(loc='best')
ax2 = plt.subplot2grid((2,3), (0,1))
train.Pclass.value_counts().plot(kind='barh', color='#FA2379', label='train', alpha=alpha)
test.Pclass.value_counts().plot(kind='barh', label='test', alpha=alpha)
ax2.set_ylabel('Pclass')
ax2.set_xlabel('Frequency')
ax2.set_title("What's the distribution of Pclass?" )
plt.legend(loc='best')
ax3 = plt.subplot2grid((2,3), (0,2))
train.Sex.value_counts().plot(kind='barh', color='#FA2379', label='train', alpha=alpha)
test.Sex.value_counts().plot(kind='barh', label='test', alpha=alpha)
ax3.set_ylabel('Sex')
ax3.set_xlabel('Frequency')
ax3.set_title("What's the distribution of Sex?" )
plt.legend(loc='best')
ax4 = plt.subplot2grid((2,3), (1,0), colspan=2)
train.Fare.fillna(train.Fare.median()).plot(kind='kde', color='#FA2379', label='train', alpha=alpha)
test.Fare.fillna(test.Fare.median()).plot(kind='kde', label='test', alpha=alpha)
ax4.set_xlabel('Fare')
ax4.set_title("What's the distribution of Fare?" )
plt.legend(loc='best')
ax5 = plt.subplot2grid((2,3), (1,2))
train.Embarked.value_counts().plot(kind='barh', color='#FA2379', label='train', alpha=alpha)
test.Embarked.value_counts().plot(kind='barh', label='test', alpha=alpha)
ax5.set_ylabel('Embarked')
ax5.set_xlabel('Frequency')
ax5.set_title("What's the distribution of Embarked?" )
plt.legend(loc='best')
ax6 = plt.subplot2grid((2,3), (1,2))
train.Survived.value_counts().plot(kind='barh', color='#FA2379', label='train', alpha=alpha)
#test.Survived.value_counts().plot(kind='kde', label='test', alpha=alpha)
ax5.set_ylabel('Survived')
ax5.set_xlabel('Frequency')
ax5.set_title("What's the distribution of Survived?" )
plt.legend(loc='best')
plt.tight_layout()
train.Survived.value_counts()
train.Age.value_counts()
#the plot kinds "kde" and "density" produce the same result
fig = plt.figure(figsize=(15, 6))
train[train.Survived==0].Age.value_counts().plot(kind='kde', color='#FA2379', label='Not Survived', alpha=alpha)
train[train.Survived==1].Age.value_counts().plot(kind='kde', label='Survived', alpha=alpha)
plt.xlabel('Age')
plt.title("What's the distribution of Age?" )
plt.legend(loc='best')
plt.grid()
fig = plt.figure(figsize=(15, 6))
train[train.Survived==0].Age.value_counts().plot(kind='density', color='#FA2379', label='Not Survived', alpha=alpha)
train[train.Survived==1].Age.value_counts().plot(kind='density', label='Survived', alpha=alpha)
plt.xlabel('Age')
plt.title("What's the distribution of Age?" )
plt.legend(loc='best')
plt.grid()
male_survived = train[train.Sex == "male"].Survived.value_counts()
print male_survived
#type(male_survived)
die_num = male_survived[0]
survived_num = male_survived[1]
print die_num
print survived_num
print float(die_num)/survived_num
print "ratio of die:",float(die_num)/(die_num+survived_num)
female_survived = train[train.Sex == "female"].Survived.value_counts()
print female_survived
die_num = female_survived[1]
survived_num = female_survived[0]
print die_num
print survived_num
print "ratio of die:",float(die_num)/(die_num+survived_num)
test.isnull().sum()
train[train.Embarked.isnull()]
fig = plt.figure(figsize=(8, 5))
ax = fig.add_subplot(111)
ax = train.boxplot(column='Fare', by=['Embarked','Pclass'], ax=ax)
plt.axhline(y=80, color='green')
ax.set_title('', y=1.1)
train[train.Embarked.isnull()][['Fare', 'Pclass', 'Embarked']]
_ = train.set_value(train.Embarked.isnull(), 'Embarked', 'C')
test[test.Fare.isnull()]
print ("The top 5 most common value of Fare")
#test[(test.Pclass==3)&(test.Embarked=='S')].Fare.value_counts().head()
test[(test.Pclass == 3) & (test.Embarked =="S")].Fare.value_counts()
train.isnull().sum()
_ = test.set_value(test.Fare.isnull(),"Fare",8.05)
test.isnull().sum()
full = pd.concat([train,test],ignore_index = True)
full
full.head()
full.Cabin.value_counts()
_ = full.set_value(full.Cabin.isnull(),"Cabin","U0")
full.describe()
###Output
_____no_output_____
###Markdown
Feature engineering
###Code
import re
names = full.Name.map(lambda x: len(x.split()))
names
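# A common next feature-engineering step (a sketch added here, not part of the
# original notebook): extract the title ("Mr", "Mrs", "Miss", ...) from the
# Name column with a regular expression and inspect its distribution.
full["Title"] = full.Name.str.extract(r" ([A-Za-z]+)\.", expand=False)
full.Title.value_counts()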
full.Fare
full.Fare.reshape(-1,1)
full.Fare.shape
###Output
_____no_output_____ |
notebooks/model_comparison_galaxy.ipynb | ###Markdown
Crossvalidation Results for IPhone Data * Model hyperparameters and pipelines come from optimization notebooks * Random forest wins on all metrics
###Code
knn_features = [
"iphone",
"samsunggalaxy",
"googleandroid",
"iphonecampos",
"samsungcampos",
"iphonecamneg",
"iphonecamunc",
"iphonedispos",
"samsungdispos",
"iphonedisneg",
"iphonedisunc",
"iphoneperpos",
"samsungperpos",
"iphoneperneg",
"samsungperneg",
"iphoneperunc",
]
gradient_boosting_features = [
"iphone",
"samsunggalaxy",
"googleandroid",
"iphonecampos",
"samsungcampos",
"iphonecamneg",
"samsungcamneg",
"iphonecamunc",
"samsungcamunc",
"iphonedispos",
"samsungdispos",
"iphonedisneg",
"samsungdisneg",
"iphonedisunc",
"samsungdisunc",
"iphoneperpos",
"samsungperpos",
"iphoneperneg",
"iphoneperunc",
"samsungperunc",
]
random_forest_features = [
"iphone",
"samsunggalaxy",
"ios",
"googleandroid",
"iphonecampos",
"samsungcampos",
"iphonecamneg",
"samsungcamneg",
"iphonecamunc",
"samsungcamunc",
"iphonedispos",
"samsungdispos",
"iphonedisneg",
"samsungdisneg",
"iphonedisunc",
"samsungdisunc",
"iphoneperpos",
"samsungperpos",
"iphoneperneg",
"samsungperneg",
"iphoneperunc",
"samsungperunc",
"iosperunc",
]
knn_feature_selector = X.columns.isin(knn_features)
gradient_boosting_feature_selector = X.columns.isin(gradient_boosting_features)
random_forest_feature_selector = X.columns.isin(random_forest_features)
def keep_knn_features(X):
return X[:, knn_feature_selector]
def keep_gradient_boosting_features(X):
return X[:, gradient_boosting_feature_selector]
def keep_random_forest_features(X):
return X[:, random_forest_feature_selector]
pipelines = {
"KNN": make_pipeline(
VarianceThreshold(),
RobustScaler(),
FunctionTransformer(keep_knn_features, validate=False),
KNeighborsRegressor(n_neighbors=16, p=2),
),
"Gradient Boosting": make_pipeline(
VarianceThreshold(),
FunctionTransformer(keep_gradient_boosting_features, validate=False),
GradientBoostingRegressor(),
),
"Random Forest": make_pipeline(
VarianceThreshold(),
FunctionTransformer(keep_random_forest_features, validate=False),
RandomForestRegressor(
n_estimators=667, max_depth=12, min_samples_split=4, n_jobs=3
),
),
}
scores = crossvalidate_pipeline_scores(
X=X, y=y, pipelines=pipelines, n_splits=30, random_state=random_state
)
plot_scores(scores=scores)
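# Possible follow-up (a sketch, not part of the original comparison): since the
# random forest pipeline wins on all metrics, fit it on the full data set and
# persist it for later use. Assumes the X, y and `pipelines` objects defined above.
import joblib
best_pipeline = pipelines["Random Forest"]
best_pipeline.fit(X, y)
joblib.dump(best_pipeline, "random_forest_galaxy_pipeline.joblib")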
###Output
_____no_output_____ |
notebooks/classification_transfert_learning_with_target.ipynb | ###Markdown
Load constantes
###Code
with open("config.yaml",'r') as config_file:
config = yaml.safe_load(config_file)
IMAGE_WIDTH = config["image_width"]
IMAGE_HEIGHT = config["image_height"]
IMAGE_DEPTH = config["image_depth"]
DATA_DIR= pathlib.Path(config["data_dir"])
MODELS_DIR = pathlib.Path(config["models_dir"])
TARGET_NAME= config["target_name"]
DATA_TRAIN_FILE= config["data_train_file"]
DATA_TEST_FILE= config["data_test_file"]
###Output
_____no_output_____
###Markdown
Functions
###Code
def load_resize_image(path,height,width):
"""Load an image and resize it to the target size
Parameters:
----------
path (Path): path to the file to load and resize
height (int): the height of the final resized image
width(int): the width of the resized image
Return
------
numpy.array containing resized image
"""
return np.array(Image.open(path).resize((width,height)))
def build_x_and_y(df: pd.DataFrame, target: str, images: str):
"""build x tensor and y tensor for model fitting.
parameters
----------
df(pd.DataFrame): dataframe
target(str): name of target column
images (str): name of resized images column
Returns
-------
x (numpy.array): numpy.array of x values
y (numpy.array): numpy.array of y values
"""
x= np.array(df[images].to_list())
y=tf.keras.utils.to_categorical(df[target].astype('category').cat.codes)
return x,y
def build_image_database(path,target):
""" Build a pandas dataframe with target class and access path to images.
Parameters:
- path (Path): Path pattern to read csv file containing images information
- target(str): The second column to extract from the file
Return:
A pandas dataframe,
-------
"""
#Load file
_df= pd.read_csv(path,
names=["all"],
)
#Recover data
_df["image_id"]=_df["all"].apply(lambda x: x.split(' ')[0])
_df[target]=_df["all"].apply(lambda x: ' '.join(x.split(' ')[1:]))
_df[target].unique()
#Create path
_df["path"]= _df['image_id'].apply( lambda x: DATA_DIR/"images"/(x+'.jpg'))
return _df.drop(columns=["all"])
def build_classification_model(nbre_classes):
"""Build a tensorflow model using information from target and images columns in dataframes
Parameters
----------
- df (pandas.dataFrame): dataframe with target and images columns
- target (str): column name for target variable
- images (str): column name for images
Returns
------
tensorflow model built & compiled
"""
#Initialize a base model base on imagenet
image_net = keras.applications.Xception(
weights='imagenet',
input_shape=(IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_DEPTH),
include_top=False)
#Freeze all the layers of the model
image_net.trainable = False
#Create input layer
inputs = keras.Input(shape=(IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_DEPTH))
#Data augmentation, as the number of images is not a lot
data_augmentation=keras.Sequential(
[keras.layers.RandomFlip("horizontal"), keras.layers.RandomRotation(0.1),]
)
x = data_augmentation(inputs)
scale_layer = keras.layers.Rescaling(scale=1 / 127.5, offset=-1)
x = scale_layer(x)
x = image_net(x, training=False)
x = keras.layers.GlobalAveragePooling2D()(x)
x = keras.layers.Dropout(0.2)(x)
#Create output layer
outputs = keras.layers.Dense(nbre_classes,activation='softmax')(x)
model = keras.Model(inputs, outputs)
#Compile the model
model.compile(optimizer=keras.optimizers.Adam(),
loss=keras.losses.CategoricalCrossentropy(),
metrics=[keras.metrics.CategoricalCrossentropy()])
return model
def show_image(df,row,target):
"""show the image in the ligne row and the associated target column
Args:
df (pandas.dataFrame): the dataframe of images
row (int): the index of the row
target (string): the column name of the associated label
Return
------
None
"""
assert target in df.columns, f"Column {target} not found in dataframe"
    assert 'path' in df.columns, f"Column path does not exist in dataframe"
_img = plt.imread(df.loc[row,'path'])
plt.imshow(_img)
return
def classify_images(images,model,classes_names=None):
"""Classify images through a tensorflow model.
Parameters:
-----------
images(np.array): set of images to classify
model (tensorflow.keras.Model): tensorflow/keras model
Returns
-------
predicted classes
"""
results = model.predict(images)
classes = np.argmax(results,axis=1)
if classes_names is not None:
classes = np.array(classes_names[classes])
return classes
def save_model(model ,saving_dir=MODELS_DIR,basename=TARGET_NAME,append_time=False):
"""Save tf/Keras model in saving_dir folder
Parameters
----------
model (tf/Keras model): model to be saved
saving_dir (path): location to save model file
basename (str): the basename of the model
append_time (bool): indicate if the time will be append to the basename
"""
model_name = f"{basename}{'_' + datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S') if append_time else ''}"
model.save(f"{saving_dir}/learning/{model_name}.h5")
return model_name
###Output
_____no_output_____
###Markdown
Read train & test file
###Code
train_df = build_image_database(DATA_DIR/DATA_TRAIN_FILE,TARGET_NAME)
test_df = build_image_database(DATA_DIR/DATA_TEST_FILE,TARGET_NAME)
# Preview the dataframes
train_df.head()
test_df.head()
###Output
_____no_output_____
###Markdown
View some images
###Code
show_image(train_df, np.random.randint(0,train_df.shape[0]), TARGET_NAME)
show_image(test_df,np.random.randint(0,test_df.shape[0]),TARGET_NAME)
###Output
_____no_output_____
###Markdown
Resize Images
###Code
#Resize train images
train_df['resized_image'] = train_df.apply(
lambda r: load_resize_image(r['path'],IMAGE_HEIGHT,IMAGE_WIDTH),
axis=1)
#Resize test images
test_df['resized_image'] = test_df.apply(
lambda r: load_resize_image(r['path'],IMAGE_HEIGHT,IMAGE_WIDTH),
axis=1)
###Output
_____no_output_____
###Markdown
Split dataset into x and y
###Code
X_train,y_train = build_x_and_y(train_df,TARGET_NAME,'resized_image')
X_test,y_test = build_x_and_y(test_df,TARGET_NAME,'resized_image')
classes_names = train_df[TARGET_NAME].astype('category').cat.categories
###Output
_____no_output_____
###Markdown
Build & train the model
###Code
model = build_classification_model(len(classes_names))
model.summary()
%load_ext tensorboard
!rm -rf ./logs
log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
%%time
model.fit(X_train,y_train,epochs=5,batch_size = 32,validation_data=(X_test,y_test))
%tensorboard --logdir logs/fit
###Output
_____no_output_____
###Markdown
Evaluation of the model
###Code
classify_images(X_test[10:20],model,classes_names)
#Compute accuracy of the model
transfert_learning_prediction = model.predict(X_test).argmax(axis=1)
transfert_learning_accuracy = np.mean(
y_test.argmax(axis=1)==transfert_learning_prediction
)
transfert_learning_accuracy
###Output
2022-04-10 15:25:21.142788: W tensorflow/core/framework/cpu_allocator_impl.cc:82] Allocation of 163823616 exceeds 10% of free system memory.
###Markdown
Save the model
###Code
model_name = save_model(model,MODELS_DIR)
with open(MODELS_DIR/"classes"/f"{model_name}.yaml","w") as classe_file:
yaml.dump(list(classes_names),classe_file)
###Output
_____no_output_____
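###Markdown
A minimal inference sketch (an addition, assuming the model file and class list written by the cell above): reload the saved network and its class names, then classify a few test images.
###Code
reloaded_model = tf.keras.models.load_model(MODELS_DIR/"learning"/f"{model_name}.h5")
with open(MODELS_DIR/"classes"/f"{model_name}.yaml") as classe_file:
    reloaded_classes = np.array(yaml.safe_load(classe_file))
classify_images(X_test[:5], reloaded_model, reloaded_classes)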
###Markdown
Compare transfer learning with SVM and neural network
###Code
#Compute accuracy for svm
with open(MODELS_DIR/f"svms/{model_name}.h5","rb") as file:
svm_model = pickle.load(file)
X_svm_test = np.array(test_df.apply(lambda row: np.ndarray.flatten(row["resized_image"]),axis=1).to_list())
_predicted_classe = svm_model.predict(X_svm_test).astype(int)
svm_accuracy = np.mean(_predicted_classe==y_test.argmax(axis=1))
svm_accuracy
neural_model = tf.keras.models.load_model(MODELS_DIR/f"neural_networks/{model_name}.h5")
x= np.array(test_df["resized_image"].to_list())
neural_prediction = neural_model.predict(x).argmax(axis=1)
neural_accuracy = np.mean(y_test.argmax(axis=1)==neural_prediction)
neural_accuracy
print(f"""
======Accuracy=====
Neural network: {round(neural_accuracy,2)},
SVC: {round(svm_accuracy,2)},
Transfert learning: {round(transfert_learning_accuracy,2)}
""")
###Output
======Accuracy=====
Neural network: 0.22,
SVC: 0.29,
Transfert learning: 0.34
|
colab_examples/colab_tutorial_credit_scoring.ipynb | ###Markdown
If you run this notebook in Colab you need to run these initial steps in order for Colab to work. Make sure you are running in a Python 3 environment; you can change your runtime environment by choosing Runtime > Change runtime type in the menu.
###Code
!python --version
!pip install -q -U \
aif360==0.2.2 \
tqdm==4.38.0 \
numpy==1.17.4 \
matplotlib==3.1.1 \
pandas==0.25.3 \
scipy==1.3.2 \
scikit-learn==0.21.3 \
cvxpy==1.0.25 \
scs==2.1.0 \
numba==0.42.0 \
networkx==2.4 \
imgaug==0.2.6 \
BlackBoxAuditing==0.1.54 \
lime==0.1.1.36
###Output
[K |████████████████████████████████| 56.4MB 41kB/s
[K |████████████████████████████████| 61kB 7.6MB/s
[K |████████████████████████████████| 163kB 59.5MB/s
[K |████████████████████████████████| 3.2MB 43.3MB/s
[K |████████████████████████████████| 634kB 55.1MB/s
[K |████████████████████████████████| 2.6MB 38.8MB/s
[K |████████████████████████████████| 276kB 51.4MB/s
[?25h Building wheel for scs (setup.py) ... [?25l[?25hdone
Building wheel for imgaug (setup.py) ... [?25l[?25hdone
Building wheel for BlackBoxAuditing (setup.py) ... [?25l[?25hdone
Building wheel for lime (setup.py) ... [?25l[?25hdone
###Markdown
Notes- The above pip command is created using AIF360's [requirements.txt](https://github.com/josephineHonore/AIF360/blob/master/requirements.txt). At the moment, the job to update these libraries is manual.- The original notebook uses Markdown to display formated text. Currently this is [unsupported](https://github.com/googlecolab/colabtools/issues/322) in Colab.- We have added code to fix the random seeds for reproducibility
###Code
def printb(text):
"""Auxiliar function to print in bold.
Compensates for bug in Colab that doesn't show Markdown(diplay('text'))
"""
print('\x1b[1;30m'+text+'\x1b[0m')
###Output
_____no_output_____
###Markdown
Detecting and mitigating age bias on credit decisions The goal of this tutorial is to introduce the basic functionality of AI Fairness 360 to an interested developer who may not have a background in bias detection and mitigation. Biases and Machine LearningA machine learning model makes predictions of an outcome for a particular instance. (Given an instance of a loan application, predict if the applicant will repay the loan.) The model makes these predictions based on a training dataset, where many other instances (other loan applications) and actual outcomes (whether they repaid) are provided. Thus, a machine learning algorithm will attempt to find patterns, or generalizations, in the training dataset to use when a prediction for a new instance is needed. (For example, one pattern it might discover is "if a person has salary > USD 40K and has outstanding debt < USD 5, they will repay the loan".) In many domains this technique, called supervised machine learning, has worked very well.However, sometimes the patterns that are found may not be desirable or may even be illegal. For example, a loan repay model may determine that age plays a significant role in the prediction of repayment because the training dataset happened to have better repayment for one age group than for another. This raises two problems: 1) the training dataset may not be representative of the true population of people of all age groups, and 2) even if it is representative, it is illegal to base any decision on a applicant's age, regardless of whether this is a good prediction based on historical data.AI Fairness 360 is designed to help address this problem with _fairness metrics_ and _bias mitigators_. Fairness metrics can be used to check for bias in machine learning workflows. Bias mitigators can be used to overcome bias in the workflow to produce a more fair outcome. The loan scenario describes an intuitive example of illegal bias. However, not all undesirable bias in machine learning is illegal it may also exist in more subtle ways. For example, a loan company may want a diverse portfolio of customers across all income levels, and thus, will deem it undesirable if they are making more loans to high income levels over low income levels. Although this is not illegal or unethical, it is undesirable for the company's strategy.As these two examples illustrate, a bias detection and/or mitigation toolkit needs to be tailored to the particular bias of interest. More specifically, it needs to know the attribute or attributes, called _protected attributes_, that are of interest: race is one example of a _protected attribute_ and age is a second. The Machine Learning WorkflowTo understand how bias can enter a machine learning model, we first review the basics of how a model is created in a supervised machine learning process. First, the process starts with a _training dataset_, which contains a sequence of instances, where each instance has two components: the features and the correct prediction for those features. Next, a machine learning algorithm is trained on this training dataset to produce a machine learning model. This generated model can be used to make a prediction when given a new instance. A second dataset with features and correct predictions, called a _test dataset_, is used to assess the accuracy of the model.Since this test dataset is the same format as the training dataset, a set of instances of features and prediction pairs, often these two datasets derive from the same initial dataset. 
A random partitioning algorithm is used to split the initial dataset into training and test datasets.Bias can enter the system in any of the three steps above. The training data set may be biased in that its outcomes may be biased towards particular kinds of instances. The algorithm that creates the model may be biased in that it may generate models that are weighted towards particular features in the input. The test data set may be biased in that it has expectations on correct answers that may be biased. These three points in the machine learning process represent points for testing and mitigating bias. In AI Fairness 360 codebase, we call these points _pre-processing_, _in-processing_, and _post-processing_. AI Fairness 360We are now ready to utilize AI Fairness 360 (`aif360`) to detect and mitigate bias. We will use the German credit dataset, splitting it into a training and test dataset. We will look for bias in the creation of a machine learning model to predict if an applicant should be given credit based on various features from a typical credit application. The protected attribute will be "Age", with "1" (older than or equal to 25) and "0" (younger than 25) being the values for the privileged and unprivileged groups, respectively.For this first tutorial, we will check for bias in the initial training data, mitigate the bias, and recheck. More sophisticated machine learning workflows are given in the author tutorials and demo notebooks in the codebase.Here are the steps involved Step 1: Write import statements Step 2: Set bias detection options, load dataset, and split between train and test Step 3: Compute fairness metric on original training dataset Step 4: Mitigate bias by transforming the original dataset Step 5: Compute fairness metric on transformed training dataset Step 1 Import StatementsAs with any python program, the first step will be to import the necessary packages. Below we import several components from the `aif360` package. We import the GermanDataset, metrics to check for bias, and classes related to the algorithm we will use to mitigate bias.
###Code
# Load all necessary packages
import sys
sys.path.insert(1, "../")
import numpy as np
np.random.seed(0)
from aif360.datasets import GermanDataset
from aif360.metrics import BinaryLabelDatasetMetric
from aif360.algorithms.preprocessing import Reweighing
from IPython.display import Markdown, display
###Output
_____no_output_____
###Markdown
Step 2 Load dataset, specifying protected attribute, and split dataset into train and testIn Step 2 we load the initial dataset, setting the protected attribute to be age. We then splits the original dataset into training and testing datasets. Although we will use only the training dataset in this tutorial, a normal workflow would also use a test dataset for assessing the efficacy (accuracy, fairness, etc.) during the development of a machine learning model. Finally, we set two variables (to be used in Step 3) for the privileged (1) and unprivileged (0) values for the age attribute. These are key inputs for detecting and mitigating bias, which will be Step 3 and Step 4.
###Code
SEED = 42
dataset_orig = GermanDataset(
protected_attribute_names=['age'], # this dataset also contains protected
# attribute for "sex" which we do not
# consider in this evaluation
privileged_classes=[lambda x: x >= 25], # age >=25 is considered privileged
features_to_drop=['personal_status', 'sex'] # ignore sex-related attributes
)
dataset_orig_train, dataset_orig_test = dataset_orig.split([0.7], shuffle=True, seed=SEED)
privileged_groups = [{'age': 1}]
unprivileged_groups = [{'age': 0}]
###Output
_____no_output_____
###Markdown
Step 3 Compute fairness metric on original training datasetNow that we've identified the protected attribute 'age' and defined privileged and unprivileged values, we can use aif360 to detect bias in the dataset. One simple test is to compare the percentage of favorable results for the privileged and unprivileged groups, subtracting the former percentage from the latter. A negative value indicates less favorable outcomes for the unprivileged groups. This is implemented in the method called mean_difference on the BinaryLabelDatasetMetric class. The code below performs this check and displays the output, showing that the difference is negative (about -0.127 with this train/test split), i.e. the unprivileged group receives fewer favorable outcomes.
###Code
metric_orig_train = BinaryLabelDatasetMetric(dataset_orig_train,
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups)
#display(Markdown("#### Original training dataset"))
printb("#### Original training dataset")
print("Difference in mean outcomes between unprivileged and privileged groups = %f" % metric_orig_train.mean_difference())
###Output
[1;30m#### Original training dataset[0m
Difference in mean outcomes between unprivileged and privileged groups = -0.127143
###Markdown
Step 4 Mitigate bias by transforming the original datasetThe previous step showed that the privileged group was getting about 13% more positive outcomes in the training dataset. Since this is not desirable, we are going to try to mitigate this bias in the training dataset. As stated above, this is called _pre-processing_ mitigation because it happens before the creation of the model. AI Fairness 360 implements several pre-processing mitigation algorithms. We will choose the Reweighing algorithm [1], which is implemented in the `Reweighing` class in the `aif360.algorithms.preprocessing` package. This algorithm will transform the dataset to have more equity in positive outcomes on the protected attribute for the privileged and unprivileged groups. We then call the fit and transform methods to perform the transformation, producing a newly transformed training dataset (dataset_transf_train).`[1] F. Kamiran and T. Calders, "Data Preprocessing Techniques for Classification without Discrimination," Knowledge and Information Systems, 2012.`
###Code
RW = Reweighing(unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups)
dataset_transf_train = RW.fit_transform(dataset_orig_train)
###Output
_____no_output_____
###Markdown
Step 5 Compute fairness metric on transformed datasetNow that we have a transformed dataset, we can check how effective it was in removing bias by using the same metric we used for the original training dataset in Step 3. Once again, we use the function mean_difference in the BinaryLabelDatasetMetric class. We see the mitigation step was very effective: the difference in mean outcomes is now 0.0. So we went from an advantage of about 13% for the privileged group to equality in terms of mean outcome.
###Code
metric_transf_train = BinaryLabelDatasetMetric(dataset_transf_train,
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups)
printb("#### Transformed training dataset")
print("Difference in mean outcomes between unprivileged and privileged groups = %f" % metric_transf_train.mean_difference())
###Output
[1;30m#### Transformed training dataset[0m
Difference in mean outcomes between unprivileged and privileged groups = 0.000000
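###Markdown
A related check (an addition to the original tutorial): the same BinaryLabelDatasetMetric objects expose other group-fairness measures, for example disparate impact, the ratio of favorable-outcome rates between the unprivileged and privileged groups (a value of 1.0 indicates parity).
###Code
print("Disparate impact (original) = %f" % metric_orig_train.disparate_impact())
print("Disparate impact (transformed) = %f" % metric_transf_train.disparate_impact())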
|
notebooks/exploration/FI.ipynb | ###Markdown
Exploration of activity changesThis notebook explores the following features of neural activity:- Decoding - Using - Logistic Regression - Random forest - XGBoost - Model characteristics - Confusion matrix - "Distance"/"predict_proba" matrices - Feature importances - Comparing - Before/after changepoint - Autoshape/DRRD - Differences - Comparing - Before/after changepoint - Autoshape/DRRD - Measures - Distances - Similarities - Synchronization - Via spikeutils Table of Contents1 Imports and functions2 Feature importances2.1 XGBoost2.2 rbfSVM2.3 Logistic Regression3 Ignoring time Imports and functions
###Code
import os
os.chdir('../../')
import pandas as pd
import numpy as np
from itertools import combinations
from sklearn.model_selection import GroupShuffleSplit, cross_val_predict, GroupKFold
from sklearn.base import clone
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from spikelearn.data import io
from spikelearn.data.selection import select, to_feature_array
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
from spikelearn.models import shuffle_val_predict
def xyt(df):
    """Split a trial/time-indexed feature dataframe into X, y (time bin) and trial arrays."""
    ndf = df.reset_index()
    y = ndf.time.values
    trial = ndf.trial.values
    X = ndf.drop(['time', 'trial'], axis=1).values
    return X, y, trial
def baseline_correct(fa, label):
epochs = io.load(label, 'epoched_spikes')
mb = 1/epochs.baseline.apply(len).reset_index().drop('trial', axis=1).groupby('unit').apply(lambda x: np.mean(np.hstack(x.baseline)))
effective_baseline = select(epochs, is_selected=True).baseline.apply(len)+ mb
effective_baseline = pd.DataFrame(effective_baseline).reset_index().pivot(columns='unit', index='trial')
df = fa.reset_index().apply(lambda x: [x[2:].values - (1/5)*effective_baseline.loc[x.trial].values], axis=1)
corrected = pd.DataFrame(np.vstack(df.values).squeeze(), index=fa.index, columns=fa.columns)
return corrected
def product(X):
    """Pairwise products of the elements of a 1-d array."""
    return np.array(list(map(lambda x: x[0]*x[1], combinations(X, 2))))
def add_products(X):
    """Append all pairwise feature products to each row of X."""
    products = np.apply_along_axis(product, 1, X)
    return np.hstack((X, products))
def encode_by_rate_quantile(X, q):
le, oh = LabelEncoder(), OneHotEncoder()
df = pd.DataFrame(X).apply(lambda x: pd.qcut(x, q, duplicates='drop'))
df = df.apply(lambda x: le.fit_transform(x))
sp = pd.DataFrame(index=df.index)
for i in df.columns:
sp = sp.join(pd.DataFrame(oh.fit_transform(df[i].values.reshape(-1,1)).todense()), rsuffix='_unit_%d'%i, lsuffix='_unit_%d'%i)
return sp
###Output
_____no_output_____
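###Markdown
A tiny usage sketch for `add_products` (added for clarity, not part of the original analysis): each row gains its pairwise feature products as extra columns.
###Code
_demo = np.array([[1., 2., 3.],
                  [4., 5., 6.]])
add_products(_demo)  # columns: x0, x1, x2, x0*x1, x0*x2, x1*x2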
###Markdown
Feature importances
###Code
data = io.load('DRRD 7', 'wide_smoothed')
X, y, trial = to_feature_array(select(data, _min_duration=1.5), True)
activity = pd.DataFrame(X, index=pd.Index(y, name='time')).reset_index()
mean = activity.groupby('time').mean()
std = activity.groupby('time').std()/np.sqrt(np.unique(trial).shape[0])
mean = mean.reset_index().melt(id_vars='time', var_name='unit', value_name='mean').set_index(['unit', 'time'])
std = std.reset_index().melt(id_vars='time', var_name='unit', value_name='std').set_index(['unit', 'time'])
actv = pd.read_csv('data/results/changepoint/feature_importance_activity.csv')
import pandas as pd
import numpy as np
import sys
sys.path.append('.')
from numpy import dot
import matplotlib.pyplot as plt
import seaborn as sns
from itertools import product, count
data = pd.read_csv('data/results/changepoint/feature_importance.csv').drop('Unnamed: 0', axis=1)
actv = pd.read_csv('data/results/changepoint/feature_importance_activity.csv')
id_vars = ['rat','dataset','num_trials', 'unit', 'time', 'logC']
each_mean = data.groupby(id_vars + ['when']).mean().reset_index(-1)
same_mean = data.groupby(id_vars).mean()
same_std = data.groupby(id_vars).std()
aux = (each_mean-same_mean)/same_std
aux['when'] = each_mean.when.values
data = aux.reset_index()
pred.columns
pred = pd.read_csv('data/results/changepoint/feature_importance_predictions.csv')
poi = pred.groupby('num_trials').get_group(50)
score = pd.DataFrame(poi.groupby(['rat', 'set', 'logC','cv','when']).apply(lambda x: x[['predictions','true']].corr().iloc[0,1]), columns=['score']).reset_index()
one_score = score.groupby(['rat','logC']).get_group(('DRRD 7', -1.5)).groupby(['set','when']).mean()
one_score
data = pd.read_csv('data/results/changepoint/feature_importance.csv')
actv = pd.read_csv('data/results/changepoint/feature_importance_activity.csv')
id_vars = ['rat','dataset','num_trials', 'unit', 'time', 'logC']
each_mean = data.groupby(id_vars + ['when']).mean().reset_index(-1)
same_mean = data.groupby(id_vars).mean()
same_std = data.groupby(id_vars).std()
aux = (each_mean-same_mean)/same_std
aux['when'] = each_mean.when.values
data = aux.reset_index()
pred = pd.read_csv('data/results/changepoint/feature_importance_predictions.csv')
pred = pred.groupby('num_trials').get_group(50)
score = pd.DataFrame(pred.groupby(['rat', 'set', 'logC','cv','when']).apply(lambda x: x[['predictions','true']].corr().iloc[0,1]), columns=['score']).reset_index()
plt.figure(figsize=(12,24))
rat = 'DRRD 10'; logC=-1.5
one_rat = actv.groupby(['rat', 'logC', 'num_trials']).get_group((rat, logC, 50))
for i, unit in enumerate(one_rat.unit.unique()):
plt.subplot(one_rat.unit.nunique(), 3, 3*(i+1)-1)
for when, c in [('init', 'b'),('end', 'r')]:
local = one_rat.set_index(['unit','time']).groupby('when').get_group(when)
local.loc[unit]['mean'].plot(color=c)
plt.fill_between(local.loc[unit]['std'].index.values,
local.loc[unit]['mean'] + local.loc[0]['std'],
local.loc[unit]['mean'] - local.loc[0]['std'],alpha=.4,color=c)
plt.axis('off');
ax = plt.subplot(1,3,1)
d = data.groupby(['rat', 'logC', 'num_trials','when']).get_group((rat, logC, 50, 'end'))
d = d.pivot(index='unit', columns='time', values='value')
sns.heatmap(d.values, ax=ax, vmin = -2, vmax = 2, cbar = False, cmap='RdBu_r')
one_score = score.groupby(['rat','logC']).get_group((rat, logC)).groupby(['set','when']).mean()
ax = plt.subplot(1,3,3)
sns.barplot(x='set',y='score',hue='when',data=one_score.reset_index(), ax=ax); plt.ylim([0,1])
plt.title('logC: {}'.format(logC))
plt.figure(figsize=(12,24))
rat = 'DRRD 10'
one_rat = actv.groupby(['rat', 'logC', 'num_trials']).get_group((rat, -1.5, 50))
for i, unit in enumerate(one_rat.unit.unique()):
plt.subplot(one_rat.unit.nunique(), 3, 3*(i+1)-1)
for when, c in [('init', 'b'),('end', 'r')]:
local = one_rat.set_index(['unit','time']).groupby('when').get_group(when)
local.loc[unit]['mean'].plot(color=c)
plt.fill_between(local.loc[unit]['std'].index.values,
local.loc[unit]['mean'] + local.loc[0]['std'],
local.loc[unit]['mean'] - local.loc[0]['std'],alpha=.4,color=c)
plt.axis('off');
ax = plt.subplot(1,3,1)
d = data.groupby(['rat', 'logC', 'num_trials','when']).get_group((rat, -1.5, 50, 'end'))
d = d.pivot(index='unit', columns='time', values='value')
sns.heatmap(d.values, ax=ax, vmin = -2, vmax = 2, cbar = False, cmap='RdBu_r')
ax = plt.subplot(1,3,3)
sns.barplot(x='set',y='score',hue='when',data=one_score.reset_index(), ax=ax); plt.ylim([0,1])
plt.show()
plt.savefig('feature_importance_'+rat+str(logC))
###Output
_____no_output_____
###Markdown
XGBoost
###Code
from xgboost import XGBClassifier
xgb = XGBClassifier()
sh = GroupShuffleSplit(5, .2, .8)
res2 = shuffle_val_predict(xgb, X, y, trial,sh, get_weights=False)
train = res.groupby('set').get_group('train')
test = res.groupby('set').get_group('test')
plt.figure(figsize=(14,6))
plt.subplot(1,2,1)
sns.heatmap(confusion_matrix(train.true, train.predictions))
plt.subplot(1,2,2)
sns.heatmap(confusion_matrix(test.true, test.predictions))
X,y, trial = xyt(corrected)
ns = io.load('DRRD 8', 'narrow_smoothed')
train = res2.groupby('set').get_group('train')
test = res2.groupby('set').get_group('test')
plt.figure(figsize=(14,6))
plt.subplot(2,2,1)
sns.heatmap(confusion_matrix(train.true, train.predictions))
plt.subplot(2,2,2)
sns.heatmap(confusion_matrix(test.true, test.predictions))
plt.subplot(2,2,3)
sns.heatmap(train.groupby('true').mean().iloc[:,:10])
plt.subplot(2,2,4)
sns.heatmap(test.groupby('true').mean().iloc[:,:10])
xgb.fit(X, y)
def xgb(label, method = 'proba'):
# params = io.load(rat, 'XGboost')
# params = dict(zip(params.columns, params.values.reshape(-1)))
# params['max_depth'] = int(params['max_depth'])
# params['n_estimators'] = int(params['n_estimators'])
# params['min_child_weight'] = int(params['min_child_weight'])
clf = XGBClassifier(n_estimators=12, subsample=.5, gamma=0.1, max_depth=16)
s = io.load(label, 'wide_smoothed')
X,y, trial = to_feature_array(select(s.reset_index(), _min_duration=1.5, _min_trial=100, is_tired=False).set_index(['trial','unit']), subset='cropped')
res = shuffle_val_predict(clf, X, y, trial, GroupShuffleSplit(30, 10, 80),method)
clf = clf.fit(X, y)
return res, clf
plt.figure(figsize=(12, 20))
for i, rat in enumerate(['DRRD %d'%n for n in [7,8,9,10]]):
res, clf = xgb(rat)
plt.subplot(4, 2, 2*i+1)
sns.heatmap(res.drop(['cv','group'],axis=1).groupby('true').mean()); plt.title(rat)
plt.subplot(4, 2, 2*i+2)
plt.bar(np.arange(clf.feature_importances_.shape[0]),clf.feature_importances_)
res, clf = xgb('DRRD 8', 'predict')
sns.heatmap(res.drop(['cv','group'],axis=1).groupby('true').mean())
###Output
_____no_output_____
###Markdown
--- rbfSVM
###Code
from sklearn.svm import SVC
params = io.load('DRRD 7', 'rbfSVM')
params= dict(zip(params.columns, params.values.reshape(-1)))
clf = SVC(**params)
def get_predictions_or_proba(clf, X, mode):
"""
Local helper function to ease the switching between predict_proba and predict
"""
if mode == 'predict':
return clf.predict(X)
elif mode in ['proba','probability']:
try:
return clf.predict_proba(X)
except:
return clf.decision_function(X)
n_shuffles=10
train_size=.8
test_size=.2
results = pd.DataFrame(columns = ['trial', 'shuffle', 'predictions','true'])
sh = GroupShuffleSplit(n_splits=n_shuffles, train_size=train_size,test_size=test_size)
for i, (train_idx, test_idx) in enumerate(sh.split(X,y,trial)):
clf_local = clone(clf)
clf_local.fit(X[train_idx,:],y[train_idx])
predictions = get_predictions_or_proba(clf_local, X[test_idx], 'predict' )
true = y[test_idx]
results = results.append(pd.DataFrame({'shuffle':i, 'predictions': predictions,
'trial':trial[test_idx], 'true':true} ) )
###Output
_____no_output_____
###Markdown
Logistic Regression - classify whether the activity comes from the initial or the final trials - test on the intermediate trials
###Code
from ipywidgets import interact, HBox, interactive_output, interact_manual
import ipywidgets.widgets as wdg
from IPython.display import display
from scipy.stats import pearsonr
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.preprocessing import StandardScaler
ss = StandardScaler()
%load_ext autoreload
%autoreload 2
d = io.load('DRRD 8', 'wide_smoothed')
s = select(d, _min_duration=1.5, is_tired=False, is_selected=True)
init = select(s, takefrom='init', maxlen=100)
end = select(s, takefrom='end', maxlen=100)
Xi, yi, tri = to_feature_array(init, True)
Xe, ye, tre = to_feature_array(end, True)
clf = LogisticRegression()
srat = wdg.Dropdown(options=['DRRD %d'%i for i in [7,8,9,10]])
sdset = wdg.Dropdown(options=['narrow_smoothed', 'narrow_smoothed_norm','narrow_smoothed_viz', 'wide_smoothed'])
snsplits = wdg.IntSlider(min=2,max=10)
d = HBox([srat,sdset,snsplits])
def choose_rat(label, n_splits, dset):
unselected = io.load(label, dset).reset_index()
minTrial=unselected.trial.min()
maxTrial=unselected.trial.max()
# TODO change selection to inside, and add selection 'range'
sh = GroupShuffleSplit(n_splits=n_splits, train_size=.8,test_size=.2)
@interact_manual(logC = wdg.FloatSlider(min=-6, max=8,step=.25, continuous_update=False),
penalty = wdg.Dropdown(options=['l1','l2']),
trials = wdg.IntRangeSlider(min=minTrial, max=maxTrial, value=(minTrial, maxTrial), continuous_update=False),
duration = wdg.FloatRangeSlider(min=.5,max=10, value=(1.5,10), continuous_update=False),
intercept=True)
def choose_regularization(logC, penalty,intercept, trials, duration):
data = select(unselected, is_selected=True, _min_duration=duration[0], _max_duration=duration[1], _mineq_trial=trials[0], _maxeq_trial=trials[1]).set_index(['trial', 'unit'])
Xab, y, trial = xyt(to_feature_array(data, False, 'full'))#baseline_correct(
X = ss.fit_transform(Xab)#add_products
toi = np.logical_and(y>200, y<800)
X = X[toi]; y=y[toi]; trial = trial[toi]
# Create classifier
clf = LogisticRegression(C=10**logC, penalty=penalty, fit_intercept=intercept)
# Data holder for results
allres = pd.DataFrame()
fig, ax = plt.subplots(2, 2, figsize=(20,12));
#disp =display(fig,display_id='fig')
res, weights_full = shuffle_val_predict(clf, df, cv=sh, get_weights = True)
#allres = allres.append(res.groupby('true').mean().drop(['cv', 'group'],axis=1)).groupby('true').mean()
train = res.groupby('set').get_group('train')
test = res.groupby('set').get_group('test')
train_proba = train.groupby('true').mean().drop(['cv', 'group','predictions', 'mean'],axis=1)
test_proba = test.groupby('true').mean().drop(['cv', 'group','predictions', 'mean'],axis=1)
# Plot train results
axt = plt.subplot(3,4,1); sns.heatmap(train_proba, ax=axt, cbar=False); plt.title('Train probabilities')
axt = plt.subplot(3,4,2); sns.heatmap(confusion_matrix(train.true, train.predictions), ax=axt, cbar=False); plt.title('Train predictions')
# Plot test results
axt = plt.subplot(3,4,5); sns.heatmap(test_proba, ax=axt, cbar=False); plt.title('Test probabilities')
axt = plt.subplot(3,4,6); sns.heatmap(confusion_matrix(test.true, test.predictions), ax=axt, cbar=False); plt.title('Test predictions')
c = sns.palettes.color_palette('viridis', X.shape[1])
weights = weights_full.applymap(abs).groupby('unit').mean().reset_index().value
w = (weights.sort_values()/weights.max()).values **2
rank = weights.argsort().values
activity = pd.DataFrame(X[:,rank], index=y).reset_index()
mean = activity.groupby('index').mean()
std = activity.groupby('index').std()/np.sqrt(np.unique(trial).shape[0])
# Weights barplot
axt = plt.subplot(3,4,3)
sns.barplot('unit','value', data=weights_full.abs(), ax=axt, palette=np.array(c)[np.argsort(rank)]); plt.title('(abs) Weight of each neuron')
# Mean activity with weight transparency
axt = plt.subplot(3,4,4)
for i, line in mean.transpose().iterrows():
plt.plot(line, alpha=w[i], linewidth=4*(i**1.1)/X.shape[1], color=c[i])
plt.fill_between(np.unique(y), line+std[i], line-std[i], alpha=w[i]/2, color=c[i])
plt.title('Mean activity of neurons')
train_r = pearsonr(train.true, train['mean'])[0]
test_r = pearsonr(test.true, test['mean'])[0]
rs = pd.DataFrame([train_r, test_r],index=['Train','Test'], columns =['r']).reset_index()
sns.barplot('r','index',data=rs,ax=plt.subplot(6,4,11)); plt.xlim([0,0.5]); plt.title('Score using expected value')
train_r = pearsonr(train.true, train.predictions)[0]
test_r = pearsonr(test.true, test.predictions)[0]
rs = pd.DataFrame([train_r, test_r],index=['Train','Test'], columns =['r']).reset_index()
sns.barplot('r','index',data=rs,ax=plt.subplot(6,4,15)); plt.xlim([0,0.5]); plt.title('Score using maximum likelihood')
# Weights x Time (ovr)
plt.subplot(3,4,8)
mean_weight = (weights_full.groupby(['unit','time']).agg(np.mean)).value.reset_index().pivot(columns='time', index='unit').iloc[rank].reset_index(drop=True)
var_weight = (weights_full.groupby(['unit','time']).agg(np.std)/np.sqrt(weights_full.shuffle.unique().shape[0])).value.reset_index().pivot(columns='time', index='unit').iloc[rank].reset_index(drop=True)
for i in range(weights_full.unit.unique().shape[0]):
line = mean_weight.iloc[i,:].values
std = var_weight.iloc[i,:].values
plt.plot(np.unique(y),line, alpha=w[i], linewidth=4*(i**1.1)/X.shape[1], color=c[i])
plt.fill_between(np.unique(y), line+std, line-std, alpha=w[i]/2, color=c[i])
plt.title('Mean weight for each time')
# Trials being used
def trial_matrix(df):
A = np.zeros((2000,2))
A[df.groupby('set').get_group('train').group.unique(),0] = -1
A[df.groupby('set').get_group('test').group.unique(),1] = 1
return A.transpose()
trial_sets = pd.DataFrame(res.groupby(['cv']).apply(trial_matrix), columns=['Trials'])
each_set = pd.DataFrame.join(trial_sets.applymap(lambda x: (x==1).sum()), trial_sets.applymap(lambda x: (x==-1).sum()), lsuffix='_test', rsuffix='_train').reset_index().melt(id_vars='cv')
sns.barplot(x='value', y='variable', data=each_set, ax=plt.subplot(3,4,12))
#sns.heatmap(set1, ax=plt.subplot(3,4,12))
# TODO jointplot mean activity vs weight at each time, with one point per shuffle
#sns.jointplot()
plt.tight_layout()
out = interactive_output(choose_rat, dict(label=srat, n_splits = snsplits, dset = sdset))
display(d, out)
###Output
_____no_output_____
###Markdown
Ignoring time
###Code
data_ = select(io.load('DRRD 7', 'narrow_smoothed'), _min_duration=1.5, _max_duration=4.5, is_selected=True)
data = to_feature_array( select( data_, is_tired = False),
False, 'cropped')#.unstack(-1).reset_index()
dataraw = io.load('DRRD 7', 'narrow_smoothed')
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression()
label, dset = 'DRRD 8', 'wide_smoothed'
data = select( io.load(label, dset), _min_duration=1.5, _max_duration=4.,
is_selected=True, is_tired=False )
data = to_feature_array(data, False, 'cropped')
times = data.reset_index().time.unique()
crop = 4
to_use_times = times[crop:-crop]
N_SPLITS = 15
%time res = shuffle_val_predict( clf, data, n_splits=N_SPLITS )
from sklearn.metrics import confusion_matrix
res.score.mean()
sns.heatmap(res.proba.groupby('true_label').mean().drop('group',axis=1))
sns.heatmap(res.proba.groupby('true_label').mean().drop('group',axis=1))
crop_res = shuffle_val_predict(clf, select(data.reset_index(),
time_in_=to_use_times).set_index(['trial', 'time']), n_splits=N_SPLITS)
sns.heatmap(crop_res.proba.groupby('true_label').mean().drop('group',axis=1))
behav = io.load('DRRD 8', 'behavior')
behav.duration.reset_index().plot.scatter('trial', 'duration')
behav.duration.rolling(10).apply(changepoint).plot(color='r', linewidth=3)
from spikelearn.
###Output
_____no_output_____ |
jupyterbook/content-de/python/lab/ex09-modules.ipynb | ###Markdown
Exercise 9 - Modules

So far, almost everything we have used or encountered has been a core element of the Python language. By now, we know enough to be able to accomplish almost any computational task. However, writing all the code from scratch can be a time-consuming affair. One of the great attractions of Python as a programming language for scientific development is the ready availability of a large number of 'modules' - ready-to-use collections of functions - which provide good-quality code for most common tasks.

To use code from a module, you need to import it. This is conventionally done at the start of your Python file, although in principle it can be done at any point before you need to use functions from the module. There are several variant forms of import statement, but the simplest is
```python
import module_name
```
where `module_name` is the name of the module you wish to use. Once you have done this, you can access functions within the module by typing `module_name.function_name()`. For example, the `datetime` module provides a range of functions to work with date and/or time information. To calculate the amount of time elapsed between 10am on 24 September 2018 (when this course started) and its conclusion at 4pm on 5 October, we can do the following:
```python
import datetime
a = datetime.datetime(2018, 9, 24, 10, 0, 0)  # Year/Month/Day/Hour/Min/Sec
b = datetime.datetime(2018, 10, 5, 16, 0, 0)
print(b - a)
```
Here, we are using a `datetime()` function within the `datetime` module.
###Code
# Try it here!
###Output
_____no_output_____
###Markdown
Sometimes, modules have long names and we expect to make heavy use of the module's functions. In this case, we might find having to preface each function name with the full module name rather tedious. Python therefore allows us to assign a shorthand name to any module when we import it, by adding `as shorthand_name` after the `import` statement. For example, in a later exercise we will use the module `matplotlib.animation` to generate animations. We will import this as follows:
```python
import matplotlib.animation as anim
a = anim.FuncAnimation(...)
```
If we did not use `as anim` when we imported the module, we would have to type `a = matplotlib.animation.FuncAnimation(...)` - which gets tedious rather quickly.

Sometimes, we know we only wish to use one or two functions from a module. In this case, we may prefer not to import the entire module, but instead use
```python
from module_name import function1, function2
```
This makes `function1` and `function2` available in our program, and we do not need to preface them by any module name. For example, the `math` module provides a number of functions for mathematical operations. If we just want to be able to compute exponentials and sines, we could do
```python
from math import exp, sin
a = exp(3) * sin(1.6)
print(a)
```
but we would not have access to anything else within the `math` library.
###Code
# Try it here!
###Output
_____no_output_____
###Markdown
The reason Python requires you to preface function calls with the module name, or import specific functions, is to avoid 'namespace clashes', where two functions with the same name are loaded. For example, many modules might provide a function called `save`, which does something appropriate to that module's purpose. By encouraging you to specify `module_name.save()`, or specifically import a `save` routine from one particular module, Python tries to avoid the bugs and confusion that could otherwise occur.

However, if you really want to expose all the functions in a module, Python allows you to use an import statement of the form
```python
from module_name import *
```
This works in exactly the same way as `from module_name import function_name`, except that it uses the 'wild-card' character `*` to indicate that *everything* in the module should be made available. A few modules are intended to be used in this way, but in general, it is best avoided unless you have a reason to need it.

Getting to grips with `datetime`

An important skill is to be able to work out what a particular module does, and how to use it, without being taught about it. **➤ Write a program that asks the user to enter their birth date, and then calculates how long it will be until their next birthday.** You will need to use the [official documentation](https://docs.python.org/3.6/library/datetime.html) for the `datetime` module, as well as [other resources](http://www.google.com.au). You will find the `datetime.datetime.strptime()` and `datetime.datetime.now()` functions useful.
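For orientation, here is a minimal sketch of the two functions mentioned above (the date string and its format are only illustrative, not part of the exercise solution):
```python
import datetime

# Parse a date string into a datetime object (format chosen for illustration)
birth = datetime.datetime.strptime("24/09/1990", "%d/%m/%Y")
now = datetime.datetime.now()
print(now - birth)  # elapsed time since that date
```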
###Code
# Try it here!
###Output
_____no_output_____
###Markdown
NumPy

An important module (or, really, collection of modules) for scientists is NumPy ('Numerical Python'). This provides a wide range of tools and data structures for working with numerical data, and for implementing matrix-vector calculations. It is conventional to use `import numpy as np` when importing NumPy. NumPy then provides a fairly comprehensive set of mathematical functions, including
- `np.sin()`, `np.cos()`, `np.tan()` - Trigonometric functions
- `np.arcsin()`, `np.arccos()`, `np.arctan()` - Inverse trigonometric functions
- `np.arctan2()` - [Two-argument version of the inverse tangent function](https://en.wikipedia.org/wiki/Atan2) that returns value in the correct quadrant
- `np.sinh()`, `np.cosh()`, `np.tanh()`, `np.arcsinh()`, `np.arccosh()`, `np.arctanh()` - Hyperbolic functions and their inverses
- `np.exp()`, `np.log()` - Exponentiation and its inverse, the natural logarithm
- `np.log10()` - Logarithm to base-10

NumPy also provides some mathematical constants, including `np.pi` and `np.e`.
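For example, a quick check of a few of these functions (the input values are arbitrary):
```python
import numpy as np

print(np.sin(np.pi / 2))       # 1.0
print(np.exp(np.log(5.0)))     # 5.0
print(np.arctan2(-1.0, -1.0))  # -2.356..., i.e. -3*pi/4, in the correct quadrant
```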
###Code
# Try it here!
###Output
_____no_output_____
###Markdown
However, the core feature of NumPy is the array data type. This allows us to create structured grids containing data, which can then be accessed, transformed and used efficiently. Where numerical data is to be stored or processed, a NumPy array is likely to be the most effective mechanism to use. There are two main ways to create an array. First, we can use the `np.array()` function, to build an array from a list (or similar):
```python
a = np.array([1, 2, 3])
b = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(a)
print(b)
```
Second, we can use various functions that create 'standard' arrays:
- `np.ones(dims)` - Array filled with elements equal to `1`
- `np.zeros(dims)` - Array filled with elements equal to `0`
- `np.eye(N)` - $N \times N$ identity matrix (ones on diagonal, zero otherwise)
- `np.arange(start, stop, step)` - Create ascending sequence of numbers from `start`, in intervals of `step`, finishing before `stop`. If only two (unlabelled) arguments are given, it is assumed that `step=1`. If only one argument is given, it is additionally assumed that `start = 0`.
- `np.linspace(start, stop, N)` - Create an array containing $N$ equally-spaced points, with the first one being at `start`, and the last being at `stop`.

Here, `dims` is a list or tuple specifying the number of elements in each dimension of the array: for example, `np.ones([3])` creates a one-dimensional array, identical to `np.array([1, 1, 1])`, whereas `np.zeros([3, 6, 2, 2, 3])` creates a five-dimensional array filled with zeros. Many of the array-creation routines take an optional argument, `dtype=type`, where `type` is a string. This specifies the data type which can be stored in the array. For example, `np.ones([3, 3], dtype='int')` creates an array of integer type, while `np.zeros([3, 3], dtype='bool')` creates an array of Boolean (True/False) values, initialised to `False`.

**➤ Try each of these ways of building an array, and make sure you understand how they work.**
###Code
# Try it here!
###Output
_____no_output_____
###Markdown
Array elements can be accessed using `[...]` and a (zero-based) index for each dimension, or `:` to denote all elements within that dimension: we can thus obtain arrays that are a subset of a larger array.
```python
a = np.array([3, 4, 5])
print(a[2])  # Prints 5
b = np.zeros([3, 4, 4, 2, 4])
print(b[:, 3, 3, :, 0])  # Prints a 3 x 2 matrix of zeros
b[:, 3, 3, :, 0] = np.ones([3, 2])
print(b[:, :, 3, 0, 0])  # Prints 3 x 4 matrix with column of zeros
```
As we have already seen with lists, it is possible to specify a limited range of any index by using syntax of the form `start:stop:step`.
###Code
# Try it here!
###Output
_____no_output_____
###Markdown
NumPy also provides tools for linear algebra - for example, if `a` and `b` are 1-D arrays that represent vectors, we can compute the dot product using `a.dot(b)`. We will not discuss linear algebra here; for more details, there are many resources [such as this one](http://people.duke.edu/~ccc14/pcfb/numpympl/LinearAlgebra.html) online.

One very useful feature of NumPy is that it has various functions to help read and write data from files without the hassle of doing it yourself. If you have a plain text file that contains a dataset as columns, you can call `np.loadtxt(filename)`. This will automatically open the file, read it, and return its contents in a NumPy array. If your file has a 'header' containing information in a different format from the main data table, you may need to tell NumPy to ignore some rows: this is done by passing the argument `skiprows = n` (the number of header rows) to `np.loadtxt()`. Similarly, there is an `np.savetxt()` function which will write the contents of an array to a plain text file.

**➤ Use `np.loadtxt()` and `np.savetxt()` to repeat Exercise 8.** Notice how much easier it is!
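A hedged sketch of the pattern (the file names and the one-line header are assumptions):
```python
import numpy as np

# Read a plain-text table, skipping an assumed one-line header
data = np.loadtxt("measurements.txt", skiprows=1)

# ... do something with the array ...

# Write the (possibly modified) array back out
np.savetxt("processed.txt", data)
```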
###Code
# Try it here!
###Output
_____no_output_____
###Markdown
There are many other useful modules. Some key ones include:
- `scipy` - a large collection of tools for accomplishing a wide range of mathematical and scientific tasks
- `matplotlib` - Plotting figures
- `stats` - statistical calculations
- `pandas` - working with datasets

We will meet some of these in later exercises.

Creating new modules

You can also create your own modules. This allows you to 'package up' functions that you use regularly, and re-use them in different programs. This is much better than routinely copying and pasting functions from one file to another, which usually ends up leading to confusion: one inevitably ends up with multiple versions of the function that all work in slightly different ways. By using a module, you guarantee that all programs that use it 'see' the same function. Note that Python's ability to have 'optional' function arguments, with default values (`def function(..., var1=default1, var2=default2...)`) is often very useful for adding enhancements to a function without breaking any existing programs.

To create a module, you simply create a plain text file (using a text editor, or by selecting `New > Text File` within Jupyter's file browser window), and name it `module_name.py`. Place whatever functions you wish to include within the module in this file, and save it. Then, you will be able to `import module_name` and access the functions, as with any other module.

**➤ Package your birthday calculator into a module, and test it.**
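As a sketch of what that might look like (the file name, function name and date format are only examples, not a prescribed solution), a file `birthday_tools.py` could contain:
```python
# birthday_tools.py -- example module (names and date format are illustrative only)
import datetime

def days_until_birthday(birth_date_string, date_format="%d/%m/%Y"):
    """Return the number of days until the next birthday."""
    birth = datetime.datetime.strptime(birth_date_string, date_format)
    now = datetime.datetime.now()
    next_birthday = birth.replace(year=now.year)
    if next_birthday < now:
        next_birthday = next_birthday.replace(year=now.year + 1)
    return (next_birthday - now).days
```
A separate program (or notebook cell) would then use it with `import birthday_tools` followed by `birthday_tools.days_until_birthday("24/09/1990")`.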
###Code
# Try it here!
###Output
_____no_output_____ |
DataScience/COVID_SIR/COVID_SIR_Model.ipynb | ###Markdown
BIO101 Epidemiology Assignment By Kartikey Sharma and Surya Shukla
###Code
# Importing libraries
import pandas as pd #dataframes
import math #mathematical functions
import scipy #integration
import numpy as np #arrays
import seaborn as sns #plotting
import matplotlib.pyplot as plt #plotting
%matplotlib inline
from sklearn import preprocessing #ml
from scipy import integrate, optimize #optimizing the process
import warnings
warnings.filterwarnings('ignore') #to ignore the cases where division by 0 occurs
# Importing data
# Day 1 is 07/03/2020 (7th March 2020), when the first confirmed case was reported
d=pd.read_csv("C:/Users/uttam/anaconda3/BIOProj/GithubData.csv")
d.tail(10)
d.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 105 entries, 0 to 104
Data columns (total 14 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Province_State 105 non-null object
1 Country_Region 105 non-null object
2 Days Passed 105 non-null int64
3 Infected 105 non-null int64
4 Deaths 105 non-null int64
5 Recovered 105 non-null int64
6 Active 70 non-null float64
7 Fatal per Confirmed 105 non-null float64
8 Recovered per Confirmed 105 non-null float64
9 Fatal per(Fatal or recovered) 105 non-null float64
10 Growth Rate 105 non-null float64
11 Daily Confirmed 105 non-null int64
12 Population 105 non-null int64
13 Susceptible 105 non-null int64
dtypes: float64(5), int64(7), object(2)
memory usage: 11.6+ KB
###Markdown
There are 35 missing values in the 'Active' column.
###Code
d.corr()
###Output
_____no_output_____
###Markdown
The correlations between Confirmed and Days Passed, Deaths and Days Passed, Recovered and Days Passed, Confirmed and Deaths, Confirmed and Recovered, and Recovered and Deaths are VERY HIGH. This implies that these variables are heavily correlated. Exploratory Data Analysis Visualising Data with respect to Days Passed
###Code
# confirmed cases
plt.figure(figsize=(20,10))
plt.title("Time vs Infected cases",fontsize=20)
sns.barplot(data=d, y="Infected",x='Days Passed',palette='gnuplot')
plt.show()
# deceased cases
plt.figure(figsize=(20,10))
plt.title("Time vs Deceased cases",fontsize=20)
sns.barplot(data=d, y="Deaths",x='Days Passed',palette='gnuplot')
plt.show()
#recovered cases
plt.figure(figsize=(20,10))
plt.title("Time vs Recovered cases",fontsize=20)
sns.barplot(data=d, y="Recovered",x='Days Passed',palette='gnuplot')
plt.show()
###Output
_____no_output_____
###Markdown
Visualising Together
###Code
#Plotting all three columns together
d[0:114].plot(x='Days Passed', y=["Infected","Recovered","Deaths"] ,figsize=(12,8), grid=False,title="Confirmed vs Recovered vs Deaths")
plt.show()
###Output
_____no_output_____
###Markdown
Clearly, South Carolina's infected-cases curve has not yet peaked, and since the recovered curve has not crossed the confirmed curve, the situation is still an outbreak.
###Code
# Plotting the rates of fatality and recovery
d[0:].plot(x='Days Passed', y=["Fatal per Confirmed","Recovered per Confirmed","Fatal per(Fatal or recovered)"] ,figsize=(12,8), grid=True,title="Rates")
plt.show()
###Output
_____no_output_____
###Markdown
Growth factor. Where C is the number of confirmed cases, Growth Factor = ΔC(n)/ΔC(n−1)
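For reference, a hedged sketch of how such a growth-factor series could be derived from the cumulative infected counts in this dataframe (the dataset already ships a 'Growth Rate' column; handling of zero denominators is left out here):
```python
# Growth Factor = ΔC(n) / ΔC(n-1), from the cumulative 'Infected' column (sketch only)
delta = d['Infected'].diff()
growth_factor = delta / delta.shift(1)
growth_factor.tail()
```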
###Code
plt.figure(figsize=(15,10))
plt.title("Growth Factor with respect to Time")
sns.lineplot(data=d,y='Growth Rate',x='Days Passed')
plt.show()
###Output
_____no_output_____
###Markdown
We see that the growth rate eventually approaches 1, i.e., there was an outbreak of the coronavirus in South Carolina early on, but it stabilised with time.
###Code
# last 7 days
plt.figure(figsize=(10,8))
plt.title("Growth Rate with respect to Time")
sns.lineplot(data=d[98:],y='Growth Rate',x='Days Passed')
plt.show()
###Output
_____no_output_____
###Markdown
============================== EDA ENDS ============================== SIR COVID Model. There's a lot of information to be extracted from this data; for example, we haven't analyzed the effects of the longitude/latitude of countries. However, since our main purpose is to develop a predictive model in order to understand the key factors that impact COVID-19 transmission, we will use the SIR model. SIR is a simple model that considers a population in which each individual belongs to one of the following states:
1. Susceptible (S). The individual hasn't contracted the disease, but can be infected through transmission from infected people.
2. Infected (I). This person has contracted the disease.
3. Recovered/Deceased (R). The disease may lead to one of two destinies: either the person survives, hence developing immunity to the disease, or the person is deceased.
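In differential form (using the standard notation, with $\beta$ the transmission rate and $\gamma$ the recovery/removal rate, corresponding to the `betaavg` and `gammaavg` parameters computed below), the SIR model reads:
\begin{align}
\frac{dS}{dt} &= -\beta S I \\
\frac{dI}{dt} &= \beta S I - \gamma I \\
\frac{dR}{dt} &= \gamma I
\end{align}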
###Code
davg= d.Deaths[103]/(d.Deaths[103]+d.Recovered[103])
print("D average is "+str(davg))
population= 5150000
# source : https://bit.ly/3er5bnx
###Output
_____no_output_____
###Markdown
Calculating Rho
###Code
# Defining lists of various parameters
s= list(d.Susceptible)
i= list(d.Infected)
r= list(d.Recovered + d.Deaths)
dates= list(d['Days Passed'])
def rhovalue(sus, inf, s0):
return (inf + sus - population) / math.log( sus / s0)
rhovalues = [rhovalue(sus, inf, s[0]) for sus, inf in zip(s[1:], i[1:])]
rho= sum(rhovalues) / len(rhovalues)
rho= round(rho,4)
print("Optimal values of rho is "+str(rho))
###Output
Optimal values of rho is 1577169.2522
###Markdown
Calculating Alpha and Phi
###Code
# Calculating alpha and phi according to the formulas from the Google Classroom lectures
alpha = math.sqrt((s[0]/rho - 1) ** 2 + (2*s[0]*(population - s[0])) / (rho ** 2))
phi = np.arctanh([(s[0]/rho - 1)/alpha])[0]
print("Value of Alpha is : "+str(round(alpha,4)))
print("Value of Phi is : "+str(round(phi,4)))
###Output
Value of Alpha is : 2.2653
Value of Phi is : 7.7082
###Markdown
Calculating Beta and Gamma
###Code
#function to generate gamma
def gamma(r, t):
x = (((r*s[0]) / (rho*rho) - s[0]/rho + 1)) / alpha
tanhi = np.arctanh([x])[0]
return ((phi + tanhi) * 2) / (alpha * t)
gamma_values=[]
for index,row in d.iterrows():
#print(row['Recovered'])
gval= gamma(row['Recovered'], index+1)
gamma_values.append(gval)
gammaavg = sum(gamma_values) / len(gamma_values)
betaavg = gammaavg / rho
print("Optimal value of Beta is : " +str(betaavg))
print("Optimal value of Gamma is : " +str(gammaavg))
###Output
Optimal value of Beta is : 4.6196861176529646e-08
Optimal value of Gamma is : 0.07286026899577447
###Markdown
Equations governing SIR
###Code
# Defining the underlying governing equations
def sir(y, t, r, a):
S, I, R = y
ds = -r * S * I
dr = a * I
di = r * S * I - a * I
return [ds, di, dr]
###Output
_____no_output_____
###Markdown
Applied for South Carolina using optimal parameters
###Code
# Values for South Carolina
N = population
i0 = 1 # Initial infected
r0 = 0.0
s0 = (N - i0) # Initial Susceptible
t = np.linspace(0, 200, 1000)
#Solve a system of ordinary differential equations using lsoda from the FORTRAN library odepack.
#Solves the initial value problem for stiff or non-stiff systems of first order ode-s:
solution = scipy.integrate.odeint(sir, [s0, i0, r0], t, args = (betaavg, gammaavg))
###Output
_____no_output_____
###Markdown
Calculating other important variables
###Code
def find_ro():
return (s0 * betaavg) / gammaavg
def max_infected(infected):
yval = infected[0].get_ydata()
xval = infected[0].get_xdata()
maxinfected = max(yval)
return maxinfected
def recovered_at_end(recovered):
xval = recovered[0].get_xdata()
yval = recovered[0].get_ydata()
return yval[-1]
def covid_duration(infected):
yval = infected[0].get_ydata()
xval = infected[0].get_xdata()
for i in range(len(yval)):
if (yval[i] == yval[i-1]):
break
return xval[i-1]
###Output
_____no_output_____
###Markdown
Plotting the graph
###Code
# Plotting the graph
plt.figure(figsize = [12, 6])
plt.grid()
plt.xlabel("Days Passed")
plt.ylabel("Population")
plt.title("SIR Model for South Carolina, USA",fontsize=15)
susceptibles = plt.plot(t, solution[:, 0], label = "S(t)")
infectives = plt.plot(t, solution[:, 1], label = "I(t)")
recovered = plt.plot(t, solution[:, 2], label = "R(t)")
sns.lineplot(y=5150000,x=t,label='Total')
plt.show()
print("Duration of the epidemic : " + str(int(covid_duration(infectives))) + " days")
print("Maximum number of infectives : " + str(int(max_infected(infectives)))+" people")
print("Individuals Recovered : " + str(int(round(recovered_at_end(recovered))))+" people")
print("Basic rate of Reproduction : " + str(round(find_ro(), 5)))
###Output
Duration of the epidemic : 349 days
Maximum number of infectives : 1706449 people
Individuals Recovered : 4922897 people
Basic rate of Reproduction : 3.26534
###Markdown
Logistic Regression to Fit Confirmed Cases and See the Curve for Total Infected
###Code
from scipy.optimize import curve_fit
x_data = range(len(d.index))
y_data = d['Infected']
def log_curve(x, k, x_0, ymax):
return ymax / (1 + np.exp(-k*(x-x_0)))
# Fit the curve
popt, pcov = curve_fit(log_curve, x_data, y_data, bounds=([0,0,0],np.inf), maxfev=50000)
estimated_k, estimated_x_0, ymax= popt
# Plot the fitted curve
k = estimated_k
x_0 = estimated_x_0
y_fitted = log_curve(range(0,365), k, x_0, ymax)
print("Days after which infected curve hits inflection point is : "+str(round(x_0,1)))
print("Maximum number of infected people are : "+str(int(ymax)*10))
plt.figure(figsize=(10,8))
plt.title("Logistic Regression Curve Fit for Total Infected Cases",fontsize=15)
plt.plot(range(0,365), y_fitted, '-.', label="Fitted Curve")
plt.plot(x_data, y_data,'o' ,label="Confirmed Data")
###Output
Days after which infected curve hits inflection point is : 164.4
Maximum number of infected people are : 1650150
###Markdown
Logistic regression fits the data well. Hence, we can predict that =========================END OF NOTEBOOK==============================
###Code
# Coded by : Kartikey Sharma
# Jack of all trades, Master of some.
# Veni. Vidi. Vici.
###Output
_____no_output_____ |
notebooks/umap_example.ipynb | ###Markdown
Penguins Dataset
###Code
sns.set(style='white', context='notebook', rc={'figure.figsize':(14,10)})
penguins = pd.read_csv("https://github.com/allisonhorst/palmerpenguins/raw/5b5891f01b52ae26ad8cb9755ec93672f49328a8/data/penguins_size.csv")
penguins.head()
penguins = penguins.dropna()
penguins.species_short.value_counts()
sns.pairplot(penguins, hue='species_short')
reducer = umap.UMAP()
penguin_data = penguins[
[
"culmen_length_mm",
"culmen_depth_mm",
"flipper_length_mm",
"body_mass_g",
]
].values
scaled_penguin_data = StandardScaler().fit_transform(penguin_data)
embedding = reducer.fit_transform(scaled_penguin_data)
embedding.shape
plt.scatter(
embedding[:, 0],
embedding[:, 1],
c=[sns.color_palette()[x] for x in penguins.species_short.map({"Adelie":0, "Chinstrap":1, "Gentoo":2})])
plt.gca().set_aspect('equal', 'datalim')
plt.title('UMAP projection of the Penguin dataset', fontsize=24)
###Output
_____no_output_____ |
_notebooks/2022-01-17-movierec.ipynb | ###Markdown
Movie Recommender System Collaborative filtering (matrix factorization). You are an online retailer/travel agent/movie review website, and you would like to help the visitors of your website explore more of your products/destinations/movies. You have data which either describes the different products/destinations/films, or past transactions/trips/views (or preferences) of your visitors (or both!). You decide to leverage that data to provide relevant and meaningful recommendations. This notebook implements a simple collaborative filtering system using factorization of the user-item matrix.
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
ratings="https://github.com/couturierc/tutorials/raw/master/recommender_system/data/ratings.csv"
movies="https://github.com/couturierc/tutorials/raw/master/recommender_system/data/movies.csv"
# If data stored locally
# ratings="./data/ratings.csv"
# movies="./data/movies.csv"
df_ratings = pd.read_csv(ratings, sep=',')
df_ratings.columns = ['userId', 'itemId', 'rating', 'timestamp']
df_movies = pd.read_csv(movies, sep=',')
df_movies.columns = ['itemId', 'title', 'genres']
df_movies.head()
df_ratings.head()
###Output
_____no_output_____
###Markdown
Quick exploration. Hints: use df.describe(), df.column_name.hist(), scatterplot matrix (sns.pairplot(df[column_range])), correlation matrix (sns.heatmap(df.corr())), check duplicates, ...
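A minimal sketch of what that exploration could look like (far from exhaustive):
```python
# Basic summary statistics and the distribution of the ratings
print(df_ratings.describe())
df_ratings.rating.hist()

# Any duplicated (userId, itemId) pairs?
print(df_ratings.duplicated(subset=['userId', 'itemId']).sum())
```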
###Code
# Start your exploration -- use as many cells as you need !
###Output
_____no_output_____
###Markdown
Obtain the user-item matrix by pivoting df_ratings
###Code
##### FILL HERE (1 line) ######
df_user_item = NULL # Use df.pivot, rows ~ userId's, columns ~ itemId's
################################
# Sort index/rows (userId's) and columns (itemId's)
df_user_item.sort_index(axis=0, inplace=True)
df_user_item.sort_index(axis=1, inplace=True)
###Output
_____no_output_____
###Markdown
This matrix has **many** missing values:
###Code
df_user_item.head()
df_user_item.describe()
###Output
_____no_output_____
###Markdown
For instance, rating for userId=1 for movies with itemId 1 to 10:
###Code
df_user_item.loc[1][:10]
# df_user_item.loc[1].dropna().sort_values(ascending=False)
###Output
_____no_output_____
###Markdown
Save the movie ids for user 1 for later:
###Code
item_rated_user_1 = df_user_item.loc[1].dropna().index
item_rated_user_1
###Output
_____no_output_____
###Markdown
We want to find the matrix of rank $k$ which is closest to the original matrix. What not to do: fill with 0's or mean values, then Singular Value Decomposition (SVD) (Adapted from https://github.com/beckernick/matrix_factorization_recommenders/blob/master/matrix_factorization_recommender.ipynb). Singular Value Decomposition decomposes a matrix $R$ into the best lower-rank (i.e. smaller/simpler) approximation of the original matrix $R$. Mathematically, it decomposes $R$ into two unitary matrices and a diagonal matrix:$$\begin{equation}R = U\Sigma V^{T}\end{equation}$$where: - $R$ is the users' ratings matrix, - $U$ is the user "features" matrix, it represents how much users "like" each feature, - $\Sigma$ is the diagonal matrix of singular values (essentially weights), - $V^{T}$ is the movie "features" matrix, it represents how relevant each feature is to each movie, with $U$ and $V^{T}$ orthogonal.
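As a quick illustration on a tiny matrix (numbers are arbitrary), NumPy's `svd` recovers the factors and their product reconstructs the original matrix:
```python
import numpy as np

M = np.array([[1., 2.], [3., 4.], [5., 6.]])
U, s, Vt = np.linalg.svd(M, full_matrices=False)
print(np.allclose(M, U.dot(np.diag(s)).dot(Vt)))  # True: the product reconstructs M
```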
###Code
df_user_item = df_user_item.fillna(0)
df_user_item.head()
R = df_user_item.values
R
###Output
_____no_output_____
###Markdown
Apply SVD to R (e.g. using NumPy or SciPy)
###Code
from scipy.sparse.linalg import svds
U, sigma, Vt = svds(R, k = 50)
###Output
_____no_output_____
###Markdown
What do $U$, $\Sigma$, $V^T$ look like?
###Code
U
sigma
Vt
###Output
_____no_output_____
###Markdown
Get recommendations:
###Code
# First make sigma a diagonal matrix:
sigma = np.diag(sigma)
R_after_svd = np.dot(np.dot(U, sigma), Vt)
R_after_svd
###Output
_____no_output_____
###Markdown
Drawbacks of this approach: - the missing values (here filled with 0's) are feedback that the user did not give; we cannot consider them a negative/null rating. - the dense matrix is huge, so applying SVD is not scalable. Approximate SVD with stochastic gradient descent (SGD). This time, we do **not** fill missing values. We inject $\Sigma$ into $U$ and $V$, and try to find $P$ and $Q$ such that $\widehat{R} = P Q^{T}$ is close to $R$ **for the item-user pairs already rated**. A first function to simplify the entries (userId/itemId): we map the set of original userId's and itemId's to consecutive integer indices (see `encode_ids` below).
###Code
def encode_ids(data):
    '''Takes a rating dataframe and returns:
    - a simplified rating dataframe with ids in range(nb unique id) for users and movies
    - 2 mapping dictionaries
'''
data_encoded = data.copy()
users = pd.DataFrame(data_encoded.userId.unique(),columns=['userId']) # df of all unique users
dict_users = users.to_dict()
inv_dict_users = {v: k for k, v in dict_users['userId'].items()}
items = pd.DataFrame(data_encoded.itemId.unique(),columns=['itemId']) # df of all unique items
dict_items = items.to_dict()
inv_dict_items = {v: k for k, v in dict_items['itemId'].items()}
data_encoded.userId = data_encoded.userId.map(inv_dict_users)
data_encoded.itemId = data_encoded.itemId.map(inv_dict_items)
return data_encoded, dict_users, dict_items
###Output
_____no_output_____
###Markdown
Here is the procedure we would like to implement in the function SGD(): 1. initialize P and Q to random values 2. for $n_{epochs}$ passes on the data: * for all known ratings $r_{ui}$ * compute the error between the predicted rating $p_u \cdot q_i$ and the known rating $r_{ui}$: $$ err = r_{ui} - p_u \cdot q_i $$ * update $p_u$ and $q_i$ with the following rule: $$ p_u \leftarrow p_u + \alpha \cdot err \cdot q_i $$ $$ q_i \leftarrow q_i + \alpha \cdot err \cdot p_u$$
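Before filling in the function below, here is a toy, self-contained illustration of that inner update rule (synthetic numbers only, not the MovieLens data — treat it as a sketch, not the only valid answer):
```python
import numpy as np

# Toy setting: 3 users, 4 items, a handful of known ratings as (user, item, rating) triples
toy_ratings = [(0, 0, 4.0), (0, 2, 1.0), (1, 1, 5.0), (2, 3, 3.0)]
n_factors, alpha = 2, 0.01
p = np.random.normal(0, .1, (3, n_factors))   # user factors
q = np.random.normal(0, .1, (4, n_factors))   # item factors

for epoch in range(10):
    for u, i, r_ui in toy_ratings:
        err = r_ui - np.dot(p[u], q[i])    # error on this known rating
        p[u] = p[u] + alpha * err * q[i]   # update user factors
        q[i] = q[i] + alpha * err * p[u]   # update item factors

print(np.dot(p[0], q[0]))  # current estimate of user 0's rating of item 0
```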
###Code
# Adapted from http://nicolas-hug.com/blog/matrix_facto_4
def SGD(data, # dataframe containing 1 user|item|rating per row
n_factors = 10, # number of factors
alpha = .01, # number of factors
n_epochs = 3, # number of iteration of the SGD procedure
):
'''Learn the vectors P and Q (ie all the weights p_u and q_i) with SGD.
'''
# Encoding userId's and itemId's in data
data, dict_users, dict_items = encode_ids(data)
##### FILL HERE (2 lines) ######
n_users = NULL # number of unique users
n_items = NULL # number of unique items
################################
# Randomly initialize the user and item factors.
p = np.random.normal(0, .1, (n_users, n_factors))
q = np.random.normal(0, .1, (n_items, n_factors))
# Optimization procedure
for epoch in range(n_epochs):
print ('epoch: ', epoch)
# Loop over the rows in data
for index in range(data.shape[0]):
row = data.iloc[[index]]
u = int(row.userId) # current userId = position in the p vector (thanks to the encoding)
i = int(row.itemId) # current itemId = position in the q vector
r_ui = float(row.rating) # rating associated to the couple (user u , item i)
##### FILL HERE (1 line) ######
err = NULL # difference between the predicted rating (p_u . q_i) and the known ratings r_ui
################################
# Update vectors p_u and q_i
##### FILL HERE (2 lines) ######
p[u] = NULL # cf. update rule above
q[i] = NULL
################################
return p, q
def estimate(u, i, p, q):
'''Estimate rating of user u for item i.'''
##### FILL HERE (1 line) ######
return NULL #scalar product of p[u] and q[i] /!\ dimensions
################################
p, q = SGD(df_ratings)
###Output
_____no_output_____
###Markdown
Get the estimate for all user-item pairs: Get the user-item matrix filled with predicted ratings:
###Code
df_user_item_filled = pd.DataFrame(np.dot(p, q.transpose()))
df_user_item_filled.head()
###Output
_____no_output_____
###Markdown
However, it uses the encoded ids; we need to retrieve the mapping from encoded ids to original ids, and apply it:
###Code
df_ratings_encoded, dict_users, dict_items = encode_ids(df_ratings)
df_user_item_filled.rename(columns=(dict_items['itemId']), inplace=True)
df_user_item_filled.rename(index=(dict_users['userId']), inplace=True)
# Sort index/rows (userId's) and columns (itemId's)
df_user_item_filled.sort_index(axis=0, inplace=True)
df_user_item_filled.sort_index(axis=1, inplace=True)
df_user_item_filled.head()
###Output
_____no_output_____
###Markdown
Originally available ratings for user 1:
###Code
df_user_item.loc[1][:10]
###Output
_____no_output_____
###Markdown
Estimated ratings after the approximate SVD:
###Code
df_user_item_filled.loc[1][:10]
###Output
_____no_output_____
###Markdown
Give recommendations to a user. For instance, 10 recommended movies for user 1:
###Code
recommendations = list((df_user_item_filled.loc[10]).sort_values(ascending=False)[:10].index)
recommendations
df_movies[df_movies.itemId.isin(recommendations)]
###Output
_____no_output_____
###Markdown
vs the ones that were rated initially:
###Code
already_rated = list((df_user_item.loc[10]).sort_values(ascending=False)[:10].index)
already_rated
df_movies[df_movies.itemId.isin(already_rated)]
###Output
_____no_output_____
###Markdown
These are all the movies in descending order of predicted rating. Let's remove the ones that were already rated. --- To put this into production, you'd first separate the data into a training and a validation set and optimize the number of latent factors (n_factors) by minimizing the Root Mean Square Error. It is easier to use a framework that allows you to do this, do cross-validation, grid search, etc. Gradient Descent SVD using Surprise
###Code
!pip install surprise
#!pip install scikit-surprise # if the first line does not work
# from surprise import Reader, Dataset, SVD, evaluate
# Following Surprise documentation examples
# https://surprise.readthedocs.io/en/stable/getting_started.html
from surprise import Reader, Dataset, SVD, evaluate, NormalPredictor
from surprise.model_selection import cross_validate
from collections import defaultdict
# As we're loading a custom dataset, we need to define a reader.
reader = Reader(rating_scale=(0.5, 5))
# The columns must correspond to user id, item id and ratings (in that order).
data = Dataset.load_from_df(df_ratings[['userId', 'itemId', 'rating']], reader)
# We'll use the famous SVD algorithm.
algo = SVD()
# Run 5-fold cross-validation and print results
cross_validate(algo, data, measures=['RMSE', 'MAE'], cv=5, verbose=True)
###Output
_____no_output_____
###Markdown
Tune algorithm parameters with GridSearchCV
###Code
from surprise.model_selection import GridSearchCV
param_grid = {'n_epochs': [5, 10], 'lr_all': [0.002, 0.005],
'reg_all': [0.4, 0.6]}
gs = GridSearchCV(SVD, param_grid, measures=['rmse', 'mae'], cv=3)
gs.fit(data)
# best RMSE score
print(gs.best_score['rmse'])
# combination of parameters that gave the best RMSE score
print(gs.best_params['rmse'])
# We can now use the algorithm that yields the best rmse:
algo = gs.best_estimator['rmse']
trainset = data.build_full_trainset()
algo.fit(trainset)
algo.predict(621,1)
df_data = data.df
df_data = df_data.join(df_movies,how="left", on='itemId',rsuffix='_', lsuffix='')
df_data[df_data['userId']==1].sort_values(by = 'rating',ascending=False)[:10]
# From Surprise documentation: https://surprise.readthedocs.io/en/stable/FAQ.html
def get_top_n(predictions, n=10):
'''Return the top-N recommendation for each user from a set of predictions.
Args:
predictions(list of Prediction objects): The list of predictions, as
returned by the test method of an algorithm.
n(int): The number of recommendation to output for each user. Default
is 10.
Returns:
A dict where keys are user (raw) ids and values are lists of tuples:
[(raw item id, rating estimation), ...] of size n.
'''
# First map the predictions to each user.
top_n = defaultdict(list)
for uid, iid, true_r, est, _ in predictions:
top_n[uid].append((iid, est))
# Then sort the predictions for each user and retrieve the k highest ones.
for uid, user_ratings in top_n.items():
user_ratings.sort(key=lambda x: x[1], reverse=True)
top_n[uid] = user_ratings[:n]
return top_n
# Predict ratings for all pairs (u, i) that are NOT in the training set.
testset = trainset.build_anti_testset()
predictions = algo.test(testset)
top_n = get_top_n(predictions, n=10)
top_n.items()
# Print the recommended items for all user 1
for uid, user_ratings in top_n.items():
print(uid, [iid for (iid, _) in user_ratings])
if uid == 1:
break
df_movies[df_movies.itemId.isin([318, 750, 1204, 858, 904, 48516, 1221, 912, 1276, 4973])]
###Output
_____no_output_____ |
4-assets/BOOKS/Jupyter-Notebooks/Overflow/27_SimpsonsRule.ipynb | ###Markdown
Physics 256 Simpson's Rule
Last Time
- Solving for the magnetic field
- Approximating integrals
  - Rectangular rule
  - Trapezoidal rule
Today
- Simpson's rule
- Improper and divergent integrals
Setting up the Notebook
###Code
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
plt.style.use('notebook');
%config InlineBackend.figure_format = 'retina'
colors = ["#2078B5", "#FF7F0F", "#2CA12C", "#D72827", "#9467BE", "#8C574B",
"#E478C2", "#808080", "#BCBE20", "#17BED0", "#AEC8E9", "#FFBC79",
"#98E08B", "#FF9896", "#C6B1D6", "#C59D94", "#F8B7D3", "#C8C8C8",
"#DCDC8E", "#9EDAE6"]
###Output
_____no_output_____
###Markdown
Better Quadrature. We want to come up with a better-scaling approximation to the definite integral:\begin{equation}I = \int_a^b f(x) dx\end{equation}where we break up the region of integration into $N$ equally sized regions of size:\begin{equation}\Delta x = \frac{b-a}{N}.\end{equation}Our previous methods approximated the integral of a single panel $I_i = \int_{x_i}^{x_{i+1}} f(x) dx$ using a $0^{th}$ or $1^{st}$ order polynomial. Now, let's use a $2^{nd}$ order polynomial:\begin{equation}P(x) = \alpha + \beta x + \gamma x^2\end{equation}where we need to fix the coefficients $\alpha,\beta,\gamma$ by matching:\begin{equation}\int_{x_i}^{x_{i+2}} P(x) dx \approx \int_{x_i}^{x_{i+2}} f(x) dx .\end{equation}The final answer is: \begin{equation}I_i + I_{i+1} \approx \frac{\Delta x}{3} \left[f(x_i) + 4 f(x_{i+1}) + f(x_{i+2}) \right]\end{equation}which needs to be summed over all panels:\begin{equation}I_{\rm simps} \approx \frac{\Delta x}{3} \left[f(a) + f(b) + 4\sum_{i=1}^{N/2} f(x_{2i-1}) + 2\sum_{i=1}^{N/2-1} f(x_{2i}) \right]\end{equation}Note: we need an even number of panels for this to work. Let's code it up!
###Code
def simpsons_rule(f,x,*params):
    '''Simpson's rule for numerical integration of f(x) over x.'''
a,b = x[0],x[-1]
Δx = x[1] - x[0]
N = x.size
#I = (f(a,*params) + f(b,*params))/3.0
#I += (4.0/3.0)*np.sum([f(a + j*Δx,*params) for j in range(1,N,2)])
#I += (2.0/3.0)*np.sum([f(a + j*Δx,*params) for j in range(2,N,2)])
I = (f(a,*params) + f(b,*params))/3.0
I += (4.0/3.0)*sum([f(a+i*Δx,*params) for i in range(1,N,2)])
I += (2.0/3.0)*sum([f(a+i*Δx,*params) for i in range(2,N,2)])
return Δx*I
###Output
_____no_output_____
###Markdown
Programming challenge Use Simpson's rule to evaluate the error function in the range $x\in [0,1]$. Make sure to keep the panel width $\Delta x$ fixed for each value of $x$. Compare with the exact result from scipy.special.\begin{equation}\mathrm{erf}(x) = \frac{2}{\sqrt{\pi}} \int_0^x \mathrm{e}^{-t^2} dt \end{equation}
###Code
from scipy.constants import pi as π
from scipy.special import erf
def erf_kernel(t):
'''The error function kernel.'''
return (2.0/np.sqrt(π))*np.exp(-t*t)
Δx = 0.001
x = np.linspace(0,1,20)
erf_approx = np.zeros_like(x)
for j,cx in enumerate(x[1:]):
N = int(cx/Δx)
if N % 2: N += 1
x_int = np.linspace(0,cx,N)
erf_approx[j+1] = simpsons_rule(erf_kernel,x_int)
# plot the results and compare with the 'exact' value
plt.plot(x,erf_approx,'o', mec=colors[0], mfc=colors[0], mew=1, ms=8, label="Simpson's Rule")
plt.plot(x,erf(x), color=colors[1],zorder=0, label='scipy.special.erf')
plt.legend(loc='lower right')
plt.xlabel('x')
plt.ylabel('erf(x)')
###Output
_____no_output_____
###Markdown
Improper Integrals Infinite Intervals. Suppose we want to evaluate a definite integral on a semi-infinite interval: $[a,\infty]$ or $[-\infty,b]$. We can proceed by finding a function $\phi$ which maps the semi-infinite region to a finite one.\begin{equation}I = \int_a^b f(x) dx\end{equation}with $a=-\infty$ or $b = \infty$ but not both. (i) $a\ne 0 \text{ and } b \ne 0$: Consider $y = \phi(x) = \frac{1}{x}$, then:\begin{align}I &= \int_a^b f(x) dx \newline &= \int_{1/a}^{1/b} dy \left(-\frac{1}{y^2}\right) f\left(\frac{1}{y}\right) \newline &= \int_{1/b}^{1/a} \frac{dy}{y^2} f\left(\frac{1}{y}\right) .\end{align} (ii) $a = 0 \text{ and } b = \infty$: Consider $y = \phi(x) = \frac{x}{1+x}$, then:\begin{align}y (1+x) &= x \newline x(1-y) &= y \newline x &= \phi^{-1}(y) = \frac{y}{1-y}\end{align}so:\begin{equation}dx = dy \left[\frac{1}{1-y} + \frac{y}{(1-y)^2} \right] \Rightarrow dx = \frac{dy}{(1-y)^2}.\end{equation}Finally,\begin{align}I &= \int_0^\infty f(x) dx \newline &= \int_{0}^{1} \frac{dy}{(1-y)^2} f\left(\frac{y}{1-y}\right) .\end{align} Example: Evaluate the integral:\begin{equation}I = \int_0^\infty \frac{dx}{\sqrt{x^4+1}}.\end{equation}Initially it looks like we have a problem as we can't directly evaluate $y/(1-y)$ when $y\to 1$ numerically. However, define a new function:\begin{align}g(y) &= \frac{1}{(1-y)^2} f\left(\frac{y}{1-y}\right) \newline &= \frac{1}{\sqrt{y^4 + (1-y)^4}}\end{align}which has no numerical singularities.
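The first substitution can be checked with a quick sketch as well, reusing `simpsons_rule` defined earlier (the grid size here is arbitrary): the tail integral $\int_1^\infty dx/\sqrt{x^4+1}$ becomes $\int_0^1 dy/\sqrt{1+y^4}$ under $y = 1/x$.
```python
# Case (i) sketch: int_1^inf dx / sqrt(x^4 + 1)  ->  int_0^1 dy / sqrt(1 + y^4)
def g_tail(y):
    return 1.0 / np.sqrt(1.0 + y**4)

y = np.linspace(0, 1, 101)
print(simpsons_rule(g_tail, y))
```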
###Code
def f(x):
return 1.0/np.sqrt(x**4 + 1)
def g(y):
return 1.0/np.sqrt(y**4 + (1-y)**4)
y = np.linspace(0,1,100000)
print(simpsons_rule(g,y))
###Output
1.85408467737
###Markdown
Scipy.Integrate. Much of what we have learned has already been coded up for you in [`scipy.integrate`](https://docs.scipy.org/doc/scipy-0.18.1/reference/integrate.html) and you should learn how to use these routines. The workhorse is the [`quad`](https://docs.scipy.org/doc/scipy-0.18.1/reference/generated/scipy.integrate.quad.html#scipy.integrate.quad) method, which is fast, highly accurate and very flexible. It returns a tuple giving the result of the integral and an estimate of the error.
###Code
from scipy import integrate
print(integrate.quad(f, 0, np.inf))
###Output
(1.854074677301372, 2.425014482827383e-10)
|
4-Machine_Learning/1-Supervisado/8-Ensembling/ejercicio ensembles.ipynb | ###Markdown
Ensembling exercises. In this exercise you will make predictions on a dataset of diabetic Indian citizens. It is a classification problem in which we will try to predict 1 (diabetic) or 0 (not diabetic). All the variables are numeric. 1. Load the libraries you consider common to the whole notebook
###Code
# Python ≥3.5 is required
import sys
assert sys.version_info >= (3, 5)
# Scikit-Learn ≥0.20 is required
import sklearn
assert sklearn.__version__ >= "0.20"
# Common imports
import numpy as np
import pandas as pd
import os
# To plot pretty figures
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
2. Read the data from [this address](https://raw.githubusercontent.com/jbrownlee/Datasets/master/pima-indians-diabetes.data.csv). The column names are:
```Python
names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
```
###Code
url = "https://raw.githubusercontent.com/jbrownlee/Datasets/master/pima-indians-diabetes.data.csv"
df = pd.read_csv(url)
names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
df.columns = names
df.head()
df.describe()
values = df.values
X = values[:,0:8]
y = values[:,8]
###Output
_____no_output_____
###Markdown
3. Bagging. For this section you will have to create an ensemble using the bagging technique ([BaggingClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.BaggingClassifier.html)), with which you will combine 100 [DecisionTreeClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html)s. Remember to also use [cross validation](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.KFold.html) with 10 k-folds. **For this section and the following ones, there is no need to split into train/test**, to keep things simple. Simply split your data into features and target. Set a seed.
###Code
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedStratifiedKFold
model = BaggingClassifier(
DecisionTreeClassifier(random_state=42), n_estimators= 100,
max_samples = 100, bootstrap = True, random_state=42)
# evaluate the model
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=42)
n_scores = cross_val_score(model, X, y, scoring='accuracy', cv=cv, n_jobs=-1, error_score='raise')
# report performance
print('Accuracy: %.3f (%.3f)' % (np.mean(n_scores), np.std(n_scores)))
###Output
Accuracy: 0.763 (0.046)
###Markdown
4. Random Forest. In this case, train a [RandomForestClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html) with 100 trees and a `max_features` of 3, also with cross-validation.
###Code
from sklearn.ensemble import RandomForestClassifier
rnd_clf = RandomForestClassifier(n_estimators = 100, max_features = 3, random_state = 42)
rnd_clf.fit(X, y)
# evaluate the model
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=42)
n_scores = cross_val_score(rnd_clf, X, y, scoring='accuracy', cv=cv, n_jobs=-1, error_score='raise')
# report performance
print('Accuracy: %.3f (%.3f)' % (np.mean(n_scores), np.std(n_scores)))
###Output
Accuracy: 0.768 (0.051)
###Markdown
5. AdaBoost. Implement an [AdaBoostClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.AdaBoostClassifier.html) with 30 trees.
###Code
from sklearn.ensemble import AdaBoostClassifier
ada_clf = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=1), n_estimators = 30,
algorithm = "SAMME.R", learning_rate=0.5, random_state=42
)
ada_clf.fit(X, y)
###Output
_____no_output_____ |
notebooks/Lab_14_Similitud_Twitter.ipynb | ###Markdown
Twitter Similarity Analysis. Load the file. Tweets from 2016 mentioning BBVA were extracted from the GNIP solution. We install the library needed to read Excel files.
###Code
#pip install openpyxl
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
from networkx.algorithms import community
import time
df = pd.read_excel('../data/Tweets_BBVA.xlsx', index_col="id")
df.head()
###Output
_____no_output_____
###Markdown
We extract the "body" field, which contains the tweet text.
###Code
tweet = df["body"][1:300]
tweet.head()
###Output
_____no_output_____
###Markdown
We create the Term-Document Matrix (TDM) with CountVectorizer
###Code
from sklearn.feature_extraction.text import CountVectorizer
vec = CountVectorizer()
X = vec.fit_transform(tweet)
tdm = pd.DataFrame(X.toarray().transpose(), index=vec.get_feature_names())
tdm.columns = tweet.index
tdm.head()
###Output
_____no_output_____
###Markdown
We create the Term-Document Matrix (TDM) with TfidfVectorizer. This is another option, which takes the Inverse Document Frequency of the terms into account.
###Code
from sklearn.feature_extraction.text import TfidfVectorizer
vectorizer = TfidfVectorizer()
doc_vec = vectorizer.fit_transform(tweet)
tdm2 = pd.DataFrame(doc_vec.toarray().transpose(),
index=vectorizer.get_feature_names())
tdm2.columns = tweet.index
tdm2.head()
###Output
_____no_output_____
###Markdown
We compute the correlation matrix
###Code
matcor = tdm.corr()
matcor.head()
###Output
_____no_output_____
###Markdown
We transform the matrix into a DataFrame used as input for the graph
###Code
cordf = pd.DataFrame()
cordf = pd.DataFrame(columns = ['inicio', 'fin', 'peso'])
for i in matcor.index:
for j in matcor.index:
if i<j:
try:
w=matcor.loc[i,j]
cordf = cordf.append({'inicio' : i, 'fin' : j, 'peso' : w}, ignore_index = True)
except Exception:
pass
###Output
_____no_output_____
###Markdown
We filter out the low correlations and show the highest ones
###Code
cordf = cordf[cordf['peso']>.4]
cordf.sort_values('peso', ascending=False).head(10)
###Output
_____no_output_____
###Markdown
Tweets with a correlation of 1 have exactly the same content
###Code
tweet[tweet.index==44670]
tweet[tweet.index==44681]
###Output
_____no_output_____
###Markdown
We create the graph of relationships between tweets, to visually identify the ones that are similar
###Code
G = nx.from_pandas_edgelist(cordf, source = 'inicio', target = 'fin', edge_attr='peso')
print(nx.info(G))
###Output
_____no_output_____
###Markdown
Create the top_nodes function, which will show the highest values of a dictionary
###Code
def get_top_nodes(cdict, num=5):
    # Return the num entries of the dictionary with the highest values
    top_nodes = dict(
        sorted(cdict.items(), key=lambda x: x[1], reverse=True)[:num]
    )
    return top_nodes
###Output
_____no_output_____
###Markdown
Similarity Visualization. Store the degree of each node in a dictionary
###Code
gdeg=G.degree()
get_top_nodes(dict(gdeg))
###Output
_____no_output_____
###Markdown
Now we visualize the tweets, grouped by similarity
###Code
plt.figure(figsize=(80,45))
pos=nx.spring_layout(G)
edges = G.edges()
weights = [G[u][v]['peso'] for u,v in edges]
nx.draw_networkx(G, width=weights, pos=pos, node_size=[val*10 for(node,val)in gdeg])
plt.show()
###Output
_____no_output_____ |
pyfund/Cap05/Notebooks/Cap5 Metodos.ipynb | ###Markdown
Methods
###Code
# Creating a class called Circulo (circle)
class Circulo():
    # The value of pi is constant
pi = 3.14
    # When an object of this class is created, this method will be executed and
    # the default value of the radius will be 5.
def __init__(self, raio = 5):
self.raio = raio
    # This method computes the area; self accesses the attributes of this same object
def area(self):
return (self.raio * self.raio) * Circulo.pi
    # Method to set a new radius
def setRaio(self, novo_raio):
self.raio = novo_raio
    # Method to get the circle's radius
def getRaio(self):
return self.raio
# Creating the circ object, an instance of the Circulo() class
circ = Circulo()
# Executing a method of the Circulo class
circ.getRaio()
# Creating another object called circ1, an instance of the Circulo() class
# Now overriding the value of the attribute
circ1 = Circulo(7)
# Executing a method of the Circulo class
circ1.getRaio()
# Printing the radius
print('The radius is: ', circ.getRaio())
# Printing the area
print('The area is equal to: ', circ.area())
# Setting a new value for the circle's radius
circ.setRaio(3)
print('Area: ', circ.area())
# Printing the new radius
print('New radius equal to: ', circ.getRaio())
###Output
New radius equal to: 3
|
01_TF_basics_and_linear_regression/tensorflow_basic.ipynb | ###Markdown
TensorFlow Basics. This notebook uses Python 2 syntax; you can use pyenv to install anaconda2-4.4.0 (i.e. the matching Python 2.7 version) to run it. Session. Session is a class for running TensorFlow operations. A Session object encapsulates the environment in which Operation objects are executed, and Tensor objects are evaluated. In this tutorial, we will use a session to print out the value of a tensor. Session can be used as follows:
###Code
import tensorflow as tf
a = tf.constant(100)
with tf.Session() as sess:
print sess.run(a)
#syntactic sugar
print a.eval()
# or
sess = tf.Session()
print sess.run(a)
# print a.eval() # this will print out an error
###Output
100
100
100
###Markdown
Interactive session. An Interactive session is a TensorFlow session for use in interactive contexts, such as a shell. The only difference with a regular Session is that an Interactive session installs itself as the default session on construction. The methods [Tensor.eval()](https://www.tensorflow.org/versions/r0.11/api_docs/python/framework.html#Tensor) and [Operation.run()](https://www.tensorflow.org/versions/r0.11/api_docs/python/framework.html#Operation) will use that session to run ops. This is convenient in interactive shells and IPython notebooks, as it avoids having to pass an explicit Session object to run ops.
###Code
sess = tf.InteractiveSession()
print a.eval() # simple usage
###Output
100
###Markdown
ConstantsWe can use the `help` function to get an annotation about any function. Just type `help(tf.consant)` on the below cell and run it.It will print out `constant(value, dtype=None, shape=None, name='Const')` at the top. Value of tensor constant can be scalar, matrix or tensor (more than 2-dimensional matrix). Also, you can get a shape of tensor by running [tensor.get_shape()](https://www.tensorflow.org/versions/r0.11/api_docs/python/framework.htmlTensor)`.as_list()`. * tensor.get_shape()* tensor.get_shape().as_list()
###Code
a = tf.constant([[1, 2, 3], [4, 5, 6]], dtype=tf.float32, name='a')
print a.eval()
print "shape: ", a.get_shape(), ",type: ", type(a.get_shape())
print "shape: ", a.get_shape().as_list(), ",type: ", type(a.get_shape().as_list()) # this is more useful
###Output
[[ 1. 2. 3.]
[ 4. 5. 6.]]
shape: (2, 3) ,type: <class 'tensorflow.python.framework.tensor_shape.TensorShape'>
shape: [2, 3] ,type: <type 'list'>
###Markdown
Basic functionsThere are some basic functions we need to know. Those functions will be used in next tutorial **3. feed_forward_neural_network**.* tf.argmax* tf.reduce_sum* tf.equal* tf.random_normal tf.argmax `tf.argmax(input, dimension, name=None)` returns the index with the largest value across dimensions of a tensor.
###Code
a = tf.constant([[1, 6, 5], [2, 3, 4]])
print a.eval()
print "argmax over axis 0"
print tf.argmax(a, 0).eval()
print "argmax over axis 1"
print tf.argmax(a, 1).eval()
###Output
[[1 6 5]
[2 3 4]]
argmax over axis 0
[1 0 0]
argmax over axis 1
[1 2]
###Markdown
tf.reduce_sum`tf.reduce_sum(input_tensor, reduction_indices=None, keep_dims=False, name=None)` computes the sum of elements across dimensions of a tensor. Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in reduction_indices. If `keep_dims` is true, the reduced dimensions are retained with length 1. If `reduction_indices` has no entries, all dimensions are reduced, and a tensor with a single element is returned
###Code
a = tf.constant([[1, 1, 1], [2, 2, 2]])
print a.eval()
print "reduce_sum over entire matrix"
print tf.reduce_sum(a).eval()
print "reduce_sum over axis 0"
print tf.reduce_sum(a, 0).eval()
print "reduce_sum over axis 0 + keep dimensions"
print tf.reduce_sum(a, 0, keep_dims=True).eval()
print "reduce_sum over axis 1"
print tf.reduce_sum(a, 1).eval()
print "reduce_sum over axis 1 + keep dimensions"
print tf.reduce_sum(a, 1, keep_dims=True).eval()
###Output
[[1 1 1]
[2 2 2]]
reduce_sum over entire matrix
9
reduce_sum over axis 0
[3 3 3]
reduce_sum over axis 0 + keep dimensions
[[3 3 3]]
reduce_sum over axis 1
[3 6]
reduce_sum over axis 1 + keep dimensions
[[3]
[6]]
###Markdown
tf.equal`tf.equal(x, y, name=None)` returns the truth value of `(x == y)` element-wise. Note that `tf.equal` supports broadcasting. For more about broadcasting, please see [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
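For instance, broadcasting also lets you compare a matrix against a single row; a small sketch in the same style as the rest of this notebook:
```python
m = tf.constant([[1, 2, 3], [3, 2, 1]])
row = tf.constant([[1, 2, 1]])
print tf.equal(m, row).eval()
# expected:
# [[ True  True False]
#  [False  True  True]]
```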
###Code
a = tf.constant([[1, 0, 0], [0, 1, 1]])
print a.eval()
print "Equal to 1?"
print tf.equal(a, 1).eval()
print "Not equal to 1?"
print tf.not_equal(a, 1).eval()
###Output
[[1 0 0]
[0 1 1]]
Equal to 1?
[[ True False False]
[False True True]]
Not equal to 1?
[[False True True]
[ True False False]]
###Markdown
tf.random_normal`tf.random_normal(shape, mean=0.0, stddev=1.0, dtype=tf.float32, seed=None, name=None)` outputs random values from a normal distribution.
###Code
normal = tf.random_normal([3], stddev=0.1)
print normal.eval()
###Output
[-0.10547373 0.14595924 0.12629835]
###Markdown
VariablesWhen we train a model, we use variables to hold and update parameters. Variables are in-memory buffers containing tensors. They must be explicitly initialized and can be saved to disk during and after training. we can later restore saved values to exercise or analyze the model.* tf.Variable* tf.Tensor.name* tf.all_variables tf.Variable`tf.Variable(initial_value=None, trainable=True, name=None, variable_def=None, dtype=None)` creates a new variable with value `initial_value`.The new variable is added to the graph collections listed in collections, which defaults to `[GraphKeys.VARIABLES]`. If `trainable` is true, the variable is also added to the graph collection `GraphKeys.TRAINABLE_VARIABLES`.
###Code
# variable will be initialized with normal distribution
var = tf.Variable(tf.random_normal([3], stddev=0.1), name='var')
print var.name
tf.initialize_all_variables().run()
print var.eval()
###Output
var:0
[ 0.09533361 -0.01884578 0.02439106]
###Markdown
tf.Tensor.nameWe can call `tf.Variable` and give the same name `my_var` more than once as seen below. Note that `var3.name` prints out `my_var_1:0` instead of `my_var:0`. This is because TensorFlow doesn't allow user to create variables with the same name. In this case, TensorFlow adds `'_1'` to the original name instead of printing out an error message. Note that you should be careful not to call `tf.Variable` giving same name more than once, because it will cause a fatal problem when you save and restore the variables.
###Code
var2 = tf.Variable(tf.random_normal([2, 3], stddev=0.1), name='my_var')
var3 = tf.Variable(tf.random_normal([2, 3], stddev=0.1), name='my_var')
print var2.name
print var3.name
###Output
my_var:0
my_var_1:0
###Markdown
tf.all_variablesUsing `tf.all_variables()`, we can get the names of all existing variables as follows:
###Code
for var in tf.all_variables():
print var.name
###Output
var:0
my_var:0
my_var_1:0
###Markdown
Sharing variablesTensorFlow provides several classes and operations that you can use to create variables contingent on certain conditions.* tf.get_variable* tf.variable_scope* reuse_variables tf.get_variable`tf.get_variable(name, shape=None, dtype=None, initializer=None, trainable=True)` is used to get or create a variable instead of a direct call to `tf.Variable`. It uses an initializer instead of passing the value directly, as in `tf.Variable`. An initializer is a function that takes the shape and provides a tensor with that shape. Here are some initializers available in TensorFlow:* `tf.constant_initializer(value)` initializes everything to the provided value,* `tf.random_uniform_initializer(a, b)` initializes uniformly from [a, b],* `tf.random_normal_initializer(mean, stddev)` initializes from the normal distribution with the given mean and standard deviation.
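As a quick illustration of `tf.constant_initializer` (the cells below use the random-normal initializer instead), a sketch:
```python
c_init = tf.constant_initializer(0.5)
vc = tf.get_variable('vc', shape=[2, 2], initializer=c_init)
tf.initialize_all_variables().run()
print vc.eval()
# expected:
# [[ 0.5  0.5]
#  [ 0.5  0.5]]
```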
###Code
my_initializer = tf.random_normal_initializer(mean=0, stddev=0.1)
v = tf.get_variable('v', shape=[2, 3], initializer=my_initializer)
tf.initialize_all_variables().run()
print v.eval()
###Output
[[-0.07411054 -0.1204523 0.12766932]
[-0.0053311 0.12680909 -0.10410611]]
###Markdown
tf.variable_scope`tf.variable_scope(scope_name)` manages namespaces for names passed to `tf.get_variable`.
###Code
with tf.variable_scope('layer1'):
w = tf.get_variable('v', shape=[2, 3], initializer=my_initializer)
print w.name
with tf.variable_scope('layer2'):
w = tf.get_variable('v', shape=[2, 3], initializer=my_initializer)
print w.name
###Output
layer1/v:0
layer2/v:0
###Markdown
reuse_variablesNote that you should run the cell above only once. If you run the code above more than once, an error message will be printed out: `"ValueError: Variable layer1/v already exists, disallowed."`. This is because we used `tf.get_variable` above, and this function doesn't allow creating variables with the existing names. We can solve this problem by using `scope.reuse_variables()` to get previously created variables instead of creating new ones.
###Code
with tf.variable_scope('layer1', reuse=True):
w = tf.get_variable('v') # Unlike above, we don't need to specify shape and initializer
print w.name
# or
with tf.variable_scope('layer1') as scope:
scope.reuse_variables()
w = tf.get_variable('v')
print w.name
###Output
layer1/v:0
layer1/v:0
###Markdown
Place holderTensorFlow provides a placeholder operation that must be fed with data on execution. If you want to get more details about placeholder, please see [here](https://www.tensorflow.org/versions/r0.11/api_docs/python/io_ops.htmlplaceholder).
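A placeholder can also be given an explicit shape, which makes feed-time shape errors easier to catch; a small sketch (values made up):
```python
import numpy as np
mat = tf.placeholder(tf.float32, shape=[None, 3])  # any number of rows, 3 columns
row_sum = tf.reduce_sum(mat, 1)
print sess.run(row_sum, feed_dict={mat: np.ones((2, 3))})
# expected: [ 3.  3.]
```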
###Code
x = tf.placeholder(tf.int16)
y = tf.placeholder(tf.int16)
add = tf.add(x, y)
mul = tf.mul(x, y)
# Launch default graph.
print "2 + 3 = %d" % sess.run(add, feed_dict={x: 2, y: 3})
print "3 x 4 = %d" % sess.run(mul, feed_dict={x: 3, y: 4})
###Output
2 + 3 = 5
3 x 4 = 12
|
Notebooks/Model2.ipynb | ###Markdown
Subsampling data __Hom__
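The cells in this section assume that `os`, `shutil` and the split directory variables (`train_cats_dir`, `train_dogs_dir`, and so on) were defined in an earlier cell. A minimal sketch of that setup follows; the class sub-folder names are illustrative, and the `*_cats_*`/`*_dogs_*` variable names are leftovers from the cats-vs-dogs template, with "cats" holding the Hom class and "dogs" the No Hom class:
```python
import os, shutil

base_dir = 'CrimeMaps_small'  # also used later by the data generators
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
test_dir = os.path.join(base_dir, 'test')

train_cats_dir = os.path.join(train_dir, 'hom')
train_dogs_dir = os.path.join(train_dir, 'none')
validation_cats_dir = os.path.join(validation_dir, 'hom')
validation_dogs_dir = os.path.join(validation_dir, 'none')
test_cats_dir = os.path.join(test_dir, 'hom')
test_dogs_dir = os.path.join(test_dir, 'none')

for d in [train_cats_dir, train_dogs_dir, validation_cats_dir,
          validation_dogs_dir, test_cats_dir, test_dogs_dir]:
    os.makedirs(d, exist_ok=True)
```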
###Code
original_dataset_dir = 'C:/Users/yeage/Desktop/UMBC/DATA 602/Final/Maps'
import random
from sklearn.model_selection import train_test_split
train, test = train_test_split(random.sample(os.listdir(original_dataset_dir+'/Hom'), 4800), test_size = 1/8)
train, val = train_test_split(train, test_size = 0.1)
for fname in train:
src = os.path.join(original_dataset_dir, 'Hom', fname)
dst = os.path.join(train_cats_dir, fname)
shutil.copyfile(src, dst)
for fname in val:
src = os.path.join(original_dataset_dir, 'Hom', fname)
dst = os.path.join(validation_cats_dir, fname)
shutil.copyfile(src, dst)
for fname in test:
src = os.path.join(original_dataset_dir, 'Hom', fname)
dst = os.path.join(test_cats_dir, fname)
shutil.copyfile(src, dst)
###Output
_____no_output_____
###Markdown
__No Hom__
###Code
train, test = train_test_split(random.sample(os.listdir(original_dataset_dir+'/None'), 4800), test_size = 1/8)
train, val = train_test_split(train, test_size = .1)
original_dataset_dir = original_dataset_dir+'/None'
# Copy the training 'None' (No Hom) images to train_dogs_dir
for fname in train:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(train_dogs_dir, fname)
shutil.copyfile(src, dst)
# Copy the validation 'None' images to validation_dogs_dir
for fname in val:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(validation_dogs_dir, fname)
shutil.copyfile(src, dst)
# Copy the test 'None' images to test_dogs_dir
for fname in test:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(test_dogs_dir, fname)
shutil.copyfile(src, dst)
print('total training Hom images:', len(os.listdir(train_cats_dir)))
print('total training No Hom images:', len(os.listdir(train_dogs_dir)))
print('total validation Hom images:', len(os.listdir(validation_cats_dir)))
print('total validation No Hom images:', len(os.listdir(validation_dogs_dir)))
print('total test Hom images:', len(os.listdir(test_cats_dir)))
print('total test No Hom images:', len(os.listdir(test_dogs_dir)))
###Output
total test No Hom images: 600
###Markdown
CNN with Keras
###Code
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu',
input_shape=(150, 150, 3)))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.summary()
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import TensorBoard
import time
NAME = "MapHomClass-Augment-{}".format(int(time.time()))
tensorboard = TensorBoard(log_dir=".\logs{}".format(NAME))
model.compile(loss='binary_crossentropy',
optimizer=Adam(lr=1e-4),
metrics=['acc'])
###Output
_____no_output_____
###Markdown
Preprocessing
###Code
import tensorflow as tf
tf.test.is_gpu_available()
tf.test.is_built_with_gpu_support()
tf.config.list_physical_devices('GPU')
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# All images will be rescaled by 1./255
train_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
# This is the target directory
train_dir,
# All images will be resized to 150x150
target_size=(150, 150),
batch_size=20,
# Since we use binary_crossentropy loss, we need binary labels
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
validation_dir,
target_size=(150, 150),
batch_size=20,
class_mode='binary')
for data_batch, labels_batch in train_generator:
print('data batch shape:', data_batch.shape)
print('labels batch shape:', labels_batch.shape)
break
#from tensorflow.compat.v1 import ConfigProto
#from tensorflow.compat.v1 import InteractiveSession
#config = ConfigProto()
#config.gpu_options.allow_growth = True
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
history = model.fit(
train_generator,
epochs=10,
validation_data=validation_generator,
validation_steps=20,callbacks=[tensorboard])
###Output
WARNING:tensorflow:sample_weight modes were coerced from
...
to
['...']
WARNING:tensorflow:sample_weight modes were coerced from
...
to
['...']
Train for 378 steps, validate for 20 steps
Epoch 1/10
378/378 [==============================] - 36s 94ms/step - loss: 0.6897 - acc: 0.5380 - val_loss: 0.6694 - val_acc: 0.5975
Epoch 2/10
378/378 [==============================] - 9s 23ms/step - loss: 0.6625 - acc: 0.6049 - val_loss: 0.6550 - val_acc: 0.5950
Epoch 3/10
378/378 [==============================] - 8s 22ms/step - loss: 0.6477 - acc: 0.6163 - val_loss: 0.6412 - val_acc: 0.6300
Epoch 4/10
378/378 [==============================] - 8s 22ms/step - loss: 0.6385 - acc: 0.6325 - val_loss: 0.6635 - val_acc: 0.6500
Epoch 5/10
378/378 [==============================] - 8s 22ms/step - loss: 0.6312 - acc: 0.6421 - val_loss: 0.6333 - val_acc: 0.6450
Epoch 6/10
378/378 [==============================] - 8s 22ms/step - loss: 0.6209 - acc: 0.6578 - val_loss: 0.6503 - val_acc: 0.6450
Epoch 7/10
378/378 [==============================] - 9s 22ms/step - loss: 0.6083 - acc: 0.6685 - val_loss: 0.6373 - val_acc: 0.6450
Epoch 8/10
378/378 [==============================] - 8s 22ms/step - loss: 0.5987 - acc: 0.6722 - val_loss: 0.6385 - val_acc: 0.6225
Epoch 9/10
378/378 [==============================] - 9s 23ms/step - loss: 0.5844 - acc: 0.6942 - val_loss: 0.6238 - val_acc: 0.6750
Epoch 10/10
378/378 [==============================] - 8s 22ms/step - loss: 0.5698 - acc: 0.7025 - val_loss: 0.6180 - val_acc: 0.6500
###Markdown
Saving and Loading Models
###Code
from tensorflow import keras
#model.save('IdentifyingHomShootTimespans2HR.h5')
#trained_model = keras.models.load_model('IdentifyingHomShootTimespans.h5')
trained_model = keras.models.load_model('IdentifyingHomShootTimespans2HR.h5')
#! ls
#trained_model.get_layer('dense_2').weights[1]
###Output
_____no_output_____
###Markdown
Data Augmentation
###Code
datagen = ImageDataGenerator(
rotation_range=20,
width_shift_range=0.1,
height_shift_range=0.1,
shear_range=0.1,
zoom_range=0.1,
horizontal_flip=True,
fill_mode='nearest')
os.path#.join(CrimeMaps_small)
original_dataset_dir+'/CrimeMaps_small'
import matplotlib.pyplot as plt
from tensorflow.keras.preprocessing import image
fnames = [os.path.join(train_cats_dir, fname) for fname in os.listdir(train_cats_dir)]
plt.figure(figsize=(10, 10))
image_list = []
## creating the image list as arrays
for img in [image.load_img(img_path, target_size=(150, 150)) for img_path in random.sample(fnames, 3)]:
x = image.img_to_array(img)
x = x.reshape((1,) + x.shape)
## Data Augmentation
j = 0
for batch in datagen.flow(x, batch_size=1):
image_list.append(image.array_to_img(batch[0]))
j+=1
if j % 4==0:
break
## plotting
for i, img in enumerate(image_list):
ax = plt.subplot(3, 4, i + 1)
plt.imshow(img)
plt.axis("off")
###Output
_____no_output_____
###Markdown
__Continue Training with Augmented Data__
###Code
NAME = "MapHomClass-Augment-{}".format(int(time.time()))
tensorboard = TensorBoard(log_dir=".\logs\{}".format(NAME))
train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=20,
width_shift_range=0.1,
height_shift_range=0.1,
shear_range=0.1,
zoom_range=0.1,
horizontal_flip=True,
fill_mode='nearest')
# Note that the validation data should not be augmented!
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
# This is the target directory
train_dir,
# All images will be resized to 150x150
target_size=(150, 150),
batch_size=32,
# Since we use binary_crossentropy loss, we need binary labels
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
validation_dir,
target_size=(150, 150),
batch_size=32,
class_mode='binary')
history = trained_model.fit_generator(
train_generator,
epochs=50,
validation_data=validation_generator,callbacks=[tensorboard])
#trained_model.save('IdentifyingHomShootTimespans_augmented_model.h5')
trained_model.save('IdentifyingHomShootTimespans_augmented_model2HR_Best.h5')
import tensorflow as tf
my_callbacks = [
tf.keras.callbacks.EarlyStopping(min_delta= 0.01, patience=1),
tf.keras.callbacks.ModelCheckpoint(filepath='model.{epoch:02d}-{val_loss:.2f}.h5'),
]
history = trained_model.fit(
train_generator,
epochs=5,
validation_data=validation_generator,
callbacks=my_callbacks)
trained_model = tf.keras.models.load_model("IdentifyingHomShootTimespans_augmented_model2HR_Best.h5")
import numpy as np
import matplotlib.pyplot as plt
test_datagen = ImageDataGenerator(rescale=1./255)
base_dir = 'CrimeMaps_small'
test_dir = os.path.join(base_dir, 'test')
(a) = test_datagen.flow_from_directory(
test_dir,
target_size=(150, 150),
batch_size=15,
class_mode='binary')
b = next(a)
b[1].shape
b[0].shape
plt.imshow(np.reshape(b[0][4], (150, 150, 3)))
label2=[]
for i in range(15):
label2.append(b[1][i])
###Output
_____no_output_____
###Markdown
```python
fig = plt.figure(figsize=(20, 20))
for i in range(15):
    ax = plt.subplot(5, 5, i + 1)
    plt.imshow(b[0][i])
    plt.title(str(label2[i]) + '/' + str(test_labels[i]))
    plt.axis("off")
```
###Code
predictions=trained_model.predict(b)
Lablz=np.round(predictions).tolist()
flat_list = []
for sublist in Lablz:
for item in sublist:
flat_list.append(item)
correct=sum(x == y for x, y in zip(label2,flat_list))
total=len(flat_list)
print("The percentage of accurate matches from the test set is: "+str(round(correct/total*100,2))+"%")
###Output
_____no_output_____
###Markdown
---
###Code
b = next(a)
label2=[]
for i in range(15):
label2.append(b[1][i])
###Output
_____no_output_____
###Markdown
```python
fig = plt.figure(figsize=(20, 20))
for i in range(15):
    ax = plt.subplot(5, 5, i + 1)
    plt.imshow(b[0][i])
    plt.title(str(label2[i]) + '/' + str(test_labels[i]))
    plt.axis("off")
```
###Code
predictions=trained_model.predict(b)
Lablz=np.round(predictions).tolist()
flat_list = []
for sublist in Lablz:
for item in sublist:
flat_list.append(item)
correct=sum(x == y for x, y in zip(label2,flat_list))
total=len(flat_list)
#print("The percentage of accurate matches from the test set is: "+str(round(correct/total*100,2))+"%")
aggacc=[]
for i in range(50):
b = next(a)
label2=[]
for i in range(15):
label2.append(b[1][i])
predictions=trained_model.predict(b)
Lablz=np.round(predictions).tolist()
flat_list = []
for sublist in Lablz:
for item in sublist:
flat_list.append(item)
correct=sum(x == y for x, y in zip(label2,flat_list))
total=len(flat_list)
print("The percentage of accurate matches from the test set is: "+str(round(correct/total*100,2))+"%")
aggacc.append(round(correct/total*100,2))
import statistics
statistics.mean(aggacc)
###Output
_____no_output_____ |
February/Week8/52.ipynb | ###Markdown
Largest BST in a Binary Tree[Original problem](https://mp.weixin.qq.com/s/aZX6PIoVv0gSHXHeIAA9eA) QuestionYou are given the root of a binary tree. Find and return the largest subtree of that tree, which is a valid binary search tree.
###Code
class TreeNode:
''' Class Definition '''
def __init__(self, key):
''' The Constructor'''
self.left = None
self.right = None
self.key = key
def __str__(self):
''' Preorder Traversal '''
answer = str(self.key)
if self.left:
answer += str(self.left)
if self.right:
answer += str(self.right)
return answer
def largest_bst_subtree(root: TreeNode) -> dict:
if root is None:
return {
'size': 0,
'root': None,
'min': float('Inf'),
'max': -float('Inf')
}
left_info = largest_bst_subtree(root.left)
right_info = largest_bst_subtree(root.right)
size_include_itself = 0
if (left_info['root'] == root.left and
right_info['root'] == root.right and
root.key > left_info['max'] and
root.key < right_info['min']):
size_include_itself = left_info['size'] + right_info['size'] + 1
max_size = max(left_info['size'], right_info['size'], size_include_itself)
if left_info['size'] > right_info['size']:
max_root = left_info['root']
else:
max_root = right_info['root']
if max_size == size_include_itself:
max_root = root
return {
'size': max_size,
'root': max_root,
'min': min(left_info['min'], right_info['min'], root.key),
'max': max(left_info['max'], right_info['max'], root.key)
}
# 5
# / \
# 6 7
# / / \
# 2 4 9
node = TreeNode(5)
node.left = TreeNode(6)
node.right = TreeNode(7)
node.left.left = TreeNode(2)
node.right.left = TreeNode(4)
node.right.right = TreeNode(9)
print(largest_bst_subtree(node)['root'])
#749
###Output
749
|
03-Loops_Condicionais_Metodos_Funcoes/Notebooks/01-If-Elif-Else.ipynb | ###Markdown
The If Conditional
###Code
# Condicional If
if 5 > 2:
print("Python funciona!")
# Statement If...Else
if 5 < 2:
print("Python funciona!")
else:
print("Algo está errado!")
6 > 3
3 > 7
4 < 8
4 >= 4
if 5 == 5:
print("Testando Python!")
if True:
print('Parece que Python funciona!')
# Atenção com a sintaxe
if 4 > 3
print("Tudo funciona!")
# Atenção com a sintaxe
if 4 > 3:
print("Tudo funciona!")
###Output
_____no_output_____
###Markdown
Nested Conditionals
###Code
idade = 18
if idade > 17:
print("Você pode dirigir!")
Nome = "Bob"
if idade > 13:
if Nome == "Bob":
print("Ok Bob, você está autorizado a entrar!")
else:
print("Desculpe, mas você não pode entrar!")
idade = 13
Nome = "Bob"
if idade >= 13 and Nome == "Bob":
print("Ok Bob, você está autorizado a entrar!")
idade = 12
Nome = "Bob"
if (idade >= 13) or (Nome == "Bob"):
print("Ok Bob, você está autorizado a entrar!")
###Output
Ok Bob, você está autorizado a entrar!
###Markdown
Elif
###Code
dia = "Terça"
if dia == "Segunda":
print("Hoje fará sol!")
else:
print("Hoje vai chover!")
if dia == "Segunda":
print("Hoje fará sol!")
elif dia == "Terça":
print("Hoje vai chover!")
else:
print("Sem previsão do tempo para o dia selecionado")
###Output
Hoje vai chover!
###Markdown
Logical Operators
###Code
idade = 18
nome = "Bob"
if idade > 17:
print("Você pode dirigir!")
idade = 18
if idade > 17 and nome == "Bob":
print("Autorizado!")
# Usando mais de uma condição na cláusula if
disciplina = input('Digite o nome da disciplina: ')
nota_final = input('Digite a nota final (entre 0 e 100): ')
if disciplina == 'Geografia' and nota_final >= '70':
print('Você foi aprovado!')
else:
print('Lamento, acho que você precisa estudar mais!')
# Usando mais de uma condição na cláusula if e introduzindo Placeholders
disciplina = input('Digite o nome da disciplina: ')
nota_final = input('Digite a nota final (entre 0 e 100): ')
semestre = input('Digite o semestre (1 a 4): ')
if disciplina == 'Geografia' and nota_final >= '50' and int(semestre) != 1:
print('Você foi aprovado em %s com média final %r!' %(disciplina, nota_final))
else:
print('Lamento, acho que você precisa estudar mais!')
###Output
Digite o nome da disciplina: Geografia
Digite a nota final (entre 0 e 100): 40
Digite o semestre (1 a 4): 2
Lamento, acho que você precisa estudar mais!
|
Longitudinal Isolates/(A) Process Longitudinal Samples - FASTQ to VCF.ipynb | ###Markdown
This notebook was made to create (and submit jobs for) JankyPipe, a pipeline that takes FASTQ files from Mycobacterium tuberculosis isolates as input, aligns the reads to H37Rv and calls variants. The output is a VCF file, a lineage call and a Qualimap report. This notebook also submits a job that runs JankyPipe on all of the *Longitudinal* isolates in our study.
###Code
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
%matplotlib inline
import os
import pandas as pd
import numpy as np
from slurmpy import Slurm
import vcf
import shutil
###Output
_____no_output_____
###Markdown
*Function* to launch JankyPipe as a Job
###Code
def Launch_JankyPipe(fqf1 , fqf2 , tag , output_dir , scratch_dir , O2_SLURM_logs_dir):
'''
This script launches a job to call variants for the input fastq files against H37Rv
using a number of packages. The important output (VCF, lineage info files, quality report)
is stored in the output directory while the intermediary files (SAMs, trimmed fastqs, BAM, etc)
are stored in a scratch directory.
'''
#store all commands in a list
commands_list = []
#change directory to scratch
commands_list.append( 'cd ' + scratch_dir )
###################################
### Load Necessary Modules ########
###################################
#load perl
commands_list.append( 'module load perl/5.24.0' )
#load java
commands_list.append( 'module load java/jdk-1.8u112' )
#load BWA
commands_list.append( 'module load bwa/0.7.15' )
#load Samtools
commands_list.append( 'module load samtools/1.3.1' )
#load BCFtools
commands_list.append( 'module load bcftools/1.3.1' )
#load Picard
commands_list.append( 'module load picard/2.8.0' )
#Create Index files for Reference Genome
commands_list.append( 'mkdir RefGen' )
#copy reference genome over to RefGen folder
commands_list.append( 'cp /home/rv76/Farhat_Lab/Reference_Seqs/H37Rv/h37rv.fasta RefGen/TBRefGen.fasta' )
#change directory to RefGen folder
commands_list.append( 'cd RefGen' )
###################################
### Create Index Files for H37Rv ##
###################################
commands_list.append( 'samtools faidx TBRefGen.fasta' )
commands_list.append( 'bwa index TBRefGen.fasta' )
RefGen = scratch_dir + '/RefGen/TBRefGen.fasta' #H37Rv reference
#go back to parent directory
commands_list.append( 'cd ..' )
###################################
### UnZip FastQ files #############
###################################
fqf1_base_name = fqf1.split('/')[-1][0:-9]
fqf2_base_name = fqf2.split('/')[-1][0:-9]
#work with the unzipped files for the rest of the pipeline (after unzipping them)
fqf1_unzipped = scratch_dir + '/{}'.format(fqf1_base_name) + '.fastq'
fqf2_unzipped = scratch_dir + '/{}'.format(fqf2_base_name) + '.fastq'
commands_list.append( 'zcat {0} > {1}'.format(fqf1, fqf1_unzipped) )
commands_list.append( 'zcat {0} > {1}'.format(fqf2, fqf2_unzipped) )
#use the unzipped fastq files now
fqf1 = fqf1_unzipped
fqf2 = fqf2_unzipped
####################################
### PRINSEQ (trim reads) ##########
###################################
#create directory for prinseq in output directory
commands_list.append( 'mkdir ' + output_dir + '/prinseq' )
commands_list.append( 'perl /n/data1/hms/dbmi/farhat/bin/prinseq-lite-0.20.4/prinseq-lite.pl -fastq {0} -fastq2 {1} -out_format 3 -out_good {2}/{3}-trimmed -out_bad null -log {4}/{3}-trimmed.log -min_qual_mean 20 -verbose'.format(fqf1, fqf2, scratch_dir, tag , output_dir+'/prinseq') )
#use newly trimmed fastq files now
fqf1 = scratch_dir + '/{}'.format(tag) + '-trimmed_1.fastq'
fqf2 = scratch_dir + '/{}'.format(tag) + '-trimmed_2.fastq'
######################################
### BWA (align reads to reference) ###
######################################
#create SAM file
samfile = scratch_dir + '/{}.sam'.format(tag)
#run BWA
commands_list.append( 'bwa mem -M {3} {0} {1} > {2}'.format(fqf1 , fqf2 , samfile , RefGen) )
#####################################
### PICARD (sort & convert to BAM) ##
#####################################
#create BAM file
bamfile = scratch_dir + '/{0}.sorted.bam'.format(tag)
commands_list.append( 'java -Xmx16G -jar /n/data1/hms/dbmi/farhat/bin/picard/picard/build/libs/picard.jar SortSam INPUT={0} OUTPUT={1} SORT_ORDER=coordinate'.format(samfile, bamfile) )
####################################
### PICARD (remove duplicates) ####
###################################
#create BAM file with removed duplicates
drbamfile = bamfile.replace(".bam", ".duprem.bam")
#remove duplicates from BAM file
commands_list.append( "java -Xmx32G -jar /n/data1/hms/dbmi/farhat/bin/picard/picard/build/libs/picard.jar MarkDuplicates I={0} O={1} REMOVE_DUPLICATES=true M={2} ASSUME_SORT_ORDER=coordinate".format(bamfile, drbamfile, drbamfile[:-4]+'.metrics') )
####################################
### SAMTOOLS (to index BAM file) ###
####################################
commands_list.append( "samtools index {0}".format(drbamfile) )
######################################
### QUALIMAP (quality of BAM file) ###
######################################
#store quality report, pilon VCF & lineage call information all in Output directory
commands_list.append( 'cd ' + output_dir )
commands_list.append( 'mkdir QualiMap' ) #make a folder for pilon output in output directory
commands_list.append( 'unset DISPLAY' ) #unset JAVA virtual machine variable [http://qualimap.bioinfo.cipf.es/doc_html/faq.html]
commands_list.append( "/n/data1/hms/dbmi/farhat/bin/qualimap_v2.2.1/qualimap bamqc -bam {0} --outdir {1} --outfile {2}.pdf --outformat PDF".format(drbamfile, output_dir+'/QualiMap', tag+'_stats') )
###################################
### PILON (call variants) #########
###################################
#store quality report, pilon VCF & lineage call information all in Output directory
commands_list.append( 'mkdir pilon' ) #make a folder for pilon output in output directory
out_pilon_dir = output_dir + '/pilon/' #variable for pilon output path
commands_list.append( 'java -Xmx32G -jar /n/data1/hms/dbmi/farhat/bin/pilon/pilon-1.22.jar --genome {0} --bam {1} --output {2} --outdir {3} --variant'.format(RefGen, drbamfile, tag, out_pilon_dir) )
#####################################
### Luca's LINEAGE CALLING script ###
#####################################
#create directory
commands_list.append( 'mkdir ' + scratch_dir + '/fast-lineage-caller/' )#make a folder for lineage call in output directory
commands_list.append( 'mkdir ' + output_dir + '/fast-lineage-caller/' )#make a folder for lineage call in scratch directory
#create VRT file
vrtfile = scratch_dir + '/fast-lineage-caller/{}.vrt'.format(tag)
commands_list.append( 'cd ' + scratch_dir + '/fast-lineage-caller' )#change directory to store output in scratch
#convert VCF to VRT
commands_list.append( 'vrtTools-vcf2vrt.py {0} {1} 1'.format(out_pilon_dir+tag+'.vcf', vrtfile) )
#call lineage with SNP database an VRT file
commands_list.append( 'cd ' + output_dir + '/fast-lineage-caller' )#change directory to store output in VCF output
commands_list.append( 'FastLineageCaller-assign2lineage.py /home/rv76/Bio_Pipelines/fast-lineage-caller-master/example/db_snps.tsv ' + vrtfile + ' &> ' + 'lineage_call.txt' )
###############################################################################################################
######################################## SUBMIT as a job to O2 ################################################
###############################################################################################################
#append all commands in a single string to be submitted as a job
JankyPipe_job = ''
for command_i in commands_list:
JankyPipe_job = JankyPipe_job + '\n' + command_i
#directory where you want output + error files
os.chdir(O2_SLURM_logs_dir)
job_name = tag
s = Slurm(job_name , {'partition':'short' , 'n':'1' , 't':'0-6:00:00' , 'mem-per-cpu':'36G' , 'mail-type':'FAIL' , 'mail-user':'[email protected]'})
#submits the job
job_id = s.run(JankyPipe_job)
print job_name + ' : ' + str(job_id)
###Output
_____no_output_____
###Markdown
Longitudinal Samples Pull all relevant sequenced isolate and corresponding FastQ file paths
###Code
sample_annotation = pd.read_csv('/n/data1/hms/dbmi/farhat/Roger/inhost_TB_dynamics_project/CSV_files/sample_annotation_files/cetr_casali_walker_trauner_witney_xu_guerra_bryant_fastq_path_names.csv' , sep = ',').set_index('patient_id')
sample_annotation.head(n=2)
np.shape(sample_annotation)
###Output
_____no_output_____
###Markdown
Create directories for each isolate and launch JankyPipe IMPORTANT PARENT DIRECTORIES - /n/scratch2/rv76/inhost_TB_dynamics_project/JankyPipe/intermediary_files/ [to store intermediate files (unzipped fastq, trimmed fastq, SAM, sorted BAM, etc)]- /n/data1/hms/dbmi/farhat/Roger/inhost_TB_dynamics_project/JankyPipe/output/ [to store final files (pilon VCF, lineage, QualiMap, trim logs)]- /n/data1/hms/dbmi/farhat/Roger/inhost_TB_dynamics_project/JankyPipe/O2_SLURM_logs/ [to store submitted SLURM script, and SLURM error & verbose logs]
###Code
for isolate_i in range(0 , np.shape(sample_annotation)[0]):
isolate_fastq_paths = sample_annotation.iloc[isolate_i , 0]
#paths & names for fastq files
fqf1 = isolate_fastq_paths.split(';')[0]
fqf2 = isolate_fastq_paths.split(';')[1]
#get the tag ID for the fastq files (same as ID for fastq files)
tag = fqf1.split('/')[-1].split('_')[0]
#where pilon VCF and lineage information will be stored [LAB FOLDER]
output_dir = '/n/data1/hms/dbmi/farhat/Roger/inhost_TB_dynamics_project/JankyPipe/output/' + tag
if os.path.exists(output_dir):
shutil.rmtree(output_dir)
os.makedirs(output_dir)
elif not os.path.exists(output_dir):
os.makedirs(output_dir)
#where everything else happens (trimming, aligning, etc.) [SCRATCH FOLDER]
scratch_dir = '/n/scratch2/rv76/inhost_TB_dynamics_project/JankyPipe/intermediary_files/' + tag
if os.path.exists(scratch_dir):
shutil.rmtree(scratch_dir)
os.makedirs(scratch_dir)
elif not os.path.exists(scratch_dir):
os.makedirs(scratch_dir)
#store O2 job log files [LAB FOLDER]
O2_SLURM_logs_dir = '/n/data1/hms/dbmi/farhat/Roger/inhost_TB_dynamics_project/JankyPipe/O2_SLURM_logs/' + tag
if os.path.exists(O2_SLURM_logs_dir):
shutil.rmtree(O2_SLURM_logs_dir)
os.makedirs(O2_SLURM_logs_dir)
elif not os.path.exists(O2_SLURM_logs_dir):
os.makedirs(O2_SLURM_logs_dir)
#Launch JankyPipe after making necessary directories!!!
Launch_JankyPipe(fqf1 , fqf2 , tag , output_dir , scratch_dir , O2_SLURM_logs_dir)
###Output
_____no_output_____
###Markdown
save tags (corresponds to folder names)
###Code
#store tags to each sample
tag_list = []
for isolate_i in range(0 , np.shape(sample_annotation)[0]):
isolate_fastq_paths = sample_annotation.iloc[isolate_i , 0]
#paths & names for fastq files
fqf1 = isolate_fastq_paths.split(';')[0]
#get the tag ID for the fastq files (same as ID for fastq files)
tag = fqf1.split('/')[-1].split('_')[0]
tag_list.append(tag)
sample_annotation['tag'] = tag_list
#store as CSV
sample_annotation.to_csv('/n/data1/hms/dbmi/farhat/Roger/inhost_TB_dynamics_project/CSV_files/sample_annotation_files/cetr_casali_walker_trauner_witney_xu_guerra_bryant_fastq_path_names_and_JankyPipe_tags.csv' , sep = ',')
sample_annotation.head(n=2)
###Output
_____no_output_____
###Markdown
Determine if jobs ran successfully or not
###Code
successful_run = []
for isolate_i in range(0 , np.shape(sample_annotation)[0]):
#get the tag ID for the fastq files (same as ID for fastq files)
tag = sample_annotation.tag[isolate_i]
#where pilon VCF and lineage information will be stored [LAB FOLDER]
output_dir = '/n/data1/hms/dbmi/farhat/Roger/inhost_TB_dynamics_project/JankyPipe/output/' + tag
#check to see 'Lineage Call' folder exists in the output directory (last thing that is run in JankyPipe)
if os.path.exists(output_dir + '/fast-lineage-caller/'):
successful_run.append('yes')
else:
successful_run.append('no')
sample_annotation['successful_run'] = successful_run
sample_annotation[sample_annotation.successful_run == 'no']
###Output
_____no_output_____
###Markdown
Re-run isolates that hit a run-time limit through the pipeline
###Code
#isolates that don't have a lineage-call directory didn't finish running through pipeline
sample_annotation_ReRun = sample_annotation[sample_annotation.successful_run == 'no']
#if path already exists, remove current contents, then recreate empty directory
#if path doesn't exist, create new directory
for isolate_i in range(0 , np.shape(sample_annotation_ReRun)[0]):
isolate_fastq_paths = sample_annotation_ReRun.iloc[isolate_i , 0]
#paths & names for fastq files
fqf1 = isolate_fastq_paths.split(';')[0]
fqf2 = isolate_fastq_paths.split(';')[1]
#get the tag ID for the fastq files (same as ID for fastq files)
tag = fqf1.split('/')[-1].split('_')[0]
#where pilon VCF and lineage information will be stored [LAB FOLDER]
output_dir = '/n/data1/hms/dbmi/farhat/Roger/inhost_TB_dynamics_project/JankyPipe/output/' + tag
if os.path.exists(output_dir):
shutil.rmtree(output_dir)
os.makedirs(output_dir)
elif not os.path.exists(output_dir):
os.makedirs(output_dir)
#where everything else happens (trimming, aligning, etc.) [SCRATCH FOLDER]
scratch_dir = '/n/scratch2/rv76/inhost_TB_dynamics_project/JankyPipe/intermediary_files/' + tag
if os.path.exists(scratch_dir):
shutil.rmtree(scratch_dir)
os.makedirs(scratch_dir)
elif not os.path.exists(scratch_dir):
os.makedirs(scratch_dir)
#store O2 job log files [LAB FOLDER]
O2_SLURM_logs_dir = '/n/data1/hms/dbmi/farhat/Roger/inhost_TB_dynamics_project/JankyPipe/O2_SLURM_logs/' + tag
if os.path.exists(O2_SLURM_logs_dir):
shutil.rmtree(O2_SLURM_logs_dir)
os.makedirs(O2_SLURM_logs_dir)
elif not os.path.exists(O2_SLURM_logs_dir):
os.makedirs(O2_SLURM_logs_dir)
#Launch JankyPipe after making necessary directories!!!
Launch_JankyPipe(fqf1 , fqf2 , tag , output_dir , scratch_dir , O2_SLURM_logs_dir)
###Output
submitted: Submitted batch job 31784984
submitted: Submitted batch job 31784986
submitted: Submitted batch job 31784988
submitted: Submitted batch job 31784990
###Markdown
Scrape and Analyze Mean Coverage Import Sample Annotation file for filtered *longitudinal* isolate pairs
###Code
sample_annotation = pd.read_csv('/n/data1/hms/dbmi/farhat/Roger/inhost_TB_dynamics_project/CSV_files/sample_annotation_files/Longitudinal_fastq_path_names_and_JankyPipe_tags_filtered_final.csv' , sep = ',').set_index('patient_id')
sample_annotation.head()
np.shape(sample_annotation)
from itertools import compress
import time
import sys
import pickle
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.ticker as ticker
from pylab import plot, show, savefig, xlim, figure, hold, ylim, legend, boxplot, setp, axes
from itertools import compress
from pylab import MaxNLocator
import seaborn as sns; sns.set()
from matplotlib.colors import LogNorm
from matplotlib import gridspec
import ast
import itertools
import seaborn as sns
from sklearn.preprocessing import StandardScaler
#genomic data directory
rolling_DB_dir = '/n/data1/hms/dbmi/farhat/Roger/inhost_TB_dynamics_project/JankyPipe/output/'
#get all folders (each folder corresponds to a different sequenced isolate)
isolate_directories = list(sample_annotation.tag)
#dictionary that stores the mean coverage for each isolate (that successfully ran through megapipe2.0) from QUALIMAP
mean_coverage_dict = {} #key: isolate_ID , value: mean coverage
#iterate through each sequenced isolate
isolate_i = 0
for isolate_ID in isolate_directories:
#directory that stores files for each sequenced isolate
directory_for_sequenced_isolate = rolling_DB_dir + isolate_ID
#check to see if megapipe successfully ran on sequenced isolate
try:
#existence of a PILON and QUALIMAP directories and corresponding VCF file [there's also an option for FAST-LINEAGE-CALLER]
if ( 'pilon' in os.listdir(directory_for_sequenced_isolate) ) and ( 'QualiMap' in os.listdir(directory_for_sequenced_isolate) ):
#existence of a VCF and GENOME-QUALITY files in relevent directories [there's also an option for LINEAGE]
if ( 'vcf' in list( itertools.chain.from_iterable( [filename.split('.') for filename in os.listdir(directory_for_sequenced_isolate + '/pilon/')] ) ) ) and ( 'genome_results.txt' in os.listdir(directory_for_sequenced_isolate + '/QualiMap/') ):
#we have a valid VCF and Quality-Map (and Lineage file?) file so megapipe ran successfully, let's keep the variant call information for this sequenced isolate and look for qualimap, lineage call data as well
#QUALIMAP DATA
########################################################################################################################
#look for qualimap output txt file that has mean coverage & mean read length
qualimap_BAM_file_stats = directory_for_sequenced_isolate + '/QualiMap/' + 'genome_results.txt'
#parse qualimap txt file and store the mean coverage for the BWA mapping (BAM file) & mean read length
with open(qualimap_BAM_file_stats ,'r') as f:
#iterate through lines in text file
for stat_per_line in f:
#find the mean coverage for mapping
if 'mean coverageData' in stat_per_line:
mean_coverage = float( stat_per_line.split('=')[-1][:-2].replace(',' , '') )
mean_coverage_dict[isolate_ID] = mean_coverage
break #once we have mean coverage
########################################################################################################################
#keep track of progress
isolate_i += 1
if isolate_i % np.ceil(0.05*len(isolate_directories)) == 0:
print float(isolate_i) / float(len(isolate_directories))
except OSError: #hit some file that is not another directory with genomic data
continue
mean_coverage_DF = pd.DataFrame()
mean_coverage_series = pd.Series(mean_coverage_dict)
mean_coverage_DF['mean_coverage'] = mean_coverage_series
mean_coverage_DF['isolate_ID'] = mean_coverage_DF.index
mean_coverage_DF.head()
np.shape(mean_coverage_DF)
###Output
_____no_output_____
###Markdown
Average Coverage across isolates
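The next cell reports the overall mean; to see the spread across isolates, a quick histogram sketch (reusing the matplotlib import above) could be:
```python
fig, ax = plt.subplots(figsize=(8, 4))
ax.hist(list(mean_coverage_DF.mean_coverage), bins=40)
ax.set_xlabel('mean coverage')
ax.set_ylabel('number of isolates')
plt.show()
```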
###Code
mean_coverage_DF.mean_coverage.mean()
###Output
_____no_output_____ |
api_pagarme_python.ipynb | ###Markdown
Classes and Functions
###Code
from datetime import datetime
import sys
import time     # used below for time.time() and time.sleep()
import csv      # used below to write the CSV exports
import horario  # local helper module with date utilities (assumed to be available on the path)
# `pm` (the pagar.me client) is assumed to be imported and authenticated in an earlier cell,
# e.g. `import pagarme as pm` plus the API key setup.
data_atual = horario.data_atual
ano_atual = horario.ano_atual
class Transacao:
def __init__(self,transacao):
self.objeto = transacao['object'] # string
self.id = transacao['id'] # Integer
self.id_assinatura = transacao['subscription_id'] # integer
self.tid = transacao['tid'] # Integer
self.status = transacao['status'] # string
self.usuario = transacao['customer'] # dict | objeto usuario
self.email = self.usuario['email'] # string
self.data_criacao = transacao['date_created'] # str
self.data_update = transacao['date_updated'] # str
self.valor = transacao['amount'] / 100 # Integer, vira float
self.valor_captado = transacao['paid_amount'] / 100 # Integer, vira float
self.valor_estornado = transacao['refunded_amount'] / 100 # Integer, vira float
self.parcelas = transacao['installments'] # Integer
self.cobranca = transacao['billing'] # dict | objeto cobranca
self.endereco = transacao['address'] # dict | objeto endereço
self.completo = transacao # dict | objeto transacao
def get_objeto(self):
"""
Checa se é uma transacao mesmo, ele devolve o 'object' dela como transaction.
"""
if self.objeto.lower() != "transaction":
print(f"{self.objeto}, não é uma transacao, cuidado em tio, f")
return False
else:
print("Chamou o var e confirmou, segue o jogo, é transação!")
return True
# FUNCOES DE INFORMAÇÕES BASICAS
def get_info_basica(self):
"""
Retorna uma tupla, com as informações basicas da transacao, como a data(updated), valor e status.
"""
# para devolver o valor em string normal no formato yyyy-mm-dd
data_simples = datetime.strptime(self.data_update,"%Y-%m-%dT%H:%M:%S.%fZ")
data_simples = f"{data_simples.year}-{data_simples.month}-{data_simples.day}"
return self.data_update, self.valor, self.status, self.id, self.email
def get_valores(self):
"""
Retorna um dicionario com os valores. Valor Pago, Valor Capturado e Valor Estornado.
"""
_dicionario_valores = {"valor_brl" : self.valor,
"valor_captado" : self.valor_captado,
"valor_estornado" : self.valor_estornado}
return _dicionario_valores
def get_data_update(self):
"""
Passamos a data que está em formato de string e datetime, para unix timestamp.
"""
_data = self.data_update
_data = datetime.strptime(_data,"%Y-%m-%dT%H:%M:%S.%fZ")
# tiramos 10800 pois o pagar.me devolve na hora UTC 0
# (timestamp(_data) - 10800)
_data = (datetime.timestamp(_data) - 10800) * 1000
return _data
def get_data_created(self):
"""
Passamos a data que está em formato de string e datetime, para unix timestamp.
"""
_data = self.data_criacao
_data = datetime.strptime(_data,"%Y-%m-%dT%H:%M:%S.%fZ")
# tiramos 10800 pois o pagar.me devolve na hora UTC 0
# (timestamp(_data) - 10800)
_data = (datetime.timestamp(_data) - 10800) * 1000
return _data
def get_endereco(self):
"""
Retorna uma tupla com (cidade, estado, pais e CEP).
"""
if self.endereco == None: # se n tiver nada no endereco, busca em cobrancas
if self.cobranca != None: # se até o objeto de cobrança tiver limpo, passa vazio
# nao vamos utilizar o endereço de entrega como parametro para endereço
_cidade = self.cobranca['address']['city']
_estado = self.cobranca['address']['state']
_pais = self.cobranca['address']['country']
_cep = self.cobranca['address']['zipcode']
else:
_cidade = " "
_estado = " "
_pais = " "
_cep = " "
else:
_cidade = self.endereco['city']
_estado = self.endereco['state']
_pais = self.endereco['country']
_cep = self.endereco['zipcode']
return _cidade,_estado,_pais,_cep
# funcao para criar um dicionario e utilizar o pandas para analisar
def get_dicionario_transacao(self):
_cidade, _estado, *resto = self.get_endereco()
_dicionario_transacao = {
"status": self.status, # Status
"id": str(self.id), # ID
"subscription_id": str(self.id_assinatura), # ID da Assinatura
"nome": self.usuario['name'], # Nome
"email": self.email, # Email
"valor_brl": self.valor, # Valor (R$)
"valor_captura_brl": self.valor_captado, # Valor Capturado (R$
"valor_estornado_brl": self.valor_estornado, # Valor Estornado (R$)
"num_parcelas": self.parcelas, # Número de Parcelas
"TID" : str(self.tid), # TID
"cidade": _cidade, # Cidade
"estado": _estado, # Estado
"data_criacao": self.data_criacao, # Data
"data_atualizacao": self.data_update, # Última Atualização
}
return _dicionario_transacao
# para criar lista de público para o facebook
def get_dados_facebook(self):
email = self.usuario['email']
telefone = self.usuario['phone_numbers']
if type(telefone) == list:
telefone = telefone[0]
nome = self.usuario['name'].split()
n_nome = len(nome)
if n_nome == 1:
fn = nome[0]
ln = " "
elif 0 >= n_nome:
fn = " "
ln = " "
else:
fn = nome[0]
ln = " ".join(nome[1:]) #[_ for _ in nome[1:]]
# dados do endereço
# chamando o método de endereço e passando em cada var referente
cidade, estado, pais, cep = self.get_endereco()
# yyyy-mm-dd
data_nasci = self.usuario['birthday']
if data_nasci == None:
ano_nasci = " "
idade = " "
data_nasci = " "
else:
data_nasci_datetime = datetime.strptime(data_nasci,"%Y-%m-%d")
ano_nasci = data_nasci_datetime.year
if ano_nasci > (ano_atual - 5):
ano_nasci = " "
idade = " "
data_nasci = " "
else:
idade = ano_atual - ano_nasci
return fn,ln,email,telefone,cidade,estado,pais,data_nasci,ano_nasci,idade
# FUNCOES DE FORA DA CLASSE
def ultima_transacao(transacoes):
"""Retorna a ultima transação das transações."""
return transacoes[-1]
# Para instanciar a classe, basta chama-la armazenando em uma variavel,
test_classe = Transacao(tran_teste)
# nesse caso estamos deixando a transacao dentro do objeto com nome 'test_classe'
# podemos chamar o atributo .objeto para checar qual é
test_classe.objeto # -> 'transaction'
print(test_classe.get_dados_facebook())
t_teste = test_classe.get_dicionario_transacao()
###Output
_____no_output_____
###Markdown
Making the call to the pagar.me API, using the Transacao class to analyze the transactions
###Code
# quanto tempo durou a requisicao - começo
tempo_agora = time.time()
# --> variaveis de contagem/soma
soma_valor_brl = 0
soma_valor_capturado = 0
soma_valor_estornado = 0
count_transacoes = 0
count_requisicoes = 0
LISTA_VALORES = list()
LISTA_DATAS = list()
# a hora vem em segundos, precisamos passar em milisegundos
data_parametro = time.time() * 1000
print(" --- DATA PARAMETRO: {}".format(data_parametro))
# declarações para a saida de loading
print("É para puxar até qual data:")
_data = horario.get_string_data() # funcao para puxar um input e inserir as datas
por_inicial = data_parametro # data inicial, tbm a porcentagem inicial
data_final = horario.get_unixtime_data(_data) # data final e tbm a porcentagem final
por_dif = data_final - por_inicial # a diferença entra a data de inicio e fim
# por_atual = (atual - por_inicial) / por_dif
# DECLARAÇÕES IMPORTANTES | CSV
LISTA_FB_CSV = list()
LISTA_DATAFRAME = list()
# declaracao para o while
tem_transacao = True
while tem_transacao:
transacoes = pm.transaction.find_by({"count": "500",
"date_created": f"<{data_parametro}",
"status": "paid"})
# se retornar nenhuma transacao em transacoes, pode parar tbm
count_requisicoes += 1
if len(transacoes) == 0:
tem_transacao = False
break
for ite_transacao in transacoes:
# instanciando a classe e gerando o objeto ref a transação
objeto_transacao = Transacao(ite_transacao)
# declarando a variavel para manter a data de criacao da transacao
data_parametro = objeto_transacao.get_data_created()
if data_parametro <= data_final - 10800: # DATA FINAL
# se a data de criacao for menor que a data que selecionamos, pare o loop
tem_transacao = False
break
# contagem das transacoes
count_transacoes += 1
# declaracoes para variaveis de contagem/soma
_valores_transacao = objeto_transacao.get_valores()
LISTA_VALORES.append(_valores_transacao)
LISTA_DATAS.append(objeto_transacao.data_criacao)
# variaveis para observar valores do periodo selecionado
soma_valor_brl += _valores_transacao['valor_brl']
soma_valor_capturado += _valores_transacao['valor_captado']
soma_valor_estornado += _valores_transacao['valor_estornado']
# passando as transacoes para uma lista
LISTA_FB_CSV.append(list(objeto_transacao.get_dados_facebook()))
LISTA_DATAFRAME.append(objeto_transacao.get_dicionario_transacao())
# passamos novamente a data de criacao para a variavel data parametro
# que vai ser usada na nova requisicao
data_parametro = objeto_transacao.get_data_created()
# saindo para a tela de carregamento
por_atual = (data_parametro - por_inicial) / por_dif
sys.stdout.write("\r" + "CARREGANDO, PERA AI... {:.1f} %".format(por_atual * 100))
sys.stdout.flush()
# limite de 1.000 por minuto, então da um tempo para fazer a proxima
time.sleep(30)
# quanto tempo durou a requisicao
tempo_depois = time.time()
# agora mostra os valores e umas informações sobre as chamadas
print()
print("-------- VALORES --------")
print("|- Valor Total Pago: R$ {:,}".format(soma_valor_brl))
print("|- Valor Total Capturado: R$ {:,}".format(soma_valor_capturado))
print("|- Valor Total Estornado: R$ {:,}".format(soma_valor_estornado))
print("-------------------------------")
print("Soma count transacoes: {}".format(count_transacoes))
print("Soma count requisições: {}".format(count_requisicoes))
print("------------------------")
print("Data da transacao do BREAK: {}".format(objeto_transacao.data_update))
print("Data da ultima transacao: {}".format(LISTA_DATAFRAME[-1]['data_criacao']))
print("------------------------")
print("Requisicao durou: {}".format(horario.get_standard_format(tempo_depois - tempo_agora)))
# 1640995200000 -> timestamp janeiro 01/01/2022 00:00:00 | GTM TIME +0
print("Data das 5 primeiras transações")
for n,i in enumerate(LISTA_DATAFRAME[:5]):
print("{} | {}".format(n,i['data_criacao']))
print("Data das 5 ultimas transações")
for n,i in enumerate(LISTA_DATAFRAME[-5:]):
print("{} | {}".format(n,i['data_criacao']))
###Output
_____no_output_____
###Markdown
Writing out the .csv for pandas | DATAFRAME
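Since `LISTA_DATAFRAME` is already a list of dicts, an equivalent and arguably simpler alternative to the manual `csv.writer` loop below is to let pandas do the writing:
```python
import pandas as pd

pd.DataFrame(LISTA_DATAFRAME).to_csv(
    "dados/dataset_{}_{:02d}_{:02d}.csv".format(data_atual.year,
                                                data_atual.month,
                                                data_atual.day),
    index=False)
```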
###Code
# declaracao para lista do DataFrame é:
LISTA_DATAFRAME
with open("dados/dataset_{}_{:02d}_{:02d}.csv".format(data_atual.year,
data_atual.month,
data_atual.day),
"w", newline = "") as file:
writer = csv.writer(file,delimiter=",")
writer.writerow(LISTA_DATAFRAME[1].keys())
for ite_row in LISTA_DATAFRAME:
writer.writerow(ite_row.values())
###Output
_____no_output_____
###Markdown
Creating the BASIC INFO .csv
###Code
lista_numero = list()
for i in LISTA_FB_CSV:
lista_numero.append(i[3])
lista_numero = list(dict.fromkeys(lista_numero))
with open ("dados/lista_pagar_me_numero.csv", "w", newline="") as csvfile:
writer = csv.writer(csvfile,delimiter=',')
    writer.writerow(["numero_pagar_me"])  # wrap in a list so the header is written as a single column
for i in lista_numero:
        writer.writerow([i])  # wrap in a list so each phone number stays in one column
###Output
_____no_output_____
###Markdown
Building the Facebook audience list directly from the request above
###Code
with open("dados/publicos/transacoes_basica.csv","w",newline="") as csvfile:
writer = csv.writer(csvfile,delimiter=",")
writer.writerow(["1_nome","2_nome","email","numero","city","st","ct","dob","y","age"])
for ite_linha in LISTA_FB_CSV:
writer.writerow(ite_linha)
###Output
_____no_output_____
###Markdown
Creating the CSV file with the 'LISTA_CSV' list we built, basic USER info
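Note that `LISTA_CSV` is not built in any of the cells shown above; presumably it was filled inside the main request loop from `get_info_basica()`, whose tuple matches the header written below. A hypothetical reconstruction from the last page of transactions fetched earlier:
```python
# hypothetical: rebuild LISTA_CSV from the transactions already in memory
LISTA_CSV = []
for ite_transacao in transacoes:  # `transacoes` holds the last page fetched above
    LISTA_CSV.append(Transacao(ite_transacao).get_info_basica())
```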
###Code
with open("dados/transacoes.csv","w",newline="") as csvfile:
writer = csv.writer(csvfile,delimiter=",")
writer.writerow(["data","valor","status","id","email"])
for ite_linha in LISTA_CSV:
writer.writerow(ite_linha)
###Output
_____no_output_____
###Markdown
Creating the .csv of pagar.me buyers | Facebook audience list. Making the request
###Code
## codigo para criar LISTA PÚBLICO FACEBOOK
# fn,ln,email,telefone,cidade,estado,pais,data_nasci,ano_nasci,idade
with open(f"dados/publicos/lista_publico_{data_atual.month}_{data_atual.day}_.csv","w",newline='') as csvfile:
data_parametro = time.time() * 1000
writer = csv.writer(csvfile, delimiter=",")
# Passando os nomes da coluna (header)
writer.writerow(["fn","ln","email","phone","ct","st","country","dob","doby","age"])
while True:
transacoes = pm.transaction.find_by({"count":"500","status":"paid","date_updated":f"<{data_parametro}"})
if len(transacoes) == 0:
break
for ite_tran in transacoes:
t = Transacao(ite_tran)
data_parametro = t.get_data_update()
t_fb = t.get_dados_facebook()
writer.writerow([t_fb[0],t_fb[1],t_fb[2],t_fb[3],t_fb[4],t_fb[5],t_fb[6],t_fb[7],t_fb[8],t_fb[9]])
time.sleep(40)
###Output
_____no_output_____
###Markdown
MADNESS FROM HERE ON DOWN (scratch pagination experiments)
###Code
# tentando fazer meio que uma paginação, pegar a ultima transação enviada, e fazer outra requisição a partir daquela data
transacoes = pm.transaction.find_by({'count':'8',"date_updated":"<1624367191178.0"})
print("Numero de transacoes: {}".format(len(transacoes)))
# checando as transacoes
for i,primeira in enumerate(transacoes):
ult_tran = Transacao(primeira)
data_ultima = ult_tran.get_data_update()
print("{} | {}".format(i,ult_tran.data_update))
print("{} | {}".format(i,ult_tran.get_data_update()))
print()
# ult_tran = ultima_transacao(transacoes)
print("data da ultima transacao: {}".format(data_ultima))
#ult_tran = Transacao(ult_tran)
#data_ultima = ult_tran.get_data_update()
print("get_date_updated: {}".format(data_ultima))
print("---------------")
# para puxar a ultima tbm, basta deixar um <=, ao inves de somente <
search_params = {"count":"7","date_updated":f"< {data_ultima}"}
pos_transacoes = pm.transaction.find_by(search_params)
print("Numero de transacoes: {}".format(len(pos_transacoes)))
for n,i in enumerate(pos_transacoes):
ult_tran = Transacao(i)
data_ultima = ult_tran.get_data_update()
print("{} | {}".format(n,i['date_updated']))
print("{} | {}".format(n,ult_tran.get_data_update()))
print()
print("--------------------")
search_params = {"count":"5","date_updated":f"< {data_ultima}"}
pos_transacoes = pm.transaction.find_by(search_params)
print("Numero de transacoes: {}".format(len(pos_transacoes)))
for n,i in enumerate(pos_transacoes):
ult_tran = Transacao(i)
data_ultima = ult_tran.get_data_update()
print("{} | {}".format(n,ult_tran.data_update))
print("{} | {}".format(n,ult_tran.get_data_update()))
print()
#tr = pm.transaction.find_by({"count":"1","date_created":"DATE_CRATED"})
#tr
# print()
tr = pm.transaction.find_by({"id":"444673976"})
for i in tr:
for j in i:
print(f"{j:25} | {i[j]}")
pass
# horario da transacao acima
print(i['date_created'])
import horario
hora = horario.get_unixtime_datetime(i['date_created'])
hora - (10800 * 1000)
t = pm.transaction.find_by({"date_created":"<=1635732285250.0"})
for i in t:
print(i['date_created'])
print()
data_pam = time.time() * 1000
datetime.fromtimestamp(time.time())
for i in range(10):
tra = pm.transaction.find_by({"count":"1","date_created": f"<{data_pam}"})
for t in tra:
obj = Transacao(t)
data_pam = obj.get_data_created()
print(i)
print(obj.data_criacao)
print("Hora unixtime: {}".format(obj.get_data_created()))
print("Data Parametr: {}".format(data_pam))
print("-ID Transacao: {}".format(obj.id))
print()
###Output
_____no_output_____ |
04_benchs.ipynb | ###Markdown
Exploring TS definitions...
###Code
#export
from fastcore.test import *
from fastai2.basics import *
from fastai2.torch_core import *
from fastai2.data import *
#export
import pandas as pd
from fastcore.all import *
from scipy.io import arff
###Output
_____no_output_____
###Markdown
Core> Basic timeseries opening/processing funcs.
###Code
#export
def maybe_unsqueeze(x):
"Add empty dimension if it is a rank 1 tensor/array"
if isinstance(x, np.ndarray): return x[None,:] if len(x.shape)==1 else x
if isinstance(x, Tensor): return x.unsqueeze(0) if len(x.shape)==1 else x
else: return None
a = np.random.random(10)
test_eq((1,10), maybe_unsqueeze(a).shape)
test_eq((1,10), maybe_unsqueeze(maybe_unsqueeze(a)).shape) #do nothing
t = torch.rand(10)
test_eq((1,10), maybe_unsqueeze(t).shape)
test_eq((1,10), maybe_unsqueeze(maybe_unsqueeze(t)).shape) #do nothing
###Output
_____no_output_____
###Markdown
A time series is just an array of 1 dimension.
###Code
#export
def show_array(array, ax=None, figsize=None, title=None, ctx=None, tx=None, **kwargs):
"Show an array on `ax`."
# Handle pytorch axis order
if hasattrs(array, ('data','cpu','permute')):
array = array.data.cpu()
elif not isinstance(array,np.ndarray):
array = np.array(array) # coerce to ndarray (the `array` parameter shadows the array helper)
arrays = maybe_unsqueeze(array)
ax = ifnone(ax,ctx)
if figsize is None: figsize = (5,5)
if ax is None: _,ax = plt.subplots(figsize=figsize)
tx = ifnone(tx,np.arange(arrays[0].shape[0]))
label = kwargs.pop('label', 'x')
for a, c in zip(arrays, ['b', 'c', 'm', 'y', 'k',]):
ax.plot(tx, a, '-'+c,label=label, **kwargs)
if title is not None: ax.set_title(title)
ax.legend()
return ax
###Output
_____no_output_____
###Markdown
A simple array of 1 channel is `np.arange(10)`.
###Code
show_array(np.arange(10));
# export
class TSeries(TensorBase):
"Basic Timeseries wrapper"
@classmethod
def create(cls, x):
return cls(maybe_unsqueeze(x))
@property
def channels(self): return self.shape[0]
@property
def len(self): return self.shape[-1]
def __repr__(self):
return f'TSeries(ch={self.channels}, len={self.len})'
def show(self, ctx=None, **kwargs):
return show_array(self, ctx=ctx, **kwargs)
###Output
_____no_output_____
###Markdown
TESTS
###Code
X = np.random.rand(10000, 1000)
y = np.random.randint(0,10,10000)
###Output
_____no_output_____
###Markdown
Loading from arrays:
###Code
class NaiveNumpyDataset(torch.utils.data.Dataset):
def __init__(self, X, y=None):
self.X, self.y = X, y
def __getitem__(self, idx):
if self.y is None: return (self.X[idx], )
else: return (self.X[idx], self.y[idx])
def __len__(self):
return len(self.X)
ds = NaiveNumpyDataset(X,y)
ds.X.shape, ds.y.shape
dls_torch = torch.utils.data.DataLoader(dataset=ds, batch_size=8)
dls = DataLoaders.from_dsets(ds, bs=8)
def cycle_dl(dl):
for x,y in iter(dl):
pass
bx,by = dls.train.one_batch()
bx.shape
%timeit cycle_dl(dls_torch)
%timeit cycle_dl(dls.train)
class NumpyDataset():
"Tensor aware implementation"
def __init__(self, X, y=None):
self.X, self.y = X, y
def __getitem__(self, idx):
if self.y is None: return (self.X[idx], )
else: return (TSeries.create(self.X[idx]), TensorCategory(self.y[idx]))
def __len__(self):
return len(self.X)
ds = NumpyDataset(X,y)
ds[0]
dls = DataLoaders.from_dsets(ds, bs=8)
dls.train.one_batch()
%timeit cycle_dl(dls.train)
class TSTransform(Transform):
def __init__(self, x, y):
self.x, self.y = x, y
def encodes(self, i):
return (TSeries.create(self.x[i]), TensorCategory(self.y[i]))
tl = TfmdLists(range_of(X), TSTransform(X, y))
tl[0:5]
dls = DataLoaders.from_dsets(tl, bs=8)
bx, by = dls.one_batch()
%timeit cycle_dl(dls.train)
dl =TfmdDL(tl, bs=8)
%timeit cycle_dl(dl)
###Output
1.32 s ± 30.8 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
###Markdown
batch tfmdDL
###Code
class Slicer:
"slice numpy ds"
def __init__(self,to): self.to = to
def __getitem__(self, idxs):
return self.to.new(*self.to[idxs])
class NumpyDataset2():
def __init__(self, X, y=None):
self.X, self.y = X, y
def __getitem__(self, idx):
if self.y is None: return (self.X[idx], )
else: return (self.X[idx], self.y[idx])
def __len__(self):
return len(self.X)
@property
def slicer(self):
return Slicer(self)
def new(self, X, y):
return type(self)(X, y)
ds = NumpyDataset2(X,y)
ds.slicer[0:4]
class ReadTSBatch(ItemTransform):
def __init__(self, to): self.to = to
def encodes(self, to):
res = (tensor(to.X).float(), )
res = res + (tensor(to.y),)
# if to.device is not None: res = to_device(res, to.device)
return res
# def decodes(self, o):
# o = [_maybe_expand(o_) for o_ in to_np(o) if o_.size != 0]
# vals = np.concatenate(o, axis=1)
# try: df = pd.DataFrame(vals, columns=self.to.all_col_names)
# except: df = pd.DataFrame(vals, columns=self.to.x_names)
# to = self.to.new(df)
# return to
rtb = ReadTSBatch(ds)
rtb.encodes(ds.slicer[0:4])
class TSDataloader(TfmdDL):
do_item = noops
def __init__(self, dataset, bs=16, shuffle=False, after_batch=None, num_workers=0, **kwargs):
if after_batch is None: after_batch = L(TransformBlock().batch_tfms)+ReadTSBatch(dataset)
super().__init__(dataset, bs=bs, shuffle=shuffle, after_batch=after_batch, num_workers=num_workers, **kwargs)
def create_batch(self, b): return self.dataset.slicer[b]
dl = TSDataloader(ds, bs=128)
%timeit cycle_dl(dl)
###Output
20.5 ms ± 381 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
|
keito_prob.ipynb | ###Markdown
###Code
import cv2
from google.colab.patches import cv2_imshow
print(cv2.__version__)
main_img = cv2.imread('/content/test.png')
gry_img = cv2.cvtColor(main_img,cv2.COLOR_BGR2GRAY)
_, _, stats, centroids = cv2.connectedComponentsWithStats(gry_img)
flats_cxcy = []
for idx,cxcy in enumerate(centroids):
if (stats[idx,4] > 6300) and (stats[idx,4] < 11000):
flats_cxcy.append(cxcy)
cv2.putText(main_img,str(stats[idx,4]),(int(cxcy[0]),int(cxcy[1])),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,0,128))
cv2.putText(main_img,str([int(cxcy[0]),int(cxcy[1])]),(int(cxcy[0]-40),int(cxcy[1])),cv2.FONT_HERSHEY_SIMPLEX,0.3,(128,0,))
# contours,_ = cv2.findContours(img.copy(),cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# for num,cnt in enumerate(contours):
# x,y,w,h = cv2.boundingRect(cnt)
# w_by_h_ratio = w/h
# if w_by_h_ratio >=0.7:
# cv2.putText(main_img,"*",(int(x+w/2),int(y+h/2)),cv2.FONT_HERSHEY_SIMPLEX,0.7,(0,0,128))
cv2_imshow(main_img)
print(len(flats_cxcy))
#Create table map with flat(0,0) mapped to imgflat(cx,cy)
#to find the flat,we need to find bbox: DONEs
import numpy as np
np_flats_cxcy = np.array(flats_cxcy)
print('before',np_flats_cxcy.shape)
print('after',np_flats_cxcy.reshape(5,6,2))
###Output
_____no_output_____ |
Part II My Market Model.ipynb | ###Markdown
Traditional Language Model: returns the most similar words to a given word. Here I ask it for the 6 most similar words (concepts) to 'fear'.
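For context, a lookup like this is usually just a cosine-similarity ranking over the rows of the vector-space matrix. A minimal sketch, assuming the VSM is a pandas DataFrame whose rows are word/ticker vectors (the notebook's actual `neighbors` helper may be implemented differently):
```python
import numpy as np
import pandas as pd

def neighbors_sketch(item, vsm):
    "Rank all rows of `vsm` by cosine similarity to the row for `item`."
    v = vsm.loc[item].values
    sims = (vsm.values @ v) / (np.linalg.norm(vsm.values, axis=1) * np.linalg.norm(v))
    return pd.Series(sims, index=vsm.index, name='cosine').sort_values(ascending=False)
```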
###Code
neighbors('fear',Language_VSM).head(7)
###Output
_____no_output_____
###Markdown
My Market Model: returns the most similar firms to a chosen stock. Here I ask it for the 9 most similar stocks to Google: Apple, Equifax, Microsoft, S&P500, Western Union, Visa, Exxon, DirectTv (data is from 2011), and Intel.
###Code
neighbors('GOOG',VSM).head(10)
###Output
_____no_output_____
###Markdown
Using the model to identify statistically correlated stocks. Let's use Nvidia as an example.
###Code
neighbors('NVDA',VSM).head()
neighbors('GLW',VSM).head()
###Output
_____no_output_____
###Markdown
- **AMAT**---> Applied Materials Inc: Firm that performs engineering tasks for semiconductor chips- **GLW**----> Corning Inc: American multinational technology company that specializes in specialty glass, ceramics, and related materials and technologies including advanced optics, primarily for industrial and scientific applications.- **MU**---> Micron Tech: producer of computer memory and computer data storage including dynamic random-access memory...- **MOLX**----> Molex: manufacturer of electronic, electrical, and fiber optic connectivity systems. Molex offers over 100,000 products across a variety of industries, including data communications... If you bought each stock at the start of 2011, your returns appear like
###Code
df = chants.get_data(['AMAT','GLW','MU','MOLX'],dates1)
df.dropna(axis=0,inplace=True)
df.drop('SPY',inplace=True,axis=1)
((df/df.iloc[0,:])-1).plot()
###Output
_____no_output_____
###Markdown
The Usefulness of the Model. The model places Applied Materials (AMAT) as most similar to Nvidia. It's nuanced enough, though, that it doesn't simply place NVDA as the most similar to AMAT. If they made the same things at the same scale for the same people (like Coke and Pepsi), that might make sense. But if they occupy different spaces in the market, AMAT might be most similar to NVDA yet have different neighbors of its own. NVDA produces graphics cards while AMAT performs specialized services in the microchip industry.
###Code
neighbors('AMAT',VSM).head(15)
###Output
_____no_output_____
###Markdown
We see NVDA doesn't even make the top 15. I won't write out the full list but the closest four are- **KLAC**: supplies process control and yield management systems for the semiconductor industry and other related nanoelectronics- **LRCX**: supplier of wafer fabrication equipment and related services to the semiconductor industry- **APH**: major producer of electronic and fiber optic connectors, cable and interconnect systems- **MOLX**: (was on NVDA's nearest neighbor) manufacturer of connectivity systems.If you want to read into this, you can be happy that the nearest two neighbors perform specialized services in the microchip industry before generalizing to system electronic.I'll plot the returns for this group as well, but to make the comparisons easier I'll plot the previous chart with NVDA, AMAT, GLW, and MU first.
###Code
((df/df.iloc[0,:])-1).plot()
df1 = chants.get_data(['AMAT','KLAC','LRCX','APH'],dates)
df1.drop('SPY',inplace=True,axis=1)
((df1/df1.iloc[0,:])-1).dropna(axis=0).plot()
###Output
_____no_output_____
###Markdown
I'm not going to color code it for time's sake, sorry. AMAT, in blue, is the common stock between both sets. You can see that if we treat it as the chosen stock, its personal story is reflected by the group of stocks most similar to it. Both sets share an overall theme though. You can see how this might produce a lot of potentially correlated stocks. The next step would be to validate them using traditional methods. I probably should have plotted the S&P500 to show where the market was at.- For a visual overview of what a Market VSM does: https://public.tableau.com/app/profile/jelan.samatar/viz/VSM_Project/Story
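One simple version of that traditional check — a sketch, not necessarily the validation intended here — is to look at the pairwise correlation of daily returns, reusing the `chants.get_data` helper and `dates` range from the cells above:
```python
prices = chants.get_data(['AMAT', 'NVDA', 'KLAC', 'LRCX'], dates)
prices = prices.drop('SPY', axis=1).dropna(axis=0)   # get_data appears to add the SPY benchmark, as above
daily_returns = prices.pct_change().dropna()
daily_returns.corr()                                 # pairwise Pearson correlation of daily returns
```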
###Code
df = chants.get_data(['SPY','AMAT','NVDA'],dates)
((df/df.iloc[0,:])-1).dropna(axis=0).plot()
###Output
_____no_output_____ |
mf_performance_analysis/mf data extraction/Equity Funds/Dividend Yield Fund/dy_mf_data_extraction.ipynb | ###Markdown
Dividend Yield Fund: these mutual funds invest in stocks and follow a strategy of investing in stocks that generate a higher dividend yield. Extracting Dividend Yield Mutual Fund's Historical Investment Returns Data. Data in this table: absolute historical returns for a ₹1000 investment. If the 1Y column value is 1234.5, that means your ₹1000 investment 1 year back would have grown to ₹1234.5.
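A quick illustration of how the absolute-return figures translate into percentage returns, using the example values above:
```python
invested, value_1y = 1000, 1234.5
pct_return = (value_1y / invested - 1) * 100
print(f"1Y return: {pct_return:.2f}%")   # -> 1Y return: 23.45%
```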
###Code
dy_lump_sum_rtn = pd.read_html(
"https://www.moneycontrol.com/mutual-funds/performance-tracker/returns/dividend-yield-fund.html")
df1 = pd.DataFrame(dy_lump_sum_rtn[0])
#Renaming historical returns column names
df1.rename({'1W': '1W_RTN(%)', '1M': '1M_RTN(%)', '3M': '3M_RTN(%)', '6M': '6M_RTN(%)',
'YTD': 'YTD_RTN(%)', '1Y': '1Y_RTN(%)', '2Y': '2Y_RTN(%)', '3Y': '3Y_RTN(%)',
'5Y': '5Y_RTN(%)', '10Y': '10Y_RTN(%)'
}, axis=1, inplace=True)
print("Shape of the dataframe:", df1.shape)
df1.head()
###Output
Shape of the dataframe: (14, 13)
###Markdown
Extracting Dividend Yield Mutual Fund's Monthly Returns Data. Data in this table: monthly returns. If the Jan month column value is 5.4%, that means the fund has given 5.4% returns in the month of Jan.
###Code
dy_monthly_rtn = pd.read_html(
"https://www.moneycontrol.com/mutual-funds/performance-tracker/monthly-returns/dividend-yield-fund.html")
df2 = pd.DataFrame(dy_monthly_rtn[0])
#Renaming df1 column names
df1.rename({"Apr'21": "Apr'21(%)", "Apr'21": "Apr'21(%)", "Apr'21": "Apr'21(%)", "Apr'21": "Apr'21(%)",
'MTD': 'MTD_RTN(%)', "Apr'21": "Apr'21(%)", "Apr'21": "Apr'21(%)", "Apr'21": "Apr'21(%)",
"Apr'21": "Apr'21(%)", "Apr'21": "Apr'21(%)"
}, axis=1, inplace=True)
print("Shape of the dataframe:", df2.shape)
df2.head()
###Output
Shape of the dataframe: (14, 14)
###Markdown
Extracting Dividend Yield Mutual Fund's Quarterly Returns Data. Data in this table: quarterly returns. If the Q1 column value is 5.4%, that means the fund has given 5.4% returns from 1st Jan to 31st Mar.
###Code
dy_quarterly_rtn = pd.read_html(
"https://www.moneycontrol.com/mutual-funds/performance-tracker/quarterly-returns/dividend-yield-fund.html")
df3 = pd.DataFrame(dy_quarterly_rtn[0])
print("Shape of the dataframe:", df3.shape)
df3.head()
###Output
Shape of the dataframe: (14, 14)
###Markdown
Extracting Dividend Yield Mutual Fund's Annual Investment Returns Data. Data in this table: annual returns. If the 2018 column value is 5.4%, that means the fund has given 5.4% returns from 1st Jan to 31st Dec (or the last available date).
###Code
dy_annual_rtn = pd.read_html(
"https://www.moneycontrol.com/mutual-funds/performance-tracker/annual-returns/dividend-yield-fund.html")
df4 = pd.DataFrame(dy_annual_rtn[0])
#Renaming yearly returns column names
df4.rename({'2020': '2020_RTN(%)', '2019': '2019_RTN(%)', '2018': '2018_RTN(%)', '2017': '2017_RTN(%)',
'2016': '2016_RTN(%)', '2015': '2015_RTN(%)', '2014': '2014_RTN(%)', '2013': '2013_RTN(%)',
'2012': '2012_RTN(%)', '2011': '2011_RTN(%)', '2010': '2010_RTN(%)'
}, axis=1, inplace=True)
print("Shape of the dataframe:", df4.shape)
df4.head()
###Output
Shape of the dataframe: (14, 14)
###Markdown
Extracting Dividend Yield Mutual Fund's Rank Within Category Data. Data in this table: performance rank within category. If the 1Y column value is 3/45, that means the fund ranked 3rd in performance out of 45 funds in that category.
###Code
dy_rank_in_category = pd.read_html(
"https://www.moneycontrol.com/mutual-funds/performance-tracker/ranks/dividend-yield-fund.html")
df5 = pd.DataFrame(dy_rank_in_category[0])
#Renaming df5 column names
df5.rename({'1W': '1W_Rank', '1M': '1M_Rank', '3M': '3M_Rank', '6M': '6M_Rank', 'YTD': 'YTD_Rank',
'1Y': '1Y_Rank', '2Y': '2Y_Rank', '3Y': '3Y_Rank', '5Y': '5Y_Rank', '10Y': '10Y_Rank'
}, axis=1, inplace=True)
print("Shape of the dataframe:", df5.shape)
df5.head()
###Output
Shape of the dataframe: (12, 12)
###Markdown
Extracting Dividend Yield Mutual Fund's Risk Ratios Data. Data in this table: risk ratios calculated on daily returns for the last 3 years.
###Code
dy_risk_ratio = pd.read_html(
"https://www.moneycontrol.com/mutual-funds/performance-tracker/risk-ratios/dividend-yield-fund.html")
df6 = pd.DataFrame(dy_risk_ratio[0])
#Dropping the 'Category' column
df6.drop('Category', inplace=True, axis=1)
print("Shape of the dataframe:", df6.shape)
df6.head()
###Output
Shape of the dataframe: (5, 8)
###Markdown
Extracting Dividend Yield Mutual Fund's Portfolio Data. Data in this table: compare how schemes have invested money across various asset classes and the number of instruments held.
###Code
dy_portfolio = pd.read_html(
"https://www.moneycontrol.com/mutual-funds/performance-tracker/portfolioassets/dividend-yield-fund.html")
df7 = pd.DataFrame(dy_portfolio[0])
#Renaming SIP returns column names
df7.rename({'Turnover ratio': 'Turnover ratio(%)'}, axis=1, inplace=True)
print("Shape of the dataframe:", df7.shape)
df7.head()
###Output
Shape of the dataframe: (14, 10)
###Markdown
Extracting Dividend Yield Mutual Fund's Latest NAV Data. Data in this table: the latest NAV values for the mutual funds.
###Code
dy_nav = pd.read_html(
"https://www.moneycontrol.com/mutual-funds/performance-tracker/navs/dividend-yield-fund.html")
df8 = pd.DataFrame(dy_nav[0])
df8.rename({'1D Change' : '1D Change(%)'}, axis=1, inplace=True)
print("Shape of the dataframe:", df8.shape)
df8.head()
###Output
Shape of the dataframe: (14, 10)
###Markdown
Extracting Dividend Yield Mutual Fund's SIP Returns Data. Data in this table: absolute SIP returns. If the 1Y column value is 10%, that means the fund has given 10% returns on SIP investments started 1 year back from the latest NAV date.
###Code
dy_sip_rtns = pd.read_html(
"https://www.moneycontrol.com/mutual-funds/performance-tracker/sip-returns/dividend-yield-fund.html")
df9 = pd.DataFrame(dy_sip_rtns[0])
#Renaming SIP returns column names
df9.rename({'1Y': '1Y_SIP_RTN(%)', '2Y': '2Y_SIP_RTN(%)', '3Y': '3Y_SIP_RTN(%)',
'5Y': '5Y_SIP_RTN(%)', '10Y': '10Y_SIP_RTN(%)', 'YTD' : 'YTD_SIP_RTN(%)'
}, axis=1, inplace=True)
print("Shape of the dataframe:", df9.shape)
df9.head()
df_final = pd.concat([df1,df2,df3,df4,df5,df6,df7,df8,df9],axis=1,sort=False)
print("Shape of the dataframe:", df_final.shape)
# Remove duplicate columns by name in Pandas
df_final = df_final.loc[:,~df_final.columns.duplicated()]
# Removing spaces in the column names
#df_final.columns = df_final.columns.str.replace(' ','_')
print("Shape of the dataframe:", df_final.shape)
df_final.head()
#Exporting the consolidated elss mf data as a csv file
#print("Shape of the dataframe:", df_final.shape)
#df_final.to_csv('dy_mf_data('+ str(pd.to_datetime('today').strftime('%d-%b-%Y %H:%M:%S')) + ').csv',
# index=False)
#Exporting the elss mf data columns with its datatype as a csv file
#df_dtypes.to_csv('elss_mf_col_data_types('+ str(pd.to_datetime('today').strftime('%d-%b-%Y %H:%M:%S')) + '.csv)')
###Output
_____no_output_____ |
CameraPipeline.ipynb | ###Markdown
Reading live camera data
###Code
# Import the required modules
import cv2
import numpy as np
from IPython.display import clear_output, display, Image
import PIL.Image
from io import BytesIO
import ipywidgets as widgets
def img2ByteArr(frame, ext='jpeg'):
byteObj = BytesIO()
PIL.Image.fromarray(frame).save(byteObj, ext)
return byteObj.getvalue()
cam = cv2.VideoCapture(0) #Resolutions from camera- 1920x1080, 1280x720, 640x360
cam.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 360)
_, frame = cam.read()
#Create Ipython image widget
w=widgets.Image(value=img2ByteArr(frame));
display(w)
#print(widgets.height, widgets.width)
for frame_number in range(100):
#Capture frame
_, frame = cam.read()
#print(frame.shape)
#Perform operation
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
#Display frame
w.value=img2ByteArr(frame)
cam.release()
clear_output()
#print(frame.shape)
###Output
_____no_output_____ |
05_Data-Driven_Mapping.ipynb | ###Markdown
Lesson 5. Data-driven Mapping*Data-driven mapping* refers to the process of using data values to determine the symbology of mapped features. Color, shape, and size are the three most common symbology types used in data-driven mapping.Data-driven maps are often refered to as thematic maps.- 5.1 Choropleth Maps- 5.2 Issues with Visualization- 5.3 Classification Schemes- 5.4 Point Maps- 5.5 Mapping Categorical Data- 5.6 Recap- **Exercise**: Data-Driven Mapping**Instructor Notes**- Datasets used - California counties shapefile ('notebook_data/california_counties/CaliforniaCounties.shp') - Alameda County schools ('notebook_data/alco_schools.csv') - Berkeley bike boulevards ('notebook_data/transportation/BerkeleyBikeBlvds.geojson')- Expected time to complete - Lecture and questions: 30 minutes - Exercises: 15 minutes Types of Thematic MapsThere are two primary types of maps used to convey data values:- `Choropleth maps`: set the color of areas (polygons) by data value- `Point symbol maps`: set the color or size of points by data valueWe will discuss both of these types of maps in more detail in this lesson. But let's take a quick look at choropleth maps.
###Code
import pandas as pd
import geopandas as gpd
import matplotlib # base python plotting library
import matplotlib.pyplot as plt # submodule of matplotlib
# To display plots, maps, charts etc in the notebook
%matplotlib inline
###Output
_____no_output_____
###Markdown
5.1 Choropleth MapsChoropleth maps are the most common type of thematic map.Let's take a look at how we can use a geodataframe to make a choropleth map.We'll start by reloading our counties dataset from Day 1.
###Code
counties = gpd.read_file('notebook_data/california_counties/CaliforniaCounties.shp')
counties.head()
counties.columns
###Output
_____no_output_____
###Markdown
Here's a plain map of our polygons.
###Code
counties.plot()
###Output
_____no_output_____
###Markdown
Now, for comparison, let's create a choropleth map by setting the color of the county based on the values in the population per square mile (`POP12_SQMI`) column.
###Code
counties.plot(column='POP12_SQMI', figsize=(10,10))
###Output
_____no_output_____
###Markdown
That's really the heart of it. To set the color of the features based on the values in a column, set the `column` argument to the column name in the gdf.> **Protip:** You can quickly right-click on the plot and save to a file or open in a new browser window. By default map colors are linearly scaled to data values. This is called a `proportional color map`.- The great thing about `proportional color maps` is that you can visualize the full range of data values. We can also add a legend and even tweak its display.
###Code
counties.plot(column='POP12_SQMI', figsize=(10,10), legend=True)
plt.show()
counties.plot(column='POP12_SQMI', figsize=(10,10), legend=True,
legend_kwds={'label': "Population Density per mile$^2$",
'orientation': "horizontal"},)
plt.show()
###Output
_____no_output_____
###Markdown
QuestionWhy are we plotting `POP12_SQMI` instead of `POP2012`?
###Code
Your response here:
###Output
_____no_output_____
###Markdown
Note: Types of Color MapsThere are a few different types of color maps (or color palettes), each of which has a different purpose:- *diverging* - a "diverging" set of colors are used so emphasize mid-range values as well as extremes.- *sequential* - usually with a single color hue to emphasize changes in magnitude, where darker colors typically mean higher values- *qualitative* - a diverse set of colors to identify categories and avoid implying quantitative significance.> **Pro-tip**: You can actually see all your color map options if you misspell what you put in `cmap` and try to run-in. Try it out!> **Pro-tip**: Sites like [ColorBrewer](https://colorbrewer2.org/type=sequential&scheme=Blues&n=3) let's you play around with different types of color maps. If you want to create your own, [The Python Graph Gallery](https://python-graph-gallery.com/python-colors/) is a way to see what your Python color options are. 5.2 Issues with Visualization Types of choropleth dataThere are several types of quantitative data variables that can be used to create a choropleth map. Let's consider these in terms of our ACS data.- **Count** - counts, aggregated by feature - *e.g. population within a census tract*- **Density** - count, aggregated by feature, normalized by feature area - *e.g. population per square mile within a census tract*- **Proportions / Percentages** - value in a specific category divided by total value across in all categories - *e.g. proportion of the tract population that is white compared to the total tract population*- **Rates / Ratios** - value in one category divided by value in another category - *e.g. homeowner-to-renter ratio would be calculated as the number of homeowners (c_owners/ c_renters)* Interpretability of plotted dataThe goal of a choropleth map is to use color to visualize the spatial distribution of a quantitative variable.Brighter or richer colors are typically used to signify higher values.A big problem with choropleth maps is that our eyes are drawn to the color of larger areas, even if the values being mapped in one or more smaller areas are more important. We see just this sort of problem in our population-density map. ***Why does our map not look that interesting?*** Take a look at the histogram below, then consider the following question.
###Code
plt.hist(counties['POP12_SQMI'],bins=40)
plt.title('Population Density per mile$^2$')
plt.show()
###Output
_____no_output_____
###Markdown
QuestionWhat county does that outlier represent? What problem does that pose?
###Code
Your response here:
###Output
_____no_output_____
###Markdown
5.3 Classification schemesLet's try to make our map more interpretable!The common alternative to a proportionial color map is to use a **classification scheme** to create a **graduated color map**. This is the standard way to create a **choropleth map**.A **classification scheme** is a method for binning continuous data values into 4-7 classes (the default is 5) and map those classes to a color palette. The commonly used classifications schemes:- **Equal intervals** - equal-size data ranges (e.g., values within 0-10, 10-20, 20-30, etc.) - pros: - best for data spread across entire range of values - easily understood by map readers - cons: - but avoid if you have highly skewed data or a few big outliers - **Quantiles** - equal number of observations in each bin - pros: - looks nice, becuase it best spreads colors across full set of data values - thus, it's often the default scheme for mapping software - cons: - bin ranges based on the number of observations, not on the data values - thus, different classes can have very similar or very different values. - **Natural breaks** - minimize within-class variance and maximize between-class differences - e.g. 'fisher-jenks' - pros: - great for exploratory data analysis, because it can identify natural groupings - cons: - class breaks are best fit to one dataset, so the same bins can't always be used for multiple years - **Manual** - classifications are user-defined - pros: - especially useful if you want to slightly change the breaks produced by another scheme - can be used as a fixed set of breaks to compare data over time - cons: - more work involved Classification schemes and GeoDataFramesClassification schemes can be implemented using the geodataframe `plot` method by setting a value for the **scheme** argument. This requires the [pysal](https://pysal.org/) and [mapclassify](https://pysal.org/mapclassify) libraries to be installed in your Python environment. Here is a list of the `classification schemes` names that we will use:- `equalinterval`, `quantiles`,`fisherjenks`,`naturalbreaks`, and `userdefined`.For more information about these classification schemes see the [pysal mapclassifiers web page](https://pysal.org/mapclassify/api.html) or check out the help docs. -------------------------- Classification schemes in actionLet's redo the last map using the `quantile` classification scheme.- What is different about the code? About the output map?
###Code
# Plot population density - mile^2
fig, ax = plt.subplots(figsize = (10,5))
counties.plot(column='POP12_SQMI',
scheme="quantiles",
legend=True,
ax=ax
)
ax.set_title("Population Density per Sq Mile")
###Output
_____no_output_____
###Markdown
Note: For interval notation- A square bracket is *inclusive*- A parentheses is *exclusive* User Defined Classification SchemesYou may get pretty close to your final map without being completely satisfied. In this case you can manually define a classification scheme.Let's customize our map with a `user-defined` classification scheme where we manually set the breaks for the bins using the `classification_kwds` argument.
###Code
fig, ax = plt.subplots(figsize = (14,8))
counties.plot(column='POP12_SQMI',
legend=True,
cmap="RdYlGn",
scheme='user_defined',
classification_kwds={'bins':[50,100,200,300,400]},
ax=ax)
ax.set_title("Population Density per Sq Mile")
###Output
_____no_output_____
###Markdown
Since we are customizing our plot, we can also edit our legend to specify and format the text so that it's easier to read.- We'll use `legend_labels_list` to customize the labels for group in the legend.
###Code
fig, ax = plt.subplots(figsize = (14,8))
counties.plot(column='POP12_SQMI',
legend=True,
cmap="RdYlGn",
scheme='user_defined',
classification_kwds={'bins':[50,100,200,300,400]},
ax=ax)
# Create the labels for the legend
legend_labels_list = ['<50','50 to 100','100 to 200','200 to 300','300 to 400','>400']
# Apply the labels to the plot
for j in range(0,len(ax.get_legend().get_texts())):
ax.get_legend().get_texts()[j].set_text(legend_labels_list[j])
ax.set_title("Population Density per Sq Mile")
###Output
_____no_output_____
###Markdown
Let's plot a ratioIf we look at the columns in our dataset, we see we have a number of variablesfrom which we can calculate proportions, rates, and the like.Let's try that out:
###Code
counties.head()
fig, ax = plt.subplots(figsize = (15,6))
# Plot percent hispanic as choropleth
counties.plot(column=(counties['HISPANIC']/counties['POP2012'] * 100),
legend=True,
cmap="Blues",
scheme='user_defined',
classification_kwds={'bins':[20,40,60,80]},
edgecolor="grey",
linewidth=0.5,
ax=ax)
legend_labels_list = ['<20%','20% - 40%','40% - 60%','60% - 80%','80% - 100%']
for j in range(0,len(ax.get_legend().get_texts())):
ax.get_legend().get_texts()[j].set_text(legend_labels_list[j])
ax.set_title("Percent Hispanic Population")
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Questions1. What new options and operations have we added to our code?1. Based on our code, what title would you give this plot to describe what it displays?1. How many bins do we specify in the `legend_labels_list` object, and how many bins are in the map legend? Why?
###Code
Your responses here:
###Output
_____no_output_____
###Markdown
5.4 Point maps Choropleth maps are great, but mapping using point symbols enables us to visualize our spatial data in another way. If you know both mapping methods you can expand how much information you can show in one map. For example, point maps are a great way to map `counts` because the varying sizes of areas are deemphasized. -----------------------Let's read in some point data on Alameda County schools.
###Code
schools_df = pd.read_csv('notebook_data/alco_schools.csv')
schools_df.head()
###Output
_____no_output_____
###Markdown
We got it from a plain CSV file, let's coerce it to a GeoDataFrame.
###Code
schools_gdf = gpd.GeoDataFrame(schools_df,
geometry=gpd.points_from_xy(schools_df.X, schools_df.Y))
schools_gdf.crs = "epsg:4326"
###Output
_____no_output_____
###Markdown
Then we can map it.
###Code
schools_gdf.plot()
plt.title('Alameda County Schools')
###Output
_____no_output_____
###Markdown
Proportional Color Maps**Proportional color maps** linearly scale the `color` of a point symbol by the data values.Let's try this by creating a map of `API`. API stands for *Academic Performance Index*, which is a measurement system that looks at the performance of an individual school.
###Code
schools_gdf.plot(column="API", cmap="gist_heat",
edgecolor="grey", figsize=(10,8), legend=True)
plt.title("Alameda County, School API scores")
###Output
_____no_output_____
###Markdown
When you see that continuous color bar in the legend you know that the mapping of data values to colors is not classified. Graduated Color MapsWe can also create **graduated color maps** by binning data values before associating them with colors. These are just like choropleth maps, except that the term "choropleth" is only used with polygon data. Graduated color maps use the same syntax as the choropleth maps above - you create them by setting a value for `scheme`. Below, we copy the code we used above to create a choropleth, but we change the name of the geodataframe to use the point gdf.
###Code
fig, ax = plt.subplots(figsize = (15,6))
# Plot percent non-white with graduated colors
schools_gdf.plot(column='API',
legend=True,
cmap="Blues",
scheme='user_defined',
classification_kwds={'bins':[0,200,400,600,800]},
edgecolor="grey",
linewidth=0.5,
#markersize=60,
ax=ax)
# Create a custom legend
legend_labels_list = ['0','< 200','< 400','< 600','< 800','>= 800']
# Apply the legend to the map
for j in range(0,len(ax.get_legend().get_texts())):
ax.get_legend().get_texts()[j].set_text(legend_labels_list[j])
# Create the plot
plt.tight_layout()
plt.title("Alameda County, School API scores")
schools_gdf['API'].describe()
###Output
_____no_output_____
###Markdown
As you can see, the syntax for a choropleth and graduated color map is the same,although some options only apply to one or the other.For example, uncomment the `markersize` parameter above to see how you can further customize a graduated color map. Graduated symbol maps`Graduated symbol maps` are also a great method for mapping points. These are just like graduated color maps but instead of associating symbol color with data values they associate point size. Similarly,graduated symbol maps use `classification schemes` to set the size of point symbols. > We demonstrate how to make graduated symbol maps along with some other mapping techniques in the `Optional Mapping notebook` which we encourage you to explore on your own. (***Coming Soon***) 5.5 Mapping Categorical Data Mapping categorical data, also called qualitative data, is a bit more straightforward. There is no need to scale or classify data values. The goal of the color map is to provide a contrasting set of colors so as to clearly delineate different categories. Here's a point-based example:
###Code
schools_gdf.plot(column='Org', categorical=True, legend=True)
###Output
_____no_output_____
###Markdown
5.6 RecapWe learned about important data driven mapping strategies and mapping concepts and can leverage what many of us know about `matplotlib`- Choropleth Maps- Point maps- Color schemes - Classifications Exercise: Data-Driven MappingPoint and polygons are not the only geometry-types that we can use in data-driven mapping!Run the next cell to load a dataset containing Berkeley's bicycle boulevards (which we'll be using more in the following notebook).Then in the following cell, write your own code to:1. plot the bike boulevards;2. color them by status (find the correct column in the head of the dataframe, displayed below);3. color them using a fitting, good-looking qualitative colormap that you choose from [The Matplotlib Colormap Reference](https://matplotlib.org/3.1.1/gallery/color/colormap_reference.html);4. set the line width to 5 (check the plot method's documentation to find the right argument for this!);4. add the argument `figsize=[20,20]`, to make your map nice and big and visible! Then answer the questions posed in the last cell.To see the solution, double-click the Markdown cell below.
###Code
bike_blvds = gpd.read_file('notebook_data/transportation/BerkeleyBikeBlvds.geojson')
bike_blvds.head()
# YOUR CODE HERE:
###Output
_____no_output_____
###Markdown
Double-click to see solution!<!-- SOLUTION:bike_blvds.plot(column='Status', cmap='Dark2', linewidth=5, legend=True, figsize=[20,20])-->------------------------------------- Questions1. What does that map indicate about the status of the Berkeley bike boulevards?1. What does that map indicate about the status of your Berkeley bike-boulevard *dataset*?
###Code
Your responses here:
###Output
_____no_output_____
benchmarking/simulations/negative_control.ipynb | ###Markdown
Here as a negative control we will compare RPCA output from a clear block model of rank 2 (positive control) and a negative control of random counts with the same mean seq. depth.
###Code
depth=2.5e3
overlap_=20
rank_=2
#run model with fit variables and new variants
_,X_signal=build_block_model(rank_, depth/40, depth/40,
depth, depth
,200,1000,overlap=overlap_
,mapping_on=False)
X_signal=pd.DataFrame(X_signal,
index=['OTU_'+str(x)
for x in range(X_signal.shape[0])],
columns=['sample_'+str(x)
for x in range(X_signal.shape[1])])
#run model with fit variables and new variants
X_random=np.random.randint(0,np.mean(X_signal.values)*2.3,(1000,200))
X_random=pd.DataFrame(X_random,
index=['OTU_'+str(x)
for x in range(X_random.shape[0])],
columns=['sample_'+str(x)
for x in range(X_random.shape[1])])
X_random.index = shuffle(X_random).index
X_random.columns = shuffle(X_random.T).index
X_random=X_random.T
X_random.sort_index(inplace=True)
X_random=X_random.T
X_random.sort_index(inplace=True)
#meta on cluster
meta = np.array(['Group 1']*int(X_signal.shape[1]/2)+['Group 2']*int(X_signal.shape[1]/2)).T
meta = pd.DataFrame(meta,index=X_signal.columns,columns=['group'])
print('X_random mean %.2f seq/sample'%X_random.sum(axis=0).mean())
print('X_signal mean %.2f seq/sample'%X_signal.sum(axis=0).mean())
#RPCA on random
X_random_rclr = rclr().fit_transform(X_random.T)
U_random,s,V = OptSpace().fit_transform(X_random_rclr)
U_random = pd.DataFrame(U_random,index=X_random.columns)
#RPCA on very clear signal
X_signal_rclr = rclr().fit_transform(X_signal.T)
U_signal,s,V = OptSpace().fit_transform(X_signal_rclr)
U_signal = pd.DataFrame(U_signal,index=X_random.columns)
#show the results
fig,((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2,figsize=(15,8))
ax1.imshow(clr(X_signal+1),aspect='auto',norm=MidpointNormalize(midpoint=0.), cmap='PiYG')
ax1.set_title('Positive Control')
ax2.imshow(clr(X_random+1),aspect='auto',norm=MidpointNormalize(midpoint=0.), cmap='PiYG')
ax2.set_title('Negative Control')
_ = plot_pcoa(U_signal, meta, ax3, 'group')
_ = plot_pcoa(U_random, meta, ax4, 'group')
plt.show()
###Output
_____no_output_____ |
04-Dictionaries/00-DictionaryMethod_ExerciseSolutions.ipynb | ###Markdown
Dictionary Method: Exercise SolutionsFirst I'll recreate what we did in the tutorial.
###Code
#import the necessary packages
import pandas
import nltk
from nltk import word_tokenize
import string
#read the Music Reviews corpus into a Pandas dataframe
df = pandas.read_csv("../Data/BDHSI2016_music_reviews.csv", encoding='utf-8', sep = '\t')
df['body'] = df['body'].apply(lambda x: ''.join([i for i in x if not i.isdigit()]))
df['body_tokens'] = df['body'].str.lower()
df['body_tokens'] = df['body_tokens'].apply(nltk.word_tokenize)
df['body_tokens'] = df['body_tokens'].apply(lambda x: [word for word in x if word not in string.punctuation])
df['token_count'] = df['body_tokens'].apply(lambda x: len(x))
#view the dataframe
df
#Read in dictionary files
pos_sent = open("../Data/positive_words.txt", encoding='utf-8').read()
neg_sent = open("../Data/negative_words.txt", encoding='utf-8').read()
#view part of the pos_sent variable, to see how it's formatted.
print(pos_sent[:101])
#remember the split function? We'll split on the newline character (\n) to create a list
positive_words=pos_sent.split('\n')
negative_words=neg_sent.split('\n')
#view the first elements in the lists
print(positive_words[:10])
print(negative_words[:10])
###Output
_____no_output_____
###Markdown
Great! You know what to do now.Exercise:1. Create a column with the number of positive words, and another with the proportion of positive words2. Create a column with the number of negative words, and another with the proportion of negative words3. Print the average proportion of negative and positive words by genre4. Compare this to the average score by genre
###Code
#exercise code here
#1. Create a column with the number of positive words and another with the proportion of positive words
df['pos_num'] = df['body_tokens'].apply(lambda x: len([word for word in x if word in positive_words]))
df['pos_prop'] = df['pos_num']/df['token_count']
#2. Create a column with the number of negative words, and another with the proportion of negative words
df['neg_num'] = df['body_tokens'].apply(lambda x: len([word for word in x if word in negative_words]))
df['neg_prop'] = df['neg_num']/df['token_count']
df
#3. Print the average proportion of negative and positive words by genre
grouped = df.groupby('genre')
print("Averge proportion of positive words by genre")
print(grouped['pos_prop'].mean().sort_values(ascending=False))
print()
print("Averge proportion of negative words by genre")
grouped['neg_prop'].mean().sort_values(ascending=False)
# 4. Compare this to the average score by genre
print("Averge score by genre")
grouped['score'].mean().sort_values(ascending=False)
###Output
_____no_output_____
###Markdown
3. Dictionary Method using Scikit-learnWe can also do this using the document term matrix. We'll again do this in pandas, to make it conceptually clear. As you get more comfortable with programming you may want to eventually shift over to working with sparse matrix format.
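For reference, a minimal sketch of the sparse-matrix route mentioned above (the rest of this section sticks with a dense pandas DataFrame; `df` and `positive_words` come from earlier in this notebook):
```python
from sklearn.feature_extraction.text import CountVectorizer

vec = CountVectorizer()
sparse_dtm = vec.fit_transform(df.body)                    # scipy CSR sparse matrix
pos_set = set(positive_words)
pos_idx = [i for i, w in enumerate(vec.get_feature_names()) if w in pos_set]
pos_counts = sparse_dtm[:, pos_idx].sum(axis=1)            # positive-word count per document
```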
###Code
#import the function CountVectorizer
from sklearn.feature_extraction.text import CountVectorizer
countvec = CountVectorizer()
#create our document term matrix as a pandas dataframe
dtm_df = pandas.DataFrame(countvec.fit_transform(df.body).toarray(), columns=countvec.get_feature_names(), index = df.index)
###Output
_____no_output_____
###Markdown
Now we can keep only those *columns* that occur in our positive words list. To do this, we'll first save a list of the columns names as a variable, and then only keep the elements of the list that occur in our positive words list. We'll then create a new dataframe keeping only those select columns.
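As a toy illustration of that column-subsetting idea (the words here are made up, not the review vocabulary):
```python
import pandas as pd

# Tiny document-term matrix: rows are documents, columns are words
toy_dtm = pd.DataFrame({"great": [1, 0], "flawed": [0, 2], "record": [1, 1]})
keep = [word for word in list(toy_dtm) if word in ["great", "wonderful"]]
toy_pos = toy_dtm[keep]        # keeps only the "great" column
toy_pos.sum(axis=1)            # per-document count of the kept words
```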
###Code
#create a columns variable that is a list of all column names
columns = list(dtm_df)
pos_columns = [word for word in columns if word in positive_words]
#create a dtm from our dtm_df that keeps only positive sentiment columns
dtm_pos = dtm_df[pos_columns]
#count the number of positive words for each document
dtm_pos['pos_count'] = dtm_pos.sum(axis=1)
#dtm_pos.drop('pos_count',axis=1, inplace=True)
dtm_pos['pos_count']
###Output
_____no_output_____
###Markdown
EX: Do the same for negative words. EX: Calculate the proportion of negative and positive words for each document.
###Code
#EX: Do the same for negative words.
neg_columns = [word for word in columns if word in negative_words]
dtm_neg = dtm_df[neg_columns]
dtm_neg['neg_count'] = dtm_neg.sum(axis=1)
dtm_neg['neg_count']
#EX: Calculate the proportion of negative and positive words for each document.
dtm_pos['pos_proportion'] = dtm_pos['pos_count']/dtm_df.sum(axis=1)
print(dtm_pos['pos_proportion'])
print()
dtm_neg['neg_proportion'] = dtm_neg['neg_count']/dtm_df.sum(axis=1)
print(dtm_neg['neg_proportion'])
###Output
_____no_output_____ |
Machine Learning/PyTorch Scholarship/Lesson4 - PyTorch Intro/Part 7 - Loading Image Data (Exercises).ipynb | ###Markdown
Loading Image DataSo far we've been working with fairly artificial datasets that you wouldn't typically be using in real projects. Instead, you'll likely be dealing with full-sized images like you'd get from smart phone cameras. In this notebook, we'll look at how to load images and use them to train neural networks.We'll be using a [dataset of cat and dog photos](https://www.kaggle.com/c/dogs-vs-cats) available from Kaggle. Here are a couple example images:We'll use this dataset to train a neural network that can differentiate between cats and dogs. These days it doesn't seem like a big accomplishment, but five years ago it was a serious challenge for computer vision systems.
###Code
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
import torch
from torchvision import datasets, transforms
import helper
###Output
_____no_output_____
###Markdown
The easiest way to load image data is with `datasets.ImageFolder` from `torchvision` ([documentation](http://pytorch.org/docs/master/torchvision/datasets.htmlimagefolder)). In general you'll use `ImageFolder` like so:```pythondataset = datasets.ImageFolder('path/to/data', transform=transform)```where `'path/to/data'` is the file path to the data directory and `transform` is a list of processing steps built with the [`transforms`](http://pytorch.org/docs/master/torchvision/transforms.html) module from `torchvision`. ImageFolder expects the files and directories to be constructed like so:```root/dog/xxx.pngroot/dog/xxy.pngroot/dog/xxz.pngroot/cat/123.pngroot/cat/nsdf3.pngroot/cat/asd932_.png```where each class has it's own directory (`cat` and `dog`) for the images. The images are then labeled with the class taken from the directory name. So here, the image `123.png` would be loaded with the class label `cat`. You can download the dataset already structured like this [from here](https://s3.amazonaws.com/content.udacity-data.com/nd089/Cat_Dog_data.zip). I've also split it into a training set and test set. TransformsWhen you load in the data with `ImageFolder`, you'll need to define some transforms. For example, the images are different sizes but we'll need them to all be the same size for training. You can either resize them with `transforms.Resize()` or crop with `transforms.CenterCrop()`, `transforms.RandomResizedCrop()`, etc. We'll also need to convert the images to PyTorch tensors with `transforms.ToTensor()`. Typically you'll combine these transforms into a pipeline with `transforms.Compose()`, which accepts a list of transforms and runs them in sequence. It looks something like this to scale, then crop, then convert to a tensor:```pythontransform = transforms.Compose([transforms.Resize(255), transforms.CenterCrop(224), transforms.ToTensor()])```There are plenty of transforms available, I'll cover more in a bit and you can read through the [documentation](http://pytorch.org/docs/master/torchvision/transforms.html). Data LoadersWith the `ImageFolder` loaded, you have to pass it to a [`DataLoader`](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader). The `DataLoader` takes a dataset (such as you would get from `ImageFolder`) and returns batches of images and the corresponding labels. You can set various parameters like the batch size and if the data is shuffled after each epoch.```pythondataloader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)```Here `dataloader` is a [generator](https://jeffknupp.com/blog/2013/04/07/improve-your-python-yield-and-generators-explained/). To get data out of it, you need to loop through it or convert it to an iterator and call `next()`.```python Looping through it, get a batch on each loop for images, labels in dataloader: pass Get one batchimages, labels = next(iter(dataloader))``` >**Exercise:** Load images from the `Cat_Dog_data/train` folder, define a few transforms, then build the dataloader.
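Once the exercise below is filled in, a quick sanity check is to inspect the class mapping and the batch shapes (a sketch; it assumes `dataset` and `dataloader` are built as described above):
```python
# Sketch: assumes dataset = datasets.ImageFolder(...) and a DataLoader built from it
print(dataset.classes)        # e.g. ['cat', 'dog'], taken from the folder names
print(dataset.class_to_idx)   # e.g. {'cat': 0, 'dog': 1}

images, labels = next(iter(dataloader))
print(images.shape)           # torch.Size([batch_size, 3, H, W]) after the transforms
print(labels[:10])            # integer class indices
```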
###Code
data_dir = 'Cat_Dog_data/train'
transform = transforms.Compose([
transforms.Resize(255),
transforms.CenterCrop(230),
transforms.ToTensor()
])# TODO: compose transforms here
dataset = datasets.ImageFolder(data_dir, transform = transform)# TODO: create the ImageFolder
dataloader = torch.utils.data.DataLoader(dataset, batch_size = 1, shuffle = True)# TODO: use the ImageFolder dataset to create the DataLoader
# Run this to test your data loader
images, labels = next(iter(dataloader))
helper.imshow(images[0], normalize=False)
###Output
_____no_output_____
###Markdown
If you loaded the data correctly, you should see something like this (your image will be different): Data AugmentationA common strategy for training neural networks is to introduce randomness in the input data itself. For example, you can randomly rotate, mirror, scale, and/or crop your images during training. This will help your network generalize as it's seeing the same images but in different locations, with different sizes, in different orientations, etc.To randomly rotate, scale and crop, then flip your images you would define your transforms like this:```pythontrain_transforms = transforms.Compose([transforms.RandomRotation(30), transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])```You'll also typically want to normalize images with `transforms.Normalize`. You pass in a list of means and list of standard deviations, then the color channels are normalized like so```input[channel] = (input[channel] - mean[channel]) / std[channel]```Subtracting `mean` centers the data around zero and dividing by `std` squishes the values to be between -1 and 1. Normalizing helps keep the network work weights near zero which in turn makes backpropagation more stable. Without normalization, networks will tend to fail to learn.You can find a list of all [the available transforms here](http://pytorch.org/docs/0.3.0/torchvision/transforms.html). When you're testing however, you'll want to use images that aren't altered (except you'll need to normalize the same way). So, for validation/test images, you'll typically just resize and crop.>**Exercise:** Define transforms for training data and testing data below.
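Because `Normalize` shifts pixel values outside the `[0, 1]` display range, plots of normalized tensors get clipped (you'll see matplotlib clipping warnings further below). A small sketch of undoing the normalization before plotting, assuming the `(0.5, 0.5, 0.5)` means and standard deviations used in the solution below (`unnormalize` is just an illustrative helper name):
```python
import torch

def unnormalize(img, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)):
    """Invert transforms.Normalize per channel: x * std + mean."""
    mean = torch.tensor(mean).view(3, 1, 1)
    std = torch.tensor(std).view(3, 1, 1)
    return img * std + mean
```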
###Code
data_dir = 'Cat_Dog_data'
# TODO: Define transforms for the training data and testing data
train_transforms = transforms.Compose([
transforms.RandomRotation(45),
transforms.RandomResizedCrop(230),
transforms.RandomHorizontalFlip(p = 0.52),
transforms.ToTensor(),
transforms.Normalize(
(0.5, 0.5, 0.5),
(0.5, 0.5, 0.5)
)
])
test_transforms = transforms.Compose([
transforms.Resize(255),
transforms.CenterCrop(230),
transforms.ToTensor(),
transforms.Normalize(
(0.5, 0.5, 0.5),
(0.5, 0.5, 0.5)
)
])
# Pass transforms in here, then run the next cell to see how the transforms look
train_data = datasets.ImageFolder(data_dir + '/train', transform = train_transforms)
test_data = datasets.ImageFolder(data_dir + '/test', transform = test_transforms)
trainloader = torch.utils.data.DataLoader(train_data, batch_size=32)
testloader = torch.utils.data.DataLoader(test_data, batch_size=32, shuffle = True)
# change this to the trainloader or testloader
data_iter = iter(testloader)
images, labels = next(data_iter)
fig, axes = plt.subplots(figsize=(10,4), ncols=4)
for ii in range(4):
ax = axes[ii]
helper.imshow(images[ii], ax=ax, normalize=False)
###Output
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
###Markdown
Your transformed images should look something like this.Training examples:Testing examples: At this point you should be able to load data for training and testing. Now, you should try building a network that can classify cats vs dogs. This is quite a bit more complicated than before with the MNIST and Fashion-MNIST datasets. To be honest, you probably won't get it to work with a fully-connected network, no matter how deep. These images have three color channels and at a higher resolution (so far you've seen 28x28 images which are tiny).In the next part, I'll show you how to use a pre-trained network to build a model that can actually solve this problem.
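To see why a plain fully-connected network struggles here, it helps to count the numbers involved (224x224 is the crop size from the examples above; 500 is the width of the first dense layer in the optional sketch below):
```python
# Flattened input size: 224x224 RGB image vs. 28x28 grayscale MNIST digit
rgb_inputs = 3 * 224 * 224        # 150528 values per image
mnist_inputs = 28 * 28            # 784 values per image

# Weights in just the first dense layer with 500 hidden units
first_layer_params = rgb_inputs * 500 + 500   # about 75 million parameters
print(rgb_inputs, mnist_inputs, first_layer_params)
```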
###Code
# Optional TODO: Attempt to build a network to classify cats vs dogs from this dataset
from torch import nn, optim
import torch.nn.functional as F
n_inputs = next(iter(trainloader))[0].shape[1:].numel()
# Network architecture
class DogCatClassifierNet(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(n_inputs, 500) # Input
self.fc2 = nn.Linear(500, 500)
self.fc3 = nn.Linear(500, 250)
self.fc4 = nn.Linear(250, 2)
self.dropout = nn.Dropout(p = 0.2)
def forward(self, x):
x = x.view(x.shape[0], -1)
x = self.dropout(F.relu(self.fc1(x)))
x = self.dropout(F.relu(self.fc2(x)))
x = self.dropout(F.relu(self.fc3(x)))
x = self.fc4(x)
return x
model = DogCatClassifierNet().cuda()
criterion = nn.CrossEntropyLoss().cuda()
optimizer = optim.SGD(model.parameters(), lr = 0.005)
model.train()
epochs = 4
len_trainloader = len(trainloader)
for e in range(epochs):
running_loss = 0
for images, labels in trainloader:
images, labels = images.cuda(), labels.cuda()
logits = model.forward(images).cuda()
loss = criterion(logits, labels).cuda()
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss += loss.item()
print(f'Epoch: {e + 1}... Training loss: {running_loss / len_trainloader}')
import numpy as np
images, labels = iter(testloader).next()
img = images[0].view(1, -1).cuda()
helper.imshow(images[0], normalize = True)
model.eval()
with torch.no_grad():
output = model.forward(img).cpu()
output = output.numpy().squeeze()
print(output)
fig, axs = plt.subplots()
axs.barh(np.arange(2), output)
axs.set_aspect(0.1)
axs.set_yticks(np.arange(2))
axs.set_yticklabels(['Cat', 'Dog'], size='small');
axs.set_title('Class Probability')
axs.set_xlim(0, 1.1)
test_loss = 0.0
class_correct = list(0. for i in range(2))
class_total = list(0. for i in range(2))
model.eval() # prep model for *evaluation*
with torch.no_grad():
for data, target in trainloader:
data, target = data.cuda(), target.cuda()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data).cuda()
# calculate the loss
loss = criterion(output, target).cuda()
# update test loss
test_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
# calculate test accuracy for each object class
for i in range(data.size(0)):
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# calculate and print avg test loss
test_loss = test_loss/len(testloader.dataset)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(2):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
str(i), 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
        print('Test Accuracy of %5s: N/A (no training examples)' % (str(i)))
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
###Output
_____no_output_____ |
notebook/CoqAndPython.ipynb | ###Markdown
Fibonacci recursive method
###Code
def fib_ric(number, values=[0, 1], i=2):
    """Recursively build the list of the first `number` Fibonacci values."""
    first_val = values[len(values) - 2]
    second_val = values[len(values) - 1]
    if i >= number:
        return values
    else:
        # append the sum of the last two values and recurse
        return fib_ric(number, values + [first_val + second_val], i + 1)
fib_ric(10)
###Output
_____no_output_____
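###Markdown
A quick sanity check (a sketch, not part of the original notebook): the list returned by `fib_ric` should satisfy the same recurrence that the Coq specification below states, i.e. the first two values are 0 and 1 and every later value is the sum of the previous two.
```python
values = fib_ric(10)
assert values[0] == 0 and values[1] == 1
assert all(values[i] == values[i - 1] + values[i - 2] for i in range(2, len(values)))
```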
###Markdown
Proof
###Code
Require Import Arith.
(* Reset first: the previous definition is not rolled back automatically when the cell is re-run (not sure why) *)
Reset specification_of_fibonacci.
Definition specification_of_fibonacci (fib : nat -> nat) :=
fib 0 = 0
/\
fib 1 = 1
/\
forall n'' : nat,
fib (S (S n'')) = fib (S n'') + fib n''.
###Output
_____no_output_____ |
Flask/01_flask_hello_world.ipynb | ###Markdown
Flask- A web framework that uses Python code.- `https://blog.miguelgrinberg.com/post/the-flask-mega-tutorial-part-i-hello-world`- install - `pip install flask` 1. Creating the project- hello
###Code
!mkdir -p hello
!mkdir -p hello/static
!mkdir -p hello/templates
!touch hello/hello.py
!touch hello/templates/index.html
!tree hello
# If this command does not run, create the directories and files manually.
###Output
_____no_output_____
###Markdown
- hello.py : creates the app object and configures the routes.- static : stores js, css, and image files- templates : the directory that stores the html code- The project consists of these three main parts.
###Code
%%writefile hello/hello.py
from flask import *
app = Flask(__name__) # create the Flask app object
@app.route("/") # root route
def hello():
return "Hello Flask"
@app.route('/user/<name>')
def user(name):
return render_template('index.html', name=name)
# create the API endpoint
@app.route('/api/data')
def api_data():
data = {"alice":25, "andy":35}
    return jsonify(data) # convert the dictionary into a JSON response
app.run(debug=True)
###Output
Overwriting hello/hello.py
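###Markdown
Once the development server is running (see the last cell of this notebook), the JSON endpoint can be smoke-tested from another process, for example with the `requests` library (a sketch; it assumes `requests` is installed and that Flask is listening on its default port 5000).
```python
import requests

resp = requests.get("http://127.0.0.1:5000/api/data")
print(resp.status_code)   # 200 when the server is up
print(resp.json())        # {'alice': 25, 'andy': 35}
```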
###Markdown
Write the html code in index.html- set up the button using jQuery
###Code
%%writefile hello/templates/index.html
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Hello Flask</title>
</head>
<body>
Hello {{name}}
<button class="result">Click!!</button>
<div class="data"></div>
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.4.1/jquery.js"></script>
<script type="text/javascript">
$(document).ready(function(){
$('.result').on('click', function(){
$.getJSON("/api/data", function(data){
console.log(data);
var tag = "<p>alice:" + data.alice + "</p>";
tag += "<p>andy:" + data.andy + "</p>";
$(".data").append(tag);
})
})
})
</script>
</body>
</html>
!python hello/hello.py
###Output
_____no_output_____ |
Week 9/Keras_CNN_text_relatedness.ipynb | ###Markdown
Hyperparameters
###Code
DATA_FILE_PATH = 'drive/MyDrive/Tutorial/week 9/quora_duplicate_questions.tsv'
EMB_DIR = 'drive/MyDrive/Tutorial/week 9/glove.6B.50d.txt'
MAX_VOCAB_SIZE = 30000
MAX_SENT_LEN = 15 # how this value was chosen is shown later: ~80th percentile of question word counts
EMBEDDING_DIM = 50
BATCH_SIZE = 32
N_EPOCHS = 10
###Output
_____no_output_____
###Markdown
Preprocessing
###Code
df_questions_raw = pd.read_table(DATA_FILE_PATH, sep='\t', nrows=100000)
print('Dataset size:', df_questions_raw.shape)
df_questions_raw
###Output
Dataset size: (100000, 6)
###Markdown
Equalise the number of positives and negatives
###Code
df_questions_raw.is_duplicate.value_counts()
not_same = df_questions_raw[df_questions_raw["is_duplicate"] == 0][:37261]
same = df_questions_raw[df_questions_raw["is_duplicate"] == 1]
df_questions = pd.concat([same, not_same])
df_questions = df_questions.sample(frac=1).reset_index(drop=True) # shuffling
df_questions.is_duplicate.value_counts()
###Output
_____no_output_____
###Markdown
Drop very short questions and check question lengths
###Code
sent_len = lambda x:len(x)
df_questions['q1_length'] = df_questions.question1.apply(sent_len)
df_questions['q2_length'] = df_questions.question2.apply(sent_len)
df_questions[df_questions['q1_length']<10]['question1']
# Questions having fewer than 10 characters can be discarded.
indices = set(df_questions[df_questions['q1_length']<10].index).union(df_questions[df_questions['q2_length']<10].index)
df_questions.drop(indices, inplace=True)
df_questions.reset_index()
# Can drop the character count columns - to save memory
df_questions.drop(['q1_length','q2_length'], inplace=True, axis=1)
# THIS IS HOW WE GOT MAX_SENT_LEN = 15
word_count = lambda x:len(x.split()) # Word count for each question
df_questions['q1_wc'] = df_questions.question1.apply(word_count)
df_questions['q2_wc'] = df_questions.question2.apply(word_count)
p = 80.0
print('Question-1 :{} % of the sentences have a length less than or equal to {}'.format(p, np.percentile(df_questions['q1_wc'], 80)))
print('Question-2 :{} % of the sentences have a length less than or equal to {}'.format(p, np.percentile(df_questions['q2_wc'], 80)))
###Output
Question-1 :80.0 % of the sentences have a length less than or equal to 14.0
Question-2 :80.0 % of the sentences have a length less than or equal to 14.0
###Markdown
Tokenizing It is better to tokenize with NLTK first and then map words to indices with Keras.We can concatenate the NLTK tokens with whitespace (" ".join(...)) and then use the Keras Tokenizer ---Keras will do this : 'what is this?' -> ['what', 'is', 'this?']NLTK will do this : 'what is this?' -> ['what', 'is', 'this', '?']
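A quick illustration of that difference (a sketch using the `word_tokenize` already imported above):
```python
from nltk import word_tokenize

word_tokenize("what is this?")              # ['what', 'is', 'this', '?']
"what is this?".split()                     # ['what', 'is', 'this?']
" ".join(word_tokenize("what is this?"))    # 'what is this ?'
```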
###Code
%%time
question_list = list(df_questions['question1']) + list(df_questions['question2'])
question_list = [' '.join(word_tokenize(q)[:MAX_SENT_LEN]) for q in question_list]
question_list[:5]
# Filters - except do no removed '?'
tokenizer = Tokenizer(num_words=MAX_VOCAB_SIZE, filters='!"#$%&()*+,-./:;<=>@[\\]^_`{|}~\t\n')
tokenizer.fit_on_texts(question_list)
print("Number of words in vocabulary:", len(tokenizer.word_index))
tokenizer.word_index # It is sorted by frequency.
# Limit vocab and idx-word dictionary
word_index = {k: v for k, v in tokenizer.word_index.items() if v < MAX_VOCAB_SIZE}
idx_to_word = dict((v,k) for k,v in word_index.items())
X = tokenizer.texts_to_sequences(question_list)
X = pad_sequences(X, maxlen=MAX_SENT_LEN, padding='post', truncating='post')
X_q1 = X[:len(X)//2] # First questions
X_q2 = X[len(X)//2:] # Second questions
del X
X_q1[:3]
X_train_q1, X_test_q1, X_train_q2, X_test_q2, y_train, y_test = train_test_split(X_q1,
X_q2,
df_questions['is_duplicate'],
random_state=10,
test_size=0.1)
###Output
_____no_output_____
###Markdown
Embedding Matrix
###Code
# Load GloVe word embeddings
# Download Link: https://nlp.stanford.edu/projects/glove/
print("[INFO]: Reading Word Embeddings ...")
# Data path
embeddings = {}
f = open(EMB_DIR)
for line in f:
values = line.split()
word = values[0]
vector = np.asarray(values[1:], dtype='float32')
embeddings[word] = vector
f.close()
# Create an embedding matrix containing only the word's in our vocabulary
# If the word does not have a pre-trained embedding, then randomly initialize the embedding
embeddings_matrix = np.random.uniform(-0.05, 0.05, size=(len(word_index)+1, EMBEDDING_DIM)) # +1 because word indices start at 1 (index 0 is reserved for padding)
for word, i in word_index.items(): # i=0 is the embedding for the zero padding
try:
embeddings_vector = embeddings[word]
except KeyError:
embeddings_vector = None
if embeddings_vector is not None:
embeddings_matrix[i] = embeddings_vector
del embeddings
###Output
_____no_output_____
###Markdown
CNN with Keras Model API
###Code
from keras.models import Model
from keras.layers import Layer, Input, Dense, Concatenate, Conv2D, Reshape, Embedding
from keras.layers import MaxPooling1D, Flatten, BatchNormalization, Activation, Dropout
# Bigram and trigram filters
bi_filter_size = 2
tri_filter_size = 3
num_filters = 20
###Output
_____no_output_____
###Markdown
Question 1 Computational Graph
###Code
input_1 = Input(shape=(MAX_SENT_LEN, ), name='q1_input')
# Common embedding lookup layer
emb_look_up = Embedding(input_dim=MAX_VOCAB_SIZE,
output_dim=EMBEDDING_DIM,
weights = [embeddings_matrix],
trainable=False,
mask_zero=False,
name='q_embedding_lookup')
emb_1 = emb_look_up(input_1)
# Need to be reshaped because the CONV layer assumes 1 dimnesion as num of channels
emb_1 = Reshape(target_shape=(1, MAX_SENT_LEN, EMBEDDING_DIM),
name='q1_embedding_reshape')(emb_1)
# Convolutional Layers
conv_1_bi = Conv2D(filters=num_filters,
kernel_size=(bi_filter_size, EMBEDDING_DIM),
padding='valid',
activation='relu',
data_format='channels_first',
name='q1_bigram_conv')(emb_1)
conv_1_tri = Conv2D(filters=num_filters,
kernel_size=(tri_filter_size, EMBEDDING_DIM),
padding='valid',
activation='relu',
data_format='channels_first',
name='q1_trigram_conv')(emb_1)
# Remove channel dimension before max-pooling operation
bi_out_timesteps = MAX_SENT_LEN - bi_filter_size + 1
tri_out_timesteps = MAX_SENT_LEN - tri_filter_size + 1
conv_1_bi = Reshape(target_shape=(bi_out_timesteps, num_filters),
name='q1_bigram_conv_reshape')(conv_1_bi) # (MAX_SENT_LEN - bi_filter_size + 1, num_filters)
conv_1_tri = Reshape(target_shape=(tri_out_timesteps, num_filters),
name='q1_trigram_conv_reshape')(conv_1_tri)
# Max-pooling Layer
# Pool across timesteps to get 1 feature per filter, i.e., each filter captures 1 feature about the sentence/question
max_pool_1_bi = MaxPooling1D(pool_size = bi_out_timesteps,
name='q1_bigram_maxpool')(conv_1_bi)
max_pool_1_tri = MaxPooling1D(pool_size = tri_out_timesteps,
name='q1_trigram_maxpool')(conv_1_tri)
# Merge the features learnt by bi and tri filters
merged_1 = Concatenate(name='q1_maxpool_concat')([max_pool_1_bi, max_pool_1_tri])
# Inputs dropped out randomly so that there is no heavy dependence on specific features for prediction
dropout_1 = Dropout(rate=0.2,
name='q1_dropout')(merged_1)
flatten_1 = Flatten(name='q1_flatten')(dropout_1)
###Output
_____no_output_____
###Markdown
Question 2 Computational Graph
###Code
input_2 = Input(shape=(MAX_SENT_LEN, ), name='q2_input')
emb_2 = emb_look_up(input_2)
# Need to be reshaped because the CONV layer assumes 1 dimnesion as num of channels
emb_2 = Reshape((1, MAX_SENT_LEN, EMBEDDING_DIM),
name='q2_embedding_reshape')(emb_2)
# Convolutional Layers
conv_2_bi = Conv2D(filters=num_filters,
kernel_size=(bi_filter_size, EMBEDDING_DIM),
padding='valid',
activation='relu',
data_format='channels_first',
name='q2_bigram_conv')(emb_2)
conv_2_tri = Conv2D(filters=num_filters,
kernel_size=(tri_filter_size, EMBEDDING_DIM),
padding='valid',
activation='relu',
data_format='channels_first',
name='q2_trigram_conv')(emb_2)
# Remove channel dimension before max-pooling operation
conv_2_bi = Reshape((bi_out_timesteps, num_filters),
name='q2_bigram_conv_reshape')(conv_2_bi) # (MAX_SENT_LEN - bi_filter_size + 1, num_filters)
conv_2_tri = Reshape((tri_out_timesteps, num_filters),
name='q2_trigram_conv_reshape')(conv_2_tri)
# Max-pooling Layer
# Pool across timesteps to get 1 feature per filter, i.e., each filter captures 1 feature about the sentence/question
max_pool_2_bi = MaxPooling1D(pool_size = bi_out_timesteps,
name='q2_bigram_maxpool')(conv_2_bi)
max_pool_2_tri = MaxPooling1D(pool_size = tri_out_timesteps,
name='q2_trigram_maxpool')(conv_2_tri)
# Merge the features learnt by bi and tri filters
merged_2 = Concatenate(name='q2_maxpool_flatten')([max_pool_2_bi, max_pool_2_tri])
# Inputs dropped out randomly so that there is no heavy dependence on specific features for prediction
dropout_2 = Dropout(rate=0.2,
name='q2_dropout')(merged_2)
flatten_2 = Flatten(name='q2_flatten')(dropout_2)
###Output
_____no_output_____
###Markdown
Merge outputs of Q1 and Q2
###Code
# With batch-normalization, the output of a previous layer is mu-sigma normalized,
# before it is fed into the next layer.
# For feed-forward networks, batch-normalization is carried out
# after/before applying RELU activation (?)
# https://www.reddit.com/r/MachineLearning/comments/67gonq/d_batch_normalization_before_or_after_relu/
merged = Concatenate(name='q1_q2_concat')([flatten_1, flatten_2])
# Dense layers
dense_1 = Dense(units=10,
name='q1_q2_dense')(merged)
bn_1 = BatchNormalization(name='batchnorm')(dense_1)
relu_1 = Activation(activation='relu',
name='relu_activation')(bn_1)
dense_1_dropout = Dropout(0.2,
name='dense_dropout')(relu_1)
output_prob = Dense(units=1,
activation='sigmoid',
name='output_layer')(dense_1_dropout)
model = Model(inputs=[input_1, input_2], outputs=output_prob, name='text_pair_cnn')
model.summary()
plot_model(model, to_file='text_pair_cnn_classifier.png', show_layer_names=True)
Image('text_pair_cnn_classifier.png')
###Output
_____no_output_____
###Markdown
Training the model
###Code
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.fit(x = [X_train_q1, X_train_q2],
y = y_train,
batch_size=BATCH_SIZE,
epochs=N_EPOCHS,
validation_data=([X_test_q1, X_test_q2], y_test))
###Output
Epoch 1/10
2096/2096 [==============================] - 16s 6ms/step - loss: 0.6577 - accuracy: 0.6150 - val_loss: 0.6190 - val_accuracy: 0.6611
Epoch 2/10
2096/2096 [==============================] - 13s 6ms/step - loss: 0.6163 - accuracy: 0.6626 - val_loss: 0.6076 - val_accuracy: 0.6651
Epoch 3/10
2096/2096 [==============================] - 14s 6ms/step - loss: 0.6015 - accuracy: 0.6746 - val_loss: 0.5966 - val_accuracy: 0.6778
Epoch 4/10
2096/2096 [==============================] - 15s 7ms/step - loss: 0.5885 - accuracy: 0.6843 - val_loss: 0.5891 - val_accuracy: 0.6889
Epoch 5/10
2096/2096 [==============================] - 17s 8ms/step - loss: 0.5835 - accuracy: 0.6906 - val_loss: 0.5880 - val_accuracy: 0.6861
Epoch 6/10
2096/2096 [==============================] - 16s 8ms/step - loss: 0.5751 - accuracy: 0.7002 - val_loss: 0.5869 - val_accuracy: 0.6853
Epoch 7/10
2096/2096 [==============================] - 16s 8ms/step - loss: 0.5702 - accuracy: 0.7031 - val_loss: 0.5840 - val_accuracy: 0.6880
Epoch 8/10
2096/2096 [==============================] - 14s 7ms/step - loss: 0.5653 - accuracy: 0.7070 - val_loss: 0.5810 - val_accuracy: 0.6864
Epoch 9/10
2096/2096 [==============================] - 13s 6ms/step - loss: 0.5610 - accuracy: 0.7100 - val_loss: 0.5833 - val_accuracy: 0.6851
Epoch 10/10
2096/2096 [==============================] - 13s 6ms/step - loss: 0.5573 - accuracy: 0.7116 - val_loss: 0.5831 - val_accuracy: 0.6888
###Markdown
Using the model
###Code
ques_pairs = [
("How to make a youtube video?", "How can I make a youtube video?"),
("what is a car?", "where can I find some green apples?"),
]
def txt_to_indx(txt):
ret = [txt] # to make it a list
ret = tokenizer.texts_to_sequences(ret)
ret = pad_sequences(ret, maxlen=MAX_SENT_LEN, padding='post', truncating='post')
return ret
txt_to_indx("text to index function will do this")
indx_ques_pairs = [(txt_to_indx(i[0]), txt_to_indx(i[1])) for i in ques_pairs]
for i in indx_ques_pairs:
print(model.predict(i)[0][0] > 0.6)
###Output
True
False
|
.ipynb_checkpoints/PREDICTION-MODEL-1-checkpoint.ipynb | ###Markdown
Import Necessary Packages
###Code
import numpy as np
import pandas as pd
import datetime
np.random.seed(1337) # for reproducibility
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
from dbn.tensorflow import SupervisedDBNRegression
###Output
_____no_output_____
###Markdown
Define Model Settings
###Code
RBM_EPOCHS = 5
DBN_EPOCHS = 150
RBM_LEARNING_RATE = 0.01
DBN_LEARNING_RATE = 0.01
HIDDEN_LAYER_STRUCT = [20, 50, 100]
ACTIVE_FUNC = 'relu'
BATCH_SIZE = 28
###Output
_____no_output_____
###Markdown
Define Directory, Road, and Year
###Code
# Read the dataset
ROAD = "Vicente Cruz"
YEAR = "2015"
EXT = ".csv"
DATASET_DIVISION = "seasonWet"
DIR = "../../../datasets/Thesis Datasets/"
'''''''Training dataset'''''''
WP = True
WEEKDAY = False
CONNECTED_ROADS = True
trafficDT = "recon_traffic" #orig_traffic recon_traffic
featureEngineering = "Rolling and Expanding" #Rolling Expanding Rolling and Expanding
timeFE = "yesterday" #today yesterday
timeConnected = "today"
ROLLING_WINDOW = 3
EXPANDING_WINDOW = 3
RECON_SHIFT = 96
# RECON_FE_WINDOW = 48
def addWorkingPeakFeatures(df):
result_df = df.copy()
# Converting the index as date
result_df.index = pd.to_datetime(result_df.index)
# Create column work_day
result_df['work_day'] = ((result_df.index.dayofweek) < 5).astype(int)
# Consider non-working holiday
    if DATASET_DIVISION != "seasonWet":
# Jan
result_df.loc['2015-01-01', 'work_day'] = 0
result_df.loc['2015-01-02', 'work_day'] = 0
# Feb
result_df.loc['2015-02-19', 'work_day'] = 0
result_df.loc['2015-02-25', 'work_day'] = 0
# Apr
result_df.loc['2015-04-02', 'work_day'] = 0
result_df.loc['2015-04-03', 'work_day'] = 0
result_df.loc['2015-04-09', 'work_day'] = 0
# May
result_df.loc['2015-05-01', 'work_day'] = 0
# Jun
result_df.loc['2015-06-12', 'work_day'] = 0
result_df.loc['2015-06-24', 'work_day'] = 0
# Jul
result_df.loc['2015-07-17', 'work_day'] = 0
# Aug
result_df.loc['2015-08-21', 'work_day'] = 0
result_df.loc['2015-08-31', 'work_day'] = 0
# Sep
result_df.loc['2015-08-25', 'work_day'] = 0
    if DATASET_DIVISION != "seasonWet":
# Nov
result_df.loc['2015-11-30', 'work_day'] = 0
# Dec
result_df.loc['2015-12-24', 'work_day'] = 0
result_df.loc['2015-12-25', 'work_day'] = 0
result_df.loc['2015-12-30', 'work_day'] = 0
result_df.loc['2015-12-31', 'work_day'] = 0
# Consider class suspension
    if DATASET_DIVISION != "seasonWet":
# Jan
result_df.loc['2015-01-08', 'work_day'] = 0
result_df.loc['2015-01-09', 'work_day'] = 0
result_df.loc['2015-01-14', 'work_day'] = 0
result_df.loc['2015-01-15', 'work_day'] = 0
result_df.loc['2015-01-16', 'work_day'] = 0
result_df.loc['2015-01-17', 'work_day'] = 0
# Jul
result_df.loc['2015-07-06', 'work_day'] = 0
result_df.loc['2015-07-08', 'work_day'] = 0
result_df.loc['2015-07-09', 'work_day'] = 0
result_df.loc['2015-07-10', 'work_day'] = 0
# Aug
result_df.loc['2015-08-10', 'work_day'] = 0
result_df.loc['2015-08-11', 'work_day'] = 0
# Sep
result_df.loc['2015-09-10', 'work_day'] = 0
# Oct
result_df.loc['2015-10-02', 'work_day'] = 0
result_df.loc['2015-10-19', 'work_day'] = 0
    if DATASET_DIVISION != "seasonWet":
# Nov
result_df.loc['2015-11-16', 'work_day'] = 0
result_df.loc['2015-11-17', 'work_day'] = 0
result_df.loc['2015-11-18', 'work_day'] = 0
result_df.loc['2015-11-19', 'work_day'] = 0
result_df.loc['2015-11-20', 'work_day'] = 0
# Dec
result_df.loc['2015-12-16', 'work_day'] = 0
result_df.loc['2015-12-18', 'work_day'] = 0
result_df['peak_hour'] = 0
# Set morning peak hour
start = datetime.time(7,0,0)
end = datetime.time(10,0,0)
result_df.loc[result_df.between_time(start, end).index, 'peak_hour'] = 1
# Set afternoon peak hour
start = datetime.time(16,0,0)
end = datetime.time(19,0,0)
result_df.loc[result_df.between_time(start, end).index, 'peak_hour'] = 1
result_df
return result_df
def reconstructDT(df, typeDataset='traffic', trafficFeatureNeeded=[]):
result_df = df.copy()
# Converting the index as date
result_df.index = pd.to_datetime(result_df.index, format='%d/%m/%Y %H:%M')
result_df['month'] = result_df.index.month
result_df['day'] = result_df.index.day
result_df['hour'] = result_df.index.hour
result_df['min'] = result_df.index.minute
result_df['dayOfWeek'] = result_df.index.dayofweek
if typeDataset == 'traffic':
for f in trafficFeatureNeeded:
result_df[f + '_' + str(RECON_SHIFT)] = result_df[f].shift(RECON_SHIFT)
result_df = result_df.iloc[RECON_SHIFT:, :]
for f in range(len(result_df.columns)):
result_df[result_df.columns[f]] = normalize(result_df[result_df.columns[f]])
return result_df
def shiftDTForReconstructed(df, shift):
return df.iloc[shift:, :]
def getNeededFeatures(columns, arrFeaturesNeed, featureEngineering="Original"):
to_remove = []
if len(arrFeaturesNeed) == 0: #all features aren't needed
to_remove += range(0, len(columns))
else:
if featureEngineering == "Original":
compareTo = " "
elif featureEngineering == "Rolling" or featureEngineering == "Expanding":
compareTo = "_"
for f in arrFeaturesNeed:
for c in range(0, len(columns)):
if f not in columns[c].split(compareTo)[0] and columns[c].split(compareTo)[0] not in arrFeaturesNeed:
to_remove.append(c)
return to_remove
def normalize(data):
    y = pd.to_numeric(data)
    # .values.reshape avoids the deprecated Series.reshape (the recorded FutureWarning below came from the old call)
    y = np.array(y.values.reshape(-1, 1))
    scaler = MinMaxScaler()
    y = scaler.fit_transform(y)
    y = y.reshape(1, -1)[0]
    return y
###Output
_____no_output_____
###Markdown
Preparing Traffic Dataset Importing the original traffic data (without new features)
###Code
TRAFFIC_DIR = DIR + "mmda/"
TRAFFIC_FILENAME = "mmda_" + ROAD + "_" + YEAR + "_" + DATASET_DIVISION
orig_traffic = pd.read_csv(TRAFFIC_DIR + TRAFFIC_FILENAME + EXT, skipinitialspace=True)
orig_traffic = orig_traffic.fillna(0)
#Converting index to date and time, and removing 'dt' column
orig_traffic.index = pd.to_datetime(orig_traffic.dt, format='%d/%m/%Y %H:%M')
cols_to_remove = [0]
cols_to_remove = getNeededFeatures(orig_traffic.columns, ["statusN"])
orig_traffic.drop(orig_traffic.columns[[cols_to_remove]], axis=1, inplace=True)
orig_traffic.head()
if WEEKDAY:
orig_traffic = orig_traffic[((orig_traffic.index.dayofweek) < 5)]
orig_traffic.head()
TRAFFIC_DIR = DIR + "mmda/Rolling/" + DATASET_DIVISION + "/"
TRAFFIC_FILENAME = "eng_win" + str(ROLLING_WINDOW) + "_mmda_" + ROAD + "_" + YEAR + "_" + DATASET_DIVISION
rolling_traffic = pd.read_csv(TRAFFIC_DIR + TRAFFIC_FILENAME + EXT, skipinitialspace=True)
cols_to_remove = [0, 1, 2]
cols_to_remove += getNeededFeatures(rolling_traffic.columns, ["statusN"], "Rolling")
rolling_traffic.index = pd.to_datetime(rolling_traffic.dt, format='%Y-%m-%d %H:%M')
rolling_traffic.drop(rolling_traffic.columns[[cols_to_remove]], axis=1, inplace=True)
for f in range(len(rolling_traffic.columns)):
rolling_traffic[rolling_traffic.columns[f]] = normalize(rolling_traffic[rolling_traffic.columns[f]])
if WEEKDAY:
rolling_traffic = rolling_traffic[((rolling_traffic.index.dayofweek) < 5)]
rolling_traffic.head()
TRAFFIC_DIR = DIR + "mmda/Expanding/" + DATASET_DIVISION + "/"
TRAFFIC_FILENAME = "eng_win" + str(EXPANDING_WINDOW) + "_mmda_" + ROAD + "_" + YEAR + "_" + DATASET_DIVISION
expanding_traffic = pd.read_csv(TRAFFIC_DIR + TRAFFIC_FILENAME + EXT, skipinitialspace=True)
cols_to_remove = [0, 1, 2, 5]
cols_to_remove += getNeededFeatures(expanding_traffic.columns, ["statusN"], "Rolling")
expanding_traffic.index = pd.to_datetime(expanding_traffic.dt, format='%d/%m/%Y %H:%M')
expanding_traffic.drop(expanding_traffic.columns[[cols_to_remove]], axis=1, inplace=True)
for f in range(len(expanding_traffic.columns)):
expanding_traffic[expanding_traffic.columns[f]] = normalize(expanding_traffic[expanding_traffic.columns[f]])
if WEEKDAY:
expanding_traffic = expanding_traffic[((expanding_traffic.index.dayofweek) < 5)]
expanding_traffic.head()
recon_traffic = reconstructDT(orig_traffic, 'traffic', ['statusN'])
recon_traffic.head()
connected_roads = []
CONNECTED_1 = ["Antipolo", "Gov. Forbes - Lacson"]
for c in CONNECTED_1:
TRAFFIC_DIR = DIR + "mmda/"
TRAFFIC_FILENAME = "mmda_" + c + "_" + YEAR + "_" + DATASET_DIVISION
temp = pd.read_csv(TRAFFIC_DIR + TRAFFIC_FILENAME + EXT, skipinitialspace=True)
temp = temp.fillna(0)
#Converting index to date and time, and removing 'dt' column
temp.index = pd.to_datetime(temp.dt, format='%d/%m/%Y %H:%M')
cols_to_remove = [0]
cols_to_remove = getNeededFeatures(temp.columns, ["statusN"])
temp.drop(temp.columns[[cols_to_remove]], axis=1, inplace=True)
if WEEKDAY:
temp = temp[((temp.index.dayofweek) < 5)]
for f in range(len(temp.columns)):
temp[temp.columns[f]] = normalize(temp[temp.columns[f]])
temp = temp.rename(columns={temp.columns[f]: temp.columns[f] +"(" + c + ")"})
connected_roads.append(temp)
connected_roads[1]
###Output
c:\users\ronnie nieva\anaconda3\envs\tensorflow\lib\site-packages\ipykernel_launcher.py:3: FutureWarning: reshape is deprecated and will raise in a subsequent release. Please use .values.reshape(...) instead
This is separate from the ipykernel package so we can avoid doing imports until
###Markdown
Merging datasets
###Code
if trafficDT == "orig_traffic":
arrDT = [orig_traffic]
for c in connected_roads:
arrDT.append(c)
elif trafficDT == "recon_traffic":
arrDT = [recon_traffic]
print("TimeConnected = " + timeConnected)
for c in connected_roads:
if timeConnected == "today":
startIndex = np.absolute(len(arrDT[0])-len(c))
endIndex = len(c)
elif timeConnected == "yesterday":
startIndex = 0
endIndex = len(rolling_traffic) - RECON_SHIFT
c = c.iloc[startIndex:endIndex, :]
c.index = arrDT[0].index
arrDT.append(c)
print(str(startIndex) + " " + str(endIndex))
if featureEngineering != "":
print("Adding Feature Engineering")
print("TimeConnected = " + timeFE)
if timeFE == "today":
startIndex = np.absolute(len(arrDT[0])-len(rolling_traffic))
endIndex = len(rolling_traffic)
elif timeFE == "yesterday":
startIndex = 0
endIndex = len(rolling_traffic) - RECON_SHIFT
if featureEngineering == "Rolling":
temp = rolling_traffic.iloc[startIndex:endIndex, :]
arrDT.append(temp)
elif featureEngineering == "Expanding":
temp = expanding_traffic.iloc[startIndex:endIndex, :]
arrDT.append(temp)
elif featureEngineering == "Rolling and Expanding":
print(str(startIndex) + " " + str(endIndex))
#Rolling
temp = rolling_traffic.iloc[startIndex:endIndex, :]
temp.index = arrDT[0].index
arrDT.append(temp)
#Expanding
temp = expanding_traffic.iloc[startIndex:endIndex, :]
temp.index = arrDT[0].index
arrDT.append(temp)
merged_dataset = pd.concat(arrDT, axis=1)
if "Rolling" in featureEngineering:
merged_dataset = merged_dataset.iloc[ROLLING_WINDOW+1:, :]
merged_dataset
###Output
TimeConnected = today
96 14688
Adding Feature Engineering
TimeConnected = yesterday
0 14592
###Markdown
Adding Working / Peak Features
###Code
if WP:
merged_dataset = addWorkingPeakFeatures(merged_dataset)
print("Adding working / peak days")
###Output
Adding working / peak days
###Markdown
Preparing Training dataset Merge Original (and Rolling and Expanding)
###Code
# To-be Predicted variable
Y = merged_dataset.statusN
Y = Y.fillna(0)
# Training Data
X = merged_dataset
X = X.drop(X.columns[[0]], axis=1)
# Splitting data
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.67, shuffle=False)
X_train = np.array(X_train)
X_test = np.array(X_test)
Y_train = np.array(Y_train)
Y_test = np.array(Y_test)
# Data scaling
# min_max_scaler = MinMaxScaler()
# X_train = min_max_scaler.fit_transform(X_train)
#Print training and testing data
pd.concat([X, Y.to_frame()], axis=1).head()
###Output
_____no_output_____
###Markdown
Training Model
###Code
# Training
regressor = SupervisedDBNRegression(hidden_layers_structure=HIDDEN_LAYER_STRUCT,
learning_rate_rbm=RBM_LEARNING_RATE,
learning_rate=DBN_LEARNING_RATE,
n_epochs_rbm=RBM_EPOCHS,
n_iter_backprop=DBN_EPOCHS,
batch_size=BATCH_SIZE,
activation_function=ACTIVE_FUNC)
regressor.fit(X_train, Y_train)
#To check RBM Loss Errors:
rbm_error = regressor.unsupervised_dbn.rbm_layers[0].rbm_loss_error
#To check DBN Loss Errors
dbn_error = regressor.dbn_loss_error
###Output
_____no_output_____
###Markdown
Testing Model
###Code
# Test
min_max_scaler = MinMaxScaler()
X_test = min_max_scaler.fit_transform(X_test)
Y_pred = regressor.predict(X_test)
r2score = r2_score(Y_test, Y_pred)
rmse = np.sqrt(mean_squared_error(Y_test, Y_pred))
mae = mean_absolute_error(Y_test, Y_pred)
print('Done.\nR-squared: %.3f\nRMSE: %.3f \nMAE: %.3f' % (r2score, rmse, mae))
print(len(Y_pred))
temp = []
for i in range(len(Y_pred)):
temp.append(Y_pred[i][0])
d = {'Predicted': temp, 'Actual': Y_test}
df = pd.DataFrame(data=d)
df.head()
# Save the model
regressor.save('models/pm1_' + ROAD + '_' + YEAR + '.pkl')
###Output
_____no_output_____
###Markdown
Results and Analysis below
###Code
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Printing Predicted and Actual Results
###Code
startIndex = merged_dataset.shape[0] - Y_pred.shape[0]
dt = merged_dataset.index[startIndex:,]
temp = []
for i in range(len(Y_pred)):
temp.append(Y_pred[i][0])
d = {'Predicted': temp, 'Actual': Y_test, 'dt': dt}
df = pd.DataFrame(data=d)
df.head()
df.tail()
###Output
_____no_output_____
###Markdown
Visualize Actual and Predicted Traffic
###Code
line1 = df.Actual
line2 = df.Predicted
x = range(0, RBM_EPOCHS * len(HIDDEN_LAYER_STRUCT))
plt.plot(line1, c='red')
plt.plot(line2, c='blue')
plt.xlabel("Date")
plt.ylabel("Speed")
plt.show()
df.to_csv("output/pm1_" + ROAD + '_' + YEAR + EXT, index=False, encoding='utf-8')
###Output
_____no_output_____
###Markdown
Visualize trend of loss of RBM and DBN Training
###Code
line1 = rbm_error
line2 = dbn_error
x = range(0, RBM_EPOCHS * len(HIDDEN_LAYER_STRUCT))
plt.plot(range(0, RBM_EPOCHS * len(HIDDEN_LAYER_STRUCT)), line1, c='red')
plt.xticks(x)
plt.xlabel("Iteration")
plt.ylabel("Error")
plt.show()
plt.plot(range(DBN_EPOCHS), line2, c='blue')
plt.xticks(x)
plt.xlabel("Iteration")
plt.ylabel("Error")
plt.show()
plt.plot(range(0, RBM_EPOCHS * len(HIDDEN_LAYER_STRUCT)), line1, c='red')
plt.plot(range(DBN_EPOCHS), line2, c='blue')
plt.xticks(x)
plt.xlabel("Iteration")
plt.ylabel("Error")
plt.show()
###Output
_____no_output_____ |
_sources/contents/Python/Data Manipulation.ipynb | ###Markdown
Data Manipulation Questions ```{admonition} Problem: [GOOGLE] Score Bucketization:class: dropdown, tipLet's say you're given a list of standardized test scores from high schoolers from grades $9$ to $12$.Given the dataset, write code in Pandas to return the cumulative percentage of students that received scores within the buckets of $<50, <75, <90, <100$.Example Input:| user_id | grade | test score ||---------|-------|------------|| 1 | 10 | 85 || 2 | 10 | 60 || 3 | 11 | 90 || 4 | 10 | 30 || 5 | 11 | 99 |Example Output:| grade | test score | percentage ||-------|------------|------------|| 10 | <50 | 30% || 10 | <75 | 65% || 10 | <90 | 96% || 10 | <100 | 99% || 11 | <50 | 15% || 11 | <75 | 50% |```
###Code
import pandas as pd
import numpy as np
df = pd.DataFrame([[1,10,85],[2,10,60],[3,11,90],[4,10,30],[5,11,99]], columns = ["user_id","grade","test score"])
df["<50"] = np.where(df["test score"]<50,1,0)
df["<75"] = np.where(df["test score"]<75,1,0)
df["<90"] = np.where(df["test score"]<90,1,0)
df["<100"] = np.where(df["test score"]<100,1,0)
df = df.groupby(["grade"])[["<50","<75","<90","<100"]].sum().reset_index()
df = df.melt(id_vars=["grade"],var_name="test score",value_name="count")
df["grp_ttl"] = df.groupby("grade")["count"].transform('max')
df["percentage"] = 100*df["count"]/df["grp_ttl"]
df = (df[["grade","test score","percentage"]].copy()).sort_values(["grade","percentage"],ascending=True)
df["percentage"] = df.percentage.astype(int).astype(str)
df["percentage"] = df["percentage"] + "%"
df.head(10)
###Output
_____no_output_____
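###Markdown
A more direct (hypothetical) variant of the same idea, dividing each bucket count by the group size instead of melting and re-aggregating; `scores_df` is just a stand-in name for the same example data:
```python
import pandas as pd

scores_df = pd.DataFrame([[1, 10, 85], [2, 10, 60], [3, 11, 90], [4, 10, 30], [5, 11, 99]],
                         columns=["user_id", "grade", "test score"])

rows = []
for grade, scores in scores_df.groupby("grade")["test score"]:
    for cutoff in (50, 75, 90, 100):
        pct = 100 * (scores < cutoff).mean()
        rows.append({"grade": grade, "test score": f"<{cutoff}", "percentage": f"{pct:.0f}%"})

pd.DataFrame(rows)
```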
###Markdown
```{admonition} Problem: [NEXTDOOR] Complete Addresses:class: dropdown, tipYou’re given two dataframes. One contains information about addresses and the other contains relationships between various cities and states:df_addressesaddress4860 Sunset Boulevard, San Francisco, 941053055 Paradise Lane, Salt Lake City, 84103682 Main Street, Detroit, 482049001 Cascade Road, Kansas City, 641025853 Leon Street, Tampa, 33605df_citiescity stateSalt Lake City UtahKansas City MissouriDetroit MichiganTampa FloridaSan Francisco CaliforniaWrite a function complete_address to create a single dataframe with complete addresses in the format of street, city, state, zipcode.Input:import pandas as pdaddresses = {"address": ["4860 Sunset Boulevard, San Francisco, 94105", "3055 Paradise Lane, Salt Lake City, 84103", "682 Main Street, Detroit, 48204", "9001 Cascade Road, Kansas City, 64102", "5853 Leon Street, Tampa, 33605"]}cities = {"city": ["Salt Lake City", "Kansas City", "Detroit", "Tampa", "San Francisco"], "state": ["Utah", "Missouri", "Michigan", "Florida", "California"]}df_addresses = pd.DataFrame(addresses)df_cities = pd.DataFrame(cities)Output:def complete_address(df_addresses,df_cities) ->address4860 Sunset Boulevard, San Francisco, California, 941053055 Paradise Lane, Salt Lake City, Utah, 84103682 Main Street, Detroit, Michigan, 482049001 Cascade Road, Kansas City, Missouri, 641025853 Leon Street, Tampa, Florida, 33605```
###Code
import pandas as pd
addresses = {"address": ["4860 Sunset Boulevard, San Francisco, 94105", "3055 Paradise Lane, Salt Lake City, 84103", "682 Main Street, Detroit, 48204", "9001 Cascade Road, Kansas City, 64102", "5853 Leon Street, Tampa, 33605"]}
cities = {"city": ["Salt Lake City", "Kansas City", "Detroit", "Tampa", "San Francisco"], "state": ["Utah", "Missouri", "Michigan", "Florida", "California"]}
df_addresses = pd.DataFrame(addresses)
df_cities = pd.DataFrame(cities)
def complete_address(df_addresses,df_cities):
temp = df_addresses['address'].str.split(", ", n = 4, expand = True)
temp.columns = ['street','city','zip']
temp = temp.merge(df_cities, on=["city"], how="inner")
temp["final"] = temp[["street","city","state","zip"]].apply(lambda x: (", ").join(x), axis = 1)
temp = temp[["final"]].copy()
temp.columns = ["address"]
return temp
complete_address(df_addresses,df_cities)
###Output
_____no_output_____ |
src/preprocess/LA (core).ipynb | ###Markdown
Neighborhoods
###Code
sql = """INSERT INTO spatial_groups (city, core_geom, core_id, lower_ids, spatial_name, approx_geom)
SELECT a.city, a.core_geom, a.core_id, array_agg(a.core_id), 'core', ST_multi(a.core_geom)
FROM spatial_groups a
where a.city='{city}' and a.spatial_name = 'ego'
GROUP BY a.core_id, a.core_geom, a.city;
""".format(city=CITY, tempname=CITY.lower())
result = engine.execute(text(sql))
###Output
_____no_output_____
###Markdown
Land use ref: http://dts.edatatrace.com/dts3/content/doc/whelp/mergedProjects/dts2tt/mergedProjects/dts2ttcs/land_use_la.htm
###Code
land_gdf = gpd.read_file('zip://../../data/LA/land_use/Parcels 2014 Tax Roll.zip')
#land_gdf = land_gdf[(~(land_gdf['geometry'].isnull())) & (~(land_gdf['UseCode'].isnull()))]
land_gdf = land_gdf.drop_duplicates(subset=['ain'])
#land_gdf = land_gdf.rename(columns={'SQFTmain': 'sqftmain', 'UseCode': 'usecode', 'YearBuilt': 'yearbuilt', 'Roll_totLa': 'value'})
#land_gdf = land_gdf[['AssessorID', 'sqftmain', 'usecode', 'geometry', 'value']]
land_gdf = land_gdf[['ain', 'geometry']]
land_gdf.head()
zip_file = ZipFile('../../data/LA/land_use/parcels_data_2013.csv.zip')
land_2013_df = pd.read_csv(zip_file.open('parcels_data_2013.csv'), dtype={'AIN': str})
land_2013_df = land_2013_df.rename(columns={'SQFTmain': 'sqftmain',
'AIN': 'ain',
'PropertyUseCode': 'usecode',
'YearBuilt': 'yearbuilt', 'TotalValue': 'value'})
land_2013_df = land_2013_df[['ain', 'sqftmain', 'usecode', 'value']]
land_2013_df.head()
land_2013_df = land_2013_df.drop_duplicates(subset=['ain'])
land_gdf = pd.merge(land_gdf, land_2013_df, on='ain')
print(len(land_gdf))
wrong_pids = ['4211017901', '4211017804', '4218005900', '4221031008',
'4218020900', '4224013902', '2109001903', '2678027900',
'2679025900', '2680020901', '2687017900', '2688030900',
'2707003011', '2708010900', '2726009901', '2726012900',
'2746010042', '2746013901', '2761001906', '2779016900',
'2779047900', '2784003801', '2779010900', '2708021001',
'2111029903', '4211016902', '4211015904', '4211007916',
'2786002902', '2727021907', '4211014800', '4211017805',
'4218005902', '2108025900', '2678020900', '2687023012',
'2687020903', '2688024901', '2688031900', '2786002901',
'2708010013', '2708020005', '2726010900', '2761030904',
'2779017900', '2780005900', '2138014904', '2783028902',
'4211014902', '4211017807', '4224013901', '2108026900',
'2109001902', '2113006900', '2677016900', '2679016901',
'2685019900', '2689016901', '2688043900', '2786002813',
'2726014900', '2761032900', '2770018808', '2780004900',
'2681011902', '2111029902', '2779005900', '4218005901',
'2680018902', '2707003005', '2708020001', '2707002004',
'2761001907', '4211016901', '4211015902', '4211007917',
'2148032902', '4211007919', '4211014904', '4211017900',
'4211017803', '4211014901', '2108031900', '2685013032',
'2685013031', '2686003008', '2685023030', '2685018900',
'2685013900', '2689017900', '2708020012', '2746005900',
'2748001803', '2761031902', '2761040901', '2770018904',
'2770018903', '2779010901', '2779011905', '2779020905',
'2111029901', '4221022176', '2761001814', '4211007012',
'4224013900', '2783028801', '2689019900', '2205008901',
'2231018901', '2225010902', '2226017901', '2231002909',
'2231017900', '2205007900', '4211017901', '4211017804', '4218005900', '4221031008',
'4218020900', '4224013902', '5409013910', '5410015826',
'2109001903', '2678027900', '2679025900', '2680020901',
'2687017900', '2688030900', '2707003011', '2708010900',
'2726009901', '2726012900', '2746010042', '2746013901',
'2761001906', '2779016900', '2779047900', '2784003801',
'5173021811', '5173020911', '5173023900', '5173020903',
'2779010900', '2708021001', '5170011803', '2111029903',
'4211016902', '4211015904', '4211007916', '5172014806',
'5172014901', '2786002902', '2727021907', '4211014800',
'4211017805', '4218005902', '5409013905', '5409013906',
'5409015922', '5409014904', '5409021903', '5409019903',
'2108025900', '2678020900', '2687023012', '2687020903',
'2688024901', '2688031900', '2786002901', '2708010013',
'2708020005', '2726010900', '2761030904', '2779017900',
'2780005900', '5171024910', '5173020902', '5173020901',
'5173023901', '2138014904', '5164004804', '5172013010',
'5172013002', '5164004902', '2783028902', '4211014902',
'4211017807', '4224013901', '5409020910', '5409020911',
'5447017902', '2108026900', '2109001902', '2113006900',
'2677016900', '2679016901', '2685019900', '2689016901',
'2688043900', '2786002813', '2726014900', '2761032900',
'2770018808', '2780004900', '5171024010', '2681011902',
'2111029902', '2779005900', '4218005901', '2680018902',
'2707003005', '2708020001', '2707002004', '5166001901',
'5164017906', '2761001907', '5173022902', '4211016901',
'4211015902', '4211007917', '5172013803', '5172013901',
'2148032902', '4211007919', '4211014904', '5171015901',
'4211017900', '4211017803', '4211014901', '5409014905',
'2108031900', '2685013032', '2685013031', '2686003008',
'2685023030', '2685018900', '2685013900', '2689017900',
'2708020012', '2746005900', '2748001803', '2761031902',
'2761040901', '2770018904', '2770018903', '2779010901',
'2779011905', '2779020905', '5170010805', '5164004901',
'5173021902', '5173023902', '5173020810', '5173021810',
'5173021904', '5173023805', '2111029901', '4221022176',
'5171014808', '2761001814', '5173022808', '5173022903',
'5173022901', '4211007012', '4224013900', '2783028801',
'2689019900', '5173024900', '5166001900', '2205008901',
'2231018901', '2225010902', '2226017901', '2231002909',
'2231017900', '2205007900', '5447032900', '2368001030', '2366035901', '2366036905',
'2367015900', '2367018900', '2368019900', '2368023900',
'2375018903', '2126038901', '2134016901', '2134024904',
'2136015905', '2136017904', '2137013900', '2137014900',
'2137015902', '2137012900', '2138006901', '2138022901',
'2123022901', '5435038027', '5435038902', '5435039903',
'5437028903', '5437028906', '5437034908', '5437028907',
'5437035901', '5437034909', '5437034904', '5442010901',
'5442010902', '5442002916', '5445011042', '5445005904',
'5445006901', '5445007900', '5445010903', '5445006905',
'5168023015', '5169029013', '5168016904', '5170010900',
'2424042901', '2138014906', '5593012909', '5168023902',
'5171015900', '5437035902', '5172013900', '5593001270',
'5442009902', '2126033900', '5593018907', '5410002900',
'2360002909', '2366033900', '2366033901', '2368007901',
'2375021903', '2126038005', '2127011904', '2128031901',
'2138006903', '2138011900', '2138014905', '5435039006',
'5415002900', '5435036900', '5437028904', '5442002915',
'5437028900', '5445006903', '5445007901', '5169029272',
'5169029012', '5169016902', '5581003017', '5581003021',
'5581004023', '5593001902', '5169028017', '5435038904',
'2360003913', '5593002916', '5445004001', '5445004900',
'5447027901', '5415003901', '5415003900', '2360014902',
'2366020903', '2366027902', '2375004900', '2128003901',
'2138011902', '2124018906', '5442010020', '5442002903',
'5445008908', '5445007902', '5169029010', '5169015901',
'2423030906', '2423031902', '2423035902', '5581003011',
'5593018900', '5169016011', '5169029902', '5168017900',
'5435037904', '2131010900', '5442009900', '2127001903',
'5410006900', '2360012900', '2366026902', '2367018901',
'2375019903', '2132009900', '2138017900', '2138017901',
'2138023900', '2138029902', '2123021900', '2124001905',
'5437029900', '5435039900', '5445011043', '5445012044',
'5437028902', '5437036902', '5445008907', '5168016002',
'5168016903', '5581003008', '5581004022', '5593001903',
'5171014900', '5172014900', '5593002907', '5168017902',
'5593001900', '5445004902', '5445005903', '5445002902',
'5593001901', '2127005900', '2368010902', '5173024900',
'2248029903', '2263020902', '2263016904', '2248001904',
'2248028906', '2263013902', '2263015902', '2263021902',
'2248001905', '2263024900', '2263014902', '2271001902',
'5173021811', '5173020911', '5173023900', '5171014809',
'5171015900', '5172013900', '5173023901', '5164004804',
'5164004902', '5173020910', '5173021903', '5173022902',
'5171014900', '5173020907', '5173021902', '5173023902',
'5172014900', '5173020810', '5173021810', '5173021904',
'5173023805', '5173022808', '5173022903', '5173022901',
'5173024900']
land_gdf = land_gdf[~land_gdf.ain.isin(wrong_pids)]
land_gdf['landuse'] = 'none'
land_gdf.loc[land_gdf['usecode'].str[:1] == '0', 'landuse'] = 'residential'
land_gdf.loc[(land_gdf['usecode'].str[:1].isin({'1', '2', '3', '4', '5', '7'})) & (~land_gdf['usecode'].isin({'7100', '8840'})), 'landuse'] = 'commercial'
land_gdf.loc[land_gdf['usecode'].str[:2].isin({'82', '83'}), 'landuse'] = 'commercial'
land_gdf.loc[land_gdf['usecode'].isin({'8820', '8000', '8821', '8822', '8823', '8824', '8825', '8826', '8827', '8828', '8829', '8830', '8831', '8832', '8833', '8834', '8835', '8855', '8861', '8862', '8863', '8864', '8865', '8872', '8873', '8874', '8800', '8890', '8900'}), 'landuse'] = 'commercial'
land_gdf.loc[land_gdf['usecode'].str[:1] == '6', 'landuse'] = 'recreational'
land_gdf.loc[land_gdf['usecode'].isin({'7100', '8840', '8840', '8841', '8842', '8843', '8844', '8845', '8847', '8848', '8849', '8851', '8852', '8853'}), 'landuse'] = 'recreational'
# Vacant
land_gdf.loc[land_gdf['usecode'].str[-1] == 'V', 'landuse'] = 'vacant'
#Fixes
land_gdf.loc[land_gdf['usecode'].isin({'8100', '8109', '810X', '8860', '8500'}), 'landuse'] = 'none'
land_gdf.loc[land_gdf.ain.isin(['7467032900', '7469018904', '7469030901', '7469030900',
'7563001901', '7563001900', '7563002908', '7563002914',
'6038013900', '5414020901', '5414020900', '2178007900',
'2184026901', '5666025907', '6049025901', '4432001903',
'4432005913', '4432005800', '4432006901', '4490011903',
'4493014900', '4422003900', '4432002918', '4432002924',
'4432002923', '4434001903', '5037027915', '5046013900',
'5160001901', '5512004903', '5630030908', '4370012902',
'4387002900', '5404014900', '5581011900', '5581012900',
'5581010900', '5581013901', '5583025900', '5593002908',
'5593002910', '5109022900', '5161004909', '2526004901',
'2526004900', '2552007902', '2569021900', '5029017905',
'4355012904', '5029020904', '2701001910', '4432002919',
'7412014900', '7560028900', '2384024900', '5029017927',
'5459004930', '7446001901', '7467025900', '7469028900',
'5414027900', '2177034902', '2177034901', '5666024901',
'2184005900', '4432001900', '4491006900', '4409001902',
'4409001900', '4422002900', '4432002920', '4432003904',
'5028004902', '5029017921', '5029017910', '2470002901',
'2546013903', '2545022900', '4387002904', '4387017906',
'4387016900', '5565005900', '5565004900', '5570021902',
'5415004900', '5415012902', '5577019901', '5581016900',
'5101002900', '2551012901', '2846003900', '7563002909',
'5029017926', '5029017919', '5593002906', '2701001912',
'4493014906', '5581007912', '4379029900', '4379028902',
'4431009901', '4432003906', '5211021901', '2872001900',
'4386003900', '4386005900', '2177034900', '7467031900',
'7469018902', '7563002913', '7563002906', '7412012900',
'2184028901', '2184026902', '2184028900', '5672021900',
'6049025900', '6070004900', '4432005915', '4432006902',
'4432005914', '4432001901', '4490010900', '4490011902',
'4490024900', '4491009900', '4434001900', '4432002922',
'4434001901', '4432002925', '4432002917', '5037028905',
'5037028902', '2470001905', '2470002900', '2545024901',
'5608001902', '5630030906', '5630030907', '4379027902',
'4379027903', '4379027900', '4380034902', '4387002905',
'5415012901', '5581017900', '5581014900', '5581026900',
'5149031900', '5161005916', '5869016900', '4434001902',
'5029017900', '5404015900', '5029020905', '2701001917',
'5415005906', '5593002905', '4357004901', '5577011902',
'7561025902', '5593002909', '2701002909', '4493015900',
'2382015900', '4432001902', '7422017900', '7469018903',
'7469029900', '7563001902', '7412012903', '7562021900',
'7563006902', '2180024900', '2184005901', '2184027901',
'2671001903', '4490011900', '4490017900', '4491001900',
'4409001904', '4434005900', '4432002921', '5037028908',
'5160001900', '4370012901', '4386008901', '4386015906',
'4387002902', '5415004902', '5415005905', '5415005902',
'5415012903', '5582001900', '5593002912', '5113008912',
'5161005923', '2526003909', '2526003910', '2551003900',
'2551015902', '2551012900', '2552004900', '2552007906',
'2552009902', '2552007907', '2553017900', '2569022901',
'5415005904', '5047014900', '5029017924', '5581008900',
'5029017911', '5593002907', '4382029900', '4431009900',
'7412015900', '5593002917', '4432004901', '2552005901',
'7412010903', '4386004902', '2177034904', '2180026900',
'2180025900', '5302002900', '5302006900', '2287009903',
'2287009902', '2287009901', '2292014901', '2292014900',
'2292013901', '5630030902', '5302001900', '2820019900',
'5303025901']), 'landuse'] = 'recreational'
land_gdf = land_gdf.reset_index()
land_gdf.head()
###Output
_____no_output_____
###Markdown
Net area
###Code
unique_land_gdf = land_gdf.copy()
unique_land_gdf.loc[:, 'x'] = unique_land_gdf.geometry.centroid.x
unique_land_gdf.loc[:, 'y'] = unique_land_gdf.geometry.centroid.y
unique_land_gdf = unique_land_gdf.drop_duplicates(subset=['x', 'y'])
ins_gdf = process_geometry_SQL_insert(unique_land_gdf)
ins_gdf.to_sql('temptable_{}'.format(CITY.lower()), engine, if_exists='replace', index=False, dtype={'geom': Geometry('MultiPolygon', srid=4326)})
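# The three SQL statements below (1) repair any invalid geometries in the temp
# table, (2) drop parcels that fall inside areas marked as unused for this city,
# and (3) sum the remaining parcel area (in km^2) per spatial group into
# spatial_groups_net_area.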
sql = """
UPDATE temptable_{tempname} p SET geom=ST_Multi(ST_buffer(p.geom, 0.0))
WHERE NOT ST_Isvalid(p.geom)
""".format(city=CITY, tempname=CITY.lower())
result = engine.execute(text(sql))
sql = """
DELETE
FROM temptable_{tempname} t
USING unused_areas u
WHERE u.city = '{city}' AND ST_Intersects(u.geom, t.geom) AND (NOT ST_Touches(u.geom, t.geom))
AND (ST_Contains(u.geom, t.geom) OR ST_AREA(ST_Intersection(t.geom, u.geom))/ST_Area(t.geom) > 0.5);
""".format(city=CITY, tempname=CITY.lower())
result = engine.execute(text(sql))
sql = """
INSERT INTO spatial_groups_net_area (sp_id, city, spatial_name, used_area)
SELECT sp_id, city, spatial_name, SUM(ST_Area(ST_Intersection(s.approx_geom, t.geom)::geography))/1000000.
FROM temptable_{tempname} t
INNER JOIN spatial_groups s ON ST_Intersects(s.approx_geom, t.geom) AND NOT ST_Touches(s.approx_geom, t.geom)
WHERE s.city = '{city}' AND s.spatial_name='core'
GROUP BY sp_id, city, spatial_name;
""".format(city=CITY, tempname=CITY.lower())
result = engine.execute(text(sql))
###Output
_____no_output_____
###Markdown
Refresh materialized views
###Code
sql = """
REFRESH MATERIALIZED VIEW spatial_groups_unused_areas;
"""
result = engine.execute(text(sql))
###Output
_____no_output_____ |
paper/notebooks/reddit_example_paper.ipynb | ###Markdown
1. Prepare reddit data 1.1 Load data and stopwords; filter data for paper analysis.
###Code
%%time
# import spacy stopwords
from spacy.lang.en.stop_words import STOP_WORDS
stopword_list=STOP_WORDS
# read reddit data
PATH = "../data/"
reddit_data = pd.read_json(f"{PATH}self_posts_conservative_and_conspiracy_subs.json",lines=True)
# filter out other subs
reddit_data = reddit_data[reddit_data.subreddit.isin(["conspiracy","The_Donald"])]
# add field for number of words
reddit_data["length"] = reddit_data["selftext"].apply(lambda x: len(x.split()))
# filter to at least 30 words
reddit_data = reddit_data[reddit_data.length>=30].reset_index(drop=True)
# add dummies indicating time period (t0 or t1) - see paper for definitions
reddit_data["t0"] = [1 if (i < 1468209600) & (i>=1436598580) else 0 for i in reddit_data["created_utc"]]
reddit_data["t1"] = [1 if i > 1535688000 else 0 for i in reddit_data["created_utc"]]
# filter
reddit_data = reddit_data[(reddit_data.t0==1)|(reddit_data.t1==1)]
print(reddit_data.groupby("subreddit").t0.value_counts())
# sample
s_size = 5000
rs = 42
sample = reddit_data.groupby(["subreddit", "t0"]).apply(lambda x: x.sample(s_size, random_state=rs)).reset_index(drop=True)
print(sample.groupby("subreddit").t0.value_counts())
del reddit_data
###Output
subreddit t0
The_Donald 0 5000
1 5000
conspiracy 0 5000
1 5000
Name: t0, dtype: int64
###Markdown
1.2 Remove special formatting and stopwords
###Code
tokenizer = ToktokTokenizer()
###Output
_____no_output_____
###Markdown
Remove stopwords before denoising, lemmatizing and removing special characters.
###Code
%%time
# combine title and body
sample["full_text"] = sample["title"] + ". " + sample["selftext"]
# remove URLs
URL_REGEX = r"((http|https)\:\/\/)?[a-zA-Z0-9\.\/\?\:@\-_=#]+\.([a-zA-Z]){2,6}([a-zA-Z0-9\.\&\/\?\:@\-_=#])*"
sample['full_text'] = sample['full_text'].apply(lambda x: re.sub(URL_REGEX,'', str(x)))
# apply redditcleaner function
sample['full_text'] = sample['full_text'].map(clean)
# lowercase
sample["full_text"] = sample["full_text"].str.lower()
###Output
CPU times: user 5.21 s, sys: 59.4 ms, total: 5.27 s
Wall time: 5.3 s
###Markdown
Lemmatize and remove stopwords with spaCy
###Code
%%time
nlp = spacy.load("en_core_web_sm")
posts = list(sample["full_text"])
lemmatized_list = []
for doc in nlp.pipe(posts, batch_size=32, n_process=3, disable=["parser", "ner"]):
lemmas = [token.lemma_.lower() for token in doc]
lemmas_no_stopwords = [lemma for lemma in lemmas if not lemma in stopword_list]
lemmatized = " ".join(lemmas_no_stopwords)
lemmatized_list.append(lemmatized)
sample["full_text_clean"] = lemmatized_list
print(sample["full_text"][2])
print()
print(sample["full_text_clean"][2])
###Output
how'd we miss this little gem?. i know hollywood hates conservatives but look at this trash from 2005: i've not seen the movie but apparently a group of libtards invites scary conservatives to dinner and murder them. and this was pre-antifa. can you imagine the outrage if the roles were reversed? unfuckingbelieveable!
miss little gem ? . know hollywood hate conservative look trash 2005 : movie apparently group libtard invite scary conservative dinner murder . pre - antifa . imagine outrage role reverse ? unfuckingbelieveable !
###Markdown
Denoise, remove special characters.
###Code
%%time
sample['full_text_clean']=sample['full_text_clean'].apply(denoise_text)
sample['full_text_clean']=sample['full_text_clean'].apply(remove_special_characters)
sample['full_text_clean']=sample['full_text_clean'].str.replace("ampamp","")
###Output
CPU times: user 1.57 s, sys: 143 ms, total: 1.71 s
Wall time: 1.72 s
###Markdown
Remove stopwords again, after other preprocessing.
###Code
%%time
sample['full_text_clean']= [remove_stopwords(r, stopword_list, tokenizer) for r in sample['full_text_clean']]
print(sample["full_text"][2])
print()
print(sample["full_text_clean"][2])
###Output
how'd we miss this little gem?. i know hollywood hates conservatives but look at this trash from 2005: i've not seen the movie but apparently a group of libtards invites scary conservatives to dinner and murder them. and this was pre-antifa. can you imagine the outrage if the roles were reversed? unfuckingbelieveable!
miss little gem know hollywood hate conservative look trash movie apparently group libtard invite scary conservative dinner murder pre antifa imagine outrage role reverse unfuckingbelieveable
###Markdown
Find phrases.
###Code
PHRASING = True
MIN = 100
THRESHOLD = 500
%%time
if PHRASING:
sample['full_text_clean']= get_phrases([tokenizer.tokenize(i) for i in sample['full_text_clean']],
min_count = MIN,
threshold = THRESHOLD)
sample["full_text_clean"] = [" ".join(post) for post in sample["full_text_clean"]]
###Output
_____no_output_____
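###Markdown
The `get_phrases` helper is defined outside this notebook. As a rough, illustrative sketch (assuming a gensim-style phrase model rather than the project's actual implementation), it learns frequently co-occurring token pairs and joins them into single tokens such as `new_york`:
###Code
# Illustrative sketch only -- not the project's get_phrases implementation.
from gensim.models.phrases import Phrases

def get_phrases_sketch(tokenized_docs, min_count=100, threshold=500):
    # learn collocations that occur at least min_count times and score above threshold
    bigram = Phrases(tokenized_docs, min_count=min_count, threshold=threshold)
    return [bigram[doc] for doc in tokenized_docs]
###Output
_____no_output_____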
###Markdown
Data _before_ preprocessing and phrasing.
###Code
sample['full_text'][0]
###Output
_____no_output_____
###Markdown
Data _after_ preprocessing and phrasing.
###Code
sample['full_text_clean'][0]
###Output
_____no_output_____
###Markdown
1.3 Separate r/conspiracy and r/The_Donald posts
###Code
con = sample[sample.subreddit == "conspiracy"].sort_values(by="created_utc").reset_index(drop=True)
td = sample[sample.subreddit == "The_Donald"].sort_values(by="created_utc").reset_index(drop=True)
con = con.full_text_clean.tolist()
td = td.full_text_clean.tolist()
###Output
_____no_output_____
###Markdown
2. WMD 2.1 Tokenize data, remove infrequent terms
###Code
con_tok = list(map(lambda x: tokenize(x, tokenizer), con))
td_tok = list(map(lambda x: tokenize(x, tokenizer), td))
# REMOVE WORDS OCCURRING <20 times
# 1. get list of word counts
all_tok = con_tok + td_tok
all_tok_flat = [item for sublist in all_tok for item in sublist]
print(len(all_tok_flat))
all_tok_flat = pd.Series(all_tok_flat)
word_counts = all_tok_flat.value_counts()
# 2. remove words occurring less than 20 times
thresh = 20
words_to_keep = list(word_counts[word_counts>=thresh].index)
# 3. filter out words
con_tok = [list(filter(lambda word: word in words_to_keep, l)) for l in con_tok]
td_tok = [list(filter(lambda word: word in words_to_keep, l)) for l in td_tok]
print(len([item for sublist in con_tok+td_tok for item in sublist]))
con_sample = [" ".join(doc) for doc in con_tok]
td_sample = [" ".join(doc) for doc in td_tok]
print(len(con_sample))
print(len(td_sample))
###Output
10000
10000
###Markdown
2.2 Load pretrained Google News W2V model
###Code
finetuned = True
if not finetuned:
print("Loading GoogleNews Vectors")
%time model = KeyedVectors.load_word2vec_format('/Users/jack/Downloads/GoogleNews-vectors-negative300.bin.gz', binary=True)
else:
print("Loading GoogleNews Vectors finetuned using Reddit data.")
%time model = KeyedVectors.load_word2vec_format('../../embeddings/reddit_w2v.txt', binary=False)
model.distance("trump", "conservative")
model.distance("trump","progressive")
model.distance("centipede","maga")
model.distance("conspiracy","theory")
###Output
_____no_output_____
###Markdown
2.3 Load corpus and remove OOV words
###Code
%%time
corpus = con_sample + td_sample
vectorizer = TfidfVectorizer(use_idf=False, tokenizer=tfidf_tokenize, norm='l1')
vectorizer.fit(corpus)
%time oov = [word for word in vectorizer.get_feature_names() if word not in model.key_to_index.keys()]
len(oov)
%time con_sample = list(map(lambda x: remove_oov(x, tokenizer, oov), con_sample))
%time td_sample = list(map(lambda x: remove_oov(x, tokenizer, oov), td_sample))
corpus = con_sample + td_sample
%time vectorizer = TfidfVectorizer(use_idf=True, tokenizer=tfidf_tokenize,norm='l1')
%time vectorizer.fit(corpus)
###Output
CPU times: user 51 µs, sys: 1e+03 ns, total: 52 µs
Wall time: 57.2 µs
CPU times: user 3.23 s, sys: 61.8 ms, total: 3.29 s
Wall time: 3.33 s
###Markdown
Bag-of-words vectorizer.
###Code
%%time
con_nbow = vectorizer.transform(con_sample)
td_nbow = vectorizer.transform(td_sample)
con_tok = list(map(lambda x: tokenize(x, tokenizer), con_sample))
td_tok = list(map(lambda x: tokenize(x, tokenizer), td_sample))
%time oov_ = [word for word in vectorizer.get_feature_names() if word not in model.key_to_index.keys()]
len(oov_)
###Output
_____no_output_____
###Markdown
Remove empty docs, and make sure both samples are same size
###Code
con_tok = list(map(lambda x: tokenize(x, tokenizer), con_sample))
td_tok = list(map(lambda x: tokenize(x, tokenizer), td_sample))
con_tok_empty = [c for c,i in enumerate(con_tok) if len(i)==0]
td_tok_empty = [c for c,i in enumerate(td_tok) if len(i)==0]
print(con_tok_empty)
print(td_tok_empty)
# remove empty docs
for i in sorted(td_tok_empty,reverse=True):
del td_tok[i],td_sample[i], con_tok[i],con_sample[i]
print(len(con_tok),len(td_tok))
con_nbow = vectorizer.transform(con_sample)
td_nbow = vectorizer.transform(td_sample)
###Output
9997 9997
###Markdown
2.4 Get features and embeddings
###Code
features = vectorizer.get_feature_names()
word2idx = {word: idx for idx, word in enumerate(vectorizer.get_feature_names())}
idx2word = {idx: word for idx, word in enumerate(vectorizer.get_feature_names())}
###Output
_____no_output_____
###Markdown
Get the embedding matrix "E" for all features.
###Code
E = model[features]
###Output
_____no_output_____
###Markdown
2.5 Cluster In order to make the results of the WMD model more interpretable, we add the option to inspect the output not only by individual words, but also by *word clusters*. We do this by clustering the input words with two different algorithms and assigning each word to a cluster. 2.5.1 Kmeans First, we get the embeddings for the words that are in our feature space.
###Code
X = model[features]
###Output
_____no_output_____
###Markdown
Then we define the range of candidate cluster counts K to evaluate; the KMeans model itself is initialized and fit further below once K is chosen.
###Code
%%time
K = range(10,110, 10)
###Output
CPU times: user 4 µs, sys: 1e+03 ns, total: 5 µs
Wall time: 7.87 µs
###Markdown
Assign labels and centroids to separate variables for later use. 2.5.2 T-SNE + Kmeans
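The `kmeans_search` and `plot_kmeans` helpers are defined outside this notebook; as an illustrative sketch (not necessarily the actual helper), `kmeans_search` plausibly records, for each candidate K, the within-cluster sum of squares for the elbow method and the silhouette score:
###Code
# Illustrative sketch of the K search -- the real helper may differ.
from sklearn import cluster
from sklearn.metrics import silhouette_score

def kmeans_search_sketch(X, K_range):
    ssd, silhouettes = [], []
    for k in K_range:
        km = cluster.KMeans(n_clusters=k, max_iter=300).fit(X)
        ssd.append(km.inertia_)                       # within-cluster sum of squared distances
        silhouettes.append(silhouette_score(X, km.labels_))
    return ssd, silhouettes
###Output
_____no_output_____
###Markdown
Below, the embeddings are first projected to two dimensions with t-SNE and the K search is run on the projected points.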
###Code
method='barnes_hut'
n_components = 2
verbose = 1
E_tsne = TSNE(n_components=n_components, method=method, verbose=verbose).fit_transform(E)
plt.scatter(E_tsne[:, 0], E_tsne[:, 1], s=1);
%%time
tsne_ssd, tsne_silhouette = kmeans_search(E_tsne, K)
plot_kmeans(K,tsne_ssd,"elbow")
plot_kmeans(K,tsne_silhouette,"silhouette")
###Output
_____no_output_____
###Markdown
2.5.5 Choose clustering model
###Code
k = 100
%%time
km_tsne = cluster.KMeans(n_clusters=k,max_iter=300).fit(E_tsne)
labels = km_tsne.labels_
###Output
CPU times: user 1.94 s, sys: 276 ms, total: 2.22 s
Wall time: 2.22 s
###Markdown
Create an index that maps each word to a cluster.
###Code
word2cluster = {features[idx]: cl for idx, cl in enumerate(labels)}
###Output
_____no_output_____
###Markdown
Now, conversely, create an index that maps each cluster to a word.
###Code
cluster2words = defaultdict(list)
for key, value in word2cluster.items():
cluster2words[value].append(key)
###Output
_____no_output_____
###Markdown
2.6 Initialize documents Transform all posts into "documents", each with a set of weights per word in the corpus ("nbow"), the sum of these weights ("weights_sum"), the indices of the words in the documents ("idxs") and the word vectors corresponding to each word ("vecs").
###Code
import random
random.seed(42)
%%time
con_docs, td_docs = [], []
for idx, doc in enumerate(con_tok):
con_docs.append(Document(doc, con_nbow[idx], word2idx, E))
for idx, doc in enumerate(td_tok):
td_docs.append(Document(doc, td_nbow[idx], word2idx, E))
###Output
CPU times: user 4.44 s, sys: 1.42 s, total: 5.86 s
Wall time: 5.95 s
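###Markdown
The `Document` class itself is provided elsewhere in the wmdecompose project. As a rough, illustrative sketch (field names follow the description above, not necessarily the actual implementation), each document simply bundles its tokens, their nbow weights, the total weight, the vocabulary indices and the corresponding embedding vectors:
###Code
# Illustrative sketch only -- the real Document class comes from the package.
class DocumentSketch:
    def __init__(self, tokens, nbow_row, word2idx, E):
        self.words = tokens                          # tokens of this post
        self.nbow = nbow_row                         # (1, |V|) tf weight row for this post
        self.weights_sum = nbow_row.sum()            # total weight, used for normalisation
        self.idxs = [word2idx[t] for t in tokens]    # vocabulary indices of the tokens
        self.vecs = E[self.idxs, :]                  # embedding vectors for those tokens
###Output
_____no_output_____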
###Markdown
2.7 Linear-Complexity Relaxed WMD (LC-RWMD) Run the [Linear-Complexity Relaxed WMD](https://arxiv.org/abs/1711.07227) to get the distances between all conspiracy and all TD posts. This is performed for the t0 corpus and for the t1 corpus.
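The `LC_RWMD` implementation also comes from the wmdecompose project. Conceptually, the relaxation drops one of WMD's two flow constraints, so every word in one document simply ships all of its weight to its nearest word in the other document; taking the larger of the two one-sided values gives a lower bound on the exact WMD. A minimal one-sided sketch (illustrative only, not the package's vectorized implementation):
###Code
# Illustrative one-sided relaxed WMD between two word-vector matrices.
import numpy as np
from scipy.spatial.distance import cdist

def relaxed_wmd_one_sided(vecs_a, weights_a, vecs_b, metric="cosine"):
    D = cdist(vecs_a, vecs_b, metric=metric)    # word-to-word distances, shape (len_a, len_b)
    w = np.asarray(weights_a, dtype=float)
    w = w / w.sum()                             # normalise document A's nbow weights
    return float(np.sum(w * D.min(axis=1)))     # each word moves to its closest counterpart
###Output
_____no_output_____
###Markdown
The package computes these relaxed distances for every r/conspiracy-r/The_Donald pair, separately for the t0 and t1 samples, below.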
###Code
# initialize LCRWMD
lc_rwmd_t0 = LC_RWMD(con_docs[:4997], td_docs[:4997],con_nbow[:4997],td_nbow[:4997],E)
lc_rwmd_t1 = LC_RWMD(con_docs[4997:], td_docs[4997:],con_nbow[4997:],td_nbow[4997:],E)
# get cosine distances
%time lc_rwmd_t0.get_D('cosine')
%time lc_rwmd_t1.get_D('cosine')
# write pickle to easily switch between cosine and euclidean if desired
# pickle.dump(lc_rwmd_t0,open("../checkpoints/lc_rwmd_t0_cosine.p","wb"))
# pickle.dump(lc_rwmd_t1,open("../checkpoints/lc_rwmd_t1_cosine.p","wb"))
# # read pickle
# lc_rwmd_t0 = pickle.load(open("../checkpoints/lc_rwmd_t0_cosine.p","rb"))
# lc_rwmd_t1 = pickle.load(open("../checkpoints/lc_rwmd_t1_cosine.p","rb"))
print(f"Mean of all 25M pairwise LCRWMD distances at t0: {np.concatenate(lc_rwmd_t0.D).mean()}")
print(f"Mean of all 25M pairwise LCRWMD distances at t1: {np.concatenate(lc_rwmd_t1.D).mean()}")
# RANDOM PAIRING TRIAL
from scipy.stats import ttest_ind
t0_distros = []
t1_distros = []
t_statistics = []
p_values = []
n = 10
for i in range(n):
print(i)
# generate random pairs
con_idx0, td_idx0 = [list(range(0,4997)) for r in range(2)]
con_idx1, td_idx1 = [list(range(0,5000)) for r in range(2)]
for idx in [con_idx0, td_idx0, con_idx1, td_idx1]:
shuffle(idx)
pairs0 = list(zip(con_idx0, td_idx0))
pairs1 = list(zip(con_idx1, td_idx1))
# get distannces of random pairs
print("Getting distances of random pairs...")
wmd_pairs_t0 = WMDPairs(con_docs[:4997],td_docs[:4997],pairs0,E,idx2word,metric="cosine")
wmd_pairs_t0.get_distances(decompose = True,
sum_clusters = True,
w2c = word2cluster,
c2w = cluster2words,
thread = True,
relax = False)
wmd_pairs_t1 = WMDPairs(con_docs[4997:],td_docs[4997:],pairs1,E,idx2word,metric="cosine")
wmd_pairs_t1.get_distances(decompose = True,
sum_clusters = True,
w2c = word2cluster,
c2w = cluster2words,
thread = True,
relax = False)
# create 1D array
distances_t0 = np.concatenate(wmd_pairs_t0.distances)
distances_t1 = np.concatenate(wmd_pairs_t1.distances)
# remove zeros
distances_t0 = distances_t0[distances_t0!=0]
distances_t1 = distances_t1[distances_t1!=0]
# t test
ttest = ttest_ind(distances_t0,distances_t1)
t_stat, p_val = ttest[0],ttest[1]
# save values
t0_distros.append(distances_t0)
t1_distros.append(distances_t1)
t_statistics.append(t_stat)
p_values.append(p_val)
# print(t0_means)
# print(t1_means)
# print(t_statistics)
# print(p_values)
np.concatenate(t0_distros).mean()
np.concatenate(t1_distros).mean()
ttest_ind(np.concatenate(t0_distros),np.concatenate(t1_distros))
plt.figure(figsize=(12,12))
sns.distplot(x = np.concatenate(t0_distros),hist=True,norm_hist=True,label="t0 (mean = 0.548)")
sns.distplot(x = np.concatenate(t1_distros),hist=True,norm_hist=True,label = "t1 (mean = 0.545)")
plt.ylabel("Density",fontsize=28)
plt.xlabel("Cosine distance",fontsize=28)
plt.xticks(fontsize=22)
plt.yticks(fontsize=22)
plt.legend(prop={'size': 20})
plt.savefig("/Users/jack/wmdecompose/paper/images/wmd_random.png",bbox_inches="tight",dpi=500)
plt.show()
###Output
/opt/anaconda3/lib/python3.7/site-packages/seaborn/distributions.py:2557: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms).
warnings.warn(msg, FutureWarning)
/opt/anaconda3/lib/python3.7/site-packages/seaborn/distributions.py:2557: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms).
warnings.warn(msg, FutureWarning)
###Markdown
2.8 Gale-Shapley Pairing Use the [Gale-Shapley matching algorithm](https://en.wikipedia.org/wiki/Gale%E2%80%93Shapley_algorithm) to find the optimal pairs between r/conspiracy and r/The_Donald posts. This iterates over all the posts and finds the set of matches that pairs each post with its optimal partner, given that every post from one subreddit has to be matched with a post from the other. The output is a dictionary of key-value pairs, where each pair represents an optimal match.
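The `Matcher` class used below is provided by the wmdecompose project; for intuition, a compact Gale-Shapley sketch on a square distance matrix (rows propose to columns, both sides preferring smaller distances) could look like the following -- illustrative only, not the package code:
###Code
# Illustrative Gale-Shapley sketch on a square distance matrix D (rows x columns).
import numpy as np

def stable_match_sketch(D):
    n = D.shape[0]
    prefs = {i: list(np.argsort(D[i])) for i in range(n)}  # each row's columns, closest first
    engaged = {}                                            # column -> row currently matched
    free_rows = list(range(n))
    while free_rows:
        i = free_rows.pop(0)
        j = prefs[i].pop(0)                                 # best column i has not proposed to yet
        if j not in engaged:
            engaged[j] = i
        elif D[i, j] < D[engaged[j], j]:                    # column j prefers the closer row
            free_rows.append(engaged[j])
            engaged[j] = i
        else:
            free_rows.append(i)
    return engaged
###Output
_____no_output_____
###Markdown
The matching below is run separately on the t0 and t1 distance matrices produced by the LC-RWMD step.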
###Code
%%time
con_docs, td_docs = [], []
for idx, doc in enumerate(con_tok):
con_docs.append(Document(doc, con_nbow[idx], word2idx, E))
for idx, doc in enumerate(td_tok):
td_docs.append(Document(doc, td_nbow[idx], word2idx, E))
# Options: 'gale_shapeley','random','full'
pairing = 'gale_shapeley'
%%time
if pairing == 'gale_shapeley':
print("Running Gale-Shapeley pairing.")
# Run G-S pairing for the t0 docs and the t1 docs separately
matcher_t0 = Matcher(lc_rwmd_t0.D)
engaged_t0 = matcher_t0.matchmaker()
matcher_t0.check()
pairs_t0 = [(k, v) for k, v in engaged_t0.items()]
matcher_t1 = Matcher(lc_rwmd_t1.D)
engaged_t1 = matcher_t1.matchmaker()
matcher_t1.check()
pairs_t1 = [(k, v) for k, v in engaged_t1.items()]
%%time
wmd_pairs_flow0 = WMDPairs(con_docs[:4997],td_docs[:4997],pairs_t0,E,idx2word,metric="cosine")
wmd_pairs_flow0.get_distances(decompose = True,
sum_clusters = True,
w2c = word2cluster,
c2w = cluster2words,
thread = True,
relax = False)
wmd_pairs_flow1 = WMDPairs(con_docs[4997:],td_docs[4997:],pairs_t1,E,idx2word,metric="cosine")
wmd_pairs_flow1.get_distances(decompose = True,
sum_clusters = True,
w2c = word2cluster,
c2w = cluster2words,
thread = True,
relax = False)
wmd_pairs_flow0.distances[wmd_pairs_flow0.distances!=0].mean()
wmd_pairs_flow1.distances[wmd_pairs_flow1.distances!=0].mean()
ttest_ind(wmd_pairs_flow0.distances[wmd_pairs_flow0.distances!=0],wmd_pairs_flow1.distances[wmd_pairs_flow1.distances!=0])
wmd_pairs_flow0.get_differences()
wmd_pairs_flow1.get_differences()
###Output
_____no_output_____
###Markdown
3.1 Interpreting pairwise WMD flows
###Code
# words most distinguishing r/conspiracy from r/TD, t0
{k: v for k, v in sorted(wmd_pairs_flow0.wd_source_diff.items(), key=lambda item: item[1], reverse=True)}
# words most distinguishing r/conspiracy from r/TD, t1
{k: v for k, v in sorted(wmd_pairs_flow1.wd_source_diff.items(), key=lambda item: item[1], reverse=True)}
# words most distinguishing r/TD from r/conspiracy, t0
{k: v for k, v in sorted(wmd_pairs_flow0.wd_sink_diff.items(), key=lambda item: item[1], reverse=True)}
# words most distinguishing r/TD from r/conspiracy, t1
{k: v for k, v in sorted(wmd_pairs_flow1.wd_sink_diff.items(), key=lambda item: item[1], reverse=True)}
###Output
_____no_output_____
###Markdown
Combine into DF
###Code
top_words_t0 = {k: v for k, v in sorted(wmd_pairs_flow0.wd_source_diff.items(), key=lambda item: item[1], reverse=True)}
top_words_t0_df = pd.DataFrame.from_dict(top_words_t0, orient='index', columns = ["cost_t0"])
top_words_t0_df = top_words_t0_df.reset_index().rename(columns={"index":"word"})
# merge in t1
top_words_t1 = {k: v for k, v in sorted(wmd_pairs_flow1.wd_source_diff.items(), key=lambda item: item[1], reverse=True)}
top_words_t1_df = pd.DataFrame.from_dict(top_words_t1, orient='index', columns = ["cost_t1"])
top_words_t1_df = top_words_t1_df.reset_index().rename(columns={"index":"word"})
words_df = top_words_t0_df.merge(top_words_t1_df,left_on="word",right_on="word",how="outer").fillna(0)
words_df.head()
# add counts
def get_counts(vocab,list_of_lists):
d = {}
for v in vocab:
total = 0
for l in list_of_lists:
count = l.count(v)
total+=count
d[v] = total
return d
vocab = list(words_df["word"])
t0_consp = get_counts(vocab,con_tok[:4997])
t0_td = get_counts(vocab,td_tok[:4997])
t1_consp = get_counts(vocab,con_tok[4997:])
t1_td = get_counts(vocab,td_tok[4997:])
words_df["cons_t0"] = words_df.word.map(t0_consp)
words_df["cons_t1"] = words_df.word.map(t1_consp)
words_df["td_t0"] = words_df.word.map(t0_td)
words_df["td_t1"] = words_df.word.map(t1_td)
# add clusters
words_df["cluster"] = words_df.word.map(word2cluster)
for index,row in words_df.iterrows():
if row["cost_t0"]==0:
pct_change = "NA"
else:
pct_change = (row["cost_t1"] - row["cost_t0"])/row["cost_t0"]
words_df.loc[index,"cost_pct_change"] = pct_change
for index,row in words_df.iterrows():
if row["cons_t0"]==0:
pct_change = "NA"
else:
pct_change = (row["cons_t1"] - row["cons_t0"])/row["cons_t0"]
words_df.loc[index,"cons_pct_change"] = pct_change
for index,row in words_df.iterrows():
if row["td_t0"]==0:
pct_change = "NA"
else:
pct_change = (row["td_t1"] - row["td_t0"])/row["td_t0"]
words_df.loc[index,"td_pct_change"] = pct_change
words_df.head()
cluster_df = words_df.groupby("cluster")[["cost_t0","cost_t1","cons_t0","cons_t1","td_t0","td_t1"]].agg("sum")
cluster_df.head()
no_na = words_df[(words_df["cost_pct_change"]!="NA")&(words_df["cons_pct_change"]!="NA")&(words_df["td_pct_change"]!="NA")].reset_index(drop=True)
words_df["cost_abs_change"] = words_df["cost_t1"] - words_df["cost_t0"]
words_df.head()
plt.figure(figsize=(12,12))
#sns.distplot(x = list(words_df["cost_abs_change"]),hist=True,norm_hist=False,label="t0 (mean = 0.548)")
plt.hist(list(words_df["cost_abs_change"]),bins=96)
plt.yscale("log")
plt.ylabel("N words",fontsize=28)
plt.xlabel("Per-word differences in cumulative cost, t0 to t1",fontsize=28)
plt.xlim(-12,12)
plt.xticks(fontsize=22)
plt.yticks(fontsize=22)
#plt.legend(prop={'size': 20})
plt.savefig("/Users/jack/wmdecompose/paper/images/word_level_distances.png",bbox_inches="tight",dpi=500)
plt.show()
words_df.to_csv("../checkpoints/wmd_words_df_cos.csv",index=False)
no_na.to_csv("../checkpoints/wmd_words_df_no_na_cos.csv",index=False)
# look at dynamically ranked clusters
n_clusters = 100
n_words = 10
# conspiracy t0
c1 = output_clusters(wd=wmd_pairs_flow0.wd_source_diff.items(),
cd=wmd_pairs_flow0.cd_source.items(),
c2w=cluster2words,
n_clusters=n_clusters,
n_words=n_words)
# conspiracy t1
c2 = output_clusters(wd=wmd_pairs_flow1.wd_source_diff.items(),
cd=wmd_pairs_flow1.cd_source.items(),
c2w=cluster2words,
n_clusters=n_clusters,
n_words=n_words)
# TD t0
c3 = output_clusters(wd=wmd_pairs_flow0.wd_sink_diff.items(),
cd=wmd_pairs_flow0.cd_sink.items(),
c2w=cluster2words,
n_clusters=n_clusters,
n_words=n_words)
# TD t1
c4 = output_clusters(wd=wmd_pairs_flow1.wd_sink_diff.items(),
cd=wmd_pairs_flow1.cd_sink.items(),
c2w=cluster2words,
n_clusters=n_clusters,
n_words=n_words)
c1
c2
c3
c4
###Output
_____no_output_____ |
BBM Hydrogen orbitals.ipynb | ###Markdown
Plotting radial hydrogen orbitals with sympy This notebook, loosely inspired by https://ojensen.wordpress.com/2010/08/10/fast-ufunc-ish-hydrogen-solutions/, plots some radial hydrogen functions using sympy, as support for Chapter 2 of the Building Blocks of Matter course at Leiden University.
###Code
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
%matplotlib inline
from sympy.physics.hydrogen import R_nl
from sympy import var, simplify
from sympy import init_printing
from sympy.utilities.lambdify import lambdify
from sympy import integrate, oo
from scipy.constants import hbar, alpha, Rydberg, c
init_printing()
plt.rcParams['axes.xmargin'] = 0
plt.rcParams['figure.figsize'] = [8,6]
cmap = matplotlib.cm.get_cmap('tab20c')
var('n l r')
R_nl(2,1,r,1)*r**2
x_np = np.linspace(0, 20, 200)
linestyles = ['-', '--', '-.', ':']
Z = 1
for n_np in range(1,4):
for l_np in range(0,n_np):
plt.plot(x_np,lambdify(r, R_nl(n_np, l_np, r, Z))(x_np),
color=cmap(4*n_np-4+l_np),
linestyle=linestyles[l_np],
label="$n = {}, l = {}$".format(n_np, l_np))
plt.ylim([-0.12, 0.18])
plt.xlabel('radial distance ($a_0$)')
plt.ylabel('$R_{nl}$')
plt.legend()
plt.show()
x_np = np.linspace(0, 30, 300)
for n_np in range(1,5):
for l_np in range(0,n_np):
expected_r = integrate(r * R_nl(n_np, l_np, r, 1)**2 * r**2, (r,0,oo))
plt.plot(x_np, lambdify(r, R_nl(n_np, l_np, r, 1)**2 * r**2)(x_np),
color=cmap(4*(n_np-1) + l_np),
linestyle=linestyles[l_np],
label=r"$n = {}, l = {}, \langle r \rangle = {}$".format(n_np,
l_np,
expected_r))
plt.xlabel('radial distance ($a_0$)')
plt.ylabel('Probability')
plt.legend()
plt.show()
# Investigate the Transition amplitudes for the sharp and diffuse transitions
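# The radial part of the electric-dipole matrix element between the (n, l=1)
# state and the (n+1, l') state is integral_0^oo R_{n,1}(r) * r * R_{n+1,l'}(r) * r^2 dr,
# in units of the Bohr radius (Z = 1, a_0 = 1 here). "Sharp" denotes the
# transition to the s state (l' = 0) and "diffuse" the transition to the
# d state (l' = 2), matching the expressions below.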
diffuse = R_nl(n, 1, r, 1) * r * R_nl(n+1, 2, r, 1) * r**2
sharp = R_nl(n, 1, r, 1) * r * R_nl(n+1, 0, r, 1) * r**2
display(diffuse)
display(sharp)
for n_np in [2, 3, 4]:
plt.plot(x_np*2, lambdify(r, diffuse.subs({n: n_np}))(x_np*2),
color=cmap(4*(n_np-1)),
label='Diffuse {} -> {}'.format(n_np, n_np+1))
plt.plot(x_np*2, lambdify(r, sharp.subs({n: n_np}))(x_np*2),
color=cmap(4*(n_np-1)),
linestyle='--',
label='Sharp {} -> {}'.format(n_np, n_np+1))
plt.xlabel('radial distance ($a_0$)')
plt.legend()
plt.show()
for n_np in [2, 3, 4]:
print("Transition amplitude diffuse {} -> {}: {:2f}".format(n_np,
n_np+1,
float(integrate(diffuse.subs({n: n_np}), (r,0,oo)))))
print("Transition amplitude sharp {} -> {}: {:2f}".format(n_np,
n_np+1,
float(integrate(sharp.subs({n: n_np}), (r,0,oo)))))
###Output
Transition amplitude diffuse 2 -> 3: 4.747992
Transition amplitude sharp 2 -> 3: 0.938404
Transition amplitude diffuse 3 -> 4: 7.565411
Transition amplitude sharp 3 -> 4: 2.443534
Transition amplitude diffuse 4 -> 5: 11.038943
Transition amplitude sharp 4 -> 5: 4.600278
|
Week 9/3.6. Implementation of Softmax Regression from Scratch.ipynb | ###Markdown
Defining the Softmax Operation
###Code
X = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
X.sum(0, keepdims=True), X.sum(1, keepdims=True)
def softmax(X):
X_exp = np.exp(X)
partition = X_exp.sum(1, keepdims=True)
return X_exp / partition # The broadcasting mechanism is applied here
X = np.random.normal(0, 1, (2, 5))
X_prob = softmax(X)
X_prob, X_prob.sum(1)
###Output
_____no_output_____
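###Markdown
For reference, the operation implemented above is $\mathrm{softmax}(\mathbf{X})_{ij} = \exp(X_{ij}) / \sum_k \exp(X_{ik})$: each row of logits is exponentiated and then normalized to sum to one. Exponentiating large logits directly, as this simple implementation does, can overflow; a common safeguard (not used here) is to subtract each row's maximum before exponentiating.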
###Markdown
Defining the Model
###Code
# Defining the Model
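# Note: the weight matrix W and bias vector b are assumed to have been
# initialized (and had gradients attached) in an earlier cell of this chapter.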
def net(X):
return softmax(np.dot(X.reshape((-1, W.shape[0])), W) + b)
###Output
_____no_output_____
###Markdown
Defining the Loss Function
###Code
y = np.array([0, 2])
y_hat = np.array([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])
y_hat[[0, 1], y]
# implement the cross-entropy loss function
def cross_entropy(y_hat, y):
return - np.log(y_hat[range(len(y_hat)), y])
cross_entropy(y_hat, y)
###Output
_____no_output_____
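###Markdown
In equation form, with the true label encoded as a one-hot vector $\mathbf{y}$ and predicted probabilities $\hat{\mathbf{y}}$, the cross-entropy loss is $l(\mathbf{y}, \hat{\mathbf{y}}) = -\sum_j y_j \log \hat{y}_j$, which reduces to $-\log \hat{y}_{\text{true}}$; the implementation above therefore just picks out the predicted probability of the correct class via fancy indexing and takes its negative log.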
###Markdown
Classification Accuracy
###Code
def accuracy(y_hat, y):
"""Compute the number of correct predictions."""
if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:
y_hat = y_hat.argmax(axis=1)
cmp = y_hat.astype(y.dtype) == y
return float(cmp.astype(y.dtype).sum())
accuracy(y_hat, y) / len(y)
def evaluate_accuracy(net, data_iter):
"""Compute the accuracy for a model on a dataset."""
metric = Accumulator(2) # No. of correct predictions, no. of predictions
for X, y in data_iter:
metric.add(accuracy(net(X), y), d2l.size(y))
return metric[0] / metric[1]
class Accumulator:
"""For accumulating sums over `n` variables."""
def __init__(self, n):
self.data = [0.0] * n
def add(self, *args):
self.data = [a + float(b) for a, b in zip(self.data, args)]
def reset(self):
self.data = [0.0] * len(self.data)
def __getitem__(self, idx):
return self.data[idx]
evaluate_accuracy(net, test_iter)
###Output
_____no_output_____
###Markdown
Training
###Code
# define a function to train for one epoch
def train_epoch_ch3(net, train_iter, loss, updater):
"""Train a model within one epoch (defined in Chapter 3)."""
# Sum of training loss, sum of training accuracy, no. of examples
metric = Accumulator(3)
if isinstance(updater, gluon.Trainer):
updater = updater.step
for X, y in train_iter:
# Compute gradients and update parameters
with autograd.record():
y_hat = net(X)
l = loss(y_hat, y)
l.backward()
updater(X.shape[0])
metric.add(float(l.sum()), accuracy(y_hat, y), y.size)
# Return training loss and training accuracy
return metric[0] / metric[2], metric[1] / metric[2]
# define a utility class that plot data in animation
class Animator:
"""For plotting data in animation."""
def __init__(self, xlabel=None, ylabel=None, legend=None, xlim=None,
ylim=None, xscale='linear', yscale='linear',
fmts=('-', 'm--', 'g-.', 'r:'), nrows=1, ncols=1,
figsize=(3.5, 2.5)):
# Incrementally plot multiple lines
if legend is None:
legend = []
d2l.use_svg_display()
self.fig, self.axes = d2l.plt.subplots(nrows, ncols, figsize=figsize)
if nrows * ncols == 1:
self.axes = [self.axes, ]
# Use a lambda function to capture arguments
self.config_axes = lambda: d2l.set_axes(
self.axes[0], xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
self.X, self.Y, self.fmts = None, None, fmts
def add(self, x, y):
# Add multiple data points into the figure
if not hasattr(y, "__len__"):
y = [y]
n = len(y)
if not hasattr(x, "__len__"):
x = [x] * n
if not self.X:
self.X = [[] for _ in range(n)]
if not self.Y:
self.Y = [[] for _ in range(n)]
for i, (a, b) in enumerate(zip(x, y)):
if a is not None and b is not None:
self.X[i].append(a)
self.Y[i].append(b)
self.axes[0].cla()
for x, y, fmt in zip(self.X, self.Y, self.fmts):
self.axes[0].plot(x, y, fmt)
self.config_axes()
display.display(self.fig)
display.clear_output(wait=True)
# training function then trains a model net on a training dataset
def train_ch3(net, train_iter, test_iter, loss, num_epochs, updater):
"""Train a model (defined in Chapter 3)."""
animator = Animator(xlabel='epoch', xlim=[1, num_epochs], ylim=[0.3, 0.9],
legend=['train loss', 'train acc', 'test acc'])
for epoch in range(num_epochs):
train_metrics = train_epoch_ch3(net, train_iter, loss, updater)
test_acc = evaluate_accuracy(net, test_iter)
animator.add(epoch + 1, train_metrics + (test_acc,))
train_loss, train_acc = train_metrics
assert train_loss < 0.5, train_loss
assert train_acc <= 1 and train_acc > 0.7, train_acc
assert test_acc <= 1 and test_acc > 0.7, test_acc
# optimize the loss function of the model with a learning rate 0.1
lr = 0.1
def updater(batch_size):
return d2l.sgd([W, b], lr, batch_size)
!pip uninstall -y matplotlib
!pip install --upgrade matplotlib
# train the model with 10 epochs
num_epochs = 10
train_ch3(net, train_iter, test_iter, cross_entropy, num_epochs, updater)
###Output
_____no_output_____
###Markdown
Prediction
###Code
# the predictions from the model
def predict_ch3(net, test_iter, n=6):
"""Predict labels (defined in Chapter 3)."""
for X, y in test_iter:
break
trues = d2l.get_fashion_mnist_labels(y)
preds = d2l.get_fashion_mnist_labels(net(X).argmax(axis=1))
titles = [true +'\n' + pred for true, pred in zip(trues, preds)]
d2l.show_images(
X[0:n].reshape((n, 28, 28)), 1, n, titles=titles[0:n])
predict_ch3(net, test_iter)
###Output
_____no_output_____ |
Small Projects/Cost of Capital/Cost of Debt.ipynb | ###Markdown
Finding Cost of Debt Given Financial and Market Info Level 1- A chemical manufacturer has a 7.0% coupon, annual pay 1000 par value bond outstanding, priced at \\$1042.12 on 2021-01-06.- If the bond matures on 2024-01-06, what is the cost of debt for this company? The tax rate is 35%. Level 2- We search for WMT on https://stockrow.com to get Walmart’s financials. Calculate the cost of debt for 2019-07-31 using the financial statements approach. Note that you will also need to determine the effective tax rate using actual tax paid and EBT.
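For Level 1, the pre-tax cost of debt is the yield $r$ that solves the bond-pricing equation $1042.12 = \sum_{t=1}^{3} \frac{70}{(1+r)^t} + \frac{1000}{(1+r)^3}$, and the after-tax cost is $r(1-0.35)$; the code below solves for $r$ numerically with `numpy_financial.rate`.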
###Code
import numpy_financial as npf
import pandas as pd
###Output
_____no_output_____
###Markdown
Level 1 Solution
###Code
coupon_yield = 0.07
par_value = 1000
premium_value = 1042.12
n = 3
tax_rate = 0.35
payment = par_value * coupon_yield
pretax_yield = npf.rate(n, payment, -premium_value, par_value)
def show_debt_costs(pretax_yield, tax_rate):
aftertax_yield = pretax_yield * (1 - tax_rate)
print(f'The pre-tax cost of debt is {pretax_yield:.2%}\n'
f'The after-tax cost of debt is {aftertax_yield:.02%}\n'
f'With a {tax_rate:.02%} tax rate')
show_debt_costs(pretax_yield, tax_rate)
###Output
The pre-tax cost of debt is 5.44%
The after-tax cost of debt is 3.54%
With a 35.00% tax rate
###Markdown
Level 2 Solution
###Code
inc_df = pd.read_excel('inc.xlsx', index_col = 0) # Document from stockrow.com
bs_df = pd.read_excel('bs.xlsx', index_col = 0) # Document from stockrow.com
date = pd.to_datetime('2019-07-31')
inc_df[date]
int_exp = inc_df[date]['Non-operating Interest Expenses']
total_debt = bs_df[date]['Total Debt']
pretax_cod = int_exp / total_debt
tax_paid = inc_df[date]['Income Tax Provision']
ebt = inc_df[date]['EBT']
tax_rate_wmt = tax_paid / ebt
show_debt_costs(pretax_cod, tax_rate_wmt)
###Output
The pre-tax cost of debt is 1.14%
The after-tax cost of debt is 0.85%
With a 25.10% tax rate
|
notebooks/ww_classifier.ipynb | ###Markdown
CS 224N Lecture 3: Word Window Classification Pytorch Exploration Author: Matthew Lamm
###Code
import pprint
import torch
import torch.nn as nn
pp = pprint.PrettyPrinter()
###Output
_____no_output_____
###Markdown
Our Data The task at hand is to assign a label of 1 to words in a sentence that correspond with a LOCATION, and a label of 0 to everything else. In this simplified example, we only ever see spans of length 1.
###Code
train_sents = [s.lower().split() for s in ["we 'll always have Paris",
"I live in Germany",
"He comes from Denmark",
"The capital of Denmark is Copenhagen"]]
train_labels = [[0, 0, 0, 0, 1],
[0, 0, 0, 1],
[0, 0, 0, 1],
[0, 0, 0, 1, 0, 1]]
assert all([len(train_sents[i]) == len(train_labels[i]) for i in range(len(train_sents))])
test_sents = [s.lower().split() for s in ["She comes from Paris"]]
test_labels = [[0, 0, 0, 1]]
assert all([len(test_sents[i]) == len(test_labels[i]) for i in range(len(test_sents))])
###Output
_____no_output_____
###Markdown
Creating a dataset of batched tensors. PyTorch (like other deep learning frameworks) is optimized to work on __tensors__, which can be thought of as a generalization of vectors and matrices with arbitrarily large rank. Here we'll go over how to translate data to a list of vocabulary indices, and how to construct *batch tensors* out of the data for easy input to our model. We'll use the *torch.utils.data.DataLoader* object to handle batching and iteration.
###Code
id_2_word = ["<pad>", "<unk>", "we", "always", "have", "paris",
"i", "live", "in", "germany",
"he", "comes", "from", "denmark",
"the", "of", "is", "copenhagen"]
word_2_id = {w:i for i,w in enumerate(id_2_word)}
instance = train_sents[0]
print(instance)
def convert_tokens_to_inds(sentence, word_2_id):
return [word_2_id.get(t, word_2_id["<unk>"]) for t in sentence]
token_inds = convert_tokens_to_inds(instance, word_2_id)
pp.pprint(token_inds)
###Output
[2, 1, 3, 4, 5]
###Markdown
Let's convince ourselves that worked:
###Code
print([id_2_word[tok_idx] for tok_idx in token_inds])
###Output
['we', '<unk>', 'always', 'have', 'paris']
###Markdown
Padding for windows. In the word window classifier, for each word in the sentence we want to get the +/- n window around the word, where 0 <= n < len(sentence).In order for such windows to be defined for words at the beginning and ends of the sentence, we actually want to insert padding around the sentence before converting to indices:
###Code
def pad_sentence_for_window(sentence, window_size, pad_token="<pad>"):
return [pad_token]*window_size + sentence + [pad_token]*window_size
window_size = 2
instance = pad_sentence_for_window(train_sents[0], window_size)
print(instance)
###Output
['<pad>', '<pad>', 'we', "'ll", 'always', 'have', 'paris', '<pad>', '<pad>']
###Markdown
Let's make sure this works with our vocabulary:
###Code
for sent in train_sents:
tok_idxs = convert_tokens_to_inds(pad_sentence_for_window(sent, window_size), word_2_id)
print([id_2_word[idx] for idx in tok_idxs])
###Output
['<pad>', '<pad>', 'we', '<unk>', 'always', 'have', 'paris', '<pad>', '<pad>']
['<pad>', '<pad>', 'i', 'live', 'in', 'germany', '<pad>', '<pad>']
['<pad>', '<pad>', 'he', 'comes', 'from', 'denmark', '<pad>', '<pad>']
['<pad>', '<pad>', 'the', '<unk>', 'of', 'denmark', 'is', 'copenhagen', '<pad>', '<pad>']
###Markdown
Batching sentences together with a DataLoader When we train our model, we rarely update with respect to a single training instance at a time, because a single instance provides a very noisy estimate of the global loss's gradient. We instead construct small *batches* of data, and update parameters for each batch. Given some batch size, we want to construct batch tensors out of the word index lists we've just created with our vocab.For each length B list of inputs, we'll have to: (1) Add window padding to sentences in the batch like we just saw. (2) Add additional padding so that each sentence in the batch is the same length. (3) Make sure our labels are in the desired format.At the level of the dataest we want: (4) Easy shuffling, because shuffling from one training epoch to the next gets rid of pathological batches that are tough to learn from. (5) Making sure we shuffle inputs and their labels together! PyTorch provides us with an object *torch.utils.data.DataLoader* that gets us (4) and (5). All that's required of us is to specify a *collate_fn* that tells it how to do (1), (2), and (3).
###Code
l = torch.LongTensor(train_labels[0])
pp.pprint(("raw train label instance", l))
print(l.size())
one_hots = torch.zeros((2, len(l)))
pp.pprint(("unfilled label instance", one_hots))
print(one_hots.size())
one_hots[1] = l
pp.pprint(("one-hot labels", one_hots))
l_not = ~l.byte()
one_hots[0] = l_not
pp.pprint(("one-hot labels", one_hots))
from torch.utils.data import DataLoader
from functools import partial
def my_collate(data, window_size, word_2_id):
"""
For some chunk of sentences and labels
-add winow padding
-pad for lengths using pad_sequence
-convert our labels to one-hots
-return padded inputs, one-hot labels, and lengths
"""
x_s, y_s = zip(*data)
# deal with input sentences as we've seen
window_padded = [convert_tokens_to_inds(pad_sentence_for_window(sentence, window_size), word_2_id)
for sentence in x_s]
# append zeros to each list of token ids in batch so that they are all the same length
padded = nn.utils.rnn.pad_sequence([torch.LongTensor(t) for t in window_padded], batch_first=True)
# convert labels to one-hots
labels = []
lengths = []
for y in y_s:
lengths.append(len(y))
label = torch.zeros((len(y),2 ))
true = torch.LongTensor(y)
false = ~true.byte()
label[:, 0] = false
label[:, 1] = true
labels.append(label)
padded_labels = nn.utils.rnn.pad_sequence(labels, batch_first=True)
return padded.long(), padded_labels, torch.LongTensor(lengths)
# Shuffle True is good practice for train loaders.
# Use functools.partial to construct a partially populated collate function
example_loader = DataLoader(list(zip(train_sents,
train_labels)),
batch_size=2,
shuffle=True,
collate_fn=partial(my_collate, window_size=2, word_2_id=word_2_id))
for batched_input, batched_labels, batch_lengths in example_loader:
pp.pprint(("inputs", batched_input, batched_input.size()))
pp.pprint(("labels", batched_labels, batched_labels.size()))
pp.pprint(batch_lengths)
break
###Output
('inputs',
tensor([[ 0, 0, 6, 7, 8, 9, 0, 0, 0, 0],
[ 0, 0, 14, 1, 15, 13, 16, 17, 0, 0]]),
torch.Size([2, 10]))
('labels',
tensor([[[255., 0.],
[255., 0.],
[255., 0.],
[254., 1.],
[ 0., 0.],
[ 0., 0.]],
[[255., 0.],
[255., 0.],
[255., 0.],
[254., 1.],
[255., 0.],
[254., 1.]]]),
torch.Size([2, 6, 2]))
tensor([4, 6])
###Markdown
Modeling Thinking through vectorization of word windows. Before we go ahead and build our model, let's think about the first thing it needs to do to its inputs. We're passed batches of sentences. For each sentence i in the batch, for each word j in the sentence, we want to construct a single tensor out of the embeddings surrounding word j in the +/- n window. Thus, the first thing we're going to need is a (B, L, 2N+1) tensor of token indices. A *terrible* but nevertheless informative *iterative* solution looks something like the following, where we iterate through batch elements in our (dummy) input, iterating over non-padded word positions in those, and for each non-padded word position, construct a window:
###Code
dummy_input = torch.zeros(2, 8).long()
dummy_input[:,2:-2] = torch.arange(1,9).view(2,4)
pp.pprint(dummy_input)
dummy_output = [[[dummy_input[i, j-2+k].item() for k in range(2*2+1)]
for j in range(2, 6)]
for i in range(2)]
dummy_output = torch.LongTensor(dummy_output)
print(dummy_output.size())
pp.pprint(dummy_output)
###Output
torch.Size([2, 4, 5])
tensor([[[0, 0, 1, 2, 3],
[0, 1, 2, 3, 4],
[1, 2, 3, 4, 0],
[2, 3, 4, 0, 0]],
[[0, 0, 5, 6, 7],
[0, 5, 6, 7, 8],
[5, 6, 7, 8, 0],
[6, 7, 8, 0, 0]]])
###Markdown
*Technically* it works: For each element in the batch, for each word in the original sentence and ignoring window padding, we've got the 5 token indices centered at that word. But in practice this will be crazy slow. Instead, we ideally want to find the right tensor operation in the PyTorch arsenal. Here, that happens to be __Tensor.unfold__.
###Code
dummy_input.unfold(1, 2*2+1, 1)
###Output
_____no_output_____
###Markdown
A model in full. In PyTorch, we implement models by extending the nn.Module class. Minimally, this requires implementing an *\_\_init\_\_* function and a *forward* function. In *\_\_init\_\_* we want to store model parameters (weights) and hyperparameters (dimensions).
###Code
class SoftmaxWordWindowClassifier(nn.Module):
"""
A one-layer, binary word-window classifier.
"""
def __init__(self, config, vocab_size, pad_idx=0):
super(SoftmaxWordWindowClassifier, self).__init__()
"""
Instance variables.
"""
self.window_size = 2*config["half_window"]+1
self.embed_dim = config["embed_dim"]
self.hidden_dim = config["hidden_dim"]
self.num_classes = config["num_classes"]
self.freeze_embeddings = config["freeze_embeddings"]
"""
Embedding layer
-model holds an embedding for each layer in our vocab
-sets aside a special index in the embedding matrix for padding vector (of zeros)
-by default, embeddings are parameters (so gradients pass through them)
"""
self.embed_layer = nn.Embedding(vocab_size, self.embed_dim, padding_idx=pad_idx)
if self.freeze_embeddings:
self.embed_layer.weight.requires_grad = False
"""
Hidden layer
-we want to map embedded word windows of dim (window_size+1)*self.embed_dim to a hidden layer.
-nn.Sequential allows you to efficiently specify sequentially structured models
-first the linear transformation is evoked on the embedded word windows
-next the nonlinear transformation tanh is evoked.
"""
self.hidden_layer = nn.Sequential(nn.Linear(self.window_size*self.embed_dim,
self.hidden_dim),
nn.Tanh())
"""
Output layer
-we want to map elements of the output layer (of size self.hidden dim) to a number of classes.
"""
self.output_layer = nn.Linear(self.hidden_dim, self.num_classes)
"""
Softmax
-The final step of the softmax classifier: mapping final hidden layer to class scores.
-pytorch has both logsoftmax and softmax functions (and many others)
-since our loss is the negative LOG likelihood, we use logsoftmax
-technically you can take the softmax, and take the log but PyTorch's implementation
is optimized to avoid numerical underflow issues.
"""
self.log_softmax = nn.LogSoftmax(dim=2)
def forward(self, inputs):
"""
Let B:= batch_size
L:= window-padded sentence length
D:= self.embed_dim
S:= self.window_size
H:= self.hidden_dim
inputs: a (B, L) tensor of token indices
"""
B, L = inputs.size()
"""
Reshaping.
Takes in a (B, L) LongTensor
Outputs a (B, L~, S) LongTensor
"""
        # First, get our word windows for each word in our input.
token_windows = inputs.unfold(1, self.window_size, 1)
_, adjusted_length, _ = token_windows.size()
# Good idea to do internal tensor-size sanity checks, at the least in comments!
assert token_windows.size() == (B, adjusted_length, self.window_size)
"""
Embedding.
Takes in a torch.LongTensor of size (B, L~, S)
Outputs a (B, L~, S, D) FloatTensor.
"""
embedded_windows = self.embed_layer(token_windows)
"""
Reshaping.
Takes in a (B, L~, S, D) FloatTensor.
Resizes it into a (B, L~, S*D) FloatTensor.
-1 argument "infers" what the last dimension should be based on leftover axes.
"""
embedded_windows = embedded_windows.view(B, adjusted_length, -1)
"""
Layer 1.
Takes in a (B, L~, S*D) FloatTensor.
Resizes it into a (B, L~, H) FloatTensor
"""
layer_1 = self.hidden_layer(embedded_windows)
"""
Layer 2
Takes in a (B, L~, H) FloatTensor.
Resizes it into a (B, L~, 2) FloatTensor.
"""
output = self.output_layer(layer_1)
"""
Softmax.
Takes in a (B, L~, 2) FloatTensor of unnormalized class scores.
Outputs a (B, L~, 2) FloatTensor of (log-)normalized class scores.
"""
output = self.log_softmax(output)
return output
###Output
_____no_output_____
###Markdown
Training. Now that we've got a model, we have to train it.
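Since the model outputs log-probabilities, training minimizes the average negative log-likelihood over all non-padded word positions, $\mathcal{L} = -\frac{1}{\sum_b L_b}\sum_{b}\sum_{t=1}^{L_b} \log p_\theta(y_{b,t} \mid x_{b,t})$, which is what `loss_function` below is intended to compute by masking the log-softmax outputs with the one-hot labels.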
###Code
def loss_function(outputs, labels, lengths):
"""Computes negative LL loss on a batch of model predictions."""
B, L, num_classes = outputs.size()
num_elems = lengths.sum().float()
# get only the values with non-zero labels
loss = outputs*labels
# rescale average
return -loss.sum() / num_elems
def train_epoch(loss_function, optimizer, model, train_data):
## For each batch, we must reset the gradients
## stored by the model.
total_loss = 0
for batch, labels, lengths in train_data:
# clear gradients
optimizer.zero_grad()
# evoke model in training mode on batch
outputs = model.forward(batch)
# compute loss w.r.t batch
loss = loss_function(outputs, labels, lengths)
        # pass gradients back, starting from the loss value
loss.backward()
# update parameters
optimizer.step()
total_loss += loss.item()
# return the total to keep track of how you did this time around
return total_loss
config = {"batch_size": 4,
"half_window": 2,
"embed_dim": 25,
"hidden_dim": 25,
"num_classes": 2,
"freeze_embeddings": False,
}
learning_rate = .0002
num_epochs = 10000
model = SoftmaxWordWindowClassifier(config, len(word_2_id))
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
train_loader = torch.utils.data.DataLoader(list(zip(train_sents, train_labels)),
batch_size=2,
shuffle=True,
collate_fn=partial(my_collate, window_size=2, word_2_id=word_2_id))
losses = []
for epoch in range(num_epochs):
epoch_loss = train_epoch(loss_function, optimizer, model, train_loader)
if epoch % 100 == 0:
losses.append(epoch_loss)
print(losses)
###Output
[364.9083557128906, 5.592585325241089, 4.333539605140686, 4.025648593902588, 3.8844380378723145, 3.800320625305176, 3.7496031522750854, 3.7059574127197266, 3.677868604660034, 3.6478893756866455, 3.6281195878982544, 3.6077922582626343, 3.5958402156829834, 3.580086350440979, 3.5669249296188354, 3.5566670894622803, 3.548741579055786, 3.5431668758392334, 3.532412528991699, 3.5259872674942017, 3.523716449737549, 3.5157454013824463, 3.5136560201644897, 3.506511926651001, 3.502467393875122, 3.4980881214141846, 3.49469256401062, 3.495018482208252, 3.489223003387451, 3.4864810705184937, 3.483937978744507, 3.481029987335205, 3.4822566509246826, 3.4767366647720337, 3.478212833404541, 3.4763729572296143, 3.474643588066101, 3.470025658607483, 3.471448302268982, 3.469985604286194, 3.4651877880096436, 3.464245557785034, 3.4626097679138184, 3.4614157676696777, 3.4636603593826294, 3.459538698196411, 3.458497405052185, 3.457494616508484, 3.459591269493103, 3.455302357673645, 3.4578123092651367, 3.45696759223938, 3.4530892372131348, 3.4520175457000732, 3.4512782096862793, 3.4508498907089233, 3.4532437324523926, 3.4494857788085938, 3.4485734701156616, 3.4479674100875854, 3.447365164756775, 3.450154662132263, 3.446488618850708, 3.4490644931793213, 3.4485479593276978, 3.4446771144866943, 3.444197177886963, 3.4439538717269897, 3.443262219429016, 3.443039059638977, 3.4457393884658813, 3.4421911239624023, 3.441568970680237, 3.4411717653274536, 3.4409974813461304, 3.4404067993164062, 3.4400572776794434, 3.4397042989730835, 3.439358115196228, 3.439216136932373, 3.442047357559204, 3.43857204914093, 3.438269019126892, 3.4377812147140503, 3.4408377408981323, 3.437371015548706, 3.4402583837509155, 3.439995050430298, 3.436378002166748, 3.436286449432373, 3.4358612298965454, 3.438954472541809, 3.4355430603027344, 3.438481092453003, 3.43490469455719, 3.434677004814148, 3.4344617128372192, 3.437579035758972, 3.437371850013733, 3.4338138103485107]
###Markdown
Prediction.
###Code
test_loader = torch.utils.data.DataLoader(list(zip(test_sents, test_labels)),
batch_size=1,
shuffle=False,
collate_fn=partial(my_collate, window_size=2, word_2_id=word_2_id))
for test_instance, labs, _ in test_loader:
outputs = model.forward(test_instance)
print(torch.argmax(outputs, dim=2))
print(torch.argmax(labs, dim=2))
###Output
tensor([[0, 0, 0, 0]], grad_fn=<NotImplemented>)
tensor([[0, 0, 0, 0]])
|
experimental/Analysis_data_20200503.ipynb | ###Markdown
Timings
###Code
import os
import sys
import pylab as plt
%matplotlib inline
%load_ext autoreload
%autoreload 2
data_folder = '/home/simon/git/vimms/experimental/data_20200503/timings'
qca_file = os.path.join(data_folder,'from_controller_fullscan_QCA.mzML')
pymzm_folder = '/home/simon/git/pymzm'
sys.path.append(pymzm_folder)
from mass_spec_utils.data_import.mzml import MZMLFile
qca_full = MZMLFile(qca_file)
def extract_timings(mzml_file_object):
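    # For each consecutive pair of scans, record the time gap (in seconds),
    # keyed by (ms_level of current scan, ms_level of next scan); e.g. the
    # key (1, 2) collects the times from an MS1 scan to the MS2 scan that
    # immediately follows it. Mean gaps per key are returned alongside.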
time_dict = {(1,1):[],(1,2):[],(2,1):[],(2,2):[]}
for i,s in enumerate(mzml_file_object.scans[:-1]):
current = s.ms_level
next_ = mzml_file_object.scans[i+1].ms_level
tup = (current,next_)
time_dict[tup].append(60*mzml_file_object.scans[i+1].rt_in_minutes - 60*s.rt_in_minutes)
mean_times = {}
for k,v in time_dict.items():
if len(v) > 0:
me = sum(v)/len(v)
mean_times[k] = me
return time_dict,mean_times
time_dict,mean_times = extract_timings(qca_full)
print(mean_times)
plt.hist(time_dict[(1,1)],bins=20)
plt.title('Fullscan MS1 times, mean = {:.3f}'.format(mean_times[(1,1)]))
output_folder = '/home/simon/git/vimms/experimental/data_20200503/timings/'
plt.savefig(os.path.join(output_folder,'full.png'))
# Load a topN for comparison
topn_file = '/home/simon/git/vimms/experimental/data_20200503/TopN_vs_ROI/from_controller_TopN_QCA.mzML'
topn_file_obj = MZMLFile(topn_file)
time_dict,mean_times = extract_timings(topn_file_obj)
plt.hist(time_dict[(1,2)])
plt.title('TopN MS1 times, mean = {:.3f}'.format(mean_times[(1,2)]))
plt.savefig(os.path.join(output_folder,'topn.png'))
# Load a ROI for comparison
roi_file = '/home/simon/git/vimms/experimental/data_20200503/TopN_vs_ROI/from_controller_smart_ROI_QCA.mzML'
roi_file_obj = MZMLFile(roi_file)
time_dict,mean_times = extract_timings(roi_file_obj)
plt.hist(time_dict[(1,2)])
plt.title('ROI MS1 times, mean = {:.3f}'.format(mean_times[(1,2)]))
plt.savefig(os.path.join(output_folder,'roi.png'))
plt.hist(time_dict[(2,2)])
plt.title('MS2 scan time, mean = {:.3f}'.format(mean_times[(2,2)]))
plt.savefig(os.path.join(output_folder,'ms2.png'))
###Output
_____no_output_____
###Markdown
Don't know what's going on with this one....
###Code
machine_file = '/home/simon/git/vimms/experimental/data_20200503/timings/QCB_fullscan.mzML'
machine_file_obj = MZMLFile(machine_file)
time_dict,mean_times = extract_timings(machine_file_obj)
plt.hist(time_dict[(1,1)])
plt.title('Machine MS1 times, mean = {:.3f}'.format(mean_times[(1,1)]))
# plt.savefig(os.path.join(output_folder,'machine.png'))
###Output
Loaded 1848 scans
###Markdown
Testing the ROI v TopN QCB
###Code
root = '/home/simon/git/vimms/experimental/data_20200503/TopN_vs_ROI'
pp_file = os.path.join(root,'from_controller_TopN_QCB_pp.csv')
from mass_spec_utils.data_import.mzmine import load_picked_boxes
boxes = load_picked_boxes(pp_file)
topn_file = os.path.join(root,'from_controller_TopN_QCB.mzML')
topn_file_obj = MZMLFile(topn_file)
roi_file = os.path.join(root,'from_controller_smart_ROI_QCB.mzML')
roi_file_obj = MZMLFile(roi_file)
def summarise(mz_file_object):
n_scans = len(mz_file_object.scans)
n_ms1_scans = len(list(filter(lambda x: x.ms_level == 1,mz_file_object.scans)))
n_ms2_scans = len(list(filter(lambda x: x.ms_level == 2,mz_file_object.scans)))
print("Total scans = {}, MS1 = {}, MS2 = {}".format(n_scans,n_ms1_scans,n_ms2_scans))
print("TopN:")
summarise(topn_file_obj)
print("ROI:")
summarise(roi_file_obj)
from mass_spec_utils.data_import.mzmine import map_boxes_to_scans
topn_s2b,topn_b2s = map_boxes_to_scans(topn_file_obj,boxes,half_isolation_window=0)
roi_s2b,roi_b2s = map_boxes_to_scans(roi_file_obj,boxes,half_isolation_window=0)
print(len(topn_b2s))
print(len(roi_b2s))
###Output
1155
1457
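###Markdown
As a quick, optional check, the counts above can also be expressed as the fraction of picked-peak boxes that received at least one MS2 scan, using the objects computed above:
###Code
# Illustrative coverage fractions for the QCB comparison.
print("TopN coverage: {:.1%}".format(len(topn_b2s) / len(boxes)))
print("ROI coverage: {:.1%}".format(len(roi_b2s) / len(boxes)))
###Output
_____no_output_____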
###Markdown
QCA
###Code
pp_file = os.path.join(root,'from_controller_TopN_QCA_pp.csv')
from mass_spec_utils.data_import.mzmine import load_picked_boxes
boxes = load_picked_boxes(pp_file)
topn_file = os.path.join(root,'from_controller_TopN_QCA.mzML')
topn_file_obj = MZMLFile(topn_file)
roi_file = os.path.join(root,'from_controller_smart_ROI_QCA.mzML')
roi_file_obj = MZMLFile(roi_file)
print("TopN:")
summarise(topn_file_obj)
print("ROI:")
summarise(roi_file_obj)
topn_s2b,topn_b2s = map_boxes_to_scans(topn_file_obj,boxes,half_isolation_window=0)
roi_s2b,roi_b2s = map_boxes_to_scans(roi_file_obj,boxes,half_isolation_window=0)
print(len(topn_b2s))
print(len(roi_b2s))
###Output
986
1050
###Markdown
TODO- ~Run optimal with the QCA picked peaks. Where do we get to?~- Run simulator with the QCA TopN as the seed file -- does the performance we see match?
###Code
sys.path.append('/home/simon/git/vimms')
from vimms.MassSpec import IndependentMassSpectrometer
from vimms.Controller import TopNController,RoiController,SmartRoiController
from vimms.Roi import make_roi, RoiToChemicalCreator
from vimms.BOMAS import *
from vimms.Common import *
from vimms.Environment import *
from pathlib import Path
from vimms.PlotsForPaper import get_frag_events
QCB_MZML2CHEMS_DICT = {'min_ms1_intensity': 0,
'mz_tol': 5,
'mz_units':'ppm',
'min_length':1,
'min_intensity':0,
'start_rt':0,
'stop_rt':1560}
ps_frag_QCB = load_obj('/home/simon/git/vimms/experimental/simon_res/QCB/peak_sampler_mz_rt_int_beerqcb_fragmentation.p')
TopN_QCB_dataset = mzml2chems(os.path.join(root,'from_controller_TopN_QCB.mzML'), ps_frag_QCB, QCB_MZML2CHEMS_DICT, n_peaks=None)
TopN_QCA_dataset = mzml2chems(os.path.join(root,'from_controller_TopN_QCA.mzML'), ps_frag_QCB, QCB_MZML2CHEMS_DICT, n_peaks=None)
save_obj(TopN_QCB_dataset, os.path.join(root,'Simulator','TopN_QCB_dataset.mzml'))
save_obj(TopN_QCA_dataset, os.path.join(root,'Simulator','TopN_QCA_dataset.mzml'))
TopN_QCB_dataset = load_obj(os.path.join(root,'Simulator','TopN_QCB_dataset.mzml'))
TopN_QCA_dataset = load_obj(os.path.join(root,'Simulator','TopN_QCA_dataset.mzml'))
min_rt = 0
max_rt = 26*60 # entire run
min_ms1_intensity = 5000
mz_tol = 10
rt_tol = 15
N = 10
# these are derived from real data (see bottom of notebook)
roi_time_dict = {1: 0.71,2:0.20}
topn_time_dict = {1: 0.60,2:0.20}
ionisation_mode = POSITIVE
isolation_width = 1
output_folder = os.path.join(root,'Simulator','Output')
min_roi_intensity = 500
min_roi_length = 3 # still in scans, as to work in seconds, need to pass parameter. But doesn't matter when parameter below is equal to 1!
min_roi_length_for_fragmentation = 1
from vimms.MassSpec import IndependentMassSpectrometer
from vimms.Controller import TopNController,RoiController,SmartRoiController
from vimms.Environment import Environment
set_log_level_warning()
###Output
_____no_output_____
###Markdown
QCB topN
###Code
controller = TopNController(ionisation_mode, N, isolation_width, mz_tol, rt_tol, min_ms1_intensity)
mass_spec = IndependentMassSpectrometer(ionisation_mode, TopN_QCB_dataset,
ps_frag_QCB, add_noise=True,
scan_duration_dict = topn_time_dict)
env = Environment(mass_spec, controller, min_rt, max_rt, progress_bar=True)
env.run()
env.write_mzML(output_folder,'qcb_topn.mzml')
###Output
_____no_output_____
###Markdown
QCA TopN
###Code
controller = TopNController(ionisation_mode, N, isolation_width, mz_tol, rt_tol, min_ms1_intensity)
mass_spec = IndependentMassSpectrometer(ionisation_mode, TopN_QCA_dataset,
ps_frag_QCB, add_noise=True,
scan_duration_dict = topn_time_dict)
env = Environment(mass_spec, controller, min_rt, max_rt, progress_bar=True)
env.run()
env.write_mzML(output_folder,'qca_topn.mzml')
from vimms.Controller import OptimalTopNController
###Output
_____no_output_____
###Markdown
QCB Optimal
###Code
pp_file = os.path.join(root,'from_controller_TopN_QCB_pp.csv')
boxes = load_picked_boxes(pp_file)
score_method = 'intensity'
controller = OptimalTopNController(ionisation_mode, N, isolation_width, mz_tol, rt_tol,
min_ms1_intensity,boxes,
score_method = score_method)
mass_spec = IndependentMassSpectrometer(ionisation_mode, TopN_QCB_dataset,
ps_frag_QCB, add_noise=True,
scan_duration_dict = topn_time_dict)
env = Environment(mass_spec, controller, min_rt, max_rt, progress_bar=True)
env.run()
env.write_mzML(output_folder,'qcb_optimal_{}.mzml'.format(score_method))
###Output
(1560.600s) ms_level=1 N=10 DEW=15: 100%|█████████▉| 1559.9999999999948/1560 [01:21<00:00, 19.16it/s]
###Markdown
QCA Optimal
###Code
pp_file = os.path.join(root,'from_controller_TopN_QCA_pp.csv')
score_method = 'intensity'
boxes = load_picked_boxes(pp_file)
controller = OptimalTopNController(ionisation_mode, N, isolation_width, mz_tol, rt_tol,
                                min_ms1_intensity,boxes,  # pass the loaded boxes, matching the QCB cell above
score_method = score_method)
mass_spec = IndependentMassSpectrometer(ionisation_mode, TopN_QCA_dataset,
ps_frag_QCB, add_noise=True,
scan_duration_dict = topn_time_dict)
env = Environment(mass_spec, controller, min_rt, max_rt, progress_bar=True)
env.run()
env.write_mzML(output_folder,'qca_optimal_{}.mzml'.format(score_method))
def evaluate(mzml_file,peak_file):
mzml_file_obj = MZMLFile(mzml_file)
boxes = load_picked_boxes(peak_file)
s2b,b2s = map_boxes_to_scans(mzml_file_obj,boxes,half_isolation_window=0,allow_last_overlap=True)
n_scans = len(mzml_file_obj.scans)
n_ms1_scans = len(list(filter(lambda x: x.ms_level == 1,mzml_file_obj.scans)))
n_ms2_scans = len(list(filter(lambda x: x.ms_level == 2,mzml_file_obj.scans)))
# compute average absolute difference in seconds between ms2 scan and peak apex
errs = []
for b,scans in b2s.items():
rt = b.rt_in_seconds
scan_times = [s.rt_in_seconds for s in scans]
temp_err = [abs(s-rt) for s in scan_times]
errs.append(min(temp_err))
print("Total scans = {}, MS1 = {}, MS2 = {}".format(n_scans,n_ms1_scans,n_ms2_scans))
print("Total boxes: ",len(boxes),"Fragmented: ",len(b2s))
print("Mean absolute error: ",np.mean(errs))
mz2pp = {os.path.join(output_folder,'qca_optimal.mzml'):os.path.join(root,'from_controller_TopN_QCA_pp.csv'),
os.path.join(output_folder,'qcb_optimal.mzml'):os.path.join(root,'from_controller_TopN_QCB_pp.csv'),
os.path.join(output_folder,'qca_topn.mzml'):os.path.join(root,'from_controller_TopN_QCA_pp.csv'),
os.path.join(output_folder,'qcb_topn.mzml'):os.path.join(root,'from_controller_TopN_QCB_pp.csv'),
os.path.join(output_folder,'qcb_optimal_2.mzml'):os.path.join(root,'from_controller_TopN_QCB_pp.csv')}
mz2pp = {os.path.join(output_folder,'qcb_optimal.mzml'):os.path.join(root,'from_controller_TopN_QCB_pp.csv'),
os.path.join(output_folder,'qcb_optimal_urgency.mzml'):os.path.join(root,'from_controller_TopN_QCB_pp.csv'),
os.path.join(output_folder,'qcb_optimal_apex.mzml'):os.path.join(root,'from_controller_TopN_QCB_pp.csv'),
os.path.join(output_folder,'qcb_optimal_random.mzml'):os.path.join(root,'from_controller_TopN_QCB_pp.csv')}
for k,v in mz2pp.items():
print(k.split(os.sep)[-1],v.split(os.sep)[-1])
evaluate(k,v)
print()
print()
###Output
qcb_optimal.mzml from_controller_TopN_QCB_pp.csv
Loaded 4266 scans
Total scans = 4266, MS1 = 1711, MS2 = 2555
Total boxes: 5667 Fragmented: 2560
Mean absolute error: 14.357589947553805
qcb_optimal_urgency.mzml from_controller_TopN_QCB_pp.csv
Loaded 4316 scans
Total scans = 4316, MS1 = 1630, MS2 = 2686
Total boxes: 5667 Fragmented: 2692
Mean absolute error: 17.20212011653345
qcb_optimal_apex.mzml from_controller_TopN_QCB_pp.csv
Loaded 4291 scans
Total scans = 4291, MS1 = 1667, MS2 = 2624
Total boxes: 5667 Fragmented: 2628
Mean absolute error: 13.67822969217118
qcb_optimal_random.mzml from_controller_TopN_QCB_pp.csv
Loaded 4249 scans
Total scans = 4249, MS1 = 1704, MS2 = 2545
Total boxes: 5667 Fragmented: 2550
Mean absolute error: 15.679441694055122
###Markdown
*TODO*- Why more boxes fragmented than scans? Must imply overlapping boxes?- Check this
###Code
boxes = load_picked_boxes(os.path.join(root,'from_controller_TopN_QCB_pp.csv'))
mzml_file_obj = MZMLFile(os.path.join(output_folder,'qcb_optimal.mzml'))
mzml_file_obj = MZMLFile(os.path.join(root,'from_controller_TopN_QCB.mzML'))
s2b,b2s = map_boxes_to_scans(mzml_file_obj,boxes,half_isolation_window=0,allow_last_overlap=True)
multi_boxes = list(filter(lambda x: len(s2b[x])>1,list(s2b.keys())))
mb = multi_boxes[0]
print(mb.rt_in_seconds,mb.precursor_mz)
for box in s2b[mb]:
print(box.mz_range,box.rt_range_in_seconds)
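# Added follow-up sketch for the TODO above (illustrative only, uses the variables already
# defined in this cell): quantify how often a single MS2 scan falls inside more than one
# picked box, since overlapping boxes would explain more fragmented boxes than MS2 scans.
print("{} of {} matched scans hit more than one box".format(len(multi_boxes), len(s2b)))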
###Output
70.9074114079998 252.0252227783203
[252.02505493164062, 252.0253448486328] [48.64181555299998, 170.694868752]
[252.02508544921875, 252.0253448486328] [30.573936528, 94.2535038889998]
|
02-IntroToNLP/01-POS_Tagging.ipynb | ###Markdown
Part-of-Speech Tagging using NLTKOne task in NLP has been to reliably identify a word's part of speech. This can help us with the ever-present task of identifying content words, but can be used in a variety of analyses. Part-of-speech tagging is a specific instance in the larger category of word tagging, or placing words in pre-determined categories.Today we'll learn how to identify a word's part of speech and think through reasons we may want to do this. Learning Goals:* Understand the intuition behind tagging and information extraction* Use NLTK to tag the part of speech of each word* Count most frequent words based on their part of speech Outline* [Part-of-Speech Tagging](pos)* [Counting words based on their part of speech](counting) Key Terms* *part-of-speech tagging*: * the process of marking up a word in a text as corresponding to a particular part of speech, based on both its definition and its context* *named entity recognition*: * a subtask of information extraction that seeks to locate and classify named entities in text into pre-defined categories such as the names of persons, organizations, locations, expressions of times, quantities, monetary values, percentages, etc* *tree* * data structure made up of nodes or vertices and edges without having any cycle. * *treebank*: * a parsed text corpus that annotates syntactic or semantic sentence structure* *tuple*: * a sequence of immutable Python objects Further ResourcesFor more information on information extraction using NLTK, see chapter 7: http://www.nltk.org/book/ch07.html Part-of-Speech Tagging You may have noticed that stop words are typically short function words. Intuitively, if we could identify the part of speech of a word, we would have another way of identifying content words. NLTK can do that too!NLTK has a function that will tag the part of speech of every token in a text. For this, we will re-create our original tokenized text sentence from the previous tutorial, with the stop words and punctuation intact.NLTK uses the Penn Treebank Project to tag the part-of-speech of the words. The NLTK algoritm is deterministic - it assigns the most common part of speech for each word, as found in the Penn Treebank. You can find a list of all the part-of-speech tags here:https://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html
###Code
import nltk
from nltk import word_tokenize
sentence = "For me it has to do with the work that gets done at the crossroads of \
digital media and traditional humanistic study. And that happens in two different ways. \
On the one hand, it's bringing the tools and techniques of digital media to bear \
on traditional humanistic questions; on the other, it's also bringing humanistic modes \
of inquiry to bear on digital media."
sentence_tokens = word_tokenize(sentence)
#check we did everything correctly
sentence_tokens
#use the nltk pos function to tag the tokens
tagged_sentence_tokens = nltk.pos_tag(sentence_tokens)
#view tagged sentence
tagged_sentence_tokens
###Output
_____no_output_____
###Markdown
Now comes more complicated code. Stay with me. The above output is a list of *tuples*. A tuple is a sequence of Python objects. In this case, each of these tuples is a sequence of strings. To loop through tuples is intuitively the same as looping through a list, but slightly different syntax. Note that this is not a list of lists, as we saw in our lesson on Pandas. This is a list of tuples.Let's pull out the part-of-speech tag from each tuple above and save that to a list. Notice the order stays exactly the same.
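As a quick illustrative sketch of that tuple-unpacking syntax before we use it below (the tagged pairs here are invented purely for illustration, not taken from the sentence above):
```python
# each element is a (word, tag) tuple, so both parts can be unpacked in the for statement
example_tags = [("digital", "JJ"), ("media", "NNS")]  # hypothetical pairs for illustration
for word, tag in example_tags:
    print(word, tag)
```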
###Code
word_tags = [tag for (word, tag) in tagged_sentence_tokens]
print(word_tags)
###Output
_____no_output_____
###Markdown
Question: What is the difference in syntax for the above code compared to our standard list comprehension code? Counting words based on their part of speech We can count the part-of-speech tags in a similar way we counted words, to output the most frequent types of words in our text. We can also count words based on their part of speech.First, we count the frequency of each part-of-speech tag.
###Code
tagged_frequency = nltk.FreqDist(word_tags)
tagged_frequency.most_common()
###Output
_____no_output_____
###Markdown
This sentence contains a lot of adjectives. So let's first look at the adjectives. Notice the syntax here.
###Code
adjectives = [word for (word,pos) in tagged_sentence_tokens if pos == 'JJ' or pos=='JJR' or pos=='JJS']
#print all of the adjectives
print(adjectives)
###Output
_____no_output_____
###Markdown
Let's do the same for nouns.
###Code
nouns = [word for (word,pos) in tagged_sentence_tokens if pos=='NN' or pos=='NNS']
#print all of the nouns
print(nouns)
###Output
_____no_output_____
###Markdown
And now verbs.
###Code
#verbs = [word for (word,pos) in tagged_sentence_tokens if pos == 'VB' or pos=='VBD' or pos=='VBG' or pos=='VBN' or pos=='VBP' or pos=='VBZ']
verbs = [word for (word,pos) in tagged_sentence_tokens if pos in ['VB', 'VBD','VBG','VBN','VBP','VBZ']]
#print all of the verbs
print(verbs)
##Ex: Print the most frequent nouns, adjective, and verbs in the sentence
######What does this tell us?
######Compare this to what we did earlier with removing stop words.
##Ex: Compare the most frequent part-of-speech used in two of the texts in our data folder
###Output
_____no_output_____ |
train-on-remote-vm/train-on-remote-vm.ipynb | ###Markdown
Train on Remote Virtual MachinesTrain MLflow Projects on remote DSVMs (Data Science Virtual Machines). Table of Contents1. Prerequisites - 1.1 Initialize Tracking Store and Experiment - 1.2 Create and Attach DSVM - 1.3 Configure the Backend Configuration object - 1.4 Modify your Environment Specification2. Submit Run Prerequisites Ensure you have done the following before running this notebook:- Connected to an AML Workspace- Have an existing remote DSVM in that Workspace- Have an MLproject file with an environment specification
###Code
# Prereq Checks
# Workspace check
from azureml.core import Workspace
workspace = Workspace.from_config()
print(workspace.name, workspace.resource_group, workspace.location, workspace.subscription_id, sep = '\n')
# Existing DSVM check
from azureml.core.compute import ComputeTarget
from azureml.core.compute_target import ComputeTargetException
dsvm_name = "dsvm"
try:
cpu_cluster = ComputeTarget(workspace = workspace, name = dsvm_name)
print("Found existing cluster, yay!")
except ComputeTargetException:
print("This compute target is not associated with your workspace!")
###Output
_____no_output_____
###Markdown
Create and Attach a DSVM as a Compute TargetYou can spin up a DSVM in two ways:1. Create a DSVM in your resource group using the following command```az vm create --resource-group <resource-group-name> --name <vm-name> --image microsoft-dsvm:linux-data-science-vm-ubuntu:linuxdsvmubuntu:latest --admin-username <admin-username> --admin-password <admin-password> --generate-ssh-keys --authentication-type password```2. Go to the [Azure Portal](https://ms.portal.azure.com/home) and in the search bar, type "Data Science Virtual Machine". On the right under "Marketplace", there should be an option to select "Data Science Virtual Machine - Ubuntu 18.04". Select 'Create' and add the required information. Set the region to be in Central US EUAP. Initialize Tracking Store and Experiment Set Tracking URI Set the MLflow tracking URI to point to your Azure ML Workspace. The subsequent logging calls from MLflow APIs will go to Azure ML services and will be tracked under your Workspace.
###Code
from azureml import core
from azureml.core import Workspace
import mlflow
workspace = Workspace.from_config()
mlflow.set_tracking_uri(workspace.get_mlflow_tracking_uri())
###Output
_____no_output_____
###Markdown
Create ExperimentCreate an MLflow Experiment to organize your runs. It can be set either by passing the name as a **parameter** in the mlflow.projects.run call or by the following:
###Code
experiment_name = "mlflow-example"
mlflow.set_experiment(experiment_name)
###Output
_____no_output_____
###Markdown
Create the Backend Configuration ObjectThe backend configuration object stores the necessary information for the integration, such as the compute target and whether to use your local managed environment or a system managed environment. The integration accepts "COMPUTE" and "USE_CONDA" as parameters, where "COMPUTE" is set to the name of your remote compute cluster and "USE_CONDA" creates a new environment for the project from the environment configuration file. If "COMPUTE" is present in the object, the project will be automatically submitted to the remote compute and "USE_CONDA" will be ignored. MLflow accepts a dictionary object or a path to a JSON file.
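As a minimal sketch of the JSON-file variant (the file name below is just an illustration, not part of this project), the same settings can be written to disk and the path passed instead of the dictionary:
```python
import json

# write the same settings to a JSON file (hypothetical file name)
with open("backend_config.json", "w") as f:
    json.dump({"COMPUTE": "dsvm", "USE_CONDA": False}, f)

# the path "backend_config.json" can then be passed as backend_config in mlflow.projects.run(...)
```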
###Code
# dictionary
backend_config = {"COMPUTE": "dsvm", "USE_CONDA": False}
###Output
_____no_output_____
###Markdown
Modify your Environment specificationAdd the azureml-mlflow package as a pip dependency to your environment configuration file. The project can run without this addition, but key artifacts and metrics will not be logged to your Workspace. Adding it to the file will look like this,```name: mlflow-examplechannels: - defaults - anaconda - conda-forgedependencies: - python=3.6 - scikit-learn=0.19.1 - pip - pip: - mlflow - azureml-mlflow``` Submit Run
###Code
remote_mlflow_run = mlflow.projects.run(uri=".",
parameters={"alpha":0.3},
backend = "azureml",
backend_config = backend_config)
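# Added sketch (not in the original notebook): the call above returns an MLflow
# SubmittedRun object, which can be used to monitor the remote job. The helpers
# below are standard MLflow Projects APIs, shown here under that assumption.
remote_mlflow_run.wait()                 # block until the remote run finishes
print(remote_mlflow_run.run_id)          # run id for looking up metrics and artifacts
print(remote_mlflow_run.get_status())    # e.g. FINISHED or FAILED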
###Output
_____no_output_____ |
labo/ML0120EN-3.1-Reveiw-LSTM-basics.ipynb | ###Markdown
RECURRENT NETWORKS IN DEEP LEARNING Hello and welcome to this notebook. In this notebook, we will go over concepts of the Long Short-Term Memory (LSTM) model, a refinement of the original Recurrent Neural Network model. By the end of this notebook, you should be able to understand the Long Short-Term Memory model, the benefits and problems it solves, and its inner workings and calculations. RECURRENT NETWORKS IN DEEP LEARNINGObjective for this Notebook 1. Learn Long Short-Term Memory Model 2. Stacked LTSM Table of Contents Introduction Long Short-Term Memory Model LTSM Stacked LTSM IntroductionRecurrent Neural Networks are Deep Learning models with simple structures and a feedback mechanism built-in, or in different words, the output of a layer is added to the next input and fed back to the same layer.The Recurrent Neural Network is a specialized type of Neural Network that solves the issue of **maintaining context for Sequential data** -- such as Weather data, Stocks, Genes, etc. At each iterative step, the processing unit takes in an input and the current state of the network, and produces an output and a new state that is re-fed into the network.Representation of a Recurrent Neural NetworkHowever, this model has some problems. It's very computationally expensive to maintain the state for a large amount of units, even more so over a long amount of time. Additionally, Recurrent Networks are very sensitive to changes in their parameters. As such, they are prone to different problems with their Gradient Descent optimizer -- they either grow exponentially (Exploding Gradient) or drop down to near zero and stabilize (Vanishing Gradient), both problems that greatly harm a model's learning capability.To solve these problems, Hochreiter and Schmidhuber published a paper in 1997 describing a way to keep information over long periods of time and additionally solve the oversensitivity to parameter changes, i.e., make backpropagating through the Recurrent Networks more viable. This proposed method is called Long Short-Term Memory (LSTM).(In this notebook, we will cover only LSTM and its implementation using TensorFlow) Long Short-Term Memory ModelThe Long Short-Term Memory, as it was called, was an abstraction of how computer memory works. It is "bundled" with whatever processing unit is implemented in the Recurrent Network, although outside of its flow, and is responsible for keeping, reading, and outputting information for the model. The way it works is simple: you have a linear unit, which is the information cell itself, surrounded by three logistic gates responsible for maintaining the data. One gate is for inputting data into the information cell, one is for outputting data from the input cell, and the last one is to keep or forget data depending on the needs of the network.Thanks to that, it not only solves the problem of keeping states, because the network can choose to forget data whenever information is not needed, it also solves the gradient problems, since the Logistic Gates have a very nice derivative.Long Short-Term Memory ArchitectureThe Long Short-Term Memory is composed of a linear unit surrounded by three logistic gates. 
The names for these gates vary from place to place, but the most usual names for them are: the "Input" or "Write" Gate, which handles the writing of data into the information cell; the "Output" or "Read" Gate, which handles the sending of data back onto the Recurrent Network; and the "Keep" or "Forget" Gate, which handles the maintaining and modification of the data stored in the information cellDiagram of the Long Short-Term Memory UnitThe three gates are the centerpiece of the LSTM unit. The gates, when activated by the network, perform their respective functions. For example, the Input Gate will write whatever data it is passed into the information cell, the Output Gate will return whatever data is in the information cell, and the Keep Gate will maintain the data in the information cell. These gates are analog and multiplicative, and as such, can modify the data based on the signal they are sent.For example, a usual flow of operations for the LSTM unit is as such: First off, the Keep Gate has to decide whether to keep or forget the data currently stored in memory. It receives both the input and the state of the Recurrent Network, and passes it through its Sigmoid activation. A value of $K_t = 1$ means that the LSTM unit should keep the data stored perfectly, while a value of $K_t = 0$ means that it should forget it entirely. Consider $S_{t-1}$ as the incoming (previous) state, $x_t$ as the incoming input, and $W_k$, $B_k$ as the weight and bias for the Keep Gate. Additionally, consider $Old_{t-1}$ as the data previously in memory. What happens can be summarized by this equation:$$K_t = \sigma(W_k \times [S_{t-1}, x_t] + B_k)$$$$Old_t = K_t \times Old_{t-1}$$ As you can see, $Old_{t-1}$ was multiplied by the value returned by the Keep Gate ($K_t$) -- this value is written in the memory cell.Then, the input and state are passed on to the Input Gate, in which there is another Sigmoid activation applied. Concurrently, the input is processed as normal by whatever processing unit is implemented in the network, and then multiplied by the Sigmoid activation's result $I_t$, much like the Keep Gate. Consider $W_i$ and $B_i$ as the weight and bias for the Input Gate, and $C_t$ the result of the processing of the inputs by the Recurrent Network.$$I_t = \sigma (W_i \times [S_{t-1},x_t]+B_i)$$$$New_t = I_t \times C_t$$ $New_t$ is the new data to be input into the memory cell. This is then added to whatever value is still stored in memory.$$Cell_t = Old_t + New_t$$ We now have the candidate data which is to be kept in the memory cell. The conjunction of the Keep and Input gates works in an analog manner, making it so that it is possible to keep part of the old data and add only part of the new data. Consider, however, what would happen if the Forget Gate was set to 0 and the Input Gate was set to 1:$$Old_t = 0 \times Old_{t-1}$$$$New_t = 1 \times C_t$$$$Cell_t = C_t$$ The old data would be totally forgotten and the new data would overwrite it completely.The Output Gate functions in a similar manner. To decide what we should output, we take the input data and state and pass it through a Sigmoid function as usual. The contents of our memory cell, however, are pushed onto a Tanh function to bind them between -1 and 1. Consider $W_o$ and $B_o$ as the weight and bias for the Output Gate.
$$O_t = \sigma (W_o \times [S_{t-1},x_t] + B_o)$$$$Output_t = O_t \times \tanh(Cell_t)$$ And that $Output_t$ is what is output into the Recurrent Network.The Logistic Function plottedAs mentioned many times, all three gates are logistic. The reason is that it is very easy to backpropagate through them, and as such, it is possible for the model to learn exactly _how_ it is supposed to use this structure. This is one of the reasons for which LSTM is a very strong structure. Additionally, this solves the gradient problems by being able to manipulate values through the gates themselves -- by passing the inputs and outputs through the gates, we now have an easily derivable function modifying our inputs.In regards to the problem of storing many states over a long period of time, LSTM handles this perfectly by only keeping whatever information is necessary and forgetting it whenever it is not needed anymore. Therefore, LSTMs are a very elegant solution to both problems. (A small NumPy sketch of these gate equations follows below.) InstructionsWe start by installing everything we need for this exercise:
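To make the gate equations above concrete, here is a small NumPy sketch of a single LSTM step, added purely for illustration. The weight matrices are random placeholders (not parameters learned by any layer used later in this notebook), and `W_c`, `B_c` stand in for the processing unit that produces the candidate $C_t$:
```python
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def lstm_step(x_t, s_prev, old_prev, W_k, B_k, W_i, B_i, W_c, B_c, W_o, B_o):
    v = np.concatenate([s_prev, x_t])       # [S_{t-1}, x_t]
    K_t = sigmoid(W_k @ v + B_k)            # Keep (forget) gate
    old_t = K_t * old_prev                  # part of the old cell contents that is kept
    I_t = sigmoid(W_i @ v + B_i)            # Input (write) gate
    C_t = np.tanh(W_c @ v + B_c)            # candidate produced by the processing unit
    cell_t = old_t + I_t * C_t              # new memory cell contents Cell_t
    O_t = sigmoid(W_o @ v + B_o)            # Output (read) gate
    return O_t * np.tanh(cell_t), cell_t    # Output_t and Cell_t

# tiny example: 4 hidden units, 6 input features, random placeholder weights
rng = np.random.default_rng(0)
n_hidden, n_in = 4, 6
W = lambda: rng.standard_normal((n_hidden, n_hidden + n_in)) * 0.1
B = lambda: np.zeros(n_hidden)
out, cell = lstm_step(rng.standard_normal(n_in), np.zeros(n_hidden), np.zeros(n_hidden),
                      W(), B(), W(), B(), W(), B(), W(), B())
print(out, cell)
```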
###Code
#!pip install grpcio==1.24.3
!pip install tensorflow==2.2.0-rc0
###Output
Requirement already satisfied: tensorflow==2.2.0-rc0 in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (2.2.0rc0)
Requirement already satisfied: astunparse==1.6.3 in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (from tensorflow==2.2.0-rc0) (1.6.3)
Requirement already satisfied: h5py<2.11.0,>=2.10.0 in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (from tensorflow==2.2.0-rc0) (2.10.0)
Requirement already satisfied: absl-py>=0.7.0 in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (from tensorflow==2.2.0-rc0) (1.0.0)
Requirement already satisfied: protobuf>=3.8.0 in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (from tensorflow==2.2.0-rc0) (3.19.1)
Requirement already satisfied: six>=1.12.0 in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (from tensorflow==2.2.0-rc0) (1.16.0)
Requirement already satisfied: keras-preprocessing>=1.1.0 in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (from tensorflow==2.2.0-rc0) (1.1.2)
Requirement already satisfied: grpcio>=1.8.6 in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (from tensorflow==2.2.0-rc0) (1.24.3)
Requirement already satisfied: numpy<2.0,>=1.16.0 in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (from tensorflow==2.2.0-rc0) (1.21.4)
Requirement already satisfied: opt-einsum>=2.3.2 in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (from tensorflow==2.2.0-rc0) (3.3.0)
Requirement already satisfied: scipy==1.4.1 in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (from tensorflow==2.2.0-rc0) (1.4.1)
Requirement already satisfied: google-pasta>=0.1.8 in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (from tensorflow==2.2.0-rc0) (0.2.0)
Requirement already satisfied: gast==0.3.3 in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (from tensorflow==2.2.0-rc0) (0.3.3)
Requirement already satisfied: wheel>=0.26 in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (from tensorflow==2.2.0-rc0) (0.37.0)
Requirement already satisfied: tensorboard<2.2.0,>=2.1.0 in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (from tensorflow==2.2.0-rc0) (2.1.1)
Requirement already satisfied: wrapt>=1.11.1 in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (from tensorflow==2.2.0-rc0) (1.13.3)
Requirement already satisfied: tensorflow-estimator<2.2.0,>=2.1.0 in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (from tensorflow==2.2.0-rc0) (2.1.0)
Requirement already satisfied: termcolor>=1.1.0 in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (from tensorflow==2.2.0-rc0) (1.1.0)
Requirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (from tensorboard<2.2.0,>=2.1.0->tensorflow==2.2.0-rc0) (0.4.6)
Requirement already satisfied: google-auth<2,>=1.6.3 in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (from tensorboard<2.2.0,>=2.1.0->tensorflow==2.2.0-rc0) (1.35.0)
Requirement already satisfied: markdown>=2.6.8 in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (from tensorboard<2.2.0,>=2.1.0->tensorflow==2.2.0-rc0) (3.3.6)
Requirement already satisfied: werkzeug>=0.11.15 in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (from tensorboard<2.2.0,>=2.1.0->tensorflow==2.2.0-rc0) (2.0.1)
Requirement already satisfied: setuptools>=41.0.0 in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (from tensorboard<2.2.0,>=2.1.0->tensorflow==2.2.0-rc0) (59.4.0)
Requirement already satisfied: requests<3,>=2.21.0 in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (from tensorboard<2.2.0,>=2.1.0->tensorflow==2.2.0-rc0) (2.26.0)
Requirement already satisfied: rsa<5,>=3.1.4 in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (from google-auth<2,>=1.6.3->tensorboard<2.2.0,>=2.1.0->tensorflow==2.2.0-rc0) (4.8)
Requirement already satisfied: cachetools<5.0,>=2.0.0 in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (from google-auth<2,>=1.6.3->tensorboard<2.2.0,>=2.1.0->tensorflow==2.2.0-rc0) (4.2.4)
Requirement already satisfied: pyasn1-modules>=0.2.1 in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (from google-auth<2,>=1.6.3->tensorboard<2.2.0,>=2.1.0->tensorflow==2.2.0-rc0) (0.2.8)
Requirement already satisfied: requests-oauthlib>=0.7.0 in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard<2.2.0,>=2.1.0->tensorflow==2.2.0-rc0) (1.3.1)
Requirement already satisfied: importlib-metadata>=4.4 in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (from markdown>=2.6.8->tensorboard<2.2.0,>=2.1.0->tensorflow==2.2.0-rc0) (4.8.2)
Requirement already satisfied: certifi>=2017.4.17 in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (from requests<3,>=2.21.0->tensorboard<2.2.0,>=2.1.0->tensorflow==2.2.0-rc0) (2021.10.8)
Requirement already satisfied: urllib3<1.27,>=1.21.1 in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (from requests<3,>=2.21.0->tensorboard<2.2.0,>=2.1.0->tensorflow==2.2.0-rc0) (1.26.7)
Requirement already satisfied: idna<4,>=2.5 in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (from requests<3,>=2.21.0->tensorboard<2.2.0,>=2.1.0->tensorflow==2.2.0-rc0) (3.1)
Requirement already satisfied: charset-normalizer~=2.0.0 in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (from requests<3,>=2.21.0->tensorboard<2.2.0,>=2.1.0->tensorflow==2.2.0-rc0) (2.0.8)
Requirement already satisfied: typing-extensions>=3.6.4 in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (from importlib-metadata>=4.4->markdown>=2.6.8->tensorboard<2.2.0,>=2.1.0->tensorflow==2.2.0-rc0) (4.0.1)
Requirement already satisfied: zipp>=0.5 in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (from importlib-metadata>=4.4->markdown>=2.6.8->tensorboard<2.2.0,>=2.1.0->tensorflow==2.2.0-rc0) (3.6.0)
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (from pyasn1-modules>=0.2.1->google-auth<2,>=1.6.3->tensorboard<2.2.0,>=2.1.0->tensorflow==2.2.0-rc0) (0.4.8)
Requirement already satisfied: oauthlib>=3.0.0 in /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard<2.2.0,>=2.1.0->tensorflow==2.2.0-rc0) (3.2.0)
###Markdown
LSTMLet's first create a tiny LSTM network sample to understand the architecture of LSTM networks. We need to import the necessary modules for our code. We need numpy and tensorflow, obviously. Additionally, we can import tensorflow.keras.layers directly, which includes the function for building RNNs.
###Code
import numpy as np
import tensorflow as tf
if not tf.__version__ == '2.2.0-rc0':
print(tf.__version__)
raise ValueError('please upgrade to TensorFlow 2.2.0-rc0, or restart your Kernel (Kernel->Restart & Clear Output)')
###Output
1.14.0
###Markdown
IMPORTANT! => Please restart the kernel by clicking on "Kernel"->"Restart and Clear Output" and wait until all output disappears. Then your changes are being picked up. We want to create a network that has only one LSTM cell. We have to pass 2 elements to LSTM, the prv_output and prv_state, the so-called h and c. Therefore, we initialize a state vector, state. Here, state is a tuple with 2 elements, each one of size [1 x 4], one for passing prv_output to the next time step, and another for passing the prv_state to the next time step.
###Code
LSTM_CELL_SIZE = 4 # output size (dimension), which is same as hidden size in the cell
state = (tf.zeros([1,LSTM_CELL_SIZE]),)*2
state
lstm = tf.keras.layers.LSTM(LSTM_CELL_SIZE, return_sequences=True, return_state=True)
lstm.states=state
print(lstm.states)
###Output
WARNING:tensorflow:From /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages/tensorflow/python/ops/init_ops.py:1251: calling VarianceScaling.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.
Instructions for updating:
Call initializer instance with the dtype argument instead of passing it to the constructor
(<tf.Tensor 'zeros:0' shape=(1, 4) dtype=float32>, <tf.Tensor 'zeros:0' shape=(1, 4) dtype=float32>)
###Markdown
As we can see, the state has 2 parts, the new state c, and also the output h. Let's check the output again. Let's define a sample input. In this example, batch_size = 1, and features = 6:
###Code
# Batch size x time steps x features.
sample_input = tf.constant([[3,2,2,2,2,2]],dtype=tf.float32)
batch_size = 1
sentence_max_length = 1
n_features = 6
new_shape = (batch_size, sentence_max_length, n_features)
inputs = tf.constant(np.reshape(sample_input, new_shape), dtype = tf.float32)
###Output
_____no_output_____
###Markdown
Now, we can pass the input to the lstm layer, and check the new state:
###Code
output, final_memory_state, final_carry_state = lstm(inputs)
print('Output : ', tf.shape(output))
print('Memory : ',tf.shape(final_memory_state))
print('Carry state : ',tf.shape(final_carry_state))
###Output
_____no_output_____
###Markdown
Stacked LSTMWhat if we want to have an RNN with stacked LSTM? For example, a 2-layer LSTM. In this case, the output of the first layer will become the input of the second. Let's create the stacked LSTM cell:
###Code
cells = []
###Output
_____no_output_____
###Markdown
Creating the first layer LSTM cell.
###Code
LSTM_CELL_SIZE_1 = 4 #4 hidden nodes
cell1 = tf.keras.layers.LSTMCell(LSTM_CELL_SIZE_1)
cells.append(cell1)
###Output
_____no_output_____
###Markdown
Creating the second layer LSTM cell.
###Code
LSTM_CELL_SIZE_2 = 5 #5 hidden nodes
cell2 = tf.keras.layers.LSTMCell(LSTM_CELL_SIZE_2)
cells.append(cell2)
###Output
_____no_output_____
###Markdown
To create a multi-layer LSTM we use the tf.keras.layers.StackedRNNCells function, which takes in multiple single-layer LSTM cells to create a multilayer stacked LSTM model.
###Code
stacked_lstm = tf.keras.layers.StackedRNNCells(cells)
###Output
_____no_output_____
###Markdown
Now we can create the RNN from stacked_lstm:
###Code
lstm_layer= tf.keras.layers.RNN(stacked_lstm ,return_sequences=True, return_state=True)
###Output
_____no_output_____
###Markdown
Let's say the input sequence length is 3, and the dimensionality of the inputs is 6. The input should be a Tensor of shape: [batch_size, max_time, dimension], in our case it would be (2, 3, 6).
###Code
#Batch size x time steps x features.
sample_input = [[[1,2,3,4,3,2], [1,2,1,1,1,2],[1,2,2,2,2,2]],[[1,2,3,4,3,2],[3,2,2,1,1,2],[0,0,0,0,3,2]]]
sample_input
batch_size = 2
time_steps = 3
features = 6
new_shape = (batch_size, time_steps, features)
x = tf.constant(np.reshape(sample_input, new_shape), dtype = tf.float32)
###Output
_____no_output_____
###Markdown
We can now send our input to the network, and check the output:
###Code
output, final_memory_state, final_carry_state = lstm_layer(x)
print('Output : ', tf.shape(output))
print('Memory : ',tf.shape(final_memory_state))
print('Carry state : ',tf.shape(final_carry_state))
###Output
_____no_output_____ |
examples/PALET - Jan 28 2022.ipynb | ###Markdown
Enrollment Object The "Paletable" object, Enrollment.
###Code
from palet.Enrollment import Enrollment
###Output
_____no_output_____
###Markdown
Enrollment by Year (2020-2021) I want to get an enrollment breakdown
###Code
#api = Enrollment()
api = Enrollment().byMonth().byState()
## This is likely becoming deprecated (i.e. setting runids)
api.runids = [5149,6297]
print(api.sql())
###Output
_____no_output_____
###Markdown
What does the API give me?
###Code
display(api.fetch())
###Output
_____no_output_____
###Markdown
How did it give me these results?
###Code
print(api.sql())
###Output
_____no_output_____
###Markdown
Enrollment by Month (2020-2021) Can I drill down by month?
###Code
display(api.byMonth().fetch())
###Output
_____no_output_____
###Markdown
How does this query differ from my previous one?
###Code
print(api.byMonth().sql())
###Output
_____no_output_____
###Markdown
Enrollment by Gender (Female)Now, I want to see enrollment by month and state, focusing specifically on female beneficiaries.
###Code
display(api.byGender('F').fetch())
###Output
_____no_output_____ |
examples/DateComponents.ipynb | ###Markdown
Load Modules
###Code
import sys
sys.path.append('..')
from datefeatures import DateComponents
import numpy as np
import pandas as pd
from randdate import randdate
from datetime import datetime
from sklearn.pipeline import Pipeline, FeatureUnion
from mlxtend.feature_selection import ColumnSelector
###Output
_____no_output_____
###Markdown
Example 1
###Code
# generate fake dates
X = np.c_[np.array(randdate(10)), np.array(randdate(10))]
# transform date variable to fetures
cmp = DateComponents(year=False, month=True, day=False, hour=False, minute=False, second=False)
cmp.fit(X)
Z = cmp.transform(X)
Z.head()
###Output
_____no_output_____
###Markdown
Example 2
###Code
n_samples = 100000
X = np.c_[np.array(randdate(n_samples)), np.array(randdate(n_samples)), np.array(randdate(n_samples))]
cmp = DateComponents(year=True, month=False, day=False, hour=False, minute=False, second=False)
%time Z = cmp.fit_transform(X)
cmp = DateComponents(year=False, month=False, day=False, hour=True, minute=True, second=False)
%time Z = cmp.fit_transform(X)
cmp = DateComponents(year=True, month=True, day=True, hour=True, minute=True, second=True, microsecond=True)
%time Z = cmp.fit_transform(X)
###Output
CPU times: user 968 ms, sys: 70.8 ms, total: 1.04 s
Wall time: 1.04 s
###Markdown
Example 3
###Code
n_samples = 5
# generate fake dates
X = np.c_[np.array(randdate(n_samples))]
# emulate missing value
X[1,0] = np.nan
###Output
_____no_output_____
###Markdown
Example 3a -- without correction
###Code
cmp = DateComponents(missing=False)
# What will happen?
Z = cmp.fit_transform(X)
Z.dtypes
Z.head()
###Output
_____no_output_____
###Markdown
Example 3b -- with missing value correction
###Code
cmp = DateComponents(missing=True, year=True)
Z = cmp.fit_transform(X)
Z.dtypes
Z.head()
###Output
_____no_output_____
###Markdown
Example 4
###Code
X = np.array(datetime(2016, 1, 1, 23, 59, 58, 12345)).reshape(1, -1)
cmp = DateComponents(
year=False, month=False, day=False,
hour=True, minute=True, second=True, microsecond=True)
Z = cmp.fit_transform(X)
Z
###Output
_____no_output_____
###Markdown
Example 5
###Code
# generate fake dates
n_samples = 5
X = np.c_[np.array(randdate(n_samples))]
# make pipeline
pipe = Pipeline(steps=[
('pre', DateComponents())
])
Z = pipe.fit_transform(X)
Z
###Output
_____no_output_____
###Markdown
Example 6
###Code
# generate fake dates
n_samples = 5
X = pd.DataFrame(data=randdate(n_samples), columns=['this_date'])
X['some_numbers'] = np.random.randn(n_samples)
X
# make pipeline
pipe = Pipeline(steps=[
# process column by column
('col_by_col', FeatureUnion(transformer_list=[
('dates', Pipeline(steps=[
('sel1', ColumnSelector(cols=('this_date'))),
('pre1', DateComponents())
])),
('numbers', ColumnSelector(cols=('some_numbers')))
]))
# do some other stuff ..
])
Z = pipe.fit_transform(X)
Z
###Output
_____no_output_____ |
assignments/assignment3_solutions.ipynb | ###Markdown
Assignment 3Welcome to the third programming assignment for the course. This assignments will help to familiarise you with Boolean function oracles while revisiting the topics discussed in this week's lectures. Submission GuidelinesFor final submission, and to ensure that you have no errors in your solution, please use the 'Restart and Run All' option availble in the Kernel menu at the top of the page. To submit your solution, run the completed notebook and attach the solved notebook (with results visible) as a .ipynb file using the 'Add or Create' option under the 'Your Work' heading on the assignment page in Google Classroom.
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
# Importing standard Qiskit libraries
from qiskit import QuantumCircuit, execute
from qiskit.providers.aer import QasmSimulator
from qiskit.visualization import *
from qiskit.quantum_info import *
basis_gates = ['id', 'x', 'y', 'z', 's', 't', 'sdg', 'tdg', 'h', 'p', 'sx' ,'r', 'rx', 'ry', 'rz', 'u', 'u1', 'u2', 'u3', 'cx', 'ccx', 'barrier', 'measure', 'snapshot']
###Output
_____no_output_____
###Markdown
A quantum oracle implementation of the classical OR operationWe've already seen that the Toffoli gate implements the quantum version of the classical AND operation. The first part of this exercise will require you to construct such a quantum implementation for the OR operation.The logical OR operation takes two Boolean inputs and returns 1 as the result if either or both of the inputs are 1. It is often denoted using the $\vee$ symbol (it is also called the disjunction operation). The truth table for the classical OR operation is given below:| $x$ | $y$ | $x\vee y$ ||----- |----- |----------- || 0 | 0 | 0 || 0 | 1 | 1 || 1 | 0 | 1 || 1 | 1 | 1 | De Morgan's lawsFinding a gate that is the direct quantum analogue of the OR operation might prove to be difficult. Luckily, there are a set of two relation in Boolean algebra that can provide a helpful workaround. $$\overline{x\vee y} = \overline{x} \wedge \overline{y}$$This is read as _not ($x$ or $y$) = not $x$ and not $y$_$$\overline{x\wedge y} = \overline{x} \vee \overline{y}$$This is read as _not ($x$ or $y$) = not $x$ and not $y$_ **Problem 1**1. Using the expressions for De Morgan's laws above, construct a Boolean formula for $x \vee y$ consisting only of the logical AND and NOT operations. 2. We have provided the `QuantumCircuit()` for a quantum bit oracle to implement the OR operation. Apply the appropriate gates to this circuit based on the expression calculated in Step 1. Do NOT add a measurementWarning: Please be careful to ensure that the circuit below matches the oracle structure i.e. the input qubit states are not altered after the operation of the oracle. **Solution**1. Using De Morgan's laws, $x \vee y = \overline{ \overline{x} \wedge \overline{y}}$. 2. To achieve this, we have placed an $X$ gate on both inputs $q_0$ and $q_1$, leaving them in the state $\overline{q_0}$ and $\overline{q_1}$ respectively, then used a Toffoli gate to perform then AND operation to get $\overline{q_0} \wedge \overline{q_1}$ on the output qubit $q_2$, and then applied an $X$ gate to the output for the overall NOT operation to get the final output $\overline{\overline{q_0} \wedge \overline{q_1}}$. It is also important to add two $X$ gates to the $\overline{q_0}$ and $\overline{q_1}$ states of the top two qubits respectively to maintain the oracle structure by returning them to their original states $q_0$ and $q_1$.Note that this is not the only possible solution.
###Code
or_oracle = QuantumCircuit(3)
or_oracle.x(range(2))
or_oracle.ccx(0,1,2)
or_oracle.x(range(3))
# Do not change below this line
or_oracle.draw(output='mpl')
or_tt = ['000', '011', '101', '111']
def check_or_oracle(tt_row):
check_qc = QuantumCircuit(3)
for i in range(2):
if (tt_row[i] == '1'):
check_qc.x(i)
check_qc.extend(or_oracle)
check_qc.measure_all()
return (execute(check_qc.reverse_bits(),backend=QasmSimulator(), shots=1).result().get_counts().most_frequent() == tt_row)
try:
assert list(or_oracle.count_ops()) != [], f"Circuit cannot be empty"
assert 'measure' not in or_oracle.count_ops(), f"Please remove measurements"
assert set(or_oracle.count_ops().keys()).difference(basis_gates) == set(), f"Only the following basic gates are allowed: {basis_gates}"
for tt_row in or_tt:
assert check_or_oracle(tt_row), f" Input {tt_row[0:2]}: Your encoding is not correct"
print("Your oracle construction passed all checks")
except AssertionError as e:
print(f'Your code has an error: {e.args[0]}')
except Exception as e:
print(f'This error occured: {e.args[0]}')
###Output
Your oracle construction passed all checks
###Markdown
Linear functions and the Bernstein-Vazirani AlgorithmThe Deutch-Jozsa algorithm allows us to distinguish between constant and balanced Boolean functions. There is an extension to the Deutsch-Jozsa algorithm that allows us to extract some information about a certain other class of functions. This is what we will be exploring in the next segment. An $n$-bit Boolean function $f(x)$ is called linear if it can be written as the bitwise product of a particular $n$-bit binary string $a$ and the function variable $x$ (which is also a binary string of length $n$), i.e., linear functions can be written as $$f(x) = a\cdot x \;(\text{ mod } 2)$$You might recall from the discussion on the Hadamard transform, that for any general $n$-qubit computational basis state, the Hadamard transform has the following effect$$H^{\otimes n}|a\rangle = \frac{1}{2^{n/2}}\sum\limits_{x=0}^{n-1}(-1)^{a\cdot x}|x\rangle$$Due to the self-inverting nature of the Hadamard transformation, we can apply $H^{\otimes n}$ to both sides of the above equation and get (after flipping sides)$$H^{\otimes n} \left( \frac{1}{2^{n/2}}\sum\limits_{x=0}^{n-1}(-1)^{a\cdot x}|x\rangle \right) = |a\rangle$$The term inside the brackets on the left hand side of the equation looks like what we would get if we passed an equal superposition state through a phase oracle for the Boolean function $f(x) = a\cdot x \;(\text{ mod } 2)$. This is depicted in the equation below:$$\frac{1}{2^{n/2}}\sum\limits_{x=0}^{n-1}|x\rangle \xrightarrow{U_f} \frac{1}{2^{n/2}}\sum\limits_{x=0}^{n-1}(-1)^{a\cdot x}|x\rangle$$The Bernstein-Vazirani algorithm uses all the things discussed above. Given an oracle for a function that we know is linear, we can find the binary string $a$ corresponding to the linear function. The steps of the algorithm are shown in the equation below and then described in words.$$|0^{\otimes n}\rangle \xrightarrow{H^{\otimes n}} \frac{1}{2^{n/2}}\sum\limits_{x=0}^{n-1}|x\rangle \xrightarrow{U_f} \frac{1}{2^{n/2}}\sum\limits_{x=0}^{n-1}(-1)^{a\cdot x}|x\rangle \xrightarrow{H^{\otimes n}} |a\rangle$$In the expression above, we've omitted (for readability) the mention of the extra qubit in the $|-\rangle$ state that is required for the oracle output, but it is necessary. **Problem 2**Consider the Boolean function $f(x) = (\overline{x_1} \wedge x_0) \vee (x_1 \wedge \overline{x_0})$. Take it as given that this function is a linear function. We want to find the 2-bit binary string $a$ such that the function. Your objective is to use this expression above to implement the quantum bit oracle for this Boolean function. This is more complex than any expression we have seen so far, so the implementation will be carried out in a few steps. A `QuantumCircuit()` with 3 qubits is provided below.- $q_0$ and $q_1$ are the input qubits for the variables $x_0$ and $x_1$ respectively.- $q_2$ is the output qubit and stores the value of the final Boolean function expression
###Code
bv_oracle = QuantumCircuit(3)
bv_oracle.cx(0,2)
bv_oracle.cx(1,2)
bv_oracle.draw('mpl')
###Output
_____no_output_____
###Markdown
Using the bit oracle provided above, construct a circuit for the Bernstein-Vazirani algorithm.The steps for the algorithm are as follows:1. Start will $(n+1)$ qubits in the $|0\rangle$ state. Here $n=2$. These will serve as input to the oracle. We also need an extra qubit for the oracle output, since we need a phase oracle, add gates to prepare the state $|-\rangle$ in this qubit ($q_2$). 2. Apply an $H$ gate to all the input qubits. 3. Apply the oracle $U_f$ 4. Apply an $H$ gate to all the input qubits. 5. Measure the $n$ input qubits. If the function corresponding to $U_f$ is linear, the final state measured will be the binary string $a$.Astute readers will notice that the steps followed in the Bernstein-Vazirani and the Deutsch-jozsa algorithms are the same. `bv_circ` is a `QuantumCircuit(3,2)` given below. Add necessary operations to the circuit below to realise the steps for the Bernstein-Vazirani algorithm. **Solution**1. The $|-\rangle$ state can be created using an $X$ gate followed by an $H$ gate or an $H$ gate followed by a $Z$ gate. The remaining steps have been solved in the circuit below. Note that this is not a unique oracle given for the Boolean function and using any equivalent oracle will also be correct.
###Code
bv_circ = QuantumCircuit(3,2)
bv_circ.x(2)
bv_circ.h(range(3))
bv_circ.barrier()
# Extend the circuit using bv_oracle
bv_circ.extend(bv_oracle)
bv_circ.barrier()
# Apply the Hadamard transformation on all qubits and then measure q_0 and q_1
bv_circ.h(range(3))
bv_circ.measure([0,1], [0,1])
# Do not remove this line
bv_circ.draw(output='mpl')
try:
assert list(bv_circ.count_ops()) != [], f"Circuit cannot be empty"
assert set(bv_circ.count_ops().keys()).difference(basis_gates) == set(), f"Only the following basic gates are allowed: {basis_gates}"
counts = execute(bv_circ.reverse_bits(), backend=QasmSimulator(), shots=8192).result().get_counts()
assert list(counts.keys()) == ['11'], "Your circuit did not produce the right answer"
print(" Your circuit produced the correct output. Please submit for evaluation.")
except AssertionError as e:
print(f'Your code has an error: {e.args[0]}')
except Exception as e:
print(f'This error occured: {e.args[0]}')
plot_histogram(counts)
###Output
Your circuit produced the correct output. Please submit for evaluation.
|
python/d2l-en/mxnet/chapter_preface/index.ipynb | ###Markdown
PrefaceJust a few years ago, there were no legions of deep learning scientistsdeveloping intelligent products and services at major companies and startups.When we entered the field, machine learning did not command headlines in daily newspapers.Our parents had no idea what machine learning was,let alone why we might prefer it to a career in medicine or law.Machine learning was a blue skies academic disciplinewhose industrial significance was limitedto a narrow set of real-world applications,including speech recognition and computer vision.Moreover, many of these applicationsrequired so much domain knowledgethat they were often regarded as entirely separate areas for which machine learning was one small component.At that time, neural networks---the predecessors of the deep learning methodsthat we focus on in this book---were generally regarded as outmoded.In just the past five years, deep learning has taken the world by surprise,driving rapid progress in such diverse fields as computer vision, natural language processing, automatic speech recognition, reinforcement learning, and biomedical informatics.Moreover, the success of deep learningon so many tasks of practical interesthas even catalyzed developments in theoretical machine learning and statistics.With these advances in hand, we can now build cars that drive themselveswith more autonomy than ever before (and less autonomy than some companies might have you believe),smart reply systems that automatically draft the most mundane emails,helping people dig out from oppressively large inboxes,and software agents that dominate the world's best humansat board games like Go, a feat once thought to be decades away.Already, these tools exert ever-wider impacts on industry and society,changing the way movies are made, diseases are diagnosed,and playing a growing role in basic sciences---from astrophysics to biology. About This BookThis book represents our attempt to make deep learning approachable,teaching you the *concepts*, the *context*, and the *code*. 
One Medium Combining Code, Math, and HTMLFor any computing technology to reach its full impact,it must be well-understood, well-documented, and supported bymature, well-maintained tools.The key ideas should be clearly distilled,minimizing the onboarding time needing to bring new practitioners up to date.Mature libraries should automate common tasks,and exemplar code should make it easy for practitionersto modify, apply, and extend common applications to suit their needs.Take dynamic web applications as an example.Despite a large number of companies, like Amazon,developing successful database-driven web applications in the 1990s,the potential of this technology to aid creative entrepreneurshas been realized to a far greater degree in the past ten years,owing in part to the development of powerful, well-documented frameworks.Testing the potential of deep learning presents unique challengesbecause any single application brings together various disciplines.Applying deep learning requires simultaneously understanding(i) the motivations for casting a problem in a particular way;(ii) the mathematical form of a given model;(iii) the optimization algorithms for fitting the models to data;(iv) the statistical principles that tell us when we should expect our models to generalize to unseen dataand practical methods for certifying that they have, in fact, generalized;and (v) the engineering techniquesrequired to train models efficiently,navigating the pitfalls of numerical computingand getting the most out of available hardware.Teaching both the critical thinking skills required to formulate problems,the mathematics to solve them,and the software tools to implement those solutions all in one place presents formidable challenges.Our goal in this book is to present a unified resourceto bring would-be practitioners up to speed.When we started this book project,there were no resources that simultaneously(i) were up to date; (ii) covered the full breadthof modern machine learning with substantial technical depth;and (iii) interleaved exposition of the quality one expects from an engaging textbook with the clean runnable codethat one expects to find in hands-on tutorials.We found plenty of code examples forhow to use a given deep learning framework(e.g., how to do basic numerical computing with matrices in TensorFlow)or for implementing particular techniques(e.g., code snippets for LeNet, AlexNet, ResNets, etc.)scattered across various blog posts and GitHub repositories.However, these examples typically focused on*how* to implement a given approach,but left out the discussion of *why* certain algorithmic decisions are made.While some interactive resources have popped up sporadicallyto address a particular topic, e.g., the engaging blog postspublished on the website [Distill](http://distill.pub), or personal blogs,they only covered selected topics in deep learning,and often lacked associated code.On the other hand, while several deep learning textbooks have emerged---e.g., :cite:`Goodfellow.Bengio.Courville.2016`, which offers a comprehensive survey on the basics of deep learning---these resources do not marry the descriptionsto realizations of the concepts in code,sometimes leaving readers clueless as to how to implement them.Moreover, too many resources are hidden behind the paywallsof commercial course providers.We set out to create a resource that could(i) be freely available for everyone;(ii) offer sufficient technical depth to provide a starting point on the pathto actually becoming an applied machine 
learning scientist;(iii) include runnable code, showing readers*how* to solve problems in practice;(iv) allow for rapid updates, both by usand also by the community at large;and (v) be complemented by a [forum](http://discuss.d2l.ai)for interactive discussion of technical details and to answer questions.These goals were often in conflict.Equations, theorems, and citations are best managed and laid out in LaTeX.Code is best described in Python.And webpages are native in HTML and JavaScript.Furthermore, we want the content to beaccessible both as executable code, as a physical book,as a downloadable PDF, and on the Internet as a website.At present there exist no tools and no workflowperfectly suited to these demands, so we had to assemble our own.We describe our approach in detail in :numref:`sec_how_to_contribute`.We settled on GitHub to share the source and to facilitate community contributions,Jupyter notebooks for mixing code, equations and text,Sphinx as a rendering engine to generate multiple outputs,and Discourse for the forum.While our system is not yet perfect,these choices provide a good compromise among the competing concerns.We believe that this might be the first book publishedusing such an integrated workflow. Learning by DoingMany textbooks present concepts in succession, covering each in exhaustive detail.For example, Chris Bishop's excellent textbook :cite:`Bishop.2006`,teaches each topic so thoroughlythat getting to the chapteron linear regression requires a non-trivial amount of work.While experts love this book precisely for its thoroughness,for true beginners, this property limits its usefulness as an introductory text.In this book, we will teach most concepts *just in time*.In other words, you will learn concepts at the very momentthat they are needed to accomplish some practical end.While we take some time at the outset to teachfundamental preliminaries, like linear algebra and probability,we want you to taste the satisfaction of training your first modelbefore worrying about more esoteric probability distributions.Aside from a few preliminary notebooks that provide a crash coursein the basic mathematical background,each subsequent chapter introduces both a reasonable number of new conceptsand provides single self-contained working examples---using real datasets.This presents an organizational challenge.Some models might logically be grouped together in a single notebook.And some ideas might be best taught by executing several models in succession.On the other hand, there is a big advantage to adheringto a policy of *one working example, one notebook*:This makes it as easy as possible for you tostart your own research projects by leveraging our code.Just copy a notebook and start modifying it.We will interleave the runnable code with background material as needed.In general, we will often err on the side of making toolsavailable before explaining them fully (and we will follow up byexplaining the background later).For instance, we might use *stochastic gradient descent*before fully explaining why it is useful or why it works.This helps to give practitioners the necessaryammunition to solve problems quickly,at the expense of requiring the readerto trust us with some curatorial decisions.This book will teach deep learning concepts from scratch.Sometimes, we want to delve into fine details about the modelsthat would typically be hidden from the userby deep learning frameworks' advanced abstractions.This comes up especially in the basic tutorials,where we want you to 
understand everything that happens in a given layer or optimizer. In these cases, we will often present two versions of the example: one where we implement everything from scratch, relying only on NumPy-like functionality and automatic differentiation, and another, more practical example, where we write succinct code using the high-level APIs of deep learning frameworks. Once we have taught you how some component works, we can just use the high-level APIs in subsequent tutorials. Content and Structure The book can be roughly divided into three parts, focusing on preliminaries, deep learning techniques, and advanced topics focused on real systems and applications (:numref:`fig_book_org`). :label:`fig_book_org` * The first part covers basics and preliminaries. :numref:`chap_introduction` offers an introduction to deep learning. Then, in :numref:`chap_preliminaries`, we quickly bring you up to speed on the prerequisites required for hands-on deep learning, such as how to store and manipulate data, and how to apply various numerical operations based on basic concepts from linear algebra, calculus, and probability. :numref:`chap_linear` and :numref:`chap_perceptrons` cover the most basic concepts and techniques in deep learning, including regression and classification; linear models and multilayer perceptrons; and overfitting and regularization. * The next five chapters focus on modern deep learning techniques. :numref:`chap_computation` describes the key computational components of deep learning systems and lays the groundwork for our subsequent implementations of more complex models. Next, :numref:`chap_cnn` and :numref:`chap_modern_cnn` introduce convolutional neural networks (CNNs), powerful tools that form the backbone of most modern computer vision systems. Similarly, :numref:`chap_rnn` and :numref:`chap_modern_rnn` introduce recurrent neural networks (RNNs), models that exploit sequential (e.g., temporal) structure in data and are commonly used for natural language processing and time series prediction. In :numref:`chap_attention`, we introduce a relatively new class of models based on so-called attention mechanisms that has displaced RNNs as the dominant architecture for most natural language processing tasks. These sections will bring you up to speed on the most powerful and general tools that are widely used by deep learning practitioners. * Part three discusses scalability, efficiency, and applications. First, in :numref:`chap_optimization`, we discuss several common optimization algorithms used to train deep learning models. The next chapter, :numref:`chap_performance`, examines several key factors that influence the computational performance of your deep learning code. In :numref:`chap_cv`, we illustrate major applications of deep learning in computer vision. In :numref:`chap_nlp_pretrain` and :numref:`chap_nlp_app`, we show how to pretrain language representation models and apply them to natural language processing tasks. Code :label:`sec_code` Most sections of this book feature executable code. We believe that some intuitions are best developed via trial and error, tweaking the code in small ways and observing the results. Ideally, an elegant mathematical theory might tell us precisely how to tweak our code to achieve a desired result. However, deep learning practitioners today must often tread where no cogent theory can provide firm guidance. 
Despite our best attempts, formal explanations for the efficacy of various techniques are still lacking, both because the mathematics to characterize these models can be so difficult and also because serious inquiry on these topics has only just recently kicked into high gear. We are hopeful that as the theory of deep learning progresses, future editions of this book can provide insights that eclipse those presently available. To avoid unnecessary repetition, we encapsulate some of our most frequently imported and referred-to functions and classes in the `d2l` package. To indicate a block of code, such as a function, class, or collection of import statements, that will be subsequently accessed via the `d2l` package, we will mark it with `@save`. We offer a detailed overview of these functions and classes in :numref:`sec_d2l`. The `d2l` package is lightweight and only requires the following dependencies:
###Code
#@save
import collections
import hashlib
import math
import os
import random
import re
import shutil
import sys
import tarfile
import time
import zipfile
from collections import defaultdict
import pandas as pd
import requests
from IPython import display
from matplotlib import pyplot as plt
d2l = sys.modules[__name__]
###Output
_____no_output_____
###Markdown
Most of the code in this book is based on Apache MXNet, an open-source framework for deep learning that is the preferred choice of AWS (Amazon Web Services), as well as many colleges and companies. All of the code in this book has passed tests under the newest MXNet version. However, due to the rapid development of deep learning, some code *in the print edition* may not work properly in future versions of MXNet. We plan to keep the online version up-to-date. In case you encounter any problems, please consult :ref:`chap_installation` to update your code and runtime environment. Here is how we import modules from MXNet.
###Code
#@save
from mxnet import autograd, context, gluon, image, init, np, npx
from mxnet.gluon import nn, rnn
###Output
_____no_output_____ |
Product_recommendation.ipynb | ###Markdown
Amazon Datascraper and Handler
###Code
import re
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
from bs4 import BeautifulSoup
import requests
def search_am(phrase):
link="https://www.amazon.in/s?k="
l_end="&ref=nb_sb_noss"
phrase_w= phrase.replace(' ','+')
link_full=link+phrase_w+l_end
#print(link_full)
driver = webdriver.Chrome()
wait = WebDriverWait(driver, 5)
driver.get(link_full)
names_f=[]
names=driver.find_elements_by_tag_name("a")
i=0
for name in names:
className = name.get_attribute('class')
if className=='a-link-normal a-text-normal':
names_f.append(name)
i+=1
links=[]
for i in names_f:
temp= i.get_attribute('href')
links.append(temp)
driver.quit()
return links
def get_element_dets(link):
driver = webdriver.Chrome()
wait = WebDriverWait(driver, 2)
driver.get(link)
title_o= driver.find_elements_by_id("productTitle")
title=title_o[0].text
number_o= driver.find_elements_by_id("acrCustomerReviewText")
try:
popularity=(number_o[0].text)
except:
popularity='0'
rate=driver.find_elements_by_css_selector("#reviewsMedley > div > div.a-fixed-left-grid-col.a-col-left > div.a-section.a-spacing-none.a-spacing-top-mini.cr-widget-ACR > div.a-fixed-left-grid.AverageCustomerReviews.a-spacing-small > div > div.a-fixed-left-grid-col.aok-align-center.a-col-right > div > span > span")
try:
rate_o=(rate[0].text).split(' ')[0]
except:
rate_o='0'
feat_f=[]
tag=[]
value=[]
#features=driver.find_elements_by_css_selector("#feature-bullets > ul > li > span")
#for f in features:
# feat_f.append(f.text)
price=0
try:
tag_o=driver.find_elements_by_tag_name('th')
for name in tag_o:
className = name.get_attribute('class')
if className=='a-color-secondary a-size-base prodDetSectionEntry':
tag.append(name.text)
value_o=driver.find_elements_by_tag_name('td')
for name in value_o:
className = name.get_attribute('class')
if className=='a-size-base':
value.append(name.text)
i=0
while i<len(value):
t=str(tag[i])+':'+str(value[i])
feat_f.append(t)
i+=1
except:
feat_f=[':']
try:
price_o= driver.find_elements_by_id("priceblock_ourprice")
for name in price_o:
className = name.get_attribute('class')
if className=='a-size-medium a-color-price priceBlockBuyingPriceString':
price=(name.text)
break
except:
price=0
#price=price_o.text
feedbacks=driver.find_elements_by_tag_name("a")
feedback_f=[]
for feed in feedbacks:
className = feed.get_attribute('class')
if className=='a-size-base a-link-normal review-title a-color-base review-title-content a-text-bold':
feedback_f.append(feed.text)
driver.quit()
return feedback_f,title,rate_o,popularity,feat_f,price
def caller(phrase):
links=search_am(phrase)
data={}
print(len(links))
for link in links:
data[link]={}
feedback_f,title,rate,popularity,feat_f,price=get_element_dets(link)
data[link]['feedback']=feedback_f
data[link]['title']=title
data[link]['rate']=rate
data[link]['popularity']=popularity
data[link]['features']=feat_f
if isinstance(price, int):
data[link]['price']=price
else:
data[link]['price']=price.split(' ')[1]
#print(len(data))
return data
##Done
###Output
_____no_output_____
###Markdown
Popularity and Rating Based System
###Code
def assign_popularity_rating():
with open('products.json', 'r') as openfile:
data = json.load(openfile)
temp=0
for k in data.keys():
p=int(data[k]['popularity'].split(' ')[0])
r=float(data[k]['rate'])
if p<50:
temp=1
elif p<100:
temp=2
elif p<150:
temp=3
else:
temp=4
score=(temp)
data[k]['Popularity_Score']=score
data[k]['Rating_Score']=r
with open("products_mod.json", "w") as outfile:
json.dump(data, outfile)
## Done
###Output
_____no_output_____
###Markdown
Review Sentiment Based System
###Code
from textblob import TextBlob
def assign_sentiment_rating():
with open('products_mod.json', 'r') as openfile:
data = json.load(openfile)
sm=0
for k in data.keys():
temp=data[k]['feedback']
#print(temp)
#res = json.loads(temp)
z=0
sm=0
for i in temp:
#print(i)
z+=1
t=TextBlob(i).sentiment.polarity
#print(t)
sm+=t
if (z==0):
rating=0
else:
#print(sm
#print(z)
rating=sm/z
data[k]['Review_Score']=rating
with open("products_mod_2.json", "w") as outfile:
json.dump(data, outfile)
## Done
###Output
_____no_output_____
###Markdown
Price Relevance System
###Code
def check_price_relevence():
with open('products_mod_2.json', 'r') as openfile:
data = json.load(openfile)
print("Specify the approx price to tune search")
price=int(input())
print("Specify a margin")
margin=int(input())
for k in data.keys():
data_ref=str(data[k]['price']).replace(',','')
temp=float(data_ref)
if temp<price+margin and temp>price-margin:
rating=1
else:
rating=0
data[k]['Price_relevence_Score']=rating
with open("products_mod_3.json", "w") as outfile:
json.dump(data, outfile)
###Output
_____no_output_____
###Markdown
Relevance Based System
###Code
import pandas as pd
import ast
def form_featureset():
with open('products_mod_3.json', 'r') as openfile:
data = json.load(openfile)
feat=[]
set_c=[]
for k in data.keys():
temp=data[k]['features']
temp2=[]
for i in temp:
tag=i.split(':')[0]
if tag not in feat:
feat.append(tag)
#print(feat)
for k in data.keys():
temp=data[k]['features']
temp2=[-1]*len(feat)
for i in temp:
tag=i.split(':')[0]
#print(tag)
ind= feat.index(tag)
#print(ind)
temp2[ind]= i.split(':')[1]
set_c.append(temp2)
df=pd.DataFrame(set_c,columns=feat)
df.to_csv('product_descriptions.csv',index=False)
return df
## Done
###Output
_____no_output_____
###Markdown
Merging
###Code
def sort_d(data):
tot={}
l=[]
for k in data.keys():
tot[k]=data[k]['Total_score']
#print(tot)
l.append(sorted(tot.items(), reverse=True, key = lambda x : x[1]))
l_f=[]
i=0
#print((l[0])[0][1])
while i<5:
l_f.append(l[0][i][0])
i+=1
return l_f
def tune_search(choice):
with open('products_mod_3.json', 'r') as openfile:
data = json.load(openfile)
for k in data.keys():
price_rel=data[k]['Price_relevence_Score']
review_score=data[k]['Review_Score']
pop_score=data[k]['Popularity_Score']
pop_score_k=pop_score/4
rate_score=data[k]['Rating_Score']
rate_score_k=rate_score/5
        if choice==1:
            total_score=5*pop_score_k+rate_score_k+review_score+price_rel
        elif choice==2:
            total_score=pop_score_k+5*rate_score_k+review_score+price_rel
        elif choice==3:
            total_score=pop_score_k+rate_score_k+review_score+5*price_rel
        elif choice==4:
            total_score=pop_score_k+rate_score_k+5*review_score+price_rel
        else:
            # fall back to an unweighted sum for any other choice
            total_score=pop_score_k+rate_score_k+review_score+price_rel
data[k]['Total_score']=total_score
#print(data[k]['Total_score'])
links=sort_d(data)
return links
import json
import webbrowser
import time
import datetime
def communicator():
print("Specify the order")
order=input()
print("Any Brand You want to specify? If not say No/no")
brand=input()
print("Price Range? If not say No/no")
price=input()
if brand.lower()!='no':
order_m=order+" by "+brand
else:
order_m=order
if price.lower()!='no':
order_f=order_m+" price "+price
else:
order_f=order_m
data=caller(order_f)
with open("products.json", "w") as outfile:
json.dump(data, outfile)
assign_popularity_rating()
assign_sentiment_rating()
check_price_relevence()
df=form_featureset()
print("Your results are ready...........")
print("product_descriptions.csv has been saved.You can check the company model and features for referral later as per convinience")
print("Please specify how your choices should be sorted? \n 1 for popularity based \n 2 for rating based \n 3 for price relevence based \n 4 for review based \n 5 for overall.")
choice= int(input())
c={1:'Popularity Based',2:'Rating Based',3:'Price Based',4:'Review Based',5:'Overall'}
links=tune_search(choice)
print("Here are your best 5 results")
for link in links:
webbrowser.open(link)
time.sleep(5)
options=[1,2,3,4,5]
options.remove(choice)
print("Here are the other bests")
for i in options:
links=tune_search(i)
print('\n\n')
print(c[i])
print("\n")
for l in links:
print(l)
print('\n')
print('\n')
###Output
_____no_output_____
###Markdown
Caller
###Code
communicator()
###Output
Specify the order
Laptop
Any Brand You want to specify? If not say No/no
Dell
Price Range? If not say No/no
Between 70000 and 80000
21
Specify the approx price to tune search
70000
Specify a margin
10000
Your results are ready...........
product_descriptions.csv has been saved.You can check the company model and features for referral later as per convinience
Please specify how your choices should be sorted?
1 for popularity based
2 for rating based
3 for price relevence based
4 for review based
5 for overall.
5
Here are your best 5 results
Here are the other bests
Popularity Based
https://www.amazon.in/Dell-Inspiron-5370-13-3-inch-Graphics/dp/B07B2W7DCB/ref=sr_1_4?dchild=1&keywords=Laptop+by+Dell+price+Between+70000+and+80000&qid=1595324585&sr=8-4
https://www.amazon.in/Inspiron-5370-13-3-inch-i7-8550U-Graphics/dp/B07B6K4YM6/ref=sr_1_12?dchild=1&keywords=Laptop+by+Dell+price+Between+70000+and+80000&qid=1595324585&sr=8-12
https://www.amazon.in/Inspiron-5593-15-6-inch-i5-1035G1-Microsoft/dp/B08BWV7W7R/ref=sr_1_14?dchild=1&keywords=Laptop+by+Dell+price+Between+70000+and+80000&qid=1595324585&sr=8-14
https://www.amazon.in/Inspiron-5491-Touchscreen-i5-10210U-Integrated/dp/B0842Z6Z7C/ref=sr_1_13?dchild=1&keywords=Laptop+by+Dell+price+Between+70000+and+80000&qid=1595324585&sr=8-13
https://www.amazon.in/gp/slredirect/picassoRedirect.html/ref=pa_sp_atf_aps_sr_pg1_1?ie=UTF8&adId=A0149295HB2TARSCMC1Z&url=%2FInspiron-5390-13-3-inch-i5-8265U-Graphics%2Fdp%2FB089QQLWKK%2Fref%3Dsr_1_1_sspa%3Fdchild%3D1%26keywords%3DLaptop%2Bby%2BDell%2Bprice%2BBetween%2B70000%2Band%2B80000%26qid%3D1595324585%26sr%3D8-1-spons%26psc%3D1&qualifier=1595324585&id=2229676179053351&widgetName=sp_atf
Rating Based
https://www.amazon.in/Dell-Inspiron-5370-13-3-inch-Graphics/dp/B07B2W7DCB/ref=sr_1_4?dchild=1&keywords=Laptop+by+Dell+price+Between+70000+and+80000&qid=1595324585&sr=8-4
https://www.amazon.in/Inspiron-5370-13-3-inch-i7-8550U-Graphics/dp/B07B6K4YM6/ref=sr_1_12?dchild=1&keywords=Laptop+by+Dell+price+Between+70000+and+80000&qid=1595324585&sr=8-12
https://www.amazon.in/Inspiron-5593-15-6-inch-i5-1035G1-Microsoft/dp/B08BWV7W7R/ref=sr_1_14?dchild=1&keywords=Laptop+by+Dell+price+Between+70000+and+80000&qid=1595324585&sr=8-14
https://www.amazon.in/Inspiron-5491-Touchscreen-i5-10210U-Integrated/dp/B0842Z6Z7C/ref=sr_1_13?dchild=1&keywords=Laptop+by+Dell+price+Between+70000+and+80000&qid=1595324585&sr=8-13
https://www.amazon.in/gp/slredirect/picassoRedirect.html/ref=pa_sp_atf_aps_sr_pg1_1?ie=UTF8&adId=A0149295HB2TARSCMC1Z&url=%2FInspiron-5390-13-3-inch-i5-8265U-Graphics%2Fdp%2FB089QQLWKK%2Fref%3Dsr_1_1_sspa%3Fdchild%3D1%26keywords%3DLaptop%2Bby%2BDell%2Bprice%2BBetween%2B70000%2Band%2B80000%26qid%3D1595324585%26sr%3D8-1-spons%26psc%3D1&qualifier=1595324585&id=2229676179053351&widgetName=sp_atf
Price Based
https://www.amazon.in/Dell-Inspiron-5370-13-3-inch-Graphics/dp/B07B2W7DCB/ref=sr_1_4?dchild=1&keywords=Laptop+by+Dell+price+Between+70000+and+80000&qid=1595324585&sr=8-4
https://www.amazon.in/Inspiron-5370-13-3-inch-i7-8550U-Graphics/dp/B07B6K4YM6/ref=sr_1_12?dchild=1&keywords=Laptop+by+Dell+price+Between+70000+and+80000&qid=1595324585&sr=8-12
https://www.amazon.in/Inspiron-5593-15-6-inch-i5-1035G1-Microsoft/dp/B08BWV7W7R/ref=sr_1_14?dchild=1&keywords=Laptop+by+Dell+price+Between+70000+and+80000&qid=1595324585&sr=8-14
https://www.amazon.in/Inspiron-5491-Touchscreen-i5-10210U-Integrated/dp/B0842Z6Z7C/ref=sr_1_13?dchild=1&keywords=Laptop+by+Dell+price+Between+70000+and+80000&qid=1595324585&sr=8-13
https://www.amazon.in/gp/slredirect/picassoRedirect.html/ref=pa_sp_atf_aps_sr_pg1_1?ie=UTF8&adId=A0149295HB2TARSCMC1Z&url=%2FInspiron-5390-13-3-inch-i5-8265U-Graphics%2Fdp%2FB089QQLWKK%2Fref%3Dsr_1_1_sspa%3Fdchild%3D1%26keywords%3DLaptop%2Bby%2BDell%2Bprice%2BBetween%2B70000%2Band%2B80000%26qid%3D1595324585%26sr%3D8-1-spons%26psc%3D1&qualifier=1595324585&id=2229676179053351&widgetName=sp_atf
Review Based
https://www.amazon.in/Inspiron-5370-13-3-inch-i7-8550U-Graphics/dp/B07B6K4YM6/ref=sr_1_12?dchild=1&keywords=Laptop+by+Dell+price+Between+70000+and+80000&qid=1595324585&sr=8-12
https://www.amazon.in/Dell-Inspiron-5370-13-3-inch-Graphics/dp/B07B2W7DCB/ref=sr_1_4?dchild=1&keywords=Laptop+by+Dell+price+Between+70000+and+80000&qid=1595324585&sr=8-4
https://www.amazon.in/Inspiron-5491-Touchscreen-i5-10210U-Integrated/dp/B0842Z6Z7C/ref=sr_1_13?dchild=1&keywords=Laptop+by+Dell+price+Between+70000+and+80000&qid=1595324585&sr=8-13
https://www.amazon.in/Inspiron-5593-15-6-inch-i5-1035G1-Microsoft/dp/B08BWV7W7R/ref=sr_1_14?dchild=1&keywords=Laptop+by+Dell+price+Between+70000+and+80000&qid=1595324585&sr=8-14
https://www.amazon.in/gp/slredirect/picassoRedirect.html/ref=pa_sp_atf_aps_sr_pg1_1?ie=UTF8&adId=A0149295HB2TARSCMC1Z&url=%2FInspiron-5390-13-3-inch-i5-8265U-Graphics%2Fdp%2FB089QQLWKK%2Fref%3Dsr_1_1_sspa%3Fdchild%3D1%26keywords%3DLaptop%2Bby%2BDell%2Bprice%2BBetween%2B70000%2Band%2B80000%26qid%3D1595324585%26sr%3D8-1-spons%26psc%3D1&qualifier=1595324585&id=2229676179053351&widgetName=sp_atf
|
TeamCobra_CliffWalking.ipynb | ###Markdown
Challenge Assignment Cliff Walking with Reinforcement Learning CSCI E-82A>**Make sure** you include your name along with the name of your team and team members in the notebook you submit. **Your name and team name here:** Team Cobra- Zhong Gao- Heng Li- Matt Smith IntroductionIn this challenge you will apply Monte Carlo reinforcement learning algorithms to a classic problem in reinforcement learning, known as the **cliff walking problem**. The cliff walking problem is a type of game. The goal is for the agent to find the highest reward (lowest cost) path from a starting state to the goal. There are a number of versions of the cliff walking problems which have been used as research benchmarks over the years. You can find a short discussion of the cliff walking problem on page 132 of Sutton and Barto, second edition. In the general cliff walking problem the agent starts in one corner of the state-space and must travel to goal, or terminal state, in another corner of the state-space. Between the starting state and goal state there is an area with a **cliff**. If the agent falls off a cliff it is sent back to the starting state. A schematic diagram of the state-space is shown in the diagram below. State-space of cliff-walking problem Problem DescriptionThe agent must learn a policy to navigate from the starting state to the terminal state. The properties this problem are as follows:1. The state-space has two **continuous variables**, x and y.2. The starting state is at $x = 0.0$, $y = 0.0$. 3. The terminal state has two segments: - At $y = 0.0$ is in the range $9.0 \le x \le 10.0$. - At $x = 10.0$ is in the range $0.0 \le y \le 1.0$. 4. The cliff zone is bounded by: - $0.0 \le y \le 1.0$ and - $1.0 \le x \le 9.0$. 5. An agent entering the cliff zone is returned to the starting state.6. The agent moves 1.0 units per time step. 7. The 8 possible **discrete actions** are moves in the following directions: - +x, - +x, +y, - +y - -x, +y, - -x, - -x, -y, - -y, and - +x, -y. 8. The rewards are: - -1 for a time step in the state-space, - -10 for colliding with an edge (barrier) of the state-space, - -100 for falling off the cliff and returning to the starting state, and - +1000 for reaching the terminal or goal state. InstructionsIn this challenge you and your team will do the following. Include commentary on each component of your algorithms. Make sure you answer the questions. Environment Simulator Your reinforcement learning agent cannot contain any information about the environment other that the starting state and the possible actions. Therefore, you must create an environment simulator, with the following input and output:- Input: Arguments of state, the $(x,y)$ tuple, and discrete action- Output: the new state (s'), reward, and if the new state meets the terminal or goal criteria.Make sure you test your simulator functions carefully. The test cases must include, steps with each of the actions, falling off the cliff from each edge, hitting the barriers, and reaching the goal (terminal) edges. Errors in the simulator will make the rest of this challenge difficult. > **Note**: For this problem, coordinate state is represented by a tuple of continuous variables. Make sure that you maintain coordinate state as continuous variables for this problem.
###Code
import math
from math import cos
import numpy as np
import numpy.random as nr
import matplotlib.pyplot as plt
%matplotlib inline
# Variables
n_states = 10**2
n_episodes = 1000
radian = (45 * math.pi)/180 #for diagonal movement
initial_state = tuple((0,0))
state = tuple((0,0))
action_index = {0: tuple((0,1)), 1: tuple((1,1)), 2: tuple((1,0)),
                3: tuple((1,-1)), 4: tuple((0,-1)), 5: tuple((-1,-1)),
                6: tuple((-1,0)), 7: tuple((-1,1))}  # action 7 was a duplicate of 5; it should be the (-x, +y) move
n_actions = len(action_index)
def sim_walk(state, action):
# Translate action diagonals if necessary
if (abs(action[0]) == abs(action[1])):
action = tuple(np.multiply(action, tuple((cos(radian), cos(radian)))))
# Update position/state
terminal = False
state_prime = tuple(np.add(state, action))
# Check location in grid and terminal state
grid_prime = tuple((math.floor(state_prime[0]), math.floor(state_prime[1]))) # the current state in grid units
reward = -1
# Restart if off cliff
if (off_cliff(state_prime)):
state_prime = tuple((0,0))
reward = -100
# Check if goal is met (before boundary)
if (is_terminal(state_prime)):
state_prime = grid_prime
terminal = True
print("Reached terminal state.")
reward = 1000
# Check if boundary hit
if (off_grid(state_prime)):
state_prime = state
reward = -10
return (state_prime, reward, terminal)
def off_cliff(current_state):
return ((1 <= current_state[0] <= 9) &
((0 <= current_state[1] <= 1)))
def is_terminal(current_state):
return ((current_state[0] > 9) &
(current_state[1] < 1))
def off_grid(current_state):
return ((current_state[0] < 0) |
(current_state[0] >= 10) |
(current_state[1] < 0) |
(current_state[1] >= 10))
def find_grid_state(state):
return tuple((math.floor(state[0]), math.floor(state[1])))
## Test the function
state_list = [state]
for i in range(20):
s_prime, reward, terminal = sim_walk(state, action_index[i % 8])
state = s_prime
state_list.append(s_prime)
print(f"State: {state}, Reward: {reward}, Terminal: {terminal}")
def plot_walk(states):
plt.axis([-0.1,10, -0.1,10])
plt.title("Cliff Walk")
plt.plot(*zip(*states))
plt.show()
plot_walk(state_list)
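## A few extra spot checks suggested by the assignment text (expected values assume the
## reward specification above: cliff entry -100 with reset to start, boundary -10, goal +1000):
s, r, t = sim_walk((5.0, 1.5), action_index[4])   # step -y into the cliff zone
print(s, r, t)    # expect ((0, 0), -100, False)
s, r, t = sim_walk((0.0, 9.5), action_index[0])   # step +y off the top edge
print(s, r, t)    # expect the state unchanged with reward -10
s, r, t = sim_walk((9.5, 1.5), action_index[4])   # step -y into the goal strip
print(s, r, t)    # expect terminal == True with reward +1000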
# magnitude = 1
# def radians(degrees):
# return (degrees * math.pi)/180
# current_state = (0,0)
# random = np.random.random() * 360
# action = tuple((math.sin(radians(random)), math.cos(radians(random))))
# def get_action():
# return tuple((math.sin(radians(random)), math.cos(radians(random))))
# state_list = [current_state]
# for i in range(20):
# s_prime, reward, terminal = sim_walk(current_state, get_action())
# current_state = s_prime
# state_list.append(s_prime)
# print(f"State: {current_state}, Reward: {reward}, Terminal: {terminal}")
# def plot_walk(states):
# plt.axis([-0.1,10, -0.1,10])
# plt.title("Cliff Walk")
# plt.plot(*zip(*states))
# plt.show()
# plot_walk(state_list)
###Output
_____no_output_____
###Markdown
Grid Approximation The state-space of the cliff walking problem is continuous. Therefore, you will need to use a **grid approximation** to construct a policy. The policy is specified as the probability of action for each grid cell. For this problem, use a 10x10 grid. > **Note:** While the policy uses a grid approximation, state should be represented as continuous variables. Initial Policy Start with a uniform initial policy. A uniform policy has an equal probability of taking any of the 8 possible actions for each cell in the grid representation. > **Note:** As has already been stated, the coordinate state representation for this problem is a tuple of coordinate values. However, policy, state-values and action-values are represented with a grid approximation. > **Hint:** You may wish to use a 3-dimensional numpy array to code the policy for this problem. With 8 possible actions, this approach will be easier to work with.
###Code
initial_policy = np.ones((8, 10, 10)) / 8
initial_policy
###Output
_____no_output_____
###Markdown
Monte Carlo State Value Estimation For the initial uniform policy, compute the state values using the Monte Carlo RL algorithm: 1. Compute and print the state values for each grid in the representation. Use at least 1,000 episodes. This will take some time to execute. 2. Plot the grid of state values, as an image (e.g. matplotlib [imshow](https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.imshow.html)). 3. Compute the Frobenius norm (Euclidean norm) of the state value array with [numpy.linalg.norm](https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.norm.html). You will use this figure as a basis to compare your improved policy. Study your plot to ensure your state values seem correct. Do these state values seem reasonable given the uniform policy and why? Make sure you pay attention to the state values of the cliff zone. > **Hint:** Careful testing at each stage of your algorithm development will potentially save you considerable time. Test your function(s) for a single episode to make sure your algorithm converges. Then test for say 10 episodes to ensure the state values update in a reasonable manner at each episode. > **Note:** The Monte Carlo episodes can be executed in parallel for production systems. The Markov chain of each episode is statistically independent.
###Code
def take_action(state, policy, actions = action_index):
grid_state = find_grid_state(state)
action = actions[nr.choice(range(len(actions)), p = policy[:, grid_state[0], grid_state[1]])]
s_prime, reward, is_terminal = sim_walk(state, action)
return (action, s_prime, reward, is_terminal)
print(take_action(initial_state, initial_policy))
def MC_episode(policy, G, n_visits, episode, n_states):
## For each episode we use a list to keep track of states we have visited.
## Once we visit a state we need to accumulate values to get the returns
states_visited = []
## Find the starting state
current_state = tuple((0,0))
current_grid_state = find_grid_state(current_state)
terminal = False
g = 0.0
counter = 0
state_list = [current_grid_state]
while(not terminal):
## Find the next action and reward
action, s_prime, reward, terminal = take_action(current_state, policy)
counter += 1
state_list.append(find_grid_state(s_prime))
## Add the reward to the states visited if this is a first visit
if(current_grid_state not in states_visited):
## Mark that the current state has been visited
states_visited.append(current_grid_state)
## Add the reward to states visited
for state in states_visited:
n_visits[state[0]][state[1]] = n_visits[state[0]][state[1]] + 1.0
G[state[0]][state[1]] = G[state[0]][state[1]] + (reward - G[state[0]][state[1]])/n_visits[state[0]][state[1]]
## Update the current state for next transition
current_state = s_prime
plot_walk(state_list)
print(counter)
return (G, n_visits)
def MC_state_values(policy, n_episodes):
## Create list of states
states = list(range(0,policy.shape[1:][0] * policy.shape[1:][1]))
n_states = len(states)
## An array to hold the accumulated returns as we visit states
G = np.zeros(policy.shape[1:])
## An array to keep track of how many times we visit each state so we can
## compute the mean
n_visits = np.zeros(policy.shape[1:])
## Iterate over the episodes
for i in range(n_episodes):
G, n_visits = MC_episode(policy, G, n_visits, i, n_states)
return(G)
nr.seed(234)
state_values = MC_state_values(initial_policy, n_episodes = 1)
print(state_values.reshape((10,10)))
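## The assignment above also asks for an image plot of the state values and their
## Frobenius (Euclidean) norm; a minimal sketch of both, assuming `state_values` is the
## 10x10 array returned by MC_state_values:
plt.imshow(state_values.reshape((10,10)).T, origin='lower', cmap='viridis')
plt.colorbar(label='state value')
plt.title('State values under the uniform policy')
plt.show()
print('Frobenius norm of the state values:', np.linalg.norm(state_values))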
###Output
Reached terminal state.
###Markdown
Monte Carlo State Policy Improvement Finally, you will perform Monte Carlo RL policy improvement: 1. Starting with the uniform policy, compute action-values for each grid in the representation. Use at least 1,000 episodes. 2. Use these action values to find an improved policy. 3. To evaluate your updated policy, compute the state-values for this policy. 4. Plot the grid of state values for the improved policy, as an image. 5. Compute the Frobenius norm (Euclidean norm) of the state value array. Compare the state value plot for the improved policy to the one for the initial uniform policy. Do the improved state values increase generally as distance to the terminal states decreases? Is this what you expect and why? Compare the norm of the state values with your improved policy to the norm for the uniform policy. Is the increase significant? > **Hint:** Careful testing at each stage of your algorithm development will potentially save you considerable time. Test your function(s) for a single episode to make sure your algorithm converges. Then test for say 10 episodes to ensure the state values update in a reasonable manner at each episode. > **Note:** You could continue to improve policy using the general policy improvement algorithm (GPI). In the interest of time, you are not required to do so here.
###Code
import pandas as pd  # pandas is used below for the DataFrame display but was never imported

def print_Q(Q):
    Q = pd.DataFrame(Q, columns = ['N', 'NE', 'E', 'SE', 'S', 'SW', 'W', 'NW'])
    print(Q)
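## NOTE (editor): `MC_action_value_episode` is called below but was not defined anywhere in
## this notebook. The sketch that follows is an assumed first-visit Monte Carlo action-value
## update, reusing `sim_walk`, `find_grid_state` and `action_index` from earlier cells.
## Q is assumed to be indexed by (flattened 10x10 grid state, action index 0-7).
def MC_action_value_episode(policy, Q, n_visits, initial_state, n_states, n_actions,
                            actions = action_index):
    state_actions_visited = []
    current_state = initial_state
    terminal = False
    while(not terminal):
        grid_state = find_grid_state(current_state)
        flat_state = grid_state[0]*10 + grid_state[1]
        ## Sample an action index from the policy for the current grid cell
        action_idx = nr.choice(n_actions, p = policy[:, grid_state[0], grid_state[1]])
        s_prime, reward, terminal = sim_walk(current_state, actions[action_idx])
        ## First-visit bookkeeping for (state, action) pairs
        if((flat_state, action_idx) not in state_actions_visited):
            state_actions_visited.append((flat_state, action_idx))
        ## Average each reward increment into all visited pairs, mirroring MC_episode above
        for s, a in state_actions_visited:
            n_visits[s, a] += 1.0
            Q[s, a] += (reward - Q[s, a])/n_visits[s, a]
        current_state = s_prime
    return (Q, n_visits)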
def MC_action_values(policy, Q, n_episodes, initial_state):
    ## Q is indexed by (flattened grid state, action), so take the dimensions from Q itself
    n_states, n_actions = Q.shape
    n_visits = np.zeros((n_states, n_actions))
    for _ in range(n_episodes):
        Q, n_visits = MC_action_value_episode(policy, Q, n_visits, initial_state, n_states, n_actions)
    return(Q)
initial_Q = np.zeros((n_states, n_actions))
updated_Q = MC_action_values(initial_policy, initial_Q, n_episodes, initial_state)
print_Q(updated_Q)
from copy import deepcopy  # deepcopy was used here without being imported

initial_copy = deepcopy(initial_policy)
def update_policy(policy, Q, epsilon):
    ## Epsilon-greedy improvement for the (n_actions, 10, 10) numpy policy used above,
    ## with Q indexed by (flattened grid state, action index)
    n_actions = policy.shape[0]
    for state in range(Q.shape[0]):
        row, col = state // 10, state % 10
        q = Q[state, :]
        max_action_index = np.where(q == max(q))[0]
        n_max_transitions = float(len(max_action_index))
        p_max_transitions = (1.0 - epsilon*(n_actions - n_max_transitions))/(n_max_transitions)
        for action in range(n_actions):
            if(action in max_action_index): policy[action, row, col] = p_max_transitions
            else: policy[action, row, col] = epsilon
    return(policy)
improved_policy = update_policy(initial_copy, initial_Q, 0.01)
improved_policy
nr.seed(457)
state_values = MC_state_values(improved_policy, n_episodes = 10000)
print(state_values.reshape((10,10)))
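## Plot and norm for the improved policy, for comparison with the uniform-policy values
## (same assumptions as the earlier sketch):
plt.imshow(state_values.reshape((10,10)).T, origin='lower', cmap='viridis')
plt.colorbar(label='state value')
plt.title('State values under the improved policy')
plt.show()
print('Frobenius norm of the state values:', np.linalg.norm(state_values))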
###Output
_____no_output_____ |
Python/scikit-learn/QuantileTransformer.ipynb | ###Markdown
이 노트북의 코드에 대한 설명은 [QuantileTransformer](https://tensorflow.blog/2018/01/14/quantiletransformer/) 글을 참고하세요.
###Code
%load_ext watermark
%watermark -v -p sklearn,numpy,scipy
%matplotlib inline
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.preprocessing import QuantileTransformer
X, y = make_blobs(n_samples=500, centers=2, random_state=4)
plt.scatter(X[:, 0], X[:, 1], c=y, edgecolors='black')
plt.show()
quan = QuantileTransformer(n_quantiles=100)
quan.fit(X)
print(quan.quantiles_.shape)
quan.quantiles_[:10]
X_quan = quan.transform(X)
plt.scatter(X_quan[:, 0], X_quan[:, 1], c=y, edgecolors='black')
plt.show()
quan = QuantileTransformer(output_distribution='normal', n_quantiles=100)
X_quan = quan.fit_transform(X)
plt.scatter(X_quan[:, 0], X_quan[:, 1], c=y, edgecolors='black')
plt.show()
X_quan.mean(axis=0), X_quan.std(axis=0)
from sklearn.preprocessing import StandardScaler
X_std = StandardScaler().fit_transform(X)
plt.scatter(X_std[:, 0], X_std[:, 1], c=y, edgecolors='black')
plt.show()
###Output
_____no_output_____ |
Solution/Day_08_Solution_v2.ipynb | ###Markdown
[Assignment goals] 1. [Short answer] What is the main contribution of the Pandas package? 2. For the provided dataset, print out the following attributes (attributes: shape, size, values, index, columns, dtypes, len). Assignment 1. [Short answer] What is the main contribution of the Pandas package?
###Code
# It wraps the array types designed for mathematics into a data structure suited for data analysis.
###Output
_____no_output_____
###Markdown
2. For the provided dataset, print out the following attributes (attributes: shape, size, values, index, columns, dtypes, len)
###Code
# 記得先 Import 正確的套件
import numpy as np
import pandas as pd
df = pd.read_csv('https://raw.githubusercontent.com/MachineLearningLiuMing/scikit-learn-primer-guide/master/Data.csv')
df
# Reference solution
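# A possible reference solution, assuming df was loaded by the cell above:
print(df.shape)
print(df.size)
print(df.values)
print(df.index)
print(df.columns)
print(df.dtypes)
print(len(df))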
###Output
_____no_output_____ |
18-11-22-Deep-Learning-with-PyTorch/03-Convolutional Neural Networks/Part 5 - Convolution visualisation.ipynb | ###Markdown
Convolutional Layer In this notebook, we visualize four filtered outputs (a.k.a. activation maps) of a convolutional layer. In this example, *we* are defining four filters that are applied to an input image by initializing the **weights** of a convolutional layer, but a trained CNN will learn the values of these weights. Import the image
###Code
import cv2
import matplotlib.pyplot as plt
%matplotlib inline
# TODO: Feel free to try out your own images here by changing img_path
# to a file path to another image on your computer!
img_path = 'data/udacity_sdc.png'
# load color image
bgr_img = cv2.imread(img_path)
# convert to grayscale
gray_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2GRAY)
# normalize, rescale entries to lie in [0,1]
gray_img = gray_img.astype("float32")/255
# plot image
plt.imshow(gray_img, cmap='gray')
plt.show()
###Output
_____no_output_____
###Markdown
Define and visualize the filters
###Code
import numpy as np
## TODO: Feel free to modify the numbers here, to try out another filter!
filter_vals = np.array([[-1, -1, 1, 1], [-1, -1, 1, 1], [-1, -1, 1, 1], [-1, -1, 1, 1]])
print('Filter shape: ', filter_vals.shape)
# Defining four different filters,
# all of which are linear combinations of the `filter_vals` defined above
# define four filters
filter_1 = filter_vals
filter_2 = -filter_1
filter_3 = filter_1.T
filter_4 = -filter_3
filters = np.array([filter_1, filter_2, filter_3, filter_4])
# For an example, print out the values of filter 1
print('Filter 1: \n', filter_1)
# visualize all four filters
fig = plt.figure(figsize=(10, 5))
for i in range(4):
ax = fig.add_subplot(1, 4, i+1, xticks=[], yticks=[])
ax.imshow(filters[i], cmap='gray')
ax.set_title('Filter %s' % str(i+1))
width, height = filters[i].shape
for x in range(width):
for y in range(height):
ax.annotate(str(filters[i][x][y]), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if filters[i][x][y]<0 else 'black')
###Output
_____no_output_____
###Markdown
Define a convolutional layer The various layers that make up any neural network are documented [here](http://pytorch.org/docs/stable/nn.html). For a convolutional neural network, we'll start by defining a: * Convolutional layer Initialize a single convolutional layer so that it contains all your created filters. Note that you are not training this network; you are initializing the weights in a convolutional layer so that you can visualize what happens after a forward pass through this network! `__init__` and `forward` To define a neural network in PyTorch, you define the layers of a model in the function `__init__` and define the forward behavior of a network that applies those initialized layers to an input (`x`) in the function `forward`. In PyTorch we convert all inputs into the Tensor datatype, which is similar to a list data type in Python. Below, I define the structure of a class called `Net` that has a convolutional layer that can contain four 4x4 grayscale filters.
###Code
import torch
import torch.nn as nn
import torch.nn.functional as F
# define a neural network with a single convolutional layer with four filters
class Net(nn.Module):
def __init__(self, weight):
super(Net, self).__init__()
# initializes the weights of the convolutional layer to be the weights of the 4 defined filters
k_height, k_width = weight.shape[2:]
# assumes there are 4 grayscale filters
self.conv = nn.Conv2d(1, 4, kernel_size=(k_height, k_width), bias=False)
self.conv.weight = torch.nn.Parameter(weight)
def forward(self, x):
# calculates the output of a convolutional layer
# pre- and post-activation
conv_x = self.conv(x)
activated_x = F.relu(conv_x)
# returns both layers
return conv_x, activated_x
# instantiate the model and set the weights
weight = torch.from_numpy(filters).unsqueeze(1).type(torch.FloatTensor)
model = Net(weight)
# print out the layer in the network
print(model)
###Output
Net(
(conv): Conv2d(1, 4, kernel_size=(4, 4), stride=(1, 1), bias=False)
)
###Markdown
Visualize the output of each filter First, we'll define a helper function, `viz_layer`, that takes in a specific layer and number of filters (optional argument), and displays the output of that layer once an image has been passed through.
###Code
# helper function for visualizing the output of a given layer
# default number of filters is 4
def viz_layer(layer, n_filters= 4):
fig = plt.figure(figsize=(20, 20))
for i in range(n_filters):
ax = fig.add_subplot(1, n_filters, i+1, xticks=[], yticks=[])
# grab layer outputs
ax.imshow(np.squeeze(layer[0,i].data.numpy()), cmap='gray')
ax.set_title('Output %s' % str(i+1))
###Output
_____no_output_____
###Markdown
Let's look at the output of a convolutional layer, before and after a ReLu activation function is applied.
###Code
# plot original image
plt.imshow(gray_img, cmap='gray')
# visualize all filters
fig = plt.figure(figsize=(12, 6))
fig.subplots_adjust(left=0, right=1.5, bottom=0.8, top=1, hspace=0.05, wspace=0.05)
for i in range(4):
ax = fig.add_subplot(1, 4, i+1, xticks=[], yticks=[])
ax.imshow(filters[i], cmap='gray')
ax.set_title('Filter %s' % str(i+1))
# convert the image into an input Tensor
gray_img_tensor = torch.from_numpy(gray_img).unsqueeze(0).unsqueeze(1)
# get the convolutional layer (pre and post activation)
conv_layer, activated_layer = model(gray_img_tensor)
# visualize the output of a conv layer
viz_layer(conv_layer)
###Output
_____no_output_____
###Markdown
ReLu activation In this model, we've used an activation function that scales the output of the convolutional layer. We've chosen a ReLU function to do this, and this function simply turns all negative pixel values into 0's (black). For an input pixel value `x`, the ReLU output is `f(x) = max(0, x)`.
###Code
# after a ReLu is applied
# visualize the output of an activated conv layer
viz_layer(activated_layer)
###Output
_____no_output_____ |
generators/ship_gen.ipynb | ###Markdown
Image visualization* generator에서 나오는 이미지는 전처리 된 것* shuffle된 상태이므로 load_image(i)의 순서와는 다르다.
###Code
%matplotlib inline
#The line above is necesary to show Matplotlib's plots inside a Jupyter Notebook
import cv2
from matplotlib import pyplot as plt
for i in range(10):
batch_inputs, batch_targets = train_generator[i]
image1 = batch_inputs[0]
#print(np.shape(train_generator[2][1][0][0,:,4]))
image2 = train_generator.load_image(0)
#image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2RGB)
#image3 =tf.image.per_image_standardization(image2)
fig,ax = plt.subplots(3,figsize=(50,50))
#print(np.shape(image))
ax[0].imshow(montage_rgb(image1))
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
image1[..., 0] *= std[0]
image1[..., 1] *= std[1]
image1[..., 2] *= std[2]
image1[..., 0] += mean[0]
image1[..., 1] += mean[1]
image1[..., 2] += mean[2]
image1 *= 255
ax[1].imshow(montage_rgb(image1.astype(int)))
#print(train_generator[2][1][0][0,:,2])
ax[2].imshow(image2)
#ax[2].imshow(image3)
#plt.imshow(image2)
plt.show()
np.any(np.array([1,2,3])==1)
###Output
_____no_output_____
###Markdown
Augmentation
###Code
misc_effect
np.shape([train_generator.load_annotations(0)])
###Output
_____no_output_____
###Markdown
Class distribution * 'container': 4106, * 'oil tanker': 1579, * 'aircraft carrier': 57, * 'maritime vessels': 10989. Because the images were split into patches, these counts are slightly higher than the true numbers. Since evaluation uses mAP, accuracy on the rare aircraft carrier class is likely to matter most. We need to think about how to balance the classes: 1. data selection 2. a weighted loss (see the class-weight sketch after the counting code below).
###Code
dic = {'0':0,'1':0,'2':0,'3':0}
totsize = []
shipnum = []
for i in range(train_generator.size()):
if i%100==0:
print(i)
totsize += [train_generator.load_annotations(i)['totalsize']]
shipnum += [train_generator.load_annotations(i)['num']]
for label in train_generator.load_annotations(i)['labels']:
dic[str(label)]+=1
print(dic)
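## One possible way to act on the imbalance noted above: inverse-frequency class weights
## (an assumption -- e.g. for use with a weighted cross-entropy or focal loss).
counts = np.array([dic[str(k)] for k in range(4)], dtype=np.float64)
class_weights = counts.sum() / (len(counts) * np.maximum(counts, 1.0))
print("class weights:", class_weights)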
import matplotlib.pyplot as plt
print("배 평균갯수 : ",np.array(shipnum).mean())
print("배 총 갯수 : ",np.array(shipnum).sum())
shipnum = np.sort([shipnum])[0][::-1]
plt.hist(shipnum, bins=50)
plt.xlim([1, 10])
plt.show()
for n in range(1,10):
print("배 {}개이상인 이미지 갯수 : ".format(n),len(np.where(np.array(shipnum)>=n)[0]))
_size=[]
for size in totsize:
_size.extend(size)
print("배 평균 크기 : ", np.array(_size).mean())
size_mean = np.array(_size).mean()
_size = np.sort([_size])[0][::-1]
print("평균보다 큰 배의 갯수 : ", len(np.where(_size>size_mean)[0]))
count = 0
for size in totsize:
if len(np.where(size>size_mean)[0])>0 : count +=1
print(count)
o_l = []
for i in range(train_generator.size()):
o_l.append(train_generator.object_len(i))
(np.array(o_l) > 5).sum()  # count images with more than 5 objects; len() of the boolean mask was always the full length
annotations = {'labels': np.empty((0,), dtype=np.int32)}
annotations['labels'] = np.concatenate(
[annotations['labels'],[1,2,3]])
annotations
print(train_generator[2][1][0][0,:,9])
train_generator.load_image(2)
image_input = tf.keras.layers.Input(shape=[64,64,3])
image_input.shape[-1]
mean_image_subtraction(image_input)
image1[:,:,:,0] -= 103.939
image1[:,:,:,1] -= 116.779
image1[:,:,:,2] -= 123.68
print(np.mean(image1))
fig,ax = plt.subplots(1)
ax.imshow(image1[0])
def mean_image_subtraction(images, means=[123.68, 116.78, 103.94]):
'''
image normalization
:param images:
:param means:
:return:
'''
    num_channels = images.shape[-1]  # use the images argument rather than the global image_input
if len(means) != num_channels:
raise ValueError('len(means) must match the number of channels')
channels = tf.split(axis=-1, num_or_size_splits=num_channels, value=images)
print(channels)
for i in range(num_channels):
channels[i] -= means[i]
return tf.concat(axis=3, values=channels)
print(np.mean(image1))
print(np.mean(image2))
print(np.mean(image3))
print(train_generator.classes.keys())
my_dic = dict({"0":0,"1":0,"2":0,"3":0})
print(train_generator.size())
for i in range(train_generator.size()):
if i%100==0:
print(i)
for label in train_generator.load_annotations(i)['labels']:
my_dic[str(label)]+=1
my_dic
print(train_generator.load_annotations(0))
print(train_generator.load_annotations(0)['quadrangles'].astype(np.double).dtype)
from utils.compute_overlap import compute_overlap
compute_overlap(train_generator.load_annotations(0)['bboxes'].astype(np.double),train_generator.load_annotations(0)['bboxes'].astype(np.double))
validation_generator.size()
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())
tf.test.is_gpu_available(train_generator.load_annotations(0)['bboxes'],train_generator.load_annotations(0)['bboxes'])
from matplotlib.patches import Polygon,Rectangle
import colorsys
import random
import matplotlib.pyplot as plt
def random_colors(N, bright=True):
"""
Generate random colors.
To get visually distinct colors, generate them in HSV space then
convert to RGB.
"""
brightness = 1.0 if bright else 0.7
hsv = [(i / N, 1, brightness) for i in range(N)]
colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
random.shuffle(colors)
return colors
def visualize(image, boxes,figsize=(16, 16),box_type='rect',name=None):
"""
    desc : draw the image together with its bounding boxes.
    -input-
    image : the image to visualize
    figsize : size of the figure
"""
fig, ax = plt.subplots(1, figsize=figsize)
char_boxes=boxes
char_len=len(char_boxes)
colors = random_colors(char_len)
print ("box channel : ", np.shape(boxes)[1])
for i in range(char_len):
color = colors[i]
# Bounding box
if not np.any(char_boxes[i]):
# Skip this instance. Has no bbox. Likely lost in image cropping.
continue
if box_type == 'rect':
if np.shape(boxes)[1] == 4 and len(np.shape(boxes))==3 : # 4 - vertex
box = char_boxes[i]
y_max = max(box[0, 1], box[1, 1], box[2, 1], box[3, 1])
y_min = min(box[0, 1], box[1, 1], box[2, 1], box[3, 1])
x_max = max(box[0, 0], box[1, 0], box[2, 0], box[3, 0])
x_min = min(box[0, 0], box[1, 0], box[2, 0], box[3, 0])
else : # 2 - vertex
box = char_boxes[i]
y_max = max(box[1], box[3])
y_min = min(box[1], box[3])
x_max = max(box[0], box[2])
x_min = min(box[0], box[2])
width = (x_max-x_min)
height = (y_max-y_min)
print(width,height,x_min,y_min)
p = Rectangle((x_min,y_min), width, height, linewidth=2,
edgecolor=color, facecolor='none')
elif box_type == 'quad':
p = Polygon(char_boxes[i], facecolor="none", edgecolor=color)
else :
raise ("check the box_type")
ax.add_patch(p)
if name is not None :
ax.set_title(name)
ax.imshow(image)
###Output
_____no_output_____
###Markdown
Checking load_image and load_annotations
###Code
image = train_generator.load_image(0)
bboxes = train_generator.load_annotations(0)['bboxes']
print(bboxes[0,1])
visualize(image,bboxes)
###Output
_____no_output_____
###Markdown
Checking and visualizing the annotations
###Code
for i in range(900,910):
image = train_generator.load_image(i)
quadboxes = train_generator.load_annotations(i)['quadrangles']
visualize(image,quadboxes,box_type='quad')
if __name__ == '__main__':
train_generator = ShipGenerator(
'datasets/ship_detection',
'train',
phi=1,
batch_size=1,
detect_ship =True
)
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
anchors = train_generator.anchors
batch_inputs, batch_targets = train_generator[0]
image = batch_inputs[0][0]
image[..., 0] *= std[0]
image[..., 1] *= std[1]
image[..., 2] *= std[2]
image[..., 0] += mean[0]
image[..., 1] += mean[1]
image[..., 2] += mean[2]
image *= 255.
regression = batch_targets[0][0]
valid_ids = np.where(regression[:, -1] == 1)[0]
boxes = anchors[valid_ids]
deltas = regression[valid_ids]
class_ids = np.argmax(batch_targets[1][0][valid_ids], axis=-1)
mean_ = [0, 0, 0, 0]
std_ = [0.2, 0.2, 0.2, 0.2]
width = boxes[:, 2] - boxes[:, 0]
height = boxes[:, 3] - boxes[:, 1]
x1 = boxes[:, 0] + (deltas[:, 0] * std_[0] + mean_[0]) * width
y1 = boxes[:, 1] + (deltas[:, 1] * std_[1] + mean_[1]) * height
x2 = boxes[:, 2] + (deltas[:, 2] * std_[2] + mean_[2]) * width
y2 = boxes[:, 3] + (deltas[:, 3] * std_[3] + mean_[3]) * height
for x1_, y1_, x2_, y2_, class_id in zip(x1, y1, x2, y2, class_ids):
x1_, y1_, x2_, y2_ = int(x1_), int(y1_), int(x2_), int(y2_)
cv2.rectangle(image, (x1_, y1_), (x2_, y2_), (0, 255, 0), 2)
class_name = train_generator.labels[class_id]
label = class_name
ret, baseline = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.3, 1)
cv2.rectangle(image, (x1_, y2_ - ret[1] - baseline), (x1_ + ret[0], y2_), (255, 255, 255), -1)
cv2.putText(image, label, (x1_, y2_ - baseline), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
cv2.imshow('image', image.astype(np.uint8)[..., ::-1])
cv2.waitKey(0)
# 36864, 46080, 48384, 48960, 49104
# if first_valid_id < 36864:
# stride = 8
# elif 36864 <= first_valid_id < 46080:
# stride = 16
# elif 46080 <= first_valid_id < 48384:
# stride = 32
# elif 48384 <= first_valid_id < 48960:
# stride = 64
# else:
# stride = 128
pass
###Output
_____no_output_____ |
Section3_6.ipynb | ###Markdown
###Code
! apt update
! apt install openjdk-8-jdk-headless -qq > /dev/null
! wget -q http://archive.apache.org/dist/spark/spark-2.3.1/spark-2.3.1-bin-hadoop2.7.tgz
! tar xf spark-2.3.1-bin-hadoop2.7.tgz
! pip install -q findspark
import os
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["SPARK_HOME"] = "/content/spark-2.3.1-bin-hadoop2.7"
! ls
import findspark
findspark.init()
import pyspark
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
spark
from pyspark.sql import types
schema = types.StructType([
types.StructField('id', types.IntegerType()),
types.StructField("first_name", types.StringType()),
types.StructField("last_name", types.StringType()),
types.StructField("gender", types.StringType()),
types.StructField("City", types.StringType()),
types.StructField("JobTitle", types.StringType()),
types.StructField("Salary", types.StringType()),
types.StructField("Latitude", types.FloatType()),
types.StructField("Longitude", types.FloatType())
])
df = spark.read.csv("original.csv", header=True, schema=schema)
df.dtypes
df.show()
df.registerTempTable("original")
q1 = spark.sql('SELECT * FROM original')
q1.show()
q2 = spark.sql('select concat(first_name, " ", last_name) as full_name from original where gender = "Female"')
q2.show()
###Output
_____no_output_____ |