markdown | code | path | repo_name | license
---|---|---|---|---
Extract Results | def extract_results(response):
    """
    Accepts a NOAA query response (JSON) and returns the values under the
    results key, as well as the number of records (for use in validation).
    """
    data = response['results']
    # for quality control to verify retrieval of all rows
    length = len(data)
    return data, length
def collator(results):
    """
    Accepts the results key of a NOAA query response (JSON)
    and returns a tidy data set in pandas, where each record is an
    observation about a day.
    """
    df = pd.DataFrame(results)
    df = df.drop(['attributes', 'station'], axis=1)
    df = df.pivot(index='date', columns='datatype', values='value').reset_index()
    return df
def get_ncdc(start_dt, end_dt, station):
    """
    Accepts a start date (MM-DD-YYYY), an end date (MM-DD-YYYY),
    and a NOAA station ID. Date limit is 1 year.
    Returns a tidy dataset in a pandas DataFrame where each row
    represents an observation about a day, a record count,
    and a query parameters dictionary.
    """
    # count for verifying retrieval of all rows
    record_count = 0
    # initial query
    query, query_dict = query_builder(start_dt, end_dt, station)
    response = execute_query(query)
    # extract results and count
    results, length = extract_results(response)
    record_count += length
    # get offsets for remaining queries
    off_d, count = offsetter(response)
    # execute remaining queries and operations
    for offset in off_d:
        query, _ = query_builder(start_dt, end_dt, station, off_d[offset])
        print(query)
        response = execute_query(query)
        next_results, next_length = extract_results(response)
        record_count += next_length
        # concat results lists
        results += next_results
    assert record_count == count, 'record count != count'
    collated_data = collator(results)
    return collated_data, record_count, query_dict
test, qc, params = get_ncdc('2014-01-01', '2014-12-31', station='GHCND:USW00023174')
test.date.head()
test.date.tail()
test.info()
test[test.date.isnull()]
y1, qc, params = get_ncdc('2014-05-03', '2015-05-02', station='GHCND:USW00023174')
y2, qc, params = get_ncdc('2015-05-03', '2016-05-02', station='GHCND:USW00023174')
y3, qc, params = get_ncdc('2016-05-03', '2017-05-02', station='GHCND:USW00023174')
y1.info()
years = pd.concat([y1, y2, y3])
years.date.head()
years.date.tail()
years.to_csv('LAX_3years.csv', index=False) | NOAA_sandbox.ipynb | baumanab/noaa_requests | gpl-3.0 |
CSV Generator | def gen_csv(df, query_dict):
    """
    Arguments: pandas DataFrame, a query parameters dictionary
    Returns: A CSV of the df with dropped index, named by dict params
    """
    # extract params
    station = query_dict['stationid']
    start = query_dict['startdate']
    end = query_dict['enddate']
    # using os.path in case of future expansion to other directories
    path = os.path.join(station + '_' + start + '_' + end + '.' + 'csv')
    # remove problem characters (will add more in future)
    exclude_chars = ':'
    path = path.replace(exclude_chars, "_")
    # export to csv
    my_csv = df.to_csv(path, index=False)
    return my_csv, path
stuff, path = gen_csv(test, query_dict)
path
ls *csv
#!/usr/bin/env python
# coding: utf-8
"""Python code for querying NOAA daily summary weather and returnig a CSV per year
for a specfic station. Code is intended to be executed from CLI."""
import sys
# set path to tools library and import
sys.path.append(r'noaa_weather_tools')
import noaa_weather_tools
NOAA_Token_Here = 'enter token as string'
print("Check dt format ('MM-DD-YYYY') and whether dates span <= 1 year from a current or past date")
print("If dates exceed one year, NCDC query returns a null object")
print("Need a token take a token, have a token, keep it to yourself @ https://www.ncdc.noaa.gov/cdo-web/token")
print('start_dt: {}\n end_dt: {}'.format(sys.argv[1], sys.argv[2]))
def noaa_dailysum_weather_processor(start_dt, end_dt, station):
    """Function accepts a station ID and beginning/end dates as strings with date format
    'MM-DD-YYYY', spanning <= 1 year from a current or past date, and passes them to the query_builder function.
    Function creates a .csv file of NOAA (NCDC) Daily Summary data for a specific station."""
    print(15 * '.' + "reticulating splines" + 5 * '.' + "getting records")
    df, record_count, query_parameters = noaa_weather_tools.get_ncdc(start_dt, end_dt, station)
    print(15 * '.' + "exporting to csv")
    my_csv, my_path = noaa_weather_tools.gen_csv(df, query_parameters)
    print("spines reticulated")
    return my_csv
noaa_dailysum_weather_processor('2014-05-03', '2015-05-02', station='GHCND:USW00023174')
ls *csv | NOAA_sandbox.ipynb | baumanab/noaa_requests | gpl-3.0 |
Not surprisingly, solving the example gives a very accurate result: | x = generate_variables('x')[0]
sdp = SdpRelaxation([x])
sdp.get_relaxation(1, objective=x**2)
sdp.solve()
print(sdp.primal, sdp.dual) | Parameteric and Bilevel Polynomial Optimization Problems.ipynb | peterwittek/ipython-notebooks | gpl-3.0 |
Notice that even in this formulation, there is an implicit constraint on a moment: the top left element of the moment matrix is 1. Given a representing measure, this means that $\int_\mathbf{K} \mathrm{d}\mu=1$. It is actually because of this that a $\lambda$ dual variable appears in the dual formulation:
$$\max_{\lambda, \sigma_0} \lambda$$
such that
$$2x^2 - \lambda = \sigma_0\\
\sigma_0\in \Sigma[x],\ \mathrm{deg}\,\sigma_0\leq 2.$$
In fact, we can move $\lambda$ to the right-hand side, where the sum-of-squares (SOS) decomposition is, $\lambda$ being a trivial SOS multiplied by the constraint $\int_\mathbf{K} \mathrm{d}\mu$, that is, by 1.
We normally think of adding some $g_i(x)$ polynomial constraints that define a semialgebraic set, and then constructing matching localizing matrices. We can, however, impose more constraints on the moments. For instance, we can add a constraint that $\int_\mathbf{K} x\mathrm{d}\mu = 1$. All of these constraints will have a constant instead of an SOS polynomial in the dual. To ensure the moments are not substituted out while generating the SDP, we enter them as pairs of moment inequalities. Solving this problem gives the correct result again: | moments = [x-1, 1-x]
sdp = SdpRelaxation([x])
sdp.get_relaxation(1, objective=x**2, momentinequalities=moments)
sdp.solve()
print(sdp.primal, sdp.dual) | Parameteric and Bilevel Polynomial Optimization Problems.ipynb | peterwittek/ipython-notebooks | gpl-3.0 |
The dual changed slightly. Let $\gamma_\beta=\int_\mathbf{K} x^\beta\mathrm{d}\mu$ for $\beta=0, 1$. Then the dual reads as
$$\max_{\lambda_\beta, \sigma_0} \sum_{\beta=0}^1\lambda_\beta \gamma_\beta$$
such that
$$2x^2 - \sum_{\beta=0}^1\lambda_\beta x^\beta = \sigma_0\\
\sigma_0\in \Sigma[x],\ \mathrm{deg}\,\sigma_0\leq 2.$$
Indeed, if we extract the coefficients, we will see that $x$ gets a $\lambda_1=2$ (note that equalities are replaced by pairs of inequalities): | coeffs = [-sdp.extract_dual_value(0, range(1))]
coeffs += [sdp.y_mat[2*i+1][0][0] - sdp.y_mat[2*i+2][0][0]
for i in range(len(moments)//2)]
sigma_i = sdp.get_sos_decomposition()
print(coeffs, [sdp.dual, sigma_i[1]-sigma_i[2]]) | Parameteric and Bilevel Polynomial Optimization Problems.ipynb | peterwittek/ipython-notebooks | gpl-3.0 |
Moment constraints play a crucial role in the joint+marginal approach of the SDP relaxation of polynomial optimization problems, and hence also indirectly in the bilevel polynomial optimization problems.
Joint+marginal approach
In a parametric polynomial optimization problem, we can separate two sets of variables, and one set acts as a parameter to the problem. More formally, we would like to find the following function:
$$ J(x) = \inf_{y\in\mathbb{R}^m}\{f(x,y): h_j(y)\geq 0, j=1,\ldots,r\},$$
where $x\in\mathbf{X}=\{x\in \mathbb{R}^n: h_k(x)\geq 0, k=r+1,\ldots,t\}$. This can be relaxed as an SDP, and we can extract an approximation $J_k(x)$ at level-$k$ from the dual solution. The primal form reads as
$$ \inf_z L_z(f)$$
such that
$$ M_k(z)\succeq 0,\\
M_{k-v_j}(h_j z)\succeq 0,\ j=1,\ldots,t\\
L_z(x^\beta) = \gamma_\beta,\ \forall\beta\in\mathbb{N}_k^n.$$
Notice that the localizing matrices also address the polynomial constraints that define the semialgebraic set $\mathbf{X}$. If the positivity constraints are fulfilled, then we have a finite Borel representing measure $\mu$ on $\mathbf{K}=\{h_j(y)\geq 0, j=1,\ldots,r\}$ such that $z_{\alpha\beta}=\int_\mathbf{K} x^\alpha y^\beta\mathrm{d}\mu$.
The part that is different from the regular Lasserre hierarchy is the last line, where $\gamma_\beta=\int_\mathbf{X} x^\beta\mathrm{d}\varphi(x)$. This establishes a connection between the moments of $x$ on $\mathbf{K}$ in measure $\mu$ and the moments of $x$ on $\mathbf{X}$ in measure $\varphi$. This $\varphi$ measure is a Borel probability measure on $\mathbf{X}$ with a positive density with respect to the Lebesgue measure on $\mathbb{R}^n$. In other words, the marginal of one measure must match the other on $\mathbf{X}$.
The dual of the primal form of the SDP with these moment constraints is
$$ \sup_{p, \sigma_j} \int_\mathbf{X} p\, \mathrm{d}\varphi$$
such that
$$ f - p = \sigma_0 + \sum_{j=1}^t \sigma_j h_j\\
p\in\mathbb{R}[x],\ \sigma_j\in\Sigma[x, y],\ j=0,\ldots,t\\
\mathrm{deg}\, p\leq 2k,\ \mathrm{deg}\, \sigma_0\leq 2k,\ \mathrm{deg}\,\sigma_j h_j \leq 2k,\ j=1,\ldots, t.
$$
The polynomial $p=\sum_{\beta=0}^{2k} \lambda_\beta x^\beta$ is the approximation $J_k(x)$. Below we reproduce the three examples of the paper.
Example 1
Let $\mathbf{X}=[0,1]$, $\mathbf{K}=\{(x,y): 1-x^2-y^2\geq 0;\ x,y\in\mathbf{X}\}$, and $f(x,y)= - xy^2$. We know that $J(x) = -(1-x^2)x$. First we declare $J(x)$, a helper function to define a polynomial, and we set up the symbolic variables $x$ and $y$. | def J(x):
return -(1-x**2)*x
def Jk(x, coeffs):
return sum(ci*x**i for i, ci in enumerate(coeffs))
x = generate_variables('x')[0]
y = generate_variables('y')[0] | Parameteric and Bilevel Polynomial Optimization Problems.ipynb | peterwittek/ipython-notebooks | gpl-3.0 |
Next, we define the level of the relaxation and the moment constraints: | level = 4
gamma = [integrate(x**i, (x, 0, 1)) for i in range(1, 2*level+1)]
marginals = flatten([[x**i-N(gamma[i-1]), N(gamma[i-1])-x**i] for i in range(1, 2*level+1)]) | Parameteric and Bilevel Polynomial Optimization Problems.ipynb | peterwittek/ipython-notebooks | gpl-3.0 |
Finally we define the objective function and the constraints that define the semialgebraic sets, and we generate and solve the relaxation. | f = -x*y**2
inequalities = [1.0-x**2-y**2, 1-x, x, 1-y, y]
sdp = SdpRelaxation([x, y], verbose=0)
sdp.get_relaxation(level, objective=f, momentinequalities=marginals,
inequalities=inequalities)
sdp.solve()
print(sdp.primal, sdp.dual, sdp.status)
coeffs = [sdp.extract_dual_value(0, range(len(inequalities)+1))]
coeffs += [sdp.y_mat[len(inequalities)+1+2*i][0][0] - sdp.y_mat[len(inequalities)+1+2*i+1][0][0]
for i in range(len(marginals)//2)] | Parameteric and Bilevel Polynomial Optimization Problems.ipynb | peterwittek/ipython-notebooks | gpl-3.0 |
To check the correctness of the approximation, we plot the optimal and the approximated functions over the domain. | x_domain = [i/100. for i in range(100)]
plt.plot(x_domain, [J(xi) for xi in x_domain], linewidth=2.5)
plt.plot(x_domain, [Jk(xi, coeffs) for xi in x_domain], linewidth=2.5)
plt.show() | Parameteric and Bilevel Polynomial Optimization Problems.ipynb | peterwittek/ipython-notebooks | gpl-3.0 |
Example 2
The set $\mathbf{X}=[0,1]$ remains the same. Let $\mathbf{K}=\{(x,y): 1-y_1^2-y_2^2\geq 0\}$, and $f(x,y) = xy_1 + (1-x)y_2$. Now the optimal $J(x)$ will be $-\sqrt{x^2+(1-x)^2}$. | def J(x):
return -sqrt(x**2+(1-x)**2)
x = generate_variables('x')[0]
y = generate_variables('y', 2)
f = x*y[0] + (1-x)*y[1]
gamma = [integrate(x**i, (x, 0, 1)) for i in range(1, 2*level+1)]
marginals = flatten([[x**i-N(gamma[i-1]), N(gamma[i-1])-x**i] for i in range(1, 2*level+1)])
inequalities = [1-y[0]**2-y[1]**2, x, 1-x]
sdp = SdpRelaxation(flatten([x, y]))
sdp.get_relaxation(level, objective=f, momentinequalities=marginals,
inequalities=inequalities)
sdp.solve()
print(sdp.primal, sdp.dual, sdp.status)
coeffs = [sdp.extract_dual_value(0, range(len(inequalities)+1))]
coeffs += [sdp.y_mat[len(inequalities)+1+2*i][0][0] - sdp.y_mat[len(inequalities)+1+2*i+1][0][0]
for i in range(len(marginals)//2)]
plt.plot(x_domain, [J(xi) for xi in x_domain], linewidth=2.5)
plt.plot(x_domain, [Jk(xi, coeffs) for xi in x_domain], linewidth=2.5)
plt.show() | Parameteric and Bilevel Polynomial Optimization Problems.ipynb | peterwittek/ipython-notebooks | gpl-3.0 |
Example 3
Note that this is Example 4 in the paper. The set $\mathbf{X}=[0,1]$ remains the same, whereas $\mathbf{K}=\{(x,y): xy_1^2+y_2^2-x= 0,\ y_1^2+xy_2^2-x= 0\}$, and $f(x,y) = (1-2x)(y_1+y_2)$. The optimal $J(x)$ is $-2|1-2x|\sqrt{x/(1+x)}$. We enter the equalities as pairs of inequalities. | def J(x):
return -2*abs(1-2*x)*sqrt(x/(1+x))
x = generate_variables('x')[0]
y = generate_variables('y', 2)
f = (1-2*x)*(y[0] + y[1])
gamma = [integrate(x**i, (x, 0, 1)) for i in range(1, 2*level+1)]
marginals = flatten([[x**i-N(gamma[i-1]), N(gamma[i-1])-x**i] for i in range(1, 2*level+1)])
inequalities = [x*y[0]**2 + y[1]**2 - x, - x*y[0]**2 - y[1]**2 + x,
y[0]**2 + x*y[1]**2 - x, - y[0]**2 - x*y[1]**2 + x,
1-x, x]
sdp = SdpRelaxation(flatten([x, y]))
sdp.get_relaxation(level, objective=f, momentinequalities=marginals,
inequalities=inequalities)
sdp.solve(solver="mosek")
print(sdp.primal, sdp.dual, sdp.status)
coeffs = [sdp.extract_dual_value(0, range(len(inequalities)+1))]
coeffs += [sdp.y_mat[len(inequalities)+1+2*i][0][0] - sdp.y_mat[len(inequalities)+1+2*i+1][0][0]
for i in range(len(marginals)//2)]
plt.plot(x_domain, [J(xi) for xi in x_domain], linewidth=2.5)
plt.plot(x_domain, [Jk(xi, coeffs) for xi in x_domain], linewidth=2.5)
plt.show() | Parameteric and Bilevel Polynomial Optimization Problems.ipynb | peterwittek/ipython-notebooks | gpl-3.0 |
Bilevel problem of nonconvex lower level
We define the bilevel problem as follows:
$$ \min_{x\in\mathbb{R}^n, y\in\mathbb{R}^m}f(x,y)$$
such that
$$g_i(x, y) \geq 0,\ i=1,\ldots,s,\\
y\in Y(x)=\mathrm{argmin}_{w\in\mathbb{R}^m}\{G(x,w): h_j(w)\geq 0,\ j=1,\ldots,r\}.$$
The more interesting case is when the lower level problem's objective function $G(x,y)$ is nonconvex. We consider the $\epsilon$-approximation of this case:
$$ \min_{x\in\mathbb{R}^n, y\in\mathbb{R}^m}f(x,y)$$
such that
$$g_i(x, y) \geq 0,\ i=1,\ldots,s,\\
h_j(y) \geq 0,\ j=1,\dots, r,\\
G(x,y) - \mathrm{min}_{w\in\mathbb{R}^m}\{G(x,w): h_j(w)\geq 0,\ j=1,\ldots,r\}\leq \epsilon.$$
This approximation will give an increasing lower bound on the original problem. The min function on the right of $G(x,y)$ is essentially a parametric polynomial optimization problem, that is, our task is to find $J(x)$. We have to ensure that the parameter set is compact, so we add a set of constraints on the coordinates of $x$: $\{M^2-x_l^2\geq 0,\ l=1,\ldots,n\}$ for some $M>0$.
The idea is that we relax this as an SDP at some level $k$ and fixed $\epsilon$ to obtain the following single-level polynomial optimization problem:
$$ \min_{x\in\mathbb{R}^n, y\in\mathbb{R}^m}f(x,y)$$
such that
$$g_i(x, y) \geq 0,\ i=1,\ldots,s,\\
h_j(y) \geq 0,\ j=1,\dots, r,\\
G(x,y) - J_k(x)\leq \epsilon.$$
Then we relax this as another SDP at level $k$.
Consider a test problem defined as follows:
$$ \min_{(x,y)\in\mathbb{R}^2} x+y$$
such that
$$x\in[-1,1], \\
y\in \mathrm{argmin}_{w\in\mathbb{R}}\left\{\frac{xw^2}{2}-\frac{w^3}{3}: w\in[-1,1]\right\}.$$
This is clearly a bilevel problem. We set up the necessary variables and constraints, requesting a level-3 relaxation, and also fixing $\epsilon$ and a choice of $M$. | x = generate_variables('x')[0]
y = generate_variables('y')[0]
f = x + y
g = [x <= 1.0, x >= -1.0]
G = x*y**2/2.0 - y**3/3.0
h = [y <= 1.0, y >= -1.0]
epsilon = 0.001
M = 1.0
level = 3 | Parameteric and Bilevel Polynomial Optimization Problems.ipynb | peterwittek/ipython-notebooks | gpl-3.0 |
We define the relaxation of the parametric polynomial optimization problem that returns an approximation of $J(x)$ from the dual: | def lower_level(k, G, h, M):
gamma = [integrate(x**i, (x, -M, M))/(2*M) for i in range(1, 2*k+1)]
marginals = flatten([[x**i-N(gamma[i-1]), N(gamma[i-1])-x**i] for i in range(1, 2*k+1)])
inequalities = h + [x**2 <= M**2]
lowRelaxation = SdpRelaxation([x, y])
lowRelaxation.get_relaxation(k, objective=G,
momentinequalities=marginals,
inequalities=inequalities)
lowRelaxation.solve()
print("Low-level:", lowRelaxation.primal, lowRelaxation.dual, lowRelaxation.status)
coeffs = []
for i in range(len(marginals)//2):
coeffs.append(lowRelaxation.y_mat[len(inequalities)+1+2*i][0][0] -
lowRelaxation.y_mat[len(inequalities)+1+2*i+1][0][0])
blocks = [i for i in range(len(inequalities)+1)]
constant = lowRelaxation.extract_dual_value(0, blocks)
return constant + sum(ci*x**(i+1) for i, ci in enumerate(coeffs)) | Parameteric and Bilevel Polynomial Optimization Problems.ipynb | peterwittek/ipython-notebooks | gpl-3.0 |
Finally, we put it all together: | Jk = lower_level(level, G, h, M)
inequalities = g + h + [G - Jk <= epsilon]
highRelaxation = SdpRelaxation([x, y], verbose=0)
highRelaxation.get_relaxation(level, objective=f,
inequalities=inequalities)
highRelaxation.solve()
print("High-level:", highRelaxation.primal, highRelaxation.status)
print("Optimal x and y:", highRelaxation[x], highRelaxation[y])
| Parameteric and Bilevel Polynomial Optimization Problems.ipynb | peterwittek/ipython-notebooks | gpl-3.0 |
Next, import relevant modules | import LMIPy as lmi
import os
import json
import shutil
from pprint import pprint
from datetime import datetime
from tqdm import tqdm | docs/Update_GFW_Layers_Vault.ipynb | Vizzuality/gfw | mit |
First, pull the gfw repo and check that the following path correctly finds the data/layers folder, inside which you should find a production and a staging folder. | envs = ['staging', 'production']
path = './backup/configs'
# Create directory and archive previous datasets
with open(path + '/metadata.json') as f:
date = json.load(f)[0]['updatedAt']
shutil.make_archive(f'./backup/archived/archive_{date}', 'zip', path)
# Check correct folders are found
if not all([folder in os.listdir(path) for folder in envs]):
print(f'Boo! Incorrect path: {path}')
else:
print('Good to go!') | docs/Update_GFW_Layers_Vault.ipynb | Vizzuality/gfw | mit |
Run the following to save and build the .json files, and to log changes.
Update record | %%time
for env in envs:
# Get all old ids
old_ids = [file.split('.json')[0] for file in os.listdir(path + f'/{env}') if '_metadata' not in file]
old_datasets = []
files = os.listdir(path + f'/{env}')
# Extract all old datasets
for file in files:
if '_metadata' not in file:
with open(path + f'/{env}/{file}') as f:
old_datasets.append(json.load(f))
# Now pull all current gfw datasets and save
col = lmi.Collection(app=['gfw'], env=env)
col.save(path + f'/{env}')
# Get all new ids
new_ids = [file.split('.json')[0] for file in os.listdir(path + f'/{env}') if '_metadata' not in file]
# See which are new, and which have been removed
added = list(set(new_ids) - set(old_ids))
removed = list(set(old_ids) - set(new_ids))
changed = []
# Compare old and new, logging those that have changed
for old_dataset in old_datasets:
ds_id = old_dataset['id']
old_ids.append(ds_id)
with open(path + f'/{env}/{ds_id}.json') as f:
new_dataset = json.load(f)
if old_dataset != new_dataset:
changed.append(ds_id)
# Create metadata json
with open(path + f'/{env}/_metadata.json', 'w') as f:
meta = {
'updatedAt': datetime.today().strftime('%Y-%m-%d@%Hh-%Mm-%Ss'),
'env': env,
'differences': {
'changed': changed,
'added': added,
'removed': removed
}
}
# And save it too!
json.dump(meta,f)
print('Done!')
# Generate rich metadata
metadata = []
for env in tqdm(envs):
with open(path + f'/{env}/_metadata.json') as f:
metadata.append(json.load(f))
for env in tqdm(metadata):
for change_type, ds_list in env['differences'].items():
tmp = []
for dataset in ds_list:
# generate Dataset entity to get name etc...
print(dataset)
tmp.append(str(lmi.Dataset(dataset)))
env['differences'][change_type] = tmp
with open(path + f'/metadata.json', 'w') as f:
# And save it too!
json.dump(metadata,f)
pprint(metadata) | docs/Update_GFW_Layers_Vault.ipynb | Vizzuality/gfw | mit |
Load the head observations
The first step in time series analysis is to load a time series of head observations. The time series needs to be stored as a pandas.Series object where the index is the date (and time, if desired). pandas provides many options to load time series data, depending on the format of the file that contains the time series. In this example, measured heads are stored in the csv file head_nb1.csv.
The heads are read from a csv file with the read_csv function of pandas and are then squeezed to create a pandas Series object. To check if you have the correct data type, use the type command as shown below. | ho = pd.read_csv('../data/head_nb1.csv', parse_dates=['date'], index_col='date', squeeze=True)
print('The data type of the oseries is:', type(ho)) | examples/notebooks/02_fix_parameters.ipynb | pastas/pasta | mit |
The variable ho is now a pandas Series object. To see the first five lines, type ho.head(). | ho.head() | examples/notebooks/02_fix_parameters.ipynb | pastas/pasta | mit |
The series can be plotted as follows | ho.plot(style='.', figsize=(12, 4))
plt.ylabel('Head [m]');
plt.xlabel('Time [years]'); | examples/notebooks/02_fix_parameters.ipynb | pastas/pasta | mit |
Load the stresses
The head variation shown above is believed to be caused by two stresses: rainfall and evaporation. Measured rainfall is stored in the file rain_nb1.csv and measured potential evaporation is stored in the file evap_nb1.csv.
The rainfall and potential evaporation are loaded and plotted. | rain = pd.read_csv('../data/rain_nb1.csv', parse_dates=['date'], index_col='date', squeeze=True)
print('The data type of the rain series is:', type(rain))
evap = pd.read_csv('../data/evap_nb1.csv', parse_dates=['date'], index_col='date', squeeze=True)
print('The data type of the evap series is', type(evap))
plt.figure(figsize=(12, 4))
rain.plot(label='rain')
evap.plot(label='evap')
plt.xlabel('Time [years]')
plt.ylabel('Rainfall/Evaporation (m/d)')
plt.legend(loc='best'); | examples/notebooks/02_fix_parameters.ipynb | pastas/pasta | mit |
Recharge
As a first simple model, the recharge is approximated as the measured rainfall minus the measured potential evaporation. | recharge = rain - evap
plt.figure(figsize=(12, 4))
recharge.plot()
plt.xlabel('Time [years]')
plt.ylabel('Recharge (m/d)'); | examples/notebooks/02_fix_parameters.ipynb | pastas/pasta | mit |
First time series model
Once the time series are read from the data files, a time series model can be constructed by going through the following three steps:
Create a Model object by passing it the observed head series. Store your model in a variable so that you can use it later on.
Add the stresses that are expected to cause the observed head variation to the model. In this example, this is only the recharge series. For each stress, a StressModel object needs to be created. Each StressModel object needs three input arguments: the time series of the stress, the response function that is used to simulate the effect of the stress, and a name. In addition, it is recommended to specify the kind of series, which is used to perform a number of checks on the series and fix problems when needed. This checking and fixing of problems (for example, what to substitute for a missing value) depends on the kind of series. In this case, the time series of the stress is stored in the variable recharge, the Gamma function is used to simulate the response, the series will be called 'recharge', and the kind is prec, which stands for precipitation. One of the other keyword arguments of the StressModel class is up, which means that a positive stress results in an increase (up) of the head. The default value is True, which we use in this case as a positive recharge will result in the heads going up. Each StressModel object needs to be stored in a variable, after which it can be added to the model.
When everything is added, the model can be solved. The default option is to minimize the sum of the squares of the errors between the observed and modeled heads. | ml = ps.Model(ho)
sm1 = ps.StressModel(recharge, ps.Gamma, name='recharge', settings='prec')
ml.add_stressmodel(sm1)
ml.solve(tmin='1985', tmax='2010') | examples/notebooks/02_fix_parameters.ipynb | pastas/pasta | mit |
The solve function has a number of default options that can be specified with keyword arguments. One of these options is that by default a fit report is printed to the screen. The fit report includes a summary of the fitting procedure, the optimal values obtained by the fitting routine, and some basic statistics. The model contains five parameters: the parameters $A$, $n$, and $a$ of the Gamma function used as the response function for the recharge, the parameter $d$, which is a constant base level, and the parameter $\alpha$ of the noise model, which will be explained a little later on in this notebook.
The results of the model are plotted below. | ml.plot(figsize=(12, 4));
ml = ps.Model(ho)
sm1 = ps.StressModel(recharge, ps.Gamma, name='recharge', settings='prec')
ml.add_stressmodel(sm1)
ml.solve(tmin='1985', tmax='2010', solver=ps.LeastSquares)
ml = ps.Model(ho)
sm1 = ps.StressModel(recharge, ps.Gamma, name='recharge', settings='prec')
ml.add_stressmodel(sm1)
ml.set_parameter('recharge_n', vary=False)
ml.solve(tmin='1985', tmax='2010', solver=ps.LeastSquares)
ml.plot(figsize=(10, 4)); | examples/notebooks/02_fix_parameters.ipynb | pastas/pasta | mit |
A normal old python function to return the Nth fibonacci number.
Iterative implementation of fibonacci: iteratively adds a and b to
calculate the nth number in the sequence.
>>> [software_fibonacci(x) for x in range(10)]
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34] | def software_fibonacci(n):
a, b = 0, 1
for i in range(n):
a, b = b, a + b
return a | ipynb-examples/introduction-to-hardware.ipynb | UCSBarchlab/PyRTL | bsd-3-clause |
Attempt 1
Let's convert this into some hardware that computes the same thing. Our first go will be to just replace the 0 and 1 with WireVectors to see
what happens. | def attempt1_hardware_fibonacci(n, bitwidth):
a = pyrtl.Const(0)
b = pyrtl.Const(1)
for i in range(n):
a, b = b, a + b
return a | ipynb-examples/introduction-to-hardware.ipynb | UCSBarchlab/PyRTL | bsd-3-clause |
The above looks really nice but does not really represent a hardware implementation
of fibonacci.
Let's reason through the code, line by line, to figure out what it would actually build.
a = pyrtl.Const(0)
This makes a wirevector of bitwidth=1 that is driven by a zero. Thus a is a wirevector. Seems good.
b = pyrtl.Const(1)
Just like above, b is a wirevector driven by 1
for i in range(n):
Okay, here is where things start to go off the rails a bit. This says to perform the following code 'n' times, but the value 'n' is passed as an input and is not something that is evaluated in the hardware, it is evaluated when you run the PyRTL program which generates (or more specifically elaborates) the hardware. Thus the hardware we are building will have the value of 'n' built into the hardware and won't actually be a run-time parameter. Loops are really useful for building large repetitive hardware structures, but they CAN'T be used to represent hardware that should do a computation iteratively. Instead we are going to need to use some registers to build a state machine.
a, b = b, a + b
Let's break this apart. In the first cycle b is Const(1) and (a + b) builds an adder with a (Const(0)) and b (Const(1)) as inputs. Thus (b, a + b) in the first iteration is (Const(1), result_of_adding(Const(0), Const(1))). At the end of the first iteration a and b refer to those two constant values. In each following iteration more adders are built and the names a and b are bound to larger and larger trees of adders, but all the inputs are constants!
return a
The final thing that is returned then is the last output from this tree of adders which all have Consts as inputs. Thus this hardware is hard-wired to find only and exactly the value of fibonacci of the value N specified at design time! Probably not what you are intending.
Attempt 2
Let's try a different approach. Let's specify two registers ("a" and "b") and then we can update those values as we iteratively compute fibonacci of N cycle by cycle. | def attempt2_hardware_fibonacci(n, bitwidth):
a = pyrtl.Register(bitwidth, 'a')
b = pyrtl.Register(bitwidth, 'b')
a.next <<= b
b.next <<= a + b
return a | ipynb-examples/introduction-to-hardware.ipynb | UCSBarchlab/PyRTL | bsd-3-clause |
This is looking much better.
Two registers, a and b, store the values from which we
can compute the series.
The line a.next <<= b means that the value of a in the next
cycle should simply be b from the current cycle.
The line b.next <<= a + b says
to build an adder, with inputs of a and b from the current cycle and assign the value
to b in the next cycle.
A visual representation of the hardware built is as such:
+-----+ +---------+
| | | |
+===V==+ | +===V==+ |
| | | | | |
| a | | | b | |
| | | | | |
+===V==+ | +==V===+ |
| | | |
| +-----+ |
| | |
+===V===========V==+ |
\ adder / |
+==============+ |
| |
+---------------+
Note that in the picture the register a and b each have a wirevector which is
the current value (shown flowing out of the bottom of the register) and an input
which is giving the value that should be the value of the register in the following
cycle (shown flowing into the top of the register) which are a and a.next respectively.
When we say return a what we are returning is a reference to the register a in
the picture above.
Attempt 3
Of course one problem is that we don't know when we are done! How do we know we
reached the "nth" number in the sequence? Well, we need to add a register to
count up and see if we are done.
This is very similar to the example before, except that now we have a register "i"
which keeps track of the iteration that we are on (i.next <<= i + 1).
The function now returns two values, a reference to the register "a" and a reference to a single
bit that tells us if we are done. That bit is calculated by comparing "i" to the
wirevector "n" that is passed in to see if they are the same.
to a wirevector "n" that is passed in to see if they are the same. | def attempt3_hardware_fibonacci(n, bitwidth):
a = pyrtl.Register(bitwidth, 'a')
b = pyrtl.Register(bitwidth, 'b')
i = pyrtl.Register(bitwidth, 'i')
i.next <<= i + 1
a.next <<= b
b.next <<= a + b
return a, i == n | ipynb-examples/introduction-to-hardware.ipynb | UCSBarchlab/PyRTL | bsd-3-clause |
Attempt 4
This is now far enough along that we can simulate the design and see what happens... | def attempt4_hardware_fibonacci(n, req, bitwidth):
a = pyrtl.Register(bitwidth, 'a')
b = pyrtl.Register(bitwidth, 'b')
i = pyrtl.Register(bitwidth, 'i')
local_n = pyrtl.Register(bitwidth, 'local_n')
done = pyrtl.WireVector(bitwidth=1, name='done')
with pyrtl.conditional_assignment:
with req:
local_n.next |= n
i.next |= 0
a.next |= 0
b.next |= 1
with pyrtl.otherwise:
i.next |= i + 1
a.next |= b
b.next |= a + b
done <<= i == local_n
return a, done | ipynb-examples/introduction-to-hardware.ipynb | UCSBarchlab/PyRTL | bsd-3-clause |
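Below is a minimal simulation sketch (not from the original notebook): the Input/Output wiring, the 8-bit width, and the cycle count are illustrative assumptions.
```
import pyrtl

pyrtl.reset_working_block()
n_in = pyrtl.Input(bitwidth=8, name='n')
req_in = pyrtl.Input(bitwidth=1, name='req')
# 'done' is already a named WireVector inside the function, so it shows up in the trace
fib, done = attempt4_hardware_fibonacci(n_in, req_in, bitwidth=8)
result = pyrtl.Output(bitwidth=8, name='result')
result <<= fib

sim_trace = pyrtl.SimulationTrace()
sim = pyrtl.Simulation(tracer=sim_trace)
sim.step({'n': 6, 'req': 1})       # load the request in the first cycle
for _ in range(8):
    sim.step({'n': 0, 'req': 0})   # let the state machine iterate
sim_trace.render_trace()
```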
Define test tables | # Define test data and register it as tables
# This is a classic example of employee and department relational tables
# Test data will be used in the examples later in this notebook
from pyspark.sql import Row
Employee = Row("id", "name", "email", "manager_id", "dep_id")
df_emp = sqlContext.createDataFrame([
Employee(1234, 'John', '[email protected]', 1236, 10),
Employee(1235, 'Mike', '[email protected]', 1237, 10),
Employee(1236, 'Pat', '[email protected]', 1237, 20),
Employee(1237, 'Claire', '[email protected]', None, 20),
Employee(1238, 'Jim', '[email protected]', 1236, 30)
])
df_emp.registerTempTable("employee")
Department = Row("dep_id", "dep_name")
df_dep = sqlContext.createDataFrame([
Department(10, 'Engineering'),
Department(20, 'Head Quarter'),
Department(30, 'Human resources')
])
df_dep.registerTempTable("department") | Pyspark_SQL_Magic_Jupyter/IPython_Pyspark_SQL_Magic.ipynb | LucaCanali/Miscellaneous | apache-2.0 |
Examples of how to use %SQL magic functions with Spark
Use %sql to run SQL and return a DataFrame, lazy evaluation | # Example of line magic, a shortcut to run SQL in pyspark
# Pyspark has lazy evaluation, so the query is not executed in this example
df = %sql select * from employee
df | Pyspark_SQL_Magic_Jupyter/IPython_Pyspark_SQL_Magic.ipynb | LucaCanali/Miscellaneous | apache-2.0 |
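Because of the lazy evaluation noted above, nothing actually runs until an action is called on the DataFrame, for example (a small sketch using the df from the cell above):
```
# Trigger execution with an action; show() prints the first rows
df.show(5)
```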
Use %sql_show to run SQL and show the top lines of the result set | # Example of line magic, the SQL is executed and the result is displayed
# the maximum number of displayed lines is configurable (max_show_lines)
%sql_show select * from employee | Pyspark_SQL_Magic_Jupyter/IPython_Pyspark_SQL_Magic.ipynb | LucaCanali/Miscellaneous | apache-2.0 |
Example of cell magic to run SQL spanning multiple lines | %%sql_show
select emp.id, emp.name, emp.email, emp.manager_id, dep.dep_name
from employee emp, department dep
where emp.dep_id=dep.dep_id | Pyspark_SQL_Magic_Jupyter/IPython_Pyspark_SQL_Magic.ipynb | LucaCanali/Miscellaneous | apache-2.0 |
Use %sql_display to run SQL and display the results as an HTML table
Example of cell magic that runs SQL and then transforms the result to pandas. This will display the output as an HTML table in Jupyter notebooks | %%sql_display
select emp.id, emp.name, emp.email, emp2.name as manager_name, dep.dep_name
from employee emp
left outer join employee emp2 on emp2.id=emp.manager_id
join department dep on emp.dep_id=dep.dep_id | Pyspark_SQL_Magic_Jupyter/IPython_Pyspark_SQL_Magic.ipynb | LucaCanali/Miscellaneous | apache-2.0 |
Use %sql_explain to display the execution plan | %%sql_explain
select emp.id, emp.name, emp.email, emp2.name as manager_name, dep.dep_name
from employee emp
left outer join employee emp2 on emp2.id=emp.manager_id
join department dep on emp.dep_id=dep.dep_id | Pyspark_SQL_Magic_Jupyter/IPython_Pyspark_SQL_Magic.ipynb | LucaCanali/Miscellaneous | apache-2.0 |
TPOT uses a genetic algorithm (implemented with the DEAP library) to pick an optimal pipeline for a supervised learning task (the demo below is a classification problem).
What is a pipeline?
A pipeline is composed of preprocessors followed by a model; for example, a preprocessing step might:
* take polynomial transformations of features

A rough sketch of such a pipeline is shown below.
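As an illustration (a hypothetical scikit-learn pipeline, not actual TPOT output), a pipeline of this shape might look like:
```
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.ensemble import RandomForestClassifier

# a preprocessor (polynomial feature transformation) followed by a model
pipeline = make_pipeline(PolynomialFeatures(degree=2), RandomForestClassifier())
```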
TPOTBase is the key class. Its parameters:
* population_size: int (default: 100)
  The number of pipelines in the genetic algorithm population. Must
  be > 0. The more pipelines in the population, the slower TPOT will
  run, but it's also more likely to find better pipelines.
* generations: int (default: 100)
The number of generations to run pipeline optimization for. Must
be > 0. The more generations you give TPOT to run, the longer it
takes, but it's also more likely to find better pipelines.
* mutation_rate: float (default: 0.9)
The mutation rate for the genetic programming algorithm in the range
[0.0, 1.0]. This tells the genetic programming algorithm how many
pipelines to apply random changes to every generation. We don't
recommend that you tweak this parameter unless you know what you're
doing.
* crossover_rate: float (default: 0.05)
The crossover rate for the genetic programming algorithm in the
range [0.0, 1.0]. This tells the genetic programming algorithm how
many pipelines to "breed" every generation. We don't recommend that
you tweak this parameter unless you know what you're doing.
* scoring: function or str
Function used to evaluate the quality of a given pipeline for the
problem. By default, balanced class accuracy is used for
classification problems, mean squared error for regression problems.
TPOT assumes that this scoring function should be maximized, i.e.,
higher is better.
Offers the same options as sklearn.cross_validation.cross_val_score:
['accuracy', 'adjusted_rand_score', 'average_precision', 'f1',
'f1_macro', 'f1_micro', 'f1_samples', 'f1_weighted',
'precision', 'precision_macro', 'precision_micro', 'precision_samples',
'precision_weighted', 'r2', 'recall', 'recall_macro', 'recall_micro',
'recall_samples', 'recall_weighted', 'roc_auc']
* num_cv_folds: int (default: 3)
The number of folds to evaluate each pipeline over in k-fold
cross-validation during the TPOT pipeline optimization process
* max_time_mins: int (default: None)
How many minutes TPOT has to optimize the pipeline. If not None,
this setting will override the generations parameter.
TPOTClassifier and TPOTRegressor inherit from the parent class TPOTBase, with modifications of the scoring function. | !sudo pip install deap update_checker tqdm xgboost tpot
import pandas as pd
import numpy as np
import psycopg2
import os
import json
from tpot import TPOTClassifier
from sklearn.metrics import classification_report
conn = psycopg2.connect(
user = os.environ['REDSHIFT_USER']
,password = os.environ['REDSHIFT_PASS']
,port = os.environ['REDSHIFT_PORT']
,host = os.environ['REDSHIFT_HOST']
,database = 'tradesy'
)
query = """
select
purchase_dummy
,shipping_price_ratio
,asking_price
,price_level
,brand_score
,brand_size
,a_over_b
,favorite_count
,has_blurb
,has_image
,seasonal_component
,description_length
,product_category_accessories
,product_category_shoes
,product_category_bags
,product_category_tops
,product_category_dresses
,product_category_weddings
,product_category_bottoms
,product_category_outerwear
,product_category_jeans
,product_category_activewear
,product_category_suiting
,product_category_swim
from saleability_model_v2
limit 50000
"""
df = pd.read_sql(query, conn)
target = 'purchase_dummy'
domain = filter(lambda x: x != target, df.columns.values)
df = df.astype(float)
y_all = df[target].values
X_all = df[domain].values
idx_all = np.random.RandomState(1).permutation(len(y_all))
idx_train = idx_all[:int(.8 * len(y_all))]
idx_test = idx_all[int(.8 * len(y_all)):]
# TRAIN AND TEST DATA
X_train = X_all[idx_train]
y_train = y_all[idx_train]
X_test = X_all[idx_test]
y_test = y_all[idx_test] | notebook_gallery/other_experiments/build-models/model-selection-and-tuning/current-solutions/TPOT/TPOT-demo.ipynb | pramitchoudhary/Experiments | unlicense |
Sklearn model: | from sklearn.ensemble import RandomForestClassifier
sklearn_model = RandomForestClassifier()
sklearn_model.fit(X_train, y_train)
sklearn_predictions = sklearn_model.predict(X_test)
print classification_report(y_test, sklearn_predictions) | notebook_gallery/other_experiments/build-models/model-selection-and-tuning/current-solutions/TPOT/TPOT-demo.ipynb | pramitchoudhary/Experiments | unlicense |
TPOT Classifier | tpot_model = TPOTClassifier(generations=3, population_size=10, verbosity=2, max_time_mins=10)
tpot_model.fit(X_train, y_train)
tpot_predictions = tpot_model.predict(X_test)
print classification_report(y_test, tpot_predictions) | notebook_gallery/other_experiments/build-models/model-selection-and-tuning/current-solutions/TPOT/TPOT-demo.ipynb | pramitchoudhary/Experiments | unlicense |
Export Pseudo Pipeline Code | tpot_model.export('optimal-saleability-model.py')
!cat optimal-saleability-model.py | notebook_gallery/other_experiments/build-models/model-selection-and-tuning/current-solutions/TPOT/TPOT-demo.ipynb | pramitchoudhary/Experiments | unlicense |
Flower power
Here we'll be using VGGNet to classify images of flowers. To get the flower dataset, run the cell below. This dataset comes from the TensorFlow inception tutorial. | import tarfile
dataset_folder_path = 'flower_photos'
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile('flower_photos.tar.gz'):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='Flowers Dataset') as pbar:
urlretrieve(
'http://download.tensorflow.org/example_images/flower_photos.tgz',
'flower_photos.tar.gz',
pbar.hook)
if not isdir(dataset_folder_path):
with tarfile.open('flower_photos.tar.gz') as tar:
tar.extractall()
tar.close() | transfer-learning/Transfer_Learning.ipynb | efoley/deep-learning | mit |
ConvNet Codes
Below, we'll run through all the images in our dataset and get codes for each of them. That is, we'll run the images through the VGGNet convolutional layers and record the values of the first fully connected layer. We can then write these to a file for later when we build our own classifier.
Here we're using the vgg16 module from tensorflow_vgg. The network takes images of size $224 \times 224 \times 3$ as input. Then it has 5 sets of convolutional layers. The network implemented here has this structure (copied from the source code):
```
self.conv1_1 = self.conv_layer(bgr, "conv1_1")
self.conv1_2 = self.conv_layer(self.conv1_1, "conv1_2")
self.pool1 = self.max_pool(self.conv1_2, 'pool1')
self.conv2_1 = self.conv_layer(self.pool1, "conv2_1")
self.conv2_2 = self.conv_layer(self.conv2_1, "conv2_2")
self.pool2 = self.max_pool(self.conv2_2, 'pool2')
self.conv3_1 = self.conv_layer(self.pool2, "conv3_1")
self.conv3_2 = self.conv_layer(self.conv3_1, "conv3_2")
self.conv3_3 = self.conv_layer(self.conv3_2, "conv3_3")
self.pool3 = self.max_pool(self.conv3_3, 'pool3')
self.conv4_1 = self.conv_layer(self.pool3, "conv4_1")
self.conv4_2 = self.conv_layer(self.conv4_1, "conv4_2")
self.conv4_3 = self.conv_layer(self.conv4_2, "conv4_3")
self.pool4 = self.max_pool(self.conv4_3, 'pool4')
self.conv5_1 = self.conv_layer(self.pool4, "conv5_1")
self.conv5_2 = self.conv_layer(self.conv5_1, "conv5_2")
self.conv5_3 = self.conv_layer(self.conv5_2, "conv5_3")
self.pool5 = self.max_pool(self.conv5_3, 'pool5')
self.fc6 = self.fc_layer(self.pool5, "fc6")
self.relu6 = tf.nn.relu(self.fc6)
```
So what we want are the values of the first fully connected layer, after being ReLUd (self.relu6). To build the network, we use
with tf.Session() as sess:
vgg = vgg16.Vgg16()
input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])
with tf.name_scope("content_vgg"):
vgg.build(input_)
This creates the vgg object, then builds the graph with vgg.build(input_). Then to get the values from the layer,
feed_dict = {input_: images}
codes = sess.run(vgg.relu6, feed_dict=feed_dict) | import os
import numpy as np
import tensorflow as tf
from tensorflow_vgg import vgg16
from tensorflow_vgg import utils
data_dir = 'flower_photos/'
contents = os.listdir(data_dir)
classes = [each for each in contents if os.path.isdir(data_dir + each)] | transfer-learning/Transfer_Learning.ipynb | efoley/deep-learning | mit |
Below I'm running images through the VGG network in batches.
Exercise: Below, build the VGG network. Also get the codes from the first fully connected layer (make sure you get the ReLUd values). | # Set the batch size higher if you can fit it in your GPU memory
batch_size = 10
codes_list = []
labels = []
batch = []
codes = None
with tf.Session() as sess:
# TODO: Build the vgg network here
for each in classes:
print("Starting {} images".format(each))
class_path = data_dir + each
files = os.listdir(class_path)
for ii, file in enumerate(files, 1):
# Add images to the current batch
# utils.load_image crops the input images for us, from the center
img = utils.load_image(os.path.join(class_path, file))
batch.append(img.reshape((1, 224, 224, 3)))
labels.append(each)
# Running the batch through the network to get the codes
if ii % batch_size == 0 or ii == len(files):
# Image batch to pass to VGG network
images = np.concatenate(batch)
# TODO: Get the values from the relu6 layer of the VGG network
codes_batch =
# Here I'm building an array of the codes
if codes is None:
codes = codes_batch
else:
codes = np.concatenate((codes, codes_batch))
# Reset to start building the next batch
batch = []
print('{} images processed'.format(ii))
# write codes to file
with open('codes', 'w') as f:
codes.tofile(f)
# write labels to file
import csv
with open('labels', 'w') as f:
writer = csv.writer(f, delimiter='\n')
writer.writerow(labels) | transfer-learning/Transfer_Learning.ipynb | efoley/deep-learning | mit |
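One possible completion of the TODOs above (a sketch that mirrors the vgg16 usage shown earlier; placement relative to the loop is indicated in the comments):
```
with tf.Session() as sess:
    # Build the VGG network, as in the snippet shown earlier
    vgg = vgg16.Vgg16()
    input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])
    with tf.name_scope("content_vgg"):
        vgg.build(input_)
    # ... then, inside the batch loop, the codes are the ReLUd values
    # of the first fully connected layer:
    #     codes_batch = sess.run(vgg.relu6, feed_dict={input_: images})
```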
Building the Classifier
Now that we have codes for all the images, we can build a simple classifier on top of them. The codes behave just like normal input into a simple neural network. Below I'm going to have you do most of the work. | # read codes and labels from file
import csv
with open('labels') as f:
reader = csv.reader(f, delimiter='\n')
labels = np.array([each for each in reader]).squeeze()
with open('codes') as f:
codes = np.fromfile(f, dtype=np.float32)
codes = codes.reshape((len(labels), -1)) | transfer-learning/Transfer_Learning.ipynb | efoley/deep-learning | mit |
Data prep
As usual, now we need to one-hot encode our labels and create validation/test sets. First up, creating our labels!
Exercise: From scikit-learn, use LabelBinarizer to create one-hot encoded vectors from the labels. | labels_vecs = # Your one-hot encoded labels array here | transfer-learning/Transfer_Learning.ipynb | efoley/deep-learning | mit |
Now you'll want to create your training, validation, and test sets. An important thing to note here is that our labels and data aren't randomized yet. We'll want to shuffle our data so the validation and test sets contain data from all classes. Otherwise, you could end up with testing sets that are all one class. Typically, you'll also want to make sure that each smaller set has the same the distribution of classes as it is for the whole data set. The easiest way to accomplish both these goals is to use StratifiedShuffleSplit from scikit-learn.
You can create the splitter like so:
ss = StratifiedShuffleSplit(n_splits=1, test_size=0.2)
Then split the data with
splitter = ss.split(x, y)
ss.split returns a generator of indices. You can pass the indices into the arrays to get the split sets. The fact that it's a generator means you either need to iterate over it, or use next(splitter) to get the indices. Be sure to read the documentation and the user guide.
Exercise: Use StratifiedShuffleSplit to split the codes and labels into training, validation, and test sets. | train_x, train_y =
val_x, val_y =
test_x, test_y =
print("Train shapes (x, y):", train_x.shape, train_y.shape)
print("Validation shapes (x, y):", val_x.shape, val_y.shape)
print("Test shapes (x, y):", test_x.shape, test_y.shape) | transfer-learning/Transfer_Learning.ipynb | efoley/deep-learning | mit |
If you did it right, you should see these sizes for the training sets:
Train shapes (x, y): (2936, 4096) (2936, 5)
Validation shapes (x, y): (367, 4096) (367, 5)
Test shapes (x, y): (367, 4096) (367, 5)
Classifier layers
Once you have the convolutional codes, you just need to build a classfier from some fully connected layers. You use the codes as the inputs and the image labels as targets. Otherwise the classifier is a typical neural network.
Exercise: With the codes and labels loaded, build the classifier. Consider the codes as your inputs, each of them are 4096D vectors. You'll want to use a hidden layer and an output layer as your classifier. Remember that the output layer needs to have one unit for each class and a softmax activation function. Use the cross entropy to calculate the cost. | inputs_ = tf.placeholder(tf.float32, shape=[None, codes.shape[1]])
labels_ = tf.placeholder(tf.int64, shape=[None, labels_vecs.shape[1]])
# TODO: Classifier layers and operations
logits = # output layer logits
cost = # cross entropy loss
optimizer = # training optimizer
# Operations for validation/test accuracy
predicted = tf.nn.softmax(logits)
correct_pred = tf.equal(tf.argmax(predicted, 1), tf.argmax(labels_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) | transfer-learning/Transfer_Learning.ipynb | efoley/deep-learning | mit |
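A possible completion (a sketch with one 256-unit hidden layer; the layer size and the Adam optimizer are arbitrary choices, not requirements):
```
fc = tf.contrib.layers.fully_connected(inputs_, 256)

logits = tf.contrib.layers.fully_connected(fc, labels_vecs.shape[1],
                                           activation_fn=None)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=labels_,
                                                        logits=logits)
cost = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer().minimize(cost)
```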
Batches!
Here is just a simple way to do batches. I've written it so that it includes all the data. Sometimes you'll throw out some data at the end to make sure you have full batches. Here I just extend the last batch to include the remaining data. | def get_batches(x, y, n_batches=10):
""" Return a generator that yields batches from arrays x and y. """
batch_size = len(x)//n_batches
for ii in range(0, n_batches*batch_size, batch_size):
# If we're not on the last batch, grab data with size batch_size
if ii != (n_batches-1)*batch_size:
X, Y = x[ii: ii+batch_size], y[ii: ii+batch_size]
# On the last batch, grab the rest of the data
else:
X, Y = x[ii:], y[ii:]
# I love generators
yield X, Y | transfer-learning/Transfer_Learning.ipynb | efoley/deep-learning | mit |
Training
Here, we'll train the network.
Exercise: So far we've been providing the training code for you. Here, I'm going to give you a bit more of a challenge and have you write the code to train the network. Of course, you'll be able to see my solution if you need help. Use the get_batches function I wrote before to get your batches like for x, y in get_batches(train_x, train_y). Or write your own! | saver = tf.train.Saver()
with tf.Session() as sess:
# TODO: Your training code here
saver.save(sess, "checkpoints/flowers.ckpt") | transfer-learning/Transfer_Learning.ipynb | efoley/deep-learning | mit |
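One possible training loop (a sketch; the epoch count of 10 is an arbitrary choice):
```
epochs = 10
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for e in range(epochs):
        for x, y in get_batches(train_x, train_y):
            feed = {inputs_: x, labels_: y}
            loss, _ = sess.run([cost, optimizer], feed_dict=feed)
        # check progress on the validation set after each epoch
        val_acc = sess.run(accuracy, feed_dict={inputs_: val_x, labels_: val_y})
        print("Epoch: {}/{}".format(e + 1, epochs),
              "Validation Acc: {:.4f}".format(val_acc))
    saver.save(sess, "checkpoints/flowers.ckpt")
```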
Testing
Below you see the test accuracy. You can also see the predictions returned for images. | with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
feed = {inputs_: test_x,
labels_: test_y}
test_acc = sess.run(accuracy, feed_dict=feed)
print("Test accuracy: {:.4f}".format(test_acc))
%matplotlib inline
import matplotlib.pyplot as plt
from scipy.ndimage import imread | transfer-learning/Transfer_Learning.ipynb | efoley/deep-learning | mit |
Below, feel free to choose images and see how the trained classifier predicts the flowers in them. | test_img_path = 'flower_photos/roses/10894627425_ec76bbc757_n.jpg'
test_img = imread(test_img_path)
plt.imshow(test_img)
# Run this cell if you don't have a vgg graph built
if 'vgg' in globals():
print('"vgg" object already exists. Will not create again.')
else:
#create vgg
with tf.Session() as sess:
input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])
vgg = vgg16.Vgg16()
vgg.build(input_)
with tf.Session() as sess:
img = utils.load_image(test_img_path)
img = img.reshape((1, 224, 224, 3))
feed_dict = {input_: img}
code = sess.run(vgg.relu6, feed_dict=feed_dict)
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
feed = {inputs_: code}
prediction = sess.run(predicted, feed_dict=feed).squeeze()
plt.imshow(test_img)
plt.barh(np.arange(5), prediction)
_ = plt.yticks(np.arange(5), lb.classes_) | transfer-learning/Transfer_Learning.ipynb | efoley/deep-learning | mit |
Trend and Anomaly Analyses of Long-term Tempro-Spatial Dataset
Trend and anomaly analyses are widely used in atmospheric and oceanographic research for detecting long term change.
An example is presented in this notebook of a numerical analysis of Sea Surface Temperature (SST), in which the global change rate per decade is calculated from 1982 to 2016. Moreover, its area-weighted global monthly SST anomaly time series is presented too. All of the calculation steps are listed one by one.
Data Source
NOAA Optimum Interpolation (OI) Sea Surface Temperature (SST) V2 is downloaded from https://www.esrl.noaa.gov/psd/data/gridded/data.noaa.oisst.v2.html.
Spatial Coverage:
* 1.0 degree latitude x 1.0 degree longitude global grid (180x360).
* 89.5N - 89.5S, 0.5E - 359.5E.
Because OISST is interpolated data, it covers both ocean and land. As a result, we have to use the land-ocean mask data at the same time, which is also available from the website.
We select SST from 1982 to 2016.
1. Load basic libs | %matplotlib inline
from pylab import *
import numpy as np
import datetime
from netCDF4 import netcdftime
from netCDF4 import Dataset as netcdf # netcdf4-python module
from netcdftime import utime
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import matplotlib.dates as mdates
from matplotlib.dates import MonthLocator, WeekdayLocator, DateFormatter
import matplotlib.ticker as ticker
from matplotlib.pylab import rcParams
rcParams['figure.figsize'] = 15, 6
import warnings
warnings.simplefilter('ignore') | ex15-Trend and Anomaly Analyses of Long-term Tempro-Spatial Dataset.ipynb | royalosyin/Python-Practical-Application-on-Climate-Variability-Studies | mit |
2. Read SST data and pick variables
2.1 Read SST | ncset = netcdf(r'data/sst.mnmean.nc')
lons = ncset['lon'][:]
lats = ncset['lat'][:]
sst = ncset['sst'][1:421,:,:] # 1982-2016 to make it divisible by 12
nctime = ncset['time'][1:421]
t_unit = ncset['time'].units
try :
t_cal =ncset['time'].calendar
except AttributeError : # Attribute doesn't exist
t_cal = u"gregorian" # or standard
nt, nlat, nlon = sst.shape
ngrd = nlon*nlat | ex15-Trend and Anomaly Analyses of Long-term Tempro-Spatial Dataset.ipynb | royalosyin/Python-Practical-Application-on-Climate-Variability-Studies | mit |
2.2 Parse time | utime = netcdftime.utime(t_unit, calendar = t_cal)
datevar = utime.num2date(nctime)
print(datevar.shape)
datevar[0:5] | ex15-Trend and Anomaly Analyses of Long-term Tempro-Spatial Dataset.ipynb | royalosyin/Python-Practical-Application-on-Climate-Variability-Studies | mit |
2.3 Read mask (1=ocean, 0=land) | lmfile = 'data\lsmask.nc'
lmset = netcdf(lmfile)
lsmask = lmset['mask'][0,:,:]
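# shift values: ocean (1) -> 0 (kept), land (0) -> -1 (truthy, so masked out below)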
lsmask = lsmask-1
num_repeats = nt
lsm = np.stack([lsmask]*num_repeats,axis=-1).transpose((2,0,1))
lsm.shape | ex15-Trend and Anomaly Analyses of Long-term Tempro-Spatial Dataset.ipynb | royalosyin/Python-Practical-Application-on-Climate-Variability-Studies | mit |
2.4 Mask out Land | sst = np.ma.masked_array(sst, mask=lsm) | ex15-Trend and Anomaly Analyses of Long-term Tempro-Spatial Dataset.ipynb | royalosyin/Python-Practical-Application-on-Climate-Variability-Studies | mit |
3. Trend Analysis
3.1 Linear trend calculation | #import scipy.stats as stats
sst_grd = sst.reshape((nt, ngrd), order='F')
x = np.linspace(1,nt,nt)#.reshape((nt,1))
sst_rate = np.empty((ngrd,1))
sst_rate[:,:] = np.nan
for i in range(ngrd):
y = sst_grd[:,i]
if(not np.ma.is_masked(y)):
z = np.polyfit(x, y, 1)
sst_rate[i,0] = z[0]*120.0
#slope, intercept, r_value, p_value, std_err = stats.linregress(x, sst_grd[:,i])
#sst_rate[i,0] = slope*120.0
sst_rate = sst_rate.reshape((nlat,nlon), order='F') | ex15-Trend and Anomaly Analyses of Long-term Tempro-Spatial Dataset.ipynb | royalosyin/Python-Practical-Application-on-Climate-Variability-Studies | mit |
3.2 Visualize SST trend | m = Basemap(projection='cyl', llcrnrlon=min(lons), llcrnrlat=min(lats),
urcrnrlon=max(lons), urcrnrlat=max(lats))
x, y = m(*np.meshgrid(lons, lats))
clevs = np.linspace(-0.5, 0.5, 21)
cs = m.contourf(x, y, sst_rate.squeeze(), clevs, cmap=plt.cm.RdBu_r)
m.drawcoastlines()
#m.fillcontinents(color='#000000',lake_color='#99ffff')
cb = m.colorbar(cs)
cb.set_label('SST Changing Rate ($^oC$/decade)', fontsize=12)
plt.title('SST Changing Rate ($^oC$/decade)', fontsize=16) | ex15-Trend and Anomaly Analyses of Long-term Tempro-Spatial Dataset.ipynb | royalosyin/Python-Practical-Application-on-Climate-Variability-Studies | mit |
4. Anomaly analysis
4.1 Convert sst data into nyear x 12 x ngrd | sst_grd_ym = sst.reshape((12, nt/12, ngrd), order='F').transpose((1,0,2))
sst_grd_ym.shape | ex15-Trend and Anomaly Analyses of Long-term Tempro-Spatial Dataset.ipynb | royalosyin/Python-Practical-Application-on-Climate-Variability-Studies | mit |
4.2 Calculate seasonal cycle | sst_grd_clm = np.mean(sst_grd_ym, axis=0)
sst_grd_clm.shape | ex15-Trend and Anomaly Analyses of Long-term Tempro-Spatial Dataset.ipynb | royalosyin/Python-Practical-Application-on-Climate-Variability-Studies | mit |
4.3 Remove seasonal cycle | sst_grd_anom = (sst_grd_ym - sst_grd_clm).transpose((1,0,2)).reshape((nt, nlat, nlon), order='F')
sst_grd_anom.shape | ex15-Trend and Anomaly Analyses of Long-term Tempro-Spatial Dataset.ipynb | royalosyin/Python-Practical-Application-on-Climate-Variability-Studies | mit |
4.4 Calculate area-weights
4.4.1 Make sure lat-lon grid direction | print(lats[0:12])
print(lons[0:12]) | ex15-Trend and Anomaly Analyses of Long-term Tempro-Spatial Dataset.ipynb | royalosyin/Python-Practical-Application-on-Climate-Variability-Studies | mit |
4.4.2 Calculate area-weights with cos(lats) | lonx, latx = np.meshgrid(lons, lats)
weights = np.cos(latx * np.pi / 180.)
print(weights.shape) | ex15-Trend and Anomaly Analyses of Long-term Tempro-Spatial Dataset.ipynb | royalosyin/Python-Practical-Application-on-Climate-Variability-Studies | mit |
4.4.3 Calculate area-weighted averages for Global, NH and SH | sst_glb_avg = np.zeros(nt)
sst_nh_avg = np.zeros(nt)
sst_sh_avg = np.zeros(nt)
for it in np.arange(nt):
sst_glb_avg[it] = np.ma.average(sst_grd_anom[it, :], weights=weights)
sst_nh_avg[it] = np.ma.average(sst_grd_anom[it,0:nlat/2,:], weights=weights[0:nlat/2,:])
sst_sh_avg[it] = np.ma.average(sst_grd_anom[it,nlat/2:nlat,:], weights=weights[nlat/2:nlat,:]) | ex15-Trend and Anomaly Analyses of Long-term Tempro-Spatial Dataset.ipynb | royalosyin/Python-Practical-Application-on-Climate-Variability-Studies | mit |
5. Visualize monthly SST anomaly time series | fig, ax = plt.subplots(1, 1 , figsize=(15,5))
ax.plot(datevar, sst_glb_avg, color='b', linewidth=2, label='GLB')
ax.plot(datevar, sst_nh_avg, color='r', linewidth=2, label='NH')
ax.plot(datevar, sst_sh_avg, color='g', linewidth=2, label='SH')
ax.axhline(0, linewidth=1, color='k')
ax.legend()
ax.set_title('Monthly SST Anomaly Time Series (1982 - 2016)', fontsize=16)
ax.set_xlabel('Month/Year #', fontsize=12)
ax.set_ylabel('$^oC$', fontsize=12)
ax.set_ylim(-0.6, 0.6)
fig.set_figheight(9)
# rotate and align the tick labels so they look better
fig.autofmt_xdate()
# use a more precise date string for the x axis locations in the toolbar
ax.fmt_xdata = mdates.DateFormatter('%Y') | ex15-Trend and Anomaly Analyses of Long-term Tempro-Spatial Dataset.ipynb | royalosyin/Python-Practical-Application-on-Climate-Variability-Studies | mit |
defaultdict
A defaultdict will always return a value, even when you query a key that doesn't exist. The callable you supply ahead of time is called the default factory. Querying a missing key also inserts a new key/value pair, whose value is produced by that factory. | from collections import defaultdict
d = {'k1':1}
d["k1"]
d["k2"] # this raises a KeyError because the "k2" key doesn't exist
d = defaultdict(object)
d['one'] # this key doesn't exist, but accessing it creates a new entry {'one': object}
d['two'] # same here: this adds {'two': object}
for k, v in d.items():
print "key:", k, "item:", v
e = defaultdict(lambda: 0) # lambda just returns 0 here
e['four']
e['twelve']
def error():
return 'error'
f = defaultdict(error) # the default factory must be callable or None
f['new']
f.items() | Advanced Modules/Collections Module.ipynb | spacedrabbit/PythonBootcamp | mit |
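A common practical use of defaultdict is grouping: with list as the factory, a missing key starts out as an empty list you can append to directly (hypothetical data):

g = defaultdict(list)
pairs = [('fruit', 'apple'), ('veg', 'carrot'), ('fruit', 'pear')]
for category, item in pairs:
    g[category].append(item)  # no KeyError on the first access to a category

print g['fruit']
print g['veg']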
OrderedDict
A dictionary subclass that remembers the order in which items were added. | d_norm = {}
d_norm['a'] = 1
d_norm['b'] = 2
d_norm['c'] = 3
d_norm['d'] = 4
d_norm['e'] = 5
# order isn't preserved since a dict is just a mapping
for k,v in d_norm.items():
print k,v
from collections import OrderedDict
d_ordered = OrderedDict()
d_ordered['a'] = 1
d_ordered['b'] = 2
d_ordered['c'] = 3
d_ordered['d'] = 4
d_ordered['e'] = 5
for k,v in d_ordered.items():
print k, v
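# One subtlety worth knowing (hypothetical example): equality between two
# OrderedDicts is order-sensitive, while comparison against a plain dict ignores order
od1 = OrderedDict([('a', 1), ('b', 2)])
od2 = OrderedDict([('b', 2), ('a', 1)])
print od1 == od2               # False: same items, different order
print od1 == {'a': 1, 'b': 2}  # True: plain-dict comparison ignores order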
from collections import namedtuple
# this is kind of like creating a new class on the fly
# the first parameter of a namedtuple is the name of the class/tuple type
# the second parameter is a space-delimeted list of properties of the tuple
Dog = namedtuple('Dog','age breed name')
sam = Dog(age=2, breed='Lab', name='Sammy')
print sam.age
print sam.breed
print sam.name
Catzz = namedtuple('Cat', 'fur claws name')
mittens = Catzz(fur='fuzzy', claws='sharp', name='Mittens')
print type(mittens)
print type(Catzz)
print mittens[0]
print mittens.claws
print mittens.name
print mittens.count('fuzzy')
print mittens.index('sharp') | Advanced Modules/Collections Module.ipynb | spacedrabbit/PythonBootcamp | mit |
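namedtuples also ship with helper methods: _asdict() converts the tuple to a dictionary, and _replace() returns a modified copy (the tuple itself is immutable). A quick sketch reusing mittens from above:

print mittens._asdict()                       # OrderedDict of field/value pairs
whiskers = mittens._replace(name='Whiskers')  # a new tuple; the original is unchanged
print whiskers.name
print mittens.name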
Apache ORC Reader
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/io/tutorials/orc"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/io/blob/master/docs/tutorials/orc.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/io/blob/master/docs/tutorials/orc.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/io/docs/tutorials/orc.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
Overview
Apache ORC is a popular columnar storage format. The tensorflow-io package provides a default implementation for reading Apache ORC files.
Setup
Install required packages, and restart runtime | !pip install tensorflow-io
import tensorflow as tf
import tensorflow_io as tfio | site/en-snapshot/io/tutorials/orc.ipynb | tensorflow/docs-l10n | apache-2.0 |
Download a sample dataset file in ORC
The dataset you will use here is the Iris Data Set from UCI. The data set contains 3 classes of 50 instances each, where each class refers to a type of iris plant. It has 4 attributes: (1) sepal length, (2) sepal width, (3) petal length, (4) petal width, and the last column contains the class label. | !curl -OL https://github.com/tensorflow/io/raw/master/tests/test_orc/iris.orc
!ls -l iris.orc | site/en-snapshot/io/tutorials/orc.ipynb | tensorflow/docs-l10n | apache-2.0 |
Create a dataset from the file | dataset = tfio.IODataset.from_orc("iris.orc", capacity=15).batch(1) | site/en-snapshot/io/tutorials/orc.ipynb | tensorflow/docs-l10n | apache-2.0 |
Examine the dataset: | for item in dataset.take(1):
print(item)
| site/en-snapshot/io/tutorials/orc.ipynb | tensorflow/docs-l10n | apache-2.0 |
Let's walk through an end-to-end example of tf.keras model training with an ORC dataset, based on the Iris dataset.
Data preprocessing
Configure which columns are features, and which column is label: | feature_cols = ["sepal_length", "sepal_width", "petal_length", "petal_width"]
label_cols = ["species"]
# select feature columns
feature_dataset = tfio.IODataset.from_orc("iris.orc", columns=feature_cols)
# select label columns
label_dataset = tfio.IODataset.from_orc("iris.orc", columns=label_cols) | site/en-snapshot/io/tutorials/orc.ipynb | tensorflow/docs-l10n | apache-2.0 |
A utility function to map species strings to numeric labels for model training: | vocab_init = tf.lookup.KeyValueTensorInitializer(
keys=tf.constant(["virginica", "versicolor", "setosa"]),
values=tf.constant([0, 1, 2], dtype=tf.int64))
vocab_table = tf.lookup.StaticVocabularyTable(
vocab_init,
num_oov_buckets=4)
label_dataset = label_dataset.map(vocab_table.lookup)
dataset = tf.data.Dataset.zip((feature_dataset, label_dataset))
dataset = dataset.batch(1)
def pack_features_vector(features, labels):
"""Pack the features into a single array."""
features = tf.stack(list(features), axis=1)
return features, labels
dataset = dataset.map(pack_features_vector) | site/en-snapshot/io/tutorials/orc.ipynb | tensorflow/docs-l10n | apache-2.0 |
Build, compile and train the model
Finally, you are ready to build the model and train it! You will build a 3-layer Keras model to predict the class of the iris plant from the dataset you just processed. | model = tf.keras.Sequential(
[
tf.keras.layers.Dense(
10, activation=tf.nn.relu, input_shape=(4,)
),
tf.keras.layers.Dense(10, activation=tf.nn.relu),
tf.keras.layers.Dense(3),
]
)
model.compile(optimizer="adam", loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=["accuracy"])
model.fit(dataset, epochs=5) | site/en-snapshot/io/tutorials/orc.ipynb | tensorflow/docs-l10n | apache-2.0 |
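As a quick sanity check you could evaluate the fitted model; note this reuses the training data, so it is only a sketch, not a proper held-out evaluation:

loss, accuracy = model.evaluate(dataset)
print("loss: {:.4f}, accuracy: {:.4f}".format(loss, accuracy))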
As always, let's do imports and initialize a logger and a new Bundle. See Building a System for more details. | %matplotlib inline
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
b = phoebe.default_binary() | 2.2/tutorials/requiv_crit_detached.ipynb | phoebe-project/phoebe2-docs | gpl-3.0 |
Detached Systems
Detached systems are the default case for default_binary. The requiv_max parameter is constrained to show the maximum value for requiv before the system will begin overflowing at periastron. | b['requiv_max@component@primary']
b['requiv_max@constraint@primary'] | 2.2/tutorials/requiv_crit_detached.ipynb | phoebe-project/phoebe2-docs | gpl-3.0 |
We can see that the default system is well within this critical value by printing all radii and critical radii. | print(b.filter(qualifier='requiv*', context='component')) | 2.2/tutorials/requiv_crit_detached.ipynb | phoebe-project/phoebe2-docs | gpl-3.0 |
If we increase 'requiv' past the critical point, we'll receive a warning from the logger and would get an error if attempting to call b.run_compute(). | b['requiv@primary'] = 2.2
print(b.run_checks()) | 2.2/tutorials/requiv_crit_detached.ipynb | phoebe-project/phoebe2-docs | gpl-3.0 |
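Lowering requiv back below requiv_max clears the warning; a minimal sketch:

b['requiv@primary'] = 1.0  # safely below the critical value again
print(b.run_checks())      # the checks should now pass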
Here, we load the DataFrame for the full training set and repeat the classification approach identified in step 2. | %%time
raw_input = pd.read_pickle('train_full.pkl')
raw_input.head()
raw_input.info() | text_classification_and_clustering/step_3_classification_of_full_dataset.ipynb | dipanjank/ml | gpl-3.0 |
Check for Class Imbalance | level_counts = raw_input.level.value_counts().sort_index()
group_counts = raw_input.group.value_counts().sort_index()
_, ax = plt.subplots(1, 2, figsize=(10, 5))
_ = level_counts.plot(kind='bar', title='Feature Instances per Level', ax=ax[0], rot=0)
_ = group_counts.plot(kind='bar', title='Feature Instances per Group', ax=ax[1], rot=0)
plt.tight_layout() | text_classification_and_clustering/step_3_classification_of_full_dataset.ipynb | dipanjank/ml | gpl-3.0 |
Level Classification Based on Text
Here we apply the same approach: convert the text to bag-of-words features and then use a maximum entropy classifier. The difference is that we are now running on the full dataset, which is much larger. The optimizer now requires more steps to converge, so we set the max_iter parameter of LogisticRegression to 1000. We address the label imbalance by setting class_weight='balanced'. | import nltk
nltk.download('stopwords')
nltk.download('punkt')
from nltk.corpus import stopwords
en_stopwords = set(stopwords.words('english'))
print(en_stopwords)
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
def display_results(y, y_pred):
    """Given some predictions y_pred for a target label y,
    display the precision/recall/f1 score and the confusion matrix."""
report = classification_report(y_pred, y)
print(report)
level_values = y.unique()
level_values.sort()
cm = confusion_matrix(y_true=y, y_pred=y_pred.values, labels=level_values)
cm = pd.DataFrame(index=level_values, columns=level_values, data=cm)
fig, ax = plt.subplots(1, 1, figsize=(12, 10))
ax = sns.heatmap(cm, annot=True, ax=ax, fmt='d')
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_predict, StratifiedKFold
def build_pipeline():
"""Return the combination of a Feature Extractor and a LogisticRegression model in a ``Pipeline``. """
counter = CountVectorizer(
lowercase=True,
stop_words=en_stopwords,
ngram_range=(1, 1),
min_df=5,
max_df=0.4,
binary=True)
model = LogisticRegression(
# maximize log-likelihood + square norm of parameters
penalty='l2',
# steps required for the L-BFGS optimizer to converge, found by trial and error
max_iter=1000,
# use softmax instead of one-vs-rest style classification
multi_class='multinomial',
# use L-BFGS optimizer
solver='lbfgs',
# This prints out a warning if the optimizer hasn't converged
verbose=True,
# to handle the class imbalance
# automatically adjust weights inversely proportional to
# class frequencies in the input data
class_weight='balanced',
random_state=4321)
pipeline = make_pipeline(counter, model)
return pipeline
def classify(input_df, target_label='level'):
"""
Build a classifier for the `target_label` column in the DataFrame `input_df` using the `text` column.
Return the (labels, predicted_labels) tuple.
Use a 10-fold Stratified K-fold cross-validator to generate the out-of-sample predictions."""
assert target_label in input_df.columns
pipeline = build_pipeline()
cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=1234)
X = input_df.text
y = input_df.loc[:, target_label]
y_pred = cross_val_predict(pipeline, X=X.values, y=y.values, cv=cv, n_jobs=10, verbose=2)
y_pred = pd.Series(index=input_df.index.copy(), data=y_pred)
return y.copy(), y_pred
%%time
levels, levels_predicted = classify(raw_input, target_label='level')
display_results(levels, levels_predicted) | text_classification_and_clustering/step_3_classification_of_full_dataset.ipynb | dipanjank/ml | gpl-3.0 |
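To see what the model keys on, one could inspect the strongest coefficients per class. A sketch (it refits the pipeline on the full training set, and the step names assume make_pipeline's lower-cased defaults):

pipeline = build_pipeline()
pipeline.fit(raw_input.text.values, raw_input.level.values)
counter = pipeline.named_steps['countvectorizer']
model = pipeline.named_steps['logisticregression']
vocab = np.array(counter.get_feature_names())
for class_label, coefs in zip(model.classes_, model.coef_):
    top_words = vocab[np.argsort(coefs)[-5:]]  # 5 most positive weights
    print(class_label, top_words)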
Group Classification Based on Text | %%time
groups, groups_predicted = classify(raw_input, target_label='group')
display_results(groups, groups_predicted) | text_classification_and_clustering/step_3_classification_of_full_dataset.ipynb | dipanjank/ml | gpl-3.0 |
Classification on Test Set
Finally, we report the performance of our classifier on both the level and group classification tasks using the test dataset. For this we re-build the model using the hyperparameters used above, and train it using the entire train dataset. | from functools import lru_cache
@lru_cache(maxsize=1)
def get_test_dataset():
return pd.read_pickle('test.pkl')
def report_test_perf(train_df, target_label='level'):
"""Produce classification report and confusion matrix on the test Dataset for a given ``target_label``."""
test_df = get_test_dataset()
assert target_label in train_df.columns
assert target_label in test_df.columns
# Train the model using the entire training dataset
pipeline = build_pipeline()
X_train, y_train = train_df.text, train_df.loc[:, target_label]
pipeline = pipeline.fit(X_train.values, y_train.values)
# Generate predictions using test data
X_test, y_test = test_df.text, test_df.loc[:, target_label]
predicted = pipeline.predict(X_test.values)
predicted = pd.Series(index=test_df.index,
data=predicted,
name='{}_pred'.format(target_label))
display_results(y_test, predicted)
| text_classification_and_clustering/step_3_classification_of_full_dataset.ipynb | dipanjank/ml | gpl-3.0 |
Level Classification on Test Set | %% time
train_df = raw_input
report_test_perf(train_df, 'level') | text_classification_and_clustering/step_3_classification_of_full_dataset.ipynb | dipanjank/ml | gpl-3.0 |
Group Classification on Test Set | %%time
report_test_perf(train_df, 'group') | text_classification_and_clustering/step_3_classification_of_full_dataset.ipynb | dipanjank/ml | gpl-3.0 |
Let's look at the symbol's data to see how well the recommender has to perform. | print('Sharpe ratio: {}\nCum. Ret.: {}\nAVG_DRET: {}\nSTD_DRET: {}\nFinal value: {}'.format(*value_eval(pd.DataFrame(data_in_df['Close'].iloc[STARTING_DAYS_AHEAD:]))))
# Simulate (with new envs, each time)
n_epochs = 4
for i in range(n_epochs):
tic = time()
env.reset(STARTING_DAYS_AHEAD)
results_list = sim.simulate_period(total_data_in_df,
SYMBOL,
agents[0],
starting_days_ahead=STARTING_DAYS_AHEAD,
possible_fractions=POSSIBLE_FRACTIONS,
verbose=False,
other_env=env)
toc = time()
print('Epoch: {}'.format(i))
print('Elapsed time: {} seconds.'.format((toc-tic)))
print('Random Actions Rate: {}'.format(agents[0].random_actions_rate))
show_results([results_list], data_in_df)
env.indicators['rsi'].scaler
data_in_df['Close'].isnull().sum()
env.reset(STARTING_DAYS_AHEAD)
results_list = sim.simulate_period(total_data_in_df,
SYMBOL, agents[0],
learn=False,
starting_days_ahead=STARTING_DAYS_AHEAD,
possible_fractions=POSSIBLE_FRACTIONS,
verbose=False,
other_env=env)
show_results([results_list], data_in_df, graph=True) | notebooks/prod/n08_simple_q_learner_fast_learner_full_training.ipynb | mtasende/Machine-Learning-Nanodegree-Capstone | mit |
Let's run the trained agent on the test set
First, a non-learning test: this scenario is worse than what is achievable (in fact, the Q-learner can learn from past samples in the test set without compromising causality). | TEST_DAYS_AHEAD = 20
env.set_test_data(total_data_test_df, TEST_DAYS_AHEAD)
tic = time()
results_list = sim.simulate_period(total_data_test_df,
SYMBOL,
agents[0],
learn=False,
starting_days_ahead=TEST_DAYS_AHEAD,
possible_fractions=POSSIBLE_FRACTIONS,
verbose=False,
other_env=env)
toc = time()
print('Epoch: {}'.format(i))
print('Elapsed time: {} seconds.'.format((toc-tic)))
print('Random Actions Rate: {}'.format(agents[0].random_actions_rate))
show_results([results_list], data_test_df, graph=True) | notebooks/prod/n08_simple_q_learner_fast_learner_full_training.ipynb | mtasende/Machine-Learning-Nanodegree-Capstone | mit |
And now a "realistic" test, in which the learner continues to learn from past samples in the test set (it even makes some random moves, though very few). | env.set_test_data(total_data_test_df, TEST_DAYS_AHEAD)
tic = time()
results_list = sim.simulate_period(total_data_test_df,
SYMBOL,
agents[0],
learn=True,
starting_days_ahead=TEST_DAYS_AHEAD,
possible_fractions=POSSIBLE_FRACTIONS,
verbose=False,
other_env=env)
toc = time()
print('Epoch: {}'.format(i))
print('Elapsed time: {} seconds.'.format((toc-tic)))
print('Random Actions Rate: {}'.format(agents[0].random_actions_rate))
show_results([results_list], data_test_df, graph=True) | notebooks/prod/n08_simple_q_learner_fast_learner_full_training.ipynb | mtasende/Machine-Learning-Nanodegree-Capstone | mit |
What are the metrics for "holding the position"? | print('Sharpe ratio: {}\nCum. Ret.: {}\nAVG_DRET: {}\nSTD_DRET: {}\nFinal value: {}'.format(*value_eval(pd.DataFrame(data_test_df['Close'].iloc[TEST_DAYS_AHEAD:]))))
import pickle
with open('../../data/simple_q_learner_fast_learner_full_training.pkl', 'wb') as best_agent:
pickle.dump(agents[0], best_agent) | notebooks/prod/n08_simple_q_learner_fast_learner_full_training.ipynb | mtasende/Machine-Learning-Nanodegree-Capstone | mit |
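The saved agent can later be restored without retraining; a minimal sketch:

with open('../../data/simple_q_learner_fast_learner_full_training.pkl', 'rb') as saved_agent:
    restored_agent = pickle.load(saved_agent)
print(restored_agent.random_actions_rate)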
Note that, unlike the simple method, each scenario (seed) produces a different outcome; that is, the number of insured individuals who retire differs: | print(lista_nap)
If we compute the mean, we get a value very close to that of the deterministic method. | media = np.mean(lista_nap)
print('Mean: {}'.format(media))
However, with different scenarios we can compute dispersion measures, such as the standard deviation. | std = np.std(lista_nap)
print('Standard deviation: {}'.format(std))
Visualizing in a chart: | import matplotlib.pyplot as plt
%matplotlib inline
medias = [350] * len(seeds)
fig, ax = plt.subplots()
ax.plot(seeds, lista_nap, '--', linewidth=2, label='Probabilistic method')
ax.plot(seeds, medias, label='Deterministic method')
ax.set_ylabel('Number of retirees')
ax.set_xlabel('Seed')
ax.set_title('Stock calculation using different methods')
ax.legend()
plt.show() | notebooks/CalculoEstoqueMetodoProb.ipynb | cpatrickalves/simprev | gpl-3.0 |
Applying the probabilistic method to the stock calculation (where the probabilities are applied), each seed yields a different projection/result.
On average the result matches the one obtained with the original method, but we get several curves or points for each year, which lets us compute dispersion measures such as the standard deviation and confidence intervals for the revenue and expenditure results. | np.var(lista_nap)
| notebooks/CalculoEstoqueMetodoProb.ipynb | cpatrickalves/simprev | gpl-3.0 |
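For example, a normal-approximation 95% confidence interval for the mean number of retirees can be derived from the simulated seeds (a sketch using lista_nap and media from above):

n = len(lista_nap)
half_width = 1.96 * np.std(lista_nap) / np.sqrt(n)
print('95% CI: [{:.2f}, {:.2f}]'.format(media - half_width, media + half_width))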
Formula for standard deviation
$$\sqrt{\frac{1}{N} \sum_{i=1}^N (x_i - \overline{x})^2}$$ | distribution = np.random.normal(0.75,size=1000)
np.sqrt(np.sum((np.mean(distribution)-distribution)**2)/len(distribution))
np.std(distribution)
import scipy.stats as stats
stats.kurtosis(distribution)
stats.skew(distribution)
chi_squared_df2 = np.random.chisquare(2, size=10000)  # 2 degrees of freedom, matching the plot label below
stats.skew(chi_squared_df2)
chi_squared_df5 = np.random.chisquare(5, size=10000)
stats.skew(chi_squared_df5)
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
output = plt.hist([chi_squared_df2,chi_squared_df5], bins=200, histtype='step',
label=['2 degrees of freedom','5 degrees of freedom'])
plt.legend(loc='upper right')
| Data_Science_Course/Michigan Data Analysis Course/0 Introduction to Data Science in Python/Week4/Week+4.ipynb | Z0m6ie/Zombie_Code | mit |
Hypothesis Testing | df = pd.read_csv('grades.csv')
df.head()
len(df)
early = df[df['assignment1_submission'] <= '2015-12-31']
late = df[df['assignment1_submission'] > '2015-12-31']
early.mean()
late.mean()
from scipy import stats
stats.ttest_ind?
stats.ttest_ind(early['assignment1_grade'], late['assignment1_grade'])
stats.ttest_ind(early['assignment2_grade'], late['assignment2_grade'])
stats.ttest_ind(early['assignment3_grade'], late['assignment3_grade']) | Data_Science_Course/Michigan Data Analysis Course/0 Introduction to Data Science in Python/Week4/Week+4.ipynb | Z0m6ie/Zombie_Code | mit |
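Since three separate t-tests were run, a cautious analyst might guard against false positives with a multiple-comparisons correction; a minimal Bonferroni sketch:

alpha = 0.05
num_tests = 3
# each individual p-value must fall below this stricter threshold
print('Bonferroni-corrected alpha:', alpha / num_tests)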
Let's point the distributed client to the Dask cluster on Coiled and output the link to the dashboard: | from dask.distributed import Client
client = Client(cluster)
print('Dashboard:', client.dashboard_link) | dask/create-cluster.ipynb | koverholt/notebooks | bsd-3-clause |
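To verify the workers are reachable, you could submit a small computation before running real workloads (a minimal sketch):

import dask.array as da

# sum a 10,000 x 10,000 array of ones across the cluster's workers
x = da.ones((10000, 10000), chunks=(1000, 1000))
print(x.sum().compute())  # expect 100000000.0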
Python Language Version
from platform import python_version
print('Python version used in this Jupyter Notebook:', python_version())
Exercises | # Exercise 1 - Create a list with 3 elements and compute the cube (third power) of each element.
list1 = [3,4,5]
quadrado = [item**3 for item in list1]
print(quadrado)
# Exercise 2 - Rewrite the code below using the map() function. The final result must be the same!
palavras = 'The Data Science Academy offers the best data analysis courses in Brazil'.split()
resultado = [[w.upper(), w.lower(), len(w)] for w in palavras]
for i in resultado:
print (i)
resultado = map(lambda w: [w.upper(), w.lower(), len(w)], palavras)
for i in resultado:
print (i)
# Exercise 3 - Compute the transpose of the matrix below.
# If you don't know what a matrix transpose is, visit this link: https://pt.wikipedia.org/wiki/Matriz_transposta
# The matrix transpose is a fundamental concept in building artificial neural networks, the basis of AI systems.
matrix = [[1, 2],[3,4],[5,6],[7,8]]
transpose = [[row[i] for row in matrix] for i in range(2)]
print(transpose)
# Exercise 4 - Create two functions, one to square a number and another to cube it.
# Apply both functions to the elements of the list below.
# Note: the two functions must be applied simultaneously.
lista = [0, 1, 2, 3, 4]
def square(x):
return (x**2)
def cube(x):
return (x**3)
funcs = [square, cube]
for i in lista:
valor = map(lambda x: x(i), funcs)
print(list((valor)))
# Exercise 5 - Below you will find two lists. Raise each element of listaA
# to the power of the corresponding element in listaB.
listaA = [2, 3, 4]
listaB = [10, 11, 12]
list(map(pow, listaA, listaB))
# Exercise 6 - Considering the range of values below, use the filter() function to return only the negative values.
range(-5, 5)
list(filter((lambda x: x < 0), range(-5,5)))
# Exercise 7 - Using the filter() function, find the values that are common to the two lists below.
a = [1,2,3,5,7,9]
b = [2,3,5,6,7,8]
print (list(filter(lambda x: x in a, b)))
# Exercise 8 - Consider the code below. Obtain the same result using the time package.
# Don't know the time package? Look it up!
import datetime
print (datetime.datetime.now().strftime("%d/%m/%Y %H:%M"))
import time
print (time.strftime("%d/%m/%Y %H:%M"))
# Exercise 9 - Consider the two dictionaries below.
# Create a third dictionary with the keys from dictionary 1 and the values from dictionary 2.
dict1 = {'a':1,'b':2}
dict2 = {'c':4,'d':5}
def trocaValores(d1, d2):
dicTemp = {}
for d1key, d2val in zip(d1,d2.values()):
dicTemp[d1key] = d2val
return dicTemp
dict3 = trocaValores(dict1, dict2)
print(dict3)
# Exercise 10 - Consider the list below and return only the elements whose index is greater than 5.
lista = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']
for indice, valor in enumerate(lista):
if indice <= 5:
continue
else:
print (valor) | Cap04/Notebooks/DSA-Python-Cap04-Exercicios-Solucao.ipynb | dsacademybr/PythonFundamentos | gpl-3.0 |
Explore event-related dynamics for specific frequency bands
The objective is to show you how to explore spectrally localized
effects. For this purpose we adapt the method described in [1]_ and use it on
the somato dataset. The idea is to track the band-limited temporal evolution
of spatial patterns by using the :term:`Global Field Power (GFP) <GFP>`.
We first bandpass filter the signals and then apply a Hilbert transform. To
reveal oscillatory activity the evoked response is then subtracted from every
single trial. Finally, we rectify the signals prior to averaging across trials
by taking the magnitude of the analytic signal.
Then the :term:`GFP` is computed as described in [2]_, using the sum of the
squares but without normalization by the rank.
Baselining is subsequently applied to make the :term:`GFPs <GFP>` comparable
between frequencies.
The procedure is then repeated for each frequency band of interest and
all :term:`GFPs <GFP>` are visualized. To estimate uncertainty, non-parametric
confidence intervals are computed as described in [3]_ across channels.
The advantage of this method over summarizing the Space x Time x Frequency
output of a Morlet Wavelet in frequency bands is relative speed and, more
importantly, the clear-cut comparability of the spectral decomposition (the
same type of filter is used across all bands).
We will use this dataset: `somato-dataset`
References
.. [1] Hari R. and Salmelin R. Human cortical oscillations: a neuromagnetic
view through the skull (1997). Trends in Neuroscience 20 (1),
pp. 44-49.
.. [2] Engemann D. and Gramfort A. (2015) Automated model selection in
covariance estimation and spatial whitening of MEG and EEG signals,
vol. 108, 328-342, NeuroImage.
.. [3] Efron B. and Hastie T. Computer Age Statistical Inference (2016).
Cambridge University Press, Chapter 11.2. | # Authors: Denis A. Engemann <[email protected]>
# Stefan Appelhoff <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import somato
from mne.baseline import rescale
from mne.stats import bootstrap_confidence_interval | 0.20/_downloads/05c57a644672d33707fd1264df7f5617/plot_time_frequency_global_field_power.ipynb | mne-tools/mne-tools.github.io | bsd-3-clause |
Set parameters | data_path = somato.data_path()
subject = '01'
task = 'somato'
raw_fname = op.join(data_path, 'sub-{}'.format(subject), 'meg',
'sub-{}_task-{}_meg.fif'.format(subject, task))
# let's explore some frequency bands
iter_freqs = [
('Theta', 4, 7),
('Alpha', 8, 12),
('Beta', 13, 25),
('Gamma', 30, 45)
] | 0.20/_downloads/05c57a644672d33707fd1264df7f5617/plot_time_frequency_global_field_power.ipynb | mne-tools/mne-tools.github.io | bsd-3-clause |
We create average power time courses for each frequency band | # set epoching parameters
event_id, tmin, tmax = 1, -1., 3.
baseline = None
# get the header to extract events
raw = mne.io.read_raw_fif(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')
frequency_map = list()
for band, fmin, fmax in iter_freqs:
# (re)load the data to save memory
raw = mne.io.read_raw_fif(raw_fname, preload=True)
raw.pick_types(meg='grad', eog=True) # we just look at gradiometers
# bandpass filter
raw.filter(fmin, fmax, n_jobs=1, # use more jobs to speed up.
l_trans_bandwidth=1, # make sure filter params are the same
h_trans_bandwidth=1) # in each band and skip "auto" option.
# epoch
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, baseline=baseline,
reject=dict(grad=4000e-13, eog=350e-6),
preload=True)
# remove evoked response
epochs.subtract_evoked()
# get analytic signal (envelope)
epochs.apply_hilbert(envelope=True)
frequency_map.append(((band, fmin, fmax), epochs.average()))
del epochs
del raw | 0.20/_downloads/05c57a644672d33707fd1264df7f5617/plot_time_frequency_global_field_power.ipynb | mne-tools/mne-tools.github.io | bsd-3-clause |
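With frequency_map in hand, the band-limited GFP described above (sum of squares across channels, no rank normalization) and its bootstrap confidence interval can be computed per band; a sketch using the rescale and bootstrap_confidence_interval helpers imported earlier:

def stat_fun(x):
    """Return the sum of squares across channels."""
    return np.sum(x ** 2, axis=0)

for (band, fmin, fmax), average in frequency_map:
    gfp = np.sum(average.data ** 2, axis=0)
    gfp = rescale(gfp, average.times, baseline=(None, 0))
    ci_low, ci_up = bootstrap_confidence_interval(average.data, random_state=0,
                                                  stat_fun=stat_fun)
    ci_low = rescale(ci_low, average.times, baseline=(None, 0))
    ci_up = rescale(ci_up, average.times, baseline=(None, 0))
    print(band, gfp.shape)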