repo_name (string, 6-112) | path (string, 4-204) | copies (string, 1-3) | size (string, 4-6) | content (string, 714-810k) | license (15 classes) |
---|---|---|---|---|---|
nschloe/meshio | src/meshio/stl/_stl.py | 1 | 7767 |
"""
I/O for the STL format, cf.
<https://en.wikipedia.org/wiki/STL_(file_format)>.
"""
import logging
import os
import numpy as np
from ..__about__ import __version__
from .._exceptions import ReadError, WriteError
from .._files import open_file
from .._helpers import register
from .._mesh import CellBlock, Mesh
def read(filename):
with open_file(filename, "rb") as f:
# Checking whether the file is in ASCII format is normally done by checking if
# the first 5 characters of the header are "solid".
# ```
# header = f.read(80).decode()
# ```
# Unfortunately, there are mesh files out there which are binary and still put
# "solid" there.
# A suggested alternative is to assume the file is binary, read num_triangles,
# and check whether the implied size matches the actual file size
# (https://stackoverflow.com/a/7394842/353337).
f.read(80)
num_triangles = np.fromfile(f, count=1, dtype=np.uint32)[0]
# for each triangle, one has 3 float32 (facet normal), 9 float32 (facet), and 1
# int16 (attribute count), 50 bytes in total
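# Quick check of that arithmetic (editorial sketch, comments only):
# 3 * 4 + 9 * 4 + 2 = 50 bytes per triangle, plus the 80-byte header and the
# 4-byte uint32 triangle count, which is where the 84 below comes from.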
is_binary = 84 + num_triangles * 50 == os.path.getsize(filename)
if is_binary:
out = _read_binary(f, num_triangles)
else:
# skip header
f.seek(0)
f.readline()
out = _read_ascii(f)
return out
# np.loadtxt is super slow
# Code adapted from <https://stackoverflow.com/a/8964779/353337>.
def iter_loadtxt(infile, skiprows=0, comments=["#"], dtype=float, usecols=None):
def iter_func():
items = None
for _ in range(skiprows):
try:
next(infile)
except StopIteration:
raise ReadError("EOF Skipped too many rows")
for line in infile:
line = line.decode().strip()
if line.startswith(comments):
continue
# keep only the last three whitespace-separated tokens (the coordinates)
items = line.split()[-3:]
usecols_ = range(len(items)) if usecols is None else usecols
for idx in usecols_:
yield dtype(items[idx])
if items is None:
raise ReadError()
iter_loadtxt.rowlength = len(items) if usecols is None else len(usecols)
data = np.fromiter(iter_func(), dtype=dtype)
return data.reshape((-1, iter_loadtxt.rowlength))
def _read_ascii(f):
# The file has the form
# ```
# solid foo
# facet normal 0.455194 -0.187301 -0.870469
# outer loop
# vertex 266.36 234.594 14.6145
# vertex 268.582 234.968 15.6956
# vertex 267.689 232.646 15.7283
# endloop
# endfacet
# # [...] more facets [...]
# endsolid
# ```
# In the interest of speed, don't verify the format and instead just skip the text.
# TODO Pandas is MUCH faster than numpy for i/o, see
# <https://stackoverflow.com/a/18260092/353337>.
# import pandas
# data = pandas.read_csv(
# f,
# skiprows=lambda row: row == 0 or (row - 1) % 7 in [0, 1, 5, 6],
# skipfooter=1,
# usecols=(1, 2, 3),
# )
# np.loadtxt is super slow
# data = np.loadtxt(
# f,
# comments=["solid", "facet", "outer loop", "endloop", "endfacet", "endsolid"],
# usecols=(1, 2, 3),
# )
data = iter_loadtxt(
f,
comments=("solid", "outer loop", "endloop", "endfacet", "endsolid"),
# usecols=(1, 2, 3),
)
if data.shape[0] % 4 != 0:
raise ReadError()
# split off the facet normals
facet_rows = np.zeros(len(data), dtype=bool)
facet_rows[0::4] = True
facet_normals = data[facet_rows]
data = data[~facet_rows]
facets = np.split(data, data.shape[0] // 3)
points, cells = data_from_facets(facets)
return Mesh(points, cells, cell_data={"facet_normals": [facet_normals]})
def data_from_facets(facets):
# Now, all facets contain the point coordinates. Try to identify individual
# points and build the data arrays.
pts = np.concatenate(facets)
# TODO equip `unique()` with a tolerance
# Use return_index so we can use sort on `idx` such that the order is
# preserved; see <https://stackoverflow.com/a/15637512/353337>.
_, idx, inv = np.unique(pts, axis=0, return_index=True, return_inverse=True)
k = np.argsort(idx)
points = pts[idx[k]]
inv_k = np.argsort(k)
cells = [CellBlock("triangle", inv_k[inv].reshape(-1, 3))]
return points, cells
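# Hedged illustration of the de-duplication above (comments only, toy values):
# pts = [[0,0,0], [1,0,0], [0,0,0]] has unique rows [[0,0,0], [1,0,0]] with
# inv = [0, 1, 0]; the argsort steps keep the first-seen order, so the cells
# index into the de-duplicated `points` array without reordering it.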
def _read_binary(f, num_triangles):
# for each triangle, one has 3 float32 (facet normal), 9 float32 (facet), and 1
# int16 (attribute count)
out = np.fromfile(
f,
count=num_triangles,
dtype=np.dtype(
[("normal", "f4", (3,)), ("facet", "f4", (3, 3)), ("attr count", "i2")]
),
)
# discard normals, attribute count
facets = out["facet"]
# if not np.all(out["attr count"] == 0):
# print(out["attr count"])
# raise ReadError("Nonzero attr count")
points, cells = data_from_facets(facets)
return Mesh(points, cells)
def write(filename, mesh, binary=False):
if "triangle" not in {block.type for block in mesh.cells}:
raise WriteError("STL can only write triangle cells. No triangle cells found.")
if len(mesh.cells) > 1:
invalid = {block.type for block in mesh.cells if block.type != "triangle"}
logging.warning(
"STL can only write triangle cells. Discarding {}.".format(
", ".join(invalid)
)
)
if mesh.points.shape[1] == 2:
logging.warning(
"STL requires 3D points, but 2D points given. "
"Appending 0 third component."
)
mesh.points = np.column_stack(
[mesh.points[:, 0], mesh.points[:, 1], np.zeros(mesh.points.shape[0])]
)
pts = mesh.points[mesh.get_cells_type("triangle")]
if "facet_normals" in mesh.cell_data:
normals = mesh.get_cell_data("facet_normals", "triangle")
else:
normals = np.cross(pts[:, 1] - pts[:, 0], pts[:, 2] - pts[:, 0])
nrm = np.sqrt(np.einsum("ij,ij->i", normals, normals))
normals = (normals.T / nrm).T
fun = _write_binary if binary else _write_ascii
fun(filename, pts, normals)
def _write_ascii(filename, pts, normals):
with open_file(filename, "w") as fh:
fh.write("solid\n")
for local_pts, normal in zip(pts, normals):
out = (
"\n".join(
[
"facet normal {} {} {}".format(*normal),
" outer loop",
" vertex {} {} {}".format(*local_pts[0]),
" vertex {} {} {}".format(*local_pts[1]),
" vertex {} {} {}".format(*local_pts[2]),
" endloop",
"endfacet",
]
)
+ "\n"
)
fh.write(out)
fh.write("endsolid\n")
def _write_binary(filename, pts, normals):
with open_file(filename, "wb") as fh:
# 80 character header data
msg = f"This file was generated by meshio v{__version__}."
msg += (79 - len(msg)) * "X"
msg += "\n"
fh.write(msg.encode())
fh.write(np.array(len(pts)).astype("<u4"))
dtype = np.dtype(
[
("normal", ("<f4", 3)),
("points", ("<f4", (3, 3))),
("attr", "<u2"),
]
)
a = np.empty(len(pts), dtype=dtype)
a["normal"] = normals
a["points"] = pts
a["attr"] = 0
a.tofile(fh)
register("stl", [".stl"], read, {"stl": write})
| mit |
yyjiang/scikit-learn | examples/missing_values.py | 233 | 3056 |
"""
======================================================
Imputing missing values before building an estimator
======================================================
This example shows that imputing the missing values can give better results
than discarding the samples containing any missing value.
Imputing does not always improve the predictions, so please check via cross-validation.
Sometimes dropping rows or using marker values is more effective.
Missing values can be replaced by the mean, the median or the most frequent
value using the ``strategy`` hyper-parameter.
The median is a more robust estimator for data with high magnitude variables
which could dominate results (otherwise known as a 'long tail').
Script output::
Score with the entire dataset = 0.56
Score without the samples containing missing values = 0.48
Score after imputation of the missing values = 0.55
In this case, imputing helps the classifier get close to the original score.
"""
import numpy as np
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.cross_validation import cross_val_score
rng = np.random.RandomState(0)
dataset = load_boston()
X_full, y_full = dataset.data, dataset.target
n_samples = X_full.shape[0]
n_features = X_full.shape[1]
# Estimate the score on the entire dataset, with no missing values
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_full, y_full).mean()
print("Score with the entire dataset = %.2f" % score)
# Add missing values in 75% of the lines
missing_rate = 0.75
n_missing_samples = int(np.floor(n_samples * missing_rate))  # int so it can size the arrays below
missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,
dtype=np.bool),
np.ones(n_missing_samples,
dtype=np.bool)))
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
# Estimate the score without the lines containing missing values
X_filtered = X_full[~missing_samples, :]
y_filtered = y_full[~missing_samples]
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_filtered, y_filtered).mean()
print("Score without the samples containing missing values = %.2f" % score)
# Estimate the score after imputation of the missing values
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = 0
y_missing = y_full.copy()
estimator = Pipeline([("imputer", Imputer(missing_values=0,
strategy="mean",
axis=0)),
("forest", RandomForestRegressor(random_state=0,
n_estimators=100))])
score = cross_val_score(estimator, X_missing, y_missing).mean()
print("Score after imputation of the missing values = %.2f" % score)
| bsd-3-clause |
emdodds/matching-pursuit | signalset.py | 1 | 3413 |
import numpy as np
import os
try:
from scipy.io import wavfile
from scipy import signal as scisig
import matplotlib.pyplot as plt
except:
# workaround for cluster python with tf but no plt
print("Can't import matplotlib or scipy.")
# adapted from scipy cookbook
lowcut = 100
highcut = 6000
def butter_bandpass(lowcut, highcut, fs, order=5):
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
b, a = scisig.butter(order, [low, high], btype='band')
return b, a
def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
b, a = butter_bandpass(lowcut, highcut, fs, order=order)
y = scisig.lfilter(b, a, data)
return y
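# Hedged usage sketch (commented out; the toy signal is an assumption):
# fs = 16000
# noise = np.random.randn(fs)                        # 1 s of white noise
# banded = butter_bandpass_filter(noise, lowcut, highcut, fs, order=5)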
class SignalSet:
def __init__(self,
sample_rate=16000,
data='../Data/speech_corpora/TIMIT/',
min_length=800,
seg_length=80000):
self.sample_rate = sample_rate
self.min_length = min_length
self.seg_length = seg_length
if isinstance(data, str):
self.load_from_folder(data)
else:
self.data = data
self.ndata = len(data)
def load_from_folder(self, folder='../Data/TIMIT/'):
min_length = self.min_length
files = os.listdir(folder)
file = None
self.data = []
for ff in files:
if ff.endswith('.wav'):
file = os.path.join(folder, ff)
rate, signal = wavfile.read(file)
if rate != self.sample_rate:
raise NotImplementedError('The signal in ' + ff +
' does not match the given' +
' sample rate.')
if signal.shape[0] > min_length:
# bandpass
signal = signal/signal.std()
signal = butter_bandpass_filter(signal, lowcut, highcut,
self.sample_rate, order=5)
self.data.append(signal)
self.ndata = len(self.data)
print("Found ", self.ndata, " files")
def rand_stim(self):
"""Get one random signal."""
which = np.random.randint(low=0, high=self.ndata)
signal = self.data[which]
excess = signal.shape[0] - self.seg_length
if excess < 0:
segment = signal
else:
where = np.random.randint(low=0, high=excess)
segment = signal[where:where+self.seg_length]
segment /= np.max(np.abs(segment)) # norm by max as in Smith & Lewicki
return segment
def write_sound(self, filename, signal):
signal /= np.max(signal)
wavfile.write(filename, self.sample_rate, signal)
def tiled_plot(self, stims):
"""Tiled plots of the given signals. Zeroth index is which signal.
Kind of slow, expect about 10s for 100 plots."""
nstim = stims.shape[0]
plotrows = int(np.sqrt(nstim))
plotcols = int(np.ceil(nstim/plotrows))
f, axes = plt.subplots(plotrows, plotcols, sharex=True, sharey=True)
for ii in range(nstim):
axes.flatten()[ii].plot(stims[ii])
f.subplots_adjust(hspace=0, wspace=0)
plt.setp([a.get_xticklabels() for a in f.axes[:-1]], visible=False)
plt.setp([a.get_yticklabels() for a in f.axes[:-1]], visible=False)
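# Hedged usage sketch (commented out; the data path is the class default and
# the stimulus count is an assumption):
# sigs = SignalSet(sample_rate=16000, data='../Data/speech_corpora/TIMIT/')
# segment = sigs.rand_stim()                  # one normalized random segment
# sigs.tiled_plot(np.stack([sigs.rand_stim() for _ in range(9)]))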
| mit |
hehongliang/tensorflow | tensorflow/contrib/learn/__init__.py | 18 | 2695 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""High level API for learning (DEPRECATED).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
@@BaseEstimator
@@Estimator
@@Trainable
@@Evaluable
@@KMeansClustering
@@ModeKeys
@@ModelFnOps
@@MetricSpec
@@PredictionKey
@@DNNClassifier
@@DNNEstimator
@@DNNRegressor
@@DNNLinearCombinedRegressor
@@DNNLinearCombinedEstimator
@@DNNLinearCombinedClassifier
@@DynamicRnnEstimator
@@LinearClassifier
@@LinearEstimator
@@LinearRegressor
@@LogisticRegressor
@@StateSavingRnnEstimator
@@SVM
@@SKCompat
@@Head
@@multi_class_head
@@multi_label_head
@@binary_svm_head
@@regression_head
@@poisson_regression_head
@@multi_head
@@no_op_train_fn
@@Experiment
@@ExportStrategy
@@TaskType
@@NanLossDuringTrainingError
@@RunConfig
@@evaluate
@@infer
@@run_feeds
@@run_n
@@train
@@extract_dask_data
@@extract_dask_labels
@@extract_pandas_data
@@extract_pandas_labels
@@extract_pandas_matrix
@@infer_real_valued_columns_from_input
@@infer_real_valued_columns_from_input_fn
@@read_batch_examples
@@read_batch_features
@@read_batch_record_features
@@read_keyed_batch_examples
@@read_keyed_batch_examples_shared_queue
@@read_keyed_batch_features
@@read_keyed_batch_features_shared_queue
@@InputFnOps
@@ProblemType
@@build_parsing_serving_input_fn
@@make_export_strategy
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn import *
# pylint: enable=wildcard-import
from tensorflow.contrib.learn.python.learn import learn_runner_lib as learn_runner
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['datasets', 'head', 'io', 'learn_runner', 'models',
'monitors', 'NotFittedError', 'ops', 'preprocessing',
'utils', 'graph_actions']
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 |
aabadie/scikit-learn | examples/svm/plot_custom_kernel.py | 43 | 1546 |
"""
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(X, Y):
"""
We create a custom kernel:

    k(X, Y) = X M Y.T,   where M = [[2, 0], [0, 1]]
"""
M = np.array([[2, 0], [0, 1.0]])
return np.dot(np.dot(X, M), Y.T)
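# Commented sanity check of the kernel above (toy vectors are assumptions):
# for x = [[1, 0]] and y = [[0, 1]], X M Y.T = [[1, 0]] @ [[2, 0], [0, 1]] @
# [[0], [1]] = [[0]], while x against itself gives [[2]].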
h = .02 # step size in the mesh
# we create an instance of SVM and fit our data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.title('3-Class classification using Support Vector Machine with custom'
' kernel')
plt.axis('tight')
plt.show()
| bsd-3-clause |
ZENGXH/scikit-learn | examples/tree/plot_tree_regression.py | 206 | 1476 |
"""
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with decision trees.
The :ref:`decision trees <tree>` are
used to fit a sine curve with additional noisy observations. As a result, they
learn local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn overly fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="data")
plt.plot(X_test, y_1, c="g", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, c="r", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
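# Hedged variant (commented out; the max_depth value is an assumption): a third,
# deeper regressor makes the overfitting described in the docstring even more
# visible if plotted before plt.show():
# regr_3 = DecisionTreeRegressor(max_depth=10).fit(X, y)
# plt.plot(X_test, regr_3.predict(X_test), c="b", label="max_depth=10", linewidth=2)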
| bsd-3-clause |
KSchoenleber/urbs | urbs/input.py | 1 | 25369 |
import pandas as pd
import os
import glob
from xlrd import XLRDError
import pyomo.core as pyomo
from .features.modelhelper import *
from .identify import *
def read_input(input_files, year):
"""Read Excel input file and prepare URBS input dict.
Reads the Excel spreadsheets that adhere to the structure shown in
mimo-example.xlsx. Column titles in 'Demand' and 'SupIm' are split, so that
'Site.Commodity' becomes the MultiIndex column ('Site', 'Commodity').
Args:
- input_files: path to a single Excel spreadsheet or the 'Input' folder
- year: current year for non-intertemporal problems
Returns:
a dict of up to 12 DataFrames
"""
if input_files == 'Input':
glob_input = os.path.join(input_files, '*.xlsx')
input_files = sorted(glob.glob(glob_input))
else:
input_files = [input_files]
gl = []
sit = []
com = []
pro = []
pro_com = []
tra = []
sto = []
dem = []
sup = []
bsp = []
ds = []
ef = []
for filename in input_files:
with pd.ExcelFile(filename) as xls:
global_prop = xls.parse('Global').set_index(['Property'])
# create support timeframe index
if ('Support timeframe' in
xls.parse('Global').set_index('Property').value):
support_timeframe = (
global_prop.loc['Support timeframe']['value'])
global_prop = (
global_prop.drop(['Support timeframe'])
.drop(['description'], axis=1))
else:
support_timeframe = year
global_prop = pd.concat([global_prop], keys=[support_timeframe],
names=['support_timeframe'])
gl.append(global_prop)
site = xls.parse('Site').set_index(['Name'])
site = pd.concat([site], keys=[support_timeframe],
names=['support_timeframe'])
sit.append(site)
commodity = (
xls.parse('Commodity')
.set_index(['Site', 'Commodity', 'Type']))
commodity = pd.concat([commodity], keys=[support_timeframe],
names=['support_timeframe'])
com.append(commodity)
process = xls.parse('Process').set_index(['Site', 'Process'])
process = pd.concat([process], keys=[support_timeframe],
names=['support_timeframe'])
pro.append(process)
process_commodity = (
xls.parse('Process-Commodity')
.set_index(['Process', 'Commodity', 'Direction']))
process_commodity = pd.concat([process_commodity],
keys=[support_timeframe],
names=['support_timeframe'])
pro_com.append(process_commodity)
demand = xls.parse('Demand').set_index(['t'])
demand = pd.concat([demand], keys=[support_timeframe],
names=['support_timeframe'])
# split columns by dots '.', so that 'DE.Elec' becomes
# the two-level column index ('DE', 'Elec')
demand.columns = split_columns(demand.columns, '.')
dem.append(demand)
supim = xls.parse('SupIm').set_index(['t'])
supim = pd.concat([supim], keys=[support_timeframe],
names=['support_timeframe'])
supim.columns = split_columns(supim.columns, '.')
sup.append(supim)
# collect data for the additional features
# Transmission, Storage, DSM
if 'Transmission' in xls.sheet_names:
transmission = (
xls.parse('Transmission')
.set_index(['Site In', 'Site Out',
'Transmission', 'Commodity']))
transmission = (
pd.concat([transmission], keys=[support_timeframe],
names=['support_timeframe']))
else:
transmission = pd.DataFrame()
tra.append(transmission)
if 'Storage' in xls.sheet_names:
storage = (
xls.parse('Storage')
.set_index(['Site', 'Storage', 'Commodity']))
storage = pd.concat([storage], keys=[support_timeframe],
names=['support_timeframe'])
else:
storage = pd.DataFrame()
sto.append(storage)
if 'DSM' in xls.sheet_names:
dsm = xls.parse('DSM').set_index(['Site', 'Commodity'])
dsm = pd.concat([dsm], keys=[support_timeframe],
names=['support_timeframe'])
else:
dsm = pd.DataFrame()
ds.append(dsm)
if 'Buy-Sell-Price'in xls.sheet_names:
buy_sell_price = xls.parse('Buy-Sell-Price').set_index(['t'])
buy_sell_price = pd.concat([buy_sell_price],
keys=[support_timeframe],
names=['support_timeframe'])
buy_sell_price.columns = \
split_columns(buy_sell_price.columns, '.')
else:
buy_sell_price = pd.DataFrame()
bsp.append(buy_sell_price)
if 'TimeVarEff' in xls.sheet_names:
eff_factor = (xls.parse('TimeVarEff').set_index(['t']))
eff_factor = pd.concat([eff_factor], keys=[support_timeframe],
names=['support_timeframe'])
eff_factor.columns = split_columns(eff_factor.columns, '.')
else:
eff_factor = pd.DataFrame()
ef.append(eff_factor)
# prepare input data
try:
global_prop = pd.concat(gl, sort=False)
site = pd.concat(sit, sort=False)
commodity = pd.concat(com, sort=False)
process = pd.concat(pro, sort=False)
process_commodity = pd.concat(pro_com, sort=False)
demand = pd.concat(dem, sort=False)
supim = pd.concat(sup, sort=False)
transmission = pd.concat(tra, sort=False)
storage = pd.concat(sto, sort=False)
dsm = pd.concat(ds, sort=False)
buy_sell_price = pd.concat(bsp, sort=False)
eff_factor = pd.concat(ef, sort=False)
except KeyError:
pass
data = {
'global_prop': global_prop,
'site': site,
'commodity': commodity,
'process': process,
'process_commodity': process_commodity,
'demand': demand,
'supim': supim,
'transmission': transmission,
'storage': storage,
'dsm': dsm,
'buy_sell_price': buy_sell_price.dropna(axis=1, how='all'),
'eff_factor': eff_factor.dropna(axis=1, how='all')
}
# sort nested indexes to make direct assignments work
for key in data:
if isinstance(data[key].index, pd.core.index.MultiIndex):
data[key].sort_index(inplace=True)
return data
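# Hedged usage sketch (commented out; folder name and year are assumptions):
# data = read_input('Input', 2020)        # globs Input/*.xlsx as above
# data['demand'].head()                   # MultiIndex columns ('Site', 'Commodity')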
# preparing the pyomo model
def pyomo_model_prep(data, timesteps):
'''Performs calculations on the data frames in dictionary "data" for
further usage by the model.
Args:
- data: input data dictionary
- timesteps: range of modeled timesteps
Returns:
a rudimentary pyomo.ConcreteModel instance
'''
m = pyomo.ConcreteModel()
# Preparations
# ============
# Data import. Syntax to access a value within equation definitions looks
# like this:
#
# storage.loc[site, storage, commodity][attribute]
#
m.mode = identify_mode(data)
m.timesteps = timesteps
m.global_prop = data['global_prop']
commodity = data['commodity']
process = data['process']
# create no expansion dataframes
pro_const_cap = process[process['inst-cap'] == process['cap-up']]
# create list with all support timeframe values
m.stf_list = m.global_prop.index.levels[0].tolist()
# create list with cost types
m.cost_type_list = ['Invest', 'Fixed', 'Variable', 'Fuel', 'Environmental']
# Converting Data frames to dict
# Data frames that need to be modified will be converted after modification
m.site_dict = data['site'].to_dict()
m.demand_dict = data['demand'].to_dict()
m.supim_dict = data['supim'].to_dict()
# additional features
if m.mode['tra']:
transmission = data['transmission'].dropna(axis=0, how='all')
# create no expansion dataframes
tra_const_cap = transmission[
transmission['inst-cap'] == transmission['cap-up']]
if m.mode['sto']:
storage = data['storage'].dropna(axis=0, how='all')
# create no expansion dataframes
sto_const_cap_c = storage[storage['inst-cap-c'] == storage['cap-up-c']]
sto_const_cap_p = storage[storage['inst-cap-p'] == storage['cap-up-p']]
if m.mode['dsm']:
m.dsm_dict = data["dsm"].dropna(axis=0, how='all').to_dict()
if m.mode['bsp']:
m.buy_sell_price_dict = \
data["buy_sell_price"].dropna(axis=0, how='all').to_dict()
# adding Revenue and Purchase to cost types
m.cost_type_list.extend(['Revenue', 'Purchase'])
if m.mode['tve']:
m.eff_factor_dict = \
data["eff_factor"].dropna(axis=0, how='all').to_dict()
# Create columns of support timeframe values
commodity['support_timeframe'] = (commodity.index.
get_level_values('support_timeframe'))
process['support_timeframe'] = (process.index.
get_level_values('support_timeframe'))
if m.mode['tra']:
transmission['support_timeframe'] = (transmission.index.
get_level_values
('support_timeframe'))
if m.mode['sto']:
storage['support_timeframe'] = (storage.index.
get_level_values('support_timeframe'))
# installed units for intertemporal planning
if m.mode['int']:
m.inst_pro = process['inst-cap']
m.inst_pro = m.inst_pro[m.inst_pro > 0]
if m.mode['tra']:
m.inst_tra = transmission['inst-cap']
m.inst_tra = m.inst_tra[m.inst_tra > 0]
if m.mode['sto']:
m.inst_sto = storage['inst-cap-p']
m.inst_sto = m.inst_sto[m.inst_sto > 0]
# process input/output ratios
m.r_in_dict = (data['process_commodity'].xs('In', level='Direction')
['ratio'].to_dict())
m.r_out_dict = (data['process_commodity'].xs('Out', level='Direction')
['ratio'].to_dict())
# process areas
proc_area = data["process"]['area-per-cap']
proc_area = proc_area[proc_area >= 0]
m.proc_area_dict = proc_area.to_dict()
# input ratios for partial efficiencies
# only keep those entries whose values are
# a) positive and
# b) numeric (implicitly, as NaN or NV compare false against 0)
r_in_min_fraction = data['process_commodity'].xs('In', level='Direction')
r_in_min_fraction = r_in_min_fraction['ratio-min']
r_in_min_fraction = r_in_min_fraction[r_in_min_fraction > 0]
m.r_in_min_fraction_dict = r_in_min_fraction.to_dict()
# output ratios for partial efficiencies
# only keep those entries whose values are
# a) positive and
# b) numeric (implicitly, as NaN or NV compare false against 0)
r_out_min_fraction = data['process_commodity'].xs('Out', level='Direction')
r_out_min_fraction = r_out_min_fraction['ratio-min']
r_out_min_fraction = r_out_min_fraction[r_out_min_fraction > 0]
m.r_out_min_fraction_dict = r_out_min_fraction.to_dict()
# storages with fixed initial state
if m.mode['sto']:
stor_init_bound = storage['init']
m.stor_init_bound_dict = \
stor_init_bound[stor_init_bound >= 0].to_dict()
try:
# storages with fixed energy-to-power ratio
sto_ep_ratio = storage['ep-ratio']
m.sto_ep_ratio_dict = sto_ep_ratio[sto_ep_ratio >= 0].to_dict()
except KeyError:
m.sto_ep_ratio_dict = {}
# derive invcost factor from WACC and depreciation duration
if m.mode['int']:
# modify pro_const_cap for intertemporal mode
for index in tuple(pro_const_cap.index):
stf_process = process.xs((index[1], index[2]), level=(1, 2))
if (not stf_process['cap-up'].max(axis=0) ==
pro_const_cap.loc[index]['inst-cap']):
pro_const_cap = pro_const_cap.drop(index)
# derive invest factor from WACC, depreciation and discount utility
process['discount'] = (m.global_prop.xs('Discount rate', level=1)
.loc[m.global_prop.index.min()[0]]['value'])
process['stf_min'] = m.global_prop.index.min()[0]
process['stf_end'] = (m.global_prop.index.max()[0] +
m.global_prop.loc[
(max(commodity.index.get_level_values
('support_timeframe').unique()),
'Weight')]['value'] - 1)
process['invcost-factor'] = (process.apply(
lambda x: invcost_factor(
x['depreciation'],
x['wacc'],
x['discount'],
x['support_timeframe'],
x['stf_min']),
axis=1))
# derive overpay-factor from WACC, depreciation and discount utility
process['overpay-factor'] = (process.apply(
lambda x: overpay_factor(
x['depreciation'],
x['wacc'],
x['discount'],
x['support_timeframe'],
x['stf_min'],
x['stf_end']),
axis=1))
process.loc[(process['overpay-factor'] < 0) |
(process['overpay-factor']
.isnull()), 'overpay-factor'] = 0
# Derive multiplier for all energy based costs
commodity['stf_dist'] = (commodity['support_timeframe'].
apply(stf_dist, m=m))
commodity['discount-factor'] = (commodity['support_timeframe'].
apply(discount_factor, m=m))
commodity['eff-distance'] = (commodity['stf_dist'].
apply(effective_distance, m=m))
commodity['cost_factor'] = (commodity['discount-factor'] *
commodity['eff-distance'])
process['stf_dist'] = (process['support_timeframe'].
apply(stf_dist, m=m))
process['discount-factor'] = (process['support_timeframe'].
apply(discount_factor, m=m))
process['eff-distance'] = (process['stf_dist'].
apply(effective_distance, m=m))
process['cost_factor'] = (process['discount-factor'] *
process['eff-distance'])
# Additional features
# transmission mode
if m.mode['tra']:
# modify tra_const_cap for intertemporal mode
for index in tuple(tra_const_cap.index):
stf_transmission = transmission.xs((index[1], index[2], index[3],
index[4]), level=(1, 2, 3, 4))
if (not stf_transmission['cap-up'].max(axis=0) ==
tra_const_cap.loc[index]['inst-cap']):
tra_const_cap = tra_const_cap.drop(index)
# derive invest factor from WACC, depreciation and
# discount utility
transmission['discount'] = (
m.global_prop.xs('Discount rate', level=1)
.loc[m.global_prop.index.min()[0]]['value'])
transmission['stf_min'] = m.global_prop.index.min()[0]
transmission['stf_end'] = (m.global_prop.index.max()[0] +
m.global_prop.loc[
(max(commodity.index.get_level_values
('support_timeframe').unique()),
'Weight')]['value'] - 1)
transmission['invcost-factor'] = (
transmission.apply(lambda x: invcost_factor(
x['depreciation'],
x['wacc'],
x['discount'],
x['support_timeframe'],
x['stf_min']),
axis=1))
# derive overpay-factor from WACC, depreciation and
# discount utility
transmission['overpay-factor'] = (
transmission.apply(lambda x: overpay_factor(
x['depreciation'],
x['wacc'],
x['discount'],
x['support_timeframe'],
x['stf_min'],
x['stf_end']),
axis=1))
# Derive multiplier for all energy based costs
transmission.loc[(transmission['overpay-factor'] < 0) |
(transmission['overpay-factor'].isnull()),
'overpay-factor'] = 0
transmission['stf_dist'] = (transmission['support_timeframe'].
apply(stf_dist, m=m))
transmission['discount-factor'] = (
transmission['support_timeframe'].apply(discount_factor, m=m))
transmission['eff-distance'] = (transmission['stf_dist'].
apply(effective_distance, m=m))
transmission['cost_factor'] = (transmission['discount-factor'] *
transmission['eff-distance'])
# storage mode
if m.mode['sto']:
# modify sto_const_cap_c and sto_const_cap_p for intertemporal mode
for index in tuple(sto_const_cap_c.index):
stf_storage = storage.xs((index[1], index[2], index[3]), level=(1, 2, 3))
if (not stf_storage['cap-up-c'].max(axis=0) ==
sto_const_cap_c.loc[index]['inst-cap-c']):
sto_const_cap_c = sto_const_cap_c.drop(index)
for index in tuple(sto_const_cap_p.index):
stf_storage = storage.xs((index[1], index[2], index[3]), level=(1, 2, 3))
if (not stf_storage['cap-up-p'].max(axis=0) ==
sto_const_cap_p.loc[index]['inst-cap-p']):
sto_const_cap_p = sto_const_cap_p.drop(index)
# derive invest factor from WACC, depreciation and
# discount utility
storage['discount'] = m.global_prop.xs('Discount rate', level=1) \
.loc[m.global_prop.index.min()[0]]['value']
storage['stf_min'] = m.global_prop.index.min()[0]
storage['stf_end'] = (m.global_prop.index.max()[0] +
m.global_prop.loc[
(max(commodity.index.get_level_values
('support_timeframe').unique()),
'Weight')]['value'] - 1)
storage['invcost-factor'] = (
storage.apply(
lambda x: invcost_factor(
x['depreciation'],
x['wacc'],
x['discount'],
x['support_timeframe'],
x['stf_min']),
axis=1))
storage['overpay-factor'] = (
storage.apply(lambda x: overpay_factor(
x['depreciation'],
x['wacc'],
x['discount'],
x['support_timeframe'],
x['stf_min'],
x['stf_end']),
axis=1))
storage.loc[(storage['overpay-factor'] < 0) |
(storage['overpay-factor'].isnull()),
'overpay-factor'] = 0
storage['stf_dist'] = (storage['support_timeframe']
.apply(stf_dist, m=m))
storage['discount-factor'] = (storage['support_timeframe']
.apply(discount_factor, m=m))
storage['eff-distance'] = (storage['stf_dist']
.apply(effective_distance, m=m))
storage['cost_factor'] = (storage['discount-factor'] *
storage['eff-distance'])
else:
# for one year problems
process['invcost-factor'] = (
process.apply(
lambda x: invcost_factor(
x['depreciation'],
x['wacc']),
axis=1))
# cost factor will be set to 1 for non intertemporal problems
commodity['cost_factor'] = 1
process['cost_factor'] = 1
# additional features
if m.mode['tra']:
transmission['invcost-factor'] = (
transmission.apply(lambda x:
invcost_factor(x['depreciation'],
x['wacc']),
axis=1))
transmission['cost_factor'] = 1
if m.mode['sto']:
storage['invcost-factor'] = (
storage.apply(lambda x:
invcost_factor(x['depreciation'],
x['wacc']),
axis=1))
storage['cost_factor'] = 1
# Converting Data frames to dictionaries
m.global_prop_dict = m.global_prop.to_dict()
m.commodity_dict = commodity.to_dict()
m.process_dict = process.to_dict()
# dictionaries for additional features
if m.mode['tra']:
m.transmission_dict = transmission.to_dict()
if m.mode['sto']:
m.storage_dict = storage.to_dict()
# update m.mode['exp'] and write dictionaries with constant capacities
m.mode['exp']['pro'] = identify_expansion(pro_const_cap['inst-cap'],
process['inst-cap'].dropna())
m.pro_const_cap_dict = pro_const_cap['inst-cap'].to_dict()
if m.mode['tra']:
m.mode['exp']['tra'] = identify_expansion(
tra_const_cap['inst-cap'],
transmission['inst-cap'].dropna())
m.tra_const_cap_dict = tra_const_cap['inst-cap'].to_dict()
if m.mode['sto']:
m.mode['exp']['sto-c'] = identify_expansion(
sto_const_cap_c['inst-cap-c'], storage['inst-cap-c'].dropna())
m.sto_const_cap_c_dict = sto_const_cap_c['inst-cap-c'].to_dict()
m.mode['exp']['sto-p'] = identify_expansion(
sto_const_cap_p['inst-cap-p'], storage['inst-cap-p'].dropna())
m.sto_const_cap_p_dict = sto_const_cap_p['inst-cap-p'].to_dict()
return m
def split_columns(columns, sep='.'):
"""Split columns by separator into MultiIndex.
Given a list of column labels containing a separator string (default: '.'),
derive a MultiIndex that is split at the separator string.
Args:
- columns: list of column labels, containing the separator string
- sep: the separator string (default: '.')
Returns:
a MultiIndex corresponding to input, with levels split at separator
Example:
>>> split_columns(['DE.Elec', 'MA.Elec', 'NO.Wind'])
MultiIndex(levels=[['DE', 'MA', 'NO'], ['Elec', 'Wind']],
labels=[[0, 1, 2], [0, 0, 1]])
"""
if len(columns) == 0:
return columns
column_tuples = [tuple(col.split('.')) for col in columns]
return pd.MultiIndex.from_tuples(column_tuples)
def get_input(prob, name):
"""Return input DataFrame of given name from urbs instance.
These are identical to the key names returned by function `read_input`.
That means they are lower-case names and use underscores for word
separation, e.g. 'process_commodity'.
Args:
- prob: a urbs model instance
- name: an input DataFrame name ('commodity', 'process', ...)
Returns:
the corresponding input DataFrame
"""
if hasattr(prob, name):
# classic case: input data DataFrames are accessible via named
# attributes, e.g. `prob.process`.
return getattr(prob, name)
elif hasattr(prob, '_data') and name in prob._data:
# load case: input data is accessible via the input data cache dict
return prob._data[name]
else:
# unknown
raise ValueError("Unknown input DataFrame name!")
| gpl-3.0 |
michaldz44/pyG-Attract | gattract.py | 1 | 4027 |
import argparse
import json
import sys
from golem import Golem
from attractors import Attractors
import pickle
def main():
parser = argparse.ArgumentParser(description='Gravitational attractors')
parser.add_argument('--dt', type=float, default=0.0005, help='timestep')
parser.add_argument('--h', type=float, default=0.1, help='magic depth parameter - DO NOT CHANGE!!!!')
parser.add_argument('--pot_d', type=float, default=0.005, help='potential diameter')
parser.add_argument('--term_v', type=float, default=0.005, help='terminating velocity')
parser.add_argument('--mu', type=float, default=0.7, help='friction coefficient')
parser.add_argument('--size', type=int, default=10, help='Problem size (N as it will compute NxN matrix) ')
parser.add_argument('--max_steps', type=int, default=False, help='Max steps that will occur (default: run until every golem terminates)')
parser.add_argument('positions', type=str, help='Geojson file containing positions with masses')
try:
with open('data1.pkl', 'rb') as input:
import matplotlib.pyplot as plt
(positions_x,positions_y) = pickle.load(input)
plt.plot(positions_x,positions_y)
for at in attractors.attractors:
plt.plot([at["position"].real],[at["position"].imag],'r*')
plt.show()
except:
pass
args = parser.parse_args()
with open(args.positions) as file:
js=json.loads(file.read())
attractors=Attractors(js,args)
N=args.size
# We choose points from the [0,1]x[0,1] area.
# We split it into NxN regions (future pixels).
# Each golem gets its starting position at the corresponding pixel center.
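# Worked example of that placement (comments only): with N=10, golem i=23 sits
# at ((23%10+0.5)/10, (23//10+0.5)/10) = (0.35, 0.25).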
golems=[Golem((i%N+0.5)/N,(i//N+0.5)/N,args,attractors,i) for i in range(N*N)]
for a in attractors.attractors:
print(a["esc_energy"])
print(attractors.get_potencial(complex(10000,10000)))
golems_functions=[golem.do_move for golem in golems]
golem_runnig=golems_functions
steps=0
positions_x=[]
positions_y=[]
number_to_view=0
while any(golem_runnig):
golem_runnig=[golem_function() for golem_function in golems_functions]
no_of_golems_running=sum([1 for rg in golem_runnig if rg])
if len(golems)<1000 or (len(golems)>=1000 and steps%10==0):
print(
repr(abs(golems[number_to_view].velocity)).ljust(43),
repr(golems[number_to_view].get_energy()).ljust(20),
repr(golems[number_to_view].energy).ljust(20),
repr(no_of_golems_running).rjust(5),
# repr(golems[number_to_view].q).rjust(5),
)
positions_x.append(golems[number_to_view].position.real)
positions_y.append(golems[number_to_view].position.imag)
steps+=1
if args.max_steps and args.max_steps < steps:
break
#
for golem in golems:
golem.final_attractor=golem.attractors.min_attractor(golem.position)
try:
#try:
# with open('data.pkl', 'rb') as input:
# golems = pickle.load(input)
#except:
# pass
from PIL import Image
img = Image.new( 'RGB', (N,N), "black") # create a new black image
pixels = img.load() # create the pixel map
for i in range(N*N): # for every pixel:
color = golems[i].get_color()
if color:
pixels[i%N,i//N] = tuple(color) # set the colour accordingly
img.show()
except:
with open('data.pkl', 'wb') as output:
pickle.dump(golems, output, pickle.HIGHEST_PROTOCOL)
try:
import matplotlib.pyplot as plt
plt.plot(positions_x,positions_y)
for at in attractors.attractors:
plt.plot([at["position"].real],[at["position"].imag],'r*')
plt.show()
except:
with open('data1.pkl', 'wb') as output:
pickle.dump((positions_x,positions_y), output, pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
| gpl-2.0 |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/pandas/sparse/scipy_sparse.py | 18 | 5516 |
"""
Interaction with scipy.sparse matrices.
Currently only includes SparseSeries.to_coo helpers.
"""
from pandas.core.index import MultiIndex, Index
from pandas.core.series import Series
from pandas.compat import OrderedDict, lmap
def _check_is_partition(parts, whole):
whole = set(whole)
parts = [set(x) for x in parts]
if set.intersection(*parts) != set():
raise ValueError(
'Is not a partition because intersection is not null.')
if set.union(*parts) != whole:
raise ValueError('Is not a partition because union is not the whole.')
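# Commented example of the partition check (toy values are assumptions):
# _check_is_partition([(0,), (1,)], range(2))     # passes silently
# _check_is_partition([(0,), (0, 1)], range(2))   # raises: intersection not null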
def _to_ijv(ss, row_levels=(0, ), column_levels=(1, ), sort_labels=False):
""" For arbitrary (MultiIndexed) SparseSeries return
(v, i, j, ilabels, jlabels) where (v, (i, j)) is suitable for
passing to scipy.sparse.coo constructor. """
# index and column levels must be a partition of the index
_check_is_partition([row_levels, column_levels], range(ss.index.nlevels))
# from the SparseSeries: get the labels and data for non-null entries
values = ss._data.internal_values()._valid_sp_values
nonnull_labels = ss.dropna()
def get_indexers(levels):
""" Return sparse coords and dense labels for subset levels """
# TODO: how to do this better? cleanly slice nonnull_labels given the
# coord
values_ilabels = [tuple(x[i] for i in levels)
for x in nonnull_labels.index]
if len(levels) == 1:
values_ilabels = [x[0] for x in values_ilabels]
# # performance issues with groupby ###################################
# TODO: these two lines can replace the code below but
# groupby is too slow (in some cases at least)
# labels_to_i = ss.groupby(level=levels, sort=sort_labels).first()
# labels_to_i[:] = np.arange(labels_to_i.shape[0])
def _get_label_to_i_dict(labels, sort_labels=False):
""" Return OrderedDict of unique labels to number.
Optionally sort by label.
"""
labels = Index(lmap(tuple, labels)).unique().tolist() # squish
if sort_labels:
labels = sorted(list(labels))
d = OrderedDict((k, i) for i, k in enumerate(labels))
return (d)
def _get_index_subset_to_coord_dict(index, subset, sort_labels=False):
def robust_get_level_values(i):
# if index has labels (that are not None) use those,
# else use the level location
try:
return index.get_level_values(index.names[i])
except KeyError:
return index.get_level_values(i)
ilabels = list(zip(*[robust_get_level_values(i) for i in subset]))
labels_to_i = _get_label_to_i_dict(ilabels,
sort_labels=sort_labels)
labels_to_i = Series(labels_to_i)
if len(subset) > 1:
labels_to_i.index = MultiIndex.from_tuples(labels_to_i.index)
labels_to_i.index.names = [index.names[i] for i in subset]
labels_to_i.name = 'value'
return (labels_to_i)
labels_to_i = _get_index_subset_to_coord_dict(ss.index, levels,
sort_labels=sort_labels)
# #####################################################################
# #####################################################################
i_coord = labels_to_i[values_ilabels].tolist()
i_labels = labels_to_i.index.tolist()
return i_coord, i_labels
i_coord, i_labels = get_indexers(row_levels)
j_coord, j_labels = get_indexers(column_levels)
return values, i_coord, j_coord, i_labels, j_labels
def _sparse_series_to_coo(ss, row_levels=(0, ), column_levels=(1, ),
sort_labels=False):
""" Convert a SparseSeries to a scipy.sparse.coo_matrix using index
levels row_levels, column_levels as the row and column
labels respectively. Returns the sparse_matrix, row and column labels.
"""
import scipy.sparse
if ss.index.nlevels < 2:
raise ValueError('to_coo requires MultiIndex with nlevels >= 2')
if not ss.index.is_unique:
raise ValueError('Duplicate index entries are not allowed in to_coo '
'transformation.')
# to keep things simple, only rely on integer indexing (not labels)
row_levels = [ss.index._get_level_number(x) for x in row_levels]
column_levels = [ss.index._get_level_number(x) for x in column_levels]
v, i, j, rows, columns = _to_ijv(ss, row_levels=row_levels,
column_levels=column_levels,
sort_labels=sort_labels)
sparse_matrix = scipy.sparse.coo_matrix(
(v, (i, j)), shape=(len(rows), len(columns)))
return sparse_matrix, rows, columns
def _coo_to_sparse_series(A, dense_index=False):
""" Convert a scipy.sparse.coo_matrix to a SparseSeries.
Use the defaults given in the SparseSeries constructor.
"""
s = Series(A.data, MultiIndex.from_arrays((A.row, A.col)))
s = s.sort_index()
s = s.to_sparse() # TODO: specify kind?
if dense_index:
# is there a better constructor method to use here?
i = range(A.shape[0])
j = range(A.shape[1])
ind = MultiIndex.from_product([i, j])
s = s.reindex_axis(ind)
return s
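# Hedged round-trip sketch (commented out; relies on the old SparseSeries
# to_coo/from_coo wrappers around the helpers above, with an assumed series s):
# A, rows, cols = s.to_coo(row_levels=(0,), column_levels=(1,), sort_labels=True)
# s2 = pd.SparseSeries.from_coo(A, dense_index=False)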
| apache-2.0 |
IshankGulati/scikit-learn | examples/model_selection/plot_learning_curve.py | 76 | 4509 |
"""
========================
Plotting Learning Curves
========================
On the left side the learning curve of a naive Bayes classifier is shown for
the digits dataset. Note that the training score and the cross-validation score
are both not very good at the end. However, the shape of the curve can be found
in more complex datasets very often: the training score is very high at the
beginning and decreases and the cross-validation score is very low at the
beginning and increases. On the right side we see the learning curve of an SVM
with RBF kernel. We can see clearly that the training score is still around
the maximum and the validation score could be increased with more training
samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.model_selection import learning_curve
from sklearn.model_selection import ShuffleSplit
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
"""
Generate a simple plot of the test and training learning curve.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
title : string
Title for the chart.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
ylim : tuple, shape (ymin, ymax), optional
Defines minimum and maximum yvalues plotted.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`StratifiedKFold` is used. If the estimator is not a classifier
or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validators that can be used here.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
"""
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
plt.legend(loc="best")
return plt
digits = load_digits()
X, y = digits.data, digits.target
title = "Learning Curves (Naive Bayes)"
# Cross validation with 100 iterations to get smoother mean test and train
# score curves, each time with 20% data randomly selected as a validation set.
cv = ShuffleSplit(n_splits=100, test_size=0.2, random_state=0)
estimator = GaussianNB()
plot_learning_curve(estimator, title, X, y, ylim=(0.7, 1.01), cv=cv, n_jobs=4)
title = "Learning Curves (SVM, RBF kernel, $\gamma=0.001$)"
# SVC is more expensive so we do a lower number of CV iterations:
cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=0)
estimator = SVC(gamma=0.001)
plot_learning_curve(estimator, title, X, y, (0.7, 1.01), cv=cv, n_jobs=4)
plt.show()
| bsd-3-clause |
paalge/scikit-image | doc/source/conf.py | 1 | 12382 |
# -*- coding: utf-8 -*-
#
# skimage documentation build configuration file, created by
# sphinx-quickstart on Sat Aug 22 13:00:30 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import skimage
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
curpath = os.path.dirname(__file__)
sys.path.append(os.path.join(curpath, '..', 'ext'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.imgmath',
'numpydoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.linkcode',
'sphinx_gallery.gen_gallery'
]
autosummary_generate = True
#------------------------------------------------------------------------
# Sphinx-gallery configuration
#------------------------------------------------------------------------
sphinx_gallery_conf = {
'doc_module' : 'skimage',
# path to your examples scripts
'examples_dirs' : '../examples',
# path where to save gallery generated examples
'gallery_dirs' : 'auto_examples',
'mod_example_dir': 'api',
'reference_url' : {
'skimage': None,
'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0',
'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',}
}
# Determine if the matplotlib has a recent enough version of the
# plot_directive, otherwise use the local fork.
try:
from matplotlib.sphinxext import plot_directive
except ImportError:
use_matplotlib_plot_directive = False
else:
try:
use_matplotlib_plot_directive = (plot_directive.__version__ >= 2)
except AttributeError:
use_matplotlib_plot_directive = False
if use_matplotlib_plot_directive:
extensions.append('matplotlib.sphinxext.plot_directive')
else:
extensions.append('plot_directive')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'skimage'
copyright = '2013, the scikit-image team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
with open('../../skimage/__init__.py') as f:
setup_lines = f.readlines()
version = 'vUndefined'
for l in setup_lines:
if l.startswith('__version__'):
version = l.split("'")[1]
break
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-image'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'skimage v%s docs' % version
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': ['searchbox.html',
'navigation.html',
'localtoc.html',
'versions.html'],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikitimagedoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('contents', 'scikit-image.tex', u'The scikit-image Documentation',
u'scikit-image development team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_elements = {}
latex_elements['preamble'] = r'''
\usepackage{enumitem}
\setlistdepth{100}
\usepackage{amsmath}
\DeclareUnicodeCharacter{00A0}{\nobreakspace}
% In the parameters section, place a newline after the Parameters header
\usepackage{expdlist}
\let\latexdescription=\description
\def\description{\latexdescription{}{} \breaklabel}
% Make Examples/etc section headers smaller and more compact
\makeatletter
\titleformat{\paragraph}{\normalsize\py@HeaderFamily}%
{\py@TitleColor}{0em}{\py@TitleColor}{\py@NormalColor}
\titlespacing*{\paragraph}{0pt}{1ex}{0pt}
\makeatother
'''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
# -----------------------------------------------------------------------------
# Numpy extensions
# -----------------------------------------------------------------------------
numpydoc_show_class_members = False
numpydoc_class_members_toctree = False
# -----------------------------------------------------------------------------
# Plots
# -----------------------------------------------------------------------------
plot_basedir = os.path.join(curpath, "plots")
plot_pre_code = """
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(0)
import matplotlib
matplotlib.rcParams.update({
'font.size': 14,
'axes.titlesize': 12,
'axes.labelsize': 10,
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'legend.fontsize': 10,
'figure.subplot.bottom': 0.2,
'figure.subplot.left': 0.2,
'figure.subplot.right': 0.9,
'figure.subplot.top': 0.85,
'figure.subplot.wspace': 0.4,
'text.usetex': False,
})
"""
plot_include_source = True
plot_formats = [('png', 100), ('pdf', 100)]
plot2rst_index_name = 'README'
plot2rst_rcparams = {'image.cmap' : 'gray',
'image.interpolation' : 'none'}
# -----------------------------------------------------------------------------
# intersphinx
# -----------------------------------------------------------------------------
_python_version_str = '{0.major}.{0.minor}'.format(sys.version_info)
_python_doc_base = 'http://docs.python.org/' + _python_version_str
intersphinx_mapping = {
'python': (_python_doc_base, None),
'numpy': ('http://docs.scipy.org/doc/numpy',
(None, './_intersphinx/numpy-objects.inv')),
'scipy': ('http://docs.scipy.org/doc/scipy/reference',
(None, './_intersphinx/scipy-objects.inv')),
'sklearn': ('http://scikit-learn.org/stable',
(None, './_intersphinx/sklearn-objects.inv')),
'matplotlib': ('http://matplotlib.org/',
(None, 'http://matplotlib.org/objects.inv'))
}
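# Illustrative note (not part of the original configuration): with the mapping
# above, a reST cross-reference such as :class:`numpy.ndarray` or
# :func:`scipy.ndimage.gaussian_filter` in the documentation is resolved against
# the remote inventory (or the local ./_intersphinx fallback) and rendered as a
# link into the corresponding external docs.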
# ----------------------------------------------------------------------------
# Source code links
# ----------------------------------------------------------------------------
import inspect
from os.path import relpath, dirname
# Function courtesy of NumPy to return URLs containing line numbers
def linkcode_resolve(domain, info):
"""
    Determine the URL corresponding to a given Python object.
"""
if domain != 'py':
return None
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except:
return None
try:
fn = inspect.getsourcefile(obj)
except:
fn = None
if not fn:
return None
try:
source, lineno = inspect.findsource(obj)
except:
lineno = None
if lineno:
linespec = "#L%d" % (lineno + 1)
else:
linespec = ""
fn = relpath(fn, start=dirname(skimage.__file__))
if 'dev' in skimage.__version__:
return ("http://github.com/scikit-image/scikit-image/blob/"
"master/skimage/%s%s" % (fn, linespec))
else:
return ("http://github.com/scikit-image/scikit-image/blob/"
"v%s/skimage/%s%s" % (skimage.__version__, fn, linespec))
|
bsd-3-clause
|
ai-se/Transfer-Learning
|
src/tools/misc.py
|
2
|
1963
|
from pandas import DataFrame, read_csv, concat
from os import walk
import numpy as np
from pdb import set_trace
import sys
def say(text):
sys.stdout.write(str(text))
def shuffle(df, n=1, axis=0):
df = df.copy()
for _ in range(n):
df.apply(np.random.shuffle, axis=axis)
return df
def csv2DF(dir, as_mtx=False, toBin=False):
files=[]
for f in dir:
df=read_csv(f)
headers = [h for h in df.columns if '?' not in h]
# set_trace()
if isinstance(df[df.columns[-1]][0], str):
df[df.columns[-1]] = DataFrame([0 if 'N' in d or 'n' in d else 1 for d in df[df.columns[-1]]])
if toBin:
df[df.columns[-1]]=DataFrame([1 if d > 0 else 0 for d in df[df.columns[-1]]])
files.append(df[headers])
"For N files in a project, use 1 to N-1 as train."
data_DF = concat(files)
if as_mtx: return data_DF.as_matrix()
else: return data_DF
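# Minimal usage sketch (hypothetical file path, kept as a comment):
#   dfs = csv2DF(['../data.dat/Jureczko/ant/ant-1.3.csv'], toBin=True)
# reads each CSV, drops columns whose names contain '?', binarizes the last
# (label) column, and returns the concatenated DataFrame (or a matrix when
# as_mtx=True).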
def explore(dir='../data.dat/Jureczko/', name=None):
datasets = []
for (dirpath, dirnames, filenames) in walk(dir):
datasets.append(dirpath)
training = []
testing = []
if name:
for k in datasets[1:]:
if name in k:
                if 'Jureczko' in dir or 'mccabe' in dir:
train = [[dirPath, fname] for dirPath, _, fname in walk(k)]
test = [train[0][0] + '/' + train[0][1].pop(-1)]
# set_trace()
training = [train[0][0] + '/' + p for p in train[0][1] if not p == '.DS_Store' and '.csv' in p]
testing = test
return training, testing
elif 'Seigmund' in dir:
train = [dir+name+'/'+fname[0] for dirPath, _, fname in walk(k)]
return train
else:
for k in datasets[1:]:
train = [[dirPath, fname] for dirPath, _, fname in walk(k)]
test = [train[0][0] + '/' + train[0][1].pop(-1)]
training.append(
[train[0][0] + '/' + p for p in train[0][1] if not p == '.DS_Store'])
testing.append(test)
return training, testing
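# Usage sketch (the project name 'ant' is hypothetical; any project folder under
# the Jureczko directory works):
#   train_files, test_files = explore(dir='../data.dat/Jureczko/', name='ant')
# returns the CSV paths of all but the last version as training data and the
# last version as the single test file.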
|
unlicense
|
rsivapr/scikit-learn
|
sklearn/tests/test_lda.py
|
22
|
1521
|
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from .. import lda
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
y3 = np.array([1, 1, 2, 2, 3, 3])
# Degenerate data with 1 feature (still should be separable)
X1 = np.array([[-2, ], [-1, ], [-1, ], [1, ], [1, ], [2, ]])
def test_lda_predict():
"""
LDA classification.
This checks that LDA implements fit and predict and returns
correct values for a simple toy dataset.
"""
clf = lda.LDA()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
# Assure that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y)
# Test probas estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)
# Primarily test for commit 2f34950 -- "reuse" of priors
y_pred3 = clf.fit(X, y3).predict(X)
# LDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3))
def test_lda_transform():
clf = lda.LDA()
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
|
bsd-3-clause
|
bikong2/scikit-learn
|
sklearn/decomposition/dict_learning.py
|
104
|
44632
|
""" Dictionary learning
"""
from __future__ import print_function
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import sys
import itertools
from math import sqrt, ceil
import numpy as np
from scipy import linalg
from numpy.lib.stride_tricks import as_strided
from ..base import BaseEstimator, TransformerMixin
from ..externals.joblib import Parallel, delayed, cpu_count
from ..externals.six.moves import zip
from ..utils import (check_array, check_random_state, gen_even_slices,
gen_batches, _get_n_jobs)
from ..utils.extmath import randomized_svd, row_norms
from ..utils.validation import check_is_fitted
from ..linear_model import Lasso, orthogonal_mp_gram, LassoLars, Lars
def _sparse_encode(X, dictionary, gram, cov=None, algorithm='lasso_lars',
regularization=None, copy_cov=True,
init=None, max_iter=1000):
"""Generic sparse coding
Each column of the result is the solution to a Lasso problem.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows.
gram: None | array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
gram can be None if method is 'threshold'.
cov: array, shape=(n_components, n_samples)
Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than regularization
from the projection dictionary * data'
regularization : int | float
The regularization parameter. It corresponds to alpha when
algorithm is 'lasso_lars', 'lasso_cd' or 'threshold'.
Otherwise it corresponds to n_nonzero_coefs.
init: array of shape (n_samples, n_components)
Initialization value of the sparse code. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
Returns
-------
    code: array of shape (n_samples, n_components)
        The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
if cov is None and algorithm != 'lasso_cd':
# overwriting cov is safe
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm == 'lasso_lars':
alpha = float(regularization) / n_features # account for scaling
try:
err_mgt = np.seterr(all='ignore')
lasso_lars = LassoLars(alpha=alpha, fit_intercept=False,
verbose=False, normalize=False,
precompute=gram, fit_path=False)
lasso_lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lasso_lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'lasso_cd':
alpha = float(regularization) / n_features # account for scaling
clf = Lasso(alpha=alpha, fit_intercept=False, normalize=False,
precompute=gram, max_iter=max_iter, warm_start=True)
clf.coef_ = init
clf.fit(dictionary.T, X.T, check_input=False)
new_code = clf.coef_
elif algorithm == 'lars':
try:
err_mgt = np.seterr(all='ignore')
lars = Lars(fit_intercept=False, verbose=False, normalize=False,
precompute=gram, n_nonzero_coefs=int(regularization),
fit_path=False)
lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'threshold':
new_code = ((np.sign(cov) *
np.maximum(np.abs(cov) - regularization, 0)).T)
elif algorithm == 'omp':
new_code = orthogonal_mp_gram(gram, cov, regularization, None,
row_norms(X, squared=True),
copy_Xy=copy_cov).T
else:
        raise ValueError('Sparse coding method must be "lasso_lars", '
                         '"lasso_cd", "lars", "threshold" or "omp", got %s.'
                         % algorithm)
return new_code
# XXX : could be moved to the linear_model module
def sparse_encode(X, dictionary, gram=None, cov=None, algorithm='lasso_lars',
n_nonzero_coefs=None, alpha=None, copy_cov=True, init=None,
max_iter=1000, n_jobs=1):
"""Sparse coding
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows for meaningful
output.
gram: array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
cov: array, shape=(n_components, n_samples)
        Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
n_nonzero_coefs: int, 0.1 * n_features by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
alpha: float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
        If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
init: array of shape (n_samples, n_components)
Initialization value of the sparse codes. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
n_jobs: int, optional
Number of parallel jobs to run.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
dictionary = check_array(dictionary)
X = check_array(X)
n_samples, n_features = X.shape
n_components = dictionary.shape[0]
if gram is None and algorithm != 'threshold':
# Transposing product to ensure Fortran ordering
gram = np.dot(dictionary, dictionary.T).T
if cov is None and algorithm != 'lasso_cd':
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm in ('lars', 'omp'):
regularization = n_nonzero_coefs
if regularization is None:
regularization = min(max(n_features / 10, 1), n_components)
else:
regularization = alpha
if regularization is None:
regularization = 1.
if n_jobs == 1 or algorithm == 'threshold':
code = _sparse_encode(X,
dictionary, gram, cov=cov,
algorithm=algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init,
max_iter=max_iter)
        # This ensures that the dimensionality of code is always 2,
        # consistent with the case n_jobs > 1
if code.ndim == 1:
code = code[np.newaxis, :]
return code
# Enter parallel code block
code = np.empty((n_samples, n_components))
slices = list(gen_even_slices(n_samples, _get_n_jobs(n_jobs)))
code_views = Parallel(n_jobs=n_jobs)(
delayed(_sparse_encode)(
X[this_slice], dictionary, gram,
cov[:, this_slice] if cov is not None else None,
algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init[this_slice] if init is not None else None,
max_iter=max_iter)
for this_slice in slices)
for this_slice, this_view in zip(slices, code_views):
code[this_slice] = this_view
return code
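# Minimal usage sketch of sparse_encode (random data, shapes only, illustrative):
#
#     rng = np.random.RandomState(0)
#     D = rng.randn(10, 8)                                   # (n_components, n_features)
#     D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]    # unit-norm rows
#     X = rng.randn(5, 8)                                    # (n_samples, n_features)
#     code = sparse_encode(X, D, algorithm='omp', n_nonzero_coefs=3)
#     # code.shape == (5, 10); each row has at most 3 nonzero coefficients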
def _update_dict(dictionary, Y, code, verbose=False, return_r2=False,
random_state=None):
"""Update the dense dictionary factor in place.
Parameters
----------
dictionary: array of shape (n_features, n_components)
Value of the dictionary at the previous iteration.
Y: array of shape (n_features, n_samples)
Data matrix.
code: array of shape (n_components, n_samples)
Sparse coding of the data against which to optimize the dictionary.
verbose:
Degree of output the procedure will print.
return_r2: bool
Whether to compute and return the residual sum of squares corresponding
to the computed solution.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
Returns
-------
dictionary: array of shape (n_features, n_components)
Updated dictionary.
"""
n_components = len(code)
n_samples = Y.shape[0]
random_state = check_random_state(random_state)
# Residuals, computed 'in-place' for efficiency
R = -np.dot(dictionary, code)
R += Y
R = np.asfortranarray(R)
ger, = linalg.get_blas_funcs(('ger',), (dictionary, code))
for k in range(n_components):
# R <- 1.0 * U_k * V_k^T + R
R = ger(1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
dictionary[:, k] = np.dot(R, code[k, :].T)
# Scale k'th atom
atom_norm_square = np.dot(dictionary[:, k], dictionary[:, k])
if atom_norm_square < 1e-20:
if verbose == 1:
sys.stdout.write("+")
sys.stdout.flush()
elif verbose:
print("Adding new random atom")
dictionary[:, k] = random_state.randn(n_samples)
# Setting corresponding coefs to 0
code[k, :] = 0.0
dictionary[:, k] /= sqrt(np.dot(dictionary[:, k],
dictionary[:, k]))
else:
dictionary[:, k] /= sqrt(atom_norm_square)
# R <- -1.0 * U_k * V_k^T + R
R = ger(-1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
if return_r2:
R **= 2
# R is fortran-ordered. For numpy version < 1.6, sum does not
# follow the quick striding first, and is thus inefficient on
# fortran ordered data. We take a flat view of the data with no
# striding
R = as_strided(R, shape=(R.size, ), strides=(R.dtype.itemsize,))
R = np.sum(R)
return dictionary, R
return dictionary
def dict_learning(X, n_components, alpha, max_iter=100, tol=1e-8,
method='lars', n_jobs=1, dict_init=None, code_init=None,
callback=None, verbose=False, random_state=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components: int,
Number of dictionary atoms to extract.
alpha: int,
Sparsity controlling parameter.
max_iter: int,
Maximum number of iterations to perform.
tol: float,
Tolerance for the stopping condition.
method: {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
n_jobs: int,
Number of parallel jobs to run, or -1 to autodetect.
dict_init: array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
code_init: array of shape (n_samples, n_components),
Initial value for the sparse code for warm restart scenarios.
callback:
Callable that gets invoked every five iterations.
verbose:
Degree of output the procedure will print.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse code factor in the matrix factorization.
dictionary: array of shape (n_components, n_features),
The dictionary factor in the matrix factorization.
errors: array
Vector of errors at each iteration.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to True.
See also
--------
dict_learning_online
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if method not in ('lars', 'cd'):
raise ValueError('Coding method %r not supported as a fit algorithm.'
% method)
method = 'lasso_' + method
t0 = time.time()
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init the code and the dictionary with SVD of Y
if code_init is not None and dict_init is not None:
code = np.array(code_init, order='F')
# Don't copy V, it will happen below
dictionary = dict_init
else:
code, S, dictionary = linalg.svd(X, full_matrices=False)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r: # True even if n_components=None
code = code[:, :n_components]
dictionary = dictionary[:n_components, :]
else:
code = np.c_[code, np.zeros((len(code), n_components - r))]
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
# Fortran-order dict, as we are going to access its row vectors
dictionary = np.array(dictionary, order='F')
residuals = 0
errors = []
current_cost = np.nan
if verbose == 1:
print('[dict_learning]', end=' ')
# If max_iter is 0, number of iterations returned should be zero
ii = -1
for ii in range(max_iter):
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
print ("Iteration % 3i "
"(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)"
% (ii, dt, dt / 60, current_cost))
# Update code
code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha,
init=code, n_jobs=n_jobs)
# Update dictionary
dictionary, residuals = _update_dict(dictionary.T, X.T, code.T,
verbose=verbose, return_r2=True,
random_state=random_state)
dictionary = dictionary.T
# Cost function
current_cost = 0.5 * residuals + alpha * np.sum(np.abs(code))
errors.append(current_cost)
if ii > 0:
dE = errors[-2] - errors[-1]
# assert(dE >= -tol * errors[-1])
if dE < tol * errors[-1]:
if verbose == 1:
# A line return
print("")
elif verbose:
print("--- Convergence reached after %d iterations" % ii)
break
if ii % 5 == 0 and callback is not None:
callback(locals())
if return_n_iter:
return code, dictionary, errors, ii + 1
else:
return code, dictionary, errors
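# Minimal usage sketch of dict_learning (random data, illustrative only):
#
#     rng = np.random.RandomState(0)
#     X = rng.randn(20, 8)
#     code, dictionary, errors = dict_learning(X, n_components=5, alpha=1,
#                                              max_iter=20, method='cd',
#                                              random_state=0)
#     # code.shape == (20, 5), dictionary.shape == (5, 8);
#     # errors holds the cost at each outer iteration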
def dict_learning_online(X, n_components=2, alpha=1, n_iter=100,
return_code=True, dict_init=None, callback=None,
batch_size=3, verbose=False, shuffle=True, n_jobs=1,
method='lars', iter_offset=0, random_state=None,
return_inner_stats=False, inner_stats=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem online.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code. This is
accomplished by repeatedly iterating over mini-batches by slicing
the input data.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components : int,
Number of dictionary atoms to extract.
alpha : float,
Sparsity controlling parameter.
n_iter : int,
Number of iterations to perform.
return_code : boolean,
Whether to also return the code U or just the dictionary V.
dict_init : array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
callback :
Callable that gets invoked every five iterations.
batch_size : int,
The number of samples to take in each batch.
verbose :
Degree of output the procedure will print.
shuffle : boolean,
Whether to shuffle the data before splitting it in batches.
n_jobs : int,
Number of parallel jobs to run, or -1 to autodetect.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
iter_offset : int, default 0
Number of previous iterations completed on the dictionary used for
initialization.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
return_inner_stats : boolean, optional
Return the inner statistics A (dictionary covariance) and B
(data approximation). Useful to restart the algorithm in an
online setting. If return_inner_stats is True, return_code is
ignored
inner_stats : tuple of (A, B) ndarrays
Inner sufficient statistics that are kept by the algorithm.
Passing them at initialization is useful in online settings, to
        avoid losing the history of the evolution.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code : array of shape (n_samples, n_components),
the sparse code (only returned if `return_code=True`)
dictionary : array of shape (n_components, n_features),
the solutions to the dictionary learning problem
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to `True`.
See also
--------
dict_learning
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if n_components is None:
n_components = X.shape[1]
if method not in ('lars', 'cd'):
raise ValueError('Coding method not supported as a fit algorithm.')
method = 'lasso_' + method
t0 = time.time()
n_samples, n_features = X.shape
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init V with SVD of X
if dict_init is not None:
dictionary = dict_init
else:
_, S, dictionary = randomized_svd(X, n_components,
random_state=random_state)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r:
dictionary = dictionary[:n_components, :]
else:
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
if verbose == 1:
print('[dict_learning]', end=' ')
if shuffle:
X_train = X.copy()
random_state.shuffle(X_train)
else:
X_train = X
dictionary = check_array(dictionary.T, order='F', dtype=np.float64,
copy=False)
X_train = check_array(X_train, order='C', dtype=np.float64, copy=False)
batches = gen_batches(n_samples, batch_size)
batches = itertools.cycle(batches)
# The covariance of the dictionary
if inner_stats is None:
A = np.zeros((n_components, n_components))
# The data approximation
B = np.zeros((n_features, n_components))
else:
A = inner_stats[0].copy()
B = inner_stats[1].copy()
# If n_iter is zero, we need to return zero.
ii = iter_offset - 1
for ii, batch in zip(range(iter_offset, iter_offset + n_iter), batches):
this_X = X_train[batch]
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
if verbose > 10 or ii % ceil(100. / verbose) == 0:
print ("Iteration % 3i (elapsed time: % 3is, % 4.1fmn)"
% (ii, dt, dt / 60))
this_code = sparse_encode(this_X, dictionary.T, algorithm=method,
alpha=alpha, n_jobs=n_jobs).T
# Update the auxiliary variables
if ii < batch_size - 1:
theta = float((ii + 1) * batch_size)
else:
theta = float(batch_size ** 2 + ii + 1 - batch_size)
beta = (theta + 1 - batch_size) / (theta + 1)
A *= beta
A += np.dot(this_code, this_code.T)
B *= beta
B += np.dot(this_X.T, this_code.T)
# Update dictionary
dictionary = _update_dict(dictionary, B, A, verbose=verbose,
random_state=random_state)
# XXX: Can the residuals be of any use?
        # Maybe we need a stopping criterion based on the amount of
        # modification in the dictionary
if callback is not None:
callback(locals())
if return_inner_stats:
if return_n_iter:
return dictionary.T, (A, B), ii - iter_offset + 1
else:
return dictionary.T, (A, B)
if return_code:
if verbose > 1:
print('Learning code...', end=' ')
elif verbose == 1:
print('|', end=' ')
code = sparse_encode(X, dictionary.T, algorithm=method, alpha=alpha,
n_jobs=n_jobs)
if verbose > 1:
dt = (time.time() - t0)
print('done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60))
if return_n_iter:
return code, dictionary.T, ii - iter_offset + 1
else:
return code, dictionary.T
if return_n_iter:
return dictionary.T, ii - iter_offset + 1
else:
return dictionary.T
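# Minimal usage sketch of dict_learning_online (random data, illustrative only):
#
#     rng = np.random.RandomState(0)
#     X = rng.randn(30, 8)
#     code, dictionary = dict_learning_online(X, n_components=5, alpha=1,
#                                             n_iter=25, batch_size=5,
#                                             random_state=0)
#     # code.shape == (30, 5), dictionary.shape == (5, 8)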
class SparseCodingMixin(TransformerMixin):
"""Sparse coding mixin"""
def _set_sparse_coding_params(self, n_components,
transform_algorithm='omp',
transform_n_nonzero_coefs=None,
transform_alpha=None, split_sign=False,
n_jobs=1):
self.n_components = n_components
self.transform_algorithm = transform_algorithm
self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
self.transform_alpha = transform_alpha
self.split_sign = split_sign
self.n_jobs = n_jobs
def transform(self, X, y=None):
"""Encode the data as a sparse combination of the dictionary atoms.
Coding method is determined by the object parameter
`transform_algorithm`.
Parameters
----------
X : array of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data
"""
check_is_fitted(self, 'components_')
# XXX : kwargs is not documented
X = check_array(X)
n_samples, n_features = X.shape
code = sparse_encode(
X, self.components_, algorithm=self.transform_algorithm,
n_nonzero_coefs=self.transform_n_nonzero_coefs,
alpha=self.transform_alpha, n_jobs=self.n_jobs)
if self.split_sign:
# feature vector is split into a positive and negative side
n_samples, n_features = code.shape
split_code = np.empty((n_samples, 2 * n_features))
split_code[:, :n_features] = np.maximum(code, 0)
split_code[:, n_features:] = -np.minimum(code, 0)
code = split_code
return code
class SparseCoder(BaseEstimator, SparseCodingMixin):
"""Sparse coding
Finds a sparse representation of data against a fixed, precomputed
dictionary.
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
dictionary : array, [n_components, n_features]
        The dictionary atoms used for sparse coding. Rows are assumed to be
normalized to unit norm.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data:
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
Attributes
----------
components_ : array, [n_components, n_features]
The unchanged dictionary atoms
See also
--------
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
sparse_encode
"""
def __init__(self, dictionary, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
split_sign=False, n_jobs=1):
self._set_sparse_coding_params(dictionary.shape[0],
transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.components_ = dictionary
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
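# Minimal usage sketch of SparseCoder (random dictionary, illustrative only):
#
#     rng = np.random.RandomState(0)
#     D = rng.randn(10, 8)
#     D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
#     coder = SparseCoder(dictionary=D, transform_algorithm='lasso_lars',
#                         transform_alpha=0.1)
#     codes = coder.transform(rng.randn(5, 8))
#     # codes.shape == (5, 10); no fit step is needed since the dictionary is fixed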
class DictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
max_iter : int,
maximum number of iterations to perform
tol : float,
tolerance for numerical error
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
code_init : array of shape (n_samples, n_components),
initial value for the code, for warm restart
dict_init : array of shape (n_components, n_features),
initial values for the dictionary, for warm restart
verbose :
degree of verbosity of the printed output
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
dictionary atoms extracted from the data
error_ : array
vector of errors at each iteration
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, max_iter=1000, tol=1e-8,
fit_algorithm='lars', transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
n_jobs=1, code_init=None, dict_init=None, verbose=False,
split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.max_iter = max_iter
self.tol = tol
self.fit_algorithm = fit_algorithm
self.code_init = code_init
self.dict_init = dict_init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self: object
Returns the object itself
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
V, U, E, self.n_iter_ = dict_learning(
X, n_components, self.alpha,
tol=self.tol, max_iter=self.max_iter,
method=self.fit_algorithm,
n_jobs=self.n_jobs,
code_init=self.code_init,
dict_init=self.dict_init,
verbose=self.verbose,
random_state=random_state,
return_n_iter=True)
self.components_ = U
self.error_ = E
return self
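# Minimal usage sketch of DictionaryLearning (random data, illustrative only):
#
#     rng = np.random.RandomState(0)
#     X = rng.randn(20, 8)
#     dico = DictionaryLearning(n_components=5, alpha=1, max_iter=20,
#                               random_state=0).fit(X)
#     # dico.components_.shape == (5, 8); dico.transform(X) returns the codes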
class MiniBatchDictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Mini-batch dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
n_iter : int,
total number of iterations to perform
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data.
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
dict_init : array of shape (n_components, n_features),
initial value of the dictionary for warm restart scenarios
verbose :
degree of verbosity of the printed output
batch_size : int,
number of samples in each mini-batch
shuffle : bool,
whether to shuffle the samples before forming batches
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
components extracted from the data
inner_stats_ : tuple of (A, B) ndarrays
Internal sufficient statistics that are kept by the algorithm.
        Keeping them is useful in online settings, to avoid losing the
history of the evolution, but they shouldn't have any use for the
end user.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
DictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, n_iter=1000,
fit_algorithm='lars', n_jobs=1, batch_size=3,
shuffle=True, dict_init=None, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
verbose=False, split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.n_iter = n_iter
self.fit_algorithm = fit_algorithm
self.dict_init = dict_init
self.verbose = verbose
self.shuffle = shuffle
self.batch_size = batch_size
self.split_sign = split_sign
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
U, (A, B), self.n_iter_ = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, return_code=False,
method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=self.dict_init,
batch_size=self.batch_size, shuffle=self.shuffle,
verbose=self.verbose, random_state=random_state,
return_inner_stats=True,
return_n_iter=True)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = self.n_iter
return self
def partial_fit(self, X, y=None, iter_offset=None):
"""Updates the model using the data in X as a mini-batch.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
iter_offset: integer, optional
            The number of iterations on data batches that have been
            performed before this call to partial_fit. This is optional:
if no number is passed, the memory of the object is
used.
Returns
-------
self : object
Returns the instance itself.
"""
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
X = check_array(X)
if hasattr(self, 'components_'):
dict_init = self.components_
else:
dict_init = self.dict_init
inner_stats = getattr(self, 'inner_stats_', None)
if iter_offset is None:
iter_offset = getattr(self, 'iter_offset_', 0)
U, (A, B) = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=dict_init,
batch_size=len(X), shuffle=False,
verbose=self.verbose, return_code=False,
iter_offset=iter_offset, random_state=self.random_state_,
return_inner_stats=True, inner_stats=inner_stats)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = iter_offset + self.n_iter
return self
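# Minimal usage sketch of MiniBatchDictionaryLearning.partial_fit
# (random data, illustrative only):
#
#     rng = np.random.RandomState(0)
#     X = rng.randn(40, 8)
#     mbdl = MiniBatchDictionaryLearning(n_components=5, alpha=1, n_iter=10,
#                                        random_state=0)
#     for batch in np.array_split(X, 4):
#         mbdl.partial_fit(batch)
#     # mbdl.components_.shape == (5, 8)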
|
bsd-3-clause
|
prernaa/NLPCourseProj
|
Supervised and Unsupervised learning/n-grams.py
|
1
|
2594
|
from sklearn.feature_extraction.text import CountVectorizer
from sklearn import preprocessing
scaler = preprocessing.StandardScaler();
from sklearn import svm
from scipy.sparse import coo_matrix, hstack
def combineFeatures(f1,f2):
f1 = coo_matrix(f1)
f2 = coo_matrix(f2)
return hstack([f1,f2]).toarray()
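# Note (illustrative, not part of the original script): combineFeatures stacks
# the two feature matrices column-wise, so for f1 of shape (n, a) and f2 of
# shape (n, b) the result is a dense array of shape (n, a + b).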
inputPath = "./data/twitter-train-cleansed-B_rmnotav.tsv"
inputIdx = 3
labelIdx = 2
testPath = "./data/twitter-dev-input-B_rmnotav.tsv"
testIdx = 3
outputPath = "./data/output_alltrain.txt"
f = open(inputPath, 'r');
corpus = []
labels = []
print "getting corpus"
##def getPOSfeatures(twlst):
## return DictVectorizer()
for line in f:
lst = line.split()
twtList = lst[inputIdx:]
twt = " ".join(twtList)
corpus.append(twt)
labels.append(lst[labelIdx])
f.close()
##print corpus
cv = CountVectorizer(analyzer='word', max_features=30000, ngram_range=(1,3))
cv2 = CountVectorizer(analyzer='char_wb', max_features=50000, ngram_range=(3,6))
print "extracting features from corpus"
sparsefeatureVec = cv.fit_transform(corpus).toarray()
charNgramsFeatures = cv2.fit_transform(corpus).toarray()
sparsefeatureVec = combineFeatures(sparsefeatureVec, charNgramsFeatures)
featureNamesWordNgrams = cv.get_feature_names();
featureNamesCharNgrams = cv2.get_feature_names();
print len(featureNamesWordNgrams)
print len(featureNamesCharNgrams)
print len(sparsefeatureVec[0])
##for w in cv.get_feature_names():
## print w
print "declaring svm"
clf = svm.LinearSVC(C=0.01, class_weight='auto', penalty='l1', dual=0); # linearsvc2
print "scaling features"
sparsefeatureVec = scaler.fit_transform(sparsefeatureVec, labels);
print "training svm"
clf.fit(sparsefeatureVec, labels);
# Saving Trained Classifier
from sklearn.externals import joblib
print "Saving SVM"
fileToSave = "bestclassifier.joblib.pkl";
_ = joblib.dump(clf, fileToSave, compress=9);
print "Classifier SAVED!";
ft = open(testPath, 'r');
tcorpus = []
print "getting test data"
for line in ft:
lst = line.split()
twtList = lst[testIdx:]
twt = " ".join(twtList)
tcorpus.append(twt)
ft.close()
print "extracting features from test data"
testfeatureVec = cv.transform(tcorpus).toarray()
testcharNgramsFeatures = cv2.transform(tcorpus).toarray()
testfeatureVec = combineFeatures(testfeatureVec, testcharNgramsFeatures)
print "scaling test features"
testfeatureVec = scaler.transform(testfeatureVec)
print "predicting labels for test data"
predicted = clf.predict(testfeatureVec)
fw = open(outputPath, 'w');
for l in predicted:
fw.write(l)
fw.write('\n')
fw.close()
|
mit
|
NunoEdgarGub1/scikit-learn
|
examples/ensemble/plot_adaboost_hastie_10_2.py
|
355
|
3576
|
"""
=============================
Discrete versus Real AdaBoost
=============================
This example is based on Figure 10.2 from Hastie et al 2009 [1] and illustrates
the difference in performance between the discrete SAMME [2] boosting
algorithm and the real SAMME.R boosting algorithm. Both algorithms are evaluated
on a binary classification task where the target Y is a non-linear function
of 10 input features.
Discrete SAMME AdaBoost adapts based on errors in predicted class labels
whereas real SAMME.R uses the predicted class probabilities.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>,
# Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.ensemble import AdaBoostClassifier
n_estimators = 400
# A learning rate of 1. may not be optimal for both SAMME and SAMME.R
learning_rate = 1.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_test, y_test = X[2000:], y[2000:]
X_train, y_train = X[:2000], y[:2000]
dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
dt_stump.fit(X_train, y_train)
dt_stump_err = 1.0 - dt_stump.score(X_test, y_test)
dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)
dt.fit(X_train, y_train)
dt_err = 1.0 - dt.score(X_test, y_test)
ada_discrete = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME")
ada_discrete.fit(X_train, y_train)
ada_real = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME.R")
ada_real.fit(X_train, y_train)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',
label='Decision Stump Error')
ax.plot([1, n_estimators], [dt_err] * 2, 'k--',
label='Decision Tree Error')
ada_discrete_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):
ada_discrete_err[i] = zero_one_loss(y_pred, y_test)
ada_discrete_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):
ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)
ada_real_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_test)):
ada_real_err[i] = zero_one_loss(y_pred, y_test)
ada_real_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_train)):
ada_real_err_train[i] = zero_one_loss(y_pred, y_train)
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err,
label='Discrete AdaBoost Test Error',
color='red')
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,
label='Discrete AdaBoost Train Error',
color='blue')
ax.plot(np.arange(n_estimators) + 1, ada_real_err,
label='Real AdaBoost Test Error',
color='orange')
ax.plot(np.arange(n_estimators) + 1, ada_real_err_train,
label='Real AdaBoost Train Error',
color='green')
ax.set_ylim((0.0, 0.5))
ax.set_xlabel('n_estimators')
ax.set_ylabel('error rate')
leg = ax.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.7)
plt.show()
|
bsd-3-clause
|
brunojulia/ultracoldUB
|
gpe_fft_ts_v0.py
|
1
|
8692
|
# test
# coding: utf-8
# ## FFT solver for 1D Gross-Pitaevski equation
# We look for the complex function $\psi(x)$ satisfying the GP equation
#
# $ i\partial_t \psi = -\frac{1}{2}(i\partial_x - \Omega)^2\psi+ V(x)\psi + g|\psi|^2\psi $,
#
# with periodic boundary conditions.
#
# Integration: pseudospectral method with split (time evolution) operator;
# that is, evolving in real (R) or momentum (K) space according to the operators
# in the Hamiltonian, i.e.
# first we evaluate
#
# $\hat{\psi}(x,\frac{t}{2})=\cal{F}^{-1}\left[\exp\left(-i \frac{\hbar^2 k^2}{2} \frac{t}{2}\right)\,\psi(k,0)\right] $
#
# and later
#
# $\psi(k,t) = \exp(-i \frac{\hbar^2 k^2}{2} \frac{t}{2})\,
# \cal{F}\left[\exp\left(-i (V(x)+|\hat{\psi}(x,\frac{t}{2})|^2)\, t \right)\,\hat{\psi}(x,\frac{t}{2}) \,
# \right]$
#
# where $\cal{F}$ is the Fourier transform.
# _______________________________________________________________________________
# _______________________________________________________________________________
# Import libraries and general definitions
# -------------------------------------------------------------------------------
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
from scipy.fftpack import fft, ifft
from scipy.integrate import odeint
import numpy.linalg as lin
# comment next line to export as a python shell
#get_ipython().magic('matplotlib inline')
pi=np.pi
# Data block
# --------------------------------------------------------------------------------
# In[2]:
Zmax = 2*pi # Grid half length
Npoint = 128 # Number of grid points
Nparticle = 50 # Number of particles
a_s = 0.5 # scattering length
whoz = 1.0        # harmonic oscillator angular frequency
Omega = pi/(2*Zmax) # reference frame velocity
Ntime_fin = 10000 # total number of time steps
Ntime_out = 100 # number of time steps for intermediate outputs
Dtr = 1.0e-3 # real time step
Dti = 1.0e-3 # imaginary time step
#
# print evolution data:
#
print("Initial data:")
print(" Number of particles = %g"%(Nparticle))
print(" Harmonic oscillator angular frequency = %g"%(whoz))
print(" Domain half length = %g"%(Zmax))
print(" Number of grid points = %g"%(Npoint))
print(" Scattering length = %g"%(a_s))
print(" Total time of evolution = %g"%(Ntime_fin*Dtr))
print(" Real time step = %g"%(Dtr))
print(" Imaginary time = %g"%(Dti))
print(" Intermediate solutions = %g"%(Ntime_fin/Ntime_out-1))
# Derived quantities
# -------------------------------------------------------------------------------------
# In[3]:
NormWF = 1.0/(2*Zmax) # Wave function (WF) norm
gint = 2*a_s*Nparticle*NormWF # interaction (nonlinear-term) strength
Dz = 2*Zmax/Npoint # length step size
Dk = pi/Zmax # momentum step size
Kmax = Dk*(Npoint//2) # maximum momentum
Dt = Dtr-1j*Dti # complex time
Ninter = Ntime_fin//Ntime_out # Number of outputs with the intermediate states (integer division, used later as an index)
# Utilities
# -------------------------------------------------------------------------------------
# In[4]:
def changeFFTposition(f,N,j): # change the order in vectors from FFT
#
# f(0...N-1) is the vector to order
# N is the vector dimension
# j is a switch indicating the change direction
# physical order is f=[(-(Zmax-Dz):Dz:-Dz) (0:Dz:Zmax) ]
# FFT order is f=[(0:Dk:kmax) (-(kmax-kz):kz:-kz)]
#
f1 = f*1
if (j==1): # from physical to FFT order
for i in range(0,N//2-1) :
f1[i] = f[N//2-1+i];
f1[N//2+1+i] = f[i];
f1[N//2-1] = f[N-2];
f1[N//2] = f[N-1];
elif (j==0): # from FFT to physical order
for i in range(0,N//2-1) :
f1[i] = f[N//2+1+i];
f1[N//2+1+i] = f[i+2];
f1[N//2-1] = f[0];
f1[N//2] = f[1];
else:
print("error in changeFFTposition(f,N,j): j must be 0 or 1...")
return f1
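# Worked example (illustrative) for N=8: if f holds the physical-order values
# [-3,-2,-1,0,1,2,3,4]*Dz, then changeFFTposition(f,8,1) returns
# [0,1,2,3,4,-3,-2,-1]*Dz, i.e. the non-negative half (including Zmax) first and
# the negative half last, matching the K-order convention stated above;
# changeFFTposition(.,8,0) undoes this cyclic shift.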
# Grid definitions: physical and momentum space
# ---------------------------------------------------------------------------------------
# In[5]:
z = np.arange(-Zmax+Dz,Zmax+Dz,Dz) # physical (R-space) grid points in ascending order
# zp=[(0:Dz:Zmax) (-(Zmax-Dz):Dz:-Dz)];
zp = changeFFTposition(z,Npoint,1) # (R-space) grid points with FFT order
#print("grid points (K-order): "); print(zp)
#print(" R-order: "); print(z)
#
# kp=[(0:Dk:Kmax) (-(Kmax-Dk):Dk:-Dk)]; # grid points (K-space), FFT order
kp = np.arange(-Kmax+Dk,Kmax+Dk,Dk)
kp = changeFFTposition(kp,Npoint,1)
#print("momentum values: "); print(kp)
# Define operators
# ---------------------------------------------------------------------------------------
# In[6]:
Ekin_K = 0.5*(kp**2) # Kinetic energy in K space
T_K = np.exp(-1j*0.5*Dt*Ekin_K) # time evolution operator in K space (for second order accuracy)
# print("Ekin: "); print(Ekin_K)
#
# Potential energy in R space:
# Harmonic oscillator with angular frequency whoz:
Vpot_R = 0.5*whoz*zp**2;
# print("Vpot: "); print(Vpot_R)
# Main functions
# ________________________________________________________________________________________
# In[7]:
def Energy(c): # Energy (per particle) calculation
global gint, Vpot_R, Ekin_K, Npoint
ek = sum(Ekin_K*abs(c)**2) # Kinetic energy in K
psi = ifft(c)*Npoint; # wf FFT to R
ep = sum(Vpot_R*abs(psi)**2)/Npoint; # Potential energy
ei = 0.5*gint*sum(abs(psi)**4)/Npoint; # Interaction energy
em = ek+ep+ei; # average energy
chem_pot = em+ei; # chemical potential
return em, chem_pot, ek, ep, ei
#
def T_R_psi(t,Dt,psi): # Action of the time evolution operator over state c in R space
global gint, Vpot_R
# Includes the external potential and the interaction operators:
# T_R_psi = exp(-i Dt (Vpot_R+ gint|psi|^2) ) c
# psi is the wave function in R space
    # t is the time (which is not used for time independent Hamiltonians)
# Dt is the complex time step
#
return np.exp( -1j*Dt*(Vpot_R + gint*(abs(psi)**2)) )*psi # return action on psi
#
def gaussian(x,n,x0,w): # Gaussian wf in K space
    fx = np.pi**0.25*np.exp(-0.5*((x-x0)/w)**2); # define the Gaussian in R space
    return fft(fx)/n; # FFT to K space
#
def normaliza(c): # normalization to 1
norm = lin.norm(c)
if ((norm-1.0)>1.0e-4): # check norm
print("normalization from: ",norm)
return c/norm
# Choose initial wave function and evolve in time
# __________________________________________________________________________________________
# In[8]:
# initial wf: Gaussian centered at x=0 and width=1
c0=normaliza(gaussian(zp,Npoint,0,1)); # wf at t=0
# evolve in time: parameters
t0=0.0
tevol=np.empty([Ninter+1]) # time vector
energy_cicle=np.empty([Ninter+1,5]) # put the energies in a matrix
energy_cicle[0,:] = Energy(c0) # Energies at t=0
print("Energies: Emed mu Ekin Epot Eint")
print(" initial = %g %g %g %g %g"%(Energy(c0)))
# print("$\psi(t=0)$: "); print(ct)
c=c0
tevol[0]=t0
j=0
t=0
for i in range(1, Ntime_fin+1): # time evolution cycle
t += Dt.real
psi=ifft(T_K*c)*Npoint
c=T_K*fft(T_R_psi(t0,Dt,psi))/Npoint
c = normaliza(c); # check norm in the wf
if(not(i%Ntime_out)):
j+=1
tevol[j] = t
energy_cicle[j,:] = Energy(c)
print(" final = %g %g %g %g %g"%(Energy(c))) # check energies
print("Energy change at last step = %g"%(energy_cicle[Ninter,0]-energy_cicle[Ninter-1,0]))
# Plot convergence during the evolution in the average energy per particle
# In[10]:
f1=plt.figure()
plt.title('Convergence',fontsize=15)
plt.xlabel('time ($t \, \\omega_{ho}$)',fontsize=15)
plt.ylabel('Energy per particle ($E/\\hbar \,\\omega_{ho}$)',fontsize=15)
#plt.axis([-Zmax,Zmax,0, 8])
plt.xticks(np.arange(0, tevol[Ninter]+1,tevol[Ninter]/5))
plt.locator_params('y',nbins=3)
plt.plot(tevol, energy_cicle[:,0], 'r-')
#plt.plot(z, psi, 'r.')
f1.show()
# Plot the final density (or wave function)
# In[11]:
cc = ifft(c)*Npoint*NormWF**0.5 # FFT from K space to R space and include the wf norm
psi = changeFFTposition(cc,Npoint,0) # psi is the final wave function
# plot features
f2=plt.figure()
plt.title('Final state at $t \,\\omega_{ho}=%g$'%(tevol[Ninter]),fontsize=15)
plt.xlabel('$x/a_{ho}$',fontsize=15)
#plt.ylabel('$\\psi\,(x)$',fontsize=15)
#plt.axis([-Zmax,Zmax,0, 8])
plt.xticks(np.arange(-Zmax, Zmax+1,Zmax/2))
plt.locator_params('y',nbins=3)
#plt.plot(z, psi.real, 'r.',label='real$(\psi)$')
#plt.plot(z, psi.imag, 'b--',label='imag$(\psi)$')
plt.plot(z, abs(psi)**2, 'b--',label='$|\psi|^2$') # plot density
plt.legend(fontsize=15)
f2.show()
# In[ ]:
|
gpl-3.0
|
rohit21122012/DCASE2013
|
runs/2013/dnn_layerwise/bs1024/dnn_4layer/dnn6.py
|
11
|
32027
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# DCASE 2016::Acoustic Scene Classification / Baseline System
#import sys
#sys.path.insert(0, '../')
from src.ui import *
from src.general import *
from src.files import *
from src.features import *
from src.dataset import *
from src.evaluation import *
import numpy
import csv
import argparse
import textwrap
from sklearn.metrics import confusion_matrix
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import timeit
from sklearn.externals import joblib
from sklearn import preprocessing as pp
from sklearn import mixture
from sklearn.svm import SVC
import skflow
__version_info__ = ('1', '0', '0')
__version__ = '.'.join(__version_info__)
final_result = {}
def main(argv):
matplotlib.use('Agg')
start = timeit.default_timer()
numpy.random.seed(123456) # let's make randomization predictable
parser = argparse.ArgumentParser(
prefix_chars='-+',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''\
DCASE 2016
Task 1: Acoustic Scene Classification
Baseline system
---------------------------------------------
Tampere University of Technology / Audio Research Group
Author: Toni Heittola ( [email protected] )
System description
            This is a baseline implementation for the DCASE 2016 challenge acoustic scene classification task.
Features: MFCC (static+delta+acceleration)
Classifier: GMM
'''))
# Setup argument handling
parser.add_argument("-development", help="Use the system in the development mode", action='store_true',
default=False, dest='development')
parser.add_argument("-challenge", help="Use the system in the challenge mode", action='store_true',
default=False, dest='challenge')
parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + __version__)
args = parser.parse_args()
# Load parameters from config file
params = load_parameters('dnn6.yaml')
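    # dnn6.yaml itself is not reproduced in this listing. As a rough guide (keys inferred
    # from how `params` is used below; any values are placeholders, not the real config),
    # it is expected to provide at least:
    #   general: development_dataset, challenge_dataset, overwrite
    #   path: base, data, features, feature_normalizers, models, results, challenge_results
    #   flow: initialize, extract_features, feature_normalizer, train_system,
    #         test_system, evaluate_system
    #   features: fs, win_length_seconds, hop_length_seconds, include_mfcc0,
    #             include_delta, include_acceleration, mfcc, mfcc_delta, mfcc_acceleration
    #   classifier: method ('dnn6' here)
    #   classifier_parameters: dnn6: kwargs passed to skflow.TensorFlowDNNClassifier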
params = process_parameters(params)
title("DCASE 2016::Acoustic Scene Classification / Baseline System")
# Check if mode is defined
if not (args.development or args.challenge):
args.development = True
args.challenge = False
dataset_evaluation_mode = 'folds'
if args.development and not args.challenge:
print "Running system in development mode"
dataset_evaluation_mode = 'folds'
elif not args.development and args.challenge:
print "Running system in challenge mode"
dataset_evaluation_mode = 'full'
# Get dataset container class
dataset = eval(params['general']['development_dataset'])(data_path=params['path']['data'])
# Fetch data over internet and setup the data
# ==================================================
if params['flow']['initialize']:
dataset.fetch()
# Extract features for all audio files in the dataset
# ==================================================
if params['flow']['extract_features']:
section_header('Feature extraction')
# Collect files in train sets
files = []
for fold in dataset.folds(mode=dataset_evaluation_mode):
for item_id, item in enumerate(dataset.train(fold)):
if item['file'] not in files:
files.append(item['file'])
for item_id, item in enumerate(dataset.test(fold)):
if item['file'] not in files:
files.append(item['file'])
files = sorted(files)
# Go through files and make sure all features are extracted
do_feature_extraction(files=files,
dataset=dataset,
feature_path=params['path']['features'],
params=params['features'],
overwrite=params['general']['overwrite'])
foot()
# Prepare feature normalizers
# ==================================================
if params['flow']['feature_normalizer']:
section_header('Feature normalizer')
do_feature_normalization(dataset=dataset,
feature_normalizer_path=params['path']['feature_normalizers'],
feature_path=params['path']['features'],
dataset_evaluation_mode=dataset_evaluation_mode,
overwrite=params['general']['overwrite'])
foot()
# System training
# ==================================================
if params['flow']['train_system']:
section_header('System training')
do_system_training(dataset=dataset,
model_path=params['path']['models'],
feature_normalizer_path=params['path']['feature_normalizers'],
feature_path=params['path']['features'],
classifier_params=params['classifier']['parameters'],
classifier_method=params['classifier']['method'],
dataset_evaluation_mode=dataset_evaluation_mode,
overwrite=params['general']['overwrite']
)
foot()
# System evaluation in development mode
if args.development and not args.challenge:
# System testing
# ==================================================
if params['flow']['test_system']:
section_header('System testing')
do_system_testing(dataset=dataset,
feature_path=params['path']['features'],
result_path=params['path']['results'],
model_path=params['path']['models'],
feature_params=params['features'],
dataset_evaluation_mode=dataset_evaluation_mode,
classifier_method=params['classifier']['method'],
overwrite=params['general']['overwrite']
)
foot()
plot_name = params['classifier']['method']
# System evaluation
# ==================================================
if params['flow']['evaluate_system']:
section_header('System evaluation')
#plot_name = params['classifier']['method'] + str(params['classifier']['parameters']['n_components'])
do_system_evaluation(dataset=dataset,
dataset_evaluation_mode=dataset_evaluation_mode,
result_path=params['path']['results'],
plot_name=plot_name)
foot()
# System evaluation with challenge data
elif not args.development and args.challenge:
# Fetch data over internet and setup the data
challenge_dataset = eval(params['general']['challenge_dataset'])()
if params['flow']['initialize']:
challenge_dataset.fetch()
# System testing
if params['flow']['test_system']:
section_header('System testing with challenge data')
do_system_testing(dataset=challenge_dataset,
feature_path=params['path']['features'],
result_path=params['path']['challenge_results'],
model_path=params['path']['models'],
feature_params=params['features'],
dataset_evaluation_mode=dataset_evaluation_mode,
classifier_method=params['classifier']['method'],
overwrite=True
)
foot()
print " "
print "Your results for the challenge data are stored at ["+params['path']['challenge_results']+"]"
print " "
end = timeit.default_timer()
print " "
print "Total Time : " + str(end-start)
print " "
final_result['time'] = end-start
joblib.dump(final_result, 'result' + plot_name + '.pkl')
return 0
def process_parameters(params):
"""Parameter post-processing.
Parameters
----------
params : dict
parameters in dict
Returns
-------
params : dict
processed parameters
"""
    # Convert feature extraction window and hop sizes from seconds to samples
params['features']['mfcc']['win_length'] = int(params['features']['win_length_seconds'] * params['features']['fs'])
params['features']['mfcc']['hop_length'] = int(params['features']['hop_length_seconds'] * params['features']['fs'])
# Copy parameters for current classifier method
params['classifier']['parameters'] = params['classifier_parameters'][params['classifier']['method']]
# Hash
params['features']['hash'] = get_parameter_hash(params['features'])
params['classifier']['hash'] = get_parameter_hash(params['classifier'])
# Paths
params['path']['features'] = os.path.join(params['path']['base'], params['path']['features'],
params['features']['hash'])
params['path']['feature_normalizers'] = os.path.join(params['path']['base'], params['path']['feature_normalizers'],
params['features']['hash'])
params['path']['models'] = os.path.join(params['path']['base'], params['path']['models'],
params['features']['hash'], params['classifier']['hash'])
params['path']['results'] = os.path.join(params['path']['base'], params['path']['results'],
params['features']['hash'], params['classifier']['hash'])
return params
def get_feature_filename(audio_file, path, extension='cpickle'):
"""Get feature filename
Parameters
----------
audio_file : str
audio file name from which the features are extracted
path : str
feature path
extension : str
file extension
(Default value='cpickle')
Returns
-------
feature_filename : str
full feature filename
"""
audio_filename = os.path.split(audio_file)[1]
return os.path.join(path, os.path.splitext(audio_filename)[0] + '.' + extension)
def get_feature_normalizer_filename(fold, path, extension='cpickle'):
"""Get normalizer filename
Parameters
----------
fold : int >= 0
evaluation fold number
path : str
normalizer path
extension : str
file extension
(Default value='cpickle')
Returns
-------
normalizer_filename : str
full normalizer filename
"""
return os.path.join(path, 'scale_fold' + str(fold) + '.' + extension)
def get_model_filename(fold, path, extension='cpickle'):
"""Get model filename
Parameters
----------
fold : int >= 0
evaluation fold number
path : str
model path
extension : str
file extension
(Default value='cpickle')
Returns
-------
model_filename : str
full model filename
"""
return os.path.join(path, 'model_fold' + str(fold) + '.' + extension)
def get_result_filename(fold, path, extension='txt'):
"""Get result filename
Parameters
----------
fold : int >= 0
evaluation fold number
path : str
result path
extension : str
file extension
        (Default value='txt')
Returns
-------
result_filename : str
full result filename
"""
if fold == 0:
return os.path.join(path, 'results.' + extension)
else:
return os.path.join(path, 'results_fold' + str(fold) + '.' + extension)
def do_feature_extraction(files, dataset, feature_path, params, overwrite=False):
"""Feature extraction
Parameters
----------
files : list
file list
dataset : class
dataset class
feature_path : str
path where the features are saved
params : dict
parameter dict
overwrite : bool
overwrite existing feature files
(Default value=False)
Returns
-------
nothing
Raises
-------
IOError
Audio file not found.
"""
# Check that target path exists, create if not
check_path(feature_path)
for file_id, audio_filename in enumerate(files):
# Get feature filename
current_feature_file = get_feature_filename(audio_file=os.path.split(audio_filename)[1], path=feature_path)
progress(title_text='Extracting',
percentage=(float(file_id) / len(files)),
note=os.path.split(audio_filename)[1])
if not os.path.isfile(current_feature_file) or overwrite:
# Load audio data
if os.path.isfile(dataset.relative_to_absolute_path(audio_filename)):
y, fs = load_audio(filename=dataset.relative_to_absolute_path(audio_filename), mono=True, fs=params['fs'])
else:
raise IOError("Audio file not found [%s]" % audio_filename)
# Extract features
feature_data = feature_extraction(y=y,
fs=fs,
include_mfcc0=params['include_mfcc0'],
include_delta=params['include_delta'],
include_acceleration=params['include_acceleration'],
mfcc_params=params['mfcc'],
delta_params=params['mfcc_delta'],
acceleration_params=params['mfcc_acceleration'])
# Save
save_data(current_feature_file, feature_data)
def do_feature_normalization(dataset, feature_normalizer_path, feature_path, dataset_evaluation_mode='folds', overwrite=False):
"""Feature normalization
    Calculates normalization factors for each evaluation fold based on the training material available.
Parameters
----------
dataset : class
dataset class
feature_normalizer_path : str
path where the feature normalizers are saved.
feature_path : str
path where the features are saved.
dataset_evaluation_mode : str ['folds', 'full']
evaluation mode, 'full' all material available is considered to belong to one fold.
(Default value='folds')
overwrite : bool
overwrite existing normalizers
(Default value=False)
Returns
-------
nothing
Raises
-------
IOError
Feature file not found.
"""
# Check that target path exists, create if not
check_path(feature_normalizer_path)
for fold in dataset.folds(mode=dataset_evaluation_mode):
current_normalizer_file = get_feature_normalizer_filename(fold=fold, path=feature_normalizer_path)
if not os.path.isfile(current_normalizer_file) or overwrite:
# Initialize statistics
file_count = len(dataset.train(fold))
normalizer = FeatureNormalizer()
for item_id, item in enumerate(dataset.train(fold)):
progress(title_text='Collecting data',
fold=fold,
percentage=(float(item_id) / file_count),
note=os.path.split(item['file'])[1])
# Load features
if os.path.isfile(get_feature_filename(audio_file=item['file'], path=feature_path)):
feature_data = load_data(get_feature_filename(audio_file=item['file'], path=feature_path))['stat']
else:
raise IOError("Feature file not found [%s]" % (item['file']))
# Accumulate statistics
normalizer.accumulate(feature_data)
# Calculate normalization factors
normalizer.finalize()
# Save
save_data(current_normalizer_file, normalizer)
def do_system_training(dataset, model_path, feature_normalizer_path, feature_path, classifier_params,
dataset_evaluation_mode='folds', classifier_method='dnn6', overwrite=False):
"""System training
model container format:
{
'normalizer': normalizer class
'models' :
{
'office' : mixture.GMM class
'home' : mixture.GMM class
...
}
}
Parameters
----------
dataset : class
dataset class
model_path : str
path where the models are saved.
feature_normalizer_path : str
path where the feature normalizers are saved.
feature_path : str
path where the features are saved.
classifier_params : dict
parameter dict
dataset_evaluation_mode : str ['folds', 'full']
evaluation mode, 'full' all material available is considered to belong to one fold.
(Default value='folds')
classifier_method : str ['dnn6']
        classifier method, currently only 'dnn6' supported
(Default value='dnn6')
overwrite : bool
overwrite existing models
(Default value=False)
Returns
-------
nothing
Raises
-------
ValueError
classifier_method is unknown.
IOError
Feature normalizer not found.
Feature file not found.
"""
if classifier_method != 'dnn6':
raise ValueError("Unknown classifier method ["+classifier_method+"]")
# Check that target path exists, create if not
check_path(model_path)
for fold in dataset.folds(mode=dataset_evaluation_mode):
current_model_file = get_model_filename(fold=fold, path=model_path)
if not os.path.isfile(current_model_file) or overwrite:
# Load normalizer
feature_normalizer_filename = get_feature_normalizer_filename(fold=fold, path=feature_normalizer_path)
if os.path.isfile(feature_normalizer_filename):
normalizer = load_data(feature_normalizer_filename)
else:
raise IOError("Feature normalizer not found [%s]" % feature_normalizer_filename)
# Initialize model container
model_container = {'normalizer': normalizer, 'models': {}}
# Collect training examples
file_count = len(dataset.train(fold))
data = {}
for item_id, item in enumerate(dataset.train(fold)):
progress(title_text='Collecting data',
fold=fold,
percentage=(float(item_id) / file_count),
note=os.path.split(item['file'])[1])
# Load features
feature_filename = get_feature_filename(audio_file=item['file'], path=feature_path)
if os.path.isfile(feature_filename):
feature_data = load_data(feature_filename)['feat']
else:
raise IOError("Features not found [%s]" % (item['file']))
# Scale features
feature_data = model_container['normalizer'].normalize(feature_data)
# Store features per class label
if item['scene_label'] not in data:
data[item['scene_label']] = feature_data
else:
data[item['scene_label']] = numpy.vstack((data[item['scene_label']], feature_data))
le = pp.LabelEncoder()
tot_data = {}
# Train models for each class
for label in data:
progress(title_text='Train models',
fold=fold,
note=label)
if classifier_method == 'dnn6':
# model_container['models'][label] = mixture.GMM(**classifier_params).fit(data[label])
if 'x' not in tot_data:
tot_data['x'] = data[label]
tot_data['y'] = numpy.repeat(label,len(data[label]), axis=0)
else:
tot_data['x'] = numpy.vstack((tot_data['x'], data[label]))
#print tot_data['y'].shape, numpy.repeat(label,len(data[label]), axis=0).shape
tot_data['y'] = numpy.hstack((tot_data['y'], numpy.repeat(label, len(data[label]), axis=0)))
else:
raise ValueError("Unknown classifier method ["+classifier_method+"]")
clf = skflow.TensorFlowDNNClassifier(**classifier_params)
if classifier_method == 'dnn6':
tot_data['y'] = le.fit_transform(tot_data['y'])
clf.fit(tot_data['x'], tot_data['y'])
clf.save('dnn6/dnn6model1')
print model_container['models']
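            # Note: for 'dnn6' the trained network is persisted separately via
            # clf.save('dnn6/dnn6model1') and restored in do_classification_dnn6();
            # the model file written below therefore carries the feature normalizer,
            # while model_container['models'] stays empty.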
# Save models
save_data(current_model_file, model_container)
#clf.save(current_model_file);
def do_system_testing(dataset, result_path, feature_path, model_path, feature_params,
dataset_evaluation_mode='folds', classifier_method='dnn6', overwrite=False):
"""System testing.
If extracted features are not found from disk, they are extracted but not saved.
Parameters
----------
dataset : class
dataset class
result_path : str
path where the results are saved.
feature_path : str
path where the features are saved.
model_path : str
path where the models are saved.
feature_params : dict
parameter dict
dataset_evaluation_mode : str ['folds', 'full']
evaluation mode, 'full' all material available is considered to belong to one fold.
(Default value='folds')
classifier_method : str ['dnn6']
        classifier method, currently only 'dnn6' supported
(Default value='dnn6')
overwrite : bool
overwrite existing models
(Default value=False)
Returns
-------
nothing
Raises
-------
ValueError
classifier_method is unknown.
IOError
Model file not found.
Audio file not found.
"""
if classifier_method != 'dnn6':
raise ValueError("Unknown classifier method ["+classifier_method+"]")
# Check that target path exists, create if not
check_path(result_path)
for fold in dataset.folds(mode=dataset_evaluation_mode):
current_result_file = get_result_filename(fold=fold, path=result_path)
if not os.path.isfile(current_result_file) or overwrite:
results = []
# Load class model container
model_filename = get_model_filename(fold=fold, path=model_path)
if os.path.isfile(model_filename):
model_container = load_data(model_filename)
else:
raise IOError("Model file not found [%s]" % model_filename)
file_count = len(dataset.test(fold))
for file_id, item in enumerate(dataset.test(fold)):
progress(title_text='Testing',
fold=fold,
percentage=(float(file_id) / file_count),
note=os.path.split(item['file'])[1])
# Load features
feature_filename = get_feature_filename(audio_file=item['file'], path=feature_path)
if os.path.isfile(feature_filename):
feature_data = load_data(feature_filename)['feat']
else:
# Load audio
if os.path.isfile(dataset.relative_to_absolute_path(item['file'])):
y, fs = load_audio(filename=dataset.relative_to_absolute_path(item['file']), mono=True, fs=feature_params['fs'])
else:
raise IOError("Audio file not found [%s]" % (item['file']))
feature_data = feature_extraction(y=y,
fs=fs,
include_mfcc0=feature_params['include_mfcc0'],
include_delta=feature_params['include_delta'],
include_acceleration=feature_params['include_acceleration'],
mfcc_params=feature_params['mfcc'],
delta_params=feature_params['mfcc_delta'],
acceleration_params=feature_params['mfcc_acceleration'],
statistics=False)['feat']
# Normalize features
feature_data = model_container['normalizer'].normalize(feature_data)
# Do classification for the block
if classifier_method == 'dnn6':
current_result = dataset.scene_labels[do_classification_dnn6(feature_data, model_container)]
else:
raise ValueError("Unknown classifier method ["+classifier_method+"]")
# Store the result
results.append((dataset.absolute_to_relative(item['file']), current_result))
# Save testing results
with open(current_result_file, 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for result_item in results:
writer.writerow(result_item)
def do_classification_dnn6(feature_data, model_container):
"""GMM classification for give feature matrix
model container format:
{
'normalizer': normalizer class
'models' :
{
'office' : mixture.GMM class
'home' : mixture.GMM class
...
}
}
Parameters
----------
feature_data : numpy.ndarray [shape=(t, feature vector length)]
feature matrix
model_container : dict
model container
Returns
-------
result : str
classification result as scene label
"""
# Initialize log-likelihood matrix to -inf
logls = numpy.empty(10)
logls.fill(-numpy.inf)
model_clf = skflow.TensorFlowEstimator.restore('dnn6/dnn6model1');
#for label_id, label in enumerate(model_container['models']):
# logls[label_id] = numpy.sum(model_container['models'][label].score(feature_data))
logls = numpy.sum(numpy.log(model_clf.predict_proba(feature_data)),0)
#print logls
classification_result_id = numpy.argmax(logls)
return classification_result_id
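# A hedged equivalent of the aggregation above (the helper name `_frame_logls` is an
# assumption; it is not called anywhere in this script): per-class sums of frame-wise
# log-probabilities, matching numpy.sum(numpy.log(model_clf.predict_proba(...)), 0).
def _frame_logls(frame_probs):
    # frame_probs: numpy.ndarray of shape (n_frames, n_classes), e.g. from predict_proba()
    logls = numpy.zeros(frame_probs.shape[1])
    for p in frame_probs:
        logls += numpy.log(p)
    return logls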
def plot_cm(cm, targets, title='Confusion Matrix', cmap=plt.cm.Blues, norm=True, name='Plot'):
if(norm):
cm = cm.astype(float)/cm.sum(axis=1)[:, numpy.newaxis]
fig = plt.figure()
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title + ' ' + name)
plt.colorbar()
tick_marks = numpy.arange(len(targets))
plt.xticks(tick_marks, targets,rotation=45)
plt.yticks(tick_marks, targets)
plt.ylabel('True Label')
plt.xlabel('Predicted Label')
# plt.show()
fig.savefig(name + '.png')
#plt.close()
def do_system_evaluation(dataset, result_path, plot_name, dataset_evaluation_mode='folds'):
"""System evaluation. Testing outputs are collected and evaluated. Evaluation results are printed.
Parameters
----------
dataset : class
dataset class
result_path : str
path where the results are saved.
dataset_evaluation_mode : str ['folds', 'full']
evaluation mode, 'full' all material available is considered to belong to one fold.
(Default value='folds')
Returns
-------
nothing
Raises
-------
IOError
Result file not found
"""
dcase2016_scene_metric = DCASE2016_SceneClassification_Metrics(class_list=dataset.scene_labels)
results_fold = []
print str(dataset.scene_label_count)
tot_cm = numpy.zeros((dataset.scene_label_count, dataset.scene_label_count))
for fold in dataset.folds(mode=dataset_evaluation_mode):
dcase2016_scene_metric_fold = DCASE2016_SceneClassification_Metrics(class_list=dataset.scene_labels)
results = []
result_filename = get_result_filename(fold=fold, path=result_path)
if os.path.isfile(result_filename):
with open(result_filename, 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
results.append(row)
else:
raise IOError("Result file not found [%s]" % result_filename)
y_true = []
y_pred = []
for result in results:
y_true.append(dataset.file_meta(result[0])[0]['scene_label'])
y_pred.append(result[1])
#print dataset.file_meta(result[0])[0]['scene_label'] + ' ' + result[1]
dcase2016_scene_metric.evaluate(system_output=y_pred, annotated_ground_truth=y_true)
dcase2016_scene_metric_fold.evaluate(system_output=y_pred, annotated_ground_truth=y_true)
results_fold.append(dcase2016_scene_metric_fold.results())
tot_cm += confusion_matrix(y_true, y_pred)
#print ' '
print tot_cm
#plot_cm(tot_cm, dataset.scene_labels,name=plot_name)
#joblib.dump(tot_cm, plot_name + '.pkl')
final_result['tot_cm'] = tot_cm
final_result['tot_cm_acc'] = numpy.sum(numpy.diag(tot_cm))/numpy.sum(tot_cm)
results = dcase2016_scene_metric.results()
print " File-wise evaluation, over %d folds" % dataset.fold_count
fold_labels = ''
separator = ' =====================+======+======+==========+ +'
if dataset.fold_count > 1:
for fold in dataset.folds(mode=dataset_evaluation_mode):
fold_labels += " {:8s} |".format('Fold'+str(fold))
separator += "==========+"
print " {:20s} | {:4s} : {:4s} | {:8s} | |".format('Scene label', 'Nref', 'Nsys', 'Accuracy')+fold_labels
print separator
for label_id, label in enumerate(sorted(results['class_wise_accuracy'])):
fold_values = ''
if dataset.fold_count > 1:
for fold in dataset.folds(mode=dataset_evaluation_mode):
fold_values += " {:5.1f} % |".format(results_fold[fold-1]['class_wise_accuracy'][label] * 100)
print " {:20s} | {:4d} : {:4d} | {:5.1f} % | |".format(label,
results['class_wise_data'][label]['Nref'],
results['class_wise_data'][label]['Nsys'],
results['class_wise_accuracy'][label] * 100)+fold_values
print separator
fold_values = ''
if dataset.fold_count > 1:
for fold in dataset.folds(mode=dataset_evaluation_mode):
fold_values += " {:5.1f} % |".format(results_fold[fold-1]['overall_accuracy'] * 100)
print " {:20s} | {:4d} : {:4d} | {:5.1f} % | |".format('Overall accuracy',
results['Nref'],
results['Nsys'],
results['overall_accuracy'] * 100)+fold_values
final_result['result'] = results
if __name__ == "__main__":
try:
sys.exit(main(sys.argv))
except (ValueError, IOError) as e:
sys.exit(e)
|
mit
|
vermouthmjl/scikit-learn
|
sklearn/linear_model/tests/test_omp.py
|
272
|
7752
|
# Author: Vlad Niculae
# Licence: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
LinearRegression)
from sklearn.utils import check_random_state
from sklearn.datasets import make_sparse_coded_signal
n_samples, n_features, n_nonzero_coefs, n_targets = 20, 30, 5, 3
y, X, gamma = make_sparse_coded_signal(n_targets, n_features, n_samples,
n_nonzero_coefs, random_state=0)
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
# this makes X (n_samples, n_features)
# and y (n_samples, 3)
def test_correct_shapes():
assert_equal(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp(X, y, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_correct_shapes_gram():
assert_equal(orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_n_nonzero_coefs():
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0],
n_nonzero_coefs=5)) <= 5)
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5,
precompute=True)) <= 5)
def test_tol():
tol = 0.5
gamma = orthogonal_mp(X, y[:, 0], tol=tol)
gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol)
def test_with_without_gram():
assert_array_almost_equal(
orthogonal_mp(X, y, n_nonzero_coefs=5),
orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True))
def test_with_without_gram_tol():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=1.),
orthogonal_mp(X, y, tol=1., precompute=True))
def test_unreachable_accuracy():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=0),
orthogonal_mp(X, y, n_nonzero_coefs=n_features))
assert_array_almost_equal(
assert_warns(RuntimeWarning, orthogonal_mp, X, y, tol=0,
precompute=True),
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_features))
def test_bad_input():
assert_raises(ValueError, orthogonal_mp, X, y, tol=-1)
assert_raises(ValueError, orthogonal_mp, X, y, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp, X, y,
n_nonzero_coefs=n_features + 1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, tol=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy,
n_nonzero_coefs=n_features + 1)
def test_perfect_signal_recovery():
idx, = gamma[:, 0].nonzero()
gamma_rec = orthogonal_mp(X, y[:, 0], 5)
gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], 5)
assert_array_equal(idx, np.flatnonzero(gamma_rec))
assert_array_equal(idx, np.flatnonzero(gamma_gram))
assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2)
assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2)
def test_estimator():
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_.shape, ())
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_.shape, (n_targets,))
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
omp.set_params(fit_intercept=False, normalize=False)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
def test_identical_regressors():
newX = X.copy()
newX[:, 1] = newX[:, 0]
gamma = np.zeros(n_features)
gamma[0] = gamma[1] = 1.
newy = np.dot(newX, gamma)
assert_warns(RuntimeWarning, orthogonal_mp, newX, newy, 2)
def test_swapped_regressors():
gamma = np.zeros(n_features)
# X[:, 21] should be selected first, then X[:, 0] selected second,
# which will take X[:, 21]'s place in case the algorithm does
# column swapping for optimization (which is the case at the moment)
gamma[21] = 1.0
gamma[0] = 0.5
new_y = np.dot(X, gamma)
new_Xy = np.dot(X.T, new_y)
gamma_hat = orthogonal_mp(X, new_y, 2)
gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, 2)
assert_array_equal(np.flatnonzero(gamma_hat), [0, 21])
assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21])
def test_no_atoms():
y_empty = np.zeros_like(y)
Xy_empty = np.dot(X.T, y_empty)
gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, 1)
gamma_empty_gram = ignore_warnings(orthogonal_mp)(G, Xy_empty, 1)
assert_equal(np.all(gamma_empty == 0), True)
assert_equal(np.all(gamma_empty_gram == 0), True)
def test_omp_path():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
path = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_return_path_prop_with_gram():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True,
precompute=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False,
precompute=True)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_cv():
y_ = y[:, 0]
gamma_ = gamma[:, 0]
ompcv = OrthogonalMatchingPursuitCV(normalize=True, fit_intercept=False,
max_iter=10, cv=5)
ompcv.fit(X, y_)
assert_equal(ompcv.n_nonzero_coefs_, n_nonzero_coefs)
assert_array_almost_equal(ompcv.coef_, gamma_)
omp = OrthogonalMatchingPursuit(normalize=True, fit_intercept=False,
n_nonzero_coefs=ompcv.n_nonzero_coefs_)
omp.fit(X, y_)
assert_array_almost_equal(ompcv.coef_, omp.coef_)
def test_omp_reaches_least_squares():
# Use small simple data; it's a sanity check but OMP can stop early
rng = check_random_state(0)
n_samples, n_features = (10, 8)
n_targets = 3
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_targets)
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features)
lstsq = LinearRegression()
omp.fit(X, Y)
lstsq.fit(X, Y)
assert_array_almost_equal(omp.coef_, lstsq.coef_)
|
bsd-3-clause
|
navjeet0211/phd
|
hsa/pc1-pc2-scatter-histogram-kmean.py
|
1
|
1862
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
from prody import *
from pylab import *
from scipy.cluster.vq import *
pca = loadModel('hsa_xray.pca.npz')
ensemble = loadEnsemble('hsa.ens.npz')
# x for pc1 and y for pc2
x = calcProjection(ensemble, pca[:2])[:,0]
y = calcProjection(ensemble, pca[:2])[:,1]
################## k mean data #################
m = x.reshape(-1,1)
n = y.reshape(-1,1)
mn = np.hstack((m,n))
cl = kmeans2(mn, 2)
################################################
nullfmt = NullFormatter() # no labels
# definitions for the axes
left, width = 0.1, 0.65
bottom, height = 0.1, 0.65
bottom_h = left_h = left+width+0.02
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom_h, width, 0.2]
rect_histy = [left_h, bottom, 0.2, height]
# start with a rectangular Figure
plt.figure(1, figsize=(8,8))
axScatter = plt.axes(rect_scatter)
axHistx = plt.axes(rect_histx)
axHisty = plt.axes(rect_histy)
# no labels
axHistx.xaxis.set_major_formatter(nullfmt)
axHisty.yaxis.set_major_formatter(nullfmt)
# the scatter plot:
#axScatter.scatter(x, y)
#### plot k mean data ##########
axScatter.scatter(mn[:,0], mn[:,1], c=cl[1], cmap = plt.cm.jet, facecolor='none')
# now determine nice limits by hand:
binwidth = 0.1
xymax = np.max( [np.max(np.fabs(x)), np.max(np.fabs(y))] )
lim = ( int(xymax/binwidth) + 1) * binwidth
#axScatter.set_xlim( (-lim, 3.5) )
#axScatter.set_ylim( (-2.5, 1) )
axScatter.set_xlabel('PC1', fontsize=14)
axScatter.set_ylabel('PC2', fontsize=14)
bins = np.arange(-lim, lim + binwidth, binwidth)
axHistx.hist(x, bins=bins)
axHisty.hist(y, bins=bins, orientation='horizontal')
axHistx.set_xlim( axScatter.get_xlim() )
axHisty.set_ylim( axScatter.get_ylim() )
plt.savefig("pc1-pc2-proj-kmean.png",dpi=92, papertype='letter', format='png')
plt.show()
|
gpl-2.0
|
raincoatrun/basemap
|
doc/users/figures/plotprecip.py
|
6
|
1879
|
from mpl_toolkits.basemap import Basemap, cm
# requires netcdf4-python (netcdf4-python.googlecode.com)
from netCDF4 import Dataset as NetCDFFile
import numpy as np
import matplotlib.pyplot as plt
# plot rainfall from NWS using special precipitation
# colormap used by the NWS, and included in basemap.
nc = NetCDFFile('../../../examples/nws_precip_conus_20061222.nc')
# data from http://water.weather.gov/precip/
prcpvar = nc.variables['amountofprecip']
data = 0.01*prcpvar[:]
latcorners = nc.variables['lat'][:]
loncorners = -nc.variables['lon'][:]
lon_0 = -nc.variables['true_lon'].getValue()
lat_0 = nc.variables['true_lat'].getValue()
# create figure and axes instances
fig = plt.figure(figsize=(8,8))
ax = fig.add_axes([0.1,0.1,0.8,0.8])
# create polar stereographic Basemap instance.
m = Basemap(projection='stere',lon_0=lon_0,lat_0=90.,lat_ts=lat_0,\
llcrnrlat=latcorners[0],urcrnrlat=latcorners[2],\
llcrnrlon=loncorners[0],urcrnrlon=loncorners[2],\
rsphere=6371200.,resolution='l',area_thresh=10000)
# draw coastlines, state and country boundaries, edge of map.
m.drawcoastlines()
m.drawstates()
m.drawcountries()
# draw parallels.
parallels = np.arange(0.,90,10.)
m.drawparallels(parallels,labels=[1,0,0,0],fontsize=10)
# draw meridians
meridians = np.arange(180.,360.,10.)
m.drawmeridians(meridians,labels=[0,0,0,1],fontsize=10)
ny = data.shape[0]; nx = data.shape[1]
lons, lats = m.makegrid(nx, ny) # get lat/lons of ny by nx evenly spaced grid.
x, y = m(lons, lats) # compute map proj coordinates.
# draw filled contours.
clevs = [0,1,2.5,5,7.5,10,15,20,30,40,50,70,100,150,200,250,300,400,500,600,750]
cs = m.contourf(x,y,data,clevs,cmap=cm.s3pcpn)
# add colorbar.
cbar = m.colorbar(cs,location='bottom',pad="5%")
cbar.set_label('mm')
# add title
plt.title(prcpvar.long_name+' for period ending '+prcpvar.dateofdata)
plt.show()
|
gpl-2.0
|
tapomayukh/projects_in_python
|
clustering/k_means_object_data_foliage.py
|
1
|
5543
|
# Idea: collect training data without creating special environments by clustering the data into the given number of objects / object categories in an environment. After clustering, a human can label the clusters into object categories / objects, or we can use the cluster centroids to find the category by running HMM models on them (if the object type is the same).
# Limitation: data lengths should be the same (add missing data).
import pylab as pyl
import matplotlib.pyplot as pp
import numpy as np
import scipy as scp
import scipy.ndimage as ni
from scipy.cluster.vq import kmeans, kmeans2, vq, whiten
import pandas as pd
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/rapid_categorization/taxel_based/')
from hmm_taxel_based_foliage import HMM_Model
def check_accuracy(idxs, temp_num_fol, temp_num_trunk):
leaf = []
trunk = []
global nClusters
global leaf_idx
global trunk_idx
#print np.array(idxs)[0:temp_num_fol]
#print np.array(idxs)[temp_num_fol:temp_num_fol+temp_num_trunk]
for j in range(nClusters):
leaf.append(sum(1 for i in np.array(idxs)[0:temp_num_fol] if i==j))
trunk.append(sum(1 for i in np.array(idxs)[temp_num_fol:temp_num_fol+temp_num_trunk] if i==j))
#print leaf, trunk
accuracy = float((leaf[leaf_idx] + trunk[trunk_idx]))*100./float((sum(leaf) + sum(trunk)))
#accuracyl0t1 = float((leaf[0] + trunk[1]))*100./float((sum(leaf) + sum(trunk)))
#print accuracyl0t1
#accuracyl1t0 = float((leaf[1] + trunk[0]))*100./float((sum(leaf) + sum(trunk)))
#print accuracyl1t0
return accuracy
def test_data(num_clusters):
global hMM
global model_ff_trained
global model_tf_trained
global centroids
leaf_idx = 0
trunk_idx = 1
for i in range(num_clusters):
ts_obj = centroids[i].tolist()
path_ff_obj = hMM.test(model_ff_trained, ts_obj)
path_tf_obj = hMM.test(model_tf_trained, ts_obj)
obj = max(path_ff_obj[1],path_tf_obj[1])
if (obj == path_ff_obj[1]):
print "Cluster no. ", i, "is : Leaf-like"
leaf_idx = i
if (obj == path_tf_obj[1]):
print "Cluster no. ", i, "is : Trunk-like"
trunk_idx = i
return leaf_idx, trunk_idx
class K_Means:
def __init__(self, Fmat, data_len, nClusters, temp_num_fol, temp_num_trunk):
self.Fmat = Fmat
self.data = pd.DataFrame(Fmat)
self.data_length = data_len
self.num_clusters = nClusters
self.clustered_data = {}
self.Foliage_trials = temp_num_fol
self.Trunk_trials = temp_num_trunk
def removeforval(self, val):
if isinstance(val,list):
val = val[0]
else:
val = val
return val
def removeforcol(self, col):
col = col.apply(self.removeforval)
return col
def add_missing_data(self):
self.data = self.data.apply(self.removeforcol,axis=0)
for i in range(self.Foliage_trials + self.Trunk_trials):
self.data.iloc[i] = self.data.iloc[i].interpolate()
self.data = self.data.iloc[:,0:self.data_length]
self.Fmat = np.asarray(self.data)
def run_k_means(self):
self.add_missing_data()
self.whitened_data = whiten(self.Fmat)
# computing K-Means
self.centroids,_ = kmeans(self.Fmat,self.num_clusters, iter=10000, thresh=1e-50)
#self.centroids,self.idx = kmeans2(self.Fmat,self.num_clusters, iter=10000, thresh=1e-50, minit='random', missing='warn')
# assign each sample to a cluster
self.idx,_ = vq(self.Fmat,self.centroids)
#print self.centroids, self.idx
return self.centroids, self.idx
def plot_data(self):
# data generation
time = np.linspace(0,self.data_length/100.,self.data_length)
# plot using numpy's logical indexing
color = ['g','b']
for i in range(self.num_clusters):
pp.figure(i)
self.clustered_data[i] = self.Fmat[self.idx==i].tolist()
m,n = np.shape(np.array(self.Fmat[self.idx==i]))
for j in range(m):
pp.plot(time, self.clustered_data[i][j], color[i])
pp.plot(time, self.centroids[i].tolist(),'--m', linewidth='4')
return self.clustered_data
####################################################################################################
if __name__ == '__main__':
# Params
data_length = 50
nClusters = 2
num_features = 2
num_states = 10
if num_features == 1:
from data_variable_length_force_sample import Fmat_original, temp_num_fol, temp_num_trunk
elif num_features == 2:
from data_variable_length_force_motion_sample_scaled_wrt_all_data import Fmat_original, temp_num_fol, temp_num_trunk
# Clustering using K-Means
k_means = K_Means(Fmat_original, data_length, nClusters, temp_num_fol, temp_num_trunk)
centroids, idxs = k_means.run_k_means()
clustered_data = k_means.plot_data()
# HMM Implementation
hMM = HMM_Model(Fmat_original, temp_num_fol, temp_num_trunk, num_features, num_states)
model_ff = hMM.create_model('Foliage')
model_tf = hMM.create_model('Trunk')
# Train the models
model_ff_trained = hMM.train(model_ff, 'Foliage')
model_tf_trained = hMM.train(model_tf, 'Trunk')
# Test the model
leaf_idx, trunk_idx = test_data(nClusters)
#print leaf_idx, trunk_idx
# Check accuracy of Clustering Method
accuracy = check_accuracy(idxs, temp_num_fol, temp_num_trunk)
print accuracy
# Show plot
pp.show()
|
mit
|
rishikksh20/scikit-learn
|
examples/cluster/plot_birch_vs_minibatchkmeans.py
|
333
|
3694
|
"""
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
time_ = time() - t
print("Birch %s as the final step took %0.2f seconds" % (
info, (time() - t)))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
|
bsd-3-clause
|
dolaameng/practical_munging_tools
|
munging/session.py
|
1
|
13731
|
"""
## preconditions of data transformations
1. centering & scaling <- unskewed log-transformation for skewed data (or outlier/invalid removal)
2. unskewed log-transformation <- missing value imputation / noninformative feature removal
3. missing value imputation <- None
4. feature l2 normalization <- centering & scaling
5. pca <- centering & scaling
6. discretization <- missing value imputation
7. zero-variance features <- None
"""
import pandas as pd
import numpy as np
from scipy import stats
import math
import matplotlib.pyplot as plt
from sklearn.cross_validation import train_test_split
from sklearn.metrics import roc_auc_score
import statsmodels.api as sm
from transform import *
from model import *
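# A minimal usage sketch (not part of the original module; the DataFrame `df` and its
# "label" target column are assumptions). It follows the precondition ordering from the
# module docstring above and only defines a function that is never called here.
def _example_pipeline(df):
    session = Session(df, target_feature="label", test_frac=0.3)
    session.impute_features()         # 3. missing value imputation first
    session.evenize_skew_features()   # 2. unskewing log-transform of skewed numericals
    session.whiten_features()         # 1. centering & scaling
    return session.get_data()         # -> (train_data, test_data)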
class Session(object):
def __init__(self, data, target_feature, test_frac = 0.3, copy = True, random_state = None):
self.data = data.copy() if copy else data
self.target_feature = target_feature
self.train_index, self.test_index = train_test_split(np.arange(data.shape[0]),
test_size = test_frac, random_state=random_state)
self.removed_features = np.array([])
self.params = {
"MIN_NUM_VALUES_FOR_NUMERICAL": 5
, "FRAC_OF_NA_TO_IGNORE": 0.95
, "FRAC_OF_FEAT_TO_BE_NONINFORMATIVE": 0.96
, "SKEWNESS_THR": 20
, "REDUNDANT_FEAT_CORR_THR": 0.95
}
def set_parameters(self, **params):
self.params.update(params)
def get_parameters(self):
return self.params
def get_data(self, selected_features = None):
if selected_features is None:
selected_features = self.get_all_input_features()
selected_features = np.append(selected_features, self.target_feature)
train_data = self.data.iloc[self.train_index, :].loc[:, selected_features]
test_data = self.data.iloc[self.test_index, :].loc[:, selected_features]
return (train_data, test_data)
def get_transform_combiners(self, transformers):
combiner = TransformPipeline(transformers)
return combiner
########################## Feature Filtering ##########################
def is_numerical_feature(self, feature_name):
ftype = self.data[feature_name].dtype
if ftype in np.array([np.double, np.float]):
return True
elif ftype in np.array([np.int]):
return len(self.data[feature_name].unique()) >= self.params["MIN_NUM_VALUES_FOR_NUMERICAL"]
else:
return False
def is_categorical_feature(self, feature_name):
ftype = self.data[feature_name].dtype
if ftype in np.array([np.bool, np.object]):
return True
elif ftype in np.array([np.int]):
return len(self.data[feature_name].unique()) < self.params["MIN_NUM_VALUES_FOR_NUMERICAL"]
else:
return False
def is_na_feature(self, feature_name):
return np.any(pd.isnull(self.data[feature_name]))
def is_na_heavy(self, feature_name):
return np.mean(pd.isnull(self.data[feature_name])) >= self.params["FRAC_OF_NA_TO_IGNORE"]
def is_skewed_numerical_feature(self, feature_name):
if not self.is_numerical_feature(feature_name):
return False
skewness, pvalue = stats.skewtest(self.data[feature_name].dropna())
if skewness >= self.params["SKEWNESS_THR"] and pvalue <= 0.01:
return True
else:
return False
def is_noninformative_feature(self, feature_name):
value_counts = pd.value_counts(self.data[feature_name], dropna = False)
if len(value_counts) == 1:
return True
elif value_counts.max()*1./self.data.shape[0] >= self.params["FRAC_OF_FEAT_TO_BE_NONINFORMATIVE"]:
return True
return False
def is_numerized_from_categorical_feature(self, feature_name):
return feature_name.endswith("_NUMERIZED")
def get_features_of(self, criterion = None):
return np.asarray([f for f in self.get_all_input_features()
if criterion(f)])
def get_all_input_features(self):
return np.asarray([f for f in self.data.columns
if f not in self.removed_features
if f != self.target_feature])
def find_redundant_features(self, feature_names = None):
if feature_names is None:
feature_names = self.get_features_of(self.is_numerical_feature)
corrmat = self.data.loc[:, feature_names].dropna().corr().abs()
corrmat = corrmat.fillna(value = 0)
for i in xrange(corrmat.shape[0]):
corrmat.iloc[i, i] = 0
corrmean = corrmat.mean(axis = 0)
redundant_feats = []
while True:
try:
corr_max = np.asarray(corrmat).max()
if corr_max <= self.params["REDUNDANT_FEAT_CORR_THR"]:
break
f1, f2 = corrmat.columns[list(zip(*np.where(corrmat == corr_max))[0])]
f = f1 if corrmean[f1] > corrmean[f2] else f2
redundant_feats.append(f)
corrmat.loc[:, f] = 0
corrmat.loc[f, :] = 0
except:
print corr_max
print corrmat.columns[list(zip(*np.where(corrmat == corr_max))[0])]
break
return redundant_feats
########################## Feature Transformation ##########################
def remove_features(self, feature_names):
self.removed_features = np.unique(np.hstack([self.removed_features, feature_names]))
remover = FeatureRemover(feature_names)
return remover
def impute_features(self, feature_names = None, auto_remove = True):
if feature_names is None:
feature_names = self.get_features_of(self.is_na_feature)
feature_types = ['categorical' if self.is_categorical_feature(f) else 'numerical'
for f in feature_names]
feature_imputer = FeatureImputer(dict(zip(feature_names, feature_types)))
feature_imputer.fit(self.data.iloc[self.train_index, :])
self.data = feature_imputer.transform(self.data)
if auto_remove:
remover = self.remove_features(feature_names)
return TransformPipeline([feature_imputer, remover])
else:
return feature_imputer
def evenize_skew_features(self, feature_names = None, auto_remove = False):
if feature_names is None:
feature_names = self.get_features_of(self.is_skewed_numerical_feature)
feature_transforms = ['log' if self.data[f].min() > 0
else 'log_plus1' if self.data[f].min() >= 0
else 'signed_log'
for f in feature_names]
feature_evenizer = NumericalFeatureEvenizer(dict(zip(feature_names, feature_transforms)))
feature_evenizer.fit(self.data.iloc[self.train_index, :])
self.data = feature_evenizer.transform(self.data)
if auto_remove:
remover = self.remove_features(feature_names)
return TransformPipeline([feature_evenizer, remover])
else:
return feature_evenizer
def whiten_features(self, feature_names = None, auto_remove = False):
if feature_names is None:
feature_names = self.get_features_of(self.is_numerical_feature)
feature_whitener = NumericalFeatureWhitener(feature_names)
feature_whitener.fit(self.data.iloc[self.train_index, :])
self.data = feature_whitener.transform(self.data)
if auto_remove:
remover = self.remove_features(feature_names)
return TransformPipeline([feature_whitener, remover])
else:
return feature_whitener
def minmax_scale_features(self, feature_names = None, auto_remove = False):
if feature_names is None:
feature_names = self.get_features_of(self.is_numerical_feature)
feature_scaler = NumericalFeatureMinMaxScaler(feature_names)
feature_scaler.fit(self.data.iloc[self.train_index, :])
self.data = feature_scaler.transform(self.data)
if auto_remove:
remover = self.remove_features(feature_names)
return TransformPipeline([feature_scaler, remover])
else:
return feature_scaler
def numerize_categorical_features(self, feature_names = None, auto_remove = False):
if not self.is_categorical_feature(self.target_feature):
raise ValueError("this method is for classifiation problem")
if feature_names is None:
feature_names = self.get_features_of(self.is_categorical_feature)
numerizer = CategoricalFeatureNumerizer(feature_names, self.target_feature)
numerizer.fit(self.data.iloc[self.train_index, :])
self.data = numerizer.transform(self.data)
if auto_remove:
remover = self.remove_features(feature_names)
return TransformPipeline([numerizer, remover])
else:
return numerizer
########################## Feature Selection ##########################
def rank_features(self, feature_names, by, *args, **kwargs):
train_scores, test_scores = zip(*[by(feature_name = f, *args, **kwargs) for f in feature_names])
return sorted(zip(feature_names, test_scores), key=lambda (f,s): s, reverse=True)
def numerized_feature_auc_metric(self, feature_name, target_value):
train_data = self.data.iloc[self.train_index, :][feature_name]
train_target = self.data.iloc[self.train_index, :][self.target_feature] == target_value
test_data = self.data.iloc[self.test_index, :][feature_name]
test_target = self.data.iloc[self.test_index, :][self.target_feature] == target_value
train_score = roc_auc_score(train_target, train_data)
test_score = roc_auc_score(test_target, test_data)
return (train_score, test_score)
def numerized_feature_logloss_metric(self, feature_name, target_value):
train_data = self.data.iloc[self.train_index, :][feature_name]
train_target = self.data.iloc[self.train_index, :][self.target_feature] == target_value
test_data = self.data.iloc[self.test_index, :][feature_name]
test_target = self.data.iloc[self.test_index, :][self.target_feature] == target_value
train_score = -np.mean(np.log(np.where(train_target==target_value, train_data, 1-train_data)))
test_score = -np.mean(np.log(np.where(test_target==target_value, test_data, 1-test_data)))
return (train_score, test_score)
########################## Data Exploration ##########################
def print_categorial_crosstable(self, feature_names = None, targets = None):
if feature_names is None:
feature_names = self.get_features_of(self.is_categorical_feature)
targets = targets or [self.target_feature]
value_tables = []
for prefix, index in zip(["train_", "test_", "overall_"],
[self.train_index, self.test_index, None]):
df = self.data.iloc[index, :] if index is not None else self.data
value_table = pd.crosstab(columns = [df[t] for t in targets],
index = [df[f] for f in feature_names],
margins=True, dropna = False)
value_table = value_table.divide(value_table.All, axis = 'index', ).iloc[:, :-2]
value_table = value_table.replace([-np.inf, np.inf], np.nan).dropna()
value_table = value_table.rename(columns = {f: prefix+str(f) for f in value_table.columns})
value_tables.append(value_table)
result = pd.concat(value_tables, axis = 1, join = 'outer')
result = result.sort(columns=result.columns[0], ascending=False)
return result
def plot_feature_pair(self, xname, yname, ax = None, legend = True, figsize = None, *args, **kwargs):
"""
Plot the 'scatter plot' of a pair of two features based on the types of features,
e.g.,
        1. numerical vs numerical - scatter plot with lowess
        2. numerical vs categorical - density plot grouped by categorical vars
3. categorical vs categorical - stacked barchart (hexbin or confusion matrix plot)
This will help spot useful features that are both common and have extreme patterns (for classification)
df: DataFrame
xname: name of feature x (usually an input feature of interest)
yname: name of feature y (usually the output feature )
args, kwargs: plotting parameters
"""
df = self.data.loc[:, [xname, yname]].dropna()
if ax is None:
fig, ax = plt.subplots(1, 1, figsize = figsize)
x_dtype = "numerical" if self.is_numerical_feature(xname) else "categorical"
y_dtype = "numerical" if self.is_numerical_feature(yname) else "categorical"
x, y = df[xname], df[yname]
if x_dtype is "numerical" and y_dtype is "numerical":
ax.scatter(x, y, color = "blue", s = 10, marker = ".", *args, **kwargs)
            lowessy = sm.nonparametric.lowess(y, x, return_sorted = False)
            order = np.argsort(np.asarray(x))
            ax.plot(np.asarray(x)[order], np.asarray(lowessy)[order], "r-", label="lowess", alpha = 1)
ax.set_xlabel("%s(%s)" % (xname, x_dtype))
ax.set_ylabel("%s(%s)" % (yname, y_dtype))
elif x_dtype is "numerical" and y_dtype is "categorical":
for value, subdf in df.groupby(by = yname):
if subdf.shape[0] > 1:
subdf[xname].plot(kind = "density", label = value, ax = ax)
ax.set_xlabel("%s|%s" % (xname, yname))
elif x_dtype is "categorical" and y_dtype is "numerical":
for value, subdf in df.groupby(by = xname):
if subdf.shape[0] > 1:
subdf[yname].plot(kind = "density", label = value, ax = ax)
ax.set_xlabel("%s|%s" % (yname, xname))
else: # categorical and categorical
pd.crosstab(df[xname], df[yname], margins = False).plot(kind = 'barh', stacked = True, ax = ax)
ax.set_xlabel("dist. of %s" % yname)
if legend:
ax.legend(loc = "best")
return self
def plot_numerical_feature_density(self, feature_names=None):
if feature_names is None:
feature_names = [f for f in self.get_features_of(self.is_numerical_feature)
if f not in self.get_features_of(self.is_numerized_from_categorical_feature)]
nfeats = len(feature_names)
        nrows, ncols = int(math.ceil(nfeats / 4.0)), 4  # float division so the ceil actually rounds up
fig, axes = plt.subplots(nrows = nrows, ncols = ncols, figsize = (4 * ncols, 4 * nrows))
axes = axes.ravel()
for f, ax in zip(feature_names, axes):
try:
self.plot_feature_pair(xname = f, yname = self.target_feature, ax = ax, legend=False)
except:
pass
return self
########################## Model Fitting ##################################
def blend_biclass_models(self, models, blender,
score_function = None,
feature_names = None, target_value_index = 1, n_folds = 5):
"""
Idea credited to https://github.com/emanuele/kaggle_pbr/blob/master/blend.py
"""
if feature_names is None:
feature_names = self.get_all_input_features()
blender = BiClassModelBlender(feature_names, self.target_feature, models, blender,
target_value_index, n_folds)
blender.fit(self.data.iloc[self.train_index, :])
return blender
|
bsd-3-clause
|
anderspitman/scikit-bio
|
skbio/stats/ordination/__init__.py
|
1
|
3853
|
r"""
Ordination methods (:mod:`skbio.stats.ordination`)
==================================================
.. currentmodule:: skbio.stats.ordination
This module contains several ordination methods, including Principal
Coordinate Analysis, Correspondence Analysis, Redundancy Analysis and
Canonical Correspondence Analysis.
Functions
---------
.. autosummary::
:toctree: generated/
ca
pcoa
cca
rda
mean_and_std
corr
scale
svd_rank
Examples
--------
This is an artificial dataset (table 11.3 in [1]_) that represents fish
abundance in different sites (`Y`, the response variables) and
environmental variables (`X`, the explanatory variables).
>>> import numpy as np
>>> import pandas as pd
First we need to construct our explanatory variable dataset `X`.
>>> X = np.array([[1.0, 0.0, 1.0, 0.0],
... [2.0, 0.0, 1.0, 0.0],
... [3.0, 0.0, 1.0, 0.0],
... [4.0, 0.0, 0.0, 1.0],
... [5.0, 1.0, 0.0, 0.0],
... [6.0, 0.0, 0.0, 1.0],
... [7.0, 1.0, 0.0, 0.0],
... [8.0, 0.0, 0.0, 1.0],
... [9.0, 1.0, 0.0, 0.0],
... [10.0, 0.0, 0.0, 1.0]])
>>> transects = ['depth', 'substrate_coral', 'substrate_sand',
... 'substrate_other']
>>> sites = ['site1', 'site2', 'site3', 'site4', 'site5', 'site6', 'site7',
... 'site8', 'site9', 'site10']
>>> X = pd.DataFrame(X, sites, transects)
Then we need to create a dataframe with the information about the species
observed at different sites.
>>> species = ['specie1', 'specie2', 'specie3', 'specie4', 'specie5',
... 'specie6', 'specie7', 'specie8', 'specie9']
>>> Y = np.array([[1, 0, 0, 0, 0, 0, 2, 4, 4],
... [0, 0, 0, 0, 0, 0, 5, 6, 1],
... [0, 1, 0, 0, 0, 0, 0, 2, 3],
... [11, 4, 0, 0, 8, 1, 6, 2, 0],
... [11, 5, 17, 7, 0, 0, 6, 6, 2],
... [9, 6, 0, 0, 6, 2, 10, 1, 4],
... [9, 7, 13, 10, 0, 0, 4, 5, 4],
... [7, 8, 0, 0, 4, 3, 6, 6, 4],
... [7, 9, 10, 13, 0, 0, 6, 2, 0],
... [5, 10, 0, 0, 2, 4, 0, 1, 3]])
>>> Y = pd.DataFrame(Y, sites, species)
We can now perform canonical correspondence analysis. Matrix `X` contains a
continuous variable (depth) and a categorical one (substrate type) encoded
using a one-hot encoding.
>>> from skbio.stats.ordination import cca
We explicitly need to avoid perfect collinearity, so we'll drop one of the
substrate types (the last column of `X`).
>>> del X['substrate_other']
>>> ordination_result = cca(Y, X, scaling=2)
Exploring the results, we see that the first three axes explain about
80% of the variance.
>>> ordination_result.proportion_explained
CCA1 0.466911
CCA2 0.238327
CCA3 0.100548
CCA4 0.104937
CCA5 0.044805
CCA6 0.029747
CCA7 0.012631
CCA8 0.001562
CCA9 0.000532
dtype: float64
References
----------
.. [1] Legendre P. and Legendre L. 1998. Numerical Ecology. Elsevier,
Amsterdam.
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from skbio.util import TestRunner
from ._redundancy_analysis import rda
from ._correspondence_analysis import ca
from ._canonical_correspondence_analysis import cca
from ._principal_coordinate_analysis import pcoa
from ._utils import (mean_and_std, scale, svd_rank, corr, e_matrix, f_matrix)
__all__ = ['ca', 'rda', 'cca', 'pcoa',
'mean_and_std', 'scale', 'svd_rank', 'corr',
'e_matrix', 'f_matrix']
test = TestRunner(__file__).test
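# Hedged usage sketch (an addition, not part of this module): principal
# coordinate analysis on a tiny distance matrix, assuming scikit-bio's
# DistanceMatrix API.
#
# >>> from skbio import DistanceMatrix
# >>> from skbio.stats.ordination import pcoa
# >>> dm = DistanceMatrix([[0.0, 0.5, 1.0],
# ...                      [0.5, 0.0, 0.75],
# ...                      [1.0, 0.75, 0.0]],
# ...                     ids=['site1', 'site2', 'site3'])
# >>> res = pcoa(dm)
# >>> res.proportion_explained  # fraction of variance captured by each axis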
|
bsd-3-clause
|
timsnyder/bokeh
|
bokeh/util/tests/test_serialization.py
|
2
|
16426
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import base64
import datetime
import os
# External imports
import numpy as np
import pytz
# Bokeh imports
# Module under test
import bokeh.util.serialization as bus
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class Test_make_id(object):
def test_default(self):
bus._simple_id = 999
assert bus.make_id() == "1000"
assert bus.make_id() == "1001"
assert bus.make_id() == "1002"
def test_simple_ids_yes(self):
bus._simple_id = 999
os.environ["BOKEH_SIMPLE_IDS"] = "yes"
assert bus.make_id() == "1000"
assert bus.make_id() == "1001"
assert bus.make_id() == "1002"
def test_simple_ids_no(self):
os.environ["BOKEH_SIMPLE_IDS"] = "no"
assert len(bus.make_id()) == 36
assert isinstance(bus.make_id(), str)
del os.environ["BOKEH_SIMPLE_IDS"]
class Test_make_globally_unique_id(object):
def test_basic(self):
assert len(bus.make_globally_unique_id()) == 36
assert isinstance(bus.make_globally_unique_id(), str)
def test_np_consts():
assert bus.NP_EPOCH == np.datetime64(0, 'ms')
assert bus.NP_MS_DELTA == np.timedelta64(1, 'ms')
def test_binary_array_types():
assert len(bus.BINARY_ARRAY_TYPES) == 8
for typ in [np.dtype(np.float32),
np.dtype(np.float64),
np.dtype(np.uint8),
np.dtype(np.int8),
np.dtype(np.uint16),
np.dtype(np.int16),
np.dtype(np.uint32),
np.dtype(np.int32)]:
assert typ in bus.BINARY_ARRAY_TYPES
def test_datetime_types(pd):
if pd is None:
assert len(bus.DATETIME_TYPES) == 4
else:
assert len(bus.DATETIME_TYPES) == 8
def test_is_timedelta_type_non_pandas_types():
assert bus.is_timedelta_type(datetime.timedelta(3000))
assert bus.is_timedelta_type(np.timedelta64(3000, 'ms'))
def test_is_timedelta_type_pandas_types(pd):
assert bus.is_timedelta_type(pd.Timedelta("3000ms"))
def test_convert_timedelta_type_non_pandas_types():
assert bus.convert_timedelta_type(datetime.timedelta(3000)) == 259200000000.0
assert bus.convert_timedelta_type(np.timedelta64(3000, 'ms')) == 3000.
def test_convert_timedelta_type_pandas_types(pd):
assert bus.convert_timedelta_type(pd.Timedelta("3000ms")) == 3000.0
def test_is_datetime_type_non_pandas_types():
assert bus.is_datetime_type(datetime.datetime(2016, 5, 11))
assert bus.is_datetime_type(datetime.date(2016, 5, 11))
assert bus.is_datetime_type(datetime.time(3, 54))
assert bus.is_datetime_type(np.datetime64("2011-05-11"))
def test_is_datetime_type_pandas_types(pd):
assert bus.is_datetime_type(bus._pd_timestamp(3000000))
assert bus.is_datetime_type(pd.Period('1900', 'A-DEC'))
assert bus.is_datetime_type(pd.NaT)
def test_convert_datetime_type_non_pandas_types():
assert bus.convert_datetime_type(datetime.datetime(2018, 1, 3, 15, 37, 59, 922452)) == 1514993879922.452
assert bus.convert_datetime_type(datetime.datetime(2018, 1, 3, 15, 37, 59)) == 1514993879000.0
assert bus.convert_datetime_type(datetime.datetime(2016, 5, 11)) == 1462924800000.0
assert bus.convert_datetime_type(datetime.date(2016, 5, 11)) == 1462924800000.0
assert bus.convert_datetime_type(datetime.time(3, 54)) == 14040000.0
assert bus.convert_datetime_type(np.datetime64("2016-05-11")) == 1462924800000.0
def test_convert_datetime_type_pandas_types(pd):
assert bus.convert_datetime_type(bus._pd_timestamp(3000000)) == 3.0
assert bus.convert_datetime_type(pd.Period('1900', 'A-DEC')) == -2208988800000.0
assert bus.convert_datetime_type(pd.Period('1900', 'A-DEC')) == bus.convert_datetime_type(np.datetime64("1900-01-01"))
assert np.isnan(bus.convert_datetime_type(pd.NaT))
@pytest.mark.parametrize('obj', [[1,2], (1,2), dict(), set(), 10.2, "foo"])
@pytest.mark.unit
def test_convert_datetime_type_array_ignores_non_array(obj):
assert bus.convert_datetime_array(obj) is obj
def test_convert_datetime_type_array_ignores_non_datetime_array():
a = np.arange(0,10,100)
assert bus.convert_datetime_array(a) is a
def test_convert_datetime_type_array():
a = np.array(['2018-01-03T15:37:59', '2018-01-03T15:37:59.922452', '2016-05-11'], dtype='datetime64')
r = bus.convert_datetime_array(a)
assert r[0] == 1514993879000.0
assert r[1] == 1514993879922.452
assert r[2] == 1462924800000.0
assert r.dtype == 'float64'
def test_convert_datetime_type_with_tz():
# This ensures datetimes are sent to BokehJS timezone-naive
# see https://github.com/bokeh/bokeh/issues/6480
for tz in pytz.all_timezones:
assert bus.convert_datetime_type(datetime.datetime(2016, 5, 11, tzinfo=datetime.tzinfo(tz))) == 1462924800000.0
testing = [[float('nan'), 3], [float('-inf'), [float('inf')]]]
expected = [['NaN', 3.0], ['-Infinity', ['Infinity']]]
def test_traverse_return_valid_json():
assert bus.traverse_data(testing) == expected
def test_traverse_with_numpy():
assert bus.traverse_data(testing, True) == expected
def test_traverse_without_numpy():
assert bus.traverse_data(testing, False) == expected
@pytest.mark.parametrize('dt', bus.BINARY_ARRAY_TYPES)
@pytest.mark.unit
def test_transform_array_force_list_default(dt):
a = np.empty(shape=10, dtype=dt)
out = bus.transform_array(a)
assert isinstance(out, dict)
@pytest.mark.parametrize('dt', bus.BINARY_ARRAY_TYPES)
@pytest.mark.unit
def test_transform_array_force_list_default_with_buffers(dt):
a = np.empty(shape=10, dtype=dt)
bufs = []
out = bus.transform_array(a, buffers=bufs)
assert isinstance(out, dict)
assert len(bufs) == 1
assert len(bufs[0]) == 2
assert bufs[0][1] == a.tobytes()
assert 'shape' in out
assert out['shape'] == a.shape
assert 'dtype' in out
assert out['dtype'] == a.dtype.name
assert '__buffer__' in out
@pytest.mark.parametrize('dt', bus.BINARY_ARRAY_TYPES)
@pytest.mark.unit
def test_transform_array_force_list_true(dt):
a = np.empty(shape=10, dtype=dt)
out = bus.transform_array(a, force_list=True)
assert isinstance(out, list)
def test_transform_series_force_list_default(pd):
# default int seems to be int64, can't be encoded!
df = pd.Series([1, 3, 5, 6, 8])
out = bus.transform_series(df)
assert isinstance(out, list)
assert out == [1, 3, 5, 6, 8]
df = pd.Series([1, 3, 5, 6, 8], dtype=np.int32)
out = bus.transform_series(df)
assert isinstance(out, dict)
df = pd.Series([1.0, 3, 5, 6, 8])
out = bus.transform_series(df)
assert isinstance(out, dict)
df = pd.Series(np.array([np.nan, np.inf, -np.inf, 0]))
out = bus.transform_series(df)
assert isinstance(out, dict)
def test_transform_series_force_list_default_with_buffers(pd):
# default int seems to be int64, can't be converted to buffer!
df = pd.Series([1, 3, 5, 6, 8])
out = bus.transform_series(df)
assert isinstance(out, list)
assert out == [1, 3, 5, 6, 8]
df = pd.Series([1, 3, 5, 6, 8], dtype=np.int32)
bufs = []
out = bus.transform_series(df, buffers=bufs)
assert isinstance(out, dict)
assert len(bufs) == 1
assert len(bufs[0]) == 2
assert isinstance(bufs[0][0], dict)
assert list(bufs[0][0]) == ["id"]
assert bufs[0][1] == np.array(df).tobytes()
assert 'shape' in out
assert out['shape'] == df.shape
assert 'dtype' in out
assert out['dtype'] == df.dtype.name
assert '__buffer__' in out
df = pd.Series([1.0, 3, 5, 6, 8])
bufs = []
out = bus.transform_series(df, buffers=bufs)
assert isinstance(out, dict)
assert len(bufs) == 1
assert len(bufs[0]) == 2
assert isinstance(bufs[0][0], dict)
assert list(bufs[0][0]) == ["id"]
assert bufs[0][1] == np.array(df).tobytes()
assert 'shape' in out
assert out['shape'] == df.shape
assert 'dtype' in out
assert out['dtype'] == df.dtype.name
assert '__buffer__' in out
df = pd.Series(np.array([np.nan, np.inf, -np.inf, 0]))
bufs = []
out = bus.transform_series(df, buffers=bufs)
assert isinstance(out, dict)
assert len(bufs) == 1
assert len(bufs[0]) == 2
assert isinstance(bufs[0][0], dict)
assert list(bufs[0][0]) == ["id"]
assert bufs[0][1] == np.array(df).tobytes()
assert 'shape' in out
assert out['shape'] == df.shape
assert 'dtype' in out
assert out['dtype'] == df.dtype.name
assert '__buffer__' in out
# PeriodIndex
df = pd.period_range('1900-01-01','2000-01-01', freq='A')
bufs = []
out = bus.transform_series(df, buffers=bufs)
assert isinstance(out, dict)
assert len(bufs) == 1
assert len(bufs[0]) == 2
assert isinstance(bufs[0][0], dict)
assert list(bufs[0][0]) == ["id"]
assert bufs[0][1] == bus.convert_datetime_array(df.to_timestamp().values).tobytes()
assert 'shape' in out
assert out['shape'] == df.shape
assert 'dtype' in out
assert out['dtype'] == 'float64'
assert '__buffer__' in out
# DatetimeIndex
df = pd.period_range('1900-01-01','2000-01-01', freq='A').to_timestamp()
bufs = []
out = bus.transform_series(df, buffers=bufs)
assert isinstance(out, dict)
assert len(bufs) == 1
assert len(bufs[0]) == 2
assert isinstance(bufs[0][0], dict)
assert list(bufs[0][0]) == ["id"]
assert bufs[0][1] == bus.convert_datetime_array(df.values).tobytes()
assert 'shape' in out
assert out['shape'] == df.shape
assert 'dtype' in out
assert out['dtype'] == 'float64'
assert '__buffer__' in out
# TimeDeltaIndex
df = pd.to_timedelta(np.arange(5), unit='s')
bufs = []
out = bus.transform_series(df, buffers=bufs)
assert isinstance(out, dict)
assert len(bufs) == 1
assert len(bufs[0]) == 2
assert isinstance(bufs[0][0], dict)
assert list(bufs[0][0]) == ["id"]
assert bufs[0][1] == bus.convert_datetime_array(df.values).tobytes()
assert 'shape' in out
assert out['shape'] == df.shape
assert 'dtype' in out
assert out['dtype'] == 'float64'
assert '__buffer__' in out
def test_transform_series_force_list_true(pd):
df = pd.Series([1, 3, 5, 6, 8])
out = bus.transform_series(df, force_list=True)
assert isinstance(out, list)
df = pd.Series([1, 3, 5, 6, 8], dtype=np.int32)
out = bus.transform_series(df, force_list=True)
assert isinstance(out, list)
df = pd.Series([1.0, 3, 5, 6, 8])
out = bus.transform_series(df, force_list=True)
assert isinstance(out, list)
df = pd.Series(np.array([np.nan, np.inf, -np.inf, 0]))
out = bus.transform_series(df, force_list=True)
assert isinstance(out, list)
@pytest.mark.parametrize('dt', bus.BINARY_ARRAY_TYPES)
@pytest.mark.unit
def test_transform_array_to_list(dt):
a = np.empty(shape=10, dtype=dt)
out = bus.transform_array_to_list(a)
assert isinstance(out, list)
@pytest.mark.parametrize('values', [(['cat', 'dog']), ([1.2, 'apple'])])
@pytest.mark.unit
def test_transform_array_with_nans_to_list(pd, values):
s = pd.Series([np.nan, values[0], values[1]])
out = bus.transform_array_to_list(s)
assert isinstance(out, list)
assert out == ['NaN', values[0], values[1]]
def test_array_encoding_disabled_by_dtype():
assert len(bus.BINARY_ARRAY_TYPES) > 0
dt_ok = bus.BINARY_ARRAY_TYPES
dt_bad = set(np.dtype(x) for x in set(np.typeDict.values()) - set([np.void])) - dt_ok
for dt in dt_ok:
a = np.empty(shape=10, dtype=dt)
assert not bus.array_encoding_disabled(a)
for dt in dt_bad:
a = np.empty(shape=10, dtype=dt)
assert bus.array_encoding_disabled(a)
@pytest.mark.parametrize('dt', [np.float32, np.float64, np.int64])
@pytest.mark.parametrize('shape', [(12,), (2, 6), (2,2,3)])
@pytest.mark.unit
def test_encode_base64_dict(dt, shape):
a = np.arange(12, dtype=dt)
a.reshape(shape)
d = bus.encode_base64_dict(a)
assert 'shape' in d
assert d['shape'] == a.shape
assert 'dtype' in d
assert d['dtype'] == a.dtype.name
assert '__ndarray__' in d
b64 = base64.b64decode(d['__ndarray__'])
aa = np.frombuffer(b64, dtype=d['dtype'])
assert np.array_equal(a, aa)
@pytest.mark.parametrize('dt', [np.float32, np.float64, np.int64])
@pytest.mark.parametrize('shape', [(12,), (2, 6), (2,2,3)])
@pytest.mark.unit
def test_decode_base64_dict(dt, shape):
a = np.arange(12, dtype=dt)
a.reshape(shape)
data = base64.b64encode(a).decode('utf-8')
d = {
'__ndarray__' : data,
'dtype' : a.dtype.name,
'shape' : a.shape
}
aa = bus.decode_base64_dict(d)
assert aa.shape == a.shape
assert aa.dtype.name == a.dtype.name
assert np.array_equal(a, aa)
assert aa.flags['WRITEABLE']
@pytest.mark.parametrize('dt', [np.float32, np.float64, np.int64])
@pytest.mark.parametrize('shape', [(12,), (2, 6), (2,2,3)])
@pytest.mark.unit
def test_encode_decode_roundtrip(dt, shape):
a = np.arange(12, dtype=dt)
a.reshape(shape)
d = bus.encode_base64_dict(a)
aa = bus.decode_base64_dict(d)
assert np.array_equal(a, aa)
@pytest.mark.parametrize('dt', bus.BINARY_ARRAY_TYPES)
@pytest.mark.parametrize('shape', [(12,), (2, 6), (2,2,3)])
@pytest.mark.unit
def test_encode_binary_dict(dt, shape):
a = np.arange(12, dtype=dt)
a.reshape(shape)
bufs = []
d = bus.encode_binary_dict(a, buffers=bufs)
assert len(bufs) == 1
assert len(bufs[0]) == 2
assert bufs[0][1] == a.tobytes()
assert 'shape' in d
assert d['shape'] == a.shape
assert 'dtype' in d
assert d['dtype'] == a.dtype.name
assert '__buffer__' in d
@pytest.mark.parametrize('cols', [None, [], ['a'], ['a', 'b'], ['a', 'b', 'c']])
@pytest.mark.parametrize('dt1', [np.float32, np.float64, np.int64])
@pytest.mark.parametrize('dt2', [np.float32, np.float64, np.int64])
@pytest.mark.unit
def test_transform_column_source_data_with_buffers(pd, cols, dt1, dt2):
d = dict(a=[1,2,3], b=np.array([4,5,6], dtype=dt1), c=pd.Series([7,8,9], dtype=dt2))
bufs = []
out = bus.transform_column_source_data(d, buffers=bufs, cols=cols)
assert set(out) == (set(d) if cols is None else set(cols))
if 'a' in out:
assert out['a'] == [1,2,3]
for x in ['b', 'c']:
dt = d[x].dtype
if x in out:
if dt in bus.BINARY_ARRAY_TYPES:
assert isinstance(out[x], dict)
assert 'shape' in out[x]
assert out[x]['shape'] == d[x].shape
assert 'dtype' in out[x]
assert out[x]['dtype'] == d[x].dtype.name
assert '__buffer__' in out[x]
else:
assert isinstance(out[x], list)
assert out[x] == list(d[x])
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
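# Hedged usage sketch (an addition, not part of the original test module): the
# encode/decode round trip exercised by the tests above, using only calls that
# appear in this file.
def _serialization_sketch():
    a = np.arange(12, dtype=np.float64)
    d = bus.encode_base64_dict(a)               # dict with '__ndarray__', 'dtype', 'shape'
    aa = bus.decode_base64_dict(d)              # back to an ndarray equal to a
    bufs = []
    out = bus.transform_array(a, buffers=bufs)  # buffer-based encoding instead of base64
    return np.array_equal(a, aa), out, bufs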
|
bsd-3-clause
|
rvraghav93/scikit-learn
|
examples/plot_isotonic_regression.py
|
33
|
1767
|
"""
===================
Isotonic Regression
===================
An illustration of isotonic regression on generated data. Isotonic
regression finds a non-decreasing approximation of a function while
minimizing the mean squared error on the training data. The benefit of
such a model is that it does not assume any particular form for the
target function, such as linearity. For comparison, a linear regression
is also presented.
"""
print(__doc__)
# Author: Nelle Varoquaux <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from sklearn.linear_model import LinearRegression
from sklearn.isotonic import IsotonicRegression
from sklearn.utils import check_random_state
n = 100
x = np.arange(n)
rs = check_random_state(0)
y = rs.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
# #############################################################################
# Fit IsotonicRegression and LinearRegression models
ir = IsotonicRegression()
y_ = ir.fit_transform(x, y)
lr = LinearRegression()
lr.fit(x[:, np.newaxis], y) # x needs to be 2d for LinearRegression
# #############################################################################
# Plot result
segments = [[[i, y[i]], [i, y_[i]]] for i in range(n)]
lc = LineCollection(segments, zorder=0)
lc.set_array(np.ones(len(y)))
lc.set_linewidths(0.5 * np.ones(n))
fig = plt.figure()
plt.plot(x, y, 'r.', markersize=12)
plt.plot(x, y_, 'g.-', markersize=12)
plt.plot(x, lr.predict(x[:, np.newaxis]), 'b-')
plt.gca().add_collection(lc)
plt.legend(('Data', 'Isotonic Fit', 'Linear Fit'), loc='lower right')
plt.title('Isotonic regression')
plt.show()
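# #############################################################################
# Hedged mini-example (an addition, not part of the original gallery script):
# the fitted values returned by IsotonicRegression are always non-decreasing;
# adjacent violators are pooled (averaged) until monotonicity holds.
ir_demo = IsotonicRegression()
y_demo = ir_demo.fit_transform([0, 1, 2, 3, 4], [1, 3, 2, 5, 4])
# y_demo is [1.0, 2.5, 2.5, 4.5, 4.5]: the out-of-order pairs (3, 2) and (5, 4)
# are each averaged so the fitted sequence never decreases.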
|
bsd-3-clause
|
rebeccaroisin/nxrepair
|
nxrepair/nxrepair.py
|
1
|
27734
|
#!/usr/bin/env python
# Copyright (c) 2014, Illumina
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pysam
import math
import collections
import os
import sys
import numpy as np
from intervalNode import IntervalNode
def update_progress(progress):
barLength = 10 # Modify this to change the length of the progress bar
status = ""
if progress >= 1:
progress = 1
status = "Done...\r\n"
progress = round(progress,1)
block = int(round(barLength*progress))
text = "\rPercent: [{0}] {1}% {2}".format( "#"*block + "-"*(barLength-block), progress*100, status)
sys.stdout.write(text)
sys.stdout.flush()
def normpdf(xl, mu=0, sigma=1):
l = len(xl)
yl = np.zeros(l)
for i in range(0,l):
u = float((xl[i]-mu) / abs(sigma))
yl[i] = math.exp(-u*u/2) / (math.sqrt(2*math.pi) * abs(sigma))
return yl
def meansd(frq):
"""
Function to calculate mean and standard deviation from a dictionary of frequencies.
Return a tuple of (mean, std).
Arguments:
frq: a dictionary of frequencies: key = insert size, value = frequency
"""
keys = frq.keys()
keys.sort()
w = np.empty(len(keys),np.float)
for i,k in enumerate(keys):
w[i] = frq[k]
x = np.abs(keys)
xbar = np.average(x,weights=w)
xsd = np.sqrt(np.average(np.power((x - xbar),2),weights=w))
return (xbar,xsd)
def MAD(frq):
"""
Function to calculate median and median absolute deviation (MAD) from a dictionary of frequencies.
Return a tuple of (median, MAD).
Arguments:
frq: a dictionary of frequencies: key = insert size, value = frequency
"""
all_lengths = []
for k, v in frq.iteritems():
new_vals = [k] * int(v)
all_lengths.extend(new_vals)
all_lengths = np.array(sorted(all_lengths))
mid = len(all_lengths)/2
median = all_lengths[mid]
residuals = sorted(abs(all_lengths - median)) # difference between val and median
MAD = residuals[mid] # median of residuals
#print MAD
return median, MAD
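# Hedged sketch (an addition, not part of the original module): how the MAD
# summary above behaves on a toy frequency table; the expected values in the
# comments hold only for these made-up numbers.
def _example_mad():
    frq = {100: 2, 110: 3, 300: 1}  # insert size -> frequency
    lengths = np.array(sorted(sum(([k] * v for k, v in frq.items()), [])))
    mid = len(lengths) // 2                      # middle index
    median = lengths[mid]                        # 110
    mad = sorted(abs(lengths - median))[mid]     # 10: robust to the 300 outlier
    return median, mad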
def find_intersections(tree, s, e):
"""
Function to find inserts that bridge a region of a contig.
Arguments:
tree: interval tree of start/end positions of mate pair inserts
s: start position of interval on contig
e: end position of interval on contig
Return a list of nodes from tree whose start and end positions span the interval given by s and e.
"""
# find all reads that bridge a gap
intersections = []
tree.intersect(s, e, lambda x: intersections.append(x)) # see interval node for implementation
return intersections
def get_insertlengths(reads):
"""
Function to calculate interval sizes of a set of mate pairs.
Arguments:
reads: a list of mate pairs as interval tree node objects
Return two numpy arrays: an array of insert sizes (integers) and an array of strand alignments (boolean)
"""
distances = []
strands = []
for read in reads:
distances.append(read.end - read.start) # insert length
strands.append(read.other[1]) # boolean: correct alignment
return np.array(distances), np.array(strands)
def probability_of_readlength(read_length, mu, sigma, pi1, L):
"""
Function to calculate the probability that mate pair insert sizes are not anomalous.
Return an array of probabilities.
Arguments:
read_length: a numpy array of insert sizes
mu: mean insert size (float)
sigma: insert size standard deviation (float)
pi1: prior probability of being anomalous (float)
L: length of contig to which the reads in read_length are aligned
"""
p_0 = pi1 * (1 / float(L)) # anomaly
# probability of drawing from a gaussian with mean mu and std sigma
p_1 = (1 - pi1) * normpdf(read_length,mu,sigma)
p_total = p_1 / (p_0 + p_1)
return p_total
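# Hedged sketch (an addition, not part of the original module): the weighting
# above is a two-component mixture posterior. With toy numbers (mu=3000,
# sigma=300, pi1=0.01, contig length L=1e6), an insert near the mean gets a
# weight close to 1 while a wildly long insert gets a weight close to 0.
def _example_probability_weighting():
    toy_lengths = np.array([3000.0, 30000.0])
    weights = probability_of_readlength(toy_lengths, mu=3000.0, sigma=300.0,
                                        pi1=0.01, L=1e6)
    return weights  # approximately [1.0, 0.0]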
class aligned_assembly:
"""
Class to hold a set of mate pair or paired end reads aligned to the scaffolded genome assembly
"""
def __init__(self, bamfile, fastafile, min_size, threshold, step, window, minmapq, maxinsert, fraction, prior):
"""
Initialiser function for the aligned assembly class.
Arguments:
bamfile: a sorted bam file of reads aligned to the scaffolded assembly
fastafile: the scaffolded assembly in fasta format
min_size: the minimum size contig to consider for analysis (integer)
threshold: threshold in Z score below which a misassembly is called (float)
step: step size to walk contigs (integer)
window: width of window around each position from which mate pair insert sizes are fetched (integer)
minmapq: the minimum mapq value for which paired reads are evaluated (float)
maxinsert: the maximum insert size for which genome population statistics are calculated
fraction: minimum fraction of read pairs with correct orientation to call support for the assembly (float)
prior: prior probability that a mate pair insert size is anomalous (float)
"""
# initialising user parameters
self.minmapq = minmapq
self.maxinsert = maxinsert
self.threshold = threshold
self.step = step
self.window = window
self.fraction = fraction
self.prior = prior
self.sam = pysam.Samfile(bamfile, "rb" )
self.fasta = pysam.Fastafile(fastafile)
self.min_size = min_size
# getting reads from bamfile
self.all_reads = self.sam.fetch()
self.references = self.sam.references
self.lengths = self.sam.lengths
# refdict: key=contig, val=contig length
# read_stock: key=contig, val=aligned reads
self.refdict = {}
self.read_stock = {}
for k,v in zip(self.references,self.lengths):
self.refdict[k]=v
self.read_stock[k] = self.get_reads(k, 0, v)
self.sizes = self.get_read_size_distribution()
self.isize_median, self.isize_MAD = MAD(self.sizes)
self.isize_mean, _ = meansd(self.sizes)
self.isize_sd = 1.4826 * self.isize_MAD
#print self.isize_sd, self.isize_MAD, 1.4826 * self.isize_MAD
def get_read_size_distribution(self):
"""
Function to calculate global insert size distribution across the whole assembly
Return a frequency table of insert sizes as a dictionary with key = insert size, value = frequency
"""
frq = collections.defaultdict(int) # dictionary of insert sizes
found = {}
for read in self.all_reads:
# accept read based on mapq, contig alignment and insert size
if (read.mapq > self.minmapq) and (read.rnext == read.tid) and (abs(read.tlen) < self.maxinsert):
if read.qname in found and found[read.qname][0]==read.tid:
mate = found[read.qname]
isize = abs(max( mate[1]+mate[2]-read.pos,read.pos+read.rlen-mate[1] ))
frq[isize] += 1
else:
found[read.qname] = (read.tid,read.pos,read.rlen)
return frq
def get_reads(self, ref, start, end):
"""
Function to fetch reads aligned to a specific part of the assembled genome and return a list of aligned reads, where each list entry is a tuple:
(read start position, read end position, read name, strand alignment) and strand alignment is a boolean indicating whether the two reads of a read pair align correctly to opposite strands.
Reads are fetched that align to contig "ref" between positions "start" and "end".
Arguments:
ref: the name of the contig from which aligned reads are to be fetched.
start: the position on the contig from which to start fetching aligned reads
end: the position on the contig from which to end fetching aligned reads
"""
# fetch all reads within a region
# insert size: gap between end of one mate and start of next
reads = self.sam.fetch(ref, start, end)
read_stock = []
found = {}
for read in reads:
if (read.rnext == read.tid):
if read.qname in found and found[read.qname][0]==read.tid: # if mate maps to same contig
mate = found[read.qname] # fetch mate
# correctly ordering mates
if mate[1] > read.pos:
start_pos = read.pos + read.rlen
end_pos = mate[1]
else:
start_pos = mate[1] + mate[2]
end_pos = read.pos
# add mates to list of mates on that contig
# include strand orientation info
correct_strands = ((read.is_reverse) and not (read.mate_is_reverse)) or ((read.mate_is_reverse) and not (read.is_reverse))
read_stock.append((start_pos, end_pos, read.qname, correct_strands))
else:
found[read.qname] = (read.tid,read.pos,read.rlen) # haven't reached mate yet
return read_stock
#@profile
def make_tree(self, ref):
"""
Function to construct an interval tree from reads aligning to a contig and return the interval tree.
The interval tree stores nodes with properties start (start postition of interval), end (end position of interval) and other,
which is a tuple of the mate pair name (string) and the strand alignment of the two paired reads (boolean).
Arguments:
ref: Reference ID of the contig for which the interval tree is to be constructed
"""
bridges = self.read_stock[ref]
# check if contig has any alignments
if not bridges:
return None
# insert first interval into tree
s1, e1, name, correct_strands = bridges[0]
tree = IntervalNode(s1, e1, other=(name, correct_strands))
# insert the rest of the intervals
for (start, end, name, correct_strands) in bridges[1:]:
tree = tree.insert(start, end, other=(name, correct_strands))
return tree
def get_read_mappings(self, ref):
"""
Function to calculate the fraction of reads pairs within a contig that align correctly to opposite strands.
Return five arrays: the positions at which strand alignment was evaluated, the fraction correctly aligned, the fraction incorrectly aligned to the same strand, the unmapped
fraction and the fraction that have some other alignment issue.
Arguments:
ref: the reference id of the contig to be evaluated
"""
dump_val = self.step
positions = []
same_strand = 0
opp_strand = 0
unmapped = 0
other = 0
# arrays of read mapping behaviour
good_ratio = []
unmapped_ratio = []
bad_ratio = []
other_ratio = []
mini_pos = []
reads = self.sam.fetch(reference = ref)
# note that iterating in this manner works because the bam file is sorted.
# create arrays containing fraction of correctly / incorrectly aligned reads
for i, r in enumerate(reads):
mini_pos.append(r.pos)
if r.mate_is_unmapped:
unmapped += 1
elif ((r.is_reverse) and not (r.mate_is_reverse)) or ((r.mate_is_reverse) and not (r.is_reverse)):
same_strand += 1
elif((r.is_reverse) and (r.mate_is_reverse)) or (not (r.mate_is_reverse) and not (r.is_reverse)):
opp_strand += 1
else:
other += 1
if (i+1) % dump_val == 0:
total = same_strand + opp_strand + unmapped + other
good_ratio.append(float(same_strand) / total)
bad_ratio.append(float(opp_strand) / total)
unmapped_ratio.append(float(unmapped) / total)
other_ratio.append(float(other) / total)
same_strand = 0
opp_strand = 0
unmapped = 0
other = 0
positions.append(np.mean(mini_pos))
mini_pos = []
return np.array(positions), np.array(good_ratio), np.array(bad_ratio), np.array(unmapped_ratio), np.array(other_ratio)
def get_mapping_anomalies(self):
"""
Function to determine the frequency of strand mapping anomalies across the entire genome assembly.
Calls get_read_mappings for each contig larger than the aligned_assembly.min_size and returns:
1) a dictionary with keys = contig reference IDs; values = list of positions and strand alignment ratios as described in get_read_mappings
2) a dictionary of anomalies with keys = contig reference IDs, values = [list of positions where the ratio of correctly aligned strands < the fraction parameter (default 0.75), corresponding ratio of correctly aligned strands]
"""
mapping_ratios = {} # key=contig, val=list of arrays of mapping behaviours
anomalies = {}
for w, (ref, length) in enumerate(self.refdict.iteritems()):
if length > self.min_size: # consider only big contigs
positions, good_ratio, bad_ratio, unmapped_ratio, other_ratio = self.get_read_mappings(ref)
map_criterion = good_ratio < self.fraction
pos_anomalies = positions[map_criterion]
map_anomalies = good_ratio[map_criterion]
mapping_ratios[ref] = [positions, good_ratio, bad_ratio, unmapped_ratio, other_ratio]
anomalies[ref] = [pos_anomalies, map_anomalies]
return mapping_ratios, anomalies
def get_size_anomalies(self):
"""
Function to determine the frequency of insert size anomalies across the entire genome assembly.
Calls probability_of_readlength for each contig larger than aligned_assembly.min_size and returns:
1) a dictionary with keys = contig reference IDs; values = [positions, array of assembly-support z-scores at those positions]
2) a dictionary of anomalies with keys = contig reference IDs, values = [list of positions whose z-score falls below the threshold parameter, corresponding z-score values]
"""
anomalies = {}
zscores = {}
all_probabilities = []
stock_probabilities = {}
for w, (ref, length) in enumerate(self.refdict.iteritems()):
if length > self.min_size:
tree = self.make_tree(ref) # build tree from all reads aligning to a contig
if not tree:
continue
positions = np.arange(self.step, length - self.window, self.step)
probabilities = []
print "\nProcessing ",ref
npos = float(len(positions))
for idx,pos in enumerate(positions):
# update progress bar
update_progress(idx/npos)
bridges = np.array(find_intersections(tree, pos-self.window, pos+self.window)) # fetch reads in windows across contig
bridge_lengths, strand_alignment = get_insertlengths(bridges) # get insert sizes and mapping behaviour
prob_lengths = probability_of_readlength(bridge_lengths, self.isize_mean, self.isize_sd, self.prior, length) # get prob. insert sizes from null
condition = strand_alignment == 1
D = np.sum(prob_lengths[condition]) # D is total assembly support
probabilities.append(D)
all_probabilities.append(D)
stock_probabilities[ref] = [positions, np.array(probabilities)]
p_mean = np.mean(np.array(all_probabilities)) # get contig mean and variance
p_std = np.std(np.array(all_probabilities))
if p_std == 0:
p_std = 0.01
for ref, [positions, probs] in stock_probabilities.iteritems():
zscore = (probs - p_mean) / p_std # calculate position z score from contig mean, std
# anomalies have Zscore < Threshold.
# Note: threshold should be negative
z_criterion = (zscore < self.threshold)
z_anomalies = zscore[z_criterion]
#print ref, z_anomalies
pos_anomalies = positions[z_criterion]
zscores[ref] = [positions, zscore]
anomalies[ref] = [pos_anomalies, z_anomalies] # list of anomaly locations and scores
return zscores, anomalies, tree
def get_anomalies(self, outfile, trim, img_name=None):
"""
Function to determine the frequency of anomalous mate pair behaviour across the entire genome assembly and return a dictionary where:
key = contig reference IDs,
value = list of postions within that contig where an assembly error is identified and the contig should be broken.
Calls get_size_anomalies and get_mapping_anomalies for each contig larger than the aligned_assembly.min_size; makes a .csv file listing for each contig the positions of identified misassemblies and their corresponding anomalous scores.
Arguments:
outfile: name of file (including filepath) to store the list of contig misassemblies.
Keyword Arguments:
img_name: name of file (including filepath, not including filetype) to store plots of alignment quality
"""
#print "Anomaly detection"
# get anomaly positions
zscores, size_anomalies, tree = self.get_size_anomalies()
map_ratios, map_anomalies = self.get_mapping_anomalies()
break_points = {}
# # make a wiggle file
# print "Writing wiggle file"
# wig_file = "%s.wig" % ("/media/rmurphy/sandbox/bash_scripts/test_TB")
# with open(wig_file, "w") as wig:
# #wig.write("track type=wiggle_0 graphType=line color=0,0,255 altColor=255,0,0 name='Zscore' graphType=heatmap midRange=35:65 midColor=0,255,0\n")
# for ref, [positions, probs] in zscores.iteritems():
# print ref
# wig.write("fixedStep chrom=%s start=%s step=%s span=%s\n" % (ref, 1, self.step, 1))
# #print zscores[ref]
# #for vals in zscores[ref]:
# # positions = vals[0]
# # probs = vals[1]
# for prob in probs:
# wig.write("%s\n" % (prob))
# wig.write("\n")
for w, (ref, [positions, probs]) in enumerate(zscores.iteritems()):
# write all Z scores to a csv file
#print "Writing Z scores to file (%s)" % ref
for pos, prob in zip(positions, probs):
outfile.write("%s %s %s\n" %(ref, pos, prob))
z_pos, z_anomalies = size_anomalies[ref]
#print "z_anomalies:", z_anomalies
#print ref, z_pos
map_positions, good_ratio, bad_ratio, unmapped_ratio, other_ratio = map_ratios[ref]
pos_anomalies, map_anom = map_anomalies[ref]
anomaly_positions = sorted(z_pos.tolist())
#print ref, anomaly_positions
# make list of positions to break this contig
break_points[ref] = []
if len(anomaly_positions) != 0:
current = []
for p in range(len(anomaly_positions)):
anom_pos = anomaly_positions[p]
if current == []:
current.append(anom_pos)
else:
if anom_pos - current[-1] <= trim:
# anomalies within trim bases of the previous one are merged into the current cluster
current.append(anom_pos)
else:
# anomalies are well separated: flush the current cluster as a single break point
break_points[ref].append(np.mean(current))
current = [anom_pos]
if current != []:
break_points[ref].append(np.mean(current))
#print "Breakpoints for ",ref, break_points[ref]
if img_name != None:
# plot zscores and anomalies
import matplotlib.pyplot as plt
fig, ax1 = plt.subplots()
plt.subplots_adjust(bottom=0.15)
ax1.set_xlim([0, max(map_positions)])
ax1.set_xlabel("Position",size=24)
ax1.set_ylabel("Assembly Support", size=24)
plt.tick_params(axis='both', which='major', labelsize=20)
lns1 = ax1.plot(positions, probs, c="k", label="Support")
#plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
anomalies_to_plot = sorted(break_points[ref])
anomalies_y = [-10] * len(anomalies_to_plot)
ax1.scatter(anomalies_to_plot, anomalies_y, c="r", marker = "o")
#print "anomalies", anomalies_to_plot
# if name given, save image as .pdf and .png
name = img_name + "_%s.pdf" % ref
plt.savefig(name)
name = img_name + "_%s.png" % ref
plt.savefig(name)
plt.cla()
return break_points
def breakContigs_double(self,outfile, breakpoints, trim):
"""
Function to break contigs at positions identified as assembly errors and write a new fasta file containing all contigs (both altered and unaltered).
Makes a two-point break at each identified misassembly position, splitting trim bases upstream and downstream of the misassembly and (currently) excluding the misassembled region.
Arguments:
outfile: name of the new fasta file (including filepath)
breakpoints: dictionary of misassemblies. key = contig reference ID, value = list of misassembly positions within the contig
trim: distance, in bases, to trim from each edge of a breakpoint to remove the misassembly (integer)
"""
#for k, v in breakpoints.iteritems():
# print breakpoints
#print k, v
newcontigs = []
for contig, length in self.refdict.iteritems():
#dna = self.fasta[contig] # sequence of contig
dna = self.fasta.fetch(reference=contig) # sequence of contig
if len(dna) <= 0:
print >> sys.stderr, "Cannot find BAM contig",contig," in Fasta file. Aborting."
sys.exit()
if contig in breakpoints:
splits = breakpoints[contig]
splits.sort()
prev = 0
for s in splits: # iterate through breakpoints
#print s
if (s - prev > trim) and ((length - s) > trim):
newcontigs.append((contig,dna[int(prev):int(s-trim)])) # trim and append section before break
print "Breaking",contig,"at",prev
prev = s + trim # trim other end of break
newcontigs.append((contig,dna[int(prev):]))
else:
newcontigs.append((contig,dna))
# write new contigs to file
newcontigs.sort(key=lambda tup: len(tup[1]), reverse=True) # longest contigs first
for count, tup in enumerate(newcontigs):
name = ">CONTIG_%d_length_%d_%s"%(count,len(tup[1]),tup[0])
#print name
outfile.write(name)
outfile.write("\n")
outfile.write(tup[1])
outfile.write("\n")
def main():
# read command line arguments
import argparse
parser = argparse.ArgumentParser(description='Routine to identify and correct large-scale misassemblies in de novo assemblies')
parser.add_argument('bam', metavar='bam', type=str, help='bam')
parser.add_argument('fasta', metavar='fasta', type=str, help='scaffold fasta')
parser.add_argument('outfile', metavar='outfile', type=str, help='Output file name')
parser.add_argument('newfasta', metavar='newfasta', type=str, help='Fasta file for new contigs, including filepath')
parser.add_argument('-min_size', metavar='min_size', type=int, default=10000, help='Minimum contig size to analyse')
parser.add_argument('-img_name', metavar ='img_name', type=str, default=None, help='Name under which to save (optional) graphs of alignment quality. Default value: None (no graphs produced)')
parser.add_argument('-trim', metavar ='trim', type=int, default=4000, help='Number of bases to trim from each side of an identified misassembly. Default value: 4000')
parser.add_argument('-T', metavar ='T', type=float, default= -4.0, help='Threshold in Z score below which a misassembly is called. Default value: -4.0')
parser.add_argument('-step_size', metavar ='step_size', type=int, default=1000, help='Step-size in bases to traverse contigs. Default value: 1000')
parser.add_argument('-window', metavar ='window', type=int, default=200, help='Window size across which bridging mate pairs are evaluated. Default value: 200')
parser.add_argument('-minmapq', metavar ='minmapq', type=int, default=40, help='Minimum MapQ value, above which a read pair is included in calculating population statistics. Default value: 40')
parser.add_argument('-maxinsert', metavar ='maxinsert', type=int, default=30000, help='Maximum insert size, below which a read pair is included in calculating population statistics. Default value: 30000')
parser.add_argument('-fraction', metavar ='fraction', type=float, default=0.75, help='Minimum fraction of read pairs with correct orientation to call support for the assembly. Default value: 0.75')
parser.add_argument('-prior', metavar ='prior', type=float, default=0.01, help='Prior probability that the insert size is anomalous. Default value: 0.01')
args = parser.parse_args()
# make assembly object
f = aligned_assembly(args.bam, args.fasta, args.min_size, args.T, args.step_size, args.window, args.minmapq, args.maxinsert, args.fraction, args.prior)
print "Search for anomalous alignments"
# find anomalies
with open(args.outfile, "w") as of:
bps = f.get_anomalies(of, args.trim, args.img_name)
# break contig at identified anomalies
print "\nBreaking contigs"
with open(args.newfasta, "w") as outfasta:
f.breakContigs_double(outfasta, bps, args.trim)
if __name__ == "__main__":
main()
|
bsd-2-clause
|
costypetrisor/scikit-learn
|
sklearn/mixture/tests/test_gmm.py
|
200
|
17427
|
import unittest
import copy
import sys
from nose.tools import assert_true
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises)
from scipy import stats
from sklearn import mixture
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.externals.six.moves import cStringIO as StringIO
rng = np.random.RandomState(0)
def test_sample_gaussian():
# Test sample generation from mixture.sample_gaussian where covariance
# is diagonal, spherical and full
n_features, n_samples = 2, 300
axis = 1
mu = rng.randint(10) * rng.rand(n_features)
cv = (rng.rand(n_features) + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='diag', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(samples.var(axis), cv, atol=1.5))
# the same for spherical covariances
cv = (rng.rand() + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='spherical', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.5))
assert_true(np.allclose(
samples.var(axis), np.repeat(cv, n_features), atol=1.5))
# and for full covariances
A = rng.randn(n_features, n_features)
cv = np.dot(A.T, A) + np.eye(n_features)
samples = mixture.sample_gaussian(
mu, cv, covariance_type='full', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(np.cov(samples), cv, atol=2.5))
# Numerical stability check: in SciPy 0.12.0 at least, eigh may return
# tiny negative values in its second return value.
from sklearn.mixture import sample_gaussian
x = sample_gaussian([0, 0], [[4, 3], [1, .1]],
covariance_type='full', random_state=42)
print(x)
assert_true(np.isfinite(x).all())
def _naive_lmvnpdf_diag(X, mu, cv):
# slow and naive implementation of lmvnpdf
ref = np.empty((len(X), len(mu)))
stds = np.sqrt(cv)
for i, (m, std) in enumerate(zip(mu, stds)):
ref[:, i] = np.log(stats.norm.pdf(X, m, std)).sum(axis=1)
return ref
def test_lmvnpdf_diag():
# test a slow and naive implementation of lmvnpdf and
# compare it to the vectorized version (mixture.lmvnpdf) to test
# for correctness
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
ref = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, cv, 'diag')
assert_array_almost_equal(lpr, ref)
def test_lmvnpdf_spherical():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
spherecv = rng.rand(n_components, 1) ** 2 + 1
X = rng.randint(10) * rng.rand(n_samples, n_features)
cv = np.tile(spherecv, (n_features, 1))
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, spherecv,
'spherical')
assert_array_almost_equal(lpr, reference)
def test_lmvnpdf_full():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
fullcv = np.array([np.diag(x) for x in cv])
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, fullcv, 'full')
assert_array_almost_equal(lpr, reference)
def test_lvmpdf_full_cv_non_positive_definite():
n_features, n_samples = 2, 10
rng = np.random.RandomState(0)
X = rng.randint(10) * rng.rand(n_samples, n_features)
mu = np.mean(X, 0)
cv = np.array([[[-1, 0], [0, 1]]])
expected_message = "'covars' must be symmetric, positive-definite"
assert_raise_message(ValueError, expected_message,
mixture.log_multivariate_normal_density,
X, mu, cv, 'full')
def test_GMM_attributes():
n_components, n_features = 10, 4
covariance_type = 'diag'
g = mixture.GMM(n_components, covariance_type, random_state=rng)
weights = rng.rand(n_components)
weights = weights / weights.sum()
means = rng.randint(-20, 20, (n_components, n_features))
assert_true(g.n_components == n_components)
assert_true(g.covariance_type == covariance_type)
g.weights_ = weights
assert_array_almost_equal(g.weights_, weights)
g.means_ = means
assert_array_almost_equal(g.means_, means)
covars = (0.1 + 2 * rng.rand(n_components, n_features)) ** 2
g.covars_ = covars
assert_array_almost_equal(g.covars_, covars)
assert_raises(ValueError, g._set_covars, [])
assert_raises(ValueError, g._set_covars,
np.zeros((n_components - 2, n_features)))
assert_raises(ValueError, mixture.GMM, n_components=20,
covariance_type='badcovariance_type')
class GMMTester():
do_test_eval = True
def _setUp(self):
self.n_components = 10
self.n_features = 4
self.weights = rng.rand(self.n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.randint(-20, 20, (self.n_components, self.n_features))
self.threshold = -0.5
self.I = np.eye(self.n_features)
self.covars = {
'spherical': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'tied': (make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I),
'diag': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'full': np.array([make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I for x in range(self.n_components)])}
def test_eval(self):
if not self.do_test_eval:
return # DPGMM does not support setting the means and
# covariances before fitting. There is no way of fixing this
# due to the variational parameters being more expressive than
# covariance matrices
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = self.covars[self.covariance_type]
g.weights_ = self.weights
gaussidx = np.repeat(np.arange(self.n_components), 5)
n_samples = len(gaussidx)
X = rng.randn(n_samples, self.n_features) + g.means_[gaussidx]
ll, responsibilities = g.score_samples(X)
self.assertEqual(len(ll), n_samples)
self.assertEqual(responsibilities.shape,
(n_samples, self.n_components))
assert_array_almost_equal(responsibilities.sum(axis=1),
np.ones(n_samples))
assert_array_equal(responsibilities.argmax(axis=1), gaussidx)
def test_sample(self, n=100):
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
g.weights_ = self.weights
samples = g.sample(n)
self.assertEqual(samples.shape, (n, self.n_features))
def test_train(self, params='wmc'):
g = mixture.GMM(n_components=self.n_components,
covariance_type=self.covariance_type)
g.weights_ = self.weights
g.means_ = self.means
g.covars_ = 20 * self.covars[self.covariance_type]
# Create a training set by sampling from the predefined distribution.
X = g.sample(n_samples=100)
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-1,
n_iter=1, init_params=params)
g.fit(X)
# Do one training iteration at a time so we can keep track of
# the log likelihood to make sure that it increases after each
# iteration.
trainll = []
for _ in range(5):
g.params = params
g.init_params = ''
g.fit(X)
trainll.append(self.score(g, X))
g.n_iter = 10
g.init_params = ''
g.params = params
g.fit(X) # finish fitting
# Note that the log likelihood will sometimes decrease by a
# very small amount after it has more or less converged due to
# the addition of min_covar to the covariance (to prevent
# underflow). This is why the threshold is set to -0.5
# instead of 0.
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > self.threshold,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, self.threshold, self.covariance_type, trainll))
def test_train_degenerate(self, params='wmc'):
# Train on degenerate data with 0 in some dimensions
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, self.n_features)
X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-3, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5)
def test_train_1d(self, params='wmc'):
# Train on 1-D data
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, 1)
# X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-7, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
if isinstance(g, mixture.DPGMM):
self.assertTrue(np.sum(np.abs(trainll / 100)) < 5)
else:
self.assertTrue(np.sum(np.abs(trainll / 100)) < 2)
def score(self, g, X):
return g.score(X).sum()
class TestGMMWithSphericalCovars(unittest.TestCase, GMMTester):
covariance_type = 'spherical'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithDiagonalCovars(unittest.TestCase, GMMTester):
covariance_type = 'diag'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithTiedCovars(unittest.TestCase, GMMTester):
covariance_type = 'tied'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithFullCovars(unittest.TestCase, GMMTester):
covariance_type = 'full'
model = mixture.GMM
setUp = GMMTester._setUp
def test_multiple_init():
# Test that multiple inits do not perform much worse than a single one
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, covariance_type='spherical',
random_state=rng, min_covar=1e-7, n_iter=5)
train1 = g.fit(X).score(X).sum()
g.n_init = 5
train2 = g.fit(X).score(X).sum()
assert_true(train2 >= train1 - 1.e-2)
def test_n_parameters():
# Test that the right number of parameters is estimated
n_samples, n_dim, n_components = 7, 5, 2
X = rng.randn(n_samples, n_dim)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_true(g._n_parameters() == n_params[cv_type])
def test_1d_1component():
# Test all of the covariance_types return the same BIC score for
# 1-dimensional, 1 component fits.
n_samples, n_dim, n_components = 100, 1, 1
X = rng.randn(n_samples, n_dim)
g_full = mixture.GMM(n_components=n_components, covariance_type='full',
random_state=rng, min_covar=1e-7, n_iter=1)
g_full.fit(X)
g_full_bic = g_full.bic(X)
for cv_type in ['tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_array_almost_equal(g.bic(X), g_full_bic)
def assert_fit_predict_correct(model, X):
model2 = copy.deepcopy(model)
predictions_1 = model.fit(X).predict(X)
predictions_2 = model2.fit_predict(X)
assert adjusted_rand_score(predictions_1, predictions_2) == 1.0
def test_fit_predict():
"""
test that gmm.fit_predict is equivalent to gmm.fit + gmm.predict
"""
lrng = np.random.RandomState(101)
n_samples, n_dim, n_comps = 100, 2, 2
mu = np.array([[8, 8]])
component_0 = lrng.randn(n_samples, n_dim)
component_1 = lrng.randn(n_samples, n_dim) + mu
X = np.vstack((component_0, component_1))
for m_constructor in (mixture.GMM, mixture.VBGMM, mixture.DPGMM):
model = m_constructor(n_components=n_comps, covariance_type='full',
min_covar=1e-7, n_iter=5,
random_state=np.random.RandomState(0))
assert_fit_predict_correct(model, X)
model = mixture.GMM(n_components=n_comps, n_iter=0)
z = model.fit_predict(X)
assert np.all(z == 0), "Quick Initialization Failed!"
def test_aic():
# Test the aic and bic criteria
n_samples, n_dim, n_components = 50, 3, 2
X = rng.randn(n_samples, n_dim)
SGH = 0.5 * (X.var() + np.log(2 * np.pi)) # standard gaussian entropy
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7)
g.fit(X)
aic = 2 * n_samples * SGH * n_dim + 2 * g._n_parameters()
bic = (2 * n_samples * SGH * n_dim +
np.log(n_samples) * g._n_parameters())
bound = n_dim * 3. / np.sqrt(n_samples)
assert_true(np.abs(g.aic(X) - aic) / n_samples < bound)
assert_true(np.abs(g.bic(X) - bic) / n_samples < bound)
def check_positive_definite_covars(covariance_type):
r"""Test that covariance matrices do not become non positive definite
Due to the accumulation of round-off errors, the computation of the
covariance matrices during the learning phase could lead to non-positive
definite covariance matrices. Namely the use of the formula:
.. math:: C = (\sum_i w_i x_i x_i^T) - \mu \mu^T
instead of:
.. math:: C = \sum_i w_i (x_i - \mu)(x_i - \mu)^T
while mathematically equivalent, was observed to raise a ``LinAlgError``
exception when computing a ``GMM`` with full covariance matrices and fixed mean.
This function ensures that some later optimization will not introduce the
problem again.
"""
rng = np.random.RandomState(1)
# we build a dataset with two 2d components. The components are unbalanced
# (respective weights 0.9 and 0.1)
X = rng.randn(100, 2)
X[-10:] += (3, 3) # Shift the 10 last points
gmm = mixture.GMM(2, params="wc", covariance_type=covariance_type,
min_covar=1e-3)
# This is a non-regression test for issue #2640. The following call used
# to trigger:
# numpy.linalg.linalg.LinAlgError: 2-th leading minor not positive definite
gmm.fit(X)
if covariance_type == "diag" or covariance_type == "spherical":
assert_greater(gmm.covars_.min(), 0)
else:
if covariance_type == "tied":
covs = [gmm.covars_]
else:
covs = gmm.covars_
for c in covs:
assert_greater(np.linalg.det(c), 0)
def test_positive_definite_covars():
# Check positive definiteness for all covariance types
for covariance_type in ["full", "tied", "diag", "spherical"]:
yield check_positive_definite_covars, covariance_type
def test_verbose_first_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
|
bsd-3-clause
|
luo66/scikit-learn
|
examples/applications/wikipedia_principal_eigenvector.py
|
233
|
7819
|
"""
===============================
Wikipedia principal eigenvector
===============================
A classical way to assess the relative importance of vertices in a
graph is to compute the principal eigenvector of the adjacency matrix
so as to assign to each vertex the values of the components of the first
eigenvector as a centrality score:
http://en.wikipedia.org/wiki/Eigenvector_centrality
On the graph of webpages and links those values are called the PageRank
scores by Google.
The goal of this example is to analyze the graph of links inside
wikipedia articles to rank articles by relative importance according to
this eigenvector centrality.
The traditional way to compute the principal eigenvector is to use the
power iteration method:
http://en.wikipedia.org/wiki/Power_iteration
Here the computation is achieved thanks to Martinsson's Randomized SVD
algorithm implemented in the scikit.
The graph data is fetched from the DBpedia dumps. DBpedia is an extraction
of the latent structured data of the Wikipedia content.
"""
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from bz2 import BZ2File
import os
from datetime import datetime
from pprint import pprint
from time import time
import numpy as np
from scipy import sparse
from sklearn.decomposition import randomized_svd
from sklearn.externals.joblib import Memory
from sklearn.externals.six.moves.urllib.request import urlopen
from sklearn.externals.six import iteritems
print(__doc__)
###############################################################################
# Where to download the data, if not already on disk
redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2"
redirects_filename = redirects_url.rsplit("/", 1)[1]
page_links_url = "http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2"
page_links_filename = page_links_url.rsplit("/", 1)[1]
resources = [
(redirects_url, redirects_filename),
(page_links_url, page_links_filename),
]
for url, filename in resources:
if not os.path.exists(filename):
print("Downloading data from '%s', please wait..." % url)
opener = urlopen(url)
open(filename, 'wb').write(opener.read())
print()
###############################################################################
# Loading the redirect files
memory = Memory(cachedir=".")
def index(redirects, index_map, k):
"""Find the index of an article name after redirect resolution"""
k = redirects.get(k, k)
return index_map.setdefault(k, len(index_map))
DBPEDIA_RESOURCE_PREFIX_LEN = len("http://dbpedia.org/resource/")
SHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1)
def short_name(nt_uri):
"""Remove the < and > URI markers and the common URI prefix"""
return nt_uri[SHORTNAME_SLICE]
def get_redirects(redirects_filename):
"""Parse the redirections and build a transitively closed map out of it"""
redirects = {}
print("Parsing the NT redirect file")
for l, line in enumerate(BZ2File(redirects_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
redirects[short_name(split[0])] = short_name(split[2])
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
# compute the transitive closure
print("Computing the transitive closure of the redirect relation")
for l, source in enumerate(redirects.keys()):
transitive_target = None
target = redirects[source]
seen = set([source])
while True:
transitive_target = target
target = redirects.get(target)
if target is None or target in seen:
break
seen.add(target)
redirects[source] = transitive_target
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
return redirects
# disabling joblib as the pickling of large dicts seems much too slow
#@memory.cache
def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):
"""Extract the adjacency graph as a scipy sparse matrix
Redirects are resolved first.
Returns X, the scipy sparse adjacency matrix, redirects as python
dict from article names to article names and index_map a python dict
from article names to python int (article indexes).
"""
print("Computing the redirect map")
redirects = get_redirects(redirects_filename)
print("Computing the integer index map")
index_map = dict()
links = list()
for l, line in enumerate(BZ2File(page_links_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
i = index(redirects, index_map, short_name(split[0]))
j = index(redirects, index_map, short_name(split[2]))
links.append((i, j))
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
if limit is not None and l >= limit - 1:
break
print("Computing the adjacency matrix")
X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32)
for i, j in links:
X[i, j] = 1.0
del links
print("Converting to CSR representation")
X = X.tocsr()
print("CSR conversion done")
return X, redirects, index_map
# stop after 5M links to make it possible to work in RAM
X, redirects, index_map = get_adjacency_matrix(
redirects_filename, page_links_filename, limit=5000000)
names = dict((i, name) for name, i in iteritems(index_map))
print("Computing the principal singular vectors using randomized_svd")
t0 = time()
U, s, V = randomized_svd(X, 5, n_iter=3)
print("done in %0.3fs" % (time() - t0))
# print the names of the wikipedia pages with the strongest components of the
# principal singular vectors, which should be similar to the principal eigenvector
print("Top wikipedia pages according to principal singular vectors")
pprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]])
pprint([names[i] for i in np.abs(V[0]).argsort()[-10:]])
def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):
"""Power iteration computation of the principal eigenvector
This method is also known as Google PageRank and the implementation
is based on the one from the NetworkX project (BSD licensed too)
with copyrights by:
Aric Hagberg <[email protected]>
Dan Schult <[email protected]>
Pieter Swart <[email protected]>
"""
n = X.shape[0]
X = X.copy()
incoming_counts = np.asarray(X.sum(axis=1)).ravel()
print("Normalizing the graph")
for i in incoming_counts.nonzero()[0]:
X.data[X.indptr[i]:X.indptr[i + 1]] *= 1.0 / incoming_counts[i]
dangle = np.asarray(np.where(X.sum(axis=1) == 0, 1.0 / n, 0)).ravel()
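    # Rows summing to zero are "dangling" pages with no outgoing links; the `dangle`
    # vector redistributes their score mass uniformly over all n pages at each
    # iteration, while `alpha` is the usual PageRank damping factor.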
scores = np.ones(n, dtype=np.float32) / n # initial guess
for i in range(max_iter):
print("power iteration #%d" % i)
prev_scores = scores
scores = (alpha * (scores * X + np.dot(dangle, prev_scores))
+ (1 - alpha) * prev_scores.sum() / n)
# check convergence: normalized l_inf norm
scores_max = np.abs(scores).max()
if scores_max == 0.0:
scores_max = 1.0
err = np.abs(scores - prev_scores).max() / scores_max
print("error: %0.6f" % err)
if err < n * tol:
return scores
return scores
print("Computing principal eigenvector score using a power iteration method")
t0 = time()
scores = centrality_scores(X, max_iter=100, tol=1e-10)
print("done in %0.3fs" % (time() - t0))
pprint([names[i] for i in np.abs(scores).argsort()[-10:]])
|
bsd-3-clause
|
quanhua92/tensorflow-handbook
|
classification/logistic_regression_2d.py
|
1
|
1996
|
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
"""
Usage:
python logistic_regression_2d.py
"""
learning_rate = 0.1
training_epochs = 2000
x1_label1 = np.random.normal(3, 1, 1000)
x2_label1 = np.random.normal(2, 1, 1000)
x1_label2 = np.random.normal(7, 1, 1000)
x2_label2 = np.random.normal(6, 1, 1000)
x1s = np.append(x1_label1, x1_label2)
x2s = np.append(x2_label1, x2_label2)
ys = np.asarray([0.] * len(x1_label1) + [1.] * len(x1_label2))
X1 = tf.placeholder(tf.float32, shape=(None, ), name="x1")
X2 = tf.placeholder(tf.float32, shape=(None, ), name="x2")
Y = tf.placeholder(tf.float32, shape=(None, ), name="y")
w = tf.Variable([0., 0., 0.], name="weights", trainable=True)
y_model = tf.sigmoid(-(w[2] * X2 + w[1] * X1 + w[0]))
cost = tf.reduce_mean(-tf.log(y_model * Y + (1-y_model) * (1 - Y)))
train_op = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
prev_err = 0
for epoch in tqdm(range(training_epochs)):
err, _ = sess.run([cost, train_op], {
X1: x1s,
X2: x2s,
Y: ys
})
if epoch % 100 == 0:
print(epoch, err)
if abs(prev_err - err) < 0.00001:
break
prev_err = err
w_val = sess.run(w, {
X1: x1s,
X2: x2s,
Y: ys
})
sess.close()
def sigmoid(x):
return 1. / (1. + np.exp(-x))
x1_boundary, x2_boundary = [], []
for x1_test in np.linspace(0, 10, 100):
for x2_test in np.linspace(0, 10, 100):
z = sigmoid(-x2_test * w_val[2] - x1_test * w_val[1] - w_val[0])
if abs(z - 0.5) < 0.01:
x1_boundary.append(x1_test)
x2_boundary.append(x2_test)
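# Hedged aside (not in the original script): the 0.5 level set of the sigmoid is the
# line w[0] + w[1]*x1 + w[2]*x2 = 0, so the grid search above can also be replaced by
# the closed-form boundary below (assumes w_val[2] != 0 after training).
x1_line = np.linspace(0, 10, 100)
x2_line = -(w_val[0] + w_val[1] * x1_line) / w_val[2]
plt.plot(x1_line, x2_line, 'k--', linewidth=1)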
plt.scatter(x1_boundary, x2_boundary, c='b', marker='o', s=20)
plt.scatter(x1_label1, x2_label1, c='r', marker='x', s=20)
plt.scatter(x1_label2, x2_label2, c='g', marker='1', s=20)
plt.show()
|
apache-2.0
|
rvraghav93/scikit-learn
|
examples/model_selection/grid_search_text_feature_extraction.py
|
15
|
4163
|
"""
==========================================================
Sample pipeline for text feature extraction and evaluation
==========================================================
The dataset used in this example is the 20 newsgroups dataset which will be
automatically downloaded and then cached and reused for the document
classification example.
You can adjust the number of categories by giving their names to the dataset
loader or setting them to None to get all 20 of them.
Here is a sample output of a run on a quad-core machine::
Loading 20 newsgroups dataset for categories:
['alt.atheism', 'talk.religion.misc']
1427 documents
2 categories
Performing grid search...
pipeline: ['vect', 'tfidf', 'clf']
parameters:
{'clf__alpha': (1.0000000000000001e-05, 9.9999999999999995e-07),
'clf__n_iter': (10, 50, 80),
'clf__penalty': ('l2', 'elasticnet'),
'tfidf__use_idf': (True, False),
'vect__max_n': (1, 2),
'vect__max_df': (0.5, 0.75, 1.0),
'vect__max_features': (None, 5000, 10000, 50000)}
done in 1737.030s
Best score: 0.940
Best parameters set:
clf__alpha: 9.9999999999999995e-07
clf__n_iter: 50
clf__penalty: 'elasticnet'
tfidf__use_idf: True
vect__max_n: 2
vect__max_df: 0.75
vect__max_features: 50000
"""
# Author: Olivier Grisel <[email protected]>
# Peter Prettenhofer <[email protected]>
# Mathieu Blondel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from pprint import pprint
from time import time
import logging
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# #############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
data = fetch_20newsgroups(subset='train', categories=categories)
print("%d documents" % len(data.filenames))
print("%d categories" % len(data.target_names))
print()
# #############################################################################
# Define a pipeline combining a text feature extractor with a simple
# classifier
pipeline = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier()),
])
# uncommenting more parameters will give better exploring power but will
# increase processing time in a combinatorial way
parameters = {
'vect__max_df': (0.5, 0.75, 1.0),
#'vect__max_features': (None, 5000, 10000, 50000),
'vect__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams
#'tfidf__use_idf': (True, False),
#'tfidf__norm': ('l1', 'l2'),
'clf__alpha': (0.00001, 0.000001),
'clf__penalty': ('l2', 'elasticnet'),
#'clf__n_iter': (10, 50, 80),
}
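# Hedged aside: with the four uncommented keys above the grid has 3 * 2 * 2 * 2 = 24
# candidate settings, e.g.
#     from sklearn.model_selection import ParameterGrid
#     len(ParameterGrid(parameters))  # -> 24
# GridSearchCV refits each candidate once per CV split, so the total cost grows
# multiplicatively as more of the commented-out entries are enabled.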
if __name__ == "__main__":
# multiprocessing requires the fork to happen in a __main__ protected
# block
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)
print("Performing grid search...")
print("pipeline:", [name for name, _ in pipeline.steps])
print("parameters:")
pprint(parameters)
t0 = time()
grid_search.fit(data.data, data.target)
print("done in %0.3fs" % (time() - t0))
print()
print("Best score: %0.3f" % grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
|
bsd-3-clause
|
arthurmensch/modl
|
exps/multi_decompose_fmri.py
|
1
|
2118
|
import os
import sys
from os import path
from os.path import join
import numpy as np
from sacred import Experiment
from sacred.observers import FileStorageObserver
from sklearn.externals.joblib import Parallel
from sklearn.externals.joblib import delayed
from sklearn.utils import check_random_state
from modl.utils.system import get_output_dir
# Add examples to known modules
sys.path.append(path.dirname(path.dirname
(path.dirname(path.abspath(__file__)))))
from exps.exp_decompose_fmri import exp as single_exp
exp = Experiment('multi_decompose_fmri')
basedir = join(get_output_dir(), 'multi_decompose_fmri')
if not os.path.exists(basedir):
os.makedirs(basedir)
exp.observers.append(FileStorageObserver.create(basedir=basedir))
@exp.config
def config():
n_jobs = 2
n_seeds = 1
seed = 1
@single_exp.config
def config():
n_components = 70
batch_size = 100
learning_rate = 0.92
method = 'average'
reduction = 12
alpha = 3e-4
n_epochs = 100
verbose = 100
n_jobs = 1
step_size = 1e-5
source = 'adhd_4'
seed = 1
def single_run(config_updates, rundir, _id):
run = single_exp._create_run(config_updates=config_updates)
observer = FileStorageObserver.create(basedir=rundir)
run._id = _id
run.observers = [observer]
try:
run()
except:
print('Run %i failed' % _id)
@exp.automain
def run(n_seeds, n_jobs, _run, _seed):
seed_list = check_random_state(_seed).randint(np.iinfo(np.uint32).max,
size=n_seeds)
exps = []
exps += [{'method': 'sgd',
'step_size': step_size}
for step_size in np.logspace(-7, -7, 1)]
exps += [{'method': 'gram',
'reduction': reduction}
for reduction in [12]]
rundir = join(basedir, str(_run._id), 'run')
if not os.path.exists(rundir):
os.makedirs(rundir)
Parallel(n_jobs=n_jobs,
verbose=10)(delayed(single_run)(config_updates, rundir, i)
for i, config_updates in enumerate(exps))
|
bsd-2-clause
|
mblondel/scikit-learn
|
sklearn/calibration.py
|
2
|
18518
|
"""Calibration of predicted probabilities."""
# Author: Alexandre Gramfort <[email protected]>
# Balazs Kegl <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# Mathieu Blondel <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import inspect
import warnings
from math import log
import numpy as np
from scipy.optimize import fmin_bfgs
from .base import BaseEstimator, ClassifierMixin, RegressorMixin, clone
from .preprocessing import LabelBinarizer
from .utils import check_X_y, check_array, indexable, column_or_1d
from .utils.validation import check_is_fitted
from .isotonic import IsotonicRegression
from .svm import LinearSVC
from .cross_validation import _check_cv
from .metrics.classification import _check_binary_probabilistic_predictions
class CalibratedClassifierCV(BaseEstimator, ClassifierMixin):
"""Probability calibration with isotonic regression or sigmoid.
With this class, the base_estimator is fit on the train set of the
cross-validation generator and the test set is used for calibration.
The probabilities for each of the folds are then averaged
    for prediction. In case cv="prefit" is passed to __init__, it is
    assumed that base_estimator has been fitted already and all data is
    used for calibration. Note that data for fitting the classifier and
    for calibrating it must be disjoint.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. If cv=prefit, the
classifier must have been fit already on data.
method : 'sigmoid' | 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
        non-parametric approach. It is not advised to use isotonic calibration
        with too few calibration samples (<<1000) since it tends to overfit.
        Use sigmoid (Platt's) calibration in this case.
cv : integer or cross-validation generator or "prefit", optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
If "prefit" is passed, it is assumed that base_estimator has been
fitted already and all data is used for calibration.
Attributes
----------
classes_ : array, shape (n_classes)
The class labels.
calibrated_classifiers_: list (len() equal to cv or 1 if cv == "prefit")
The list of calibrated classifiers, one for each crossvalidation fold,
which has been fitted on all but the validation fold and calibrated
on the validation fold.
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
def __init__(self, base_estimator=None, method='sigmoid', cv=3):
self.base_estimator = base_estimator
self.method = method
self.cv = cv
def fit(self, X, y, sample_weight=None):
"""Fit the calibrated model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, accept_sparse=['csc', 'csr', 'coo'])
X, y = indexable(X, y)
lb = LabelBinarizer().fit(y)
self.classes_ = lb.classes_
        # Check that each cross-validation fold can have at least one
# example per class
n_folds = self.cv if isinstance(self.cv, int) \
else self.cv.n_folds if hasattr(self.cv, "n_folds") else None
if n_folds and \
np.any([np.sum(y == class_) < n_folds for class_ in self.classes_]):
raise ValueError("Requesting %d-fold cross-validation but provided"
" less than %d examples for at least one class."
% (n_folds, n_folds))
self.calibrated_classifiers_ = []
if self.base_estimator is None:
base_estimator = LinearSVC()
else:
base_estimator = self.base_estimator
if self.cv == "prefit":
calibrated_classifier = _CalibratedClassifier(
base_estimator, method=self.method)
if sample_weight is not None:
calibrated_classifier.fit(X, y, sample_weight)
else:
calibrated_classifier.fit(X, y)
self.calibrated_classifiers_.append(calibrated_classifier)
else:
cv = _check_cv(self.cv, X, y, classifier=True)
arg_names = inspect.getargspec(base_estimator.fit)[0]
estimator_name = type(base_estimator).__name__
if (sample_weight is not None
and "sample_weight" not in arg_names):
warnings.warn("%s does not support sample_weight. Samples"
" weights are only used for the calibration"
" itself." % estimator_name)
base_estimator_sample_weight = None
else:
base_estimator_sample_weight = sample_weight
for train, test in cv:
this_estimator = clone(base_estimator)
if base_estimator_sample_weight is not None:
this_estimator.fit(
X[train], y[train],
sample_weight=base_estimator_sample_weight[train])
else:
this_estimator.fit(X[train], y[train])
calibrated_classifier = _CalibratedClassifier(
this_estimator, method=self.method)
if sample_weight is not None:
calibrated_classifier.fit(X[test], y[test],
sample_weight[test])
else:
calibrated_classifier.fit(X[test], y[test])
self.calibrated_classifiers_.append(calibrated_classifier)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probas.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'])
# Compute the arithmetic mean of the predictions of the calibrated
        # classifiers
mean_proba = np.zeros((X.shape[0], len(self.classes_)))
for calibrated_classifier in self.calibrated_classifiers_:
proba = calibrated_classifier.predict_proba(X)
mean_proba += proba
mean_proba /= len(self.calibrated_classifiers_)
return mean_proba
def predict(self, X):
"""Predict the target of new samples. Can be different from the
prediction of the uncalibrated classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples,)
The predicted class.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
return self.classes_[np.argmax(self.predict_proba(X), axis=1)]
class _CalibratedClassifier(object):
"""Probability calibration with isotonic regression or sigmoid.
It assumes that base_estimator has already been fit, and trains the
calibration on the input set of the fit function. Note that this class
should not be used as an estimator directly. Use CalibratedClassifierCV
with cv="prefit" instead.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. No default value since
it has to be an already fitted estimator.
method : 'sigmoid' | 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
        non-parametric approach based on isotonic regression.
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
def __init__(self, base_estimator, method='sigmoid'):
self.base_estimator = base_estimator
self.method = method
def _preproc(self, X):
n_classes = len(self.classes_)
if hasattr(self.base_estimator, "decision_function"):
df = self.base_estimator.decision_function(X)
if df.ndim == 1:
df = df[:, np.newaxis]
elif hasattr(self.base_estimator, "predict_proba"):
df = self.base_estimator.predict_proba(X)
if n_classes == 2:
df = df[:, 1:]
else:
raise RuntimeError('classifier has no decision_function or '
'predict_proba method.')
idx_pos_class = np.arange(df.shape[1])
return df, idx_pos_class
def fit(self, X, y, sample_weight=None):
"""Calibrate the fitted model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
lb = LabelBinarizer()
Y = lb.fit_transform(y)
self.classes_ = lb.classes_
df, idx_pos_class = self._preproc(X)
self.calibrators_ = []
for k, this_df in zip(idx_pos_class, df.T):
if self.method == 'isotonic':
calibrator = IsotonicRegression(out_of_bounds='clip')
elif self.method == 'sigmoid':
calibrator = _SigmoidCalibration()
else:
raise ValueError('method should be "sigmoid" or '
'"isotonic". Got %s.' % self.method)
calibrator.fit(this_df, Y[:, k], sample_weight)
self.calibrators_.append(calibrator)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probas. Can be exact zeros.
"""
n_classes = len(self.classes_)
proba = np.zeros((X.shape[0], n_classes))
df, idx_pos_class = self._preproc(X)
for k, this_df, calibrator in \
zip(idx_pos_class, df.T, self.calibrators_):
if n_classes == 2:
k += 1
proba[:, k] = calibrator.predict(this_df)
# Normalize the probabilities
if n_classes == 2:
proba[:, 0] = 1. - proba[:, 1]
else:
proba /= np.sum(proba, axis=1)[:, np.newaxis]
# XXX : for some reason all probas can be 0
proba[np.isnan(proba)] = 1. / n_classes
# Deal with cases where the predicted probability minimally exceeds 1.0
proba[(1.0 < proba) & (proba <= 1.0 + 1e-5)] = 1.0
return proba
def _sigmoid_calibration(df, y, sample_weight=None):
"""Probability Calibration with sigmoid method (Platt 2000)
Parameters
----------
df : ndarray, shape (n_samples,)
The decision function or predict proba for the samples.
y : ndarray, shape (n_samples,)
The targets.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
a : float
The slope.
b : float
The intercept.
References
----------
Platt, "Probabilistic Outputs for Support Vector Machines"
"""
df = column_or_1d(df)
y = column_or_1d(y)
F = df # F follows Platt's notations
tiny = np.finfo(np.float).tiny # to avoid division by 0 warning
# Bayesian priors (see Platt end of section 2.2)
prior0 = float(np.sum(y <= 0))
prior1 = y.shape[0] - prior0
T = np.zeros(y.shape)
T[y > 0] = (prior1 + 1.) / (prior1 + 2.)
T[y <= 0] = 1. / (prior0 + 2.)
T1 = 1. - T
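    # Platt's regularized targets (end of his section 2.2): positives are mapped to
    # (N+ + 1) / (N+ + 2) and negatives to 1 / (N- + 2) instead of hard 1/0 labels,
    # which keeps the sigmoid fit from saturating.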
def objective(AB):
# From Platt (beginning of Section 2.2)
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
l = -(T * np.log(P + tiny) + T1 * np.log(1. - P + tiny))
if sample_weight is not None:
return (sample_weight * l).sum()
else:
return l.sum()
def grad(AB):
# gradient of the objective function
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
TEP_minus_T1P = P * (T * E - T1)
if sample_weight is not None:
TEP_minus_T1P *= sample_weight
dA = np.dot(TEP_minus_T1P, F)
dB = np.sum(TEP_minus_T1P)
return np.array([dA, dB])
AB0 = np.array([0., log((prior0 + 1.) / (prior1 + 1.))])
AB_ = fmin_bfgs(objective, AB0, fprime=grad, disp=False)
return AB_[0], AB_[1]
class _SigmoidCalibration(BaseEstimator, RegressorMixin):
"""Sigmoid regression model.
Attributes
----------
`a_` : float
The slope.
`b_` : float
The intercept.
"""
def fit(self, X, y, sample_weight=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples,)
Training data.
y : array-like, shape (n_samples,)
Training target.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X = column_or_1d(X)
y = column_or_1d(y)
X, y = indexable(X, y)
self.a_, self.b_ = _sigmoid_calibration(X, y, sample_weight)
return self
def predict(self, T):
"""Predict new data by linear interpolation.
Parameters
----------
T : array-like, shape (n_samples,)
Data to predict from.
Returns
-------
`T_` : array, shape (n_samples,)
The predicted data.
"""
T = column_or_1d(T)
return 1. / (1. + np.exp(self.a_ * T + self.b_))
def calibration_curve(y_true, y_prob, normalize=False, n_bins=5):
"""Compute true and predicted probabilities for a calibration curve.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
normalize : bool, optional, default=False
        Whether y_prob needs to be normalized into the interval [0, 1], i.e. is not
a proper probability. If True, the smallest value in y_prob is mapped
onto 0 and the largest one onto 1.
n_bins : int
Number of bins. A bigger number requires more data.
Returns
-------
prob_true : array, shape (n_bins,)
The true probability in each bin (fraction of positives).
prob_pred : array, shape (n_bins,)
The mean predicted probability in each bin.
References
----------
Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good
Probabilities With Supervised Learning, in Proceedings of the 22nd
International Conference on Machine Learning (ICML).
See section 4 (Qualitative Analysis of Predictions).
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if normalize: # Normalize predicted values into interval [0, 1]
y_prob = (y_prob - y_prob.min()) / (y_prob.max() - y_prob.min())
elif y_prob.min() < 0 or y_prob.max() > 1:
raise ValueError("y_prob has values outside [0, 1] and normalize is "
"set to False.")
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
bins = np.linspace(0., 1. + 1e-8, n_bins + 1)
binids = np.digitize(y_prob, bins) - 1
bin_sums = np.bincount(binids, weights=y_prob, minlength=len(bins))
bin_true = np.bincount(binids, weights=y_true, minlength=len(bins))
bin_total = np.bincount(binids, minlength=len(bins))
nonzero = bin_total != 0
prob_true = (bin_true[nonzero] / bin_total[nonzero])
prob_pred = (bin_sums[nonzero] / bin_total[nonzero])
return prob_true, prob_pred
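# Hedged usage sketch (not part of the module): a minimal illustration of how the
# pieces above fit together -- calibrate a LinearSVC with Platt scaling and inspect
# the reliability curve. Dataset, split and parameter values are illustrative only.
if __name__ == "__main__":
    from sklearn.datasets import make_classification
    from sklearn.cross_validation import train_test_split

    X_demo, y_demo = make_classification(n_samples=2000, random_state=0)
    X_fit, X_eval, y_fit, y_eval = train_test_split(X_demo, y_demo, random_state=0)
    clf = CalibratedClassifierCV(LinearSVC(), method="sigmoid", cv=3)
    clf.fit(X_fit, y_fit)
    prob_pos = clf.predict_proba(X_eval)[:, 1]
    frac_pos, mean_pred = calibration_curve(y_eval, prob_pos, n_bins=5)
    print(list(zip(mean_pred, frac_pos)))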
|
bsd-3-clause
|
lthurlow/Network-Grapher
|
proj/external/matplotlib-1.2.1/lib/mpl_examples/api/sankey_demo_basics.py
|
12
|
3421
|
"""Demonstrate the Sankey class by producing three basic diagrams.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.sankey import Sankey
# Example 1 -- Mostly defaults
# This demonstrates how to create a simple diagram by implicitly calling the
# Sankey.add() method and by appending finish() to the call to the class.
Sankey(flows=[0.25, 0.15, 0.60, -0.20, -0.15, -0.05, -0.50, -0.10],
labels=['', '', '', 'First', 'Second', 'Third', 'Fourth', 'Fifth'],
orientations=[-1, 1, 0, 1, 1, 1, 0, -1]).finish()
plt.title("The default settings produce a diagram like this.")
# Notice:
# 1. Axes weren't provided when Sankey() was instantiated, so they were
# created automatically.
# 2. The scale argument wasn't necessary since the data was already
# normalized.
# 3. By default, the lengths of the paths are justified.
# Example 2
# This demonstrates:
# 1. Setting one path longer than the others
# 2. Placing a label in the middle of the diagram
# 3. Using the scale argument to normalize the flows
# 4. Implicitly passing keyword arguments to PathPatch()
# 5. Changing the angle of the arrow heads
# 6. Changing the offset between the tips of the paths and their labels
# 7. Formatting the numbers in the path labels and the associated unit
# 8. Changing the appearance of the patch and the labels after the figure is
# created
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, xticks=[], yticks=[],
title="Flow Diagram of a Widget")
sankey = Sankey(ax=ax, scale=0.01, offset=0.2, head_angle=180,
format='%.0f', unit='%')
sankey.add(flows=[25, 0, 60, -10, -20, -5, -15, -10, -40],
labels = ['', '', '', 'First', 'Second', 'Third', 'Fourth',
'Fifth', 'Hurray!'],
orientations=[-1, 1, 0, 1, 1, 1, -1, -1, 0],
pathlengths = [0.25, 0.25, 0.25, 0.25, 0.25, 0.6, 0.25, 0.25,
0.25],
patchlabel="Widget\nA",
alpha=0.2, lw=2.0) # Arguments to matplotlib.patches.PathPatch()
diagrams = sankey.finish()
diagrams[0].patch.set_facecolor('#37c959')
diagrams[0].texts[-1].set_color('r')
diagrams[0].text.set_fontweight('bold')
# Notice:
# 1. Since the sum of the flows is nonzero, the width of the trunk isn't
# uniform. If verbose.level is helpful (in matplotlibrc), a message is
# given in the terminal window.
# 2. The second flow doesn't appear because its value is zero. Again, if
# verbose.level is helpful, a message is given in the terminal window.
# Example 3
# This demonstrates:
# 1. Connecting two systems
# 2. Turning off the labels of the quantities
# 3. Adding a legend
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, xticks=[], yticks=[], title="Two Systems")
flows = [0.25, 0.15, 0.60, -0.10, -0.05, -0.25, -0.15, -0.10, -0.35]
sankey = Sankey(ax=ax, unit=None)
sankey.add(flows=flows, label='one',
orientations=[-1, 1, 0, 1, 1, 1, -1, -1, 0])
sankey.add(flows=[-0.25, 0.15, 0.1], fc='#37c959', label='two',
orientations=[-1, -1, -1], prior=0, connect=(0, 0))
diagrams = sankey.finish()
diagrams[-1].patch.set_hatch('/')
plt.legend(loc='best')
# Notice that only one connection is specified, but the systems form a
# circuit since: (1) the lengths of the paths are justified and (2) the
# orientation and ordering of the flows is mirrored.
plt.show()
|
mit
|
xguse/bokeh
|
bokeh/charts/builder/tests/test_step_builder.py
|
33
|
2495
|
""" This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import unittest
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from bokeh.charts import Step
from bokeh.charts.builder.tests._utils import create_chart
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestStep(unittest.TestCase):
def test_supported_input(self):
xyvalues = OrderedDict()
xyvalues['python'] = [2, 3, 7, 5, 26]
xyvalues['pypy'] = [12, 33, 47, 15, 126]
xyvalues['jython'] = [22, 43, 10, 25, 26]
xyvaluesdf = pd.DataFrame(xyvalues)
        y_python = [2., 2., 3., 3., 7., 7., 5., 5., 26.]
        y_jython = [22., 22., 43., 43., 10., 10., 25., 25., 26.]
        y_pypy = [12., 12., 33., 33., 47., 47., 15., 15., 126.]
x = [0, 1, 1, 2, 2, 3, 3, 4, 4]
for i, _xy in enumerate([xyvalues, xyvaluesdf]):
hm = create_chart(Step, _xy)
builder = hm._builders[0]
self.assertEqual(sorted(builder._groups), sorted(list(xyvalues.keys())))
assert_array_equal(builder._data['x'], x)
assert_array_equal(builder._data['y_python'], y_python)
assert_array_equal(builder._data['y_jython'], y_jython)
assert_array_equal(builder._data['y_pypy'], y_pypy)
lvalues = [[2, 3, 7, 5, 26], [12, 33, 47, 15, 126], [22, 43, 10, 25, 26]]
for _xy in [lvalues, np.array(lvalues)]:
hm = create_chart(Step, _xy)
builder = hm._builders[0]
self.assertEqual(builder._groups, ['0', '1', '2'])
assert_array_equal(builder._data['y_0'], y_python)
assert_array_equal(builder._data['y_1'], y_pypy)
assert_array_equal(builder._data['y_2'], y_jython)
|
bsd-3-clause
|
klocey/ScalingMicroBiodiversity
|
ExtraTests/EMP_N.py
|
2
|
2324
|
from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
import random
import scipy as sc
from scipy import stats
import os
import sys
from scipy.stats.distributions import t
import statsmodels.stats.api as sms
import statsmodels.api as sm
import statsmodels.formula.api as smf
from statsmodels.sandbox.regression.predstd import wls_prediction_std
from statsmodels.stats.outliers_influence import summary_table
import itertools as it
import pandas as pd
from math import log10
import linecache
mydir = os.path.expanduser("~/GitHub/MicrobialScaling/")
mydir2 = os.path.expanduser("~/")
#sys.path.append(mydir2 + "GitHub/DiversityTools/metrics")
#import metrics as mets
def BigN():
datasets = []
GoodNames = ['HMP', 'EMPclosed', 'EMPopen']
for name in os.listdir(mydir +'data/micro'):
if name in GoodNames: pass
else: continue
#path = mydir2+'data/micro/'+name+'/'+name+'-SADMetricData_NoMicrobe1s.txt'
path = mydir2+'data/micro/'+name+'/'+name+'-SADMetricData.txt'
num_lines = sum(1 for line in open(path))
datasets.append([name, 'micro', num_lines])
print name, num_lines
Nlist, Slist, Evarlist, ESimplist, klist, radDATA, BPlist, NmaxList, rareSkews, KindList, StdList = [[], [], [], [], [], [], [], [], [], [], []]
for dataset in datasets:
Nt = 0
name, kind, numlines = dataset
lines = []
if name == 'EMPclosed' or name == 'EMPopen':
lines = np.random.choice(range(1, numlines+1), numlines, replace=False) # 166
elif kind == 'micro':
lines = np.random.choice(range(1, numlines+1), numlines, replace=False) #167
#path = mydir2+'data/'+kind+'/'+name+'/'+name+'-SADMetricData_NoMicrobe1s.txt'
path = mydir2+'data/'+kind+'/'+name+'/'+name+'-SADMetricData.txt'
for line in lines:
data = linecache.getline(path, line)
radDATA.append(data)
for data in radDATA:
data = data.split()
name, kind, N, S, Var, Evar, ESimp, EQ, O, ENee, EPielou, EHeip, BP, SimpDom, Nmax, McN, skew, logskew, chao1, ace, jknife1, jknife2, margalef, menhinick, preston_a, preston_S = data
Nt += float(N)
print name
print '%.2e' % Nt
return
BigN()
|
gpl-3.0
|
yukoba/sympy
|
examples/intermediate/mplot3d.py
|
93
|
1252
|
#!/usr/bin/env python
"""Matplotlib 3D plotting example
Demonstrates plotting with matplotlib.
"""
import sys
from sample import sample
from sympy import sin, Symbol
from sympy.external import import_module
def mplot3d(f, var1, var2, show=True):
"""
Plot a 3d function using matplotlib/Tk.
"""
import warnings
warnings.filterwarnings("ignore", "Could not match \S")
p = import_module('pylab')
# Try newer version first
p3 = import_module('mpl_toolkits.mplot3d',
__import__kwargs={'fromlist': ['something']}) or import_module('matplotlib.axes3d')
if not p or not p3:
sys.exit("Matplotlib is required to use mplot3d.")
x, y, z = sample(f, var1, var2)
fig = p.figure()
ax = p3.Axes3D(fig)
# ax.plot_surface(x, y, z, rstride=2, cstride=2)
ax.plot_wireframe(x, y, z)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
if show:
p.show()
def main():
x = Symbol('x')
y = Symbol('y')
mplot3d(x**2 - y**2, (x, -10.0, 10.0, 20), (y, -10.0, 10.0, 20))
# mplot3d(x**2+y**2, (x, -10.0, 10.0, 20), (y, -10.0, 10.0, 20))
# mplot3d(sin(x)+sin(y), (x, -3.14, 3.14, 10), (y, -3.14, 3.14, 10))
if __name__ == "__main__":
main()
|
bsd-3-clause
|
mlyundin/scikit-learn
|
sklearn/kernel_ridge.py
|
155
|
6545
|
"""Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, RegressorMixin
from .metrics.pairwise import pairwise_kernels
from .linear_model.ridge import _solve_cholesky_kernel
from .utils import check_X_y
from .utils.validation import check_is_fitted
class KernelRidge(BaseEstimator, RegressorMixin):
"""Kernel ridge regression.
Kernel ridge regression (KRR) combines ridge regression (linear least
squares with l2-norm regularization) with the kernel trick. It thus
learns a linear function in the space induced by the respective kernel and
the data. For non-linear kernels, this corresponds to a non-linear
function in the original space.
The form of the model learned by KRR is identical to support vector
regression (SVR). However, different loss functions are used: KRR uses
squared error loss while support vector regression uses epsilon-insensitive
loss, both combined with l2 regularization. In contrast to SVR, fitting a
KRR model can be done in closed-form and is typically faster for
medium-sized datasets. On the other hand, the learned model is non-sparse
and thus slower than SVR, which learns a sparse model for epsilon > 0, at
prediction-time.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <kernel_ridge>`.
Parameters
----------
alpha : {float, array-like}, shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
kernel : string or callable, default="linear"
Kernel mapping used internally. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
Attributes
----------
dual_coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s) in kernel space
X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data, which is also required for prediction
References
----------
* Kevin P. Murphy
"Machine Learning: A Probabilistic Perspective", The MIT Press
chapter 14.4.3, pp. 492-493
See also
--------
Ridge
Linear ridge regression.
SVR
Support Vector Regression implemented using libsvm.
Examples
--------
>>> from sklearn.kernel_ridge import KernelRidge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = KernelRidge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear',
kernel_params=None)
"""
def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1,
kernel_params=None):
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
@property
def _pairwise(self):
return self.kernel == "precomputed"
def fit(self, X, y=None, sample_weight=None):
"""Fit Kernel Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample, ignored if None is passed.
Returns
-------
self : returns an instance of self.
"""
# Convert data
X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True,
y_numeric=True)
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel = False
if len(y.shape) == 1:
y = y.reshape(-1, 1)
ravel = True
copy = self.kernel == "precomputed"
self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,
sample_weight,
copy)
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X
return self
def predict(self, X):
"""Predict using the the kernel ridge model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, ["X_fit_", "dual_coef_"])
K = self._get_kernel(X, self.X_fit_)
return np.dot(K, self.dual_coef_)
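# Hedged aside (illustrative only): for a linear kernel the closed-form dual solution
# mentioned in the class docstring is dual_coef = (K + alpha * I)^{-1} y, which is
# what _solve_cholesky_kernel computes (via Cholesky rather than an explicit inverse).
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_demo, y_demo = rng.randn(10, 3), rng.randn(10)
    krr = KernelRidge(alpha=1.0, kernel="linear").fit(X_demo, y_demo)
    K_demo = X_demo.dot(X_demo.T)
    dual = np.linalg.solve(K_demo + 1.0 * np.eye(10), y_demo)
    print(np.allclose(krr.dual_coef_, dual))  # expected: True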
|
bsd-3-clause
|
aabadie/scikit-learn
|
sklearn/feature_extraction/dict_vectorizer.py
|
37
|
12559
|
# Authors: Lars Buitinck
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from array import array
from collections import Mapping
from operator import itemgetter
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..utils import check_array, tosequence
from ..utils.fixes import frombuffer_empty
def _tosequence(X):
"""Turn X into a sequence or ndarray, avoiding a copy if possible."""
if isinstance(X, Mapping): # single sample
return [X]
else:
return tosequence(X)
class DictVectorizer(BaseEstimator, TransformerMixin):
"""Transforms lists of feature-value mappings to vectors.
This transformer turns lists of mappings (dict-like objects) of feature
names to feature values into Numpy arrays or scipy.sparse matrices for use
with scikit-learn estimators.
When feature values are strings, this transformer will do a binary one-hot
(aka one-of-K) coding: one boolean-valued feature is constructed for each
of the possible string values that the feature can take on. For instance,
a feature "f" that can take on the values "ham" and "spam" will become two
features in the output, one signifying "f=ham", the other "f=spam".
However, note that this transformer will only do a binary one-hot encoding
when feature values are of type string. If categorical features are
represented as numeric values such as int, the DictVectorizer can be
followed by OneHotEncoder to complete binary one-hot encoding.
Features that do not occur in a sample (mapping) will have a zero value
in the resulting array/matrix.
Read more in the :ref:`User Guide <dict_feature_extraction>`.
Parameters
----------
dtype : callable, optional
The type of feature values. Passed to Numpy array/scipy.sparse matrix
constructors as the dtype argument.
separator: string, optional
Separator string used when constructing new features for one-hot
coding.
sparse: boolean, optional.
Whether transform should produce scipy.sparse matrices.
True by default.
sort: boolean, optional.
Whether ``feature_names_`` and ``vocabulary_`` should be sorted when fitting.
True by default.
Attributes
----------
vocabulary_ : dict
A dictionary mapping feature names to feature indices.
feature_names_ : list
A list of length n_features containing the feature names (e.g., "f=ham"
and "f=spam").
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> v = DictVectorizer(sparse=False)
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> X
array([[ 2., 0., 1.],
[ 0., 1., 3.]])
>>> v.inverse_transform(X) == \
[{'bar': 2.0, 'foo': 1.0}, {'baz': 1.0, 'foo': 3.0}]
True
>>> v.transform({'foo': 4, 'unseen_feature': 3})
array([[ 0., 0., 4.]])
See also
--------
FeatureHasher : performs vectorization using only a hash function.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, dtype=np.float64, separator="=", sparse=True,
sort=True):
self.dtype = dtype
self.separator = separator
self.sparse = sparse
self.sort = sort
def fit(self, X, y=None):
"""Learn a list of feature name -> indices mappings.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
self
"""
feature_names = []
vocab = {}
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
if f not in vocab:
feature_names.append(f)
vocab[f] = len(vocab)
if self.sort:
feature_names.sort()
vocab = dict((f, i) for i, f in enumerate(feature_names))
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return self
def _transform(self, X, fitting):
# Sanity check: Python's array has no way of explicitly requesting the
# signed 32-bit integers that scipy.sparse needs, so we use the next
# best thing: typecode "i" (int). However, if that gives larger or
# smaller integers than 32-bit ones, np.frombuffer screws up.
assert array("i").itemsize == 4, (
"sizeof(int) != 4 on your platform; please report this at"
" https://github.com/scikit-learn/scikit-learn/issues and"
" include the output from platform.platform() in your bug report")
dtype = self.dtype
if fitting:
feature_names = []
vocab = {}
else:
feature_names = self.feature_names_
vocab = self.vocabulary_
# Process everything as sparse regardless of setting
X = [X] if isinstance(X, Mapping) else X
indices = array("i")
indptr = array("i", [0])
# XXX we could change values to an array.array as well, but it
# would require (heuristic) conversion of dtype to typecode...
values = []
# collect all the possible feature names and build sparse matrix at
# same time
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
if f in vocab:
indices.append(vocab[f])
values.append(dtype(v))
else:
if fitting:
feature_names.append(f)
vocab[f] = len(vocab)
indices.append(vocab[f])
values.append(dtype(v))
indptr.append(len(indices))
if len(indptr) == 1:
raise ValueError("Sample sequence X is empty.")
indices = frombuffer_empty(indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
shape = (len(indptr) - 1, len(vocab))
result_matrix = sp.csr_matrix((values, indices, indptr),
shape=shape, dtype=dtype)
# Sort everything if asked
if fitting and self.sort:
feature_names.sort()
map_index = np.empty(len(feature_names), dtype=np.int32)
for new_val, f in enumerate(feature_names):
map_index[new_val] = vocab[f]
vocab[f] = new_val
result_matrix = result_matrix[:, map_index]
if self.sparse:
result_matrix.sort_indices()
else:
result_matrix = result_matrix.toarray()
if fitting:
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return result_matrix
def fit_transform(self, X, y=None):
"""Learn a list of feature name -> indices mappings and transform X.
Like fit(X) followed by transform(X), but does not require
materializing X in memory.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
return self._transform(X, fitting=True)
def inverse_transform(self, X, dict_type=dict):
"""Transform array or sparse matrix X back to feature mappings.
X must have been produced by this DictVectorizer's transform or
fit_transform method; it may only have passed through transformers
that preserve the number of features and their order.
In the case of one-hot/one-of-K coding, the constructed feature
names and values are returned rather than the original ones.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Sample matrix.
dict_type : callable, optional
Constructor for feature mappings. Must conform to the
collections.Mapping API.
Returns
-------
D : list of dict_type objects, length = n_samples
Feature mappings for the samples in X.
"""
# COO matrix is not subscriptable
X = check_array(X, accept_sparse=['csr', 'csc'])
n_samples = X.shape[0]
names = self.feature_names_
dicts = [dict_type() for _ in xrange(n_samples)]
if sp.issparse(X):
for i, j in zip(*X.nonzero()):
dicts[i][names[j]] = X[i, j]
else:
for i, d in enumerate(dicts):
for j, v in enumerate(X[i, :]):
if v != 0:
d[names[j]] = X[i, j]
return dicts
def transform(self, X, y=None):
"""Transform feature->value dicts to array or sparse matrix.
Named features not encountered during fit or fit_transform will be
silently ignored.
Parameters
----------
X : Mapping or iterable over Mappings, length = n_samples
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
if self.sparse:
return self._transform(X, fitting=False)
else:
dtype = self.dtype
vocab = self.vocabulary_
X = _tosequence(X)
Xa = np.zeros((len(X), len(vocab)), dtype=dtype)
for i, x in enumerate(X):
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
try:
Xa[i, vocab[f]] = dtype(v)
except KeyError:
pass
return Xa
def get_feature_names(self):
"""Returns a list of feature names, ordered by their indices.
If one-of-K coding is applied to categorical features, this will
include the constructed feature names but not the original ones.
"""
return self.feature_names_
def restrict(self, support, indices=False):
"""Restrict the features to those in support using feature selection.
This function modifies the estimator in-place.
Parameters
----------
support : array-like
Boolean mask or list of indices (as returned by the get_support
member of feature selectors).
indices : boolean, optional
Whether support is a list of indices.
Returns
-------
self
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> from sklearn.feature_selection import SelectKBest, chi2
>>> v = DictVectorizer()
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> support = SelectKBest(chi2, k=2).fit(X, [0, 1])
>>> v.get_feature_names()
['bar', 'baz', 'foo']
>>> v.restrict(support.get_support()) # doctest: +ELLIPSIS
DictVectorizer(dtype=..., separator='=', sort=True,
sparse=True)
>>> v.get_feature_names()
['bar', 'foo']
"""
if not indices:
support = np.where(support)[0]
names = self.feature_names_
new_vocab = {}
for i in support:
new_vocab[names[i]] = len(new_vocab)
self.vocabulary_ = new_vocab
self.feature_names_ = [f for f, i in sorted(six.iteritems(new_vocab),
key=itemgetter(1))]
return self
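# Hedged aside (illustrative only): the one-hot treatment of string-valued features
# described in the class docstring, on made-up data.
if __name__ == "__main__":
    v = DictVectorizer(sparse=False)
    D = [{'f': 'ham', 'x': 1}, {'f': 'spam', 'x': 2}]
    print(v.fit_transform(D))     # [[ 1.  0.  1.]
                                  #  [ 0.  1.  2.]]
    print(v.get_feature_names())  # ['f=ham', 'f=spam', 'x']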
|
bsd-3-clause
|
gfrubi/FM2
|
figuras-editables/fig-Bessel.py
|
1
|
3508
|
# -*- coding: utf-8 -*-
from matplotlib.pyplot import *
from numpy import *
from scipy.special import *
style.use('classic')
colores=['blue','red','brown','purple','black']
dasheses=[[],[5,2],[5,5],[5,2,2,2],[2,2]]
x = linspace(-10,10,1000)
fig = figure(figsize=(8,6))
for n in range(5):
plot(x,jn(n,x),colores[n], dashes=dasheses[n],label='$n= $'+str(n), linewidth=2)
grid()
xlabel(r'$x$',fontsize=15)
ylabel(r'$J_n(x)$',fontsize=15)
ylim(-1.1,1.1)
#title(r'Funciones de Bessel de $1^{a}$ especie y orden entero')
legend(loc='best',fontsize=12)
ylim(-0.7,1.1)
savefig('../figs/fig-Bessel-J.pdf')
# $N_{\nu}(x)$
fig = figure(figsize=(8,6))
for n in range(5):
plot(x,yn(n,x),colores[n], dashes=dasheses[n],label='$n= $'+str(n), linewidth=2)
grid()
xlabel(r'$x$',fontsize=15)
ylabel(r'$Y_n(x)$',fontsize=15)
ylim(-1.1,1.1)
#title(r'Funciones de Bessel de $2^{da}$ especie y orden entero')
legend(loc='best',fontsize=12)
ylim(-2,0.7)
savefig('../figs/fig-Bessel-Y.pdf')
# $I_{\nu}(z)$
x = linspace(-4,4,1000)
fig = figure(figsize=(8,6))
for n in range(5):
plot(x,iv(n,x),colores[n], dashes=dasheses[n],label='$n= $'+str(n), linewidth=2)
grid()
xlabel(r'$x$',fontsize=15)
ylabel(r'$I_n(x)$',fontsize=15)
#title(r'Funciones modificadas de Bessel de $1^{\circ}$ especie a orden entero',fontsize=13)
legend(loc='best',fontsize=12)
xlim(-4,4)
ylim(-10,12)
savefig('../figs/fig-Bessel-I.pdf')
# $K_{\nu}(z)$
x = linspace(0,4,1000)
fig = figure(figsize=(8,6))
for n in range(5):
plot(x,kv(n,x),colores[n], dashes=dasheses[n],label='$n= $'+str(n), linewidth=2)
grid()
xlabel(r'$x$',fontsize=15)
ylabel(r'$K_n(x)$',fontsize=15)
#title(r'Funciones modificadas de Bessel de $2^{\circ}$ especie a orden entero',fontsize=13)
legend(loc='best',fontsize=12)
ylim(0,4)
savefig('../figs/fig-Bessel-K.pdf')
# #$j_{n}(x)$:
x = linspace(0,10,1000)
fig = figure(figsize=(8,6))
for n in range(5):
plot(x,spherical_jn(n,x),colores[n], dashes=dasheses[n],label='$n= $'+str(n), linewidth=2)
grid()
xlabel(r'$x$',fontsize=15)
ylabel(r'$j_n(x)$',fontsize=15)
#title(ur'Funciones Esféricas de Bessel de $1^{\circ}$ especie a orden entero',fontsize=12)
legend(loc='best',fontsize=12)
ylim(-0.5,1.1)
savefig('../figs/fig-Bessel-Esferica-j.pdf')
# #$n_{n}(x)$:
x = linspace(-20,20,1000)
fig = figure(figsize=(8,6))
for n in range(5):
plot(x,spherical_yn(n,x),colores[n], dashes=dasheses[n],label='$n= $'+str(n), linewidth=2)
grid()
xlabel(r'$x$',fontsize=15)
ylabel(r'$y_n(x)$',fontsize=15)
#title(ur'Funciones Esféricas de Bessel de $2^{\circ}$ especie a orden entero',fontsize=12)
legend(loc='best',fontsize=12)
ylim(-1,1)
savefig('../figs/fig-Bessel-Esferica-y.pdf')
# #$i_{n}(x)$:
x = linspace(0,5,1000)
fig = figure(figsize=(8,6))
for n in range(5):
plot(x,spherical_in(n,x),colores[n], dashes=dasheses[n],label='$n= $'+str(n), linewidth=2)
grid()
xlabel(r'$x$',fontsize=15)
ylabel(r'$i_n(x)$',fontsize=15)
#title(ur'Funciones Esféricas Modificadas de Bessel de $1^{\circ}$ especie a orden entero',fontsize=12)
legend(loc='best',fontsize=12)
savefig('../figs/fig-Bessel-Esferica-i.pdf')
# #$k_{n}(x)$:
x = linspace(0,5,1000)
fig = figure(figsize=(8,6))
for n in range(5):
plot(x,spherical_kn(n,x),colores[n], dashes=dasheses[n],label='$n= $'+str(n), linewidth=2)
grid()
xlabel(r'$x$',fontsize=15)
ylabel(r'$k_n(x)$',fontsize=15)
#title(ur'Funciones Esféricas Modificadas de Bessel de $2^{\circ}$ especie a orden entero',fontsize=12)
legend(loc='best',fontsize=12)
ylim(-1,15)
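# The last panel is presumably saved like the preceding ones; the file name
# below is assumed by analogy with the other figures in this script.
savefig('../figs/fig-Bessel-Esferica-k.pdf')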
|
gpl-3.0
|
willhess/aima-python
|
submissions/Hess/myNN.py
|
13
|
1067
|
import traceback
from sklearn.neural_network import MLPClassifier
from submissions.Hess import cars
class DataFrame:
data = []
feature_names = []
target = []
target_names = []
guzzle = DataFrame()
guzzle.target = []
guzzle.data = []
car_records = cars.get_cars()
def guzzleTarget(city_mpg):
    if city_mpg < 14:
        return 1
    return 0
for info in car_records:
    try:
        fuelCity = float(info['Fuel Information']['City mph'])  # they misspelled mpg
        year = float(info['Identification']['Year'])
        guzzle.target.append(guzzleTarget(fuelCity))
        guzzle.data.append([fuelCity, year])
    except:
        traceback.print_exc()
guzzle.feature_names = [
    "City mph",
    "Year",
]
guzzle.target_names = [
    "New Car is < 14 MPG",
    "New Car is > 14 MPG",
]
mlpc = MLPClassifier(
solver='sgd',
learning_rate = 'adaptive',
)
Examples = {
'Guzzle': {
'frame': guzzle,
},
'GuzzleMLPC': {
'frame': guzzle,
'mlpc': mlpc
},
}
|
mit
|
Alexander-P/Isca
|
src/extra/python/scripts/change_horizontal_resolution_of_restart_file.py
|
4
|
8205
|
"""Script for changing the horizontal resolution of an FMS restart file"""
import xarray as xar
import numpy as np
import gauss_grid as gg
import scipy.interpolate as scinterp
import pdb
import mpl_toolkits.basemap as basemap
import matplotlib.pyplot as plt
import sh
from netCDF4 import Dataset
import copy_netcdf_attrs as cna
import tempfile
import shutil
import os
def linear_interpolate_for_regrid(lon_list_in_grid, lat_list_in_grid, lon_list_out_grid, lat_list_out_grid, input_array):
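    """Bilinearly regrid a (time, level, lat, lon) array from the input
    lat-lon grid onto the output grid, one 2D slice at a time, using
    basemap.interp with order=1 (bilinear interpolation)."""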
time_length, z_length, lat_length, lon_length = input_array.shape
lon_array, lat_array = np.meshgrid(lon_list_out_grid,lat_list_out_grid)
output_array = np.zeros((time_length, z_length, lat_list_out_grid.shape[0], lon_list_out_grid.shape[0]))
for tim in range(time_length):
for z in range(z_length):
input_array_2d = np.squeeze(input_array[tim,z,...])
output_array[tim,z,...] = basemap.interp(input_array_2d, lon_list_in_grid, lat_list_in_grid, lon_array, lat_array, order=1)
return output_array
def populate_new_spherical_harmonic_field(x_in, y_in, x_out, y_out, input_array):
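    """Zero-pad a spectral-coefficient field onto the larger output spectral
    axes: the existing coefficients are copied into the low-wavenumber corner
    and the new, higher-wavenumber coefficients are left at zero. Only the
    shape of input_array is used; x_in and y_in are accepted for symmetry
    with the grid-space regridding routine above."""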
time_length, z_length, y_length, x_length = input_array.shape
output_array = np.zeros((time_length, z_length, y_out.shape[0], x_out.shape[0]))
output_array[:,:,0:y_length, 0:x_length] = input_array
return output_array
def process_input_file(file_name, atmosphere_or_spectral_dynamics, num_fourier_out, num_x_out, num_y_out):
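    """Open file_name+'.res.nc', build the output axes for the requested
    truncation (num_fourier_out) and grid (num_x_out x num_y_out), regrid
    grid-space variables bilinearly and zero-pad spectral-space variables,
    then write the result as a NETCDF3_CLASSIC file (with _FillValue
    attributes stripped) and return the new file name."""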
num_spherical_out = num_fourier_out + 1
dataset = xar.open_dataset(file_name+'.res.nc')
Time_in = dataset.Time.values #Just 1 and 2
Time_out = Time_in
dataset_out = dataset.copy(deep=True)
if atmosphere_or_spectral_dynamics=='atmosphere':
x_axis_1_in = dataset.xaxis_1.values #Just a single value - 1.0
x_axis_2_in = dataset.xaxis_2.values #Bizarrely, this is the number of vertical levels+1
y_axis_1_in = dataset.yaxis_1.values #Just a single value - 1.0
y_axis_2_in = dataset.yaxis_2.values #Number of spherical harmonics + 1 (87)
z_axis_1_in = dataset.zaxis_1.values #Just a single value - 1.0
z_axis_2_in = dataset.zaxis_2.values #Number of full levels (30)
longitudes_in = np.arange(0., 360., (360./x_axis_2_in.shape[0]))
latitudes_in = gg.gaussian_latitudes(int(y_axis_2_in.shape[0]/2))[0]
axes_out = {'xaxis_1':x_axis_1_in, 'xaxis_2':np.arange(1.,num_x_out+1), 'yaxis_1':y_axis_1_in, 'yaxis_2':np.arange(1.,num_y_out+1), 'zaxis_1':dataset.zaxis_1.values, 'zaxis_2':dataset.zaxis_2.values, 'Time':Time_in}
y_axis_name = 'yaxis_2'
x_axis_name = 'xaxis_2'
elif atmosphere_or_spectral_dynamics=='spectral_dynamics':
x_axis_1_in = dataset.xaxis_1.values #Just a single value - 1.0
x_axis_2_in = dataset.xaxis_2.values #Bizarrely, this is the number of vertical levels+1
x_axis_3_in = dataset.xaxis_3.values #Number of spherical harmonics (e.g. 86)
x_axis_4_in = dataset.xaxis_4.values #Number of gridpoints in physical space
y_axis_1_in = dataset.yaxis_1.values #Just a single value - 1.0
y_axis_2_in = dataset.yaxis_2.values #Number of spherical harmonics + 1 (87)
y_axis_3_in = dataset.yaxis_3.values #Number of gridpoints in physical space
z_axis_1_in = dataset.zaxis_1.values #Just a single value - 1.0
z_axis_2_in = dataset.zaxis_2.values #Number of full levels (30)
longitudes_in = np.arange(0., 360., (360./x_axis_4_in.shape[0]))
latitudes_in = gg.gaussian_latitudes(int(y_axis_3_in.shape[0]/2))[0]
axes_out = {'xaxis_1':x_axis_1_in, 'xaxis_2':x_axis_2_in, 'xaxis_3':np.arange(1.,num_spherical_out+1), 'xaxis_4':np.arange(1.,num_x_out+1), 'yaxis_1':y_axis_1_in, 'yaxis_2':np.arange(1.,num_spherical_out+2), 'yaxis_3':np.arange(1.,num_y_out+1), 'zaxis_1':dataset.zaxis_1.values, 'zaxis_2':dataset.zaxis_2.values, 'Time':Time_in}
y_axis_name = 'yaxis_3'
x_axis_name = 'xaxis_4'
longitudes_out = np.arange(0., 360., (360./num_x_out))
latitudes_out = gg.gaussian_latitudes(int(num_y_out/2))[0]
for var in list(dataset_out.data_vars.keys()):
dataset_out.__delitem__(var)
for coord in list(dataset_out.coords.keys()):
dataset_out[coord] = axes_out[coord]
dataset_out[coord].attrs = dataset[coord].attrs
for var in list(dataset.data_vars.keys()):
var_dims = dataset[var].dims
if var_dims[2:4] == (y_axis_name, x_axis_name):
print((var, 'physical grid'))
new_var = linear_interpolate_for_regrid(longitudes_in, latitudes_in, longitudes_out, latitudes_out, dataset[var].load().values)
dataset_out[var] = (dataset[var].dims, new_var)
elif var_dims[2:4] == ('yaxis_2', 'xaxis_3'):
print((var, 'spectral grid'))
new_var = populate_new_spherical_harmonic_field(x_axis_2_in, y_axis_2_in, axes_out['xaxis_3'], axes_out['yaxis_2'], dataset[var].values)
dataset_out[var] = ((dataset[var].dims, new_var))
else:
print((var, 'neither'))
dataset_out[var] = ((dataset[var].dims, dataset[var].values))
dataset_out[var].attrs = dataset[var].attrs
out_file_name = file_name+'_mod_'+str(num_fourier_out)+'_onescript.res.nc'
dataset_out.to_netcdf(path='./temp_'+out_file_name, format='NETCDF3_CLASSIC', engine='scipy')
remove_fill_value_attribute('./temp_'+out_file_name, out_file_name)
os.remove('./temp_'+out_file_name)
return out_file_name
def join_into_cpio(atmosphere_file_name='./atmosphere.res.nc', spectral_dynamics_file_name='./spectral_dynamics.res.nc', atmos_model_file_name='./atmos_model.res', restart_file_out_name='./res_mod'):
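    """Gather the regridded atmosphere and spectral_dynamics restart files
    plus atmos_model.res in a temporary directory, pack them into a cpio
    archive named restart_file_out_name, move the archive back to the
    current working directory and restore the input files."""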
temp_folder_name = tempfile.mkdtemp()
shutil.move(atmosphere_file_name, temp_folder_name+'/atmosphere.res.nc')
shutil.move(spectral_dynamics_file_name, temp_folder_name+'/spectral_dynamics.res.nc')
shutil.copyfile(atmos_model_file_name, temp_folder_name+'/atmos_model.res')
state_files_out = ['atmosphere.res.nc', 'spectral_dynamics.res.nc', 'atmos_model.res']
cwd = os.getcwd()
    os.chdir(temp_folder_name)  # Have to move into the temporary folder because cpio cannot cope with absolute file references; otherwise, once the temporary folder is deleted, cpio would go looking for it when the archive is extracted.
sh.cpio('-ov', _in='\n'.join(state_files_out), _out=restart_file_out_name)
shutil.move(restart_file_out_name, cwd)
os.chdir(cwd)
shutil.move(temp_folder_name+'/atmosphere.res.nc', atmosphere_file_name)
shutil.move(temp_folder_name+'/spectral_dynamics.res.nc', spectral_dynamics_file_name)
shutil.rmtree(temp_folder_name)
def remove_fill_value_attribute(in_file_name, out_file_name):
dsin = Dataset(in_file_name, 'a', format='NETCDF3_CLASSIC')
dsout= Dataset(out_file_name, 'w', format='NETCDF3_CLASSIC')
cna.copy_netcdf_attrs(dsin, dsout, copy_vars = True)
dsout.close()
if __name__=="__main__":
#Specify the number of fourier modes and lon and lat dimensions for the output
num_fourier_out = 85
num_x_out = 256
num_y_out = 128
#Specify the name of the input files that you want to regrid
atmosphere_file_name = 'atmosphere_old'
spectral_dynamics_file_name = 'spectral_dynamics_old'
atmos_model_file_name = 'atmos_model.res'
#Specify the name of the output cpio archive
restart_file_out_name = 'res_85_onescript'
#Regridding atmosphere file
atmosphere_out_file_name = process_input_file(atmosphere_file_name, 'atmosphere', num_fourier_out, num_x_out, num_y_out)
#regridding spectral dynamics file
spectral_out_file_name = process_input_file(spectral_dynamics_file_name, 'spectral_dynamics', num_fourier_out, num_x_out, num_y_out)
#merging into a single archive
join_into_cpio(atmosphere_out_file_name, spectral_out_file_name, atmos_model_file_name, restart_file_out_name=restart_file_out_name)
|
gpl-3.0
|
chaffra/sympy
|
sympy/interactive/tests/test_ipythonprinting.py
|
24
|
6208
|
"""Tests that the IPython printing module is properly loaded. """
from sympy.interactive.session import init_ipython_session
from sympy.external import import_module
from sympy.utilities.pytest import raises
# run_cell was added in IPython 0.11
ipython = import_module("IPython", min_module_version="0.11")
# disable tests if ipython is not present
if not ipython:
disabled = True
def test_ipythonprinting():
# Initialize and setup IPython session
app = init_ipython_session()
app.run_cell("ip = get_ipython()")
app.run_cell("inst = ip.instance()")
app.run_cell("format = inst.display_formatter.format")
app.run_cell("from sympy import Symbol")
# Printing without printing extension
app.run_cell("a = format(Symbol('pi'))")
app.run_cell("a2 = format(Symbol('pi')**2)")
# Deal with API change starting at IPython 1.0
if int(ipython.__version__.split(".")[0]) < 1:
assert app.user_ns['a']['text/plain'] == "pi"
assert app.user_ns['a2']['text/plain'] == "pi**2"
else:
assert app.user_ns['a'][0]['text/plain'] == "pi"
assert app.user_ns['a2'][0]['text/plain'] == "pi**2"
# Load printing extension
app.run_cell("from sympy import init_printing")
app.run_cell("init_printing()")
# Printing with printing extension
app.run_cell("a = format(Symbol('pi'))")
app.run_cell("a2 = format(Symbol('pi')**2)")
# Deal with API change starting at IPython 1.0
if int(ipython.__version__.split(".")[0]) < 1:
assert app.user_ns['a']['text/plain'] in (u'\N{GREEK SMALL LETTER PI}', 'pi')
assert app.user_ns['a2']['text/plain'] in (u' 2\n\N{GREEK SMALL LETTER PI} ', ' 2\npi ')
else:
assert app.user_ns['a'][0]['text/plain'] in (u'\N{GREEK SMALL LETTER PI}', 'pi')
assert app.user_ns['a2'][0]['text/plain'] in (u' 2\n\N{GREEK SMALL LETTER PI} ', ' 2\npi ')
def test_print_builtin_option():
# Initialize and setup IPython session
app = init_ipython_session()
app.run_cell("ip = get_ipython()")
app.run_cell("inst = ip.instance()")
app.run_cell("format = inst.display_formatter.format")
app.run_cell("from sympy import Symbol")
app.run_cell("from sympy import init_printing")
app.run_cell("a = format({Symbol('pi'): 3.14, Symbol('n_i'): 3})")
# Deal with API change starting at IPython 1.0
if int(ipython.__version__.split(".")[0]) < 1:
text = app.user_ns['a']['text/plain']
raises(KeyError, lambda: app.user_ns['a']['text/latex'])
else:
text = app.user_ns['a'][0]['text/plain']
raises(KeyError, lambda: app.user_ns['a'][0]['text/latex'])
    # Note: unicode in Python 2 is equivalent to str in Python 3. In Python 3
    # there is one text type, str, which holds Unicode data, and two byte
    # types, bytes and bytearray.
# XXX: How can we make this ignore the terminal width? This test fails if
# the terminal is too narrow.
assert text in ("{pi: 3.14, n_i: 3}",
u'{n\N{LATIN SUBSCRIPT SMALL LETTER I}: 3, \N{GREEK SMALL LETTER PI}: 3.14}',
"{n_i: 3, pi: 3.14}",
u'{\N{GREEK SMALL LETTER PI}: 3.14, n\N{LATIN SUBSCRIPT SMALL LETTER I}: 3}')
    # If we enable the default printing, then the dictionary should render
# as a LaTeX version of the whole dict: ${\pi: 3.14, n_i: 3}$
app.run_cell("inst.display_formatter.formatters['text/latex'].enabled = True")
app.run_cell("init_printing(use_latex=True)")
app.run_cell("a = format({Symbol('pi'): 3.14, Symbol('n_i'): 3})")
# Deal with API change starting at IPython 1.0
if int(ipython.__version__.split(".")[0]) < 1:
text = app.user_ns['a']['text/plain']
latex = app.user_ns['a']['text/latex']
else:
text = app.user_ns['a'][0]['text/plain']
latex = app.user_ns['a'][0]['text/latex']
assert text in ("{pi: 3.14, n_i: 3}",
u'{n\N{LATIN SUBSCRIPT SMALL LETTER I}: 3, \N{GREEK SMALL LETTER PI}: 3.14}',
"{n_i: 3, pi: 3.14}",
u'{\N{GREEK SMALL LETTER PI}: 3.14, n\N{LATIN SUBSCRIPT SMALL LETTER I}: 3}')
assert latex == r'$$\left \{ n_{i} : 3, \quad \pi : 3.14\right \}$$'
app.run_cell("inst.display_formatter.formatters['text/latex'].enabled = True")
app.run_cell("init_printing(use_latex=True, print_builtin=False)")
app.run_cell("a = format({Symbol('pi'): 3.14, Symbol('n_i'): 3})")
# Deal with API change starting at IPython 1.0
if int(ipython.__version__.split(".")[0]) < 1:
text = app.user_ns['a']['text/plain']
raises(KeyError, lambda: app.user_ns['a']['text/latex'])
else:
text = app.user_ns['a'][0]['text/plain']
raises(KeyError, lambda: app.user_ns['a'][0]['text/latex'])
    # Note: unicode in Python 2 is equivalent to str in Python 3. In Python 3
    # there is one text type, str, which holds Unicode data, and two byte
    # types, bytes and bytearray.
# Python 3.3.3 + IPython 0.13.2 gives: '{n_i: 3, pi: 3.14}'
# Python 3.3.3 + IPython 1.1.0 gives: '{n_i: 3, pi: 3.14}'
# Python 2.7.5 + IPython 1.1.0 gives: '{pi: 3.14, n_i: 3}'
assert text in ("{pi: 3.14, n_i: 3}", "{n_i: 3, pi: 3.14}")
def test_matplotlib_bad_latex():
# Initialize and setup IPython session
app = init_ipython_session()
app.run_cell("import IPython")
app.run_cell("ip = get_ipython()")
app.run_cell("inst = ip.instance()")
app.run_cell("format = inst.display_formatter.format")
app.run_cell("from sympy import init_printing, Matrix")
app.run_cell("init_printing(use_latex='matplotlib')")
# The png formatter is not enabled by default in this context
app.run_cell("inst.display_formatter.formatters['image/png'].enabled = True")
# Make sure no warnings are raised by IPython
app.run_cell("import warnings")
app.run_cell("warnings.simplefilter('error', IPython.core.formatters.FormatterWarning)")
# This should not raise an exception
app.run_cell("a = format(Matrix([1, 2, 3]))")
# issue 9799
app.run_cell("from sympy import Piecewise, Symbol, Eq")
app.run_cell("x = Symbol('x'); pw = format(Piecewise((1, Eq(x, 0)), (0, True)))")
|
bsd-3-clause
|
gotomypc/scikit-learn
|
sklearn/linear_model/tests/test_sgd.py
|
129
|
43401
|
import pickle
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler
class SparseSGDClassifier(SGDClassifier):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).fit(X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).partial_fit(X, y, *args, **kw)
def decision_function(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).decision_function(X)
def predict_proba(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).predict_proba(X)
class SparseSGDRegressor(SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.decision_function(self, X, *args, **kw)
# Test Data
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
[1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
[0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
[0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
# Classification Test Case
class CommonTest(object):
def factory(self, **kwargs):
if "random_state" not in kwargs:
kwargs["random_state"] = 42
return self.factory_class(**kwargs)
# a simple implementation of ASGD to use for testing
# uses squared loss to find the gradient
def asgd(self, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
if weight_init is None:
weights = np.zeros(X.shape[1])
else:
weights = weight_init
average_weights = np.zeros(X.shape[1])
intercept = intercept_init
average_intercept = 0.0
decay = 1.0
# sparse data has a fixed decay of .01
if (isinstance(self, SparseSGDClassifierTestCase) or
isinstance(self, SparseSGDRegressorTestCase)):
decay = .01
for i, entry in enumerate(X):
p = np.dot(entry, weights)
p += intercept
gradient = p - y[i]
weights *= 1.0 - (eta * alpha)
weights += -(eta * gradient * entry)
intercept += -(eta * gradient) * decay
average_weights *= i
average_weights += weights
average_weights /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
return average_weights, average_intercept
def _test_warm_start(self, X, Y, lr):
# Test that explicit warm restart...
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf.fit(X, Y)
clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf2.fit(X, Y,
coef_init=clf.coef_.copy(),
intercept_init=clf.intercept_.copy())
# ... and implicit warm restart are equivalent.
clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
warm_start=True, learning_rate=lr)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf.t_)
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf2.t_)
assert_array_almost_equal(clf3.coef_, clf2.coef_)
def test_warm_start_constant(self):
self._test_warm_start(X, Y, "constant")
def test_warm_start_invscaling(self):
self._test_warm_start(X, Y, "invscaling")
def test_warm_start_optimal(self):
self._test_warm_start(X, Y, "optimal")
def test_input_format(self):
# Input format tests.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
Y_ = np.array(Y)[:, np.newaxis]
Y_ = np.c_[Y_, Y_]
assert_raises(ValueError, clf.fit, X, Y_)
def test_clone(self):
# Test whether clone works ok.
clf = self.factory(alpha=0.01, n_iter=5, penalty='l1')
clf = clone(clf)
clf.set_params(penalty='l2')
clf.fit(X, Y)
clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2')
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
def test_plain_has_no_average_attr(self):
clf = self.factory(average=True, eta0=.01)
clf.fit(X, Y)
assert_true(hasattr(clf, 'average_coef_'))
assert_true(hasattr(clf, 'average_intercept_'))
assert_true(hasattr(clf, 'standard_intercept_'))
assert_true(hasattr(clf, 'standard_coef_'))
clf = self.factory()
clf.fit(X, Y)
assert_false(hasattr(clf, 'average_coef_'))
assert_false(hasattr(clf, 'average_intercept_'))
assert_false(hasattr(clf, 'standard_intercept_'))
assert_false(hasattr(clf, 'standard_coef_'))
def test_late_onset_averaging_not_reached(self):
clf1 = self.factory(average=600)
clf2 = self.factory()
for _ in range(100):
if isinstance(clf1, SGDClassifier):
clf1.partial_fit(X, Y, classes=np.unique(Y))
clf2.partial_fit(X, Y, classes=np.unique(Y))
else:
clf1.partial_fit(X, Y)
clf2.partial_fit(X, Y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16)
assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16)
def test_late_onset_averaging_reached(self):
eta0 = .001
alpha = .0001
Y_encode = np.array(Y)
Y_encode[Y_encode == 1] = -1.0
Y_encode[Y_encode == 2] = 1.0
clf1 = self.factory(average=7, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=2, shuffle=False)
clf2 = self.factory(average=0, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=1, shuffle=False)
clf1.fit(X, Y_encode)
clf2.fit(X, Y_encode)
average_weights, average_intercept = \
self.asgd(X, Y_encode, eta0, alpha,
weight_init=clf2.coef_.ravel(),
intercept_init=clf2.intercept_)
assert_array_almost_equal(clf1.coef_.ravel(),
average_weights.ravel(),
decimal=16)
assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)
class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDClassifier
def test_sgd(self):
# Check that SGD gives any results :-)
for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
loss=loss, n_iter=10, shuffle=True)
clf.fit(X, Y)
# assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@raises(ValueError)
def test_sgd_bad_l1_ratio(self):
# Check whether expected ValueError on bad l1_ratio
self.factory(l1_ratio=1.1)
@raises(ValueError)
def test_sgd_bad_learning_rate_schedule(self):
# Check whether expected ValueError on bad learning_rate
self.factory(learning_rate="<unknown>")
@raises(ValueError)
def test_sgd_bad_eta0(self):
# Check whether expected ValueError on bad eta0
self.factory(eta0=0, learning_rate="constant")
@raises(ValueError)
def test_sgd_bad_alpha(self):
# Check whether expected ValueError on bad alpha
self.factory(alpha=-.1)
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
@raises(ValueError)
def test_sgd_n_iter_param(self):
# Test parameter validity check
self.factory(n_iter=-10000)
@raises(ValueError)
def test_sgd_shuffle_param(self):
# Test parameter validity check
self.factory(shuffle="false")
@raises(TypeError)
def test_argument_coef(self):
# Checks coef_init not allowed as model argument (only fit)
# Provided coef_ does not match dataset.
self.factory(coef_init=np.zeros((3,))).fit(X, Y)
@raises(ValueError)
def test_provide_coef(self):
# Checks coef_init shape for the warm starts
# Provided coef_ does not match dataset.
self.factory().fit(X, Y, coef_init=np.zeros((3,)))
@raises(ValueError)
def test_set_intercept(self):
# Checks intercept_ shape for the warm starts
# Provided intercept_ does not match dataset.
self.factory().fit(X, Y, intercept_init=np.zeros((3,)))
def test_set_intercept_binary(self):
# Checks intercept_ shape for the warm starts in binary case
self.factory().fit(X5, Y5, intercept_init=0)
def test_average_binary_computed_correctly(self):
# Checks the SGDClassifier correctly computes the average weights
eta = .1
alpha = 2.
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
# simple linear function without noise
y = np.dot(X, w)
y = np.sign(y)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
average_weights = average_weights.reshape(1, -1)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=14)
assert_almost_equal(clf.intercept_, average_intercept, decimal=14)
def test_set_intercept_to_intercept(self):
# Checks intercept_ shape consistency for the warm starts
# Inconsistent intercept_ shape.
clf = self.factory().fit(X5, Y5)
self.factory().fit(X5, Y5, intercept_init=clf.intercept_)
clf = self.factory().fit(X, Y)
self.factory().fit(X, Y, intercept_init=clf.intercept_)
@raises(ValueError)
def test_sgd_at_least_two_labels(self):
# Target must have at least two labels
self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9))
def test_partial_fit_weight_class_balanced(self):
        # partial_fit with class_weight='balanced' is not supported
assert_raises_regexp(ValueError,
"class_weight 'balanced' is not supported for "
"partial_fit. In order to use 'balanced' weights, "
"use compute_class_weight\('balanced', classes, y\). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.",
self.factory(class_weight='balanced').partial_fit,
X, Y, classes=np.unique(Y))
def test_sgd_multiclass(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_average(self):
eta = .001
alpha = .01
# Multi-class average test case
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
np_Y2 = np.array(Y2)
clf.fit(X2, np_Y2)
classes = np.unique(np_Y2)
for i, cl in enumerate(classes):
y_i = np.ones(np_Y2.shape[0])
y_i[np_Y2 != cl] = -1
average_coef, average_intercept = self.asgd(X2, y_i, eta, alpha)
assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
assert_almost_equal(average_intercept,
clf.intercept_[i],
decimal=16)
def test_sgd_multiclass_with_init_coef(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
intercept_init=np.zeros(3))
assert_equal(clf.coef_.shape, (3, 2))
        assert_equal(clf.intercept_.shape, (3,))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_njobs(self):
# Multi-class test case with multi-core support
clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_set_coef_multiclass(self):
# Checks coef_init and intercept_init shape for for multi-class
# problems
# Provided coef_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2,
intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,)))
def test_sgd_proba(self):
# Check SGD.predict_proba
# Hinge loss does not allow for conditional prob estimate.
# We cannot use the factory here, because it defines predict_proba
# anyway.
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y)
assert_false(hasattr(clf, "predict_proba"))
assert_false(hasattr(clf, "predict_log_proba"))
# log and modified_huber losses can output probability estimates
# binary case
for loss in ["log", "modified_huber"]:
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X, Y)
p = clf.predict_proba([3, 2])
assert_true(p[0, 1] > 0.5)
p = clf.predict_proba([-1, -1])
assert_true(p[0, 1] < 0.5)
p = clf.predict_log_proba([3, 2])
assert_true(p[0, 1] > p[0, 0])
p = clf.predict_log_proba([-1, -1])
assert_true(p[0, 1] < p[0, 0])
# log loss multiclass probability estimates
clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X2, Y2)
d = clf.decision_function([[.1, -.1], [.3, .2]])
p = clf.predict_proba([[.1, -.1], [.3, .2]])
assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
assert_almost_equal(p[0].sum(), 1)
assert_true(np.all(p[0] >= 0))
p = clf.predict_proba([-1, -1])
d = clf.decision_function([-1, -1])
assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
l = clf.predict_log_proba([3, 2])
p = clf.predict_proba([3, 2])
assert_array_almost_equal(np.log(p), l)
l = clf.predict_log_proba([-1, -1])
p = clf.predict_proba([-1, -1])
assert_array_almost_equal(np.log(p), l)
# Modified Huber multiclass probability estimates; requires a separate
# test because the hard zero/one probabilities may destroy the
# ordering present in decision_function output.
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X2, Y2)
d = clf.decision_function([3, 2])
p = clf.predict_proba([3, 2])
if not isinstance(self, SparseSGDClassifierTestCase):
assert_equal(np.argmax(d, axis=1), np.argmax(p, axis=1))
else: # XXX the sparse test gets a different X2 (?)
assert_equal(np.argmin(d, axis=1), np.argmin(p, axis=1))
# the following sample produces decision_function values < -1,
# which would cause naive normalization to fail (see comment
# in SGDClassifier.predict_proba)
x = X.mean(axis=0)
d = clf.decision_function(x)
if np.all(d < -1): # XXX not true in sparse test case (why?)
p = clf.predict_proba(x)
assert_array_almost_equal(p[0], [1 / 3.] * 3)
def test_sgd_l1(self):
# Test L1 regularization
n = len(X4)
rng = np.random.RandomState(13)
idx = np.arange(n)
rng.shuffle(idx)
X = X4[idx, :]
Y = Y4[idx]
clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False,
n_iter=2000, shuffle=False)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# test sparsify with dense inputs
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# pickle and unpickle with sparse coef_
clf = pickle.loads(pickle.dumps(clf))
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
def test_class_weights(self):
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
        # we give a small weight to class 1
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_equal_class_weight(self):
# Test if equal class weights approx. equals no class weights.
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=None)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = self.factory(alpha=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X, y)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@raises(ValueError)
def test_wrong_class_weight_label(self):
# ValueError due to not existing class label.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5})
clf.fit(X, Y)
@raises(ValueError)
def test_wrong_class_weight_format(self):
# ValueError due to wrong class_weight argument type.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5])
clf.fit(X, Y)
def test_weights_multiplied(self):
# Tests that class_weight and sample_weight are multiplicative
class_weights = {1: .6, 2: .3}
sample_weights = np.random.random(Y4.shape[0])
multiplied_together = np.copy(sample_weights)
multiplied_together[Y4 == 1] *= class_weights[1]
multiplied_together[Y4 == 2] *= class_weights[2]
clf1 = self.factory(alpha=0.1, n_iter=20, class_weight=class_weights)
clf2 = self.factory(alpha=0.1, n_iter=20)
clf1.fit(X4, Y4, sample_weight=sample_weights)
clf2.fit(X4, Y4, sample_weight=multiplied_together)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_balanced_weight(self):
        # Test class weights for imbalanced data
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = scale(X)
idx = np.arange(X.shape[0])
rng = np.random.RandomState(6)
rng.shuffle(idx)
X = X[idx]
y = y[idx]
clf = self.factory(alpha=0.0001, n_iter=1000,
class_weight=None, shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf.predict(X), average='weighted'), 0.96,
decimal=1)
# make the same prediction using balanced class_weight
clf_balanced = self.factory(alpha=0.0001, n_iter=1000,
class_weight="balanced",
shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf_balanced.predict(X), average='weighted'), 0.96,
decimal=1)
# Make sure that in the balanced case it does not change anything
# to use "balanced"
assert_array_almost_equal(clf.coef_, clf_balanced.coef_, 6)
        # build a very, very imbalanced dataset out of the iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = self.factory(n_iter=1000, class_weight=None, shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_less(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit a model with balanced class_weight enabled
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit another using a fit parameter override
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
def test_sample_weights(self):
# Test weights on individual samples
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
        # we give a small weight to class 1
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@raises(ValueError)
def test_wrong_sample_weights(self):
# Test if ValueError is raised if sample_weight has wrong shape
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
# provided sample_weight too long
clf.fit(X, Y, sample_weight=np.arange(7))
@raises(ValueError)
def test_partial_fit_exception(self):
clf = self.factory(alpha=0.01)
# classes was not specified
clf.partial_fit(X3, Y3)
def test_partial_fit_binary(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y)
clf.partial_fit(X[:third], Y[:third], classes=classes)
assert_equal(clf.coef_.shape, (1, X.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([0, 0]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
assert_true(id1, id2)
y_pred = clf.predict(T)
assert_array_equal(y_pred, true_result)
def test_partial_fit_multiclass(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
id1 = id(clf.coef_.data)
clf.partial_fit(X2[third:], Y2[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
assert_true(id1, id2)
def test_fit_then_partial_fit(self):
# Partial_fit should work after initial fit in the multiclass case.
# Non-regression test for #2496; fit would previously produce a
# Fortran-ordered coef_ that subsequent partial_fit couldn't handle.
clf = self.factory()
clf.fit(X2, Y2)
clf.partial_fit(X2, Y2) # no exception here
def _test_partial_fit_equal_fit(self, lr):
for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2,
learning_rate=lr, shuffle=False)
clf.fit(X_, Y_)
y_pred = clf.decision_function(T_)
t = clf.t_
classes = np.unique(Y_)
clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr,
shuffle=False)
for i in range(2):
clf.partial_fit(X_, Y_, classes=classes)
y_pred2 = clf.decision_function(T_)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_regression_losses(self):
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="squared_epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, loss="huber")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.01,
loss="squared_loss")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
def test_warm_start_multiclass(self):
self._test_warm_start(X2, Y2, "optimal")
def test_multiple_fit(self):
# Test multiple calls of fit w/ different shaped inputs.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
assert_true(hasattr(clf, "coef_"))
# Non-regression test: try fitting with a different label set.
y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
clf.fit(X[:, :-1], y)
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory_class = SparseSGDClassifier
###############################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDRegressor
def test_sgd(self):
# Check that SGD gives any results.
clf = self.factory(alpha=0.1, n_iter=2,
fit_intercept=False)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
assert_equal(clf.coef_[0], clf.coef_[1])
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
def test_sgd_averaged_computed_correctly(self):
# Tests the average regressor matches the naive implementation
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_averaged_partial_fit(self):
# Tests whether the partial fit yields the same average as the fit
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.partial_fit(X[:int(n_samples / 2)][:], y[:int(n_samples / 2)])
clf.partial_fit(X[int(n_samples / 2):][:], y[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16)
def test_average_sparse(self):
# Checks the average weights on data with 0s
eta = .001
alpha = .01
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
n_samples = Y3.shape[0]
clf.partial_fit(X3[:int(n_samples / 2)][:], Y3[:int(n_samples / 2)])
clf.partial_fit(X3[int(n_samples / 2):][:], Y3[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X3, Y3, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_least_squares_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_sgd_epsilon_insensitive(self):
xmin, xmax = -5, 5
n_samples = 100
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() \
+ np.random.randn(n_samples, 1).ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.5)
def test_sgd_huber_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_elasticnet_convergence(self):
# Check that the SGD output is consistent with coordinate descent
n_samples, n_features = 1000, 5
rng = np.random.RandomState(0)
X = np.random.randn(n_samples, n_features)
        # ground-truth linear model that generates y from X and to which the
        # models should converge if the regularizer were set to 0.0
ground_truth_coef = rng.randn(n_features)
y = np.dot(X, ground_truth_coef)
# XXX: alpha = 0.1 seems to cause convergence problems
for alpha in [0.01, 0.001]:
for l1_ratio in [0.5, 0.8, 1.0]:
cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
cd.fit(X, y)
sgd = self.factory(penalty='elasticnet', n_iter=50,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
sgd.fit(X, y)
err_msg = ("cd and sgd did not converge to comparable "
"results for alpha=%f and l1_ratio=%f"
% (alpha, l1_ratio))
assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
err_msg=err_msg)
def test_partial_fit(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
clf.partial_fit(X[:third], Y[:third])
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([0, 0]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
assert_true(id1, id2)
def _test_partial_fit_equal_fit(self, lr):
clf = self.factory(alpha=0.01, n_iter=2, eta0=0.01,
learning_rate=lr, shuffle=False)
clf.fit(X, Y)
y_pred = clf.predict(T)
t = clf.t_
clf = self.factory(alpha=0.01, eta0=0.01,
learning_rate=lr, shuffle=False)
for i in range(2):
clf.partial_fit(X, Y)
y_pred2 = clf.predict(T)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_loss_function_epsilon(self):
clf = self.factory(epsilon=0.9)
clf.set_params(epsilon=0.1)
assert clf.loss_functions['huber'][1] == 0.1
class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase):
# Run exactly the same tests using the sparse representation variant
factory_class = SparseSGDRegressor
def test_l1_ratio():
# Test if l1 ratio extremes match L1 and L2 penalty settings.
X, y = datasets.make_classification(n_samples=1000,
n_features=100, n_informative=20,
random_state=1234)
# test if elasticnet with l1_ratio near 1 gives same result as pure l1
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.9999999999, random_state=42).fit(X, y)
est_l1 = SGDClassifier(alpha=0.001, penalty='l1', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l1.coef_)
# test if elasticnet with l1_ratio near 0 gives same result as pure l2
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.0000000001, random_state=42).fit(X, y)
est_l2 = SGDClassifier(alpha=0.001, penalty='l2', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l2.coef_)
def test_underflow_or_overlow():
with np.errstate(all='raise'):
# Generate some weird data with hugely unscaled features
rng = np.random.RandomState(0)
n_samples = 100
n_features = 10
X = rng.normal(size=(n_samples, n_features))
X[:, :2] *= 1e300
assert_true(np.isfinite(X).all())
# Use MinMaxScaler to scale the data without introducing a numerical
# instability (computing the standard deviation naively is not possible
# on this data)
X_scaled = MinMaxScaler().fit_transform(X)
assert_true(np.isfinite(X_scaled).all())
# Define a ground truth on the scaled data
ground_truth = rng.normal(size=n_features)
y = (np.dot(X_scaled, ground_truth) > 0.).astype(np.int32)
assert_array_equal(np.unique(y), [0, 1])
model = SGDClassifier(alpha=0.1, loss='squared_hinge', n_iter=500)
# smoke test: model is stable on scaled data
model.fit(X_scaled, y)
assert_true(np.isfinite(model.coef_).all())
# model is numerically unstable on unscaled data
msg_regxp = (r"Floating-point under-/overflow occurred at epoch #.*"
" Scaling input data with StandardScaler or MinMaxScaler"
" might help.")
assert_raises_regexp(ValueError, msg_regxp, model.fit, X, y)
def test_numerical_stability_large_gradient():
# Non regression test case for numerical stability on scaled problems
# where the gradient can still explode with some losses
model = SGDClassifier(loss='squared_hinge', n_iter=10, shuffle=True,
penalty='elasticnet', l1_ratio=0.3, alpha=0.01,
eta0=0.001, random_state=0)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_true(np.isfinite(model.coef_).all())
def test_large_regularization():
# Non regression tests for numerical stability issues caused by large
# regularization parameters
for penalty in ['l2', 'l1', 'elasticnet']:
model = SGDClassifier(alpha=1e5, learning_rate='constant', eta0=0.1,
n_iter=5, penalty=penalty, shuffle=False)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
|
bsd-3-clause
|
sriramsitharaman/sp17-i524
|
project/S17-IR-P012/code/binarize.py
|
21
|
1096
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 4 15:56:31 2017
I524 Project: OCR
Preprocessing
Binarization
@author: saber
"""
import numpy as np
import cv2
import matplotlib.pyplot as plt
image_path = 'sample1.png'
image_arr = cv2.imread(image_path, 0)
plt.figure(1)
plt.subplot(311)
# Plot histogram of data
plt.hist(image_arr.flatten())
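# Otsu's method: pick the threshold that maximizes the between-class variance
# weight1 * weight2 * (mean1 - mean2)**2 of the two pixel classes split by the
# threshold. Note that np.histogram returns bin *edges* (11 values for 10
# bins), so `bin_centers` below actually holds edges and `bin_centers[1:]`
# lines the upper edges up with the 10 histogram counts.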
hist, bin_centers = np.histogram(image_arr)
weight1 = np.cumsum(hist)
weight2 = np.cumsum(hist[::-1])[::-1]
mean1 = np.cumsum(hist * bin_centers[1:]) / weight1
mean2 = (np.cumsum((hist * bin_centers[1:])[::-1]) / weight2[::-1])[::-1]
variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:])**2
idx = np.argmax(variance12)
threshold = bin_centers[:-1][idx]
img_bin = np.zeros(image_arr.shape)
for i in range(image_arr.shape[0]):
for j in range(image_arr.shape[1]):
if image_arr[i, j] > threshold:
img_bin[i, j] = 255
else:
img_bin[i, j] = 0
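# The explicit double loop above is equivalent to a single vectorized step
# (an alternative sketch, not part of the original script):
# img_bin = np.where(image_arr > threshold, 255.0, 0.0)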
#plt.imshow(image_arr)
#plt.imshow(img_bin)
plt.subplot(312)
plt.imshow(image_arr, 'gray')
plt.subplot(313)
plt.imshow(img_bin, 'gray')
|
apache-2.0
|
fzalkow/scikit-learn
|
examples/linear_model/lasso_dense_vs_sparse_data.py
|
348
|
1862
|
"""
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)
from time import time
from scipy import sparse
from scipy import linalg
from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso
###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)
alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()
print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))
alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
|
bsd-3-clause
|
dhruv13J/scikit-learn
|
sklearn/learning_curve.py
|
110
|
13467
|
"""Utilities to evaluate models with respect to a variable
"""
# Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import is_classifier, clone
from .cross_validation import check_cv
from .externals.joblib import Parallel, delayed
from .cross_validation import _safe_split, _score, _fit_and_score
from .metrics.scorer import check_scoring
from .utils import indexable
from .utils.fixes import astype
__all__ = ['learning_curve', 'validation_curve']
def learning_curve(estimator, X, y, train_sizes=np.linspace(0.1, 1.0, 5),
cv=None, scoring=None, exploit_incremental_learning=False,
n_jobs=1, pre_dispatch="all", verbose=0):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curves>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually has to
        be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
        Numbers of training examples that have been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<example_model_selection_plot_learning_curve.py>`
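    Examples
    --------
    A minimal, illustrative sketch (the estimator and dataset here are
    arbitrary choices, not part of this function's API):
    >>> from sklearn.learning_curve import learning_curve
    >>> from sklearn.svm import SVC
    >>> from sklearn.datasets import load_iris
    >>> iris = load_iris()
    >>> sizes, train_scores, test_scores = learning_curve(
    ...     SVC(kernel='linear'), iris.data, iris.target, cv=5)
    >>> train_scores.shape == (sizes.shape[0], 5)
    True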
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y = indexable(X, y)
# Make a list since we will be iterating multiple times over the folds
cv = list(check_cv(cv, X, y, classifier=is_classifier(estimator)))
scorer = check_scoring(estimator, scoring=scoring)
# HACK as long as boolean indices are allowed in cv generators
if cv[0][0].dtype == bool:
new_cv = []
for i in range(len(cv)):
new_cv.append((np.nonzero(cv[i][0])[0], np.nonzero(cv[i][1])[0]))
cv = new_cv
n_max_training_samples = len(cv[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose) for train, test in cv)
else:
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train[:n_train_samples], test,
verbose, parameters=None, fit_params=None, return_train_score=True)
for train, test in cv for n_train_samples in train_sizes_abs)
out = np.array(out)[:, :2]
n_cv_folds = out.shape[0] // n_unique_ticks
out = out.reshape(n_cv_folds, n_unique_ticks, 2)
out = np.asarray(out).transpose((2, 1, 0))
return train_sizes_abs, out[0], out[1]
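# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): a minimal example of how
# learning_curve might be called and how the per-fold scores are summarised.
# The estimator and dataset below are illustrative assumptions only.
def _example_learning_curve():
    import numpy as np
    from sklearn.datasets import load_iris
    from sklearn.linear_model import LogisticRegression

    iris = load_iris()
    sizes, train_scores, test_scores = learning_curve(
        LogisticRegression(), iris.data, iris.target,
        train_sizes=np.linspace(0.1, 1.0, 5), cv=5)
    # train_scores/test_scores have shape (n_ticks, n_cv_folds); averaging
    # over the fold axis gives one mean score per training-set size.
    return sizes, train_scores.mean(axis=1), test_scores.mean(axis=1)
# ---------------------------------------------------------------------------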
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
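# Editor's sketch: a tiny check of the helper above, mirroring its docstring
# examples. Fractions are scaled by the maximum training-set size, absolute
# counts pass through, and duplicate ticks are dropped (with a warning).
def _example_translate_train_sizes():
    import numpy as np
    assert np.array_equal(
        _translate_train_sizes(np.array([0.5, 1.0]), 10), [5, 10])
    assert np.array_equal(
        _translate_train_sizes(np.array([5, 5, 10]), 10), [5, 10])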
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores = [], []
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes)
train_scores.append(_score(estimator, X_train, y_train, scorer))
test_scores.append(_score(estimator, X_test, y_test, scorer))
return np.array((train_scores, test_scores)).T
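# Editor's sketch of the incremental idea above, assuming an estimator that
# exposes partial_fit (SGDClassifier and the digits data are stand-ins, not
# part of the original code): each chunk of training data is fed once and the
# model is scored on a held-out slice after every chunk.
def _example_incremental_fit():
    import numpy as np
    from sklearn.datasets import load_digits
    from sklearn.linear_model import SGDClassifier

    digits = load_digits()
    X, y = digits.data, digits.target
    clf = SGDClassifier(random_state=0)
    classes = np.unique(y)
    scores, prev = [], 0
    for stop in (200, 400, 600):
        clf.partial_fit(X[prev:stop], y[prev:stop], classes=classes)
        scores.append(clf.score(X[1000:], y[1000:]))
        prev = stop
    return scores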
def validation_curve(estimator, X, y, param_name, param_range, cv=None,
scoring=None, n_jobs=1, pre_dispatch="all", verbose=0):
"""Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Read more in the :ref:`User Guide <validation_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
        sklearn.cross_validation module for the list of possible objects.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See
:ref:`examples/model_selection/plot_validation_curve.py
<example_model_selection_plot_validation_curve.py>`
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
out = parallel(delayed(_fit_and_score)(
estimator, X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=None, return_train_score=True)
for train, test in cv for v in param_range)
out = np.asarray(out)[:, :2]
n_params = len(param_range)
n_cv_folds = out.shape[0] // n_params
out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
return out[0], out[1]
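# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): sweeping a single
# hyper-parameter with validation_curve. SVC and the gamma range are
# illustrative assumptions; both returned arrays have shape
# (len(param_range), n_cv_folds).
def _example_validation_curve():
    import numpy as np
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC

    iris = load_iris()
    param_range = np.logspace(-6, -1, 5)
    train_scores, test_scores = validation_curve(
        SVC(), iris.data, iris.target, param_name="gamma",
        param_range=param_range, cv=5)
    return train_scores.mean(axis=1), test_scores.mean(axis=1)
# ---------------------------------------------------------------------------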
|
bsd-3-clause
|
cameronlai/ml-class-python
|
solutions/ex4/ex4.py
|
1
|
9678
|
import numpy as np
import matplotlib.pyplot as plt
import scipy.io as sio
from scipy import optimize
from ex4_utility import *
## Machine Learning Online Class - Exercise 4: Neural Network Learning
# Instructions
# ------------
#
# This file contains code that helps you get started on the
# neural network exercise. You will need to complete the following functions
# in this exercise:
#
# sigmoidGradient.m
# randInitializeWeights.m
# nnCostFunction.m
#
# ==================== All function declaration ====================
def reshape_param(nn_params, input_layer_size, hidden_layer_size, num_labels):
# Reshape nn_params back into parameters Theta1 and Theta2
Theta1 = nn_params[:hidden_layer_size * (input_layer_size + 1)]
Theta1 = Theta1.reshape((hidden_layer_size, input_layer_size + 1))
Theta2 = nn_params[(hidden_layer_size * (input_layer_size + 1)):]
Theta2 = Theta2.reshape((num_labels, hidden_layer_size + 1))
return Theta1, Theta2
def predict(Theta1, Theta2, X):
    m = X.shape[0]  # number of examples (previously relied on a global m)
    X = np.column_stack((np.ones(m), X))
z2_val = np.dot(X, np.transpose(Theta1))
hiddenLayer = sigmoid(z2_val)
hiddenLayer = np.column_stack((np.ones(m), hiddenLayer))
outputLayer = sigmoid(np.dot(hiddenLayer, np.transpose(Theta2)))
p = np.argmax(outputLayer, axis=1) + 1
p = p.reshape(-1, 1)
return p
def sigmoid(z):
g = np.zeros(z.shape)
g = 1 / (1 + np.exp(-z))
return g
def sigmoidGradient(z):
g = np.zeros(z.shape)
# ============= YOUR CODE HERE =============
# Instructions: Compute the gradient of the sigmoid function at
# each value of z (z can be a matrix, vector or scalar)
tmp = sigmoid(z)
g = tmp * (1 - tmp)
# ===========================================
return g
def randInitializeWeights(L_in, L_out):
W = np.zeros((L_out, 1 + L_in))
# ============= YOUR CODE HERE =============
# Instructions: Initialize W randomly so that we break the symmetry while
# training the neural network.
# Note: The first row of W corresponds to the parameters for the bias units
epsilon_init = 0.12
W = np.random.random((L_out, 1 + L_in)) * 2 * epsilon_init - epsilon_init
# ===========================================
return W
def nnCostFunction(nn_params, input_layer_size, hidden_layer_size, num_labels, X, y, lambda_val):
m = X.shape[0]
J = 0
Theta1, Theta2 = reshape_param(nn_params, input_layer_size, hidden_layer_size, num_labels)
# ============= YOUR CODE HERE =============
# Instructions: Complete the following code to calculate the gradient function
# by using feedforward and regularization
X = np.column_stack((np.ones(m), X))
z2_val = np.dot(X, np.transpose(Theta1))
hiddenLayer = sigmoid(z2_val)
hiddenLayer = np.column_stack((np.ones(m), hiddenLayer))
outputLayer = sigmoid(np.dot(hiddenLayer, np.transpose(Theta2)))
y_array = np.zeros((m, num_labels))
for i in xrange(m):
y_array[i, y[i]-1] = 1
J1 = -y_array * np.log(outputLayer)
J2 = (1 - y_array) * np.log(1 - outputLayer)
J = np.sum(J1 - J2) / m
J += np.sum(np.power(Theta1[:, 1:], 2)) * lambda_val / (2 * m)
J += np.sum(np.power(Theta2[:, 1:], 2)) * lambda_val / (2 * m)
# ===========================================
return J
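# Editor's sketch (not part of the exercise solution): the one-hot encoding
# loop above can equivalently be written in a vectorised form; shown here only
# as an illustration of the same idea.
def _one_hot_sketch(y, num_labels):
    y = np.asarray(y).ravel()
    y_array = np.zeros((y.size, num_labels))
    y_array[np.arange(y.size), y - 1] = 1  # labels run from 1 to num_labels
    return y_array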
def nnGradFunction(nn_params, input_layer_size, hidden_layer_size, num_labels, X, y, lambda_val):
m = X.shape[0]
Theta1, Theta2 = reshape_param(nn_params, input_layer_size, hidden_layer_size, num_labels)
Theta1_grad = np.zeros(Theta1.shape)
Theta2_grad = np.zeros(Theta2.shape)
# ============= YOUR CODE HERE =============
# Instructions: Complete the following code to calculate the gradient function
# by using backpropagation and regularization
X = np.column_stack((np.ones(m), X))
z2_val = np.dot(X, np.transpose(Theta1))
hiddenLayer = sigmoid(z2_val)
hiddenLayer = np.column_stack((np.ones(m), hiddenLayer))
outputLayer = sigmoid(np.dot(hiddenLayer, np.transpose(Theta2)))
y_array = np.zeros((m, num_labels))
for i in xrange(m):
y_array[i, y[i]-1] = 1
error_3 = outputLayer - y_array
for t in xrange(m):
error_3_col = error_3[t,:].reshape((-1,1))
hiddenLayer_row = np.array([hiddenLayer[t, :]])
z2_val_col = z2_val[t,:].reshape((-1,1))
X_row = np.array([X[t,:]])
Theta2_grad = Theta2_grad + np.dot(error_3_col, hiddenLayer_row)
error_2 = np.dot(np.transpose(Theta2), error_3_col)
error_2 = error_2[1:] # Remove bias term
error_2 = error_2 * sigmoidGradient(z2_val_col)
Theta1_grad = Theta1_grad + np.dot(error_2, X_row)
Theta1_grad = Theta1_grad / m
Theta2_grad = Theta2_grad / m
Theta1_grad[:,1:] += Theta1[:,1:] * lambda_val / m
Theta2_grad[:,1:] += Theta2[:,1:] * lambda_val / m
# ===========================================
grad = np.hstack((Theta1_grad.ravel(), Theta2_grad.ravel()))
return grad
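# Editor's sketch of numerical gradient checking, the idea behind the
# checkNNGradients call used below. It assumes costFunc takes a flattened
# parameter vector, as in Part 9 of the main script; this is an illustration,
# not the utility shipped with the exercise.
def numericalGradientSketch(costFunc, theta, epsilon=1e-4):
    numgrad = np.zeros_like(theta)
    for i in range(theta.size):
        e = np.zeros_like(theta)
        e[i] = epsilon
        numgrad[i] = (costFunc(theta + e) - costFunc(theta - e)) / (2 * epsilon)
    return numgrad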
if __name__ == "__main__":
plt.close('all')
plt.ion() # interactive mode
    # Set up the parameters you will use for this part of the exercise
input_layer_size = 400 # 20x20 Input Images of Digits
hidden_layer_size = 25 # 25 hidden units
num_labels = 10 # 10 labels, from 1 to 10
# (note that we have mapped "0" to label 10)
# ==================== Part 1: Loading and Visualizing Data ====================
print('Loading and Visualizing Data ...')
data_file = '../../data/ex4/ex4data1.mat'
mat_content = sio.loadmat(data_file)
X = mat_content['X']
y = mat_content['y']
m, n = X.shape
rand_indices = np.random.permutation(m)
sel = X[rand_indices[:100], :]
displayData(sel)
raw_input('Program paused. Press enter to continue')
# =================== Part 2: Loading Parameters ===================
print('Loading Saved Neural Network Parameters ...')
data_file = '../../data/ex4/ex4weights.mat'
mat_content = sio.loadmat(data_file)
Theta1 = mat_content['Theta1']
Theta2 = mat_content['Theta2']
nn_params = np.hstack((Theta1.ravel(), Theta2.ravel()))
# =================== Part 3: Compute Cost (Feedforward) ===================
print('Feedforward Using Neural Network ...')
# Weight regularization parameter (we set this to 0 here).
lambda_val = 0
J = nnCostFunction(nn_params, input_layer_size, hidden_layer_size, num_labels, X, y, lambda_val)
print('Cost at parameters (loaded from ex4weights): %f \n(this value should be about 0.287629)' % J)
raw_input('Program paused. Press enter to continue')
# =================== Part 4: Implement Regularization ===================
print('Checking Cost Function (w/ Regularization) ...')
lambda_val = 1
J = nnCostFunction(nn_params, input_layer_size, hidden_layer_size, num_labels, X, y, lambda_val)
print('Cost at parameters (loaded from ex4weights): %f \n(this value should be about 0.383770)' % J)
raw_input('Program paused. Press enter to continue')
# =================== Part 5: Sigmoid Gradient ===================
print('Evaluating sigmoid gradient...')
g = sigmoidGradient(np.array([1, -0.5, 0, 0.5, 1]))
print('Sigmoid gradient evaluated at [1 -0.5 0 0.5 1]:')
print(g)
raw_input('Program paused. Press enter to continue')
# =================== Part 6: Initializing Parameters ===================
print('Initializing Neural Network Parameters ...')
    initial_Theta1 = randInitializeWeights(input_layer_size, hidden_layer_size)
    initial_Theta2 = randInitializeWeights(hidden_layer_size, num_labels)
initial_nn_params = np.hstack((initial_Theta1.ravel(), initial_Theta2.ravel()))
# =================== Part 7: Implement Backpropagation ===================
print('Checking Backpropagation...')
checkNNGradients(None)
raw_input('Program paused. Press enter to continue')
# =================== Part 8: Implement Regularization ===================
print('Checking Backpropagation (w/ Regularization) ...')
lambda_val = 3
checkNNGradients(lambda_val)
debug_J = nnCostFunction(nn_params, input_layer_size, hidden_layer_size, num_labels, X, y, lambda_val)
print('Cost at (fixed) debugging parameters (w/ lambda = 3): %f' % debug_J)
print('(this value should be about 0.576051)')
raw_input('Program paused. Press enter to continue')
# =================== Part 9: Training NN ===================
print('Training Neural Network...')
lambda_val = 1
costFunc = lambda p : nnCostFunction(p, input_layer_size, hidden_layer_size, num_labels, X, y, lambda_val)
gradFunc = lambda p : nnGradFunction(p, input_layer_size, hidden_layer_size, num_labels, X, y, lambda_val)
fmin_ret = optimize.fmin_cg(costFunc, initial_nn_params, gradFunc, maxiter=30, full_output=True)
nn_params = fmin_ret[0]
cost = fmin_ret[1]
print('Cost at theta found by fmin: %f' % cost)
Theta1, Theta2 = reshape_param(nn_params, input_layer_size, hidden_layer_size, num_labels)
raw_input('Program paused. Press enter to continue')
# =================== Part 10: Visualize Weights ===================
print('Visualizing Neural Network...')
plt.figure()
displayData(Theta1[:, 1:])
raw_input('Program paused. Press enter to continue')
# =================== Part 11: Implement Predict ===================
    pred = predict(Theta1, Theta2, X)
print('Training Set Accuracy: %f' % (np.mean(pred == y) * 100))
|
mit
|
sclc/NAEF
|
exp_scripts/worker_exp_160520.py
|
1
|
10606
|
"""
Experiment Diary 2016-05-18
"""
import sys
import math
import matplotlib.pyplot as plt
from scipy import io
import numpy as np
from scipy.sparse.linalg import *
sys.path.append("../src/")
from worker import Worker
from native_conjugate_gradient import NativeConjugateGradient
from native_conjugate_gradient import NativeBlockConjugateGradient
from gerschgorin_circle_theorem import GerschgorinCircleTheoremEigenvalueEstimator
from chebyshev_polynomial import ChebyshevPolynomial
from chebyshev_basis_cacg import CBCG
from legendre_basis_cacg import LBCG
from legendre_basis_cacg import BLBCG
from chebyshev_basis_cacg import BCBCG
from presenter import Presenter
from power_iteration import PowerIteration
class WorkerIterativeLinearSystemSolverCG_Exp_160520(Worker):
""" Description: Experiment A
Numerical Method: Naive Conjugate Gradient
tol:
max_iteration:
matrix:
Reference:
1.
"""
def __init__(self, mat_path):
""" """
#print ("WorkerIterativeLinearSystemSolver works good")
Worker.__init__(self)
self._hist_list = []
if mat_path == "":
""" Need to generatre matrix """
print("calling self._matrix_generation")
#self._mat = self._matrix_generation()
else:
self._mat_coo = io.mmread(mat_path)
self._mat = self._mat_coo.tocsr()
self._mat_info = io.mminfo(mat_path)
print("Done reading matrix {}, Row:{}, Col:{}".format( mat_path, self._mat.shape[0], self._mat.shape[1]))
print("mminfo:{}".format(self._mat_info))
if self._mat.getformat() == "csr":
print("Yeah, it is CSR")
def _matrix_generator(self):
""" generation of matrix """
print("_matrix_generator")
def _setup_testbed(self, block_size):
""" this can considered as a basic experiment input descripting """
self._SB = np.random.random( ( self._mat.shape[0],1) )
self._BB = np.random.random( ( self._mat.shape[0],block_size) )
#np.savetxt("/home/scl/tmp/rhs.csv",self._B, delimiter=",")
#self._B = np.ones( ( self._mat.shape[0],6) )
self._SX = np.ones ( (self._mat.shape[1],1) )
self._BX = np.ones ( (self._mat.shape[1],block_size) )
#self._X = np.zeros ( (self._mat.shape[1],1) )
def _setup_numerical_algorithm(self,tol, maxiter, step_val):
""" After a linear solver or other numerical methods loaded
we need to setup the basic prarm for the algorithm
"""
self._tol = tol
self._maxiter = maxiter
self._step_val = step_val
def conduct_experiments(self, block_size, tol, maxiter, step_val):
""" function to condution the experiment """
print("to conduct the experient")
self._setup_testbed(block_size)
self._setup_numerical_algorithm(tol,maxiter,step_val)
#print ("before:{}".format(np.inner(self._X[:,0], self._X[:,0])))
#self._bcbcg_exp()
#self._db_presenter_a()
#self._db_power_iteration()
#self._db_lbcg_exp()
self._db_blbcg_exp()
print("Experiments done")
def _bcbcg_exp(self):
bcbcg_solver_obj = BCBCG()
step_val_a = 3
step_val_b = 5
self._final_X_a, self._final_R_a, self._residual_hist_a = \
bcbcg_solver_obj.bcbcg_solver(self._mat, self._B, self._X, step_val_a, self._tol, self._maxiter,0)
self._final_X_b, self._final_R_b, self._residual_hist_b = \
bcbcg_solver_obj.bcbcg_solver(self._mat, self._B, self._X, step_val_b, self._tol, self._maxiter,0)
def _db_presenter_a(self):
plot_worker = Presenter()
residual_list = [self._residual_hist_a]
residual_list.append(self._residual_hist_b)
legend_list = ["bcbcg_s3", "bcbcg_s5"]
color_list = ["r", "k"]
# latex style notation
#plot_worker.instant_plot_y_log10(residual_list, "crystm01 $x_1$")
#plot_worker.instant_plot_y_log10(residual_list, "crystm01", "#iteration", "$\\frac{||x_1||}{||b_1||}$", legend_list, color_list)
plot_worker.instant_plot_y(residual_list, "crystm01", "#iteration", "$\\frac{||x_1||}{||b_1||}$", legend_list, color_list)
def _db_power_iteration(self):
pi_worker = PowerIteration();
tol = 1e-4
maxiters = 1000
self._v_eigen, self._lambda_eigen, self._lambda_eigen_list = pi_worker.naive_power_iteration (self._mat, self._X, maxiters, tol)
self._v_shift_eigen, self._lambda_shift_eigen, self._lambda_shift_eigen_list = pi_worker.power_iteration_with_shifting (self._mat, self._X, self._lambda_eigen, maxiters, tol)
print ("lambda ", self._lambda_shift_eigen)
plot_worker = Presenter()
ratio_list = [ [x+self._lambda_eigen for x in self._lambda_shift_eigen_list] ]
legend_list = ["naive power iteration"]
color_list = ["k"]
plot_worker.instant_plot_y_log10(ratio_list, "crystm01" , "#iteration", "$\\frac{\\lambda_{old} - \\lambda_{new}}{\\lambda_i{old} }$", legend_list, color_list)
def chebyshev_poly_exp_a (self, order_lo, order_hi):
""" """
x= np.linspace(-1.1,1.1,41)
order_controller = np.zeros(order_hi+1)
y_list = []
plot_worker = Presenter()
legend_list = []
color_list = []
for order_idx in range(order_lo, order_hi+1):
order_controller[order_idx] = 1
cheb = np.polynomial.chebyshev.Chebyshev( order_controller )
choef = np.polynomial.chebyshev.cheb2poly(cheb.coef )
poly = np.polynomial.Polynomial(choef)
y_list.append( poly(x) )
print(order_idx, " ", poly(x))
legend_list.append( "order_"+str(order_idx) )
color_list.append("k")
order_controller[order_idx] = 0
plot_worker.instant_plot_unified_x_axis(x, y_list, "chebyshev Poly" , "x", "y", legend_list, color_list)
def legendre_poly_exp_a(self, order_lo, order_hi):
""" """
x= np.linspace(-1.1,1.1,41)
order_controller = np.zeros(order_hi+1)
y_list = []
plot_worker = Presenter()
legend_list = []
color_list = []
for order_idx in range(order_lo, order_hi+1):
order_controller[order_idx] = 1
legp = np.polynomial.legendre.Legendre( order_controller )
legcoef = np.polynomial.legendre.leg2poly(legp.coef )
poly = np.polynomial.Polynomial(legcoef)
y_list.append( poly(x) )
print(order_idx, " ", poly(x))
legend_list.append( "order_"+str(order_idx) )
color_list.append("k")
order_controller[order_idx] = 0
plot_worker.instant_plot_unified_x_axis(x, y_list, "Legendre Poly" , "x", "y", legend_list, color_list)
def _db_lbcg_exp (self):
""" """
lbcg_solver_obj = LBCG()
self._final_x_a, self._final_r_a, self._residual_hist_a = \
lbcg_solver_obj.lbcg_solver(self._mat, self._B, self._X, 8, self._tol, self._maxiter)
self._final_x_b, self._final_r_b, self._residual_hist_b = \
lbcg_solver_obj.lbcg_solver(self._mat, self._B, self._X, 16, self._tol, self._maxiter)
cbcg_solver_obj = CBCG()
self._final_x_c, self._final_r_c, self._residual_hist_c = \
cbcg_solver_obj.cbcg_solver(self._mat, self._B, self._X, 16, self._tol, self._maxiter)
plot_worker = Presenter()
residual_list = [self._residual_hist_a, self._residual_hist_b, self._residual_hist_c]
legend_list = ["lbcg_s8","lbcg_s16", "cbcg_s16"]
color_list = ["r","k", "b"]
#plot_worker.instant_plot_y_log10(residual_list, "crystm01", "#iteration", "$\\frac{||x_1||}{||b_1||}$", legend_list, color_list)
plot_worker.instant_plot_y_log10(residual_list, "wathen100", "#iteration", "$\\frac{||x_1||}{||b_1||}$", legend_list, color_list)
def _db_blbcg_exp(self):
""" """
lbcg_solver_obj = LBCG()
self._final_x_a, self._final_r_a, self._residual_hist_a = \
lbcg_solver_obj.lbcg_solver(self._mat, self._SB, self._SX, 8, self._tol, self._maxiter)
blbcg_solver_obj = BLBCG()
self._final_x_b, self._final_r_b, self._residual_hist_b = \
blbcg_solver_obj.blbcg_solver(self._mat, self._BB, self._BX, 8, self._tol, self._maxiter, 0)
bcbcg_solver_obj = BCBCG()
self._final_x_c, self._final_r_c, self._residual_hist_c = \
bcbcg_solver_obj.bcbcg_solver(self._mat, self._BB, self._BX, 8, self._tol, self._maxiter, 0)
plot_worker = Presenter()
residual_list = [self._residual_hist_a, self._residual_hist_b, self._residual_hist_c]
legend_list = ["lbcg_s8","blbcg_s8b10", "bcbcg_s8b10"]
color_list = ["r","k", "b"]
plot_worker.instant_plot_y_log10(residual_list, "bodyy6", "#iteration", "$\\frac{||x_1||}{||b_1||}$", legend_list, color_list)
def main ():
# main function for today's experiments
#bad
#mat_path = "/home/scl/MStore/vanbody/vanbody.mtx"
#mat_path = "/home/scl/MStore/olafu/olafu.mtx"
#mat_path = "/home/scl/MStore/raefsky4/raefsky4.mtx"
#mat_path = "/home/scl/MStore/smt/smt.mtx"
#mat_path = "/home/scl/MStore/bcsstk36/bcsstk36.mtx"
#mat_path = "/home/scl/MStore/pdb1HYS/pdb1HYS.mtx"
#mat_path = "/home/scl/MStore/ship_001/ship_001.mtx"
# not so good
#mat_path = "/home/scl/MStore/Dubcova1/Dubcova1.mtx"
#mat_path = "/home/scl/MStore/bcsstk17/bcsstk17.mtx"
#mat_path = "/home/scl/MStore/wathen100/wathen100.mtx"
#mat_path = "/home/scl/MStore/nasa2146/nasa2146.mtx"
#mat_path = "/home/scl/MStore/crystm01/crystm01.mtx"
#mat_path = "/home/scl/MStore/ex13/ex13.mtx"
#mat_path = "/home/scl/MStore/LFAT5/LFAT5.mtx"
#good
mat_path = "/home/scl/MStore/bodyy6/bodyy6.mtx"
#mat_path = "/home/scl/MStore/crystm02/crystm02.mtx"
block_size = 10
tol = 1e-12
maxiter = 5000
step_val =2
linear_system_solver_worker_test = WorkerIterativeLinearSystemSolverCG_Exp_160520(mat_path)
linear_system_solver_worker_test.conduct_experiments(block_size,tol,maxiter, step_val)
#linear_system_solver_worker_test.chebyshev_poly_exp_a(0,6)
#linear_system_solver_worker_test.legendre_poly_exp_a(0,6)
#linear_system_solver_worker_test.debug_NativeConjugateGradient()
if __name__ == "__main__":
""" call main funtion for testing """
main()
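# Editor's sketch (not part of the original experiment script): a plain SciPy
# CG baseline with a residual-history callback, handy for sanity-checking the
# custom block solvers above. The matrix path is a placeholder and the `tol`
# keyword follows the older scipy.sparse.linalg.cg API.
def scipy_cg_baseline(mat_path, tol=1e-12, maxiter=5000):
    import numpy as np
    from scipy import io
    from scipy.sparse.linalg import cg

    A = io.mmread(mat_path).tocsr()
    b = np.random.random(A.shape[0])
    residual_hist = []
    cg(A, b, tol=tol, maxiter=maxiter,
       callback=lambda xk: residual_hist.append(np.linalg.norm(b - A.dot(xk))))
    return residual_hist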
|
gpl-3.0
|
SunghanKim/numpy
|
numpy/lib/function_base.py
|
13
|
143198
|
from __future__ import division, absolute_import, print_function
import warnings
import sys
import collections
import operator
import numpy as np
import numpy.core.numeric as _nx
from numpy.core import linspace, atleast_1d, atleast_2d
from numpy.core.numeric import (
ones, zeros, arange, concatenate, array, asarray, asanyarray, empty,
empty_like, ndarray, around, floor, ceil, take, dot, where, intp,
integer, isscalar
)
from numpy.core.umath import (
pi, multiply, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin,
mod, exp, log10
)
from numpy.core.fromnumeric import (
ravel, nonzero, sort, partition, mean, any, sum
)
from numpy.core.numerictypes import typecodes, number
from numpy.lib.twodim_base import diag
from .utils import deprecate
from numpy.core.multiarray import _insert, add_docstring
from numpy.core.multiarray import digitize, bincount, interp as compiled_interp
from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc
from numpy.compat import long
from numpy.compat.py3k import basestring
# Force range to be a generator, for np.delete's usage.
if sys.version_info[0] < 3:
range = xrange
__all__ = [
'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile',
'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp',
'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average',
'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef',
'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',
'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc'
]
def iterable(y):
"""
Check whether or not an object can be iterated over.
Parameters
----------
y : object
Input object.
Returns
-------
b : {0, 1}
Return 1 if the object has an iterator method or is a sequence,
and 0 otherwise.
Examples
--------
>>> np.iterable([1, 2, 3])
1
>>> np.iterable(2)
0
"""
try:
iter(y)
except:
return 0
return 1
def _hist_optim_numbins_estimator(a, estimator):
"""
A helper function to be called from histogram to deal with estimating optimal number of bins
estimator: str
If estimator is one of ['auto', 'fd', 'scott', 'rice', 'sturges'] this function
        will choose the appropriate estimator and return its estimate for the optimal
number of bins.
"""
assert isinstance(estimator, basestring)
# private function should not be called otherwise
if a.size == 0:
return 1
def sturges(x):
"""
Sturges Estimator
A very simplistic estimator based on the assumption of normality of the data
Poor performance for non-normal data, especially obvious for large X.
Depends only on size of the data.
"""
return np.ceil(np.log2(x.size)) + 1
def rice(x):
"""
Rice Estimator
Another simple estimator, with no normality assumption.
It has better performance for large data, but tends to overestimate number of bins.
The number of bins is proportional to the cube root of data size (asymptotically optimal)
Depends only on size of the data
"""
return np.ceil(2 * x.size ** (1.0 / 3))
def scott(x):
"""
Scott Estimator
The binwidth is proportional to the standard deviation of the data and
inversely proportional to the cube root of data size (asymptotically optimal)
"""
h = 3.5 * x.std() * x.size ** (-1.0 / 3)
if h > 0:
return np.ceil(x.ptp() / h)
return 1
def fd(x):
"""
Freedman Diaconis rule using Inter Quartile Range (IQR) for binwidth
Considered a variation of the Scott rule with more robustness as the IQR
is less affected by outliers than the standard deviation. However the IQR depends on
fewer points than the sd so it is less accurate, especially for long tailed distributions.
If the IQR is 0, we return 1 for the number of bins.
Binwidth is inversely proportional to the cube root of data size (asymptotically optimal)
"""
iqr = np.subtract(*np.percentile(x, [75, 25]))
if iqr > 0:
h = (2 * iqr * x.size ** (-1.0 / 3))
return np.ceil(x.ptp() / h)
# If iqr is 0, default number of bins is 1
return 1
def auto(x):
"""
        The FD estimator is usually the most robust method, but its bin count
        tends to be too small for small X. The Sturges estimator is quite good
        for small (<1000) datasets and is the default in R.
This method gives good off the shelf behaviour.
"""
return max(fd(x), sturges(x))
optimal_numbins_methods = {'sturges': sturges, 'rice': rice, 'scott': scott,
'fd': fd, 'auto': auto}
try:
estimator_func = optimal_numbins_methods[estimator.lower()]
except KeyError:
raise ValueError("{0} not a valid method for `bins`".format(estimator))
else:
# these methods return floats, np.histogram requires an int
return int(estimator_func(a))
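# Editor's sketch (not part of the numpy source): what the estimators above
# return for a small normal sample; purely illustrative.
def _example_bin_estimators():
    rng = np.random.RandomState(0)
    a = rng.normal(size=1000)
    return {name: _hist_optim_numbins_estimator(a, name)
            for name in ('sturges', 'rice', 'scott', 'fd', 'auto')}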
def histogram(a, bins=10, range=None, normed=False, weights=None,
density=None):
"""
Compute the histogram of a set of data.
Parameters
----------
a : array_like
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars or str, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a sequence,
it defines the bin edges, including the rightmost edge, allowing
for non-uniform bin widths.
.. versionadded:: 1.11.0
If `bins` is a string from the list below, `histogram` will use the method
chosen to calculate the optimal number of bins (see Notes for more detail
on the estimators). For visualisation, we suggest using the 'auto' option.
'auto'
Maximum of the 'sturges' and 'fd' estimators. Provides good all round performance
'fd' (Freedman Diaconis Estimator)
Robust (resilient to outliers) estimator that takes into account data
variability and data size .
'scott'
            Less robust estimator that takes into account data
variability and data size.
'rice'
Estimator does not take variability into account, only data size.
Commonly overestimates number of bins required.
'sturges'
R's default method, only accounts for data size. Only optimal for
gaussian data and underestimates number of bins for large non-gaussian datasets.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored.
normed : bool, optional
This keyword is deprecated in Numpy 1.6 due to confusing/buggy
behavior. It will be removed in Numpy 2.0. Use the density keyword
instead.
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that this latter behavior is
known to be buggy with unequal bin widths; use `density` instead.
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in `a`
only contributes its associated weight towards the bin count
(instead of 1). If `normed` is True, the weights are normalized,
so that the integral of the density over the range remains 1
density : bool, optional
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability *mass* function.
Overrides the `normed` keyword if given.
Returns
-------
hist : array
The values of the histogram. See `normed` and `weights` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
histogramdd, bincount, searchsorted, digitize
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is::
[1, 2, 3, 4]
then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the
second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes*
4.
.. versionadded:: 1.11.0
    The methods to estimate the optimal number of bins are well established in the literature,
and are inspired by the choices R provides for histogram visualisation.
Note that having the number of bins proportional to :math:`n^{1/3}` is asymptotically optimal,
which is why it appears in most estimators.
These are simply plug-in methods that give good starting points for number of bins.
In the equations below, :math:`h` is the binwidth and :math:`n_h` is the number of bins
'Auto' (maximum of the 'Sturges' and 'FD' estimators)
A compromise to get a good value. For small datasets the sturges
value will usually be chosen, while larger datasets will usually default to FD.
Avoids the overly conservative behaviour of FD and Sturges for small and
large datasets respectively. Switchover point is usually x.size~1000.
'FD' (Freedman Diaconis Estimator)
        .. math:: h = 2 \\frac{IQR}{n^{1/3}}
The binwidth is proportional to the interquartile range (IQR)
and inversely proportional to cube root of a.size. Can be too
conservative for small datasets, but is quite good
for large datasets. The IQR is very robust to outliers.
'Scott'
        .. math:: h = \\frac{3.5\\sigma}{n^{1/3}}
The binwidth is proportional to the standard deviation (sd) of the data
and inversely proportional to cube root of a.size. Can be too
conservative for small datasets, but is quite good
for large datasets. The sd is not very robust to outliers. Values
are very similar to the Freedman Diaconis Estimator in the absence of outliers.
'Rice'
.. math:: n_h = \\left\\lceil 2n^{1/3} \\right\\rceil
The number of bins is only proportional to cube root of a.size.
It tends to overestimate the number of bins
and it does not take into account data variability.
'Sturges'
.. math:: n_h = \\left\\lceil \\log _{2}n+1 \\right\\rceil
The number of bins is the base2 log of a.size.
This estimator assumes normality of data and is too conservative for larger,
non-normal datasets. This is the default method in R's `hist` method.
Examples
--------
>>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
(array([0, 2, 1]), array([0, 1, 2, 3]))
>>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
(array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
>>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
(array([1, 4, 1]), array([0, 1, 2, 3]))
>>> a = np.arange(5)
>>> hist, bin_edges = np.histogram(a, density=True)
>>> hist
array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
>>> hist.sum()
2.4999999999999996
>>> np.sum(hist*np.diff(bin_edges))
1.0
.. versionadded:: 1.11.0
Automated Bin Selection Methods example, using 2 peak random data with 2000 points
>>> import matplotlib.pyplot as plt
>>> rng = np.random.RandomState(10) # deterministic random data
>>> a = np.hstack((rng.normal(size = 1000), rng.normal(loc = 5, scale = 2, size = 1000)))
    >>> plt.hist(a, bins = 'auto') # plt.hist passes its arguments to np.histogram
>>> plt.title("Histogram with 'auto' bins")
>>> plt.show()
"""
a = asarray(a)
if weights is not None:
weights = asarray(weights)
if np.any(weights.shape != a.shape):
raise ValueError(
'weights should have the same shape as a.')
weights = weights.ravel()
a = a.ravel()
if (range is not None):
mn, mx = range
if (mn > mx):
raise AttributeError(
'max must be larger than min in range parameter.')
if isinstance(bins, basestring):
bins = _hist_optim_numbins_estimator(a, bins)
# if `bins` is a string for an automatic method,
# this will replace it with the number of bins calculated
# Histogram is an integer or a float array depending on the weights.
if weights is None:
ntype = np.dtype(np.intp)
else:
ntype = weights.dtype
# We set a block size, as this allows us to iterate over chunks when
# computing histograms, to minimize memory usage.
BLOCK = 65536
if not iterable(bins):
if np.isscalar(bins) and bins < 1:
raise ValueError(
'`bins` should be a positive integer.')
if range is None:
if a.size == 0:
# handle empty arrays. Can't determine range, so use 0-1.
range = (0, 1)
else:
range = (a.min(), a.max())
mn, mx = [mi + 0.0 for mi in range]
if mn == mx:
mn -= 0.5
mx += 0.5
# At this point, if the weights are not integer, floating point, or
# complex, we have to use the slow algorithm.
if weights is not None and not (np.can_cast(weights.dtype, np.double) or
np.can_cast(weights.dtype, np.complex)):
bins = linspace(mn, mx, bins + 1, endpoint=True)
if not iterable(bins):
# We now convert values of a to bin indices, under the assumption of
# equal bin widths (which is valid here).
# Initialize empty histogram
n = np.zeros(bins, ntype)
# Pre-compute histogram scaling factor
norm = bins / (mx - mn)
# We iterate over blocks here for two reasons: the first is that for
# large arrays, it is actually faster (for example for a 10^8 array it
# is 2x as fast) and it results in a memory footprint 3x lower in the
# limit of large arrays.
for i in arange(0, len(a), BLOCK):
tmp_a = a[i:i+BLOCK]
if weights is None:
tmp_w = None
else:
tmp_w = weights[i:i + BLOCK]
# Only include values in the right range
keep = (tmp_a >= mn)
keep &= (tmp_a <= mx)
if not np.logical_and.reduce(keep):
tmp_a = tmp_a[keep]
if tmp_w is not None:
tmp_w = tmp_w[keep]
tmp_a = tmp_a.astype(float)
tmp_a -= mn
tmp_a *= norm
# Compute the bin indices, and for values that lie exactly on mx we
# need to subtract one
indices = tmp_a.astype(np.intp)
indices[indices == bins] -= 1
# We now compute the histogram using bincount
if ntype.kind == 'c':
n.real += np.bincount(indices, weights=tmp_w.real, minlength=bins)
n.imag += np.bincount(indices, weights=tmp_w.imag, minlength=bins)
else:
n += np.bincount(indices, weights=tmp_w, minlength=bins).astype(ntype)
# We now compute the bin edges since these are returned
bins = linspace(mn, mx, bins + 1, endpoint=True)
else:
bins = asarray(bins)
if (np.diff(bins) < 0).any():
raise AttributeError(
'bins must increase monotonically.')
# Initialize empty histogram
n = np.zeros(bins.shape, ntype)
if weights is None:
for i in arange(0, len(a), BLOCK):
sa = sort(a[i:i+BLOCK])
n += np.r_[sa.searchsorted(bins[:-1], 'left'),
sa.searchsorted(bins[-1], 'right')]
else:
zero = array(0, dtype=ntype)
for i in arange(0, len(a), BLOCK):
tmp_a = a[i:i+BLOCK]
tmp_w = weights[i:i+BLOCK]
sorting_index = np.argsort(tmp_a)
sa = tmp_a[sorting_index]
sw = tmp_w[sorting_index]
cw = np.concatenate(([zero, ], sw.cumsum()))
bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'),
sa.searchsorted(bins[-1], 'right')]
n += cw[bin_index]
n = np.diff(n)
if density is not None:
if density:
db = array(np.diff(bins), float)
return n/db/n.sum(), bins
else:
return n, bins
else:
# deprecated, buggy behavior. Remove for Numpy 2.0
if normed:
db = array(np.diff(bins), float)
return n/(n*db).sum(), bins
else:
return n, bins
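# Editor's sketch (not part of the numpy source) of the uniform-bin fast path
# used above: values are mapped linearly onto bin indices and counted with
# bincount. Edge handling is simplified; shown only to make the block-wise
# code easier to follow.
def _uniform_histogram_sketch(a, nbins, lo, hi):
    a = np.asarray(a, dtype=float)
    keep = (a >= lo) & (a <= hi)
    idx = ((a[keep] - lo) * (nbins / (hi - lo))).astype(np.intp)
    idx[idx == nbins] -= 1  # values exactly equal to hi go in the last bin
    return np.bincount(idx, minlength=nbins), np.linspace(lo, hi, nbins + 1)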
def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
"""
Compute the multidimensional histogram of some data.
Parameters
----------
sample : array_like
The data to be histogrammed. It must be an (N,D) array or data
that can be converted to such. The rows of the resulting array
are the coordinates of points in a D dimensional polytope.
bins : sequence or int, optional
The bin specification:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... =bins)
* The number of bins for all dimensions (nx=ny=...=bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_volume``.
weights : (N,) array_like, optional
An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
Weights are normalized to 1 if normed is True. If normed is False,
the values of the returned histogram are equal to the sum of the
weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray
The multidimensional histogram of sample x. See normed and weights
for the different possible semantics.
edges : list
A list of D arrays describing the bin edges for each dimension.
See Also
--------
histogram: 1-D histogram
histogram2d: 2-D histogram
Examples
--------
>>> r = np.random.randn(100,3)
>>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
>>> H.shape, edges[0].size, edges[1].size, edges[2].size
((5, 8, 4), 6, 9, 5)
"""
try:
# Sample is an ND-array.
N, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = atleast_2d(sample).T
N, D = sample.shape
nbin = empty(D, int)
edges = D*[None]
dedges = D*[None]
if weights is not None:
weights = asarray(weights)
try:
M = len(bins)
if M != D:
raise AttributeError(
'The dimension of bins must be equal to the dimension of the '
' sample x.')
except TypeError:
# bins is an integer
bins = D*[bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
# Handle empty input. Range can't be determined in that case, use 0-1.
if N == 0:
smin = zeros(D)
smax = ones(D)
else:
smin = atleast_1d(array(sample.min(0), float))
smax = atleast_1d(array(sample.max(0), float))
else:
smin = zeros(D)
smax = zeros(D)
for i in arange(D):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in arange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# avoid rounding issues for comparisons when dealing with inexact types
if np.issubdtype(sample.dtype, np.inexact):
edge_dt = sample.dtype
else:
edge_dt = float
# Create edge arrays
for i in arange(D):
if isscalar(bins[i]):
if bins[i] < 1:
raise ValueError(
"Element at index %s in `bins` should be a positive "
"integer." % i)
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = linspace(smin[i], smax[i], nbin[i]-1, dtype=edge_dt)
else:
edges[i] = asarray(bins[i], edge_dt)
nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
dedges[i] = diff(edges[i])
if np.any(np.asarray(dedges[i]) <= 0):
raise ValueError(
"Found bin edge of size <= 0. Did you specify `bins` with"
"non-monotonic sequence?")
nbin = asarray(nbin)
# Handle empty input.
if N == 0:
return np.zeros(nbin-2), edges
# Compute the bin number each sample falls into.
Ncount = {}
for i in arange(D):
Ncount[i] = digitize(sample[:, i], edges[i])
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right edge to be
# counted in the last bin, and not as an outlier.
for i in arange(D):
# Rounding precision
mindiff = dedges[i].min()
if not np.isinf(mindiff):
decimal = int(-log10(mindiff)) + 6
# Find which points are on the rightmost edge.
not_smaller_than_edge = (sample[:, i] >= edges[i][-1])
on_edge = (around(sample[:, i], decimal) ==
around(edges[i][-1], decimal))
# Shift these points one bin to the left.
Ncount[i][where(on_edge & not_smaller_than_edge)[0]] -= 1
# Flattened histogram matrix (1D)
# Reshape is used so that overlarge arrays
# will raise an error.
hist = zeros(nbin, float).reshape(-1)
# Compute the sample indices in the flattened histogram matrix.
ni = nbin.argsort()
xy = zeros(N, int)
for i in arange(0, D-1):
xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod()
xy += Ncount[ni[-1]]
# Compute the number of repetitions in xy and assign it to the
# flattened histmat.
if len(xy) == 0:
return zeros(nbin-2, int), edges
flatcount = bincount(xy, weights)
a = arange(len(flatcount))
hist[a] = flatcount
# Shape into a proper matrix
hist = hist.reshape(sort(nbin))
for i in arange(nbin.size):
j = ni.argsort()[i]
hist = hist.swapaxes(i, j)
ni[i], ni[j] = ni[j], ni[i]
# Remove outliers (indices 0 and -1 for each dimension).
core = D*[slice(1, -1)]
hist = hist[core]
# Normalize if normed is True
if normed:
s = hist.sum()
for i in arange(D):
shape = ones(D, int)
shape[i] = nbin[i] - 2
hist = hist / dedges[i].reshape(shape)
hist /= s
if (hist.shape != nbin - 2).any():
raise RuntimeError(
"Internal Shape Error")
return hist, edges
def average(a, axis=None, weights=None, returned=False):
"""
Compute the weighted average along the specified axis.
Parameters
----------
a : array_like
Array containing data to be averaged. If `a` is not an array, a
conversion is attempted.
axis : int, optional
Axis along which to average `a`. If `None`, averaging is done over
the flattened array.
weights : array_like, optional
An array of weights associated with the values in `a`. Each value in
`a` contributes to the average according to its associated weight.
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given axis) or of the same shape as `a`.
If `weights=None`, then all data in `a` are assumed to have a
weight equal to one.
returned : bool, optional
Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`)
is returned, otherwise only the average is returned.
If `weights=None`, `sum_of_weights` is equivalent to the number of
elements over which the average is taken.
Returns
-------
average, [sum_of_weights] : array_type or double
Return the average along the specified axis. When returned is `True`,
return a tuple with the average as the first element and the sum
of the weights as the second element. The return type is `Float`
if `a` is of integer type, otherwise it is of the same type as `a`.
`sum_of_weights` is of the same type as `average`.
Raises
------
ZeroDivisionError
When all weights along axis are zero. See `numpy.ma.average` for a
version robust to this type of error.
TypeError
When the length of 1D `weights` is not the same as the shape of `a`
along axis.
See Also
--------
mean
ma.average : average for masked arrays -- useful if your data contains
"missing" values
Examples
--------
>>> data = range(1,5)
>>> data
[1, 2, 3, 4]
>>> np.average(data)
2.5
>>> np.average(range(1,11), weights=range(10,0,-1))
4.0
>>> data = np.arange(6).reshape((3,2))
>>> data
array([[0, 1],
[2, 3],
[4, 5]])
>>> np.average(data, axis=1, weights=[1./4, 3./4])
array([ 0.75, 2.75, 4.75])
>>> np.average(data, weights=[1./4, 3./4])
Traceback (most recent call last):
...
TypeError: Axis must be specified when shapes of a and weights differ.
"""
if not isinstance(a, np.matrix):
a = np.asarray(a)
if weights is None:
avg = a.mean(axis)
scl = avg.dtype.type(a.size/avg.size)
else:
a = a + 0.0
wgt = np.asarray(weights)
# Sanity checks
if a.shape != wgt.shape:
if axis is None:
raise TypeError(
"Axis must be specified when shapes of a and weights "
"differ.")
if wgt.ndim != 1:
raise TypeError(
"1D weights expected when shapes of a and weights differ.")
if wgt.shape[0] != a.shape[axis]:
raise ValueError(
"Length of weights not compatible with specified axis.")
# setup wgt to broadcast along axis
wgt = np.array(wgt, copy=0, ndmin=a.ndim).swapaxes(-1, axis)
scl = wgt.sum(axis=axis, dtype=np.result_type(a.dtype, wgt.dtype))
if (scl == 0.0).any():
raise ZeroDivisionError(
"Weights sum to zero, can't be normalized")
avg = np.multiply(a, wgt).sum(axis)/scl
if returned:
scl = np.multiply(avg, 0) + scl
return avg, scl
else:
return avg
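# Editor's sketch (not part of the numpy source): for 1-D data the weighted
# average above reduces to sum(a * w) / sum(w); a quick equivalence check
# matching the docstring example.
def _example_weighted_average():
    a = np.arange(1, 11)
    w = np.arange(10, 0, -1)
    assert np.allclose(average(a, weights=w), (a * w).sum() / w.sum())
    return average(a, weights=w)  # 4.0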
def asarray_chkfinite(a, dtype=None, order=None):
"""Convert the input to an array, checking for NaNs or Infs.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array. This
includes lists, lists of tuples, tuples, tuples of tuples, tuples
of lists and ndarrays. Success requires no NaNs or Infs.
dtype : data-type, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major (C-style) or
column-major (Fortran-style) memory representation.
Defaults to 'C'.
Returns
-------
out : ndarray
Array interpretation of `a`. No copy is performed if the input
is already an ndarray. If `a` is a subclass of ndarray, a base
class ndarray is returned.
Raises
------
ValueError
Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity).
See Also
--------
    asarray : Create an array.
asanyarray : Similar function which passes through subclasses.
ascontiguousarray : Convert input to a contiguous array.
asfarray : Convert input to a floating point ndarray.
asfortranarray : Convert input to an ndarray with column-major
memory order.
fromiter : Create an array from an iterator.
fromfunction : Construct an array by executing a function on grid
positions.
Examples
--------
Convert a list into an array. If all elements are finite
``asarray_chkfinite`` is identical to ``asarray``.
>>> a = [1, 2]
>>> np.asarray_chkfinite(a, dtype=float)
array([1., 2.])
Raises ValueError if array_like contains Nans or Infs.
>>> a = [1, 2, np.inf]
>>> try:
... np.asarray_chkfinite(a)
... except ValueError:
... print 'ValueError'
...
ValueError
"""
a = asarray(a, dtype=dtype, order=order)
if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all():
raise ValueError(
"array must not contain infs or NaNs")
return a
def piecewise(x, condlist, funclist, *args, **kw):
"""
Evaluate a piecewise-defined function.
Given a set of conditions and corresponding functions, evaluate each
function on the input data wherever its condition is true.
Parameters
----------
x : ndarray
The input domain.
condlist : list of bool arrays
Each boolean array corresponds to a function in `funclist`. Wherever
`condlist[i]` is True, `funclist[i](x)` is used as the output value.
Each boolean array in `condlist` selects a piece of `x`,
and should therefore be of the same shape as `x`.
The length of `condlist` must correspond to that of `funclist`.
If one extra function is given, i.e. if
``len(funclist) - len(condlist) == 1``, then that extra function
is the default value, used wherever all conditions are false.
funclist : list of callables, f(x,*args,**kw), or scalars
Each function is evaluated over `x` wherever its corresponding
condition is True. It should take an array as input and give an array
or a scalar value as output. If, instead of a callable,
a scalar is provided then a constant function (``lambda x: scalar``) is
assumed.
args : tuple, optional
Any further arguments given to `piecewise` are passed to the functions
upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then
each function is called as ``f(x, 1, 'a')``.
kw : dict, optional
Keyword arguments used in calling `piecewise` are passed to the
functions upon execution, i.e., if called
``piecewise(..., ..., lambda=1)``, then each function is called as
``f(x, lambda=1)``.
Returns
-------
out : ndarray
The output is the same shape and type as x and is found by
calling the functions in `funclist` on the appropriate portions of `x`,
as defined by the boolean arrays in `condlist`. Portions not covered
by any condition have a default value of 0.
See Also
--------
choose, select, where
Notes
-----
This is similar to choose or select, except that functions are
evaluated on elements of `x` that satisfy the corresponding condition from
`condlist`.
The result is::
|--
|funclist[0](x[condlist[0]])
out = |funclist[1](x[condlist[1]])
|...
|funclist[n2](x[condlist[n2]])
|--
Examples
--------
Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``.
>>> x = np.linspace(-2.5, 2.5, 6)
>>> np.piecewise(x, [x < 0, x >= 0], [-1, 1])
array([-1., -1., -1., 1., 1., 1.])
Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for
``x >= 0``.
>>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x])
array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5])
"""
x = asanyarray(x)
n2 = len(funclist)
if (isscalar(condlist) or not (isinstance(condlist[0], list) or
isinstance(condlist[0], ndarray))):
condlist = [condlist]
condlist = array(condlist, dtype=bool)
n = len(condlist)
# This is a hack to work around problems with NumPy's
# handling of 0-d arrays and boolean indexing with
# numpy.bool_ scalars
zerod = False
if x.ndim == 0:
x = x[None]
zerod = True
if condlist.shape[-1] != 1:
condlist = condlist.T
if n == n2 - 1: # compute the "otherwise" condition.
totlist = np.logical_or.reduce(condlist, axis=0)
condlist = np.vstack([condlist, ~totlist])
n += 1
if (n != n2):
raise ValueError(
"function list and condition list must be the same")
y = zeros(x.shape, x.dtype)
for k in range(n):
item = funclist[k]
if not isinstance(item, collections.Callable):
y[condlist[k]] = item
else:
vals = x[condlist[k]]
if vals.size > 0:
y[condlist[k]] = item(vals, *args, **kw)
if zerod:
y = y.squeeze()
return y
def select(condlist, choicelist, default=0):
"""
Return an array drawn from elements in choicelist, depending on conditions.
Parameters
----------
condlist : list of bool ndarrays
The list of conditions which determine from which array in `choicelist`
the output elements are taken. When multiple conditions are satisfied,
the first one encountered in `condlist` is used.
choicelist : list of ndarrays
The list of arrays from which the output elements are taken. It has
to be of the same length as `condlist`.
default : scalar, optional
The element inserted in `output` when all conditions evaluate to False.
Returns
-------
output : ndarray
The output at position m is the m-th element of the array in
`choicelist` where the m-th element of the corresponding array in
`condlist` is True.
See Also
--------
where : Return elements from one of two arrays depending on condition.
take, choose, compress, diag, diagonal
Examples
--------
>>> x = np.arange(10)
>>> condlist = [x<3, x>5]
>>> choicelist = [x, x**2]
>>> np.select(condlist, choicelist)
array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81])
"""
# Check the size of condlist and choicelist are the same, or abort.
if len(condlist) != len(choicelist):
raise ValueError(
'list of cases must be same length as list of conditions')
# Now that the dtype is known, handle the deprecated select([], []) case
if len(condlist) == 0:
# 2014-02-24, 1.9
warnings.warn("select with an empty condition list is not possible"
"and will be deprecated",
DeprecationWarning)
return np.asarray(default)[()]
choicelist = [np.asarray(choice) for choice in choicelist]
choicelist.append(np.asarray(default))
# need to get the result type before broadcasting for correct scalar
# behaviour
dtype = np.result_type(*choicelist)
# Convert conditions to arrays and broadcast conditions and choices
    # as the shape is needed for the result. Doing it separately optimizes
# for example when all choices are scalars.
condlist = np.broadcast_arrays(*condlist)
choicelist = np.broadcast_arrays(*choicelist)
# If cond array is not an ndarray in boolean format or scalar bool, abort.
deprecated_ints = False
for i in range(len(condlist)):
cond = condlist[i]
if cond.dtype.type is not np.bool_:
if np.issubdtype(cond.dtype, np.integer):
# A previous implementation accepted int ndarrays accidentally.
# Supported here deliberately, but deprecated.
condlist[i] = condlist[i].astype(bool)
deprecated_ints = True
else:
raise ValueError(
'invalid entry in choicelist: should be boolean ndarray')
if deprecated_ints:
# 2014-02-24, 1.9
msg = "select condlists containing integer ndarrays is deprecated " \
"and will be removed in the future. Use `.astype(bool)` to " \
"convert to bools."
warnings.warn(msg, DeprecationWarning)
if choicelist[0].ndim == 0:
# This may be common, so avoid the call.
result_shape = condlist[0].shape
else:
result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape
result = np.full(result_shape, choicelist[-1], dtype)
# Use np.copyto to burn each choicelist array onto result, using the
# corresponding condlist as a boolean mask. This is done in reverse
# order since the first choice should take precedence.
choicelist = choicelist[-2::-1]
condlist = condlist[::-1]
for choice, cond in zip(choicelist, condlist):
np.copyto(result, choice, where=cond)
return result
def copy(a, order='K'):
"""
Return an array copy of the given object.
Parameters
----------
a : array_like
Input data.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible. (Note that this function and :meth:ndarray.copy are very
similar, but have different default values for their order=
arguments.)
Returns
-------
arr : ndarray
Array interpretation of `a`.
Notes
-----
This is equivalent to
>>> np.array(a, copy=True) #doctest: +SKIP
Examples
--------
Create an array x, with a reference y and a copy z:
>>> x = np.array([1, 2, 3])
>>> y = x
>>> z = np.copy(x)
Note that, when we modify x, y changes, but not z:
>>> x[0] = 10
>>> x[0] == y[0]
True
>>> x[0] == z[0]
False
"""
return array(a, order=order, copy=True)
# Basic operations
def gradient(f, *varargs, **kwargs):
"""
Return the gradient of an N-dimensional array.
The gradient is computed using second order accurate central differences
in the interior and either first differences or second order accurate
    one-sided (forward or backward) differences at the boundaries. The
returned gradient hence has the same shape as the input array.
Parameters
----------
f : array_like
An N-dimensional array containing samples of a scalar function.
varargs : scalar or list of scalar, optional
N scalars specifying the sample distances for each dimension,
i.e. `dx`, `dy`, `dz`, ... Default distance: 1.
        A single scalar specifies the sample distance for all dimensions.
        If `axis` is given, the number of varargs must equal the number of axes.
edge_order : {1, 2}, optional
Gradient is calculated using N\ :sup:`th` order accurate differences
at the boundaries. Default: 1.
.. versionadded:: 1.9.1
axis : None or int or tuple of ints, optional
Gradient is calculated only along the given axis or axes
The default (axis = None) is to calculate the gradient for all the axes of the input array.
axis may be negative, in which case it counts from the last to the first axis.
.. versionadded:: 1.11.0
Returns
-------
gradient : list of ndarray
Each element of `list` has the same shape as `f` giving the derivative
of `f` with respect to each dimension.
Examples
--------
>>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float)
>>> np.gradient(x)
array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
>>> np.gradient(x, 2)
array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])
For two dimensional arrays, the return will be two arrays ordered by
axis. In this example the first array stands for the gradient in
rows and the second one in columns direction:
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float))
[array([[ 2., 2., -1.],
[ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ],
[ 1. , 1. , 1. ]])]
>>> x = np.array([0, 1, 2, 3, 4])
>>> dx = np.gradient(x)
>>> y = x**2
>>> np.gradient(y, dx, edge_order=2)
array([-0., 2., 4., 6., 8.])
    The axis keyword can be used to specify a subset of axes along which the gradient is calculated
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float), axis=0)
array([[ 2., 2., -1.],
[ 2., 2., -1.]])
"""
f = np.asanyarray(f)
N = len(f.shape) # number of dimensions
axes = kwargs.pop('axis', None)
if axes is None:
axes = tuple(range(N))
# check axes to have correct type and no duplicate entries
if isinstance(axes, int):
axes = (axes,)
if not isinstance(axes, tuple):
raise TypeError("A tuple of integers or a single integer is required")
# normalize axis values:
axes = tuple(x + N if x < 0 else x for x in axes)
if max(axes) >= N or min(axes) < 0:
raise ValueError("'axis' entry is out of bounds")
if len(set(axes)) != len(axes):
raise ValueError("duplicate value in 'axis'")
n = len(varargs)
if n == 0:
dx = [1.0]*N
elif n == 1:
dx = [varargs[0]]*N
elif n == len(axes):
dx = list(varargs)
else:
raise SyntaxError(
"invalid number of arguments")
edge_order = kwargs.pop('edge_order', 1)
if kwargs:
raise TypeError('"{}" are not valid keyword arguments.'.format(
'", "'.join(kwargs.keys())))
if edge_order > 2:
raise ValueError("'edge_order' greater than 2 not supported")
# use central differences on interior and one-sided differences on the
# endpoints. This preserves second order-accuracy over the full domain.
outvals = []
# create slice objects --- initially all are [:, :, ..., :]
slice1 = [slice(None)]*N
slice2 = [slice(None)]*N
slice3 = [slice(None)]*N
slice4 = [slice(None)]*N
otype = f.dtype.char
if otype not in ['f', 'd', 'F', 'D', 'm', 'M']:
otype = 'd'
# Difference of datetime64 elements results in timedelta64
if otype == 'M':
# Need to use the full dtype name because it contains unit information
otype = f.dtype.name.replace('datetime', 'timedelta')
elif otype == 'm':
# Needs to keep the specific units, can't be a general unit
otype = f.dtype
# Convert datetime64 data into ints. Make dummy variable `y`
# that is a view of ints if the data is datetime64, otherwise
    # just set y equal to the array `f`.
if f.dtype.char in ["M", "m"]:
y = f.view('int64')
else:
y = f
for i, axis in enumerate(axes):
if y.shape[axis] < 2:
raise ValueError(
"Shape of array too small to calculate a numerical gradient, "
"at least two elements are required.")
# Numerical differentiation: 1st order edges, 2nd order interior
if y.shape[axis] == 2 or edge_order == 1:
# Use first order differences for time data
out = np.empty_like(y, dtype=otype)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
out[slice1] = (y[slice2] - y[slice3])/2.0
slice1[axis] = 0
slice2[axis] = 1
slice3[axis] = 0
# 1D equivalent -- out[0] = (y[1] - y[0])
out[slice1] = (y[slice2] - y[slice3])
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
# 1D equivalent -- out[-1] = (y[-1] - y[-2])
out[slice1] = (y[slice2] - y[slice3])
        # Numerical differentiation: 2nd order edges, 2nd order interior
else:
# Use second order differences where possible
out = np.empty_like(y, dtype=otype)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
out[slice1] = (y[slice2] - y[slice3])/2.0
slice1[axis] = 0
slice2[axis] = 0
slice3[axis] = 1
slice4[axis] = 2
# 1D equivalent -- out[0] = -(3*y[0] - 4*y[1] + y[2]) / 2.0
out[slice1] = -(3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
slice4[axis] = -3
            # 1D equivalent -- out[-1] = (3*y[-1] - 4*y[-2] + y[-3]) / 2.0
out[slice1] = (3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0
# divide by step size
out /= dx[i]
outvals.append(out)
# reset the slice object in this dimension to ":"
slice1[axis] = slice(None)
slice2[axis] = slice(None)
slice3[axis] = slice(None)
slice4[axis] = slice(None)
if len(axes) == 1:
return outvals[0]
else:
return outvals
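# A minimal 1-D sketch (assumption: plain NumPy only, at least three samples)
# of the stencils used by gradient above: central differences in the interior
# and, for edge_order=2, one-sided second-order formulas at both ends.
# `_demo_gradient_stencils` is a hypothetical helper, not part of the public API.
def _demo_gradient_stencils(y, dx=1.0):
    import numpy as np
    y = np.asarray(y, dtype=float)
    out = np.empty_like(y)
    out[1:-1] = (y[2:] - y[:-2]) / 2.0                 # interior: central difference
    out[0] = -(3.0*y[0] - 4.0*y[1] + y[2]) / 2.0       # left edge, one-sided 2nd order
    out[-1] = (3.0*y[-1] - 4.0*y[-2] + y[-3]) / 2.0    # right edge, one-sided 2nd order
    return out / dx
# e.g. _demo_gradient_stencils([0., 1., 4., 9., 16.]) should agree with
# np.gradient(np.array([0., 1., 4., 9., 16.]), edge_order=2).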
def diff(a, n=1, axis=-1):
"""
Calculate the n-th order discrete difference along given axis.
The first order difference is given by ``out[n] = a[n+1] - a[n]`` along
the given axis, higher order differences are calculated by using `diff`
recursively.
Parameters
----------
a : array_like
Input array
n : int, optional
The number of times values are differenced.
axis : int, optional
The axis along which the difference is taken, default is the last axis.
Returns
-------
diff : ndarray
        The `n`-th order differences. The shape of the output is the same as `a`
except along `axis` where the dimension is smaller by `n`.
See Also
--------
gradient, ediff1d, cumsum
Examples
--------
>>> x = np.array([1, 2, 4, 7, 0])
>>> np.diff(x)
array([ 1, 2, 3, -7])
>>> np.diff(x, n=2)
array([ 1, 1, -10])
>>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
>>> np.diff(x)
array([[2, 3, 4],
[5, 1, 2]])
>>> np.diff(x, axis=0)
array([[-1, 2, 0, -2]])
"""
if n == 0:
return a
if n < 0:
raise ValueError(
"order must be non-negative but got " + repr(n))
a = asanyarray(a)
nd = len(a.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
if n > 1:
return diff(a[slice1]-a[slice2], n-1, axis=axis)
else:
return a[slice1]-a[slice2]
def interp(x, xp, fp, left=None, right=None, period=None):
"""
One-dimensional linear interpolation.
Returns the one-dimensional piecewise linear interpolant to a function
with given values at discrete data-points.
Parameters
----------
x : array_like
The x-coordinates of the interpolated values.
xp : 1-D sequence of floats
The x-coordinates of the data points, must be increasing if argument
`period` is not specified. Otherwise, `xp` is internally sorted after
normalizing the periodic boundaries with ``xp = xp % period``.
fp : 1-D sequence of floats
The y-coordinates of the data points, same length as `xp`.
left : float, optional
Value to return for `x < xp[0]`, default is `fp[0]`.
right : float, optional
Value to return for `x > xp[-1]`, default is `fp[-1]`.
period : None or float, optional
A period for the x-coordinates. This parameter allows the proper
interpolation of angular x-coordinates. Parameters `left` and `right`
are ignored if `period` is specified.
.. versionadded:: 1.10.0
Returns
-------
y : float or ndarray
The interpolated values, same shape as `x`.
Raises
------
ValueError
If `xp` and `fp` have different length
If `xp` or `fp` are not 1-D sequences
If `period == 0`
Notes
-----
Does not check that the x-coordinate sequence `xp` is increasing.
If `xp` is not increasing, the results are nonsense.
A simple check for increasing is::
np.all(np.diff(xp) > 0)
Examples
--------
>>> xp = [1, 2, 3]
>>> fp = [3, 2, 0]
>>> np.interp(2.5, xp, fp)
1.0
>>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp)
array([ 3. , 3. , 2.5 , 0.56, 0. ])
>>> UNDEF = -99.0
>>> np.interp(3.14, xp, fp, right=UNDEF)
-99.0
Plot an interpolant to the sine function:
>>> x = np.linspace(0, 2*np.pi, 10)
>>> y = np.sin(x)
>>> xvals = np.linspace(0, 2*np.pi, 50)
>>> yinterp = np.interp(xvals, x, y)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(xvals, yinterp, '-x')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
Interpolation with periodic x-coordinates:
>>> x = [-180, -170, -185, 185, -10, -5, 0, 365]
>>> xp = [190, -190, 350, -350]
>>> fp = [5, 10, 3, 4]
>>> np.interp(x, xp, fp, period=360)
array([7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75])
"""
if period is None:
if isinstance(x, (float, int, number)):
return compiled_interp([x], xp, fp, left, right).item()
elif isinstance(x, np.ndarray) and x.ndim == 0:
return compiled_interp([x], xp, fp, left, right).item()
else:
return compiled_interp(x, xp, fp, left, right)
else:
if period == 0:
raise ValueError("period must be a non-zero value")
period = abs(period)
left = None
right = None
return_array = True
if isinstance(x, (float, int, number)):
return_array = False
x = [x]
x = np.asarray(x, dtype=np.float64)
xp = np.asarray(xp, dtype=np.float64)
fp = np.asarray(fp, dtype=np.float64)
if xp.ndim != 1 or fp.ndim != 1:
raise ValueError("Data points must be 1-D sequences")
if xp.shape[0] != fp.shape[0]:
raise ValueError("fp and xp are not of the same length")
# normalizing periodic boundaries
x = x % period
xp = xp % period
asort_xp = np.argsort(xp)
xp = xp[asort_xp]
fp = fp[asort_xp]
xp = np.concatenate((xp[-1:]-period, xp, xp[0:1]+period))
fp = np.concatenate((fp[-1:], fp, fp[0:1]))
if return_array:
return compiled_interp(x, xp, fp, left, right)
else:
return compiled_interp(x, xp, fp, left, right).item()
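# A minimal sketch (plain NumPy) of the periodic normalization performed in
# the `period` branch above: wrap x and xp into [0, period), sort xp, and pad
# one point on each side so every query has two neighbours.
# `_demo_periodic_normalize` is a hypothetical helper for illustration only.
def _demo_periodic_normalize(x, xp, fp, period):
    import numpy as np
    x = np.asarray(x, dtype=np.float64) % period
    xp = np.asarray(xp, dtype=np.float64) % period
    fp = np.asarray(fp, dtype=np.float64)
    order = np.argsort(xp)
    xp, fp = xp[order], fp[order]
    # duplicate the end points, shifted by one period, to close the cycle
    xp = np.concatenate((xp[-1:] - period, xp, xp[:1] + period))
    fp = np.concatenate((fp[-1:], fp, fp[:1]))
    return x, xp, fp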
def angle(z, deg=0):
"""
Return the angle of the complex argument.
Parameters
----------
z : array_like
A complex number or sequence of complex numbers.
deg : bool, optional
Return angle in degrees if True, radians if False (default).
Returns
-------
angle : ndarray or scalar
The counterclockwise angle from the positive real axis on
the complex plane, with dtype as numpy.float64.
See Also
--------
arctan2
absolute
Examples
--------
>>> np.angle([1.0, 1.0j, 1+1j]) # in radians
array([ 0. , 1.57079633, 0.78539816])
>>> np.angle(1+1j, deg=True) # in degrees
45.0
"""
if deg:
fact = 180/pi
else:
fact = 1.0
z = asarray(z)
if (issubclass(z.dtype.type, _nx.complexfloating)):
zimag = z.imag
zreal = z.real
else:
zimag = 0
zreal = z
return arctan2(zimag, zreal) * fact
def unwrap(p, discont=pi, axis=-1):
"""
Unwrap by changing deltas between values to 2*pi complement.
Unwrap radian phase `p` by changing absolute jumps greater than
`discont` to their 2*pi complement along the given axis.
Parameters
----------
p : array_like
Input array.
discont : float, optional
Maximum discontinuity between values, default is ``pi``.
axis : int, optional
Axis along which unwrap will operate, default is the last axis.
Returns
-------
out : ndarray
Output array.
See Also
--------
rad2deg, deg2rad
Notes
-----
If the discontinuity in `p` is smaller than ``pi``, but larger than
`discont`, no unwrapping is done because taking the 2*pi complement
would only make the discontinuity larger.
Examples
--------
>>> phase = np.linspace(0, np.pi, num=5)
>>> phase[3:] += np.pi
>>> phase
array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531])
>>> np.unwrap(phase)
array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ])
"""
p = asarray(p)
nd = len(p.shape)
dd = diff(p, axis=axis)
slice1 = [slice(None, None)]*nd # full slices
slice1[axis] = slice(1, None)
ddmod = mod(dd + pi, 2*pi) - pi
_nx.copyto(ddmod, pi, where=(ddmod == -pi) & (dd > 0))
ph_correct = ddmod - dd
_nx.copyto(ph_correct, 0, where=abs(dd) < discont)
up = array(p, copy=True, dtype='d')
up[slice1] = p[slice1] + ph_correct.cumsum(axis)
return up
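# A small self-check (plain NumPy, illustrative only): wrap a linear phase
# ramp into (-pi, pi] and recover it with unwrap. `_demo_unwrap_ramp` is a
# hypothetical helper, not part of the module.
def _demo_unwrap_ramp():
    import numpy as np
    true_phase = np.linspace(0, 6 * np.pi, 20)
    wrapped = np.angle(np.exp(1j * true_phase))   # folded into (-pi, pi]
    recovered = np.unwrap(wrapped)
    return np.allclose(recovered, true_phase)     # True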
def sort_complex(a):
"""
Sort a complex array using the real part first, then the imaginary part.
Parameters
----------
a : array_like
Input array
Returns
-------
out : complex ndarray
Always returns a sorted complex array.
Examples
--------
>>> np.sort_complex([5, 3, 6, 2, 1])
array([ 1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j])
>>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j])
array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j])
"""
b = array(a, copy=True)
b.sort()
if not issubclass(b.dtype.type, _nx.complexfloating):
if b.dtype.char in 'bhBH':
return b.astype('F')
elif b.dtype.char == 'g':
return b.astype('G')
else:
return b.astype('D')
else:
return b
def trim_zeros(filt, trim='fb'):
"""
Trim the leading and/or trailing zeros from a 1-D array or sequence.
Parameters
----------
filt : 1-D array or sequence
Input array.
trim : str, optional
A string with 'f' representing trim from front and 'b' to trim from
back. Default is 'fb', trim zeros from both front and back of the
array.
Returns
-------
trimmed : 1-D array or sequence
The result of trimming the input. The input data type is preserved.
Examples
--------
>>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0))
>>> np.trim_zeros(a)
array([1, 2, 3, 0, 2, 1])
>>> np.trim_zeros(a, 'b')
array([0, 0, 0, 1, 2, 3, 0, 2, 1])
The input data type is preserved, list/tuple in means list/tuple out.
>>> np.trim_zeros([0, 1, 2, 0])
[1, 2]
"""
first = 0
trim = trim.upper()
if 'F' in trim:
for i in filt:
if i != 0.:
break
else:
first = first + 1
last = len(filt)
if 'B' in trim:
for i in filt[::-1]:
if i != 0.:
break
else:
last = last - 1
return filt[first:last]
@deprecate
def unique(x):
"""
This function is deprecated. Use numpy.lib.arraysetops.unique()
instead.
"""
try:
tmp = x.flatten()
if tmp.size == 0:
return tmp
tmp.sort()
idx = concatenate(([True], tmp[1:] != tmp[:-1]))
return tmp[idx]
except AttributeError:
items = sorted(set(x))
return asarray(items)
def extract(condition, arr):
"""
Return the elements of an array that satisfy some condition.
This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If
`condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``.
Note that `place` does the exact opposite of `extract`.
Parameters
----------
condition : array_like
An array whose nonzero or True entries indicate the elements of `arr`
to extract.
arr : array_like
Input array of the same size as `condition`.
Returns
-------
extract : ndarray
Rank 1 array of values from `arr` where `condition` is True.
See Also
--------
take, put, copyto, compress, place
Examples
--------
>>> arr = np.arange(12).reshape((3, 4))
>>> arr
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> condition = np.mod(arr, 3)==0
>>> condition
array([[ True, False, False, True],
[False, False, True, False],
[False, True, False, False]], dtype=bool)
>>> np.extract(condition, arr)
array([0, 3, 6, 9])
If `condition` is boolean:
>>> arr[condition]
array([0, 3, 6, 9])
"""
return _nx.take(ravel(arr), nonzero(ravel(condition))[0])
def place(arr, mask, vals):
"""
Change elements of an array based on conditional and input values.
Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that
`place` uses the first N elements of `vals`, where N is the number of
True values in `mask`, while `copyto` uses the elements where `mask`
is True.
Note that `extract` does the exact opposite of `place`.
Parameters
----------
arr : array_like
Array to put data into.
mask : array_like
        Boolean mask array. Must have the same size as `arr`.
vals : 1-D sequence
        Values to put into `arr`. Only the first N elements are used, where
N is the number of True values in `mask`. If `vals` is smaller
than N it will be repeated.
See Also
--------
copyto, put, take, extract
Examples
--------
>>> arr = np.arange(6).reshape(2, 3)
>>> np.place(arr, arr>2, [44, 55])
>>> arr
array([[ 0, 1, 2],
[44, 55, 44]])
"""
return _insert(arr, mask, vals)
def disp(mesg, device=None, linefeed=True):
"""
Display a message on a device.
Parameters
----------
mesg : str
Message to display.
device : object
Device to write message. If None, defaults to ``sys.stdout`` which is
very similar to ``print``. `device` needs to have ``write()`` and
``flush()`` methods.
linefeed : bool, optional
Option whether to print a line feed or not. Defaults to True.
Raises
------
AttributeError
If `device` does not have a ``write()`` or ``flush()`` method.
Examples
--------
Besides ``sys.stdout``, a file-like object can also be used as it has
both required methods:
>>> from StringIO import StringIO
>>> buf = StringIO()
>>> np.disp('"Display" in a file', device=buf)
>>> buf.getvalue()
'"Display" in a file\\n'
"""
if device is None:
device = sys.stdout
if linefeed:
device.write('%s\n' % mesg)
else:
device.write('%s' % mesg)
device.flush()
return
class vectorize(object):
"""
vectorize(pyfunc, otypes='', doc=None, excluded=None, cache=False)
Generalized function class.
Define a vectorized function which takes a nested sequence
of objects or numpy arrays as inputs and returns a
numpy array as output. The vectorized function evaluates `pyfunc` over
successive tuples of the input arrays like the python map function,
except it uses the broadcasting rules of numpy.
The data type of the output of `vectorized` is determined by calling
the function with the first element of the input. This can be avoided
by specifying the `otypes` argument.
Parameters
----------
pyfunc : callable
A python function or method.
otypes : str or list of dtypes, optional
The output data type. It must be specified as either a string of
typecode characters or a list of data type specifiers. There should
be one data type specifier for each output.
doc : str, optional
The docstring for the function. If `None`, the docstring will be the
``pyfunc.__doc__``.
excluded : set, optional
Set of strings or integers representing the positional or keyword
arguments for which the function will not be vectorized. These will be
passed directly to `pyfunc` unmodified.
.. versionadded:: 1.7.0
cache : bool, optional
If `True`, then cache the first function call that determines the number
of outputs if `otypes` is not provided.
.. versionadded:: 1.7.0
Returns
-------
vectorized : callable
Vectorized function.
Examples
--------
>>> def myfunc(a, b):
... "Return a-b if a>b, otherwise return a+b"
... if a > b:
... return a - b
... else:
... return a + b
>>> vfunc = np.vectorize(myfunc)
>>> vfunc([1, 2, 3, 4], 2)
array([3, 4, 1, 2])
The docstring is taken from the input function to `vectorize` unless it
is specified
>>> vfunc.__doc__
'Return a-b if a>b, otherwise return a+b'
>>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`')
>>> vfunc.__doc__
'Vectorized `myfunc`'
The output type is determined by evaluating the first element of the input,
unless it is specified
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.int32'>
>>> vfunc = np.vectorize(myfunc, otypes=[np.float])
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.float64'>
The `excluded` argument can be used to prevent vectorizing over certain
arguments. This can be useful for array-like arguments of a fixed length
such as the coefficients for a polynomial as in `polyval`:
>>> def mypolyval(p, x):
... _p = list(p)
... res = _p.pop(0)
... while _p:
... res = res*x + _p.pop(0)
... return res
>>> vpolyval = np.vectorize(mypolyval, excluded=['p'])
>>> vpolyval(p=[1, 2, 3], x=[0, 1])
array([3, 6])
Positional arguments may also be excluded by specifying their position:
>>> vpolyval.excluded.add(0)
>>> vpolyval([1, 2, 3], x=[0, 1])
array([3, 6])
Notes
-----
The `vectorize` function is provided primarily for convenience, not for
performance. The implementation is essentially a for loop.
If `otypes` is not specified, then a call to the function with the
first argument will be used to determine the number of outputs. The
results of this call will be cached if `cache` is `True` to prevent
calling the function twice. However, to implement the cache, the
original function must be wrapped which will slow down subsequent
calls, so only do this if your function is expensive.
    Support for the new keyword argument interface and the `excluded` argument
    further degrades performance.
"""
def __init__(self, pyfunc, otypes='', doc=None, excluded=None,
cache=False):
self.pyfunc = pyfunc
self.cache = cache
self._ufunc = None # Caching to improve default performance
if doc is None:
self.__doc__ = pyfunc.__doc__
else:
self.__doc__ = doc
if isinstance(otypes, str):
self.otypes = otypes
for char in self.otypes:
if char not in typecodes['All']:
raise ValueError(
"Invalid otype specified: %s" % (char,))
elif iterable(otypes):
self.otypes = ''.join([_nx.dtype(x).char for x in otypes])
else:
raise ValueError(
"Invalid otype specification")
# Excluded variable support
if excluded is None:
excluded = set()
self.excluded = set(excluded)
def __call__(self, *args, **kwargs):
"""
Return arrays with the results of `pyfunc` broadcast (vectorized) over
`args` and `kwargs` not in `excluded`.
"""
excluded = self.excluded
if not kwargs and not excluded:
func = self.pyfunc
vargs = args
else:
# The wrapper accepts only positional arguments: we use `names` and
# `inds` to mutate `the_args` and `kwargs` to pass to the original
# function.
nargs = len(args)
names = [_n for _n in kwargs if _n not in excluded]
inds = [_i for _i in range(nargs) if _i not in excluded]
the_args = list(args)
def func(*vargs):
for _n, _i in enumerate(inds):
the_args[_i] = vargs[_n]
kwargs.update(zip(names, vargs[len(inds):]))
return self.pyfunc(*the_args, **kwargs)
vargs = [args[_i] for _i in inds]
vargs.extend([kwargs[_n] for _n in names])
return self._vectorize_call(func=func, args=vargs)
def _get_ufunc_and_otypes(self, func, args):
"""Return (ufunc, otypes)."""
# frompyfunc will fail if args is empty
if not args:
raise ValueError('args can not be empty')
if self.otypes:
otypes = self.otypes
nout = len(otypes)
# Note logic here: We only *use* self._ufunc if func is self.pyfunc
# even though we set self._ufunc regardless.
if func is self.pyfunc and self._ufunc is not None:
ufunc = self._ufunc
else:
ufunc = self._ufunc = frompyfunc(func, len(args), nout)
else:
# Get number of outputs and output types by calling the function on
# the first entries of args. We also cache the result to prevent
# the subsequent call when the ufunc is evaluated.
# Assumes that ufunc first evaluates the 0th elements in the input
# arrays (the input values are not checked to ensure this)
inputs = [asarray(_a).flat[0] for _a in args]
outputs = func(*inputs)
# Performance note: profiling indicates that -- for simple
# functions at least -- this wrapping can almost double the
# execution time.
# Hence we make it optional.
if self.cache:
_cache = [outputs]
def _func(*vargs):
if _cache:
return _cache.pop()
else:
return func(*vargs)
else:
_func = func
if isinstance(outputs, tuple):
nout = len(outputs)
else:
nout = 1
outputs = (outputs,)
otypes = ''.join([asarray(outputs[_k]).dtype.char
for _k in range(nout)])
# Performance note: profiling indicates that creating the ufunc is
# not a significant cost compared with wrapping so it seems not
# worth trying to cache this.
ufunc = frompyfunc(_func, len(args), nout)
return ufunc, otypes
def _vectorize_call(self, func, args):
"""Vectorized call to `func` over positional `args`."""
if not args:
_res = func()
else:
ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)
# Convert args to object arrays first
inputs = [array(_a, copy=False, subok=True, dtype=object)
for _a in args]
outputs = ufunc(*inputs)
if ufunc.nout == 1:
_res = array(outputs,
copy=False, subok=True, dtype=otypes[0])
else:
_res = tuple([array(_x, copy=False, subok=True, dtype=_t)
for _x, _t in zip(outputs, otypes)])
return _res
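# An illustrative sketch (assumption: plain NumPy) of the point made in the
# Notes above: when `otypes` is given, the trial call used to infer the output
# dtype is skipped. The counter only makes that visible; the helper
# `_demo_vectorize_otypes` is hypothetical.
def _demo_vectorize_otypes():
    import numpy as np
    calls = {"n": 0}
    def f(a):
        calls["n"] += 1
        return a + 0.5
    vf = np.vectorize(f, otypes=[float])
    vf(np.arange(3))
    # 3 calls with otypes given; without otypes it would be 4, because the
    # first element is evaluated once just to determine the output dtype.
    return calls["n"]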
def cov(m, y=None, rowvar=1, bias=0, ddof=None, fweights=None, aweights=None):
"""
Estimate a covariance matrix, given data and weights.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element :math:`C_{ij}` is the covariance of
:math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
of :math:`x_i`.
See the notes for an outline of the algorithm.
Parameters
----------
m : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same form
as that of `m`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` corresponds to the
number of observations given (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using the
keyword ``ddof`` in numpy versions >= 1.5.
ddof : int, optional
If not ``None`` the default value implied by `bias` is overridden.
Note that ``ddof=1`` will return the unbiased estimate, even if both
`fweights` and `aweights` are specified, and ``ddof=0`` will return
the simple average. See the notes for the details. The default value
is ``None``.
.. versionadded:: 1.5
fweights : array_like, int, optional
        1-D array of integer frequency weights; the number of times each
observation vector should be repeated.
.. versionadded:: 1.10
aweights : array_like, optional
1-D array of observation vector weights. These relative weights are
typically large for observations considered "important" and smaller for
observations considered less "important". If ``ddof=0`` the array of
weights can be used to assign probabilities to observation vectors.
.. versionadded:: 1.10
Returns
-------
out : ndarray
The covariance matrix of the variables.
See Also
--------
corrcoef : Normalized covariance matrix
Notes
-----
Assume that the observations are in the columns of the observation
array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The
steps to compute the weighted covariance are as follows::
>>> w = f * a
>>> v1 = np.sum(w)
>>> v2 = np.sum(w * a)
>>> m -= np.sum(m * w, axis=1, keepdims=True) / v1
>>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2)
Note that when ``a == 1``, the normalization factor
``v1 / (v1**2 - ddof * v2)`` goes over to ``1 / (np.sum(f) - ddof)``
as it should.
Examples
--------
Consider two variables, :math:`x_0` and :math:`x_1`, which
correlate perfectly, but in opposite directions:
>>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
>>> x
array([[0, 1, 2],
[2, 1, 0]])
Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
matrix shows this clearly:
>>> np.cov(x)
array([[ 1., -1.],
[-1., 1.]])
Note that element :math:`C_{0,1}`, which shows the correlation between
:math:`x_0` and :math:`x_1`, is negative.
Further, note how `x` and `y` are combined:
>>> x = [-2.1, -1, 4.3]
>>> y = [3, 1.1, 0.12]
>>> X = np.vstack((x,y))
>>> print np.cov(X)
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print np.cov(x, y)
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print np.cov(x)
11.71
"""
# Check inputs
if ddof is not None and ddof != int(ddof):
raise ValueError(
"ddof must be integer")
# Handles complex arrays too
m = np.asarray(m)
if y is None:
dtype = np.result_type(m, np.float64)
else:
y = np.asarray(y)
dtype = np.result_type(m, y, np.float64)
X = array(m, ndmin=2, dtype=dtype)
if rowvar == 0 and X.shape[0] != 1:
X = X.T
if X.shape[0] == 0:
return np.array([]).reshape(0, 0)
if y is not None:
y = array(y, copy=False, ndmin=2, dtype=dtype)
if rowvar == 0 and y.shape[0] != 1:
y = y.T
X = np.vstack((X, y))
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
# Get the product of frequencies and weights
w = None
if fweights is not None:
fweights = np.asarray(fweights, dtype=np.float)
if not np.all(fweights == np.around(fweights)):
raise TypeError(
"fweights must be integer")
if fweights.ndim > 1:
raise RuntimeError(
"cannot handle multidimensional fweights")
if fweights.shape[0] != X.shape[1]:
raise RuntimeError(
"incompatible numbers of samples and fweights")
if any(fweights < 0):
raise ValueError(
"fweights cannot be negative")
w = fweights
if aweights is not None:
aweights = np.asarray(aweights, dtype=np.float)
if aweights.ndim > 1:
raise RuntimeError(
"cannot handle multidimensional aweights")
if aweights.shape[0] != X.shape[1]:
raise RuntimeError(
"incompatible numbers of samples and aweights")
if any(aweights < 0):
raise ValueError(
"aweights cannot be negative")
if w is None:
w = aweights
else:
w *= aweights
avg, w_sum = average(X, axis=1, weights=w, returned=True)
w_sum = w_sum[0]
# Determine the normalization
if w is None:
fact = float(X.shape[1] - ddof)
elif ddof == 0:
fact = w_sum
elif aweights is None:
fact = w_sum - ddof
else:
fact = w_sum - ddof*sum(w*aweights)/w_sum
if fact <= 0:
warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning)
fact = 0.0
X -= avg[:, None]
if w is None:
X_T = X.T
else:
X_T = (X*w).T
return (dot(X, X_T.conj())/fact).squeeze()
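# A small consistency check (plain NumPy, illustrative only): integer
# `fweights` behave like repeating observations, so weighting the first
# observation by 2 matches duplicating its column. `_demo_cov_fweights` is a
# hypothetical helper, not part of the module.
def _demo_cov_fweights():
    import numpy as np
    x = np.array([[0., 1., 2.],
                  [2., 1., 0.]])
    weighted = np.cov(x, fweights=[2, 1, 1])
    repeated = np.cov(np.hstack((x[:, :1], x)))   # first observation twice
    return np.allclose(weighted, repeated)        # True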
def corrcoef(x, y=None, rowvar=1, bias=np._NoValue, ddof=np._NoValue):
"""
Return Pearson product-moment correlation coefficients.
Please refer to the documentation for `cov` for more detail. The
relationship between the correlation coefficient matrix, `R`, and the
covariance matrix, `C`, is
.. math:: R_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } }
The values of `R` are between -1 and 1, inclusive.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `x` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
shape as `x`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : _NoValue, optional
        Has no effect, do not use.
.. deprecated:: 1.10.0
ddof : _NoValue, optional
        Has no effect, do not use.
.. deprecated:: 1.10.0
Returns
-------
R : ndarray
The correlation coefficient matrix of the variables.
See Also
--------
cov : Covariance matrix
Notes
-----
This function accepts but discards arguments `bias` and `ddof`. This is
for backwards compatibility with previous versions of this function. These
arguments had no effect on the return values of the function and can be
safely ignored in this and previous versions of numpy.
"""
if bias is not np._NoValue or ddof is not np._NoValue:
# 2015-03-15, 1.10
        warnings.warn('bias and ddof have no effect and are deprecated',
DeprecationWarning)
c = cov(x, y, rowvar)
try:
d = diag(c)
except ValueError: # scalar covariance
# nan if incorrect value (nan, inf, 0), 1 otherwise
return c / c
return c / sqrt(multiply.outer(d, d))
def blackman(M):
"""
Return the Blackman window.
The Blackman window is a taper formed by using the first three
terms of a summation of cosines. It was designed to have close to the
minimal leakage possible. It is close to optimal, only slightly worse
than a Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value one
appears only if the number of samples is odd).
See Also
--------
bartlett, hamming, hanning, kaiser
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M)
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
    as the Kaiser window.
References
----------
Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
Dover Publications, New York.
Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
>>> np.blackman(12)
array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01,
4.14397981e-01, 7.36045180e-01, 9.67046769e-01,
9.67046769e-01, 7.36045180e-01, 4.14397981e-01,
1.59903635e-01, 3.26064346e-02, -1.38777878e-17])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.blackman(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.42 - 0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1))
def bartlett(M):
"""
Return the Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : array
The triangular window, with the maximum value normalized to one
(the value one appears only if the number of samples is odd), with
the first and last samples equal to zero.
See Also
--------
blackman, hamming, hanning, kaiser
Notes
-----
The Bartlett window is defined as
.. math:: w(n) = \\frac{2}{M-1} \\left(
\\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
\\right)
Most references to the Bartlett window come from the signal
processing literature, where it is used as one of many windowing
functions for smoothing values. Note that convolution with this
window produces linear interpolation. It is also known as an
    apodization (which means "removing the foot", i.e. smoothing
discontinuities at the beginning and end of the sampled signal) or
    tapering function. The Fourier transform of the Bartlett window is the product
of two sinc functions.
Note the excellent discussion in Kanasewich.
References
----------
.. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 109-110.
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
>>> np.bartlett(12)
array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273,
0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636,
0.18181818, 0. ])
Plot the window and its frequency response (requires SciPy and matplotlib):
>>> from numpy.fft import fft, fftshift
>>> window = np.bartlett(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return where(less_equal(n, (M-1)/2.0), 2.0*n/(M-1), 2.0 - 2.0*n/(M-1))
def hanning(M):
"""
Return the Hanning window.
The Hanning window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray, shape(M,)
The window, with the maximum value normalized to one (the value
one appears only if `M` is odd).
See Also
--------
bartlett, blackman, hamming, kaiser
Notes
-----
The Hanning window is defined as
.. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
The Hanning was named for Julius von Hann, an Austrian meteorologist.
It is also known as the Cosine Bell. Some authors prefer that it be
called a Hann window, to help avoid confusion with the very similar
Hamming window.
Most references to the Hanning window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hanning(12)
array([ 0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037,
0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249,
0.07937323, 0. ])
Plot the window and its frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hanning(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of the Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.5 - 0.5*cos(2.0*pi*n/(M-1))
def hamming(M):
"""
Return the Hamming window.
The Hamming window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hanning, kaiser
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
The Hamming was named for R. W. Hamming, an associate of J. W. Tukey
and is described in Blackman and Tukey. It was recommended for
smoothing the truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hamming(12)
array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594,
0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909,
0.15302337, 0.08 ])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hamming(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.54 - 0.46*cos(2.0*pi*n/(M-1))
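# An illustrative sketch (plain NumPy): hanning and hamming above are both
# instances of the generalized raised-cosine window
# a0 - (1 - a0)*cos(2*pi*n/(M-1)), with a0 = 0.5 and a0 = 0.54 respectively.
# `_demo_raised_cosine` is a hypothetical helper (valid for M >= 2).
def _demo_raised_cosine(M, a0):
    import numpy as np
    n = np.arange(M)
    return a0 - (1.0 - a0) * np.cos(2.0 * np.pi * n / (M - 1))
# _demo_raised_cosine(12, 0.5)  matches np.hanning(12)
# _demo_raised_cosine(12, 0.54) matches np.hamming(12)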
## Code from cephes for i0
_i0A = [
-4.41534164647933937950E-18,
3.33079451882223809783E-17,
-2.43127984654795469359E-16,
1.71539128555513303061E-15,
-1.16853328779934516808E-14,
7.67618549860493561688E-14,
-4.85644678311192946090E-13,
2.95505266312963983461E-12,
-1.72682629144155570723E-11,
9.67580903537323691224E-11,
-5.18979560163526290666E-10,
2.65982372468238665035E-9,
-1.30002500998624804212E-8,
6.04699502254191894932E-8,
-2.67079385394061173391E-7,
1.11738753912010371815E-6,
-4.41673835845875056359E-6,
1.64484480707288970893E-5,
-5.75419501008210370398E-5,
1.88502885095841655729E-4,
-5.76375574538582365885E-4,
1.63947561694133579842E-3,
-4.32430999505057594430E-3,
1.05464603945949983183E-2,
-2.37374148058994688156E-2,
4.93052842396707084878E-2,
-9.49010970480476444210E-2,
1.71620901522208775349E-1,
-3.04682672343198398683E-1,
6.76795274409476084995E-1
]
_i0B = [
-7.23318048787475395456E-18,
-4.83050448594418207126E-18,
4.46562142029675999901E-17,
3.46122286769746109310E-17,
-2.82762398051658348494E-16,
-3.42548561967721913462E-16,
1.77256013305652638360E-15,
3.81168066935262242075E-15,
-9.55484669882830764870E-15,
-4.15056934728722208663E-14,
1.54008621752140982691E-14,
3.85277838274214270114E-13,
7.18012445138366623367E-13,
-1.79417853150680611778E-12,
-1.32158118404477131188E-11,
-3.14991652796324136454E-11,
1.18891471078464383424E-11,
4.94060238822496958910E-10,
3.39623202570838634515E-9,
2.26666899049817806459E-8,
2.04891858946906374183E-7,
2.89137052083475648297E-6,
6.88975834691682398426E-5,
3.36911647825569408990E-3,
8.04490411014108831608E-1
]
def _chbevl(x, vals):
b0 = vals[0]
b1 = 0.0
for i in range(1, len(vals)):
b2 = b1
b1 = b0
b0 = x*b1 - b2 + vals[i]
return 0.5*(b0 - b2)
def _i0_1(x):
return exp(x) * _chbevl(x/2.0-2, _i0A)
def _i0_2(x):
return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x)
def i0(x):
"""
Modified Bessel function of the first kind, order 0.
Usually denoted :math:`I_0`. This function does broadcast, but will *not*
"up-cast" int dtype arguments unless accompanied by at least one float or
complex dtype argument (see Raises below).
Parameters
----------
x : array_like, dtype float or complex
Argument of the Bessel function.
Returns
-------
out : ndarray, shape = x.shape, dtype = x.dtype
The modified Bessel function evaluated at each of the elements of `x`.
Raises
------
TypeError: array cannot be safely cast to required type
If argument consists exclusively of int dtypes.
See Also
--------
scipy.special.iv, scipy.special.ive
Notes
-----
We use the algorithm published by Clenshaw [1]_ and referenced by
Abramowitz and Stegun [2]_, for which the function domain is
partitioned into the two intervals [0,8] and (8,inf), and Chebyshev
polynomial expansions are employed in each interval. Relative error on
the domain [0,30] using IEEE arithmetic is documented [3]_ as having a
peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000).
References
----------
.. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in
*National Physical Laboratory Mathematical Tables*, vol. 5, London:
Her Majesty's Stationery Office, 1962.
.. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical
Functions*, 10th printing, New York: Dover, 1964, pp. 379.
http://www.math.sfu.ca/~cbm/aands/page_379.htm
.. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html
Examples
--------
>>> np.i0([0.])
array(1.0)
>>> np.i0([0., 1. + 2j])
array([ 1.00000000+0.j , 0.18785373+0.64616944j])
"""
x = atleast_1d(x).copy()
y = empty_like(x)
ind = (x < 0)
x[ind] = -x[ind]
ind = (x <= 8.0)
y[ind] = _i0_1(x[ind])
ind2 = ~ind
y[ind2] = _i0_2(x[ind2])
return y.squeeze()
## End of cephes code for i0
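# A cross-check sketch (plain NumPy; illustrative only): the Chebyshev
# approximation above can be compared against the defining power series
# I0(x) = sum_k (x**2 / 4)**k / (k!)**2, which converges quickly for moderate
# x. `_demo_i0_series` is a hypothetical helper, not part of the module.
def _demo_i0_series(x, terms=30):
    from math import factorial
    return sum((x * x / 4.0) ** k / factorial(k) ** 2 for k in range(terms))
# e.g. _demo_i0_series(1.0) is approximately 1.2660658..., and the difference
# from np.i0(1.0) is on the order of floating-point rounding error.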
def kaiser(M, beta):
"""
Return the Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
beta : float
Shape parameter for window.
Returns
-------
out : array
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hamming, hanning
Notes
-----
The Kaiser window is defined as
.. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}}
\\right)/I_0(\\beta)
with
.. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2},
where :math:`I_0` is the modified zeroth-order Bessel function.
The Kaiser was named for Jim Kaiser, who discovered a simple
approximation to the DPSS window based on Bessel functions. The Kaiser
window is a very good approximation to the Digital Prolate Spheroidal
Sequence, or Slepian window, which is the transform which maximizes the
energy in the main lobe of the window relative to total energy.
The Kaiser can approximate many other windows by varying the beta
parameter.
==== =======================
beta Window shape
==== =======================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hanning
8.6 Similar to a Blackman
==== =======================
A beta value of 14 is probably a good starting point. Note that as beta
gets large, the window narrows, and so the number of samples needs to be
large enough to sample the increasingly narrow spike, otherwise NaNs will
get returned.
Most references to the Kaiser window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
John Wiley and Sons, New York, (1966).
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
Examples
--------
>>> np.kaiser(12, 14)
array([ 7.72686684e-06, 3.46009194e-03, 4.65200189e-02,
2.29737120e-01, 5.99885316e-01, 9.45674898e-01,
9.45674898e-01, 5.99885316e-01, 2.29737120e-01,
4.65200189e-02, 3.46009194e-03, 7.72686684e-06])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.kaiser(51, 14)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
from numpy.dual import i0
if M == 1:
return np.array([1.])
n = arange(0, M)
alpha = (M-1)/2.0
return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))
def sinc(x):
"""
Return the sinc function.
The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`.
Parameters
----------
x : ndarray
        Array (possibly multi-dimensional) of values for which to
calculate ``sinc(x)``.
Returns
-------
out : ndarray
``sinc(x)``, which has the same shape as the input.
Notes
-----
``sinc(0)`` is the limit value 1.
The name sinc is short for "sine cardinal" or "sinus cardinalis".
The sinc function is used in various signal processing applications,
including in anti-aliasing, in the construction of a Lanczos resampling
filter, and in interpolation.
For bandlimited interpolation of discrete-time signals, the ideal
interpolation kernel is proportional to the sinc function.
References
----------
.. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web
Resource. http://mathworld.wolfram.com/SincFunction.html
.. [2] Wikipedia, "Sinc function",
http://en.wikipedia.org/wiki/Sinc_function
Examples
--------
>>> x = np.linspace(-4, 4, 41)
>>> np.sinc(x)
array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02,
-8.90384387e-02, -5.84680802e-02, 3.89804309e-17,
6.68206631e-02, 1.16434881e-01, 1.26137788e-01,
8.50444803e-02, -3.89804309e-17, -1.03943254e-01,
-1.89206682e-01, -2.16236208e-01, -1.55914881e-01,
3.89804309e-17, 2.33872321e-01, 5.04551152e-01,
7.56826729e-01, 9.35489284e-01, 1.00000000e+00,
9.35489284e-01, 7.56826729e-01, 5.04551152e-01,
2.33872321e-01, 3.89804309e-17, -1.55914881e-01,
-2.16236208e-01, -1.89206682e-01, -1.03943254e-01,
-3.89804309e-17, 8.50444803e-02, 1.26137788e-01,
1.16434881e-01, 6.68206631e-02, 3.89804309e-17,
-5.84680802e-02, -8.90384387e-02, -8.40918587e-02,
-4.92362781e-02, -3.89804309e-17])
>>> plt.plot(x, np.sinc(x))
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Sinc Function")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("X")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
It works in 2-D as well:
>>> x = np.linspace(-4, 4, 401)
>>> xx = np.outer(x, x)
>>> plt.imshow(np.sinc(xx))
<matplotlib.image.AxesImage object at 0x...>
"""
x = np.asanyarray(x)
y = pi * where(x == 0, 1.0e-20, x)
return sin(y)/y
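# An alternative sketch (plain NumPy, illustrative only): the implementation
# above avoids 0/0 by substituting a tiny value for x == 0; the same result,
# with sinc(0) == 1 exactly, can be obtained by dividing only where the
# denominator is nonzero. `_demo_sinc_piecewise` is a hypothetical helper.
def _demo_sinc_piecewise(x):
    import numpy as np
    x = np.atleast_1d(np.asarray(x, dtype=float))
    y = np.pi * x
    out = np.ones_like(y)              # sinc(0) == 1 by definition
    nz = (y != 0)
    out[nz] = np.sin(y[nz]) / y[nz]
    return out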
def msort(a):
"""
Return a copy of an array sorted along the first axis.
Parameters
----------
a : array_like
Array to be sorted.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
sort
Notes
-----
``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``.
"""
b = array(a, subok=True, copy=True)
b.sort(0)
return b
def _ureduce(a, func, **kwargs):
"""
Internal Function.
Call `func` with `a` as first argument swapping the axes to use extended
axis on functions that don't support it natively.
Returns result and a.shape with axis dims set to 1.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
func : callable
        Reduction function capable of receiving an axis argument.
        It is called with `a` as first argument followed by `kwargs`.
kwargs : keyword arguments
additional keyword arguments to pass to `func`.
Returns
-------
result : tuple
Result of func(a, **kwargs) and a.shape with axis dims set to 1
which can be used to reshape the result to the same shape a ufunc with
keepdims=True would produce.
"""
a = np.asanyarray(a)
axis = kwargs.get('axis', None)
if axis is not None:
keepdim = list(a.shape)
nd = a.ndim
try:
axis = operator.index(axis)
if axis >= nd or axis < -nd:
raise IndexError("axis %d out of bounds (%d)" % (axis, a.ndim))
keepdim[axis] = 1
except TypeError:
sax = set()
for x in axis:
if x >= nd or x < -nd:
raise IndexError("axis %d out of bounds (%d)" % (x, nd))
if x in sax:
raise ValueError("duplicate value in axis")
sax.add(x % nd)
keepdim[x] = 1
keep = sax.symmetric_difference(frozenset(range(nd)))
nkeep = len(keep)
# swap axis that should not be reduced to front
for i, s in enumerate(sorted(keep)):
a = a.swapaxes(i, s)
# merge reduced axis
a = a.reshape(a.shape[:nkeep] + (-1,))
kwargs['axis'] = -1
else:
keepdim = [1] * a.ndim
r = func(a, **kwargs)
return r, keepdim
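# A minimal check (plain NumPy, illustrative only) of the axis-merging trick
# above: swapping the kept axes to the front and flattening the reduced ones
# is equivalent to reducing over the merged data directly.
# `_demo_merge_axes` is a hypothetical helper, not part of the module.
def _demo_merge_axes():
    import numpy as np
    a = np.arange(24.).reshape(2, 3, 4)
    direct = np.median(a.reshape(2, -1), axis=-1)            # merge axes 1 and 2 by hand
    return np.allclose(direct, np.median(a, axis=(1, 2)))    # True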
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
"""
Compute the median along the specified axis.
Returns the median of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int or sequence of int, optional
Axis along which the medians are computed. The default (axis=None)
is to compute the median along a flattened version of the array.
A sequence of axes is supported since version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape and buffer length as the expected output, but the
type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array (a) for
calculations. The input array will be modified by the call to
median. This will save memory when you do not need to preserve the
contents of the input array. Treat the input as undefined, but it
will probably be fully or partially sorted. Default is False. Note
that, if `overwrite_input` is True and the input is not already an
ndarray, an error will be raised.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
.. versionadded:: 1.9.0
Returns
-------
median : ndarray
A new array holding the result (unless `out` is specified, in which
case that array is returned instead). If the input contains
integers, or floats of smaller precision than 64, then the output
data-type is float64. Otherwise, the output data-type is the same
as that of the input.
See Also
--------
mean, percentile
Notes
-----
Given a vector V of length N, the median of V is the middle value of
a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/2]``, when N is
odd. When N is even, it is the average of the two middle values of
``V_sorted``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.median(a)
3.5
>>> np.median(a, axis=0)
array([ 6.5, 4.5, 2.5])
>>> np.median(a, axis=1)
array([ 7., 2.])
>>> m = np.median(a, axis=0)
>>> out = np.zeros_like(m)
>>> np.median(a, axis=0, out=m)
array([ 6.5, 4.5, 2.5])
>>> m
array([ 6.5, 4.5, 2.5])
>>> b = a.copy()
>>> np.median(b, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.median(b, axis=None, overwrite_input=True)
3.5
>>> assert not np.all(a==b)
"""
r, k = _ureduce(a, func=_median, axis=axis, out=out,
overwrite_input=overwrite_input)
if keepdims:
return r.reshape(k)
else:
return r
def _median(a, axis=None, out=None, overwrite_input=False):
# can't reasonably be implemented in terms of percentile as we have to
# call mean to not break astropy
a = np.asanyarray(a)
# Set the partition indexes
if axis is None:
sz = a.size
else:
sz = a.shape[axis]
if sz % 2 == 0:
szh = sz // 2
kth = [szh - 1, szh]
else:
kth = [(sz - 1) // 2]
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
kth.append(-1)
if overwrite_input:
if axis is None:
part = a.ravel()
part.partition(kth)
else:
a.partition(kth, axis=axis)
part = a
else:
part = partition(a, kth, axis=axis)
if part.shape == ():
# make 0-D arrays work
return part.item()
if axis is None:
axis = 0
indexer = [slice(None)] * part.ndim
index = part.shape[axis] // 2
if part.shape[axis] % 2 == 1:
# index with slice to allow mean (below) to work
indexer[axis] = slice(index, index+1)
else:
indexer[axis] = slice(index-1, index+1)
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
# warn and return nans like mean would
rout = mean(part[indexer], axis=axis, out=out)
part = np.rollaxis(part, axis, part.ndim)
n = np.isnan(part[..., -1])
if rout.ndim == 0:
if n == True:
warnings.warn("Invalid value encountered in median",
RuntimeWarning)
if out is not None:
out[...] = a.dtype.type(np.nan)
rout = out
else:
rout = a.dtype.type(np.nan)
elif np.count_nonzero(n.ravel()) > 0:
warnings.warn("Invalid value encountered in median for" +
" %d results" % np.count_nonzero(n.ravel()),
RuntimeWarning)
rout[n] = np.nan
return rout
else:
# if there are no nans
# Use mean in odd and even case to coerce data type
# and check, use out array.
return mean(part[indexer], axis=axis, out=out)
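# Worked sketch (added; hypothetical helper, never called): for an even-sized
# input the partition indices are ``[sz//2 - 1, sz//2]`` and the two selected
# order statistics are averaged, which reproduces np.median.
def _median_kth_example():
    vals = np.array([7.0, 1.0, 5.0, 3.0, 9.0, 11.0])    # sz == 6, kth == [2, 3]
    part = np.partition(vals, [2, 3])
    assert (part[2] + part[3]) / 2.0 == np.median(vals)  # both give 6.0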
def percentile(a, q, axis=None, out=None,
overwrite_input=False, interpolation='linear', keepdims=False):
"""
Compute the qth percentile of the data along the specified axis.
Returns the qth percentile of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
q : float in range of [0,100] (or sequence of floats)
Percentile to compute which must be between 0 and 100 inclusive.
axis : int or sequence of int, optional
Axis along which the percentiles are computed. The default (None)
is to compute the percentiles along a flattened version of the array.
A sequence of axes is supported since version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array `a` for
calculations. The input array will be modified by the call to
percentile. This will save memory when you do not need to preserve
the contents of the input array. In this case you should not make
any assumptions about the content of the passed in array `a` after
this function completes -- treat it as undefined. Default is False.
Note that, if the `a` input is not already an array this parameter
will have no effect, `a` will be converted to an array internally
regardless of the value of this parameter.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
.. versionadded:: 1.9.0
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
.. versionadded:: 1.9.0
Returns
-------
percentile : scalar or ndarray
If a single percentile `q` is given and axis=None a scalar is
returned. If multiple percentiles `q` are given an array holding
the result is returned. The results are listed in the first axis.
(If `out` is specified, in which case that array is returned
instead). If the input contains integers, or floats of smaller
precision than 64, then the output data-type is float64. Otherwise,
the output data-type is the same as that of the input.
See Also
--------
mean, median
Notes
-----
Given a vector V of length N, the q-th percentile of V is the q-th ranked
value in a sorted copy of V. The values and distances of the two
nearest neighbors as well as the `interpolation` parameter will
determine the percentile if the normalized ranking does not match q
exactly. This function is the same as the median if ``q=50``, the same
as the minimum if ``q=0`` and the same as the maximum if ``q=100``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.percentile(a, 50)
array([ 3.5])
>>> np.percentile(a, 50, axis=0)
array([[ 6.5, 4.5, 2.5]])
>>> np.percentile(a, 50, axis=1)
array([[ 7.],
[ 2.]])
>>> m = np.percentile(a, 50, axis=0)
>>> out = np.zeros_like(m)
>>> np.percentile(a, 50, axis=0, out=m)
array([[ 6.5, 4.5, 2.5]])
>>> m
array([[ 6.5, 4.5, 2.5]])
>>> b = a.copy()
>>> np.percentile(b, 50, axis=1, overwrite_input=True)
array([[ 7.],
[ 2.]])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.percentile(b, 50, axis=None, overwrite_input=True)
array([ 3.5])
"""
q = array(q, dtype=np.float64, copy=True)
r, k = _ureduce(a, func=_percentile, q=q, axis=axis, out=out,
overwrite_input=overwrite_input,
interpolation=interpolation)
if keepdims:
if q.ndim == 0:
return r.reshape(k)
else:
return r.reshape([len(q)] + k)
else:
return r
def _percentile(a, q, axis=None, out=None,
overwrite_input=False, interpolation='linear', keepdims=False):
a = asarray(a)
if q.ndim == 0:
# Do not allow 0-d arrays because following code fails for scalar
zerod = True
q = q[None]
else:
zerod = False
# avoid expensive reductions, relevant for arrays with < O(1000) elements
if q.size < 10:
for i in range(q.size):
if q[i] < 0. or q[i] > 100.:
raise ValueError("Percentiles must be in the range [0,100]")
q[i] /= 100.
else:
# faster than any()
if np.count_nonzero(q < 0.) or np.count_nonzero(q > 100.):
raise ValueError("Percentiles must be in the range [0,100]")
q /= 100.
# prepare a for partitioning
if overwrite_input:
if axis is None:
ap = a.ravel()
else:
ap = a
else:
if axis is None:
ap = a.flatten()
else:
ap = a.copy()
if axis is None:
axis = 0
Nx = ap.shape[axis]
indices = q * (Nx - 1)
# round fractional indices according to interpolation method
if interpolation == 'lower':
indices = floor(indices).astype(intp)
elif interpolation == 'higher':
indices = ceil(indices).astype(intp)
elif interpolation == 'midpoint':
indices = floor(indices) + 0.5
elif interpolation == 'nearest':
indices = around(indices).astype(intp)
elif interpolation == 'linear':
pass # keep index as fraction and interpolate
else:
raise ValueError(
"interpolation can only be 'linear', 'lower' 'higher', "
"'midpoint', or 'nearest'")
n = np.array(False, dtype=bool) # check for nan's flag
if indices.dtype == intp: # take the points along axis
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices = concatenate((indices, [-1]))
ap.partition(indices, axis=axis)
# ensure axis with qth is first
ap = np.rollaxis(ap, axis, 0)
axis = 0
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices = indices[:-1]
n = np.isnan(ap[-1:, ...])
if zerod:
indices = indices[0]
r = take(ap, indices, axis=axis, out=out)
else: # weight the points above and below the indices
indices_below = floor(indices).astype(intp)
indices_above = indices_below + 1
indices_above[indices_above > Nx - 1] = Nx - 1
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices_above = concatenate((indices_above, [-1]))
weights_above = indices - indices_below
weights_below = 1.0 - weights_above
weights_shape = [1, ] * ap.ndim
weights_shape[axis] = len(indices)
weights_below.shape = weights_shape
weights_above.shape = weights_shape
ap.partition(concatenate((indices_below, indices_above)), axis=axis)
# ensure axis with qth is first
ap = np.rollaxis(ap, axis, 0)
weights_below = np.rollaxis(weights_below, axis, 0)
weights_above = np.rollaxis(weights_above, axis, 0)
axis = 0
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices_above = indices_above[:-1]
n = np.isnan(ap[-1:, ...])
x1 = take(ap, indices_below, axis=axis) * weights_below
x2 = take(ap, indices_above, axis=axis) * weights_above
# ensure axis with qth is first
x1 = np.rollaxis(x1, axis, 0)
x2 = np.rollaxis(x2, axis, 0)
if zerod:
x1 = x1.squeeze(0)
x2 = x2.squeeze(0)
if out is not None:
r = add(x1, x2, out=out)
else:
r = add(x1, x2)
if np.any(n):
warnings.warn("Invalid value encountered in median",
RuntimeWarning)
if zerod:
if ap.ndim == 1:
if out is not None:
out[...] = a.dtype.type(np.nan)
r = out
else:
r = a.dtype.type(np.nan)
else:
r[..., n.squeeze(0)] = a.dtype.type(np.nan)
else:
if r.ndim == 1:
r[:] = a.dtype.type(np.nan)
else:
r[..., n.repeat(q.size, 0)] = a.dtype.type(np.nan)
return r
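# Worked sketch (added; hypothetical helper, never called): with the default
# 'linear' interpolation the target position is ``q/100 * (Nx - 1)``; its floor
# and ceiling neighbours are weighted by the fractional part.
def _percentile_interp_example():
    a = np.array([1.0, 2.0, 3.0, 4.0])
    # q = 40 -> position 0.4 * 3 = 1.2 -> 0.8 * a[1] + 0.2 * a[2] = 2.2
    assert np.isclose(np.percentile(a, 40), 2.2)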
def trapz(y, x=None, dx=1.0, axis=-1):
"""
Integrate along the given axis using the composite trapezoidal rule.
Integrate `y` (`x`) along given axis.
Parameters
----------
y : array_like
Input array to integrate.
x : array_like, optional
If `x` is None, then spacing between all `y` elements is `dx`.
dx : scalar, optional
If `x` is None, spacing given by `dx` is assumed. Default is 1.
axis : int, optional
Specify the axis.
Returns
-------
trapz : float
Definite integral as approximated by trapezoidal rule.
See Also
--------
sum, cumsum
Notes
-----
Image [2]_ illustrates trapezoidal rule -- y-axis locations of points
will be taken from `y` array, by default x-axis distances between
points will be 1.0, alternatively they can be provided with `x` array
or with `dx` scalar. Return value will be equal to combined area under
the red lines.
References
----------
.. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule
.. [2] Illustration image:
http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png
Examples
--------
>>> np.trapz([1,2,3])
4.0
>>> np.trapz([1,2,3], x=[4,6,8])
8.0
>>> np.trapz([1,2,3], dx=2)
8.0
>>> a = np.arange(6).reshape(2, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> np.trapz(a, axis=0)
array([ 1.5, 2.5, 3.5])
>>> np.trapz(a, axis=1)
array([ 2., 8.])
"""
y = asanyarray(y)
if x is None:
d = dx
else:
x = asanyarray(x)
if x.ndim == 1:
d = diff(x)
# reshape to correct shape
shape = [1]*y.ndim
shape[axis] = d.shape[0]
d = d.reshape(shape)
else:
d = diff(x, axis=axis)
nd = len(y.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
try:
ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis)
except ValueError:
# Operations didn't work, cast to ndarray
d = np.asarray(d)
y = np.asarray(y)
ret = add.reduce(d * (y[slice1]+y[slice2])/2.0, axis)
return ret
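# Worked sketch (added; hypothetical helper, never called): with an explicit
# ``x`` the spacing ``d = diff(x)`` weights each trapezoid, i.e. the result is
# ``sum(d * (y[1:] + y[:-1]) / 2)``.
def _trapz_example():
    y = np.array([1.0, 2.0, 3.0])
    x = np.array([0.0, 1.0, 3.0])
    # segments: 1*(1+2)/2 + 2*(2+3)/2 = 1.5 + 5.0 = 6.5
    assert np.trapz(y, x) == 6.5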
#always succeed
def add_newdoc(place, obj, doc):
"""Adds documentation to obj which is in module place.
If doc is a string add it to obj as a docstring
If doc is a tuple, then the first element is interpreted as
an attribute of obj and the second as the docstring
(method, docstring)
If doc is a list, then each element of the list should be a
sequence of length two --> [(method1, docstring1),
(method2, docstring2), ...]
This routine never raises an error.
This routine cannot modify read-only docstrings, as appear
in new-style classes or built-in functions. Because this
routine never raises an error the caller must check manually
that the docstrings were changed.
"""
try:
new = getattr(__import__(place, globals(), {}, [obj]), obj)
if isinstance(doc, str):
add_docstring(new, doc.strip())
elif isinstance(doc, tuple):
add_docstring(getattr(new, doc[0]), doc[1].strip())
elif isinstance(doc, list):
for val in doc:
add_docstring(getattr(new, val[0]), val[1].strip())
except:
pass
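# Hypothetical usage sketch (added): the three accepted forms of ``doc``.
# Failures are swallowed, so callers should verify the docstring afterwards.
#
#     add_newdoc('some.module', 'some_object', "Plain docstring.")
#     add_newdoc('some.module', 'some_object', ('method', "Method docstring."))
#     add_newdoc('some.module', 'some_object', [('m1', "doc1"), ('m2', "doc2")])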
# Based on scitools meshgrid
def meshgrid(*xi, **kwargs):
"""
Return coordinate matrices from coordinate vectors.
Make N-D coordinate arrays for vectorized evaluations of
N-D scalar/vector fields over N-D grids, given
one-dimensional coordinate arrays x1, x2,..., xn.
.. versionchanged:: 1.9
1-D and 0-D cases are allowed.
Parameters
----------
x1, x2,..., xn : array_like
1-D arrays representing the coordinates of a grid.
indexing : {'xy', 'ij'}, optional
Cartesian ('xy', default) or matrix ('ij') indexing of output.
See Notes for more details.
.. versionadded:: 1.7.0
sparse : bool, optional
If True a sparse grid is returned in order to conserve memory.
Default is False.
.. versionadded:: 1.7.0
copy : bool, optional
If False, a view into the original arrays is returned in order to
conserve memory. Default is True. Please note that
``sparse=False, copy=False`` will likely return non-contiguous
arrays. Furthermore, more than one element of a broadcast array
may refer to a single memory location. If you need to write to the
arrays, make copies first.
.. versionadded:: 1.7.0
Returns
-------
X1, X2,..., XN : ndarray
For vectors `x1`, `x2`,..., `xn` with lengths ``Ni=len(xi)``,
return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'
with the elements of `xi` repeated to fill the matrix along
the first dimension for `x1`, the second for `x2` and so on.
Notes
-----
This function supports both indexing conventions through the indexing
keyword argument. Giving the string 'ij' returns a meshgrid with
matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing.
In the 2-D case with inputs of length M and N, the outputs are of shape
(N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case
with inputs of length M, N and P, outputs are of shape (N, M, P) for
'xy' indexing and (M, N, P) for 'ij' indexing. The difference is
illustrated by the following code snippet::
xv, yv = meshgrid(x, y, sparse=False, indexing='ij')
for i in range(nx):
for j in range(ny):
# treat xv[i,j], yv[i,j]
xv, yv = meshgrid(x, y, sparse=False, indexing='xy')
for i in range(nx):
for j in range(ny):
# treat xv[j,i], yv[j,i]
In the 1-D and 0-D case, the indexing and sparse keywords have no effect.
See Also
--------
index_tricks.mgrid : Construct a multi-dimensional "meshgrid"
using indexing notation.
index_tricks.ogrid : Construct an open multi-dimensional "meshgrid"
using indexing notation.
Examples
--------
>>> nx, ny = (3, 2)
>>> x = np.linspace(0, 1, nx)
>>> y = np.linspace(0, 1, ny)
>>> xv, yv = meshgrid(x, y)
>>> xv
array([[ 0. , 0.5, 1. ],
[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0., 0., 0.],
[ 1., 1., 1.]])
>>> xv, yv = meshgrid(x, y, sparse=True) # make sparse output arrays
>>> xv
array([[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0.],
[ 1.]])
`meshgrid` is very useful to evaluate functions on a grid.
>>> x = np.arange(-5, 5, 0.1)
>>> y = np.arange(-5, 5, 0.1)
>>> xx, yy = meshgrid(x, y, sparse=True)
>>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2)
>>> h = plt.contourf(x,y,z)
"""
ndim = len(xi)
copy_ = kwargs.pop('copy', True)
sparse = kwargs.pop('sparse', False)
indexing = kwargs.pop('indexing', 'xy')
if kwargs:
raise TypeError("meshgrid() got an unexpected keyword argument '%s'"
% (list(kwargs)[0],))
if indexing not in ['xy', 'ij']:
raise ValueError(
"Valid values for `indexing` are 'xy' and 'ij'.")
s0 = (1,) * ndim
output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1::])
for i, x in enumerate(xi)]
shape = [x.size for x in output]
if indexing == 'xy' and ndim > 1:
# switch first and second axis
output[0].shape = (1, -1) + (1,)*(ndim - 2)
output[1].shape = (-1, 1) + (1,)*(ndim - 2)
shape[0], shape[1] = shape[1], shape[0]
if sparse:
if copy_:
return [x.copy() for x in output]
else:
return output
else:
# Return the full N-D matrix (not only the 1-D vector)
if copy_:
mult_fact = np.ones(shape, dtype=int)
return [x * mult_fact for x in output]
else:
return np.broadcast_arrays(*output)
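# Illustrative sketch (added; hypothetical helper, never called): output shapes
# for 'xy' vs 'ij' indexing and for sparse grids, matching the Notes above.
def _meshgrid_example():
    x = np.arange(3)   # length M = 3
    y = np.arange(2)   # length N = 2
    xv, yv = np.meshgrid(x, y)                   # default 'xy' -> (N, M)
    assert xv.shape == (2, 3) and yv.shape == (2, 3)
    xi, yi = np.meshgrid(x, y, indexing='ij')    # 'ij' -> (M, N)
    assert xi.shape == (3, 2) and yi.shape == (3, 2)
    xs, ys = np.meshgrid(x, y, sparse=True)      # broadcastable views
    assert xs.shape == (1, 3) and ys.shape == (2, 1)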
def delete(arr, obj, axis=None):
"""
Return a new array with sub-arrays along an axis deleted. For a one
dimensional array, this returns those entries not returned by
`arr[obj]`.
Parameters
----------
arr : array_like
Input array.
obj : slice, int or array of ints
Indicate which sub-arrays to remove.
axis : int, optional
The axis along which to delete the subarray defined by `obj`.
If `axis` is None, `obj` is applied to the flattened array.
Returns
-------
out : ndarray
A copy of `arr` with the elements specified by `obj` removed. Note
that `delete` does not occur in-place. If `axis` is None, `out` is
a flattened array.
See Also
--------
insert : Insert elements into an array.
append : Append elements at the end of an array.
Notes
-----
Often it is preferable to use a boolean mask. For example:
>>> mask = np.ones(len(arr), dtype=bool)
>>> mask[[0,2,4]] = False
>>> result = arr[mask,...]
Is equivalent to `np.delete(arr, [0,2,4], axis=0)`, but allows further
use of `mask`.
Examples
--------
>>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
>>> arr
array([[ 1, 2, 3, 4],
[ 5, 6, 7, 8],
[ 9, 10, 11, 12]])
>>> np.delete(arr, 1, 0)
array([[ 1, 2, 3, 4],
[ 9, 10, 11, 12]])
>>> np.delete(arr, np.s_[::2], 1)
array([[ 2, 4],
[ 6, 8],
[10, 12]])
>>> np.delete(arr, [1,3,5], None)
array([ 1, 3, 5, 7, 8, 9, 10, 11, 12])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim - 1
if ndim == 0:
# 2013-09-24, 1.9
warnings.warn(
"in the future the special handling of scalars will be removed "
"from delete and raise an error", DeprecationWarning)
if wrap:
return wrap(arr)
else:
return arr.copy()
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, slice):
start, stop, step = obj.indices(N)
xr = range(start, stop, step)
numtodel = len(xr)
if numtodel <= 0:
if wrap:
return wrap(arr.copy())
else:
return arr.copy()
# Invert if step is negative:
if step < 0:
step = -step
start = xr[-1]
stop = xr[0] + 1
newshape[axis] -= numtodel
new = empty(newshape, arr.dtype, arr.flags.fnc)
# copy initial chunk
if start == 0:
pass
else:
slobj[axis] = slice(None, start)
new[slobj] = arr[slobj]
# copy end chunk
if stop == N:
pass
else:
slobj[axis] = slice(stop-numtodel, None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(stop, None)
new[slobj] = arr[slobj2]
# copy middle pieces
if step == 1:
pass
else: # use array indexing.
keep = ones(stop-start, dtype=bool)
keep[:stop-start:step] = False
slobj[axis] = slice(start, stop-numtodel)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(start, stop)
arr = arr[slobj2]
slobj2[axis] = keep
new[slobj] = arr[slobj2]
if wrap:
return wrap(new)
else:
return new
_obj = obj
obj = np.asarray(obj)
# After removing the special handling of booleans and out of
# bounds values, the conversion to the array can be removed.
if obj.dtype == bool:
warnings.warn(
"in the future insert will treat boolean arrays and array-likes "
"as boolean index instead of casting it to integer", FutureWarning)
obj = obj.astype(intp)
if isinstance(_obj, (int, long, integer)):
# optimization for a single value
obj = obj.item()
if (obj < -N or obj >= N):
raise IndexError(
"index %i is out of bounds for axis %i with "
"size %i" % (obj, axis, N))
if (obj < 0):
obj += N
newshape[axis] -= 1
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj[axis] = slice(None, obj)
new[slobj] = arr[slobj]
slobj[axis] = slice(obj, None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(obj+1, None)
new[slobj] = arr[slobj2]
else:
if obj.size == 0 and not isinstance(_obj, np.ndarray):
obj = obj.astype(intp)
if not np.can_cast(obj, intp, 'same_kind'):
# obj.size = 1 special case always failed and would just
# give superfluous warnings.
# 2013-09-24, 1.9
warnings.warn(
"using a non-integer array as obj in delete will result in an "
"error in the future", DeprecationWarning)
obj = obj.astype(intp)
keep = ones(N, dtype=bool)
# Test if there are out of bound indices, this is deprecated
inside_bounds = (obj < N) & (obj >= -N)
if not inside_bounds.all():
# 2013-09-24, 1.9
warnings.warn(
"in the future out of bounds indices will raise an error "
"instead of being ignored by `numpy.delete`.",
DeprecationWarning)
obj = obj[inside_bounds]
positive_indices = obj >= 0
if not positive_indices.all():
warnings.warn(
"in the future negative indices will not be ignored by "
"`numpy.delete`.", FutureWarning)
obj = obj[positive_indices]
keep[obj, ] = False
slobj[axis] = keep
new = arr[slobj]
if wrap:
return wrap(new)
else:
return new
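# Illustrative sketch (added; hypothetical helper, never called): the boolean
# mask spelling from the Notes produces the same result as np.delete.
def _delete_mask_example():
    arr = np.arange(5)
    mask = np.ones(len(arr), dtype=bool)
    mask[[0, 2, 4]] = False
    assert np.array_equal(np.delete(arr, [0, 2, 4]), arr[mask])   # [1, 3]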
def insert(arr, obj, values, axis=None):
"""
Insert values along the given axis before the given indices.
Parameters
----------
arr : array_like
Input array.
obj : int, slice or sequence of ints
Object that defines the index or indices before which `values` is
inserted.
.. versionadded:: 1.8.0
Support for multiple insertions when `obj` is a single scalar or a
sequence with one element (similar to calling insert multiple
times).
values : array_like
Values to insert into `arr`. If the type of `values` is different
from that of `arr`, `values` is converted to the type of `arr`.
`values` should be shaped so that ``arr[...,obj,...] = values``
is legal.
axis : int, optional
Axis along which to insert `values`. If `axis` is None then `arr`
is flattened first.
Returns
-------
out : ndarray
A copy of `arr` with `values` inserted. Note that `insert`
does not occur in-place: a new array is returned. If
`axis` is None, `out` is a flattened array.
See Also
--------
append : Append elements at the end of an array.
concatenate : Join a sequence of arrays along an existing axis.
delete : Delete elements from an array.
Notes
-----
Note that for higher dimensional inserts `obj=0` behaves very differently
from `obj=[0]`, just as `arr[:,0,:] = values` is different from
`arr[:,[0],:] = values`.
Examples
--------
>>> a = np.array([[1, 1], [2, 2], [3, 3]])
>>> a
array([[1, 1],
[2, 2],
[3, 3]])
>>> np.insert(a, 1, 5)
array([1, 5, 1, 2, 2, 3, 3])
>>> np.insert(a, 1, 5, axis=1)
array([[1, 5, 1],
[2, 5, 2],
[3, 5, 3]])
Difference between sequence and scalars:
>>> np.insert(a, [1], [[1],[2],[3]], axis=1)
array([[1, 1, 1],
[2, 2, 2],
[3, 3, 3]])
>>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1),
... np.insert(a, [1], [[1],[2],[3]], axis=1))
True
>>> b = a.flatten()
>>> b
array([1, 1, 2, 2, 3, 3])
>>> np.insert(b, [2, 2], [5, 6])
array([1, 1, 5, 6, 2, 2, 3, 3])
>>> np.insert(b, slice(2, 4), [5, 6])
array([1, 1, 5, 2, 6, 2, 3, 3])
>>> np.insert(b, [2, 2], [7.13, False]) # type casting
array([1, 1, 7, 0, 2, 2, 3, 3])
>>> x = np.arange(8).reshape(2, 4)
>>> idx = (1, 3)
>>> np.insert(x, idx, 999, axis=1)
array([[ 0, 999, 1, 2, 999, 3],
[ 4, 999, 5, 6, 999, 7]])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim - 1
else:
if ndim > 0 and (axis < -ndim or axis >= ndim):
raise IndexError(
"axis %i is out of bounds for an array of "
"dimension %i" % (axis, ndim))
if (axis < 0):
axis += ndim
if (ndim == 0):
# 2013-09-24, 1.9
warnings.warn(
"in the future the special handling of scalars will be removed "
"from insert and raise an error", DeprecationWarning)
arr = arr.copy()
arr[...] = values
if wrap:
return wrap(arr)
else:
return arr
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, slice):
# turn it into a range object
indices = arange(*obj.indices(N), **{'dtype': intp})
else:
# need to copy obj, because indices will be changed in-place
indices = np.array(obj)
if indices.dtype == bool:
# See also delete
warnings.warn(
"in the future insert will treat boolean arrays and "
"array-likes as a boolean index instead of casting it to "
"integer", FutureWarning)
indices = indices.astype(intp)
# Code after warning period:
#if obj.ndim != 1:
# raise ValueError('boolean array argument obj to insert '
# 'must be one dimensional')
#indices = np.flatnonzero(obj)
elif indices.ndim > 1:
raise ValueError(
"index array argument obj to insert must be one dimensional "
"or scalar")
if indices.size == 1:
index = indices.item()
if index < -N or index > N:
raise IndexError(
"index %i is out of bounds for axis %i with "
"size %i" % (obj, axis, N))
if (index < 0):
index += N
# There are some object array corner cases here, but we cannot avoid
# that:
values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype)
if indices.ndim == 0:
# broadcasting is very different here, since a[:,0,:] = ... behaves
# very differently from a[:,[0],:] = ...! This changes values so that
# it works like the second case. (here a[:,0:1,:])
values = np.rollaxis(values, 0, (axis % values.ndim) + 1)
numnew = values.shape[axis]
newshape[axis] += numnew
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj[axis] = slice(None, index)
new[slobj] = arr[slobj]
slobj[axis] = slice(index, index+numnew)
new[slobj] = values
slobj[axis] = slice(index+numnew, None)
slobj2 = [slice(None)] * ndim
slobj2[axis] = slice(index, None)
new[slobj] = arr[slobj2]
if wrap:
return wrap(new)
return new
elif indices.size == 0 and not isinstance(obj, np.ndarray):
# Can safely cast the empty list to intp
indices = indices.astype(intp)
if not np.can_cast(indices, intp, 'same_kind'):
# 2013-09-24, 1.9
warnings.warn(
"using a non-integer array as obj in insert will result in an "
"error in the future", DeprecationWarning)
indices = indices.astype(intp)
indices[indices < 0] += N
numnew = len(indices)
order = indices.argsort(kind='mergesort') # stable sort
indices[order] += np.arange(numnew)
newshape[axis] += numnew
old_mask = ones(newshape[axis], dtype=bool)
old_mask[indices] = False
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj2 = [slice(None)]*ndim
slobj[axis] = indices
slobj2[axis] = old_mask
new[slobj] = values
new[slobj2] = arr
if wrap:
return wrap(new)
return new
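# Illustrative sketch (added; hypothetical helper, never called): a slice obj is
# expanded with ``arange(*obj.indices(N))``, so each value is placed before the
# corresponding position of the *original* array.
def _insert_slice_example():
    b = np.array([10, 20, 30, 40])
    assert np.array_equal(np.insert(b, slice(1, 3), [99, 99]),
                          np.array([10, 99, 20, 99, 30, 40]))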
def append(arr, values, axis=None):
"""
Append values to the end of an array.
Parameters
----------
arr : array_like
Values are appended to a copy of this array.
values : array_like
These values are appended to a copy of `arr`. It must be of the
correct shape (the same shape as `arr`, excluding `axis`). If
`axis` is not specified, `values` can be any shape and will be
flattened before use.
axis : int, optional
The axis along which `values` are appended. If `axis` is not
given, both `arr` and `values` are flattened before use.
Returns
-------
append : ndarray
A copy of `arr` with `values` appended to `axis`. Note that
`append` does not occur in-place: a new array is allocated and
filled. If `axis` is None, `out` is a flattened array.
See Also
--------
insert : Insert elements into an array.
delete : Delete elements from an array.
Examples
--------
>>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
array([1, 2, 3, 4, 5, 6, 7, 8, 9])
When `axis` is specified, `values` must have the correct shape.
>>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
>>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
Traceback (most recent call last):
...
ValueError: arrays must have same number of dimensions
"""
arr = asanyarray(arr)
if axis is None:
if arr.ndim != 1:
arr = arr.ravel()
values = ravel(values)
axis = arr.ndim-1
return concatenate((arr, values), axis=axis)
|
bsd-3-clause
|
jaden/2014-hour-of-code
|
plotting/histogram.py
|
1
|
1034
|
"""
Demo of the histogram (hist) function with a few features.
In addition to the basic histogram, this demo shows a few optional features:
* Setting the number of data bins
* The ``normed`` flag, which normalizes bin heights so that the integral of
the histogram is 1. The resulting histogram is a probability density.
* Setting the face color of the bars
* Setting the opacity (alpha value).
"""
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
# example data
mu = 100 # mean of distribution
sigma = 15 # standard deviation of distribution
x = mu + sigma * np.random.randn(10000)
num_bins = 50
# the histogram of the data
n, bins, patches = plt.hist(x, num_bins, normed=1, facecolor='green', alpha=0.5)
# add a 'best fit' line
y = mlab.normpdf(bins, mu, sigma)
plt.plot(bins, y, 'r--')
plt.xlabel('IQ')
plt.ylabel('Probability')
plt.title(r'Histogram of IQ: $\mu=100$, $\sigma=15$')
# Tweak spacing to prevent clipping of ylabel
plt.subplots_adjust(left=0.15)
plt.show()
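# Note (added, hedged): on newer matplotlib/numpy releases ``normed=1`` and
# ``mlab.normpdf`` are deprecated; an equivalent sketch would be
#
#     n, bins, patches = plt.hist(x, num_bins, density=True,
#                                 facecolor='green', alpha=0.5)
#     y = (1.0 / (np.sqrt(2 * np.pi) * sigma)
#          * np.exp(-0.5 * ((bins - mu) / sigma) ** 2))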
|
mit
|
lcharleux/truss
|
doc/example_code/truss_logo.py
|
1
|
1421
|
import truss
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib import patches
modulus = 210.e3 #Pa
rho = 2700. #kg/m**3
surface = .001 #m**2
yield_stress = 400 #Pa
from scipy import optimize
m = truss.core.Model()
A = m.add_node((0., 1.), label = "A")
B = m.add_node((.5, 1.), label = "B")
C = m.add_node((1., 1.), label = "C")
D = m.add_node((.5, 0.), label = "D")
E = m.add_node((1., 0.), label = "E")
F = m.add_node((2., 0.), label = "F")
G = m.add_node((2., 1.), label = "G")
A.block[1] = True
A.block[0] = True
C.block[0] = True
C.block[1] = True
D.block[1] = True
D.block[0] = True
AB = m.add_bar(A, B, modulus = modulus, density = rho, section = surface)
BC = m.add_bar(B, C, modulus = modulus, density = rho, section = surface)
BD = m.add_bar(B, D, modulus = modulus, density = rho, section = surface)
m.add_bar(C, E, modulus = modulus, density = rho, section = surface)
m.add_bar(E, F, modulus = modulus, density = rho, section = surface)
m.add_bar(F, G, modulus = modulus, density = rho, section = surface)
B.force = np.array([-1., -1.])
#m.solve()
xlim, ylim = m.bbox(deformed = False)
fig = plt.figure(0)
plt.clf()
ax = fig.add_subplot(1,1,1)
ax.set_aspect("equal")
#ax.axis("off")
m.draw(ax, deformed = True, field = "stress", label = True, force_scale = .1, forces = True)
plt.xlim(xlim)
plt.ylim(ylim)
plt.grid()
plt.show()
|
gpl-2.0
|
alephu5/Soundbyte
|
environment/lib/python3.3/site-packages/pandas/tests/test_categorical.py
|
1
|
6680
|
# pylint: disable=E1101,E1103,W0232
from datetime import datetime
from pandas.compat import range, lrange, u
import nose
import re
import numpy as np
from pandas.core.categorical import Categorical
from pandas.core.index import Index, Int64Index, MultiIndex
from pandas.core.frame import DataFrame
from pandas.util.testing import assert_almost_equal
import pandas.core.common as com
import pandas.util.testing as tm
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'])
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf.labels, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf.labels, [2, 2, 2])
def test_constructor_unsortable(self):
raise nose.SkipTest('skipping for now')
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
# it works!
factor = Categorical.from_array(arr)
def test_factor_agg(self):
import pandas.core.frame as frame
arr = np.arange(len(self.factor))
f = np.sum
agged = frame.factor_agg(self.factor, arr, f)
labels = self.factor.labels
for i, idx in enumerate(self.factor.levels):
self.assertEqual(f(arr[labels == i]), agged[i])
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assert_(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assert_(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assert_(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assert_(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assert_(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assert_(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_(np.array_equal(result, expected))
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_(np.array_equal(result, expected))
def test_na_flags_int_levels(self):
# #1457
levels = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, levels)
repr(cat)
self.assert_(np.array_equal(com.isnull(cat), labels == -1))
def test_levels_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'])
self.assert_(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame.from_dict(dict(counts=[3, 2, 3],
freqs=[3/8., 2/8., 3/8.],
levels=['a', 'b', 'c'])
).set_index('levels')
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1,2,3,1,2,3,3,2,1,1,1]).describe()
expected = DataFrame.from_dict(dict(counts=[5, 3, 3],
freqs=[5/11., 3/11., 3/11.],
levels=[1,2,3]
)
).set_index('levels')
tm.assert_frame_equal(desc, expected)
def test_print(self):
expected = [" a", " b", " b", " a", " a", " c", " c", " c",
"Levels (3): Index([a, b, c], dtype=object)"]
expected = "\n".join(expected)
# hack because array_repr changed in numpy > 1.6.x
actual = repr(self.factor)
pat = "Index\(\['a', 'b', 'c']"
sub = "Index([a, b, c]"
actual = re.sub(pat, sub, actual)
self.assertEquals(actual, expected)
def test_big_print(self):
factor = Categorical([0,1,2,0,1,2]*100, ['a', 'b', 'c'], name='cat')
expected = [" a", " b", " c", " a", " b", " c", " a", " b", " c",
" a", " b", " c", " a", "...", " c", " a", " b", " c",
" a", " b", " c", " a", " b", " c", " a", " b", " c",
"Levels (3): Index([a, b, c], dtype=object)",
"Name: cat, Length: 600" ]
expected = "\n".join(expected)
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
pat = "Index\(\['a', 'b', 'c']"
sub = "Index([a, b, c]"
actual = re.sub(pat, sub, actual)
self.assertEquals(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a","b","c"], name="cat")
expected = ("Categorical([], Name: cat, Levels (3): "
"Index([a, b, c], dtype=object)")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
pat = "Index\(\['a', 'b', 'c']"
sub = "Index([a, b, c]"
actual = re.sub(pat, sub, actual)
self.assertEqual(actual, expected)
factor = Categorical([], ["a","b","c"])
expected = ("Categorical([], Levels (3): "
"Index([a, b, c], dtype=object)")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
pat = "Index\(\['a', 'b', 'c']"
sub = "Index([a, b, c]"
actual = re.sub(pat, sub, actual)
self.assertEqual(actual, expected)
factor = Categorical([], [])
expected = ("Categorical([], Levels (0): "
"Index([], dtype=object)")
self.assertEqual(repr(factor), expected)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
# '--with-coverage', '--cover-package=pandas.core'],
exit=False)
|
gpl-3.0
|
PaulGrimal/peach
|
tutorial/neural-networks/mapping-a-plane.py
|
6
|
2368
|
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: tutorial/mapping-a-plane.py
# Using a neuron to map a plane
################################################################################
# Please, for more information on this demo, see the tutorial documentation.
# We import numpy, random and peach, as those are the libraries we will be
# using.
from numpy import *
import random
import peach as p
# Here, we create a FeedForward network with only one layer, with two inputs and
# one output. Since it is only one output, there is only one neuron in the
# layer. We use LMS as the learning algorithm, and the neuron must be biased.
# Notice that we use 0.02 as the learning rate for the algorithm.
nn = p.FeedForward((2, 1), lrule=p.LMS(0.02), bias=True)
# These lists will track the values of the synaptic weights and the error. We
# will use it later to plot the convergence, if the matplotlib module is
# available
w0 = [ ]
w1 = [ ]
w2 = [ ]
elog = [ ]
# We start by setting the error to 1, so we can enter the looping:
error = 1
while abs(error) > 1e-7: # Max error is 1e-7
x1 = random.uniform(-10, 10) # Generating an example
x2 = random.uniform(-10, 10)
x = array([ x1, x2 ], dtype = float)
d = -1 - 3*x1 + 2*x2 # Plane equation
error = nn.feed(x, d)
w0.append(nn[0].weights[0][0]) # Tracking error and weights.
w1.append(nn[0].weights[0][1])
w2.append(nn[0].weights[0][2])
elog.append(d - nn(x)[0, 0])
print "After %d iterations, we get as synaptic weights:" % (len(w0),)
print nn[0].weights
# If the system has the plot package matplotlib, this tutorial tries to plot
# and save the convergence of synaptic weights and error. The plot is saved in
# the file ``mapping-a-plane.png``.
try:
from matplotlib import *
from matplotlib.pylab import *
vsize = 4
figure(1).set_size_inches(8, 4)
a1 = axes([ 0.125, 0.10, 0.775, 0.8 ])
a1.hold(True)
a1.grid(True)
a1.plot(array(w0))
a1.plot(array(w1))
a1.plot(array(w2))
a1.plot(array(elog))
a1.set_ylim([-10, 10])
a1.legend([ "$w_0$", "$w_1$", "$w_2$", "$error$" ])
savefig("mapping-a-plane.png")
except ImportError:
pass
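# Added sketch (hypothetical, not part of the original tutorial and never
# called): the LMS rule used by peach above is simply
# w <- w + eta * error * input; written out by hand for the same plane:
def _lms_sketch(eta=0.02, steps=10000):
    w = zeros(3)                              # bias weight + two inputs
    for _ in range(steps):
        x1 = random.uniform(-10, 10)
        x2 = random.uniform(-10, 10)
        xv = array([1.0, x1, x2])
        d = -1 - 3*x1 + 2*x2                  # same plane equation as above
        w = w + eta * (d - dot(w, xv)) * xv   # LMS update
    return w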
|
lgpl-2.1
|
marcsans/cnn-physics-perception
|
phy/lib/python2.7/site-packages/matplotlib/tests/test_cycles.py
|
4
|
7475
|
import warnings
from matplotlib.testing.decorators import image_comparison, cleanup
import matplotlib.pyplot as plt
import numpy as np
from nose.tools import assert_raises
from cycler import cycler
@image_comparison(baseline_images=['color_cycle_basic'], remove_text=True,
extensions=['png'])
def test_colorcycle_basic():
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_prop_cycle(cycler('color', ['r', 'g', 'y']))
xs = np.arange(10)
ys = 0.25 * xs + 2
ax.plot(xs, ys, label='red', lw=4)
ys = 0.45 * xs + 3
ax.plot(xs, ys, label='green', lw=4)
ys = 0.65 * xs + 4
ax.plot(xs, ys, label='yellow', lw=4)
ys = 0.85 * xs + 5
ax.plot(xs, ys, label='red2', lw=4)
ax.legend(loc='upper left')
@image_comparison(baseline_images=['marker_cycle', 'marker_cycle'],
remove_text=True, extensions=['png'])
def test_marker_cycle():
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_prop_cycle(cycler('color', ['r', 'g', 'y']) +
cycler('marker', ['.', '*', 'x']))
xs = np.arange(10)
ys = 0.25 * xs + 2
ax.plot(xs, ys, label='red dot', lw=4, ms=16)
ys = 0.45 * xs + 3
ax.plot(xs, ys, label='green star', lw=4, ms=16)
ys = 0.65 * xs + 4
ax.plot(xs, ys, label='yellow x', lw=4, ms=16)
ys = 0.85 * xs + 5
ax.plot(xs, ys, label='red2 dot', lw=4, ms=16)
ax.legend(loc='upper left')
fig = plt.figure()
ax = fig.add_subplot(111)
# Test keyword arguments, numpy arrays, and generic iterators
ax.set_prop_cycle(color=np.array(['r', 'g', 'y']),
marker=iter(['.', '*', 'x']))
xs = np.arange(10)
ys = 0.25 * xs + 2
ax.plot(xs, ys, label='red dot', lw=4, ms=16)
ys = 0.45 * xs + 3
ax.plot(xs, ys, label='green star', lw=4, ms=16)
ys = 0.65 * xs + 4
ax.plot(xs, ys, label='yellow x', lw=4, ms=16)
ys = 0.85 * xs + 5
ax.plot(xs, ys, label='red2 dot', lw=4, ms=16)
ax.legend(loc='upper left')
@image_comparison(baseline_images=['lineprop_cycle_basic'], remove_text=True,
extensions=['png'])
def test_linestylecycle_basic():
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_prop_cycle(cycler('linestyle', ['-', '--', ':']))
xs = np.arange(10)
ys = 0.25 * xs + 2
ax.plot(xs, ys, label='solid', lw=4)
ys = 0.45 * xs + 3
ax.plot(xs, ys, label='dashed', lw=4)
ys = 0.65 * xs + 4
ax.plot(xs, ys, label='dotted', lw=4)
ys = 0.85 * xs + 5
ax.plot(xs, ys, label='solid2', lw=4)
ax.legend(loc='upper left')
@image_comparison(baseline_images=['fill_cycle_basic'], remove_text=True,
extensions=['png'])
def test_fillcycle_basic():
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_prop_cycle(cycler('color', ['r', 'g', 'y']) +
cycler('hatch', ['xx', 'O', '|-']) +
cycler('linestyle', ['-', '--', ':']))
xs = np.arange(10)
ys = 0.25 * xs**.5 + 2
ax.fill(xs, ys, label='red, xx', linewidth=3)
ys = 0.45 * xs**.5 + 3
ax.fill(xs, ys, label='green, circle', linewidth=3)
ys = 0.65 * xs**.5 + 4
ax.fill(xs, ys, label='yellow, cross', linewidth=3)
ys = 0.85 * xs**.5 + 5
ax.fill(xs, ys, label='red2, xx', linewidth=3)
ax.legend(loc='upper left')
@image_comparison(baseline_images=['fill_cycle_ignore'], remove_text=True,
extensions=['png'])
def test_fillcycle_ignore():
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_prop_cycle(cycler('color', ['r', 'g', 'y']) +
cycler('hatch', ['xx', 'O', '|-']) +
cycler('marker', ['.', '*', 'D']))
xs = np.arange(10)
ys = 0.25 * xs**.5 + 2
# Should not advance the cycler, even though there is an
# unspecified property in the cycler "marker".
# "marker" is not a Polygon property, and should be ignored.
ax.fill(xs, ys, 'r', hatch='xx', label='red, xx')
ys = 0.45 * xs**.5 + 3
# Allow the cycler to advance, but specify some properties
ax.fill(xs, ys, hatch='O', label='red, circle')
ys = 0.65 * xs**.5 + 4
ax.fill(xs, ys, label='green, circle')
ys = 0.85 * xs**.5 + 5
ax.fill(xs, ys, label='yellow, cross')
ax.legend(loc='upper left')
@image_comparison(baseline_images=['property_collision_plot'],
remove_text=True, extensions=['png'])
def test_property_collision_plot():
fig, ax = plt.subplots()
ax.set_prop_cycle('linewidth', [2, 4])
for c in range(1, 4):
ax.plot(np.arange(10), c * np.arange(10), lw=0.1)
ax.plot(np.arange(10), 4 * np.arange(10))
ax.plot(np.arange(10), 5 * np.arange(10))
@image_comparison(baseline_images=['property_collision_fill'],
remove_text=True, extensions=['png'])
def test_property_collision_fill():
fig, ax = plt.subplots()
xs = np.arange(10)
ys = 0.25 * xs**.5 + 2
ax.set_prop_cycle(linewidth=[2, 3, 4, 5, 6], facecolor='bgcmy')
for c in range(1, 4):
ax.fill(xs, c * ys, lw=0.1)
ax.fill(xs, 4 * ys)
ax.fill(xs, 5 * ys)
@cleanup
def test_valid_input_forms():
fig, ax = plt.subplots()
# These should not raise an error.
ax.set_prop_cycle(None)
ax.set_prop_cycle(cycler('linewidth', [1, 2]))
ax.set_prop_cycle('color', 'rgywkbcm')
ax.set_prop_cycle('linewidth', (1, 2))
ax.set_prop_cycle('linewidth', [1, 2])
ax.set_prop_cycle('linewidth', iter([1, 2]))
ax.set_prop_cycle('linewidth', np.array([1, 2]))
ax.set_prop_cycle('color', np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]]))
ax.set_prop_cycle(lw=[1, 2], color=['k', 'w'], ls=['-', '--'])
ax.set_prop_cycle(lw=np.array([1, 2]),
color=np.array(['k', 'w']),
ls=np.array(['-', '--']))
assert True
@cleanup
def test_cycle_reset():
fig, ax = plt.subplots()
# Can't really test a reset because only a cycle object is stored
# but we can test the first item of the cycle.
prop = next(ax._get_lines.prop_cycler)
ax.set_prop_cycle(linewidth=[10, 9, 4])
assert prop != next(ax._get_lines.prop_cycler)
ax.set_prop_cycle(None)
got = next(ax._get_lines.prop_cycler)
assert prop == got, "expected %s, got %s" % (prop, got)
fig, ax = plt.subplots()
# Need to double-check the old set/get_color_cycle(), too
with warnings.catch_warnings():
prop = next(ax._get_lines.prop_cycler)
ax.set_color_cycle(['c', 'm', 'y', 'k'])
assert prop != next(ax._get_lines.prop_cycler)
ax.set_color_cycle(None)
got = next(ax._get_lines.prop_cycler)
assert prop == got, "expected %s, got %s" % (prop, got)
@cleanup
def test_invalid_input_forms():
fig, ax = plt.subplots()
assert_raises((TypeError, ValueError), ax.set_prop_cycle, 1)
assert_raises((TypeError, ValueError), ax.set_prop_cycle, [1, 2])
assert_raises((TypeError, ValueError), ax.set_prop_cycle, 'color', 'fish')
assert_raises((TypeError, ValueError), ax.set_prop_cycle, 'linewidth', 1)
assert_raises((TypeError, ValueError), ax.set_prop_cycle,
'linewidth', {'1': 1, '2': 2})
assert_raises((TypeError, ValueError), ax.set_prop_cycle,
linewidth=1, color='r')
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
|
mit
|
wxgeo/geophar
|
wxgeometrie/geolib/courbes.py
|
1
|
14345
|
# -*- coding: utf-8 -*-
##--------------------------------------#######
# Interpolation #
##--------------------------------------#######
# WxGeometrie
# Dynamic geometry, graph plotter, and more for French mathematics teachers.
# Copyright (C) 2005-2013 Nicolas Pourcelot
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from itertools import chain
from numpy import isnan, isinf, sign, arange, inf, append
from sympy import oo
from .objet import Objet, Argument, Ref
from .points import Point_generique
from .contexte import contexte
from .routines import distance_segment
from .. import param
from ..pylib import print_error
def inf_or_nan(x):
return isinf(x) or isnan(x)
class Courbe_generique(Objet):
"""Classe mère de toutes les courbes."""
_affichage_depend_de_la_fenetre = True
_style_defaut = param.courbes
def __init__(self, **styles):
Objet.__init__(self, **styles)
def _distance_inf(self, x, y, d):
if len(self.xarray) and len(self.yarray):
u, v = self.feuille.coo2pix(self.xarray, self.yarray)
u -= x
v -= y
m = min(u*u + v*v) # u*u is much faster than u**2 (sic!)
return m < d**2
return False
def _espace_vital(self):
if len(self.xarray) and len(self.yarray):
return (min(self.xarray), max(self.xarray), min(self.yarray), max(self.yarray))
@property
def xarray(self):
if self._Objet__figure_perimee:
self._creer_figure()
return self._xarray
@property
def yarray(self):
if self._Objet__figure_perimee:
self._creer_figure()
return self._yarray
class Courbe(Courbe_generique):
"""Une courbe de fonction.
L'expression doit être donnée en fonction de 'x'.
Exemple : '2x^2-1/x+1'
"""
_prefixe_nom = "C"
__fonction = fonction = Argument("Fonction")
def __new__(cls, *args, **kw):
if args and isinstance(args[0], Point_generique):
from .interpolations import Interpolation_lineaire, scipy_found,\
Interpolation_polynomiale_par_morceaux
cls = (Interpolation_polynomiale_par_morceaux if scipy_found
else Interpolation_lineaire)
return cls(*args, **kw)
return object.__new__(cls)
def __init__(self, fonction, **styles):
self.__fonction = fonction = Ref(fonction)
Courbe_generique.__init__(self, **styles)
def _creer_figure(self):
## self.__canvas__.graph.supprimer(self._representation)
self._representation = []
fenetre = self.feuille.fenetre_reellement_affichee()
pas = self.canvas.pas()
self._xarray = ()
self._yarray = ()
ancien_intervalle = None
# derniere_valeur = None
# derniere_fonction = None
# ancien_x = None
# ancien_y = None
for fonction, union, e_cach in zip(self.__fonction._Fonction__fonctions,
self.__fonction._Fonction__unions,
self.__fonction.style('extremites_cachees')):
for intervalle in union.intervalles:
x = intervalle.asarray(fenetre[0], fenetre[1], pas)[0]
if len(x):
#TODO: case where len(x) == 1 (and therefore x[1] does not exist)
y = fonction(x)
x0 = x[0]
xN = x[-1]
y0 = y[0]
yN = y[-1]
# wxAgg bug with excessively large values, or with NaN
x, y = self.supprimer_valeurs_extremes(x, y, fonction)
self._representation.append(self.rendu.ligne(x, y,
color = self.style("couleur"),
linestyle = self.style("style"),
linewidth = self.style("epaisseur"),
zorder = self.style("niveau"),
))
# _xarray and _yarray are not used for the graphical representation,
# but only for ._distance_inf()
self._xarray = append(self._xarray, x)
self._yarray = append(self._yarray, y)
if fenetre[0] < intervalle.inf < fenetre[1]:
if ancien_intervalle is None:
self._creer_debut_morceau(x, y, intervalle, e_cach)
else:
print(intervalle, y[0], abs(y[0] - ancien_y[-1]), abs(x[0] - ancien_x[-1]), contexte['tolerance'] , pas)
fusion = abs(x0 - ancien_xN) < contexte['tolerance'] \
and (abs(y0 - ancien_yN) < contexte['tolerance'] or (isnan(y0) and isnan(ancien_yN)))
if fusion:
# Merge the two pieces
print('Fusion', y0)
if isnan(y0):
print('Fusion avancée')
for i in range(10, 70, 10):
try:
val1 = ancienne_fonction(ancien_xN - 8**(-i))
val2 = ancienne_fonction(x0 + 8**(-i))
if abs(val1 - val2) < contexte['tolerance']:
self._append_point(x0, val1, plein = False)
break
except (ZeroDivisionError, ValueError):
print_error()
fusion = False
break
else:
fusion = False
elif not(ancien_intervalle.sup_inclus or intervalle.inf_inclus):
print('Fusion classique')
self._append_point(x[0], y[0], plein = False)
if not fusion:
self._creer_fin_morceau(ancien_x, ancien_y, ancien_intervalle, e_cach)
self._creer_debut_morceau(x, y, intervalle, e_cach)
ancien_x = x
ancien_y = y
ancien_xN = xN
ancien_yN = yN
ancien_intervalle = intervalle
ancienne_fonction = fonction
if ancien_intervalle is not None and fenetre[0] < ancien_intervalle.sup < fenetre[1]:
self._creer_fin_morceau(ancien_x, ancien_y, ancien_intervalle, e_cach)
def _creer_debut_morceau(self, x, y, intervalle, e_cach):
if len(y) == 0:
return
elif len(y) == 1:
if not inf_or_nan(y[0]):
self._append_point(x[0], y[0])
return
if self._extremite_cachee(x[0], e_cach):
return
if not(inf_or_nan(y[0]) or inf_or_nan(y[1])):
if intervalle.inf_inclus:
self._append_point(x[0], y[0])
else:
vec = x[1] - x[0], y[1] - y[0]
self._append_arc(x[0], y[0], vec)
# TODO: case where len(y) < 3
elif isnan(y[0]) and not (isnan(y[1]) or isnan(y[2])) :
if not intervalle.inf_inclus:
vec = x[2] - x[1], y[2] - y[1]
self._append_arc(x[1], y[1], vec)
def _creer_fin_morceau(self, x, y, intervalle, e_cach):
if len(y) <= 1:
return
if self._extremite_cachee(x[-1], e_cach):
return
if not(inf_or_nan(y[-1]) or inf_or_nan(y[-2])):
if intervalle.sup_inclus:
self._append_point(x[-1], y[-1])
else:
vec = x[-2] - x[-1], y[-2] - y[-1]
self._append_arc(x[-1], y[-1], vec)
# TODO: case where len(y) < 3
elif isnan(y[-1]) and not (isnan(y[-2]) or isnan(y[-3])) :
if not intervalle.inf_inclus:
vec = x[-3] - x[-2], y[-3] - y[-2]
self._append_arc(x[-2], y[-2], vec)
def _extremite_cachee(self, val, e_cach):
"Renvoie True si une variable de `e_cach` vaut `val`."
return val in [v.val for v in e_cach]
def _append_arc(self, x0, y0, vec):
if self.style("extremites"):
self._representation.append(self.rendu.arc(x0, y0, vec,
taille=self.style('taille_extremites'), color=self.style("couleur"),
linewidth=self.style("epaisseur")))
def _append_point(self, x0, y0, plein = True):
if self.style("extremites"):
self._representation.append(self.rendu.point(x0, y0, plein=plein,
taille=.8*self.style('taille_extremites'), color=self.style("couleur"),
markeredgewidth=self.style("epaisseur")))
def _supprimer_valeurs_extremes(self, x, y, fonction, i, j):
"""Lorsque les valeurs aux bornes sont indéterminées (NaN), infinies (+/-Inf)
ou très éloignées de zéro (2e200), on cherche à les convertir en une valeur
raisonnable pour la fenêtre d'affichage.
La principale difficulté est de déterminer **numériquement** la limite probable.
On commence par regarder la valeur calculée par numpy à la borne considérée :
* Si la valeur est +/-Inf, il faut étudier son signe.
En effet, numpy ne peut généralement par faire la différence entre des calculs
du type 1/0+ et 1/0- (ex: 1/(x-3) en 3+ et 3-).
L'idée est la suivante : si les valeurs diminuent en se rapprochant de la borne,
alors la limite est -Inf. De même, si elles augmentent, la limite est +Inf.
On retourne alors une valeur en dehors de la fenêtre d'affichage, qui simule l'infini.
* Si le résultat est un nombre très éloigné de zéro, on le tronque tout en restant
en dehors de la fenêtre d'affichage, de manière à simuler l'infini.
En effet, le traceur de matplotlib réagit mal aux valeurs "extrêmes".
* Enfin si le résultat est de type NaN, on s'éloigne légèrement (puis de plus en plus vite)
de la borne, et on reitère, dans une limite de 20 itérations.
"""
x0 = x[i]; y0 = y[i]
x1 = x[j]; y1 = y[j]
k = 2**arange(-20., 0.)
entre = k*(x1 - x0) + x0 # (1 - k)*x0 + k*x1
xk = chain([x0], entre, [x1])
yk = chain([y0], fonction(entre), [y1])
y_finis = [] # dernières valeurs finies
infini = False
xi_infini = None
for xi, yi in zip(xk, yk):
if infini:
if not inf_or_nan(yi):
y_finis.append(yi)
if len(y_finis) == 2:
x0 = xi_infini
y0 = self._rogner_valeur(sign(y_finis[0] - y_finis[1])*inf)
break
else:
if isinf(yi):
infini = True
xi_infini = xi
elif not isnan(yi):
x0 = xi
y0 = self._rogner_valeur(yi)
break
x[i] = x0
y[i] = y0
return x, y
def _rogner_valeur(self, y0):
"Remplace -inf et +inf par des valeurs numériques dépassant la fenêtre."
if isnan(y0):
return y0
xmin, xmax, ymin, ymax = self.feuille.fenetre
decalage = 100*(ymax - ymin)
if isinf(y0):
return (ymin - decalage) if (y0 < 0) else (ymax + decalage)
# wxAgg bug with excessively large values
return max(min(y0, ymax + decalage), ymin - decalage)
# assert (ymin - decalage < y0 < ymax + decalage)
def supprimer_valeurs_extremes(self, x, y, fonction):
x, y = self._supprimer_valeurs_extremes(x, y, fonction, 0, 1)
x, y = self._supprimer_valeurs_extremes(x, y, fonction, -1, -2)
return x, y
@property
def xmin(self):
return self.__fonction._Fonction__unions[0].intervalles[0].inf
@property
def xmax(self):
return self.__fonction._Fonction__unions[-1].intervalles[-1].sup
def _espace_vital(self):
xmin = self.xmin
xmax = self.xmax
if xmin == -oo:
xmin = None
if xmax == oo:
xmax = None
return (xmin, xmax, min(self.yarray), max(self.yarray))
def _distance_inf(self, x, y, d):
P = x, y
xm = self.feuille.pix2coo(x - d, y)[0]
xM = self.feuille.pix2coo(x + d, y)[0]
xarray = self.xarray
filtre = (xm < xarray) & (xarray < xM)
xa, ya = self.feuille.coo2pix(xarray[filtre], self.yarray[filtre])
A = None
for x, y in zip(xa, ya):
B = A
A = x, y
if distance_segment(P, A, B, d):
return True
return False
@staticmethod
def _convertir(objet):
"Convertit un objet en fonction."
return NotImplemented
def _update(self, objet):
if not isinstance(objet, Courbe):
objet = self._convertir(objet)
if isinstance(objet, Courbe):
self.fonction = objet.fonction
else:
raise TypeError("L'objet n'est pas une courbe.")
|
gpl-2.0
|
ChinaQuants/bokeh
|
bokeh/sampledata/daylight.py
|
45
|
2522
|
"""Daylight hours from http://www.sunrisesunset.com """
from __future__ import absolute_import
import re
import datetime
import requests
from six.moves import xrange
from os.path import join, abspath, dirname
import pandas as pd
url = "http://sunrisesunset.com/calendar.asp"
r0 = re.compile("<[^>]+>| |[\r\n\t]")
r1 = re.compile(r"(\d+)(DST Begins|DST Ends)?Sunrise: (\d+):(\d\d)Sunset: (\d+):(\d\d)")
def fetch_daylight_hours(lat, lon, tz, dst, year):
"""Fetch daylight hours from sunrisesunset.com for a given location.
Parameters
----------
lat : float
Location's latitude.
lon : float
Location's longitude.
tz : int or float
Time zone offset from UTC. Use floats for half-hour time zones.
dst : int
Daylight saving type, e.g. 0 -> none, 1 -> North America, 2 -> Europe.
See sunrisesunset.com/custom.asp for other possible values.
year : int
Year (1901..2099).
"""
daylight = []
summer = 0 if lat >= 0 else 1
for month in xrange(1, 12+1):
args = dict(url=url, lat=lat, lon=lon, tz=tz, dst=dst, year=year, month=month)
response = requests.get("%(url)s?comb_city_info=_;%(lon)s;%(lat)s;%(tz)s;%(dst)s&month=%(month)s&year=%(year)s&time_type=1&wadj=1" % args)
entries = r1.findall(r0.sub("", response.text))
for day, note, sunrise_hour, sunrise_minute, sunset_hour, sunset_minute in entries:
if note == "DST Begins":
summer = 1
elif note == "DST Ends":
summer = 0
date = datetime.date(year, month, int(day))
sunrise = datetime.time(int(sunrise_hour), int(sunrise_minute))
sunset = datetime.time(int(sunset_hour), int(sunset_minute))
daylight.append([date, sunrise, sunset, summer])
return pd.DataFrame(daylight, columns=["Date", "Sunrise", "Sunset", "Summer"])
# daylight_warsaw_2013 = fetch_daylight_hours(52.2297, -21.0122, 1, 2, 2013)
# daylight_warsaw_2013.to_csv("bokeh/sampledata/daylight_warsaw_2013.csv", index=False)
def load_daylight_hours(file):
path = join(dirname(abspath(__file__)), file)
df = pd.read_csv(path, parse_dates=["Date", "Sunrise", "Sunset"])
df["Date"] = df.Date.map(lambda x: x.date())
df["Sunrise"] = df.Sunrise.map(lambda x: x.time())
df["Sunset"] = df.Sunset.map(lambda x: x.time())
return df
daylight_warsaw_2013 = load_daylight_hours("daylight_warsaw_2013.csv")
|
bsd-3-clause
|
cbertinato/pandas
|
pandas/tests/tslibs/test_array_to_datetime.py
|
1
|
5949
|
from datetime import date, datetime
from dateutil.tz.tz import tzoffset
import numpy as np
import pytest
import pytz
from pandas._libs import iNaT, tslib
from pandas.compat.numpy import np_array_datetime64_compat
from pandas import Timestamp
import pandas.util.testing as tm
@pytest.mark.parametrize("data,expected", [
(["01-01-2013", "01-02-2013"],
["2013-01-01T00:00:00.000000000-0000",
"2013-01-02T00:00:00.000000000-0000"]),
(["Mon Sep 16 2013", "Tue Sep 17 2013"],
["2013-09-16T00:00:00.000000000-0000",
"2013-09-17T00:00:00.000000000-0000"])
])
def test_parsing_valid_dates(data, expected):
arr = np.array(data, dtype=object)
result, _ = tslib.array_to_datetime(arr)
expected = np_array_datetime64_compat(expected, dtype="M8[ns]")
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dt_string, expected_tz", [
["01-01-2013 08:00:00+08:00", 480],
["2013-01-01T08:00:00.000000000+0800", 480],
["2012-12-31T16:00:00.000000000-0800", -480],
["12-31-2012 23:00:00-01:00", -60]
])
def test_parsing_timezone_offsets(dt_string, expected_tz):
# All of these datetime strings with offsets are equivalent
# to the same datetime after the timezone offset is added.
arr = np.array(["01-01-2013 00:00:00"], dtype=object)
expected, _ = tslib.array_to_datetime(arr)
arr = np.array([dt_string], dtype=object)
result, result_tz = tslib.array_to_datetime(arr)
tm.assert_numpy_array_equal(result, expected)
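    # The identity check below relies on pytz.FixedOffset returning a cached
    # instance per offset value (an assumption about pytz's memoization).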
assert result_tz is pytz.FixedOffset(expected_tz)
def test_parsing_non_iso_timezone_offset():
dt_string = "01-01-2013T00:00:00.000000000+0000"
arr = np.array([dt_string], dtype=object)
result, result_tz = tslib.array_to_datetime(arr)
expected = np.array([np.datetime64("2013-01-01 00:00:00.000000000")])
tm.assert_numpy_array_equal(result, expected)
assert result_tz is pytz.FixedOffset(0)
def test_parsing_different_timezone_offsets():
# see gh-17697
data = ["2015-11-18 15:30:00+05:30", "2015-11-18 15:30:00+06:30"]
data = np.array(data, dtype=object)
result, result_tz = tslib.array_to_datetime(data)
expected = np.array([datetime(2015, 11, 18, 15, 30,
tzinfo=tzoffset(None, 19800)),
datetime(2015, 11, 18, 15, 30,
tzinfo=tzoffset(None, 23400))],
dtype=object)
tm.assert_numpy_array_equal(result, expected)
assert result_tz is None
@pytest.mark.parametrize("data", [
["-352.737091", "183.575577"],
["1", "2", "3", "4", "5"]
])
def test_number_looking_strings_not_into_datetime(data):
# see gh-4601
#
# These strings don't look like datetimes, so
# they shouldn't be attempted to be converted.
arr = np.array(data, dtype=object)
result, _ = tslib.array_to_datetime(arr, errors="ignore")
tm.assert_numpy_array_equal(result, arr)
@pytest.mark.parametrize("invalid_date", [
date(1000, 1, 1),
datetime(1000, 1, 1),
"1000-01-01",
"Jan 1, 1000",
np.datetime64("1000-01-01")])
@pytest.mark.parametrize("errors", ["coerce", "raise"])
def test_coerce_outside_ns_bounds(invalid_date, errors):
arr = np.array([invalid_date], dtype="object")
kwargs = dict(values=arr, errors=errors)
if errors == "raise":
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(ValueError, match=msg):
tslib.array_to_datetime(**kwargs)
else: # coerce.
result, _ = tslib.array_to_datetime(**kwargs)
expected = np.array([iNaT], dtype="M8[ns]")
tm.assert_numpy_array_equal(result, expected)
def test_coerce_outside_ns_bounds_one_valid():
arr = np.array(["1/1/1000", "1/1/2000"], dtype=object)
result, _ = tslib.array_to_datetime(arr, errors="coerce")
expected = [iNaT, "2000-01-01T00:00:00.000000000-0000"]
expected = np_array_datetime64_compat(expected, dtype="M8[ns]")
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("errors", ["ignore", "coerce"])
def test_coerce_of_invalid_datetimes(errors):
arr = np.array(["01-01-2013", "not_a_date", "1"], dtype=object)
kwargs = dict(values=arr, errors=errors)
if errors == "ignore":
# Without coercing, the presence of any invalid
# dates prevents any values from being converted.
result, _ = tslib.array_to_datetime(**kwargs)
tm.assert_numpy_array_equal(result, arr)
else: # coerce.
# With coercing, the invalid dates becomes iNaT
result, _ = tslib.array_to_datetime(arr, errors="coerce")
expected = ["2013-01-01T00:00:00.000000000-0000",
iNaT,
iNaT]
tm.assert_numpy_array_equal(
result,
np_array_datetime64_compat(expected, dtype="M8[ns]"))
def test_to_datetime_barely_out_of_bounds():
# see gh-19382, gh-19529
#
# Close enough to bounds that dropping nanos
# would result in an in-bounds datetime.
arr = np.array(["2262-04-11 23:47:16.854775808"], dtype=object)
msg = "Out of bounds nanosecond timestamp: 2262-04-11 23:47:16"
with pytest.raises(tslib.OutOfBoundsDatetime, match=msg):
tslib.array_to_datetime(arr)
class SubDatetime(datetime):
pass
@pytest.mark.parametrize("data,expected", [
([SubDatetime(2000, 1, 1)],
["2000-01-01T00:00:00.000000000-0000"]),
([datetime(2000, 1, 1)],
["2000-01-01T00:00:00.000000000-0000"]),
([Timestamp(2000, 1, 1)],
["2000-01-01T00:00:00.000000000-0000"])
])
def test_datetime_subclass(data, expected):
# GH 25851
# ensure that subclassed datetime works with
# array_to_datetime
arr = np.array(data, dtype=object)
result, _ = tslib.array_to_datetime(arr)
expected = np_array_datetime64_compat(expected, dtype="M8[ns]")
tm.assert_numpy_array_equal(result, expected)
|
bsd-3-clause
|
cureHsu/angr
|
angr/functionmanager.py
|
3
|
22333
|
import logging
import networkx
import string
import claripy
l = logging.getLogger(name="angr.functionmanager")
class Function(object):
'''
A representation of a function and various information about it.
'''
def __init__(self, function_manager, addr, name=None, syscall=False):
'''
Function constructor
@param addr The address of the function
@param name (Optional) The name of the function
        @param syscall (Optional) Whether this function is a syscall or not
'''
self._transition_graph = networkx.DiGraph()
self._local_transition_graph = None
self._ret_sites = set()
self._call_sites = {}
self._retn_addr_to_call_site = {}
self._addr = addr
self._function_manager = function_manager
self.name = name
self.is_syscall = syscall
if self.name is None:
# Try to get a name from project.loader
self.name = self._function_manager._project.loader.find_symbol_name(addr)
if self.name is None:
self.name = self._function_manager._project.loader.find_plt_stub_name(addr)
if self.name is not None:
self.name = 'plt.' + self.name
if self.name is None:
self.name = 'sub_%x' % addr
# Register offsets of those arguments passed in registers
self._argument_registers = []
# Stack offsets of those arguments passed in stack variables
self._argument_stack_variables = []
# These properties are set by VariableManager
self._bp_on_stack = False
self._retaddr_on_stack = False
self._sp_delta = 0
# Calling convention
self.cc = None
# Whether this function returns or not. `None` means it's not determined yet
self.returning = None
self.prepared_registers = set()
self.prepared_stack_variables = set()
self.registers_read_afterwards = set()
self.blocks = { addr }
@property
def has_unresolved_jumps(self):
for addr in self._transition_graph.nodes():
if addr in self._function_manager._cfg._unresolved_indirect_jumps:
b = self._function_manager._project.factory.block(addr)
if b.vex.jumpkind == 'Ijk_Boring':
return True
return False
@property
def has_unresolved_calls(self):
for addr in self._transition_graph.nodes():
if addr in self._function_manager._cfg._unresolved_indirect_jumps:
b = self._function_manager._project.factory.block(addr)
if b.vex.jumpkind == 'Ijk_Call':
return True
return False
@property
def operations(self):
'''
        All of the operations performed by this function.
'''
operations = [ ]
for b in self.basic_blocks:
if b in self._function_manager._project.loader.memory:
try:
operations.extend(self._function_manager._project.factory.block(b).vex.operations)
except AngrTranslationError:
continue
return operations
@property
def code_constants(self):
'''
        All of the constants used by this function's code.
'''
# TODO: remove link register values
constants = [ ]
for b in self.basic_blocks:
if b in self._function_manager._project.loader.memory:
try:
constants.extend(v.value for v in self._function_manager._project.factory.block(b).vex.constants)
except AngrTranslationError:
continue
return constants
def string_references(self, minimum_length=1):
"""
        All of the constant string references used by this function.
        :param minimum_length: the minimum length of strings to find (default is 1)
        :return: a list of tuples of (address, string) where address is the location of the string in memory
"""
strings = []
memory = self._function_manager._project.loader.memory
# get known instruction addresses and call targets
# these addresses cannot be string references, but show up frequently in the runtime values
known_executable_addresses = set()
for b in self.basic_blocks:
if b in memory:
sirsb = self._function_manager._project.factory.block(b)
known_executable_addresses.update(sirsb.instruction_addrs)
for node in self._function_manager._cfg.nodes():
known_executable_addresses.add(node.addr)
# loop over all local runtime values and check if the value points to a printable string
for addr in self.partial_local_runtime_values:
if not isinstance(addr, claripy.fp.FPV) and addr in memory:
                # check that the address isn't pointing to known executable code
# and that it isn't an indirect pointer to known executable code
try:
possible_pointer = memory.read_addr_at(addr)
if addr not in known_executable_addresses and possible_pointer not in known_executable_addresses:
# build string
stn = ""
offset = 0
current_char = memory[addr + offset]
while current_char in string.printable:
stn += current_char
offset += 1
current_char = memory[addr + offset]
# check that the string was a null terminated string with minimum length
if current_char == "\x00" and len(stn) >= minimum_length:
strings.append((addr, stn))
except KeyError:
pass
return strings
@property
def partial_local_runtime_values(self):
"""
Tries to find all runtime values of this function which do not come from inputs.
These values are generated by starting from a blank state and reanalyzing the basic blocks once each.
        Function calls are skipped, and back edges are never taken, so these values are often unreliable.
This function is good at finding simple constant addresses which the function will use or calculate.
:return: a set of constants
"""
constants = set()
if not self._function_manager._project.loader.main_bin.contains_addr(self.startpoint):
return constants
# reanalyze function with a new initial state (use persistent registers)
initial_state = self._function_manager._cfg.get_any_irsb(self.startpoint).initial_state
fresh_state = self._function_manager._project.factory.blank_state(mode="fastpath")
for reg in initial_state.arch.persistent_regs + ['ip']:
fresh_state.registers.store(reg, initial_state.registers.load(reg))
# process the nodes in a breadth-first order keeping track of which nodes have already been analyzed
analyzed = set()
q = [fresh_state]
analyzed.add(fresh_state.se.any_int(fresh_state.ip))
while len(q) > 0:
state = q.pop()
# don't trace into simprocedures
if self._function_manager._project.is_hooked(state.se.any_int(state.ip)):
continue
# don't trace outside of the binary
if not self._function_manager._project.loader.main_bin.contains_addr(state.se.any_int(state.ip)):
continue
# get runtime values from logs of successors
p = self._function_manager._project.factory.path(state)
p.step()
for succ in p.next_run.flat_successors + p.next_run.unsat_successors:
for a in succ.log.actions:
for ao in a.all_objects:
if not isinstance(ao.ast, claripy.Base):
constants.add(ao.ast)
elif not ao.ast.symbolic:
constants.add(succ.se.any_int(ao.ast))
# add successors to the queue to analyze
if not succ.se.symbolic(succ.ip):
succ_ip = succ.se.any_int(succ.ip)
if succ_ip in self.basic_blocks and succ_ip not in analyzed:
analyzed.add(succ_ip)
q.insert(0, succ)
# force jumps to missing successors
# (this is a slightly hacky way to force it to explore all the nodes in the function)
missing = set(self.transition_graph.successors(state.se.any_int(state.ip))) - analyzed
for succ_addr in missing:
l.info("Forcing jump to missing successor: 0x%x", succ_addr)
if succ_addr not in analyzed:
all_successors = p.next_run.unconstrained_successors + p.next_run.flat_successors + \
p.next_run.unsat_successors
if len(all_successors) > 0:
# set the ip of a copied successor to the successor address
succ = all_successors[0].copy()
succ.ip = succ_addr
analyzed.add(succ_addr)
q.insert(0, succ)
else:
l.warning("Could not reach successor: 0x%x", succ_addr)
return constants
@property
def runtime_values(self):
'''
All of the concrete values used by this function at runtime (i.e., including passed-in arguments and global values).
'''
constants = set()
for b in self.basic_blocks:
for sirsb in self._function_manager._cfg.get_all_irsbs(b):
for s in sirsb.successors + sirsb.unsat_successors:
for a in s.log.actions:
for ao in a.all_objects:
if not isinstance(ao.ast, claripy.Base):
constants.add(ao.ast)
elif not ao.ast.symbolic:
constants.add(s.se.any_int(ao.ast))
return constants
@property
def num_arguments(self):
return len(self._argument_registers) + len(self._argument_stack_variables)
def __contains__(self, val):
if isinstance(val, (int, long)):
return val in self._transition_graph
else:
return False
def __str__(self):
if self.name is None:
s = 'Function [0x%x]\n' % (self._addr)
else:
s = 'Function %s [0x%x]\n' % (self.name, self._addr)
s += ' Syscall: %s\n' % self.is_syscall
s += ' SP difference: %d\n' % self.sp_delta
s += ' Has return: %s\n' % self.has_return
s += ' Returning: %s\n' % ('Unknown' if self.returning is None else self.returning)
s += ' Arguments: reg: %s, stack: %s\n' % \
(self._argument_registers,
self._argument_stack_variables)
s += ' Blocks: [%s]\n' % ", ".join([hex(i) for i in self.blocks])
s += " Calling convention: %s" % self.cc
return s
def __repr__(self):
if self.name is None:
return '<Function 0x%x>' % (self._addr)
else:
return '<Function %s (0x%x)>' % (self.name, self._addr)
@property
def startpoint(self):
return self._addr
@property
def endpoints(self):
return list(self._ret_sites)
def clear_transition_graph(self):
self.blocks = { self._addr }
self._transition_graph = networkx.DiGraph()
self._transition_graph.add_node(self._addr)
self._local_transition_graph = None
def transit_to(self, from_addr, to_addr):
'''
Registers an edge between basic blocks in this function's transition graph
@param from_addr The address of the basic block that control
flow leaves during this transition
@param to_addr The address of the basic block that control
flow enters during this transition
'''
self.blocks.add(from_addr)
self.blocks.add(to_addr)
self._transition_graph.add_edge(from_addr, to_addr, type='transition')
def call_to(self, from_addr, to_addr, return_target, syscall=False):
"""
Registers an edge between the caller basic block and callee basic block
:param from_addr: The address of the basic block that control flow leaves during the transition
:param to_addr: The address of the basic block that control flow enters during the transition, which is also
the address of the target function to call
:param return_target: The address of instruction to execute after returning from the function. `None` indicates
the call does not return.
        :param syscall: Whether this call is a syscall or not.
"""
self.blocks.add(from_addr)
if syscall:
self._transition_graph.add_edge(from_addr, to_addr, type='syscall')
else:
self._transition_graph.add_edge(from_addr, to_addr, type='call')
if return_target is not None:
self._transition_graph.add_edge(from_addr, return_target, type='fake_return')
def return_from_call(self, src_function_addr, to_addr):
self.blocks.add(to_addr)
self._transition_graph.add_edge(src_function_addr, to_addr, type='return_from_call')
def add_block(self, addr):
'''
Registers a basic block as part of this function
@param addr The address of the basic block to add
'''
self.blocks.add(addr)
self._transition_graph.add_node(addr)
def add_return_site(self, return_site_addr):
'''
Registers a basic block as a site for control flow to return from this function
@param return_site_addr The address of the basic block ending with a return
'''
self._ret_sites.add(return_site_addr)
def add_call_site(self, call_site_addr, call_target_addr, retn_addr):
'''
Registers a basic block as calling a function and returning somewhere
@param call_site_addr The basic block that ends in a call
@param call_target_addr The target of said call
@param retn_addr The address that said call will return to
'''
self._call_sites[call_site_addr] = (call_target_addr, retn_addr)
self._retn_addr_to_call_site[retn_addr] = call_site_addr
def get_call_sites(self):
'''
Gets a list of all the basic blocks that end in calls
        @returns The addresses of the basic blocks that end in calls
'''
return self._call_sites.keys()
def get_call_target(self, callsite_addr):
'''
Get the target of a call
@param callsite_addr The address of the basic block that ends in
a call
@returns The target of said call
'''
if callsite_addr in self._call_sites:
return self._call_sites[callsite_addr][0]
return None
def get_call_return(self, callsite_addr):
'''
Get the hypothetical return address of a call
@param callsite_addr The address of the basic block that ends in
a call
@returns The likely return target of said call
'''
if callsite_addr in self._call_sites:
return self._call_sites[callsite_addr][1]
return None
@property
def basic_blocks(self):
return self.blocks
@property
def transition_graph(self):
return self._transition_graph
@property
def local_transition_graph(self):
"""
        Return a local transition graph that only contains nodes in the current function.
"""
if self._local_transition_graph is not None:
return self._local_transition_graph
g = networkx.DiGraph()
for src, dst, data in self._transition_graph.edges_iter(data=True):
if src in self.blocks and dst in self.blocks:
g.add_edge(src, dst, attr_dict=data)
elif src in self.blocks:
g.add_node(src)
elif dst in self.blocks:
g.add_node(dst)
for node in self._transition_graph.nodes_iter():
if node in self.blocks:
g.add_node(node)
self._local_transition_graph = g
return g
def dbg_print(self):
'''
Returns a representation of the list of basic blocks in this function
'''
return "[%s]" % (', '.join(('0x%08x' % n) for n in self._transition_graph.nodes()))
def dbg_draw(self, filename):
'''
Draw the graph and save it to a PNG file
'''
import matplotlib.pyplot as pyplot # pylint: disable=import-error
tmp_graph = networkx.DiGraph()
for edge in self._transition_graph.edges():
node_a = "0x%08x" % edge[0]
node_b = "0x%08x" % edge[1]
if node_b in self._ret_sites:
node_b += "[Ret]"
if node_a in self._call_sites:
node_a += "[Call]"
tmp_graph.add_edge(node_a, node_b)
pos = networkx.graphviz_layout(tmp_graph, prog='fdp')
networkx.draw(tmp_graph, pos, node_size=1200)
pyplot.savefig(filename)
def add_argument_register(self, reg_offset):
'''
Registers a register offset as being used as an argument to the function
@param reg_offset The offset of the register to register
'''
if reg_offset in self._function_manager.arg_registers and \
reg_offset not in self._argument_registers:
self._argument_registers.append(reg_offset)
def add_argument_stack_variable(self, stack_var_offset):
if stack_var_offset not in self._argument_stack_variables:
self._argument_stack_variables.append(stack_var_offset)
@property
def arguments(self):
if self.cc is None:
return self._argument_registers, self._argument_stack_variables
else:
return self.cc.arguments
@property
def bp_on_stack(self):
return self._bp_on_stack
@bp_on_stack.setter
def bp_on_stack(self, value):
self._bp_on_stack = value
@property
def retaddr_on_stack(self):
return self._retaddr_on_stack
@retaddr_on_stack.setter
def retaddr_on_stack(self, value):
self._retaddr_on_stack = value
@property
def sp_delta(self):
return self._sp_delta
@sp_delta.setter
def sp_delta(self, value):
self._sp_delta = value
@property
def has_return(self):
return len(self._ret_sites) > 0
class FunctionManager(object):
'''
This is a function boundaries management tool. It takes in intermediate
results during CFG generation, and manages a function map of the binary.
'''
def __init__(self, project, cfg):
self._project = project
self._cfg = cfg
# A map that uses function starting address as the key, and maps
# to a function class
self._function_map = {}
self.interfunction_graph = networkx.DiGraph()
# Registers used for passing arguments around
self.arg_registers = project.arch.argument_registers
def _create_function_if_not_exist(self, function_addr):
if function_addr not in self._function_map:
self._function_map[function_addr] = Function(self, function_addr)
self._function_map[function_addr].add_block(function_addr)
def call_to(self, function_addr, from_addr, to_addr, retn_addr, syscall=False):
self._create_function_if_not_exist(function_addr)
self._create_function_if_not_exist(to_addr)
self._function_map[function_addr].call_to(from_addr, to_addr, retn_addr, syscall=syscall)
self._function_map[function_addr].add_call_site(from_addr, to_addr, retn_addr)
self.interfunction_graph.add_edge(function_addr, to_addr)
def return_from(self, function_addr, from_addr, to_addr=None): #pylint:disable=unused-argument
self._create_function_if_not_exist(function_addr)
self._function_map[function_addr].add_return_site(from_addr)
def transit_to(self, function_addr, from_addr, to_addr):
self._create_function_if_not_exist(function_addr)
self._function_map[function_addr].transit_to(from_addr, to_addr)
def return_from_call(self, function_addr, src_function_addr, to_addr):
self._create_function_if_not_exist(function_addr)
self._function_map[function_addr].return_from_call(src_function_addr, to_addr)
@property
def functions(self):
return self._function_map
def function(self, addr=None, name=None, create_if_not_exist=False):
if addr:
if addr in self._function_map:
return self._function_map[addr]
elif create_if_not_exist:
self._create_function_if_not_exist(addr)
return self._function_map[addr]
elif name:
funcs = [ i for i in self._function_map.values() if i.name == name ]
if funcs:
return funcs[0]
else:
return None
else:
return None
def dbg_print(self):
result = ''
for func_addr, func in self._function_map.items():
f_str = "Function 0x%08x\n%s\n" % (func_addr, func.dbg_print())
result += f_str
return result
def dbg_draw(self):
for func_addr, func in self._function_map.items():
filename = "dbg_function_0x%08x.png" % func_addr
func.dbg_draw(filename)
from .errors import AngrTranslationError
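# Minimal usage sketch (hypothetical addresses; assumes a CFG analysis driving
# these callbacks, as angr's CFG construction does):
#
#   fm = FunctionManager(project, cfg)
#   fm.call_to(function_addr=0x400000, from_addr=0x400010, to_addr=0x400500, retn_addr=0x400015)
#   print fm.function(addr=0x400000)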
|
bsd-2-clause
|
mlyundin/scikit-learn
|
examples/linear_model/plot_ard.py
|
248
|
2622
|
"""
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit a regression model with Automatic Relevance Determination (ARD) regression.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha_ of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
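# Generative model used above: y = X w + noise, with w_i ~ N(0, 1/lambda_) on the
# 10 relevant features (zero elsewhere) and noise ~ N(0, 1/alpha_).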
###############################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot the true weights, the estimated weights and the histogram of the
# weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="ARD estimate")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
|
bsd-3-clause
|
airanmehr/bio
|
Scripts/TimeSeriesPaper/Simulation/createPool.py
|
1
|
5695
|
'''
Copyleft Dec 4, 2015 Arya Iranmehr, PhD Student, Bafna Lab, UC San Diego, Email: [email protected]
'''
import numpy as np;
np.set_printoptions(linewidth=200, precision=5, suppress=True)
import pandas as pd; pd.options.display.max_rows=30;pd.options.display.expand_frame_repr=False
import os; home=os.path.expanduser('~') +'/'
import sys;sys.path.insert(1,'/home/arya/workspace/bio/')
import subprocess
from Utils import Simulation
from multiprocessing import Pool
import Utils.Util as utl
import CLEAR.Libs.Markov as mkv
import optparse, socket, datetime
parser = optparse.OptionParser()
parser.add_option('-n', '--name', action="store", dest="name", help="method can be [TimeSeries,Chrom]")
parser.add_option('-o', '--shutstd', action="store", dest="shutstd", help="takes 0,1", default=0, type='int')
options, args = parser.parse_args()
options.runname = 'SimulationPool.{}.'.format(options.name) + str(datetime.datetime.now()).split('.')[0]
print 'Running {}.'.format(options.runname)
if options.shutstd:sys.stderr = sys.stdout = open(utl.stdoutpath + options.runname + '.out', 'w')
print 'Running on', socket.gethostname(), str(datetime.datetime.now()), options
sys.stdout.flush()
def computeEmissions(mypath=utl.simoutpath + 'TimeSeries/simpop/'):
print "computing emissions..."
E = []
depths = [30, 100, 300]
for depth in depths:
cd = []
for f in [f for f in os.listdir(mypath) if os.path.isfile(os.path.join(mypath, f))]:
sim = pd.read_pickle(mypath + f)
print f
cd += [pd.concat([pd.Series(sim.C.loc[depth].reshape(-1)), pd.Series(sim.D.loc[depth].reshape(-1))],
axis=1).drop_duplicates()]
cd = pd.concat(cd).drop_duplicates()
cd = cd.apply(lambda x: (x[0], x[1]), axis=1)
cd.index = index = pd.MultiIndex.from_tuples(cd.values, names=['c', 'd'])
nu = pd.Series(np.arange(0, 1.0000001, 1. / (2. * sim.N)), index=np.arange(0, 1.0000001, 1. / (2. * sim.N)))
a = cd.apply(lambda x: mkv.getStateLikelihoods(x, nu)).sort_index()
E += [a]
pd.Series(E, index=depths).to_pickle(utl.outpath + 'markov/Emissions.df')
def generateSimulation(param):
print param
  if 'generationStep' not in param.keys(): param['generationStep']=10
if 'maxGeneration' not in param.keys(): param['maxGeneration']=50
if 'save' not in param.keys(): param['save']=True
for s in param['S']:
try:
filename = '{}{}/msms/L{:E}.{:E}.msms'.format(utl.simoutpath, param['ModelName'], param['L'], param['i'])
Simulation.Simulation(s=s, experimentID=param['i'], msmsFile=filename, L=param['L'], numReplicates=3, initialCarrierFreq=param['nu'], save=param['save'],
maxGeneration=param['maxGeneration'], ExperimentName=param['ModelName'], generationStep=param['generationStep'])
print 'L,nu,s,i:', param['L'],param['nu'],s, param['i']
sys.stdout.flush()
except:
import traceback
print 'Error************: L,nu,s,i:', param['L'],param['nu'],s, param['i']
traceback.print_exc()
def createOneMSMS(param,forceToHaveSoftFreq ):
theta=2*param['Ne']*param['mu']*param['L']; rho=2*param['Ne']*param['r']*param['L']
path = '{}{}/msms/'.format(utl.simoutpath, param['ModelName'])
utl.mkdir(path)
if isinstance(param['i'],(int,float,long)):
filename = '{}L{:E}.{:E}.msms'.format(path, param['L'], param['i'])
else:
filename = '{}L{:E}.{}.msms'.format(path, param['L'], param['i'])
cmd = "java -jar -Xmx2g ~/bin/msms/lib/msms.jar -ms {} 1 -t {:.0f} -r {:.0f} {:.0f} -oFP 0.000000000000E00 > {}".\
format(param['n'], theta, rho, param['L'], filename)
subprocess.call(cmd,shell=True)
  if forceToHaveSoftFreq and not (Simulation.MSMS.load(filename)[0].mean(0) == 0.1).sum(): # make sure initial freq 0.1 exists
    createOneMSMS(param, forceToHaveSoftFreq)
def getModelParam(name):
if name=='TimeSeries':
param={'L':int(5e4), 'ModelName':'TimeSeries','numProc': 1,'S' : [0.1, 0.075, 0.05, 0.025], 'numExp' : 1000,\
'n':200, 'mu':2*1e-9, 'Ne':1e6, 'r':4*1e-9}
elif name=='Chrom':
param={'L':int(1e7), 'ModelName':'Chrom','numProc': 4,'S' : [0.1, 0.075, 0.05, 0.025], 'numExp' : 100,\
'n':200, 'mu':2*1e-9, 'Ne':1e6, 'r':4*1e-9}
elif name=='Null':
param={'L':int(5e6), 'ModelName':'Null','numProc': 4,'S' : [0], 'numExp' : 1,'nu':0.005,
'n':200, 'mu':2*1e-9, 'Ne':1e6, 'r':4*1e-9, 'maxGeneration':59, 'generationStep':1, 'save':False}
else:
print 'Invalid Model Name'
exit()
return param
def createSimulations(name):
param=getModelParam(name)
print pd.Series(param)
params=[param.copy()for _ in range(param['numExp'])]
for i,p in enumerate(params): p.update({'i':i})
#Pool(param['numProc']).map(createOneMSMS, params)
for p in params: p.update({'nu':0.1})
Pool(param['numProc']).map(generateSimulation, params)
for p in params: p.update({'nu':0.005})
Pool(param['numProc']).map(generateSimulation, params)
for p in params: p.update({'S':[0]})
Pool(param['numProc']).map(generateSimulation, params)
# computeEmissions()
# computeLinkage(0)
# Pool(4).map(computeLinkage,range(100))
if __name__ == '__main__':
if options.name is None: options.name='Chrom'
#scan(300)
# SimulationsToDF()
# createSimulations(options.name)
# param=getModelParam('Null')
# print pd.Series(param)
# sampleReads(getGens(,d),d)
# CD = pd.read_pickle(utl.outpath + 'real/CDEidx.df')
#
# reload(Simulation)
|
mit
|
google/eclipse2017
|
movie/daemon/app/pipeline_stats.py
|
1
|
4437
|
#
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import logging
import itertools
import matplotlib # Force matplotlib to not use any Xwindows backend
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from google.cloud import datastore, storage
from common import datastore_schema as ds
from common import config, constants
from common.cluster_points import cluster_points
from multiprocessing import Pool
from sklearn.preprocessing import StandardScaler
from mpl_toolkits.basemap import Basemap
class Pipeline_Stats():
def __init__(self, datastore_client, storage_client):
self.datastore = datastore_client
self.storage = storage_client
def get_clusters(self, fnames):
"""
Returns list of clusters and outliers
"""
coordinates, fnames = self._get_gps_coordinates(fnames)
if coordinates:
clusters = self._get_clusters(fnames, coordinates)
else:
return None
return clusters
def _get_clusters(self, fnames, coordinates):
"""
Returns list of lists of files clustered in a geographic area
"""
# Get number of clusters and list of labels
db = cluster_points(coordinates)
n_clusters, cluster_labels = count_clusters(db, eps=constants.CLUSTER_RADIUS_DEGREES, min_samples=constants.MIN_PHOTOS_IN_CLUSTER, n_jobs=min(len(coordinates), constants.MOVIE_DAEMON_MAX_PROCESSES))
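        # A label of -1 marks a noise point / outlier (assuming the DBSCAN labeling
        # convention used by scikit-learn), which is why such files are skipped below.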
# Convert labels into list of files grouped by cluster, maintain order within cluster
clusters = [[] for i in range(n_clusters)]
for i in range(len(coordinates)):
cluster_label = cluster_labels[i]
if cluster_label != -1:
clusters[cluster_label].append(fnames[i])
self._create_map(coordinates, cluster_labels, n_clusters)
return clusters
def _get_gps_coordinates(self, fnames):
"""
Function to pull lat/long fields from datastore Photo entities
"""
coordinates = []
successful_fnames = []
# Get Photo entities from datastore
keys = list(self.datastore.key(ds.DATASTORE_PHOTO, fname) for fname in fnames)
try:
entities = self.datastore.get_multi(keys)
except Exception, e:
msg = 'Failed to get {0} from Cloud Datastore.'
logging.exception(msg.format(keys))
return None
for entity in entities:
try:
coordinates.append((entity['lat'], entity['lon']))
except Exception, e:
msg = 'Entity {0} missing {1}'
logging.error(msg.format(entity, e))
continue
successful_fnames.append(entity.key.name)
return coordinates, successful_fnames
def _create_map(self, coordinates, labels, n_clusters):
"""
        Graphs GPS coordinates on a map (used to verify the clustering algorithm)
"""
map = Basemap(llcrnrlat=22, llcrnrlon=-119, urcrnrlat=49, urcrnrlon=-64,
projection='lcc',lat_1=33,lat_2=45,lon_0=-95)
base_color = 'white'
border_color = 'lightgray'
boundary_color = 'gray'
map.fillcontinents(color=base_color, lake_color=border_color)
map.drawstates(color=border_color)
map.drawcoastlines(color=border_color)
map.drawcountries(color=border_color)
map.drawmapboundary(color=boundary_color)
lat, lon = zip(*coordinates)
map.scatter(lon, lat, latlon=True, s=1, alpha=0.5, zorder=2)
plt.title('Estimated number of clusters: %d' % n_clusters)
#Upload an image to Cloud Storage
plt.savefig(constants.C_MAP_FPATH)
if os.path.exists(constants.C_MAP_FPATH):
print "HERERE"
else:
print "NOT HEEEEEEEEEEEEEEEEEEEEEEEEEEEEEERRRRRRRRRRRRRRRRRRREEEEEEEEEEEE"
|
apache-2.0
|
robbymeals/scikit-learn
|
sklearn/tree/export.py
|
75
|
15670
|
"""
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Trevor Stephens <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from ..externals import six
from . import _tree
def _color_brew(n):
"""Generate n colors with equally spaced hues.
Parameters
----------
n : int
The number of colors required.
Returns
-------
color_list : list, length n
List of n tuples of form (R, G, B) being the components of each color.
"""
color_list = []
# Initialize saturation & value; calculate chroma & value shift
s, v = 0.75, 0.9
c = s * v
m = v - c
for h in np.arange(25, 385, 360. / n).astype(int):
# Calculate some intermediate values
h_bar = h / 60.
x = c * (1 - abs((h_bar % 2) - 1))
# Initialize RGB with same hue & chroma as our color
rgb = [(c, x, 0),
(x, c, 0),
(0, c, x),
(0, x, c),
(x, 0, c),
(c, 0, x),
(c, x, 0)]
r, g, b = rgb[int(h_bar)]
# Shift the initial RGB values to match value and store
rgb = [(int(255 * (r + m))),
(int(255 * (g + m))),
(int(255 * (b + m)))]
color_list.append(rgb)
return color_list
def export_graphviz(decision_tree, out_file="tree.dot", max_depth=None,
feature_names=None, class_names=None, label='all',
filled=False, leaves_parallel=False, impurity=True,
node_ids=False, proportion=False, rotate=False,
rounded=False, special_characters=False):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
The sample counts that are shown are weighted with any sample_weights that
might be present.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
decision_tree : decision tree classifier
The decision tree to be exported to GraphViz.
out_file : file object or string, optional (default="tree.dot")
Handle or name of the output file.
max_depth : int, optional (default=None)
The maximum depth of the representation. If None, the tree is fully
generated.
feature_names : list of strings, optional (default=None)
Names of each of the features.
class_names : list of strings, bool or None, optional (default=None)
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
If ``True``, shows a symbolic representation of the class name.
label : {'all', 'root', 'none'}, optional (default='all')
Whether to show informative labels for impurity, etc.
Options include 'all' to show at every node, 'root' to show only at
the top root node, or 'none' to not show at any node.
filled : bool, optional (default=False)
When set to ``True``, paint nodes to indicate majority class for
classification, extremity of values for regression, or purity of node
for multi-output.
leaves_parallel : bool, optional (default=False)
When set to ``True``, draw all leaf nodes at the bottom of the tree.
impurity : bool, optional (default=True)
When set to ``True``, show the impurity at each node.
node_ids : bool, optional (default=False)
When set to ``True``, show the ID number on each node.
proportion : bool, optional (default=False)
When set to ``True``, change the display of 'values' and/or 'samples'
to be proportions and percentages respectively.
rotate : bool, optional (default=False)
When set to ``True``, orient tree left to right rather than top-down.
rounded : bool, optional (default=False)
When set to ``True``, draw node boxes with rounded corners and use
Helvetica fonts instead of Times-Roman.
special_characters : bool, optional (default=False)
When set to ``False``, ignore special characters for PostScript
compatibility.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf,
... out_file='tree.dot') # doctest: +SKIP
"""
def get_color(value):
# Find the appropriate color & intensity for a node
if colors['bounds'] is None:
# Classification tree
color = list(colors['rgb'][np.argmax(value)])
sorted_values = sorted(value, reverse=True)
alpha = int(255 * (sorted_values[0] - sorted_values[1]) /
(1 - sorted_values[1]))
else:
# Regression tree or multi-output
color = list(colors['rgb'][0])
alpha = int(255 * ((value - colors['bounds'][0]) /
(colors['bounds'][1] - colors['bounds'][0])))
# Return html color code in #RRGGBBAA format
color.append(alpha)
hex_codes = [str(i) for i in range(10)]
hex_codes.extend(['a', 'b', 'c', 'd', 'e', 'f'])
color = [hex_codes[c // 16] + hex_codes[c % 16] for c in color]
return '#' + ''.join(color)
def node_to_str(tree, node_id, criterion):
# Generate the node content string
if tree.n_outputs == 1:
value = tree.value[node_id][0, :]
else:
value = tree.value[node_id]
# Should labels be shown?
labels = (label == 'root' and node_id == 0) or label == 'all'
# PostScript compatibility for special characters
if special_characters:
characters = ['#', '<SUB>', '</SUB>', '≤', '<br/>', '>']
node_string = '<'
else:
characters = ['#', '[', ']', '<=', '\\n', '"']
node_string = '"'
# Write node ID
if node_ids:
if labels:
node_string += 'node '
node_string += characters[0] + str(node_id) + characters[4]
# Write decision criteria
if tree.children_left[node_id] != _tree.TREE_LEAF:
# Always write node decision criteria, except for leaves
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X%s%s%s" % (characters[1],
tree.feature[node_id],
characters[2])
node_string += '%s %s %s%s' % (feature,
characters[3],
round(tree.threshold[node_id], 4),
characters[4])
# Write impurity
if impurity:
if not isinstance(criterion, six.string_types):
criterion = "impurity"
if labels:
node_string += '%s = ' % criterion
node_string += (str(round(tree.impurity[node_id], 4)) +
characters[4])
# Write node sample count
if labels:
node_string += 'samples = '
if proportion:
percent = (100. * tree.n_node_samples[node_id] /
float(tree.n_node_samples[0]))
node_string += (str(round(percent, 1)) + '%' +
characters[4])
else:
node_string += (str(tree.n_node_samples[node_id]) +
characters[4])
# Write node class distribution / regression value
if proportion and tree.n_classes[0] != 1:
# For classification this will show the proportion of samples
value = value / tree.weighted_n_node_samples[node_id]
if labels:
node_string += 'value = '
if tree.n_classes[0] == 1:
# Regression
value_text = np.around(value, 4)
elif proportion:
# Classification
value_text = np.around(value, 2)
elif np.all(np.equal(np.mod(value, 1), 0)):
# Classification without floating-point weights
value_text = value.astype(int)
else:
# Classification with floating-point weights
value_text = np.around(value, 4)
# Strip whitespace
value_text = str(value_text.astype('S32')).replace("b'", "'")
value_text = value_text.replace("' '", ", ").replace("'", "")
if tree.n_classes[0] == 1 and tree.n_outputs == 1:
value_text = value_text.replace("[", "").replace("]", "")
value_text = value_text.replace("\n ", characters[4])
node_string += value_text + characters[4]
# Write node majority class
if (class_names is not None and
tree.n_classes[0] != 1 and
tree.n_outputs == 1):
# Only done for single-output classification trees
if labels:
node_string += 'class = '
if class_names is not True:
class_name = class_names[np.argmax(value)]
else:
class_name = "y%s%s%s" % (characters[1],
np.argmax(value),
characters[2])
node_string += class_name
# Clean up any trailing newlines
if node_string[-2:] == '\\n':
node_string = node_string[:-2]
if node_string[-5:] == '<br/>':
node_string = node_string[:-5]
return node_string + characters[5]
def recurse(tree, node_id, criterion, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if max_depth is None or depth <= max_depth:
# Collect ranks for 'leaf' option in plot_options
if left_child == _tree.TREE_LEAF:
ranks['leaves'].append(str(node_id))
elif str(depth) not in ranks:
ranks[str(depth)] = [str(node_id)]
else:
ranks[str(depth)].append(str(node_id))
out_file.write('%d [label=%s'
% (node_id,
node_to_str(tree, node_id, criterion)))
if filled:
# Fetch appropriate color for node
if 'rgb' not in colors:
# Initialize colors and bounds if required
colors['rgb'] = _color_brew(tree.n_classes[0])
if tree.n_outputs != 1:
# Find max and min impurities for multi-output
colors['bounds'] = (np.min(-tree.impurity),
np.max(-tree.impurity))
elif tree.n_classes[0] == 1:
# Find max and min values in leaf nodes for regression
colors['bounds'] = (np.min(tree.value),
np.max(tree.value))
if tree.n_outputs == 1:
node_val = (tree.value[node_id][0, :] /
tree.weighted_n_node_samples[node_id])
if tree.n_classes[0] == 1:
# Regression
node_val = tree.value[node_id][0, :]
else:
# If multi-output color node by impurity
node_val = -tree.impurity[node_id]
out_file.write(', fillcolor="%s"' % get_color(node_val))
out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d' % (parent, node_id))
if parent == 0:
# Draw True/False labels if parent is root node
angles = np.array([45, -45]) * ((rotate - .5) * -2)
out_file.write(' [labeldistance=2.5, labelangle=')
if node_id == 1:
out_file.write('%d, headlabel="True"]' % angles[0])
else:
out_file.write('%d, headlabel="False"]' % angles[1])
out_file.write(' ;\n')
if left_child != _tree.TREE_LEAF:
recurse(tree, left_child, criterion=criterion, parent=node_id,
depth=depth + 1)
recurse(tree, right_child, criterion=criterion, parent=node_id,
depth=depth + 1)
else:
ranks['leaves'].append(str(node_id))
out_file.write('%d [label="(...)"' % node_id)
if filled:
# color cropped nodes grey
out_file.write(', fillcolor="#C0C0C0"')
            out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
own_file = False
try:
if isinstance(out_file, six.string_types):
if six.PY3:
out_file = open(out_file, "w", encoding="utf-8")
else:
out_file = open(out_file, "wb")
own_file = True
# The depth of each node for plotting with 'leaf' option
ranks = {'leaves': []}
# The colors to render each node with
colors = {'bounds': None}
out_file.write('digraph Tree {\n')
# Specify node aesthetics
out_file.write('node [shape=box')
rounded_filled = []
if filled:
rounded_filled.append('filled')
if rounded:
rounded_filled.append('rounded')
if len(rounded_filled) > 0:
out_file.write(', style="%s", color="black"'
% ", ".join(rounded_filled))
if rounded:
out_file.write(', fontname=helvetica')
out_file.write('] ;\n')
# Specify graph & edge aesthetics
if leaves_parallel:
out_file.write('graph [ranksep=equally, splines=polyline] ;\n')
if rounded:
out_file.write('edge [fontname=helvetica] ;\n')
if rotate:
out_file.write('rankdir=LR ;\n')
# Now recurse the tree and add node & edge attributes
if isinstance(decision_tree, _tree.Tree):
recurse(decision_tree, 0, criterion="impurity")
else:
recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)
# If required, draw leaf nodes at same depth as each other
if leaves_parallel:
for rank in sorted(ranks):
out_file.write("{rank=same ; " +
"; ".join(r for r in ranks[rank]) + "} ;\n")
out_file.write("}")
finally:
if own_file:
out_file.close()
|
bsd-3-clause
|
mfatihaktas/q_sim
|
mixed_models.py
|
1
|
10771
|
import matplotlib
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
matplotlib.use('Agg')
import matplotlib.pyplot as plot
import scipy
import numpy as np
from scipy.optimize import fsolve
from patch import *
from rvs import *
from commonly_used import *
def lyapunov_stability_test():
n, k = 100, 10
V = lambda s_l: min(s_l)**(math.log(n/(n-1), 2.0) )
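  # Descriptive note: V plays the role of a Lyapunov candidate; the quantity d
  # computed below appears to be the expected one-step drift of V, so d < 0 for
  # all large states would indicate stability.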
for c in range(1, 10000):
s_l = [c]*(k-1)
d = -V(s_l)
for i in range(k-1):
s_l_ = list(s_l)
s_l_[i] += 1
d += V(s_l_)/n
s_l_ = [s-1 for s in s_l]
d += V(s_l_)*(n-k+1)/n
print("c= {}, d= {}".format(c, d) )
def ET_mixednet_ub(n, k, l, qlambda_l=[] ):
if len(qlambda_l):
ET_l = []
for i,l in enumerate(qlambda_l):
qlambda_l_ = list(qlambda_l)
qlambda_l_.remove(l)
# print("l= {}, qlambda_l_= {}".format(l, qlambda_l_) )
mu = sum(qlambda_l_[0:k] )
ET_l.append(1/(mu-l) )
log(WARNING, "n= {}, k= {}, qlambda_l= {}\n\t ET_l= {}".format(n, k, qlambda_l_, ET_l) )
return ET_l
else:
EV = 1/l * (H(n-1) - H(n-k) )
EV2 = 1/l**2 * (H_2(n-1) - H_2(n-k) ) + EV**2
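    # EV and EV2 above appear to come from exponential order-statistic identities
    # (H, H_2 are first- and second-order harmonic sums); ET below is the M/G/1 mean
    # response time via Pollaczek-Khinchine: E[T] = E[V] + l*E[V^2] / (2*(1 - l*E[V])).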
ET = EV + l*EV2/2/(1-l*EV)
log(WARNING, "n= {}, k= {}, l= {}\n\t ET= {}".format(n, k, l, ET) )
if ET < 0: return None
return ET
def ET_mixednet_lb(n, k, l):
EV = 1/(n-k+1)/l
EV2 = 2/((n-k+1)*l)**2
ET = EV + l*EV2/2/(1-l*EV)
log(WARNING, "n= {}, k= {}, l= {}\n\t ET= {}".format(n, k, l, ET) )
if ET < 0: return None
return ET
def ET_mixednet_approx(n, k, l):
# Using MC
# pbusy = Pr_busy_mixednet_approx(n, k)
# p = 1/(1 + pbusy*(n-k) )
# mu = p*(n-k+1)*l
# ro = l/mu
# ro_1 = (1-p)*ro
# E_N = ro_1/(1-ro)/(1-ro+ro_1)
# return E_N/l
# pbusy = (1/(n-k+1) )**(1/k)
pbusy = (1/(n-k+1) )**(1/(k-1))
p = pbusy**(k-1)
print("pbusy= {}, p= {}".format(pbusy, p) )
# p = pbusy**(k-2)
mu = p*(n-k+1)*l
# return (k-1)/n * 1/(mu-l)
return 1/(mu-l)
def Pr_busy_mixednet_approx(n=100, k=None):
def eq(pbusy, data):
k = data
# print("k= {}".format(k) )
def p():
sum_ = 0.0
for i in range(k):
sum_ += binom(n, i) * pbusy**i * (1-pbusy)**(n-i)
return binom(n, k-1) * pbusy**(k-1) * (1-pbusy)**(n-k+1) / sum_
# sum_ = 0.0
# for i in range(k-1, n+1):
# sum_ += binom(n, i)* pbusy**i * (1-pbusy)**(n-i)
# return sum_
# return binom(n, k-1)* pbusy**(k-1) * (1-pbusy)**(n-k+1)
# return p() - 1/(n-k+1)/pbusy
return p() - 1/(1 + (n-k)*pbusy)
if k is not None:
root = scipy.optimize.brentq(eq, 0.0001, 0.99, args = (k) )
print("n= {}, k= {}, root= {}".format(n, k, root) )
return root
else:
mew, ms = 3, 5
for k in range(1, n+1, 20):
if k == 1: continue
# roots = fsolve(eq, 0.0, args=(k,), xtol=1e-06)
roots = scipy.optimize.brentq(eq, 0.0001, 0.95, args = (k) )
print("n= {}, k= {}, roots= {}".format(n, k, roots) )
# pbusy_l, eq_l = [], []
# for pbusy in np.linspace(0.01, 1, 1000):
# pbusy_l.append(pbusy)
# eq_l.append(eq(pbusy, k) )
# plot.plot(pbusy_l, eq_l, label=r'$k={}$'.format(k), color=next(dark_color), marker=next(marker), mew=mew, ms=ms, linestyle=':')
# plot.legend()
# plot.xlabel(r'pbusy', fontsize=13)
# plot.ylabel(r'Eq', fontsize=13)
# fig = plot.gcf()
# # def_size = fig.get_size_inches()
# # fig.set_size_inches(def_size[0]/1.4, def_size[1]/1.4)
# fig.tight_layout()
# plot.savefig("prob_busy_complete_eq_n_{}.pdf".format(n) )
# log(WARNING, "done; n= {}".format(n) )
# def Pr_busy(n, k):
# return k/n * (1/(n-k+1) )**(1/k)
# ***************************** M/G/1 Approx ***************************** #
def serv_tail_approx(pe, n, k, t, dist_m):
cdf = 0
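  # The sum below appears to condition on e, the number of idle peers among the
  # other k-1 (Binomial(k-1, pe)), and mixes the corresponding order-statistic
  # CDFs Pr_Xnk_leq_x to approximate the service-time tail P(S > t).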
for e in range(k):
cdf += binom(k-1,e)*pe**e*(1-pe)**(k-1-e) * Pr_Xnk_leq_x(n-k+1+e, e, t, dist_m)
return 1 - cdf
def approx_serv_tail_approx(pe, n, k, t, dist_m):
return 1 - I(F(t, dist_m), (k-1)*pe, n-k+2)
def plot_serv_tail_approx(n, k, dist_m):
  pe = pempty(n, k, dist_m)
x_l, y_l = [], []
for t in np.linspace(0, 10, 100):
x_l.append(t)
y_l.append(serv_tail_approx(pe, n, k, t, dist_m) )
plot.plot(x_l, y_l, label=r'$\lambda= {}$'.format(l), color=next(dark_color), marker=next(marker), mew=mew, ms=ms, linestyle=':')
plot.legend()
plot.title(r'$n= {}$, $k= {}$, $\lambda= {}$'.format(n, k, l) )
plot.xlabel(r'$t$', fontsize=13)
plot.ylabel(r'$Pr\{S > t\}$', fontsize=13)
plot.savefig("plot_serv_tail_approx_n_{}_k_{}.png".format(n, k) )
log(WARNING, "done; n= {}, k= {}".format(n, k) )
def serv_moment_approx(pe, n, k, m, dist_m):
# return mpmath.quad(lambda t: m*t**(m-1)*serv_tail_approx(pe, n, k, t, dist_m), [0, mpmath.inf] ) # 100000
return scipy.integrate.quad(lambda t: m*t**(m-1)*serv_tail_approx(pe, n, k, t, dist_m), 0, np.inf)[0]
def ET_mg1approx(n, k, dist_m):
# pe = pempty_approx(n, k)
pe = pempty(n, k, dist_m)
EV = serv_moment_approx(pe, n, k, 1, dist_m)
EV2 = serv_moment_approx(pe, n, k, 2, dist_m)
print("k= {}, pe= {}".format(k, pe) )
dist = dist_m['dist']
if dist == 'Exp':
ar = dist_m['mu']
ET = EV + ar*EV2/2/(1-ar*EV)
elif dist == 'Pareto':
rv = Pareto(dist_m['loc'], dist_m['a'] )
EX, VX = rv.mean(), rv.var()
ar = 1/EX
coeffvar_ar2 = VX/EX**2
coeffvar_serv2 = (EV2 - EV**2)/EX**2
ro = ar*EV
ET = (ro/(1-ro) ) * (coeffvar_ar2 + coeffvar_serv2)/2 * EV
print("ET= {}".format(ET) )
if ET < 0: return None
return ET
def ET_mg1approx_(n, k, dist_m):
pe = pempty_approx(n, k, dist_m)
print("k= {}, pe= {}".format(k, pe) )
ar = dist_m['mu']
EB = serv_moment_approx(pe, n, k, 1, dist_m)
EB2 = serv_moment_approx(pe, n, k, 2, dist_m)
ET = EB + ar*EB2/2/(1 - ar*EB)
return ET
def pempty(n, k, dist_m):
pe = 1
mu = dist_m['mu']
x_pdf = lambda x: mu*math.exp(-mu*x)
for k_ in range(1, k+1):
# pe = mpmath.quad(lambda t: (1 - serv_tail_approx(pe, n, k_, t, dist_m) ) * f(t, dist_m), [0, mpmath.inf] )
pe = scipy.integrate.quad(lambda t: (1 - serv_tail_approx(pe, n, k_, t, dist_m) ) * x_pdf(t), 0, np.inf)[0]
# print("k_= {}, pe= {}".format(k_, pe) )
# for _ in range(10):
# print("... pe= {}".format(pe) )
# pe = scipy.integrate.quad(lambda t: (1 - serv_tail_approx(pe, n, k, t, dist_m) ) * x_pdf(t), 0, np.inf)[0]
return pe
# return 1 - (k-1)/n
def pempty_approx(n, k, dist_m):
# pe = 1
# a = (k-1)/(n-k+2)
# if a == 0: return None
# return (-1 + math.sqrt(1 + 4*a) )/2/a
ar = dist_m['mu']
def ro(pe):
return ar*serv_moment_approx(pe, n, k, 1, dist_m)
eq = lambda pe: pe - (1 - ro(pe) )
pe = scipy.optimize.brentq(eq, 0.0001, 1)
return pe
def plot_qoi():
dist_m = {'dist': 'Exp', 'mu': 1}
# dist_m = {'dist': 'Pareto', 'loc': 1, 'a': 50}
n = 10
print("n= {}, dist_m= {}".format(n, dist_m) )
x_l, y_l, y_approx_l = [], [], []
def plot_ar_forfixdelay(d=1):
ET_base = ET_mg1approx(n, 2, dist_m)
for k in range(3, n):
def eq(ar, data):
k = data
return ET_mg1approx(n, k, {'dist': 'Exp', 'mu': ar} ) - ET_base
ar = scipy.optimize.brentq(eq, 0.0001, 100, args = (k) )
print("ar= {}".format(ar) )
def plot_ED_vs_k():
for k in range(2, n):
x_l.append(k)
ED = ET_mg1approx(n, k, dist_m)
      y_l.append(ED)
pe = pempty(n, k, dist_m)
EV = serv_moment_approx(pe, n, k, 1, dist_m)
EV2 = serv_moment_approx(pe, n, k, 2, dist_m)
# y_l.append(EV**2)
pe = 1
j, i = n - (k-1)*(1-pe), (k-1)*pe
# print("k= {}, i= {}, j= {}".format(k, i, j) )
# y_approx_l.append(math.log(1 + j/(j-i))**2 )
# y_approx_l.append(math.log(1 + (n+1)/(n-k+1))**2 )
y_approx_l.append(math.log(math.sqrt((k-1)/(n-k+2) ) )**2)
# y_approx_l.append(H_cont(j) - H_cont(j-i) )
plot.plot(x_l, y_l, label='actual', color=next(dark_color), marker=next(marker), mew=mew, ms=ms, linestyle=':')
plot.plot(x_l, y_approx_l, label='approx', color=next(dark_color), marker=next(marker), mew=mew, ms=ms, linestyle=':')
plot.xlabel(r'$k$', fontsize=13)
def plot_avgdelay():
for k in range(2, n):
# for k in np.linspace(2, n-1, 10):
# k = int(k)
x_l.append(k)
y_l.append(ET_mg1approx(n, k, dist_m) )
plot.plot(x_l, y_l, color=next(dark_color), marker=next(marker), mew=mew, ms=ms, linestyle=':')
plot.xlabel(r'$k$', fontsize=13)
plot.ylabel(r'$E[D]$', fontsize=14)
def plot_pe():
for k in range(2, n):
x_l.append(k)
y_l.append(pempty(n, k, dist_m) )
      y_approx_l.append(pempty_approx(n, k, dist_m) )
plot.plot(x_l, y_l, label='Iterative', color=next(dark_color), marker=next(marker), mew=mew, ms=ms, linestyle=':')
plot.plot(x_l, y_approx_l, label='Approx', color=next(dark_color), marker=next(marker), mew=mew, ms=ms, linestyle=':')
plot.xlabel(r'$k$', fontsize=13)
plot.ylabel(r'$p_0$', fontsize=14)
def plot_avgnumempty():
for k in range(2, n):
x_l.append(k)
pe = pempty(n, k, dist_m)
y_l.append((k - 1)*pe)
      pe = pempty_approx(n, k, dist_m)
y_approx_l.append((k - 1)*pe)
plot.plot(x_l, y_l, label='Iterative', color=next(dark_color), marker=next(marker), mew=mew, ms=ms, linestyle=':')
plot.plot(x_l, y_approx_l, label='Approx', color=next(dark_color), marker=next(marker), mew=mew, ms=ms, linestyle=':')
plot.xlabel(r'$k$', fontsize=13)
plot.ylabel(r'$E[N_e]$', fontsize=14)
# plot_ar_forfixdelay()
plot_ED_vs_k()
# plot_avgdelay()
# plot_avgnumempty()
plot.legend()
plot.title(r'$n= {}$, $X \sim {}$'.format(n, dist_m) )
plot.savefig("plot_qoi_n_{}.png".format(n) )
log(WARNING, "done.")
def EL_n_2(n):
return 1/2/(n-2)
def EL2_n_2(n):
return n/2/(n-2)**2
def ET_n_2(n, ar):
p0 = (n-2)/2/(n-1)
ro = (1-p0)/n
EL = 1/2/(n-2)
return 1/(n-1)/ar * (p0 + ro + EL)
def ET2_n_2(n, ar):
p0 = (n-2)/2/(n-1)
ro = (1-p0)/n
EL = 1/2/(n-2)
EL2 = n/2/(n-2)**2
return 1/((n-1)*ar)**2 * (2*p0 + 2*ro + EL2 + 3*EL)
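# Closed forms encoded by the four helpers above for the k = 2 case:
#   p0 = (n-2)/(2(n-1)),  rho = (1 - p0)/n,
#   E[L] = 1/(2(n-2)),    E[L^2] = n/(2(n-2)^2),
# which ET_n_2 and ET2_n_2 combine with the factors 1/((n-1)*ar) and
# 1/((n-1)*ar)^2, respectively.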
def tail_exponent(n, k, dist_m):
ar = dist_m['mu']
pe = pempty(n, k, dist_m)
# pe = pempty_approx(n, k)
k_ = (k-1)*pe
n_ = n - (k-1)*(1-pe)
def eq(s):
Vs = B(k_, n_-k_+1+s/ar)/B(k_, n_-k_+1)
return ar + (s - ar)/Vs
mu = scipy.optimize.brentq(eq, -20, -0.001)
return mu
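# Minimal usage sketch (added for illustration, not part of the original
# experiments): compares the iterative pempty against the fixed-point
# pempty_approx for a hypothetical exponential service model.
def compare_pempty_sketch(n=10, dist_m=None):
    if dist_m is None:
        dist_m = {'dist': 'Exp', 'mu': 1}
    for k in range(2, n):
        print("k= {}, pempty= {}, pempty_approx= {}".format(
            k, pempty(n, k, dist_m), pempty_approx(n, k, dist_m)))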
if __name__ == "__main__":
# n = 50
# dist_m = {'dist': 'Exp', 'mu': 1}
# print("n= {}, X ~ {}".format(n, dist_m) )
# for k in range(1, n, 5):
# pe = pempty(n, k, dist_m)
# print("k= {}, pe= {}".format(k, pe) )
# plot_serv_tail_approx(n=10, k=9, {'dist': 'Exp', 'mu': 1})
# plot_qoi()
lyapunov_stability_test()
|
mit
|
jblackburne/scikit-learn
|
sklearn/datasets/twenty_newsgroups.py
|
13
|
13737
|
"""Caching loader for the 20 newsgroups text classification dataset
The description of the dataset is available on the official website at:
http://people.csail.mit.edu/jrennie/20Newsgroups/
Quoting the introduction:
The 20 Newsgroups data set is a collection of approximately 20,000
newsgroup documents, partitioned (nearly) evenly across 20 different
newsgroups. To the best of my knowledge, it was originally collected
by Ken Lang, probably for his Newsweeder: Learning to filter netnews
paper, though he does not explicitly mention this collection. The 20
newsgroups collection has become a popular data set for experiments
in text applications of machine learning techniques, such as text
classification and text clustering.
This dataset loader will download the recommended "by date" variant of the
dataset, which features a point-in-time split between the train and
test sets. The compressed dataset size is around 14 MB. Once
uncompressed, the train set is 52 MB and the test set is 34 MB.
The data is downloaded, extracted and cached in the '~/scikit_learn_data'
folder.
The `fetch_20newsgroups` function will not vectorize the data into numpy
arrays but the dataset lists the filenames of the posts and their categories
as target labels.
The `fetch_20newsgroups_vectorized` function will in addition do a simple
tf-idf vectorization step.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
import os
import logging
import tarfile
import pickle
import shutil
import re
import codecs
import numpy as np
import scipy.sparse as sp
from .base import get_data_home
from .base import Bunch
from .base import load_files
from .base import _pkl_filepath
from ..utils import check_random_state
from ..feature_extraction.text import CountVectorizer
from ..preprocessing import normalize
from ..externals import joblib, six
if six.PY3:
from urllib.request import urlopen
else:
from urllib2 import urlopen
logger = logging.getLogger(__name__)
URL = ("http://people.csail.mit.edu/jrennie/"
"20Newsgroups/20news-bydate.tar.gz")
ARCHIVE_NAME = "20news-bydate.tar.gz"
CACHE_NAME = "20news-bydate.pkz"
TRAIN_FOLDER = "20news-bydate-train"
TEST_FOLDER = "20news-bydate-test"
def download_20newsgroups(target_dir, cache_path):
"""Download the 20 newsgroups data and stored it as a zipped pickle."""
archive_path = os.path.join(target_dir, ARCHIVE_NAME)
train_path = os.path.join(target_dir, TRAIN_FOLDER)
test_path = os.path.join(target_dir, TEST_FOLDER)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
if os.path.exists(archive_path):
# Download is not complete as the .tar.gz file is removed after
# download.
logger.warning("Download was incomplete, downloading again.")
os.remove(archive_path)
logger.warning("Downloading dataset from %s (14 MB)", URL)
opener = urlopen(URL)
with open(archive_path, 'wb') as f:
f.write(opener.read())
logger.info("Decompressing %s", archive_path)
tarfile.open(archive_path, "r:gz").extractall(path=target_dir)
os.remove(archive_path)
# Store a zipped pickle
cache = dict(train=load_files(train_path, encoding='latin1'),
test=load_files(test_path, encoding='latin1'))
compressed_content = codecs.encode(pickle.dumps(cache), 'zlib_codec')
with open(cache_path, 'wb') as f:
f.write(compressed_content)
shutil.rmtree(target_dir)
return cache
def strip_newsgroup_header(text):
"""
Given text in "news" format, strip the headers, by removing everything
before the first blank line.
"""
_before, _blankline, after = text.partition('\n\n')
return after
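# Illustrative example (not from the original module): since the header is
# everything before the first blank line,
#   strip_newsgroup_header("From: a@b\nSubject: hi\n\nbody text")
# returns "body text".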
_QUOTE_RE = re.compile(r'(writes in|writes:|wrote:|says:|said:'
r'|^In article|^Quoted from|^\||^>)')
def strip_newsgroup_quoting(text):
"""
Given text in "news" format, strip lines beginning with the quote
characters > or |, plus lines that often introduce a quoted section
(for example, because they contain the string 'writes:').
"""
good_lines = [line for line in text.split('\n')
if not _QUOTE_RE.search(line)]
return '\n'.join(good_lines)
def strip_newsgroup_footer(text):
"""
Given text in "news" format, attempt to remove a signature block.
As a rough heuristic, we assume that signatures are set apart by either
a blank line or a line made of hyphens, and that it is the last such line
in the file (disregarding blank lines at the end).
"""
lines = text.strip().split('\n')
for line_num in range(len(lines) - 1, -1, -1):
line = lines[line_num]
if line.strip().strip('-') == '':
break
if line_num > 0:
return '\n'.join(lines[:line_num])
else:
return text
def fetch_20newsgroups(data_home=None, subset='train', categories=None,
shuffle=True, random_state=42,
remove=(),
download_if_missing=True):
"""Load the filenames and data from the 20 newsgroups dataset.
Read more in the :ref:`User Guide <20newsgroups>`.
Parameters
----------
subset: 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
categories: None or collection of string or unicode
If None (default), load all the categories.
If not None, list of category names to load (other categories
ignored).
shuffle: bool, optional
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state: numpy random number generator or seed integer
Used to shuffle the dataset.
download_if_missing: optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these is a kind of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
'headers' follows an exact standard; the other filters are not always
correct.
"""
data_home = get_data_home(data_home=data_home)
cache_path = _pkl_filepath(data_home, CACHE_NAME)
twenty_home = os.path.join(data_home, "20news_home")
cache = None
if os.path.exists(cache_path):
try:
with open(cache_path, 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(
compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
except Exception as e:
print(80 * '_')
print('Cache loading failed')
print(80 * '_')
print(e)
if cache is None:
if download_if_missing:
logger.info("Downloading 20news dataset. "
"This may take a few minutes.")
cache = download_20newsgroups(target_dir=twenty_home,
cache_path=cache_path)
else:
raise IOError('20Newsgroups dataset not found')
if subset in ('train', 'test'):
data = cache[subset]
elif subset == 'all':
data_lst = list()
target = list()
filenames = list()
for subset in ('train', 'test'):
data = cache[subset]
data_lst.extend(data.data)
target.extend(data.target)
filenames.extend(data.filenames)
data.data = data_lst
data.target = np.array(target)
data.filenames = np.array(filenames)
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
data.description = 'the 20 newsgroups by date dataset'
if 'headers' in remove:
data.data = [strip_newsgroup_header(text) for text in data.data]
if 'footers' in remove:
data.data = [strip_newsgroup_footer(text) for text in data.data]
if 'quotes' in remove:
data.data = [strip_newsgroup_quoting(text) for text in data.data]
if categories is not None:
labels = [(data.target_names.index(cat), cat) for cat in categories]
# Sort the categories to have the ordering of the labels
labels.sort()
labels, categories = zip(*labels)
mask = np.in1d(data.target, labels)
data.filenames = data.filenames[mask]
data.target = data.target[mask]
# searchsorted to have continuous labels
data.target = np.searchsorted(labels, data.target)
data.target_names = list(categories)
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[mask]
data.data = data_lst.tolist()
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(data.target.shape[0])
random_state.shuffle(indices)
data.filenames = data.filenames[indices]
data.target = data.target[indices]
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[indices]
data.data = data_lst.tolist()
return data
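# Illustrative usage sketch (hypothetical call, kept as a comment because it
# downloads data on first use):
#   train = fetch_20newsgroups(subset='train',
#                              remove=('headers', 'footers', 'quotes'))
#   print(len(train.data), train.target_names[:3])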
def fetch_20newsgroups_vectorized(subset="train", remove=(), data_home=None):
"""Load the 20 newsgroups dataset and transform it into tf-idf vectors.
This is a convenience function; the tf-idf transformation is done using the
default settings for `sklearn.feature_extraction.text.Vectorizer`. For more
advanced usage (stopword filtering, n-gram extraction, etc.), combine
fetch_20newsgroups with a custom `Vectorizer` or `CountVectorizer`.
Read more in the :ref:`User Guide <20newsgroups>`.
Parameters
----------
subset: 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these is a kind of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
Returns
-------
bunch : Bunch object
bunch.data: sparse matrix, shape [n_samples, n_features]
bunch.target: array, shape [n_samples]
bunch.target_names: list, length [n_classes]
"""
data_home = get_data_home(data_home=data_home)
filebase = '20newsgroup_vectorized'
if remove:
filebase += 'remove-' + ('-'.join(remove))
target_file = _pkl_filepath(data_home, filebase + ".pkl")
# we shuffle but use a fixed seed for the memoization
data_train = fetch_20newsgroups(data_home=data_home,
subset='train',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
data_test = fetch_20newsgroups(data_home=data_home,
subset='test',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
if os.path.exists(target_file):
X_train, X_test = joblib.load(target_file)
else:
vectorizer = CountVectorizer(dtype=np.int16)
X_train = vectorizer.fit_transform(data_train.data).tocsr()
X_test = vectorizer.transform(data_test.data).tocsr()
joblib.dump((X_train, X_test), target_file, compress=9)
# the data is stored as int16 for compactness
# but normalize needs floats
X_train = X_train.astype(np.float64)
X_test = X_test.astype(np.float64)
normalize(X_train, copy=False)
normalize(X_test, copy=False)
target_names = data_train.target_names
if subset == "train":
data = X_train
target = data_train.target
elif subset == "test":
data = X_test
target = data_test.target
elif subset == "all":
data = sp.vstack((X_train, X_test)).tocsr()
target = np.concatenate((data_train.target, data_test.target))
else:
raise ValueError("%r is not a valid subset: should be one of "
"['train', 'test', 'all']" % subset)
return Bunch(data=data, target=target, target_names=target_names)
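# Illustrative usage sketch (hypothetical call): the returned Bunch holds a
# sparse feature matrix and integer targets, e.g.
#   bunch = fetch_20newsgroups_vectorized(subset='test')
#   print(bunch.data.shape, bunch.target.shape)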
|
bsd-3-clause
|
JackKelly/neuralnilm_prototype
|
scripts/e238.py
|
2
|
4948
|
from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer
from lasagne.updates import nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
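# GRADIENT_STEPS is forwarded below as 'gradient_steps' to the LSTM layers,
# i.e. how many time steps gradients are backpropagated through (truncated
# BPTT); in Lasagne's LSTMLayer, -1 would mean the full sequence.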
"""
e233
based on e131c but with:
* lag=32
* pool
e234
* init final layer and conv layer
235
no lag
236
should be exactly as 131c: no pool, no lag, no init for final and conv layer
237
putting the pool back
238
seems pooling hurts us! disable pooling.
enable lag = 32
"""
def exp_a(name):
global source
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=1500,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.7,
n_seq_per_batch=10,
subsample_target=5,
#input_padding=4,
include_diff=False,
clip_appliance_power=False,
lag=32
)
net = Net(
experiment_name=name,
source=source,
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=crossentropy,
updates=partial(nesterov_momentum, learning_rate=1.0),
layers_config=[
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': sigmoid,
'W': Uniform(25),
'b': Uniform(25)
},
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': sigmoid,
'W': Uniform(10),
'b': Uniform(10)
},
{
'type': LSTMLayer,
'num_units': 40,
'W_in_to_cell': Uniform(5),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 20,
'filter_length': 5,
'stride': 5,
'nonlinearity': sigmoid
# 'W': Uniform(1)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
# {
# 'type': FeaturePoolLayer,
# 'ds': 5, # number of feature maps to be pooled together
# 'axis': 1 # pool over the time axis
# },
{
'type': LSTMLayer,
'num_units': 80,
'W_in_to_cell': Uniform(5),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
# 'W': Uniform(1)
}
]
)
return net
def init_experiment(experiment):
full_exp_name = NAME + experiment
func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
print("***********************************")
print("Preparing", full_exp_name, "...")
net = eval(func_call)
return net
def main():
for experiment in list('a'):
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
try:
net = init_experiment(experiment)
run_experiment(net, path, epochs=None)
except KeyboardInterrupt:
break
except TrainingError as exception:
print("EXCEPTION:", exception)
except Exception as exception:
raise
print("EXCEPTION:", exception)
import ipdb; ipdb.set_trace()
if __name__ == "__main__":
main()
|
mit
|
KarlTDebiec/myplotspec
|
Dataset.py
|
1
|
33744
|
# -*- coding: utf-8 -*-
# myplotspec.Dataset.py
#
# Copyright (C) 2015-2017 Karl T Debiec
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license. See the LICENSE file for details.
"""
Represents data.
"""
################################### MODULES ###################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
if __name__ == "__main__":
__package__ = str("myplotspec")
import myplotspec
import h5py
import numpy as np
import pandas as pd
from IPython import embed
from . import sformat, wiprint
################################### CLASSES ###################################
class Dataset(object):
"""
Represents data.
.. note:
- pandas' MultiIndex only supports dtype of 'object'. It does not appear
to be possible to force pandas to use a 32 bit float or integer for a
MultiIndex. _read_hdf5 and _write_hdf5 must behave accordingly
"""
default_h5_address = "/"
default_h5_kw = dict(chunks=True, compression="gzip")
@classmethod
def get_cache_key(cls, infile=None, **kwargs):
"""
Generates tuple of arguments to be used as key for dataset cache.
Arguments:
infile (str): Path to infile
kwargs (dict): Additional keyword arguments
Returns:
tuple: Cache key
.. todo:
- Verify that keyword arguments passed to pandas may be safely
converted to hashable tuple, and if they cannot throw a
warning and load dataset without caching
"""
from os.path import expandvars
if infile is None:
return None
read_csv_kw = []
for key, value in kwargs.get("read_csv_kw", {}).items():
if isinstance(value, list):
value = tuple(value)
read_csv_kw.append((key, value))
return (cls, expandvars(infile), tuple(read_csv_kw))
@classmethod
def main(cls):
"""
Provides command-line interface
"""
parser = cls.construct_argparser()
kwargs = vars(parser.parse_args())
kwargs.pop("cls")(**kwargs)
@staticmethod
def calc_pdist(df, columns=None, mode="kde", bandwidth=None, grid=None,
**kwargs):
"""
Calculates the probability distribution over a DataFrame.
Arguments:
df (DataFrame): DataFrame over which to calculate probability
distribution of each column over rows
columns (list): Columns for which to calculate probability
distribution
mode (ndarray, str, optional): Method of calculating
probability distribution; eventually will support 'hist' for
histogram and 'kde' for kernel density estimate, though
presently only 'kde' is implemented
bandwidth (float, dict, str, optional): Bandwidth to use for
kernel density estimates; may be a single float that will be
applied to all columns or a dictionary whose keys are column
names and values are floats corresponding to the bandwidth
for each column; for any column for which *bandwidth* is not
specified, the standard deviation will be used
grid (list, ndarray, dict, optional): Grid on which to
calculate kernel density estimate; may be a single ndarray
that will be applied to all columns or a dictionary whose
keys are column names and values are ndarrays corresponding
to the grid for each column; for any column for which *grid*
is not specified, a grid of 1000 points between the minimum
value minus three times the standard deviation and the
maximum value plus three times the standard deviation will
be used
kde_kw (dict, optional): Keyword arguments passed to
:function:`sklearn.neighbors.KernelDensity`
verbose (int): Level of verbose output
kwargs (dict): Additional keyword arguments
Returns:
OrderedDict: Dictionary whose keys are columns in *df* and
values are DataFrames whose indexes are the *grid* for that
column and contain a single column 'probability' containing
the normalized probability at each grid point
.. todo:
- Implement flag to return single dataframe with single grid
"""
from collections import OrderedDict
import six
from sklearn.neighbors import KernelDensity
# Process arguments
verbose = kwargs.get("verbose", 1)
if verbose >= 1:
wiprint("""Calculating probability distribution over DataFrame""")
if columns is None:
columns = [a for a in df.columns.values if
str(df[a].dtype).startswith("float")]
elif isinstance(columns, six.string_types):
columns = [columns]
if mode == "kde":
# Prepare bandwidths
if bandwidth is None:
all_bandwidth = None
bandwidth = {}
elif isinstance(bandwidth, float):
all_bandwidth = bandwidth
bandwidth = {}
elif isinstance(bandwidth, dict):
all_bandwidth = None
else:
raise Exception()
for column in df.columns.values:
series = df[column]
if column in bandwidth:
bandwidth[column] = float(bandwidth[column])
elif all_bandwidth is not None:
bandwidth[column] = all_bandwidth
else:
bandwidth[column] = series.std()
# Prepare grids
if grid is None:
all_grid = None
grid = {}
elif isinstance(grid, list) or isinstance(grid, np.ndarray):
all_grid = np.array(grid)
grid = {}
elif isinstance(grid, dict):
all_grid = None
for column in df.columns.values:
series = df[column]
if column in grid:
grid[column] = np.array(grid[column])
elif all_grid is not None:
grid[column] = all_grid
else:
grid[column] = np.linspace(series.min() - 3 * series.std(),
series.max() + 3 * series.std(), 1000)
# Calculate probability distributions
kde_kw = kwargs.get("kde_kw", {})
pdist = OrderedDict()
for column in df.columns.values:
series = df[column]
if verbose >= 1:
wiprint("calculating probability distribution of "
"{0} using a kernel density estimate".format(
column))
kde = KernelDensity(bandwidth=bandwidth[column], **kde_kw)
kde.fit(series.dropna()[:, np.newaxis])
pdf = np.exp(kde.score_samples(grid[column][:, np.newaxis]))
pdf /= pdf.sum()
series_pdist = pd.DataFrame(pdf, index=grid[column],
columns=["probability"])
series_pdist.index.name = column
pdist[column] = series_pdist
else:
raise Exception(sformat("""only kernel density estimation is
currently supported"""))
return pdist
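# Illustrative sketch of calc_pdist on synthetic data (hypothetical, not part
# of the original module):
#   df = pd.DataFrame({"x": np.random.randn(1000)})
#   pdist = Dataset.calc_pdist(df, columns=["x"], bandwidth=0.25)
#   pdist["x"]["probability"].sum()   # ~1.0; each pdf is normalized to sum to 1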
@staticmethod
def construct_argparser(parser_or_subparsers=None, **kwargs):
"""
Adds arguments to an existing argument parser, constructs a
subparser, or constructs a new parser
Arguments:
parser_or_subparsers (ArgumentParser, _SubParsersAction,
optional): If ArgumentParser, existing parser to which
arguments will be added; if _SubParsersAction, collection of
subparsers to which a new argument parser will be added; if
None, a new argument parser will be generated
kwargs (dict): Additional keyword arguments
Returns:
ArgumentParser: Argument parser or subparser
"""
import argparse
# Process arguments
help_message = """Process data"""
if isinstance(parser_or_subparsers, argparse.ArgumentParser):
parser = parser_or_subparsers
elif isinstance(parser_or_subparsers, argparse._SubParsersAction):
parser = parser_or_subparsers.add_parser(name="data",
description=help_message, help=help_message)
elif parser_or_subparsers is None:
parser = argparse.ArgumentParser(description=help_message)
# Defaults
if parser.get_default("cls") is None:
parser.set_defaults(cls=Dataset)
# Arguments unique to this class
arg_groups = {ag.title: ag for ag in parser._action_groups}
# Standard arguments
# Unfortunately, this appears to be the only way to handle the
# chance that a mutually-exclusive group will be added more than
# once. add_mutually_exclusive_group does not support setting 'title'
# or 'description', and as soon as the local variable pointing to the group
# is lost, the parser has no information about what the group is
# supposed to be or contain. If the parser has multiple
# mutually-exclusive groups that contain degenerate arguments, it will
# not fail until parse_args is called.
if hasattr(parser, "_verbosity"):
verbosity = parser._verbosity
else:
verbosity = parser._verbosity = \
parser.add_mutually_exclusive_group()
try:
verbosity.add_argument("-v", "--verbose", action="count",
default=1, help="""enable verbose output, may be specified
more than
once""")
except argparse.ArgumentError:
pass
try:
verbosity.add_argument("-q", "--quiet", action="store_const",
const=0, default=1, dest="verbose",
help="disable verbose output")
except argparse.ArgumentError:
pass
try:
parser.add_argument("-d", "--debug", action="count", default=1,
help="""enable debug output, may be specified more than
once""")
except argparse.ArgumentError:
pass
try:
parser.add_argument("-I", "--interactive", action="store_true",
help="""enable interactive ipython terminal after loading
and processing data""")
except argparse.ArgumentError:
pass
# Input arguments
input_group = arg_groups.get("input",
parser.add_argument_group("input"))
try:
input_group.add_argument("-infiles", required=True, dest="infiles",
metavar="INFILE", nargs="+", type=str, help="""file(s) from
which to load data; may be text or
hdf5; may contain environment variables and
wildcards""")
except argparse.ArgumentError:
pass
# Output arguments
output_group = arg_groups.get("output",
parser.add_argument_group("output"))
try:
output_group.add_argument("-outfile", required=False, type=str,
help="""text or hdf5 file to which processed DataFrame will
be output; may contain environment variables""")
except argparse.ArgumentError:
pass
return parser
@staticmethod
def add_shared_args(parser, **kwargs):
"""
Adds command line arguments shared by all subclasses.
Arguments:
parser (ArgumentParser): Nascent argument parser to which to add
arguments
kwargs (dict): Additional keyword arguments
"""
pass
@staticmethod
def add_argument(parser, *args, **kwargs):
"""
"""
import argparse
try:
parser.add_argument(*args, **kwargs)
except argparse.ArgumentError:
pass
@staticmethod
def get_cache_message(cache_key):
"""
Generates message to be used when reloading previously-loaded
dataset.
Arguments:
cache_key (tuple): key with which dataset object is stored in dataset
cache
Returns:
str: message to be used when reloading previously-loaded dataset
"""
return sformat("""Dataset previously loaded from
'{0}'""".format(cache_key[1]))
@staticmethod
def process_infiles(**kwargs):
"""
Processes a list of infiles, expanding environment variables and
wildcards.
Arguments:
infile{s} (str, list): Paths to infile(s), may contain environment
variables and wildcards
Returns:
list: Paths to infiles with environment variables and wildcards
expanded
.. todo:
- handle hdf5 addresses smoothly
"""
from glob import glob
from os.path import expandvars
import re
from . import multi_get_merged
# Process arguments
infiles = multi_get_merged(["infile", "infiles"], kwargs)
re_h5 = re.compile(
r"^(?P<path>(.+)\.(h5|hdf5))((:)?(/)?(?P<address>.+))?$",
flags=re.UNICODE)
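# The pattern above splits an hdf5 specifier such as "data/run.h5:results"
# (hypothetical path) into path "data/run.h5" and in-file address "results";
# plain text paths do not match and fall through to the plain glob branch below.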
processed_infiles = []
for infile in infiles:
if re_h5.match(infile):
path = expandvars(re_h5.match(infile).groupdict()["path"])
address = re_h5.match(infile).groupdict()["address"]
matching_infiles = sorted(glob(expandvars(path)))
matching_infiles = ["{0}:{1}".format(infile, address) for
infile in matching_infiles]
else:
matching_infiles = sorted(glob(expandvars(infile)))
processed_infiles.extend(matching_infiles)
return processed_infiles
def __init__(self, infile, address=None, dataset_cache=None, **kwargs):
"""
Initializes dataset.
Arguments:
infile (str): Path to input file, may contain environment
variables
address (str): Address within hdf5 file from which to load
dataset (hdf5 only)
dataset_cache (dict, optional): Cache of previously-loaded datasets
slice (slice): Slice to load from hdf5 dataset (hdf5 only)
dataframe_kw (dict): Keyword arguments passed to
pandas.DataFrame(...) (hdf5 only)
read_csv_kw (dict): Keyword arguments passed to
pandas.read_csv(...) (text only)
verbose (int): Level of verbose output
debug (int): Level of debug output
kwargs (dict): Additional keyword arguments
.. todo:
- Support loading from pandas format hdf5 (h5_mode?)
- Support other pandas input file formats
- Implement 'targets' other than pandas DataFrame?
"""
from os.path import expandvars
# Process arguments
verbose = kwargs.get("verbose", 1)
self.dataset_cache = dataset_cache
# Load dataset
if verbose >= 1:
wiprint("loading from '{0}'".format(expandvars(infile)))
target = "pandas"
if target == "pandas":
if infile.endswith("h5") or infile.endswith("hdf5"):
h5_mode = "h5py"
if h5_mode == "h5py":
dataframe_kw = kwargs.get("dataframe_kw", {})
with h5py.File(expandvars(infile)) as h5_file:
if address is None:
address = sorted(list(h5_file.keys()))[0]
if "slice" in kwargs:
slc = kwargs.pop("slice")
if not isinstance(slc, slice):
slc = slice(*kwargs["slice"])
data = np.array(h5_file[address][slc])
else:
data = np.array(h5_file[address])
attrs = dict(h5_file[address].attrs)
if "fields" in dataframe_kw:
dataframe_kw["columns"] = dataframe_kw.pop(
"fields")
elif "columns" in dataframe_kw:
pass
elif "fields" in attrs:
dataframe_kw["columns"] = list(attrs["fields"])
elif "columns" in attrs:
dataframe_kw["columns"] = list(attrs["columns"])
self.dataframe = pd.DataFrame(data=data,
**dataframe_kw)
else:
raise Exception("unsupported h5_mode: {0}".format(h5_mode))
else:
read_csv_kw = dict(index_col=0, delimiter="\s\s+")
read_csv_kw.update(kwargs.get("read_csv_kw", {}))
if (
"delimiter" in read_csv_kw and "delim_whitespace"
in read_csv_kw):
del (read_csv_kw["delimiter"])
self.dataframe = pd.read_csv(expandvars(infile), **read_csv_kw)
if (
self.dataframe.index.name is not None and
self.dataframe.index.name.startswith(
"#")):
self.dataframe.index.name = \
self.dataframe.index.name.lstrip(
"#")
def _read_hdf5(self, infile, **kwargs):
"""
Reads DataFrame from hdf5.
Arguments:
infile (str): Path to input hdf5 file and (optionally) address
within the file in the form
``/path/to/file.h5:/address/within/file``; may contain
environment variables
dataframe_kw (dict): Keyword arguments passed to
:class:`DataFrame<pandas:pandas.DataFrame>`
verbose (int): Level of verbose output
kwargs (dict): Additional keyword arguments
Returns:
DataFrame: DataFrame
"""
from os.path import expandvars
import re
# Process arguments
verbose = kwargs.get("verbose", 1)
re_h5 = re.match(
r"^(?P<path>(.+)\.(h5|hdf5))((:)?(/)?(?P<address>.+))?$", infile,
flags=re.UNICODE)
path = expandvars(re_h5.groupdict()["path"])
address = re_h5.groupdict()["address"]
if address == "None":
address = None
dataframe_kw = kwargs.get("dataframe_kw", {})
# Read DataFrame
with h5py.File(path) as h5_file:
# Determine address
if address is None:
if len(h5_file.keys()) == 1:
address = h5_file.keys()[0]
elif (hasattr(self,
"default_h5_address") and self.default_h5_address in
h5_file):
address = self.default_h5_address
else:
address = "/"
if verbose >= 1:
wiprint(
"""Reading DataFrame from '{0}:{1}'""".format(path, address))
# Determine address of values and index
if isinstance(h5_file[address], h5py._hl.dataset.Dataset):
values = np.array(h5_file[address])
index = np.arange(values.shape[0])
elif isinstance(h5_file[address], h5py._hl.group.Group):
if address + "/values" in h5_file:
values = np.array(h5_file[address + "/values"])
elif len(h5_file[address].keys()) == 1:
values = np.array(
h5_file[address + "/" + h5_file[address].keys()[0]])
if address + "/index" in h5_file:
index = np.array(h5_file[address + "/index"])
else:
index = np.arange(values.shape[0])
attrs = dict(h5_file[address].attrs)
# Read columns from attribute; alternatively may be set
# manually in dataframe_kw; previous name was 'fields',
# which is retained here for convenience
if "fields" in dataframe_kw:
dataframe_kw["columns"] = dataframe_kw.pop("fields")
elif "columns" in dataframe_kw:
pass
elif "fields" in attrs:
dataframe_kw["columns"] = list(attrs["fields"])
elif "columns" in attrs:
dataframe_kw["columns"] = list(attrs["columns"])
if "columns" in dataframe_kw:
columns = dataframe_kw.pop("columns")
if np.array(
[isinstance(c, np.ndarray) for c in columns]).all():
columns = pd.MultiIndex.from_tuples(map(tuple, columns))
if np.array([isinstance(c, tuple) for c in columns]).all():
columns = pd.MultiIndex.from_tuples(columns)
dataframe_kw["columns"] = columns
if len(index.shape) == 1:
df = pd.DataFrame(data=values, index=index, **dataframe_kw)
if "index_name" in attrs:
df.index.name = attrs["index_name"]
else:
index = pd.MultiIndex.from_tuples(map(tuple, index))
df = pd.DataFrame(data=values, index=index, **dataframe_kw)
if "index_name" in attrs:
df.index.names = attrs["index_name"]
return df
def _read_text(self, infile, **kwargs):
"""
Reads DataFrame from text.
Arguments:
infile (str): Path to input file; may contain environment
variables
read_csv_kw (dict): Keyword arguments passed to
:func:`read_csv<pandas.read_csv>`
verbose (int): Level of verbose output
kwargs (dict): Additional keyword arguments
Returns:
DataFrame: DataFrame
"""
from os.path import expandvars
import warnings
# Process arguments
verbose = kwargs.get("verbose", 1)
infile = expandvars(infile)
read_csv_kw = dict(index_col=0, delimiter="\s\s+")
read_csv_kw.update(kwargs.get("read_csv_kw", {}))
if ("delimiter" in read_csv_kw and "delim_whitespace" in read_csv_kw):
del (read_csv_kw["delimiter"])
# Read DataFrame
if verbose >= 1:
wiprint("""Reading DataFrame from '{0}' """.format(infile))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
df = pd.read_csv(infile, **read_csv_kw)
if (df.index.name is not None and df.index.name.startswith("#")):
df.index.name = df.index.name.lstrip("#")
return df
def _write_hdf5(self, outfile, **kwargs):
"""
Writes DataFrame to hdf5.
Arguments:
d{ata}f{rame} (DataFrame): DataFrame to write
outfile (str): Path to output hdf5 file and (optionally)
address within the file in the form
``/path/to/file.h5:/address/within/file``; may contain
environment variables
hdf5_kw (dict): Keyword arguments passed to
:meth:`create_dataset<h5py:Group.create_dataset>`
verbose (int): Level of verbose output
kwargs (dict): Additional keyword arguments
"""
from os.path import expandvars
import re
import six
from . import multi_get
# Process arguments
verbose = kwargs.get("verbose", 1)
df = multi_get(["dataframe", "df"], kwargs)
if df is None:
if hasattr(self, "dataframe"):
df = self.dataframe
else:
raise Exception("Cannot find DataFrame to write")
re_h5 = re.match(
r"^(?P<path>(.+)\.(h5|hdf5))((:)?(/)?(?P<address>.+))?$", outfile,
flags=re.UNICODE)
path = expandvars(re_h5.groupdict()["path"])
address = re_h5.groupdict()["address"]
if (address is None or address == "" and hasattr(self,
"default_h5_address")):
address = self.default_h5_address
if hasattr(self, "default_h5_kw"):
h5_kw = self.default_h5_kw
else:
h5_kw = {}
h5_kw.update(kwargs.get("h5_kw", {}))
# Write DataFrame
if verbose >= 1:
wiprint("Writing DataFrame to '{0}'".format(outfile))
with h5py.File(path) as hdf5_file:
hdf5_file.create_dataset("{0}/values".format(address),
data=df.values, dtype=df.values.dtype, **h5_kw)
if df.index.values.dtype == object:
if type(df.index.values[0]) == tuple:
index = np.array(map(list, df.index.values))
else:
index = np.array(map(str, df.index.values))
else:
index = df.index.values
hdf5_file.create_dataset("{0}/index".format(address), data=index,
dtype=index.dtype, **h5_kw)
# Process and store columns as an attribute
columns = df.columns.tolist()
if (np.array(
[isinstance(c, six.string_types) for c in columns]).all()):
# String columns; must make sure all strings are strings
# and not unicode
columns = map(str, columns)
elif np.array([isinstance(c, tuple) for c in columns]).all():
# MultiIndex columns; must make sure all strings are
# strings and not unicode
new_columns = []
for column in columns:
new_column = []
for c in column:
if isinstance(c, six.string_types):
new_column.append(str(c))
else:
new_column.append(c)
new_columns.append(tuple(new_column))
columns = new_columns
hdf5_file[address].attrs["columns"] = columns
# Process and store index name as an attribute
if df.index.name is not None:
hdf5_file[address].attrs["index_name"] = str(df.index.name)
else:
hdf5_file[address].attrs["index_name"] = map(str,
df.index.names)
def _write_text(self, outfile, **kwargs):
"""
Writes DataFrame to text.
Arguments:
d{ata}f{rame} (DataFrame): DataFrame to write
outfile (str): Path to output file; may contain environment
variables
to_string_kw (dict): Keyword arguments passed to
:func:`to_string<pandas.DataFrame.to_string>`
verbose (int): Level of verbose output
kwargs (dict): Additional keyword arguments
"""
from os.path import expandvars
from . import multi_get
# Process arguments
verbose = kwargs.get("verbose", 1)
df = multi_get(["dataframe", "df"], kwargs)
if df is None:
if hasattr(self, "dataframe"):
df = self.dataframe
else:
raise Exception("Cannot find DataFrame to write")
outfile = expandvars(outfile)
to_string_kw = dict(col_space=12, sparsify=False)
to_string_kw.update(kwargs.get("to_string_kw", {}))
# Write DataFrame
if verbose >= 1:
wiprint("Writing DataFrame to '{0}'".format(outfile))
with open(outfile, "w") as text_file:
text_file.write(df.to_string(**to_string_kw))
def load_dataset(self, cls=None, **kwargs):
"""
Loads a dataset, or reloads a previously-loaded dataset from a
cache.
Arguments:
cls (class, str): Dataset class; may be either class object itself
or name of class in form of 'package.module.class'; if None,
will be set to self.__class__; if '__nocls_',
function will return None
Returns:
object: Dataset, either newly initialized or copied from cache
"""
from . import load_dataset
if cls is None:
cls = type(self)
return load_dataset(cls=cls, dataset_cache=self.dataset_cache,
**kwargs)
def read(self, **kwargs):
"""
Reads data from one or more *infiles* into a DataFrame.
If more than one *infile* is provided, the resulting DataFrame
will consist of their merged data.
If an *infile* is an hdf5 file path and (optionally) address
within the file in the form
``/path/to/file.h5:/address/within/file``, the corresponding
DataFrame's values will be loaded from
``/address/within/file/values``, its index will be loaded from
``/address/within/file/index``, its column names will be loaded
from the 'columns' attribute of ``/address/within/file`` if
present, and index name will be loaded from the 'index_name'
attribute of ``/address/within/file`` if present. Additional
arguments provided in *dataframe_kw* will be passed to
:class:`DataFrame<pandas:pandas.DataFrame>`.
If an *infile* is the path to a text file, the corresponding
DataFrame will be loaded using
:func:`read_csv<pandas.read_csv>`, including additional
arguments provided in *read_csv_kw*.
After generating the DataFrame from *infiles*, the index may be
set by loading a list of residue names and numbers in the form
``XAA:#`` from *indexfile*. This is useful when loading data
from files that do not specify residue names.
Arguments:
infile[s] (str): Path(s) to input file(s); may contain
environment variables and wildcards
dataframe_kw (dict): Keyword arguments passed to
:class:`DataFrame<pandas.DataFrame>` (hdf5 only)
read_csv_kw (dict): Keyword arguments passed to
:func:`read_csv<pandas.read_csv>` (text only)
indexfile (str): Path to index file; may contain environment
variables
verbose (int): Level of verbose output
kwargs (dict): Additional keyword arguments
Returns:
DataFrame: Sequence DataFrame
"""
import re
from . import multi_pop_merged
# Process arguments
infile_args = multi_pop_merged(["infile", "infiles"], kwargs)
infiles = self.infiles = self.process_infiles(infiles=infile_args)
if len(infiles) == 0:
raise Exception(sformat("""No infiles found matching
'{0}'""".format(infile_args)))
re_h5 = re.compile(
r"^(?P<path>(.+)\.(h5|hdf5))((:)?(/)?(?P<address>.+))?$",
flags=re.UNICODE)
# Load Data
dfs = []
for infile in infiles:
if re_h5.match(infile):
df = self._read_hdf5(infile, **kwargs)
else:
df = self._read_text(infile, **kwargs)
dfs.append(df)
df = dfs.pop(0)
for df_i in dfs:
df = df.merge(df_i, how="outer", left_index=True, right_index=True)
# Apply dtype
if kwargs.get("dtype") is not None:
df = df.astype(kwargs.get("dtype"))
return df
def write(self, outfile, **kwargs):
"""
Writes DataFrame to text or hdf5.
If *outfile* is an hdf5 file path and (optionally) address
within the file in the form
``/path/to/file.h5:/address/within/file``, DataFrame's values
will be written to ``/address/within/file/values``, index will
be written to ``/address/within/file/index``, column names will
be written to the 'columns' attribute of
``/address/within/file``, and index name will be written to the
'index.name' attribute of ``/address/within/file``.
If *outfile* is the path to a text file, DataFrame will be
written using :meth:`to_string<pandas.DataFrame.to_string>`,
including additional arguments provided in *read_csv_kw*.
Arguments:
outfile (str): Path to output file; may be path to text file
or path to hdf5 file in the form
'/path/to/hdf5/file.h5:/address/within/hdf5/file'; may
contain environment variables
hdf5_kw (dict): Keyword arguments passed to
:meth:`create_dataset<h5py:Group.create_dataset>` (hdf5
only)
read_csv_kw (dict): Keyword arguments passed to
:meth:`to_string<pandas.DataFrame.to_string>` (text only)
verbose (int): Level of verbose output
kwargs (dict): Additional keyword arguments
"""
from os.path import expandvars
import re
# Process arguments
outfile = expandvars(outfile)
re_h5 = re.match(
r"^(?P<path>(.+)\.(h5|hdf5))((:)?(/)?(?P<address>.+))?$", outfile,
flags=re.UNICODE)
# Write DataFrame
if re_h5:
self._write_hdf5(outfile=outfile, **kwargs)
else:
self._write_text(outfile=outfile, **kwargs)
#################################### MAIN #####################################
if __name__ == "__main__":
Dataset.main()
|
bsd-3-clause
|
ndingwall/scikit-learn
|
sklearn/compose/_column_transformer.py
|
5
|
33482
|
"""
The :mod:`sklearn.compose._column_transformer` module implements utilities
to work with heterogeneous data and to apply different transformers to
different columns.
"""
# Author: Andreas Mueller
# Joris Van den Bossche
# License: BSD
from itertools import chain
import numpy as np
from scipy import sparse
from joblib import Parallel
from ..base import clone, TransformerMixin
from ..utils._estimator_html_repr import _VisualBlock
from ..pipeline import _fit_transform_one, _transform_one, _name_estimators
from ..preprocessing import FunctionTransformer
from ..utils import Bunch
from ..utils import _safe_indexing
from ..utils import _get_column_indices
from ..utils import _determine_key_type
from ..utils.metaestimators import _BaseComposition
from ..utils.validation import check_array, check_is_fitted
from ..utils.validation import _deprecate_positional_args
from ..utils.fixes import delayed
__all__ = [
'ColumnTransformer', 'make_column_transformer', 'make_column_selector'
]
_ERR_MSG_1DCOLUMN = ("1D data passed to a transformer that expects 2D data. "
"Try to specify the column selection as a list of one "
"item instead of a scalar.")
class ColumnTransformer(TransformerMixin, _BaseComposition):
"""Applies transformers to columns of an array or pandas DataFrame.
This estimator allows different columns or column subsets of the input
to be transformed separately and the features generated by each transformer
will be concatenated to form a single feature space.
This is useful for heterogeneous or columnar data, to combine several
feature extraction mechanisms or transformations into a single transformer.
Read more in the :ref:`User Guide <column_transformer>`.
.. versionadded:: 0.20
Parameters
----------
transformers : list of tuples
List of (name, transformer, columns) tuples specifying the
transformer objects to be applied to subsets of the data.
name : str
Like in Pipeline and FeatureUnion, this allows the transformer and
its parameters to be set using ``set_params`` and searched in grid
search.
transformer : {'drop', 'passthrough'} or estimator
Estimator must support :term:`fit` and :term:`transform`.
Special-cased strings 'drop' and 'passthrough' are accepted as
well, to indicate to drop the columns or to pass them through
untransformed, respectively.
columns : str, array-like of str, int, array-like of int, \
array-like of bool, slice or callable
Indexes the data on its second axis. Integers are interpreted as
positional columns, while strings can reference DataFrame columns
by name. A scalar string or int should be used where
``transformer`` expects X to be a 1d array-like (vector),
otherwise a 2d array will be passed to the transformer.
A callable is passed the input data `X` and can return any of the
above. To select multiple columns by name or dtype, you can use
:obj:`make_column_selector`.
remainder : {'drop', 'passthrough'} or estimator, default='drop'
By default, only the specified columns in `transformers` are
transformed and combined in the output, and the non-specified
columns are dropped. (default of ``'drop'``).
By specifying ``remainder='passthrough'``, all remaining columns that
were not specified in `transformers` will be automatically passed
through. This subset of columns is concatenated with the output of
the transformers.
By setting ``remainder`` to be an estimator, the remaining
non-specified columns will use the ``remainder`` estimator. The
estimator must support :term:`fit` and :term:`transform`.
Note that using this feature requires that the DataFrame columns
input at :term:`fit` and :term:`transform` have identical order.
sparse_threshold : float, default=0.3
If the output of the different transformers contains sparse matrices,
these will be stacked as a sparse matrix if the overall density is
lower than this value. Use ``sparse_threshold=0`` to always return
dense. When the transformed output consists of all dense data, the
stacked result will be dense, and this keyword will be ignored.
n_jobs : int, default=None
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
transformer_weights : dict, default=None
Multiplicative weights for features per transformer. The output of the
transformer is multiplied by these weights. Keys are transformer names,
values the weights.
verbose : bool, default=False
If True, the time elapsed while fitting each transformer will be
printed as it is completed.
Attributes
----------
transformers_ : list
The collection of fitted transformers as tuples of
(name, fitted_transformer, column). `fitted_transformer` can be an
estimator, 'drop', or 'passthrough'. In case there were no columns
selected, this will be the unfitted transformer.
If there are remaining columns, the final element is a tuple of the
form:
('remainder', transformer, remaining_columns) corresponding to the
``remainder`` parameter. If there are remaining columns, then
``len(transformers_)==len(transformers)+1``, otherwise
``len(transformers_)==len(transformers)``.
named_transformers_ : :class:`~sklearn.utils.Bunch`
Read-only attribute to access any transformer by given name.
Keys are transformer names and values are the fitted transformer
objects.
sparse_output_ : bool
Boolean flag indicating whether the output of ``transform`` is a
sparse matrix or a dense numpy array, which depends on the output
of the individual transformers and the `sparse_threshold` keyword.
Notes
-----
The order of the columns in the transformed feature matrix follows the
order of how the columns are specified in the `transformers` list.
Columns of the original feature matrix that are not specified are
dropped from the resulting transformed feature matrix, unless passed
through via ``remainder='passthrough'``. Those passed-through columns
are added at the right to the output of the transformers.
See Also
--------
make_column_transformer : Convenience function for
combining the outputs of multiple transformer objects applied to
column subsets of the original feature space.
make_column_selector : Convenience function for selecting
columns based on datatype or the columns name with a regex pattern.
Examples
--------
>>> import numpy as np
>>> from sklearn.compose import ColumnTransformer
>>> from sklearn.preprocessing import Normalizer
>>> ct = ColumnTransformer(
... [("norm1", Normalizer(norm='l1'), [0, 1]),
... ("norm2", Normalizer(norm='l1'), slice(2, 4))])
>>> X = np.array([[0., 1., 2., 2.],
... [1., 1., 0., 1.]])
>>> # Normalizer scales each row of X to unit norm. A separate scaling
>>> # is applied for the two first and two last elements of each
>>> # row independently.
>>> ct.fit_transform(X)
array([[0. , 1. , 0.5, 0.5],
[0.5, 0.5, 0. , 1. ]])
"""
_required_parameters = ['transformers']
@_deprecate_positional_args
def __init__(self,
transformers, *,
remainder='drop',
sparse_threshold=0.3,
n_jobs=None,
transformer_weights=None,
verbose=False):
self.transformers = transformers
self.remainder = remainder
self.sparse_threshold = sparse_threshold
self.n_jobs = n_jobs
self.transformer_weights = transformer_weights
self.verbose = verbose
@property
def _transformers(self):
"""
Internal list of transformers containing only the name and the
transformer, dropping the columns. This is for the implementation
of get_params via BaseComposition._get_params which expects lists
of tuples of len 2.
"""
return [(name, trans) for name, trans, _ in self.transformers]
@_transformers.setter
def _transformers(self, value):
self.transformers = [
(name, trans, col) for ((name, trans), (_, _, col))
in zip(value, self.transformers)]
def get_params(self, deep=True):
"""Get parameters for this estimator.
Returns the parameters given in the constructor as well as the
estimators contained within the `transformers` of the
`ColumnTransformer`.
Parameters
----------
deep : bool, default=True
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : dict
Parameter names mapped to their values.
"""
return self._get_params('_transformers', deep=deep)
def set_params(self, **kwargs):
"""Set the parameters of this estimator.
Valid parameter keys can be listed with ``get_params()``. Note that you
can directly set the parameters of the estimators contained in
`transformers` of `ColumnTransformer`.
Returns
-------
self
"""
self._set_params('_transformers', **kwargs)
return self
def _iter(self, fitted=False, replace_strings=False):
"""
Generate (name, trans, column, weight) tuples.
If fitted=True, use the fitted transformers, else use the
user specified transformers updated with converted column names
and potentially appended with transformer for remainder.
"""
if fitted:
transformers = self.transformers_
else:
# interleave the validated column specifiers
transformers = [
(name, trans, column) for (name, trans, _), column
in zip(self.transformers, self._columns)
]
# add transformer tuple for remainder
if self._remainder[2] is not None:
transformers = chain(transformers, [self._remainder])
get_weight = (self.transformer_weights or {}).get
for name, trans, column in transformers:
if replace_strings:
# replace 'passthrough' with identity transformer and
# skip in case of 'drop'
if trans == 'passthrough':
trans = FunctionTransformer(
accept_sparse=True, check_inverse=False
)
elif trans == 'drop':
continue
elif _is_empty_column_selection(column):
continue
yield (name, trans, column, get_weight(name))
def _validate_transformers(self):
if not self.transformers:
return
names, transformers, _ = zip(*self.transformers)
# validate names
self._validate_names(names)
# validate estimators
for t in transformers:
if t in ('drop', 'passthrough'):
continue
if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
hasattr(t, "transform")):
raise TypeError("All estimators should implement fit and "
"transform, or can be 'drop' or 'passthrough' "
"specifiers. '%s' (type %s) doesn't." %
(t, type(t)))
def _validate_column_callables(self, X):
"""
Converts callable column specifications.
"""
columns = []
for _, _, column in self.transformers:
if callable(column):
column = column(X)
columns.append(column)
self._columns = columns
def _validate_remainder(self, X):
"""
Validates ``remainder`` and defines ``_remainder`` targeting
the remaining columns.
"""
is_transformer = ((hasattr(self.remainder, "fit")
or hasattr(self.remainder, "fit_transform"))
and hasattr(self.remainder, "transform"))
if (self.remainder not in ('drop', 'passthrough')
and not is_transformer):
raise ValueError(
"The remainder keyword needs to be one of 'drop', "
"'passthrough', or estimator. '%s' was passed instead" %
self.remainder)
# Make it possible to check for reordered named columns on transform
self._has_str_cols = any(_determine_key_type(cols) == 'str'
for cols in self._columns)
if hasattr(X, 'columns'):
self._df_columns = X.columns
self._n_features = X.shape[1]
cols = []
for columns in self._columns:
cols.extend(_get_column_indices(X, columns))
remaining_idx = sorted(set(range(self._n_features)) - set(cols))
self._remainder = ('remainder', self.remainder, remaining_idx or None)
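# After validation, self._remainder is the tuple
# ('remainder', self.remainder, sorted-indices-of-unselected-columns or None);
# _iter() appends it as an extra transformer whenever any columns remain.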
@property
def named_transformers_(self):
"""Access the fitted transformer by name.
Read-only attribute to access any transformer by given name.
Keys are transformer names and values are the fitted transformer
objects.
"""
# Use Bunch object to improve autocomplete
return Bunch(**{name: trans for name, trans, _
in self.transformers_})
def get_feature_names(self):
"""Get feature names from all transformers.
Returns
-------
feature_names : list of strings
Names of the features produced by transform.
"""
check_is_fitted(self)
feature_names = []
for name, trans, column, _ in self._iter(fitted=True):
if trans == 'drop' or (
hasattr(column, '__len__') and not len(column)):
continue
if trans == 'passthrough':
if hasattr(self, '_df_columns'):
if ((not isinstance(column, slice))
and all(isinstance(col, str) for col in column)):
feature_names.extend(column)
else:
feature_names.extend(self._df_columns[column])
else:
indices = np.arange(self._n_features)
feature_names.extend(['x%d' % i for i in indices[column]])
continue
if not hasattr(trans, 'get_feature_names'):
raise AttributeError("Transformer %s (type %s) does not "
"provide get_feature_names."
% (str(name), type(trans).__name__))
feature_names.extend([name + "__" + f for f in
trans.get_feature_names()])
return feature_names
def _update_fitted_transformers(self, transformers):
# transformers are fitted; excludes 'drop' cases
fitted_transformers = iter(transformers)
transformers_ = []
for name, old, column, _ in self._iter():
if old == 'drop':
trans = 'drop'
elif old == 'passthrough':
# FunctionTransformer is present in list of transformers,
# so get next transformer, but save original string
next(fitted_transformers)
trans = 'passthrough'
elif _is_empty_column_selection(column):
trans = old
else:
trans = next(fitted_transformers)
transformers_.append((name, trans, column))
# sanity check that transformers is exhausted
assert not list(fitted_transformers)
self.transformers_ = transformers_
def _validate_output(self, result):
"""
Ensure that the output of each transformer is 2D. Otherwise
hstack can raise an error or produce incorrect results.
"""
names = [name for name, _, _, _ in self._iter(fitted=True,
replace_strings=True)]
for Xs, name in zip(result, names):
if not getattr(Xs, 'ndim', 0) == 2:
raise ValueError(
"The output of the '{0}' transformer should be 2D (scipy "
"matrix, array, or pandas DataFrame).".format(name))
def _log_message(self, name, idx, total):
if not self.verbose:
return None
return '(%d of %d) Processing %s' % (idx, total, name)
def _fit_transform(self, X, y, func, fitted=False):
"""
Private function to fit and/or transform on demand.
Return value (transformers and/or transformed X data) depends
on the passed function.
``fitted=True`` ensures the fitted transformers are used.
"""
transformers = list(
self._iter(fitted=fitted, replace_strings=True))
try:
return Parallel(n_jobs=self.n_jobs)(
delayed(func)(
transformer=clone(trans) if not fitted else trans,
X=_safe_indexing(X, column, axis=1),
y=y,
weight=weight,
message_clsname='ColumnTransformer',
message=self._log_message(name, idx, len(transformers)))
for idx, (name, trans, column, weight) in enumerate(
self._iter(fitted=fitted, replace_strings=True), 1))
except ValueError as e:
if "Expected 2D array, got 1D array instead" in str(e):
raise ValueError(_ERR_MSG_1DCOLUMN) from e
else:
raise
def fit(self, X, y=None):
"""Fit all transformers using X.
Parameters
----------
X : {array-like, dataframe} of shape (n_samples, n_features)
Input data, of which specified subsets are used to fit the
transformers.
y : array-like of shape (n_samples,...), default=None
Targets for supervised learning.
Returns
-------
self : ColumnTransformer
This estimator
"""
# we use fit_transform to make sure to set sparse_output_ (for which we
# need the transformed data) to have consistent output type in predict
self.fit_transform(X, y=y)
return self
def fit_transform(self, X, y=None):
"""Fit all transformers, transform the data and concatenate results.
Parameters
----------
X : {array-like, dataframe} of shape (n_samples, n_features)
Input data, of which specified subsets are used to fit the
transformers.
y : array-like of shape (n_samples,), default=None
Targets for supervised learning.
Returns
-------
X_t : {array-like, sparse matrix} of \
shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers. If
any result is a sparse matrix, everything will be converted to
sparse matrices.
"""
# TODO: this should be `feature_names_in_` when we start having it
if hasattr(X, "columns"):
self._feature_names_in = np.asarray(X.columns)
else:
self._feature_names_in = None
X = _check_X(X)
# set n_features_in_ attribute
self._check_n_features(X, reset=True)
self._validate_transformers()
self._validate_column_callables(X)
self._validate_remainder(X)
result = self._fit_transform(X, y, _fit_transform_one)
if not result:
self._update_fitted_transformers([])
# All transformers are None
return np.zeros((X.shape[0], 0))
Xs, transformers = zip(*result)
# determine if concatenated output will be sparse or not
if any(sparse.issparse(X) for X in Xs):
nnz = sum(X.nnz if sparse.issparse(X) else X.size for X in Xs)
total = sum(X.shape[0] * X.shape[1] if sparse.issparse(X)
else X.size for X in Xs)
density = nnz / total
self.sparse_output_ = density < self.sparse_threshold
else:
self.sparse_output_ = False
self._update_fitted_transformers(transformers)
self._validate_output(Xs)
return self._hstack(list(Xs))
def transform(self, X):
"""Transform X separately by each transformer, concatenate results.
Parameters
----------
X : {array-like, dataframe} of shape (n_samples, n_features)
The data to be transformed by subset.
Returns
-------
X_t : {array-like, sparse matrix} of \
shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers. If
any result is a sparse matrix, everything will be converted to
sparse matrices.
"""
check_is_fitted(self)
X = _check_X(X)
if hasattr(X, "columns"):
X_feature_names = np.asarray(X.columns)
else:
X_feature_names = None
self._check_n_features(X, reset=False)
if (self._feature_names_in is not None and
X_feature_names is not None and
np.any(self._feature_names_in != X_feature_names)):
raise RuntimeError(
"Given feature/column names do not match the ones for the "
"data given during fit."
)
Xs = self._fit_transform(X, None, _transform_one, fitted=True)
self._validate_output(Xs)
if not Xs:
# All transformers are None
return np.zeros((X.shape[0], 0))
return self._hstack(list(Xs))
def _hstack(self, Xs):
"""Stacks Xs horizontally.
This allows subclasses to control the stacking behavior, while reusing
everything else from ColumnTransformer.
Parameters
----------
Xs : list of {array-like, sparse matrix, dataframe}
"""
if self.sparse_output_:
try:
# since all columns should be numeric before stacking them
# in a sparse matrix, `check_array` is used for the
# dtype conversion if necessary.
converted_Xs = [check_array(X,
accept_sparse=True,
force_all_finite=False)
for X in Xs]
except ValueError as e:
raise ValueError(
"For a sparse output, all columns should "
"be a numeric or convertible to a numeric."
) from e
return sparse.hstack(converted_Xs).tocsr()
else:
Xs = [f.toarray() if sparse.issparse(f) else f for f in Xs]
return np.hstack(Xs)
def _sk_visual_block_(self):
if isinstance(self.remainder, str) and self.remainder == 'drop':
transformers = self.transformers
elif hasattr(self, "_remainder"):
remainder_columns = self._remainder[2]
if hasattr(self, '_df_columns'):
remainder_columns = (
self._df_columns[remainder_columns].tolist()
)
transformers = chain(self.transformers,
[('remainder', self.remainder,
remainder_columns)])
else:
transformers = chain(self.transformers,
[('remainder', self.remainder, '')])
names, transformers, name_details = zip(*transformers)
return _VisualBlock('parallel', transformers,
names=names, name_details=name_details)
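# Hedged usage sketch (added for illustration; not part of the original
# module). It shows how ``sparse_threshold`` decides whether the stacked
# output of ``fit_transform`` comes back sparse or dense, assuming the
# standard ``OneHotEncoder`` from ``sklearn.preprocessing`` is available.
def _sparse_threshold_usage_sketch():
    """Illustrative only: mix sparse one-hot output with a dense passthrough
    column; the combined density here is 0.5, so the stacked result is sparse
    only when ``0.5 < sparse_threshold``."""
    import pandas as pd
    from sklearn.preprocessing import OneHotEncoder

    X = pd.DataFrame({"city": ["a", "b", "a", "c"], "x": [1.0, 2.0, 3.0, 4.0]})
    ct_sparse = ColumnTransformer([("ohe", OneHotEncoder(), ["city"])],
                                  remainder="passthrough", sparse_threshold=0.9)
    ct_dense = ColumnTransformer([("ohe", OneHotEncoder(), ["city"])],
                                 remainder="passthrough", sparse_threshold=0.0)
    # The first call returns a scipy sparse matrix, the second a dense ndarray.
    return ct_sparse.fit_transform(X), ct_dense.fit_transform(X)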
def _check_X(X):
"""Use check_array only on lists and other non-array-likes / sparse"""
if hasattr(X, '__array__') or sparse.issparse(X):
return X
return check_array(X, force_all_finite='allow-nan', dtype=object)
def _is_empty_column_selection(column):
"""
Return True if the column selection is empty (empty list or all-False
boolean array).
"""
if hasattr(column, 'dtype') and np.issubdtype(column.dtype, np.bool_):
return not column.any()
elif hasattr(column, '__len__'):
return (len(column) == 0 or
all(isinstance(col, bool) for col in column)
and not any(column))
else:
return False
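# Hedged examples (a minimal sketch added for illustration, not part of the
# original module) of which selections the helper above treats as empty.
def _empty_selection_examples():
    """Illustrative only; mirrors the rules in ``_is_empty_column_selection``."""
    assert _is_empty_column_selection([])
    assert _is_empty_column_selection(np.array([False, False]))
    assert _is_empty_column_selection([False, False])
    assert not _is_empty_column_selection(['age', 'city'])
    # Objects without a length or a boolean dtype (e.g. slices) are never
    # considered empty by this helper.
    assert not _is_empty_column_selection(slice(0, 0))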
def _get_transformer_list(estimators):
"""
Construct (name, trans, column) tuples from list
"""
transformers, columns = zip(*estimators)
names, _ = zip(*_name_estimators(transformers))
transformer_list = list(zip(names, transformers, columns))
return transformer_list
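# Hedged example (added for illustration): the generated names are the
# lowercased estimator class names produced by ``_name_estimators``.
def _transformer_list_naming_example():
    """Illustrative only; column names are hypothetical."""
    from sklearn.preprocessing import OneHotEncoder, StandardScaler
    # -> [('standardscaler', StandardScaler(), ['x0']),
    #     ('onehotencoder', OneHotEncoder(), ['city'])]
    return _get_transformer_list([(StandardScaler(), ['x0']),
                                  (OneHotEncoder(), ['city'])])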
def make_column_transformer(*transformers,
remainder='drop',
sparse_threshold=0.3,
n_jobs=None,
verbose=False):
"""Construct a ColumnTransformer from the given transformers.
This is a shorthand for the ColumnTransformer constructor; it does not
require, and does not permit, naming the transformers. Instead, they will
be given names automatically based on their types. It also does not allow
weighting with ``transformer_weights``.
Read more in the :ref:`User Guide <make_column_transformer>`.
Parameters
----------
*transformers : tuples
Tuples of the form (transformer, columns) specifying the
transformer objects to be applied to subsets of the data.
transformer : {'drop', 'passthrough'} or estimator
Estimator must support :term:`fit` and :term:`transform`.
Special-cased strings 'drop' and 'passthrough' are accepted as
well, to indicate to drop the columns or to pass them through
untransformed, respectively.
columns : str, array-like of str, int, array-like of int, slice, \
array-like of bool or callable
Indexes the data on its second axis. Integers are interpreted as
positional columns, while strings can reference DataFrame columns
by name. A scalar string or int should be used where
``transformer`` expects X to be a 1d array-like (vector),
otherwise a 2d array will be passed to the transformer.
A callable is passed the input data `X` and can return any of the
above. To select multiple columns by name or dtype, you can use
:obj:`make_column_selector`.
remainder : {'drop', 'passthrough'} or estimator, default='drop'
By default, only the specified columns in `transformers` are
transformed and combined in the output, and the non-specified
        columns are dropped (the default of ``'drop'``).
By specifying ``remainder='passthrough'``, all remaining columns that
were not specified in `transformers` will be automatically passed
through. This subset of columns is concatenated with the output of
the transformers.
By setting ``remainder`` to be an estimator, the remaining
non-specified columns will use the ``remainder`` estimator. The
estimator must support :term:`fit` and :term:`transform`.
sparse_threshold : float, default=0.3
If the transformed output consists of a mix of sparse and dense data,
it will be stacked as a sparse matrix if the density is lower than this
value. Use ``sparse_threshold=0`` to always return dense.
When the transformed output consists of all sparse or all dense data,
the stacked result will be sparse or dense, respectively, and this
keyword will be ignored.
n_jobs : int, default=None
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : bool, default=False
If True, the time elapsed while fitting each transformer will be
printed as it is completed.
Returns
-------
ct : ColumnTransformer
See Also
--------
ColumnTransformer : Class that allows combining the
outputs of multiple transformer objects used on column subsets
of the data into a single feature space.
Examples
--------
>>> from sklearn.preprocessing import StandardScaler, OneHotEncoder
>>> from sklearn.compose import make_column_transformer
>>> make_column_transformer(
... (StandardScaler(), ['numerical_column']),
... (OneHotEncoder(), ['categorical_column']))
ColumnTransformer(transformers=[('standardscaler', StandardScaler(...),
['numerical_column']),
('onehotencoder', OneHotEncoder(...),
['categorical_column'])])
"""
# transformer_weights keyword is not passed through because the user
# would need to know the automatically generated names of the transformers
transformer_list = _get_transformer_list(transformers)
return ColumnTransformer(transformer_list, n_jobs=n_jobs,
remainder=remainder,
sparse_threshold=sparse_threshold,
verbose=verbose)
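# Hedged sketch (added for illustration; not from the original docs): the
# ``remainder`` argument may itself be an estimator, in which case every
# column not named in ``*transformers`` is sent to it. Column names and the
# choice of MinMaxScaler below are hypothetical.
def _remainder_estimator_sketch():
    """Illustrative only."""
    from sklearn.preprocessing import MinMaxScaler, OneHotEncoder, StandardScaler
    return make_column_transformer(
        (StandardScaler(), ['height', 'weight']),
        (OneHotEncoder(), ['city']),
        # Any remaining column (e.g. 'age') is scaled by the remainder estimator.
        remainder=MinMaxScaler())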
class make_column_selector:
"""Create a callable to select columns to be used with
:class:`ColumnTransformer`.
:func:`make_column_selector` can select columns based on datatype or the
    column names with a regex. When using multiple selection criteria, **all**
criteria must match for a column to be selected.
Parameters
----------
pattern : str, default=None
        Names of columns containing this regex pattern will be included. If
        None, column selection will not be based on the pattern.
dtype_include : column dtype or list of column dtypes, default=None
A selection of dtypes to include. For more details, see
:meth:`pandas.DataFrame.select_dtypes`.
dtype_exclude : column dtype or list of column dtypes, default=None
A selection of dtypes to exclude. For more details, see
:meth:`pandas.DataFrame.select_dtypes`.
Returns
-------
selector : callable
Callable for column selection to be used by a
:class:`ColumnTransformer`.
See Also
--------
ColumnTransformer : Class that allows combining the
outputs of multiple transformer objects used on column subsets
of the data into a single feature space.
Examples
--------
>>> from sklearn.preprocessing import StandardScaler, OneHotEncoder
>>> from sklearn.compose import make_column_transformer
>>> from sklearn.compose import make_column_selector
    >>> import numpy as np
    >>> import pandas as pd  # doctest: +SKIP
>>> X = pd.DataFrame({'city': ['London', 'London', 'Paris', 'Sallisaw'],
... 'rating': [5, 3, 4, 5]}) # doctest: +SKIP
>>> ct = make_column_transformer(
... (StandardScaler(),
... make_column_selector(dtype_include=np.number)), # rating
... (OneHotEncoder(),
... make_column_selector(dtype_include=object))) # city
>>> ct.fit_transform(X) # doctest: +SKIP
array([[ 0.90453403, 1. , 0. , 0. ],
[-1.50755672, 1. , 0. , 0. ],
[-0.30151134, 0. , 1. , 0. ],
[ 0.90453403, 0. , 0. , 1. ]])
"""
@_deprecate_positional_args
def __init__(self, pattern=None, *, dtype_include=None,
dtype_exclude=None):
self.pattern = pattern
self.dtype_include = dtype_include
self.dtype_exclude = dtype_exclude
def __call__(self, df):
if not hasattr(df, 'iloc'):
raise ValueError("make_column_selector can only be applied to "
"pandas dataframes")
df_row = df.iloc[:1]
if self.dtype_include is not None or self.dtype_exclude is not None:
df_row = df_row.select_dtypes(include=self.dtype_include,
exclude=self.dtype_exclude)
cols = df_row.columns
if self.pattern is not None:
cols = cols[cols.str.contains(self.pattern, regex=True)]
return cols.tolist()
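# Hedged sketch (added for illustration; not part of the original module):
# when both ``pattern`` and ``dtype_include`` are given, a column must satisfy
# *both* criteria to be selected. The column names below are hypothetical.
def _combined_selector_criteria_sketch():
    """Illustrative only; assumes pandas is installed."""
    import pandas as pd
    df = pd.DataFrame({'price_usd': [1.0, 2.0],
                       'price_code': ['a', 'b'],
                       'qty': [3, 4]})
    selector = make_column_selector(pattern='^price_', dtype_include=np.number)
    return selector(df)  # -> ['price_usd']: numeric AND matching the regex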
|
bsd-3-clause
|
NetX-lab/RepNet-Experiments
|
mininet-repnet/dctopo.py
|
3
|
13666
|
#!/usr/bin/env python
'''@package dctopo
Data center network topology creation and drawing.
@author Brandon Heller ([email protected])
This package includes code to create and draw networks with a regular,
repeated structure. The main class is StructuredTopo, which augments the
standard Mininet Topo object with layer metadata plus convenience functions to
enumerate up, down, and layer edges.
'''
from mininet.topo import Topo
PORT_BASE = 1 # starting index for OpenFlow switch ports
class NodeID(object):
'''Topo node identifier.'''
def __init__(self, dpid = None):
'''Init.
@param dpid dpid
'''
# DPID-compatible hashable identifier: opaque 64-bit unsigned int
self.dpid = dpid
def __str__(self):
'''String conversion.
@return str dpid as string
'''
return str(self.dpid)
def name_str(self):
'''Name conversion.
@return name name as string
'''
return str(self.dpid)
def ip_str(self):
        '''IP conversion.
@return ip ip as string
'''
hi = (self.dpid & 0xff0000) >> 16
mid = (self.dpid & 0xff00) >> 8
lo = self.dpid & 0xff
return "10.%i.%i.%i" % (hi, mid, lo)
class StructuredNodeSpec(object):
'''Layer-specific vertex metadata for a StructuredTopo graph.'''
def __init__(self, up_total, down_total, up_speed, down_speed,
type_str = None):
'''Init.
@param up_total number of up links
@param down_total number of down links
@param up_speed speed in Gbps of up links
@param down_speed speed in Gbps of down links
@param type_str string; model of switch or server
'''
self.up_total = up_total
self.down_total = down_total
self.up_speed = up_speed
self.down_speed = down_speed
self.type_str = type_str
class StructuredEdgeSpec(object):
'''Static edge metadata for a StructuredTopo graph.'''
def __init__(self, speed = 1.0):
'''Init.
@param speed bandwidth in Gbps
'''
self.speed = speed
class StructuredTopo(Topo):
'''Data center network representation for structured multi-trees.'''
def __init__(self, node_specs, edge_specs):
'''Create StructuredTopo object.
@param node_specs list of StructuredNodeSpec objects, one per layer
@param edge_specs list of StructuredEdgeSpec objects for down-links,
one per layer
'''
super(StructuredTopo, self).__init__()
self.node_specs = node_specs
self.edge_specs = edge_specs
def def_nopts(self, layer):
'''Return default dict for a structured topo.
@param layer layer of node
@return d dict with layer key/val pair, plus anything else (later)
'''
return {'layer': layer}
def layer(self, name):
'''Return layer of a node
@param name name of switch
@return layer layer of switch
'''
return self.node_info[name]['layer']
def isPortUp(self, port):
''' Returns whether port is facing up or down
@param port port number
@return portUp boolean is port facing up?
'''
return port % 2 == PORT_BASE
def layer_nodes(self, layer):
'''Return nodes at a provided layer.
@param layer layer
@return names list of names
'''
def is_layer(n):
'''Returns true if node is at layer.'''
return self.layer(n) == layer
nodes = [n for n in self.g.nodes() if is_layer(n)]
return nodes
def up_nodes(self, name):
        '''Return nodes one layer higher (closer to core).
@param name name
@return names list of names
'''
layer = self.layer(name) - 1
nodes = [n for n in self.g[name] if self.layer(n) == layer]
return nodes
def down_nodes(self, name):
        '''Return nodes one layer lower (closer to hosts).
@param name name
@return names list of names
'''
layer = self.layer(name) + 1
nodes = [n for n in self.g[name] if self.layer(n) == layer]
return nodes
def up_edges(self, name):
'''Return edges one layer higher (closer to core).
@param name name
@return up_edges list of name pairs
'''
edges = [(name, n) for n in self.up_nodes(name)]
return edges
def down_edges(self, name):
'''Return edges one layer lower (closer to hosts).
@param name name
@return down_edges list of name pairs
'''
edges = [(name, n) for n in self.down_nodes(name)]
return edges
# def draw(self, filename = None, edge_width = 1, node_size = 1,
# node_color = 'g', edge_color = 'b'):
# '''Generate image of RipL network.
#
# @param filename filename w/ext to write; if None, show topo on screen
# @param edge_width edge width in pixels
# @param node_size node size in pixels
# @param node_color node color (ex 'b' , 'green', or '#0000ff')
# @param edge_color edge color
# '''
# import matplotlib.pyplot as plt
#
# pos = {} # pos[vertex] = (x, y), where x, y in [0, 1]
# for layer in range(len(self.node_specs)):
# v_boxes = len(self.node_specs)
# height = 1 - ((layer + 0.5) / v_boxes)
#
# layer_nodes = sorted(self.layer_nodes(layer, False))
# h_boxes = len(layer_nodes)
# for j, dpid in enumerate(layer_nodes):
# pos[dpid] = ((j + 0.5) / h_boxes, height)
#
# fig = plt.figure(1)
# fig.clf()
# ax = fig.add_axes([0, 0, 1, 1], frameon = False)
#
# draw_networkx_nodes(self.g, pos, ax = ax, node_size = node_size,
# node_color = node_color, with_labels = False)
# # Work around networkx bug; does not handle color arrays properly
# for edge in self.edges(False):
# draw_networkx_edges(self.g, pos, [edge], ax = ax,
# edge_color = edge_color, width = edge_width)
#
# # Work around networkx modifying axis limits
# ax.set_xlim(0, 1.0)
# ax.set_ylim(0, 1.0)
# ax.set_axis_off()
#
# if filename:
# plt.savefig(filename)
# else:
# plt.show()
class FatTreeTopo(StructuredTopo):
'''Three-layer homogeneous Fat Tree.
From "A scalable, commodity data center network architecture, M. Fares et
al. SIGCOMM 2008."
'''
LAYER_CORE = 0
LAYER_AGG = 1
LAYER_EDGE = 2
LAYER_HOST = 3
class FatTreeNodeID(NodeID):
'''Fat Tree-specific node.'''
def __init__(self, pod = 0, sw = 0, host = 0, dpid = None, name = None):
'''Create FatTreeNodeID object from custom params.
Either (pod, sw, host) or dpid must be passed in.
@param pod pod ID
@param sw switch ID
@param host host ID
@param dpid optional dpid
@param name optional name
'''
if dpid:
self.pod = (dpid & 0xff0000) >> 16
self.sw = (dpid & 0xff00) >> 8
self.host = (dpid & 0xff)
self.dpid = dpid
elif name:
pod, sw, host = [int(s) for s in name.split('_')]
self.pod = pod
self.sw = sw
self.host = host
self.dpid = (pod << 16) + (sw << 8) + host
else:
self.pod = pod
self.sw = sw
self.host = host
self.dpid = (pod << 16) + (sw << 8) + host
def __str__(self):
return "(%i, %i, %i)" % (self.pod, self.sw, self.host)
def name_str(self):
'''Return name string'''
return "%i_%i_%i" % (self.pod, self.sw, self.host)
def mac_str(self):
'''Return MAC string'''
return "00:00:00:%02x:%02x:%02x" % (self.pod, self.sw, self.host)
def ip_str(self):
'''Return IP string'''
return "10.%i.%i.%i" % (self.pod, self.sw, self.host)
"""
def _add_port(self, src, dst):
'''Generate port mapping for new edge.
Since Node IDs are assumed hierarchical and unique, we don't need to
maintain a port mapping. Instead, compute port values directly from
node IDs and topology knowledge, statelessly, for calls to self.port.
@param src source switch DPID
@param dst destination switch DPID
'''
pass
"""
def def_nopts(self, layer, name = None):
'''Return default dict for a FatTree topo.
@param layer layer of node
@param name name of node
@return d dict with layer key/val pair, plus anything else (later)
'''
d = {'layer': layer}
if name:
id = self.id_gen(name = name)
# For hosts only, set the IP
if layer == self.LAYER_HOST:
d.update({'ip': id.ip_str()})
d.update({'mac': id.mac_str()})
d.update({'dpid': "%016x" % id.dpid})
return d
def __init__(self, k = 4, speed = 1.0):
'''Init.
@param k switch degree
@param speed bandwidth in Gbps
'''
core = StructuredNodeSpec(0, k, None, speed, type_str = 'core')
agg = StructuredNodeSpec(k / 2, k / 2, speed, speed, type_str = 'agg')
edge = StructuredNodeSpec(k / 2, k / 2, speed, speed,
type_str = 'edge')
host = StructuredNodeSpec(1, 0, speed, None, type_str = 'host')
node_specs = [core, agg, edge, host]
edge_specs = [StructuredEdgeSpec(speed)] * 3
super(FatTreeTopo, self).__init__(node_specs, edge_specs)
self.k = k
self.id_gen = FatTreeTopo.FatTreeNodeID
self.numPods = k
self.aggPerPod = k / 2
pods = range(0, k)
core_sws = range(1, k / 2 + 1)
agg_sws = range(k / 2, k)
edge_sws = range(0, k / 2)
hosts = range(2, k / 2 + 2)
for p in pods:
for e in edge_sws:
edge_id = self.id_gen(p, e, 1).name_str()
edge_opts = self.def_nopts(self.LAYER_EDGE, edge_id)
self.addSwitch(edge_id, **edge_opts)
for h in hosts:
host_id = self.id_gen(p, e, h).name_str()
host_opts = self.def_nopts(self.LAYER_HOST, host_id)
self.addHost(host_id, **host_opts)
self.addLink(host_id, edge_id)
for a in agg_sws:
agg_id = self.id_gen(p, a, 1).name_str()
agg_opts = self.def_nopts(self.LAYER_AGG, agg_id)
self.addSwitch(agg_id, **agg_opts)
self.addLink(edge_id, agg_id)
for a in agg_sws:
agg_id = self.id_gen(p, a, 1).name_str()
c_index = a - k / 2 + 1
for c in core_sws:
core_id = self.id_gen(k, c_index, c).name_str()
core_opts = self.def_nopts(self.LAYER_CORE, core_id)
self.addSwitch(core_id, **core_opts)
self.addLink(core_id, agg_id)
def port(self, src, dst):
'''Get port number (optional)
Note that the topological significance of DPIDs in FatTreeTopo enables
this function to be implemented statelessly.
@param src source switch name
@param dst destination switch name
@return tuple (src_port, dst_port):
src_port: port on source switch leading to the destination switch
dst_port: port on destination switch leading to the source switch
'''
src_layer = self.layer(src)
dst_layer = self.layer(dst)
src_id = self.id_gen(name = src)
dst_id = self.id_gen(name = dst)
LAYER_CORE = 0
LAYER_AGG = 1
LAYER_EDGE = 2
LAYER_HOST = 3
if src_layer == LAYER_HOST and dst_layer == LAYER_EDGE:
src_port = 0
dst_port = (src_id.host - 2) * 2 + 1
elif src_layer == LAYER_EDGE and dst_layer == LAYER_CORE:
src_port = (dst_id.sw - 2) * 2
dst_port = src_id.pod
elif src_layer == LAYER_EDGE and dst_layer == LAYER_AGG:
src_port = (dst_id.sw - self.k / 2) * 2
dst_port = src_id.sw * 2 + 1
elif src_layer == LAYER_AGG and dst_layer == LAYER_CORE:
src_port = (dst_id.host - 1) * 2
dst_port = src_id.pod
elif src_layer == LAYER_CORE and dst_layer == LAYER_AGG:
src_port = dst_id.pod
dst_port = (src_id.host - 1) * 2
elif src_layer == LAYER_AGG and dst_layer == LAYER_EDGE:
src_port = dst_id.sw * 2 + 1
dst_port = (src_id.sw - self.k / 2) * 2
elif src_layer == LAYER_CORE and dst_layer == LAYER_EDGE:
src_port = dst_id.pod
dst_port = (src_id.sw - 2) * 2
elif src_layer == LAYER_EDGE and dst_layer == LAYER_HOST:
src_port = (dst_id.host - 2) * 2 + 1
dst_port = 0
else:
raise Exception("Could not find port leading to given dst switch")
# Shift by one; as of v0.9, OpenFlow ports are 1-indexed.
if src_layer != LAYER_HOST:
src_port += 1
if dst_layer != LAYER_HOST:
dst_port += 1
return (src_port, dst_port)
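# Hedged usage sketch (appended for illustration; not part of the original
# module). Builds the canonical k = 4 fat tree and shows the stateless port
# lookup described in FatTreeTopo.port. Requires a working Mininet install.
def _fat_tree_usage_sketch():
    '''Illustrative only.'''
    topo = FatTreeTopo(k = 4)
    hosts = topo.layer_nodes(FatTreeTopo.LAYER_HOST)  # 4 pods * 2 * 2 = 16 hosts
    host = FatTreeTopo.FatTreeNodeID(pod = 0, sw = 0, host = 2).name_str()  # '0_0_2'
    edge = FatTreeTopo.FatTreeNodeID(pod = 0, sw = 0, host = 1).name_str()  # '0_0_1'
    return len(hosts), topo.port(host, edge)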
|
apache-2.0
|
andurilhuang/Movie_Income_Prediction
|
paper/historycode/Data/release_date.py
|
1
|
1630
|
import pandas as pd
import datetime
def Cleaning_Data(filename):
data = pd.read_csv(filename,encoding='latin1')
# index of released date col
index = data.columns.get_loc("Released")
#change date data to timestamp
date_list = pd.to_datetime(data["Released"])
# released date is weekend or not
weekend_list = []
for each in date_list:
day_ofweek = each.dayofweek
if day_ofweek >= 4 and day_ofweek <= 6:
tag = 1
else:
tag = 0
weekend_list.append(tag)
# released date is on dump months or not
dumpmonth_list = []
for each in date_list:
month = each.month
if month == 1 or month == 2 or month == 8 or month ==9:
tag = 1
else:
tag = 0
dumpmonth_list.append(tag)
data.insert(loc=index+1,column = "Released on weekend",value=weekend_list)
data.insert(loc=index+2,column = "Released on dump month",value=dumpmonth_list)
#Count the number of Language
data['Language'] = data.Language.str.count(',')+1
#Categorize the country
data["Country"] = data["Country"].map(lambda x: x.split(",")[0])
#Clean IMDB.Votes
data['IMDB.Votes'] = data['IMDB.Votes'].replace(',', '',regex=True)
data['IMDB.Votes'] = data['IMDB.Votes'].astype(int)
#Clean Runtime
data['Runtime'] = data['Runtime'].replace('min', '',regex=True)
    data['Runtime'] = data['Runtime'].astype(int)
data.to_csv("FinalMerge_updateson_cleaned_data.csv")
if __name__ == "__main__":
Filename = "FinalMerge.csv"
Cleaning_Data(Filename)
|
mit
|
mojoboss/scikit-learn
|
examples/datasets/plot_random_dataset.py
|
348
|
2254
|
"""
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`,
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)
plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)
plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.show()
|
bsd-3-clause
|
zhushun0008/sms-tools
|
lectures/04-STFT/plots-code/windows-2.py
|
24
|
1026
|
import matplotlib.pyplot as plt
import numpy as np
import time, os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DF
import utilFunctions as UF
import math
(fs, x) = UF.wavread('../../../sounds/violin-B3.wav')
N = 1024
pin = 5000
w = np.ones(801)
hM1 = int(math.floor((w.size+1)/2))
hM2 = int(math.floor(w.size/2))
x1 = x[pin-hM1:pin+hM2]
plt.figure(1, figsize=(9.5, 5))
plt.subplot(3,1,1)
plt.plot(np.arange(-hM1, hM2), x1, lw=1.5)
plt.axis([-hM1, hM2, min(x1), max(x1)])
plt.title('x (violin-B3.wav)')
mX, pX = DF.dftAnal(x1, w, N)
mX = mX - max(mX)
plt.subplot(3,1,2)
plt.plot(np.arange(mX.size), mX, 'r', lw=1.5)
plt.axis([0,N/4,-70,0])
plt.title ('mX (rectangular window)')
w = np.blackman(801)
mX, pX = DF.dftAnal(x1, w, N)
mX = mX - max(mX)
plt.subplot(3,1,3)
plt.plot(np.arange(mX.size), mX, 'r', lw=1.5)
plt.axis([0,N/4,-70,0])
plt.title ('mX (blackman window)')
plt.tight_layout()
plt.savefig('windows-2.png')
plt.show()
|
agpl-3.0
|
OlafLee/DeepLearningTutorials
|
code/DBN.py
|
1
|
17623
|
"""
"""
import os
import sys
import timeit
import numpy
import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams
from logistic_sgd import LogisticRegression, load_data
from mlp import HiddenLayer
from rbm import RBM
import sklearn.metrics as mt
import gzip
import cPickle
# start-snippet-1
class DBN(object):
"""Deep Belief Network
A deep belief network is obtained by stacking several RBMs on top of each
other. The hidden layer of the RBM at layer `i` becomes the input of the
RBM at layer `i+1`. The first layer RBM gets as input the input of the
network, and the hidden layer of the last RBM represents the output. When
used for classification, the DBN is treated as a MLP, by adding a logistic
regression layer on top.
"""
def __init__(self, numpy_rng, theano_rng=None, n_ins=784,
hidden_layers_sizes=[500, 500], n_outs=10):
"""This class is made to support a variable number of layers.
:type numpy_rng: numpy.random.RandomState
:param numpy_rng: numpy random number generator used to draw initial
weights
:type theano_rng: theano.tensor.shared_randomstreams.RandomStreams
:param theano_rng: Theano random generator; if None is given one is
generated based on a seed drawn from `rng`
:type n_ins: int
:param n_ins: dimension of the input to the DBN
:type hidden_layers_sizes: list of ints
:param hidden_layers_sizes: intermediate layers size, must contain
at least one value
:type n_outs: int
:param n_outs: dimension of the output of the network
"""
self.sigmoid_layers = []
self.rbm_layers = []
self.params = []
self.n_layers = len(hidden_layers_sizes)
assert self.n_layers > 0
if not theano_rng:
theano_rng = MRG_RandomStreams(numpy_rng.randint(2 ** 30))
# allocate symbolic variables for the data
self.x = T.matrix('x') # the data is presented as rasterized images
self.y = T.ivector('y') # the labels are presented as 1D vector
# of [int] labels
# end-snippet-1
# The DBN is an MLP, for which all weights of intermediate
# layers are shared with a different RBM. We will first
# construct the DBN as a deep multilayer perceptron, and when
# constructing each sigmoidal layer we also construct an RBM
# that shares weights with that layer. During pretraining we
        # will train these RBMs (which will lead to changing the
        # weights of the MLP as well). During finetuning we will finish
# training the DBN by doing stochastic gradient descent on the
# MLP.
for i in xrange(self.n_layers):
# construct the sigmoidal layer
# the size of the input is either the number of hidden
# units of the layer below or the input size if we are on
# the first layer
if i == 0:
input_size = n_ins
else:
input_size = hidden_layers_sizes[i - 1]
# the input to this layer is either the activation of the
# hidden layer below or the input of the DBN if you are on
# the first layer
if i == 0:
layer_input = self.x
else:
layer_input = self.sigmoid_layers[-1].output
sigmoid_layer = HiddenLayer(rng=numpy_rng,
input=layer_input,
n_in=input_size,
n_out=hidden_layers_sizes[i],
activation=T.nnet.sigmoid)
# add the layer to our list of layers
self.sigmoid_layers.append(sigmoid_layer)
            # it's arguably a philosophical question... but we are
# going to only declare that the parameters of the
# sigmoid_layers are parameters of the DBN. The visible
# biases in the RBM are parameters of those RBMs, but not
# of the DBN.
self.params.extend(sigmoid_layer.params)
            # Construct an RBM that shares weights with this layer
rbm_layer = RBM(numpy_rng=numpy_rng,
theano_rng=theano_rng,
input=layer_input,
n_visible=input_size,
n_hidden=hidden_layers_sizes[i],
W=sigmoid_layer.W,
hbias=sigmoid_layer.b)
self.rbm_layers.append(rbm_layer)
# We now need to add a logistic layer on top of the MLP
self.logLayer = LogisticRegression(
input=self.sigmoid_layers[-1].output,
n_in=hidden_layers_sizes[-1],
n_out=n_outs)
self.params.extend(self.logLayer.params)
# compute the cost for second phase of training, defined as the
# negative log likelihood of the logistic regression (output) layer
self.finetune_cost = self.logLayer.negative_log_likelihood(self.y)
# compute the gradients with respect to the model parameters
# symbolic variable that points to the number of errors made on the
# minibatch given by self.x and self.y
self.errors = self.logLayer.errors(self.y)
def pretraining_functions(self, train_set_x, batch_size, k):
'''Generates a list of functions, for performing one step of
gradient descent at a given layer. The function will require
as input the minibatch index, and to train an RBM you just
need to iterate, calling the corresponding function on all
minibatch indexes.
:type train_set_x: theano.tensor.TensorType
:param train_set_x: Shared var. that contains all datapoints used
for training the RBM
:type batch_size: int
:param batch_size: size of a [mini]batch
:param k: number of Gibbs steps to do in CD-k / PCD-k
'''
# index to a [mini]batch
index = T.lscalar('index') # index to a minibatch
learning_rate = T.scalar('lr') # learning rate to use
# number of batches
n_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
        # beginning of a batch, given `index`
batch_begin = index * batch_size
# ending of a batch given `index`
batch_end = batch_begin + batch_size
pretrain_fns = []
for rbm in self.rbm_layers:
# get the cost and the updates list
            # using CD-k here (persistent=None) for training each RBM.
# TODO: change cost function to reconstruction error
cost, updates = rbm.get_cost_updates(learning_rate,
persistent=None, k=k)
# compile the theano function
fn = theano.function(
inputs=[index, theano.Param(learning_rate, default=0.1)],
outputs=cost,
updates=updates,
givens={
self.x: train_set_x[batch_begin:batch_end]
}
)
# append `fn` to the list of functions
pretrain_fns.append(fn)
return pretrain_fns
def build_finetune_functions(self, datasets, batch_size, learning_rate):
'''Generates a function `train` that implements one step of
finetuning, a function `validate` that computes the error on a
batch from the validation set, and a function `test` that
computes the error on a batch from the testing set
:type datasets: list of pairs of theano.tensor.TensorType
        :param datasets: It is a list that contains all the datasets;
                         it has to contain three pairs, `train`,
`valid`, `test` in this order, where each pair
is formed of two Theano variables, one for the
datapoints, the other for the labels
:type batch_size: int
:param batch_size: size of a minibatch
:type learning_rate: float
:param learning_rate: learning rate used during finetune stage
'''
(train_set_x, train_set_y) = datasets[0]
(valid_set_x, valid_set_y) = datasets[1]
(test_set_x, test_set_y) = datasets[2]
# compute number of minibatches for training, validation and testing
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
n_valid_batches /= batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0]
n_test_batches /= batch_size
index = T.lscalar('index') # index to a [mini]batch
# compute the gradients with respect to the model parameters
gparams = T.grad(self.finetune_cost, self.params)
# compute list of fine-tuning updates
updates = []
for param, gparam in zip(self.params, gparams):
updates.append((param, param - gparam * learning_rate))
train_fn = theano.function(
inputs=[index],
outputs=self.finetune_cost,
updates=updates,
givens={
self.x: train_set_x[
index * batch_size: (index + 1) * batch_size
],
self.y: train_set_y[
index * batch_size: (index + 1) * batch_size
]
}
)
test_score_i = theano.function(
[index],
self.errors,
givens={
self.x: test_set_x[
index * batch_size: (index + 1) * batch_size
],
self.y: test_set_y[
index * batch_size: (index + 1) * batch_size
]
}
)
valid_score_i = theano.function(
[index],
self.errors,
givens={
self.x: valid_set_x[
index * batch_size: (index + 1) * batch_size
],
self.y: valid_set_y[
index * batch_size: (index + 1) * batch_size
]
}
)
# Create a function that scans the entire validation set
def valid_score():
return [valid_score_i(i) for i in xrange(n_valid_batches)]
# Create a function that scans the entire test set
def test_score():
return [test_score_i(i) for i in xrange(n_test_batches)]
return train_fn, valid_score, test_score
'''
def test_DBN(finetune_lr=0.1, pretraining_epochs=1,
pretrain_lr=0.01, k=1, training_epochs=1,
dataset='mnist.pkl.gz', batch_size=10):
'''
if __name__ == '__main__':
finetune_lr=0.1
pretraining_epochs=10
pretrain_lr=0.01
k=1
training_epochs=100
dataset='mnist.pkl.gz'
batch_size=10
"""
Demonstrates how to train and test a Deep Belief Network.
This is demonstrated on MNIST.
:type finetune_lr: float
:param finetune_lr: learning rate used in the finetune stage
:type pretraining_epochs: int
:param pretraining_epochs: number of epoch to do pretraining
:type pretrain_lr: float
:param pretrain_lr: learning rate to be used during pre-training
:type k: int
:param k: number of Gibbs steps in CD/PCD
:type training_epochs: int
    :param training_epochs: maximal number of iterations to run the optimizer
:type dataset: string
    :param dataset: path to the pickled dataset
:type batch_size: int
:param batch_size: the size of a minibatch
"""
datasets = load_data(dataset)
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size/100
# numpy random generator
numpy_rng = numpy.random.RandomState(123)
print '... building the model'
# construct the Deep Belief Network
dbn = DBN(numpy_rng=numpy_rng, n_ins=28 * 28,
hidden_layers_sizes=[1000, 1000, 1000],
n_outs=10)
# start-snippet-2
#########################
# PRETRAINING THE MODEL #
#########################
print '... getting the pretraining functions'
pretraining_fns = dbn.pretraining_functions(train_set_x=train_set_x,
batch_size=batch_size,
k=k)
print '... pre-training the model'
start_time = timeit.default_timer()
## Pre-train layer-wise
for i in xrange(dbn.n_layers):
# go through pretraining epochs
for epoch in xrange(pretraining_epochs):
# go through the training set
c = []
for batch_index in xrange(n_train_batches):
c.append(pretraining_fns[i](index=batch_index,
lr=pretrain_lr))
print 'Pre-training layer %i, epoch %d, cost ' % (i, epoch),
print numpy.mean(c)
end_time = timeit.default_timer()
# end-snippet-2
'''
print >> sys.stderr, ('The pretraining code for file ' +
os.path.split(__file__)[1] +
' ran for %.2fm' % ((end_time - start_time) / 60.))
'''
########################
# FINETUNING THE MODEL #
########################
# get the training, validation and testing function for the model
print '... getting the finetuning functions'
train_fn, validate_model, test_model = dbn.build_finetune_functions(
datasets=datasets,
batch_size=batch_size,
learning_rate=finetune_lr
)
print '... finetuning the model'
# early-stopping parameters
patience = 4 * n_train_batches # look as this many examples regardless
patience_increase = 2. # wait this much longer when a new best is
# found
improvement_threshold = 0.995 # a relative improvement of this much is
# considered significant
validation_frequency = min(n_train_batches, patience / 2)
# go through this many
# minibatches before checking the network
# on the validation set; in this case we
# check every epoch
best_validation_loss = numpy.inf
test_score = 0.
start_time = timeit.default_timer()
done_looping = False
epoch = 0
while (epoch < training_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in xrange(n_train_batches):
minibatch_avg_cost = train_fn(minibatch_index)
iter = (epoch - 1) * n_train_batches + minibatch_index
if (iter + 1) % validation_frequency == 0:
validation_losses = validate_model()
this_validation_loss = numpy.mean(validation_losses)
print(
'epoch %i, minibatch %i/%i, validation error %f %%'
% (
epoch,
minibatch_index + 1,
n_train_batches,
this_validation_loss * 100.
)
)
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
#improve patience if loss improvement is good enough
if (
this_validation_loss < best_validation_loss *
improvement_threshold
):
patience = max(patience, iter * patience_increase)
# save best validation score and iteration number
best_validation_loss = this_validation_loss
best_iter = iter
# test it on the test set
test_losses = test_model()
test_score = numpy.mean(test_losses)
print((' epoch %i, minibatch %i/%i, test error of '
'best model %f %%') %
(epoch, minibatch_index + 1, n_train_batches,
test_score * 100.))
if patience <= iter:
done_looping = True
break
end_time = timeit.default_timer()
print(
(
'Optimization complete with best validation score of %f %%, '
'obtained at iteration %i, '
'with test performance %f %%'
) % (best_validation_loss * 100., best_iter + 1, test_score * 100.)
)
'''
print >> sys.stderr, ('The fine tuning code for file ' +
os.path.split(__file__)[1] +
' ran for %.2fm' % ((end_time - start_time)
/ 60.)
'''
f = gzip.open('mnist.pkl.gz', 'rb')
train_set, valid_set, test_set = cPickle.load(f)
test_y=numpy.array(test_set[1])
test_result = theano.function([],dbn.logLayer.y_pred,givens={dbn.x:test_set_x})
print mt.confusion_matrix(test_result(), test_y)
'''
if __name__ == '__main__':
test_DBN()
'''
|
bsd-3-clause
|
avmarchenko/exa
|
exa/core/tests/test_numerical.py
|
2
|
3922
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2018, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Tests for :mod:`~exa.core.numerical`
####################################
"""
import numpy as np
import pandas as pd
from unittest import TestCase
from exa.core.numerical import Numerical, Series, DataFrame
class TSeries(Series):
_sname = 'testing'
_iname = 'index'
_stype = np.float64
_itype = np.int64
_precision = 2
class TDF0(DataFrame):
_index = 'index'
_columns = ['column']
class TDF1(DataFrame):
_cardinal = ('group', np.int64)
_index = 'index'
_columns = ['column', 'type']
_categories = {'type': str}
class TDF2(DataFrame):
_cardinal = ('group', str)
_index = 'index'
_columns = ['x', 'y', 'z', 'type']
_categories = {'type': np.int64}
class NumericalTest(TestCase):
def setUp(self):
self.numerical = Numerical()
def test_slice(self):
with self.assertRaises(AttributeError):
self.numerical.slice_naive(0)
class SeriesTest(TestCase):
def setUp(self):
self.series = TSeries(np.random.rand(10))
def test_underattr(self):
"""
Test to ensure the (class level) underscore attributes (of
        :class:`~exa.core.tests.test_numerical.TSeries`) are respected.
"""
self.assertTrue(self.series.name == TSeries._sname)
self.assertTrue(self.series.index.name == TSeries._iname)
def test_copy(self):
"""Test :func:`~exa.core.numerical.Series.copy`."""
cp = self.series.copy()
self.assertTrue(np.all(cp == self.series))
self.assertIsInstance(cp, self.series.__class__)
class DF0Test(TestCase):
"""
Test a basic example of an instance of :class:`~exa.core.numerical.DataFrame`.
"""
def setUp(self):
column = np.random.rand(10)
self.df = TDF0.from_dict({'column': column})
def test_copy(self):
"""Test :func:`~exa.core.numerical.DataFrame.copy`."""
cp = self.df.copy()
self.assertTrue(np.all(cp == self.df))
self.assertIsInstance(cp, self.df.__class__)
class DF1Test(TestCase):
"""
Test an example instance of :class:`~exa.core.numerical.DataFrame` with groupby.
"""
def setUp(self):
column = np.random.rand(10)
group = [0, 0, 0, 0, 1, 1, 1, 2, 2, 3]
typ = ['A']*5 + ['B']*5
self.df = TDF1.from_dict({'column': column, 'type': typ, 'group': group})
def test_copy(self):
"""Test :func:`~exa.core.numerical.DataFrame.copy`."""
cp = self.df.copy()
self.assertTrue(np.all(cp == self.df))
self.assertIsInstance(cp, self.df.__class__)
def test_categories(self):
"""Test that categoricals are being handled correctly."""
self.assertIsInstance(self.df['type'].dtype, pd.api.types.CategoricalDtype)
class DF2Test(TestCase):
"""
Test an example instance of :class:`~exa.core.numerical.DataFrame` with groupby.
"""
def setUp(self):
"""Create instance of :class:`~exa.core.test.test_numerical.TestingDF2`."""
x = np.random.rand(10)
y = np.random.rand(10)
z = np.random.rand(10)
typ = [0, 0, 0, 0, 1, 1, 1, 2, 2, 3]
group = ['A']*5 + ['B']*5
self.df = TDF2.from_dict({'x': x, 'y': y, 'z': z, 'type': typ,
'group': group})
def test_copy(self):
"""Test :func:`~exa.core.numerical.DataFrame.copy`."""
cp = self.df.copy()
self.assertTrue(np.all(cp == self.df))
self.assertIsInstance(cp, self.df.__class__)
def test_categories(self):
"""Test that categoricals are being handled correctly."""
self.assertIsInstance(self.df['type'].dtype, pd.api.types.CategoricalDtype)
self.assertIsInstance(self.df['group'].dtype, pd.api.types.CategoricalDtype)
|
apache-2.0
|
ProjectSidewalk/SidewalkWebpage
|
label_clustering.py
|
1
|
10638
|
import numpy as np
import pandas as pd
from haversine import haversine
import sys
from scipy.cluster.hierarchy import linkage, fcluster
from scipy.spatial.distance import pdist
import argparse
import requests
import json
from pandas.io.json import json_normalize
from concurrent.futures import ProcessPoolExecutor
# Custom distance function that returns max float if from the same user id, haversine distance otherwise.
def custom_dist(u, v):
if u[2] == v[2]:
return sys.float_info.max
else:
return haversine([u[0], u[1]], [v[0], v[1]])
# For each label type, cluster based on haversine distance.
def cluster(labels, curr_type, thresholds, single_user):
# Makes a normal dist matrix for a single user, but uses special dist function for multi-user clustering that
# prevents the same user's attributes from being clustered together.
if single_user:
dist_matrix = pdist(np.array(labels[['lat', 'lng']].values), lambda x, y: haversine(x, y))
else:
dist_matrix = pdist(np.array(labels[['lat', 'lng', 'user_id']].values), custom_dist)
link = linkage(dist_matrix, method='complete')
# Copies the labels dataframe and adds a column to it for the cluster id each label is in.
labelsCopy = labels.copy()
labelsCopy.loc[:,'cluster'] = fcluster(link, t=thresholds[curr_type], criterion='distance')
    # Cuts the tree so that only labels less than thresholds[curr_type] kilometers apart are clustered.
clusters = labelsCopy.groupby('cluster')
# Computes the center of each cluster and assigns temporariness and severity.
cluster_list = [] # list of tuples (label_type, cluster_num, lat, lng, severity, temporary).
for clust_num, clust in clusters:
ave_pos = np.mean(clust['coords'].tolist(), axis=0) # use ave pos of clusters.
ave_sev = None if pd.isnull(clust['severity']).all() else int(round(np.median(clust['severity'][~np.isnan(clust['severity'])])))
ave_temp = None if pd.isnull(clust['temporary']).all() else bool(round(np.mean(clust['temporary'])))
cluster_list.append((curr_type, clust_num, ave_pos[0], ave_pos[1], ave_sev, ave_temp))
cluster_df = pd.DataFrame(cluster_list, columns=['label_type', 'cluster', 'lat', 'lng', 'severity', 'temporary'])
return (cluster_df, labelsCopy)
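# Hedged mini-example (added for illustration; coordinates and severities are
# made up). Clusters two nearly identical CurbRamp labels from one user with
# the single-user threshold of 0.002 km; the third, distant label ends up in
# its own cluster.
def cluster_example():
    labels = pd.DataFrame({'lat': [38.910000, 38.910001, 38.920000],
                           'lng': [-77.040000, -77.040001, -77.050000],
                           'user_id': ['u1', 'u1', 'u1'],
                           'severity': [2, 3, None],
                           'temporary': [0, 0, 1]})
    labels['coords'] = labels.apply(lambda x: (x.lat, x.lng), axis=1)
    return cluster(labels, 'CurbRamp', {'CurbRamp': 0.002}, single_user=True)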
if __name__ == '__main__':
POST_HEADER = {'content-type': 'application/json; charset=utf-8'}
# Read in arguments from command line.
parser = argparse.ArgumentParser(description='Gets a set of labels, posts the labels grouped into clusters.')
parser.add_argument('--key', type=str,
help='Key string that is used to authenticate when using API.')
parser.add_argument('--user_id', type=str,
                        help='User id of a single user whose labels should be clustered.')
parser.add_argument('--region_id', type=int,
                        help='Region id of a region whose user-clustered labels should be clustered.')
parser.add_argument('--debug', action='store_true',
help='Debug mode adds print statements')
args = parser.parse_args()
KEY = args.key
DEBUG = args.debug
USER_ID = args.user_id.strip('\'\"') if args.user_id else None
REGION_ID = args.region_id
N_PROCESSORS = 8
# Determine what type of clustering should be done from command line args, and set variable accordingly.
getURL = None
postURL = None
SINGLE_USER = None
if USER_ID:
SINGLE_USER = True
getURL = 'http://localhost:9000/userLabelsToCluster?key=' + KEY + '&userId=' + str(USER_ID)
postURL = 'http://localhost:9000/singleUserClusteringResults?key=' + KEY + '&userId=' + str(USER_ID)
elif REGION_ID:
SINGLE_USER = False
        getURL = 'http://localhost:9000/clusteredLabelsInRegion?key=' + KEY + '&regionId=' + str(REGION_ID)
        postURL = 'http://localhost:9000/multiUserClusteringResults?key=' + KEY + '&regionId=' + str(REGION_ID)
# Send GET request to get the labels to be clustered.
try:
print getURL
print postURL
response = requests.get(getURL)
data = response.json()
label_data = json_normalize(data[0])
# print label_data
except:
print "Failed to get labels needed to cluster."
sys.exit()
# Define thresholds for single and multi user clustering (numbers are in kilometers).
if SINGLE_USER:
thresholds = {'CurbRamp': 0.002,
'NoCurbRamp': 0.002,
'SurfaceProblem': 0.0075,
'Obstacle': 0.0075,
'NoSidewalk': 0.0075,
'Occlusion': 0.0075,
'Other': 0.0075,
'Problem': 0.0075}
else:
thresholds = {'CurbRamp': 0.0075,
'NoCurbRamp': 0.0075,
'SurfaceProblem': 0.01,
'Obstacle': 0.01,
'NoSidewalk': 0.01,
'Occlusion': 0.01,
'Other': 0.01,
'Problem': 0.01}
# Pick which label types should be included in clustering, and which should be included in the "Problem" type.
label_types = ['CurbRamp', 'NoSidewalk', 'Problem', 'Occlusion', 'SurfaceProblem', 'Obstacle', 'Other', 'NoCurbRamp']
problem_types = ['SurfaceProblem', 'Obstacle', 'NoCurbRamp'] if SINGLE_USER else ['Problem']
# These are the columns required in the POST requests for the labels and clusters, respectively.
label_cols = ['label_id', 'label_type', 'cluster']
cluster_cols = ['label_type', 'cluster', 'lat', 'lng', 'severity', 'temporary']
# Check if there are 0 labels. If so, just send the post request and exit.
if len(label_data) == 0:
response = requests.post(postURL, data=json.dumps({'thresholds': [], 'labels': [], 'clusters': []}), headers=POST_HEADER)
sys.exit()
# Remove weird entries with latitude and longitude values (on the order of 10^14).
if sum(label_data.lng > 360) > 0:
if DEBUG: print 'There are %d invalid longitude vals, removing those entries.' % sum(label_data.lng > 360)
label_data = label_data.drop(label_data[label_data.lng > 360].index)
if sum(pd.isnull(label_data.lng)) > 0:
if DEBUG: print 'There are %d NaN longitude vals, removing those entries.' % sum(pd.isnull(label_data.lng))
label_data = label_data.drop(label_data[pd.isnull(label_data.lng)].index)
# Check if there are 0 labels left after removing those with errors. If so, just send the post request and exit.
if len(label_data) == 0:
response = requests.post(postURL, data=json.dumps({'thresholds': [], 'labels': [], 'clusters': []}), headers=POST_HEADER)
sys.exit()
# Put lat-lng in a tuple so it plays nice w/ haversine function.
label_data['coords'] = label_data.apply(lambda x: (x.lat, x.lng), axis = 1)
label_data['id'] = label_data.index.values
# Performs clustering on the data for a single label type; namely, the type at position i in the label_types array.
def cluster_label_type_at_index(i):
clusters_for_type_i = pd.DataFrame(columns=cluster_cols)
labels_for_type_i = pd.DataFrame(columns=label_cols)
label_type = label_types[i]
if label_type == 'Problem':
type_data = label_data[label_data.label_type.isin(problem_types)]
else:
type_data = label_data[label_data.label_type == label_type]
# If there are >1 labels, we can do clustering. Otherwise just copy the 1 (or 0) labels.
if type_data.shape[0] > 1:
(clusters_for_type_i, labels_for_type_i) = cluster(type_data, label_type, thresholds, SINGLE_USER)
elif type_data.shape[0] == 1:
labels_for_type_i = type_data.copy()
labels_for_type_i.loc[:,'cluster'] = 1 # Gives the single cluster a cluster_id of 1.
labels_for_type_i.loc[:,'label_type'] = label_type # Gives Problem type if needed.
clusters_for_type_i = labels_for_type_i.filter(items=cluster_cols)
return (label_type, clusters_for_type_i, labels_for_type_i)
    # Calls `func` len(`args`) times, with `workers` worker processes. Used to compute different label types in parallel.
def multiprocessing(func, args, workers):
with ProcessPoolExecutor(max_workers=workers) as executor:
res = executor.map(func, args)
return list(res)
# Calls the clustering function via the multiprocessing function.
clust_results_by_label_type = multiprocessing(cluster_label_type_at_index, range(0, len(label_types)), N_PROCESSORS)
# Clustering results were done individually for each label type, so their cluster_ids start at 1 for each type. So
# now we offset the cluster ids for different label types so they are unique, and combine the lists.
label_output = pd.DataFrame(columns=label_cols)
cluster_output = pd.DataFrame(columns=cluster_cols)
clusterOffset = 0
for i in range(0, len(label_types)):
(label_type, clusters_for_type_i, labels_for_type_i) = clust_results_by_label_type[i]
if not label_output.empty:
clusterOffset = np.max(label_output.cluster)
clusters_for_type_i.cluster += clusterOffset
cluster_output = cluster_output.append(clusters_for_type_i)
labels_for_type_i.cluster += clusterOffset
label_output = label_output.append(labels_for_type_i.filter(items=label_cols))
if DEBUG:
print "LABEL_TYPE: N_LABELS -> N_CLUSTERS"
print "----------------------------------"
for label_type in label_types:
print str(label_type) + ": " + \
str(label_output[label_output.label_type == label_type].cluster.nunique()) + \
" -> " + str(cluster_output[cluster_output.label_type == label_type].cluster.nunique())
# Convert to JSON.
cluster_json = cluster_output.to_json(orient='records')
label_json = label_output.to_json(orient='records')
threshold_json = pd.DataFrame({'label_type': thresholds.keys(),
'threshold': thresholds.values()}).to_json(orient='records')
output_json = json.dumps({'thresholds': json.loads(threshold_json),
'labels': json.loads(label_json),
'clusters': json.loads(cluster_json)})
# print output_json
# print 'chars in json: ' + str(len(output_json))
# POST results.
response = requests.post(postURL, data=output_json, headers=POST_HEADER)
sys.exit()
|
mit
|
abhisg/scikit-learn
|
sklearn/decomposition/truncated_svd.py
|
9
|
7737
|
"""Truncated SVD for sparse matrices, aka latent semantic analysis (LSA).
"""
# Author: Lars Buitinck <[email protected]>
# Olivier Grisel <[email protected]>
# Michael Becker <[email protected]>
# License: 3-clause BSD.
import numpy as np
import scipy.sparse as sp
try:
from scipy.sparse.linalg import svds
except ImportError:
from ..utils.arpack import svds
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array, as_float_array, check_random_state
from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip
from ..utils.sparsefuncs import mean_variance_axis
__all__ = ["TruncatedSVD"]
class TruncatedSVD(BaseEstimator, TransformerMixin):
"""Dimensionality reduction using truncated SVD (aka LSA).
This transformer performs linear dimensionality reduction by means of
truncated singular value decomposition (SVD). It is very similar to PCA,
but operates on sample vectors directly, instead of on a covariance matrix.
This means it can work with scipy.sparse matrices efficiently.
In particular, truncated SVD works on term count/tf-idf matrices as
returned by the vectorizers in sklearn.feature_extraction.text. In that
context, it is known as latent semantic analysis (LSA).
    This estimator supports two algorithms: a fast randomized SVD solver, and
a "naive" algorithm that uses ARPACK as an eigensolver on (X * X.T) or
(X.T * X), whichever is more efficient.
Read more in the :ref:`User Guide <LSA>`.
Parameters
----------
n_components : int, default = 2
Desired dimensionality of output data.
Must be strictly less than the number of features.
The default value is useful for visualisation. For LSA, a value of
100 is recommended.
algorithm : string, default = "randomized"
SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy
(scipy.sparse.linalg.svds), or "randomized" for the randomized
algorithm due to Halko (2009).
n_iter : int, optional
Number of iterations for randomized SVD solver. Not used by ARPACK.
random_state : int or RandomState, optional
(Seed for) pseudo-random number generator. If not given, the
numpy.random singleton is used.
tol : float, optional
Tolerance for ARPACK. 0 means machine precision. Ignored by randomized
SVD solver.
Attributes
----------
components_ : array, shape (n_components, n_features)
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
explained_variance_ : array, [n_components]
The variance of the training samples transformed by a projection to
each component.
Examples
--------
>>> from sklearn.decomposition import TruncatedSVD
>>> from sklearn.random_projection import sparse_random_matrix
>>> X = sparse_random_matrix(100, 100, density=0.01, random_state=42)
>>> svd = TruncatedSVD(n_components=5, random_state=42)
>>> svd.fit(X) # doctest: +NORMALIZE_WHITESPACE
TruncatedSVD(algorithm='randomized', n_components=5, n_iter=5,
random_state=42, tol=0.0)
>>> print(svd.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.0782... 0.0552... 0.0544... 0.0499... 0.0413...]
>>> print(svd.explained_variance_ratio_.sum()) # doctest: +ELLIPSIS
0.279...
See also
--------
PCA
RandomizedPCA
References
----------
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
Notes
-----
    SVD suffers from a problem called "sign indeterminacy", which means the
sign of the ``components_`` and the output from transform depend on the
algorithm and random state. To work around this, fit instances of this
class to data once, then keep the instance around to do transformations.
"""
def __init__(self, n_components=2, algorithm="randomized", n_iter=5,
random_state=None, tol=0.):
self.algorithm = algorithm
self.n_components = n_components
self.n_iter = n_iter
self.random_state = random_state
self.tol = tol
def fit(self, X, y=None):
"""Fit LSI model on training data X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
self : object
Returns the transformer object.
"""
self.fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit LSI model to X and perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = as_float_array(X, copy=False)
random_state = check_random_state(self.random_state)
# If sparse and not csr or csc, convert to csr
if sp.issparse(X) and X.getformat() not in ["csr", "csc"]:
X = X.tocsr()
if self.algorithm == "arpack":
U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol)
# svds doesn't abide by scipy.linalg.svd/randomized_svd
# conventions, so reverse its outputs.
Sigma = Sigma[::-1]
U, VT = svd_flip(U[:, ::-1], VT[::-1])
elif self.algorithm == "randomized":
k = self.n_components
n_features = X.shape[1]
if k >= n_features:
raise ValueError("n_components must be < n_features;"
" got %d >= %d" % (k, n_features))
U, Sigma, VT = randomized_svd(X, self.n_components,
n_iter=self.n_iter,
random_state=random_state)
else:
raise ValueError("unknown algorithm %r" % self.algorithm)
self.components_ = VT
# Calculate explained variance & explained variance ratio
X_transformed = np.dot(U, np.diag(Sigma))
self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
if sp.issparse(X):
_, full_var = mean_variance_axis(X, axis=0)
full_var = full_var.sum()
else:
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
return X_transformed
def transform(self, X):
"""Perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = check_array(X, accept_sparse='csr')
return safe_sparse_dot(X, self.components_.T)
def inverse_transform(self, X):
"""Transform X back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data.
Returns
-------
X_original : array, shape (n_samples, n_features)
Note that this is always a dense array.
"""
X = check_array(X)
return np.dot(X, self.components_)
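# A minimal LSA-style usage sketch (not part of the original module), assuming
# scikit-learn's TfidfVectorizer is available. It also illustrates the note on
# sign indeterminacy above: fit once and reuse the same fitted instance for all
# later transforms.
#
# from sklearn.feature_extraction.text import TfidfVectorizer
# docs = ["the cat sat on the mat", "the dog sat on the log"]
# tfidf = TfidfVectorizer().fit_transform(docs)   # sparse term/document matrix
# lsa = TruncatedSVD(n_components=2, random_state=0)
# topics = lsa.fit_transform(tfidf)               # dense (n_samples, 2) array
# approx = lsa.inverse_transform(topics)          # back to term space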
|
bsd-3-clause
|
mugizico/scikit-learn
|
examples/tree/plot_tree_regression.py
|
206
|
1476
|
"""
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with a decision tree.
A :ref:`decision tree <tree>` is
used to fit a sine curve with additional noisy observations. As a result, it
learns local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn overly fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="data")
plt.plot(X_test, y_1, c="g", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, c="r", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
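# A small addition (not in the original example): training-set R^2 scores make
# the overfitting described in the docstring concrete -- the deeper tree tracks
# the noisy training data much more closely than the shallow one.
print("Training R^2 (max_depth=2):", regr_1.score(X, y))
print("Training R^2 (max_depth=5):", regr_2.score(X, y))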
|
bsd-3-clause
|
seismology/mc_kernel
|
UTILS/plot_timers.py
|
1
|
2984
|
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import os
import subprocess
import glob
import matplotlib.pyplot as plt
def tail(f, n, offset=0):
cmd = ["tail", "-n", str(n), f]
    tail_output = subprocess.check_output(cmd, universal_newlines=True)
    lines = tail_output.split('\n')
    return lines
dirnam = '.'
output_files = glob.glob(os.path.join(dirnam, './OUTPUT*'))
output_files.sort()
fnam = output_files[1]
cmd = ["grep", "CLOCKS", fnam]
tail_output = subprocess.check_output(cmd, universal_newlines=True)
lines = str(tail_output).split("\n")[1:-2]
ntimers = len(lines)
timer_name = []
# Get the names of the timers
for line in lines:
start = 1
# Remove the '-' that some timer names have
for i in range(1, 3):
if line.split()[i] == '-':
start += 1
timer_name.append(" ".join(line.split()[start:-4]))
t_per_call = np.zeros(ntimers)
t_total = np.zeros(ntimers)
ncalls = np.zeros(ntimers)
# Go through each slave Output file and get the timing information
for fnam in output_files[1:-1]:
cmd = ["grep", "CLOCKS", fnam]
tail_output = subprocess.check_output(cmd, universal_newlines=True)
lines = str(tail_output).split("\n")[1:-2]
if len(lines)>0:
for iline in range(0, ntimers):
line = lines[iline]
timing = line.split()[-4:-1]
ncalls[iline] += int(timing[0])
t_per_call[iline] += float(timing[1])
t_total[iline] += float(timing[2])
interesting_timers = np.array([2, 8, 10, 11, 12, 14, 16])
uninteresting_timers = np.array([3, 4, 7])
timer_interesting = []
t_total_interesting = []
t_rest = 0.0
for i in range(0, ntimers):
if i not in uninteresting_timers:
if i in interesting_timers:
            print('Interesting: %s %f' % (timer_name[i], t_total[i] / 3600.))
timer_interesting.append('%s, \n %8.1f CPUh' % (timer_name[i],
t_total[i] / 3600.))
t_total_interesting.append(t_total[i])
else:
            print('Not interesting: %s %f' % (timer_name[i], t_total[i] / 3600.))
t_rest += t_total[i]
t_total_interesting.append(t_rest)
timer_interesting.append('%s, \n %8.1f CPUh' % ('Other',
t_rest / 3600.))
# Create pie chart
# IO colors are reddish, Computation blueish, MPI yellowish
colors = ['dodgerblue', # FFT
'firebrick', # NetCDF
'tomato', # Buffer
'lightskyblue', # Calc_strain
'aqua', # Lagrange
'cadetblue', # Filtering
'darkcyan', # Integration
'grey'] # Rest
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(111)
ax.set_aspect(1)
ax.pie(t_total_interesting,
labels=timer_interesting,
colors=colors, autopct='%1.1f%%', shadow=True)
ax.set_title('Total calculation cost:%8.1f CPUh' % \
(np.sum(np.array(t_total_interesting)) / 3600.))
fig.savefig('Timing.pdf')
|
gpl-3.0
|
mwcraig/sinusoid-model
|
varstar/light_curve_plots.py
|
1
|
6196
|
from __future__ import division, print_function, absolute_import, unicode_literals
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
def double_array(r):
return np.concatenate((r, r))
def narrow_periodgram(time, amplitude, center_frequency, frequency_width,
num=1000):
f_min = center_frequency - frequency_width / 2
f_max = center_frequency + frequency_width / 2
frequencies = np.linspace(f_min, f_max, num=num)
pgram = signal.lombscargle(time, amplitude, 2 * np.pi * frequencies)
return (frequencies, pgram)
def mark_at(freqs, labels, ax=None, voffset=0):
if ax is None:
ax = plt.axes()
for freq, label in zip(freqs, labels):
line, = ax.plot([freq, freq], ax.get_ylim(), ':')
ax.text(1.0 * freq, (0.9 + voffset) * ax.get_ylim()[1], label,
color=line.get_color(), horizontalalignment='center',
backgroundcolor='w', size='large')
def plot_near_harmonics(time, amplitude, fundamental_freq,
max_harmonic=20, nplots_per_row=5, freq_width=0.1,
mark_harmonics=None):
for harm in range(max_harmonic):
if (harm % nplots_per_row) == 0:
fig, axs = plt.subplots(ncols=nplots_per_row, sharey=True)
fm = mark_harmonics or False
axis_index = harm % nplots_per_row
ax = axs[axis_index]
if axis_index == int(nplots_per_row / 2):
            ax.set_title('Normalized periodograms near harmonics')
cen_freq = (harm + 1) * fundamental_freq
f, p = narrow_periodgram(time, amplitude, cen_freq, freq_width)
cen_freq_label = '{}$f_0$'.format(harm + 1)
ax.plot(f - cen_freq, p / p.max(),
label=cen_freq_label + ' max is {:5f}'.format(p.max()))
if fm:
mark_freq = []
mark_label = []
for i in [-3, -2, -1, 1, 2, 3]:
mark_freq.append(i * fm)
mark_label.append('{}$f_m$'.format(i))
mark_at(mark_freq, mark_label, ax=ax)
mark_at([0], [cen_freq_label], ax=ax)
ax.set_xlabel('$f-$' + cen_freq_label)
ax.legend(loc='lower center')
plt.subplots_adjust(wspace=0)
def plot_data_and_model(phase, data, model=None):
double_phase = np.concatenate((phase, phase + 1))
plt.plot(double_phase, -double_array(data), '.', color='b', label='data')
if model is None:
return
ordered_phase = double_phase.argsort()
sorted_model = (double_array(model))[ordered_phase]
plt.plot(double_phase[ordered_phase], -sorted_model,
color='g', label='model', linewidth=3)
plt.xlabel('phase')
plt.ylabel(u'-$\Delta R$')
plt.legend()
def plot_data_model_with_fanciness(nights, phase, target_mag, dates,
model=None,
highlight_nights=None,
nights_to_include=None,
highlight_model=False):
unique_nights = np.unique(nights)
line_format = 'None'
colors = ['b', 'r', 'c', 'm', 'y']
markers = ['v', '^', 'd', 's']
nights_to_include = nights_to_include or unique_nights
#nights_to_include =[1, 4]
highlight_nights = highlight_nights or unique_nights
linew = 3
for idx, night in enumerate(unique_nights):
this_night = (nights == night)
marker_format = markers[idx % len(markers)]
if night in highlight_nights:
alpha = 1.0
else:
alpha = 0.1
legend_label = 'night ' + str(night)
this_color = colors[idx % (len(colors))]
if night in nights_to_include:
plt.plot(np.concatenate((phase[this_night], phase[this_night] + 1)),
- double_array(target_mag[this_night]),
marker=marker_format, linestyle=line_format,
markersize=6.0,
color=this_color,
alpha=alpha,
label=legend_label)
if model is None:
continue
p = phase[this_night].argsort()
gap_at = (phase[this_night][p] - np.roll(phase[this_night][p], 1))
gap_at = (gap_at > 0.1)
masked_model = np.ma.masked_where(gap_at, model(dates[this_night][p]),
copy=True)
if highlight_model:
model_alpha = 1.0
else:
model_alpha = alpha
plt.plot(phase[this_night][p],
- masked_model,
color=this_color,
linewidth=linew, alpha=model_alpha)
plt.plot(phase[this_night][p] + 1,
- masked_model,
color=this_color,
linewidth=linew, alpha=model_alpha)
sze = 22
#plt.legend(bbox_to_anchor=(0, 1), loc=2, ncol=3, borderaxespad=0.)
#plt.title('Period = ' + str(p0) + ' days', size = sze)
plt.xlabel('Phase', size=sze)
plt.ylabel(r'$-dm$', size=sze)
def plot_model_over_time(model, primary_period, secondary_period,
points_primary=100,
points_secondary=5,
primary_epoch=0.):
"""
Plot a light curve with changes over two different time scales
"""
one_period = np.linspace(primary_epoch,
primary_epoch + primary_period,
num=points_primary)
secondary_start_times = np.linspace(0, secondary_period,
num=points_secondary)
all_times = [t + one_period for t in secondary_start_times]
for i, time in enumerate(all_times):
phase = (time - primary_epoch) / primary_period
phase -= np.int64((time - primary_epoch) / primary_period)
sort_index = np.argsort(phase)
this_label = '{:5.4f} secondary periods'.format(i / points_secondary)
plt.plot(phase[sort_index], -model(time[sort_index]),
label=this_label)
plt.xlabel('Phase')
plt.ylabel('- Magnitude')
|
bsd-3-clause
|
chintak/scikit-image
|
skimage/transform/tests/test_warps.py
|
1
|
7574
|
from numpy.testing import (assert_array_almost_equal, run_module_suite,
assert_array_equal, assert_raises)
import numpy as np
from scipy.ndimage import map_coordinates
from skimage.transform import (warp, warp_coords, rotate, resize, rescale,
AffineTransform,
ProjectiveTransform,
SimilarityTransform,
downscale_local_mean)
from skimage import transform as tf, data, img_as_float
from skimage.color import rgb2gray
def test_warp_tform():
x = np.zeros((5, 5), dtype=np.double)
x[2, 2] = 1
theta = - np.pi / 2
tform = SimilarityTransform(scale=1, rotation=theta, translation=(0, 4))
x90 = warp(x, tform, order=1)
assert_array_almost_equal(x90, np.rot90(x))
x90 = warp(x, tform.inverse, order=1)
assert_array_almost_equal(x90, np.rot90(x))
def test_warp_callable():
x = np.zeros((5, 5), dtype=np.double)
x[2, 2] = 1
refx = np.zeros((5, 5), dtype=np.double)
refx[1, 1] = 1
shift = lambda xy: xy + 1
outx = warp(x, shift, order=1)
assert_array_almost_equal(outx, refx)
def test_warp_matrix():
x = np.zeros((5, 5), dtype=np.double)
x[2, 2] = 1
refx = np.zeros((5, 5), dtype=np.double)
refx[1, 1] = 1
matrix = np.array([[1, 0, 1], [0, 1, 1], [0, 0, 1]])
# _warp_fast
outx = warp(x, matrix, order=1)
assert_array_almost_equal(outx, refx)
# check for ndimage.map_coordinates
outx = warp(x, matrix, order=5)
def test_homography():
x = np.zeros((5, 5), dtype=np.double)
x[1, 1] = 1
theta = -np.pi / 2
M = np.array([[np.cos(theta), - np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 4],
[0, 0, 1]])
x90 = warp(x,
inverse_map=ProjectiveTransform(M).inverse,
order=1)
assert_array_almost_equal(x90, np.rot90(x))
def test_fast_homography():
img = rgb2gray(data.lena()).astype(np.uint8)
img = img[:, :100]
theta = np.deg2rad(30)
scale = 0.5
tx, ty = 50, 50
H = np.eye(3)
S = scale * np.sin(theta)
C = scale * np.cos(theta)
H[:2, :2] = [[C, -S], [S, C]]
H[:2, 2] = [tx, ty]
tform = ProjectiveTransform(H)
coords = warp_coords(tform.inverse, (img.shape[0], img.shape[1]))
for order in range(4):
for mode in ('constant', 'reflect', 'wrap', 'nearest'):
p0 = map_coordinates(img, coords, mode=mode, order=order)
p1 = warp(img, tform, mode=mode, order=order)
# import matplotlib.pyplot as plt
# f, (ax0, ax1, ax2, ax3) = plt.subplots(1, 4)
# ax0.imshow(img)
# ax1.imshow(p0, cmap=plt.cm.gray)
# ax2.imshow(p1, cmap=plt.cm.gray)
# ax3.imshow(np.abs(p0 - p1), cmap=plt.cm.gray)
# plt.show()
d = np.mean(np.abs(p0 - p1))
assert d < 0.001
def test_rotate():
x = np.zeros((5, 5), dtype=np.double)
x[1, 1] = 1
x90 = rotate(x, 90)
assert_array_almost_equal(x90, np.rot90(x))
def test_rotate_resize():
x = np.zeros((10, 10), dtype=np.double)
x45 = rotate(x, 45, resize=False)
assert x45.shape == (10, 10)
x45 = rotate(x, 45, resize=True)
    # new dimension should be d = 2 * sqrt(2 * (10/2)^2) ~= 14.1
assert x45.shape == (14, 14)
def test_rescale():
# same scale factor
x = np.zeros((5, 5), dtype=np.double)
x[1, 1] = 1
scaled = rescale(x, 2, order=0)
ref = np.zeros((10, 10))
ref[2:4, 2:4] = 1
assert_array_almost_equal(scaled, ref)
# different scale factors
x = np.zeros((5, 5), dtype=np.double)
x[1, 1] = 1
scaled = rescale(x, (2, 1), order=0)
ref = np.zeros((10, 5))
ref[2:4, 1] = 1
assert_array_almost_equal(scaled, ref)
def test_resize2d():
x = np.zeros((5, 5), dtype=np.double)
x[1, 1] = 1
resized = resize(x, (10, 10), order=0)
ref = np.zeros((10, 10))
ref[2:4, 2:4] = 1
assert_array_almost_equal(resized, ref)
def test_resize3d_keep():
# keep 3rd dimension
x = np.zeros((5, 5, 3), dtype=np.double)
x[1, 1, :] = 1
resized = resize(x, (10, 10), order=0)
ref = np.zeros((10, 10, 3))
ref[2:4, 2:4, :] = 1
assert_array_almost_equal(resized, ref)
resized = resize(x, (10, 10, 3), order=0)
assert_array_almost_equal(resized, ref)
def test_resize3d_resize():
# resize 3rd dimension
x = np.zeros((5, 5, 3), dtype=np.double)
x[1, 1, :] = 1
resized = resize(x, (10, 10, 1), order=0)
ref = np.zeros((10, 10, 1))
ref[2:4, 2:4] = 1
assert_array_almost_equal(resized, ref)
def test_resize3d_bilinear():
# bilinear 3rd dimension
x = np.zeros((5, 5, 2), dtype=np.double)
x[1, 1, 0] = 0
x[1, 1, 1] = 1
resized = resize(x, (10, 10, 1), order=1)
ref = np.zeros((10, 10, 1))
ref[1:5, 1:5, :] = 0.03125
ref[1:5, 2:4, :] = 0.09375
ref[2:4, 1:5, :] = 0.09375
ref[2:4, 2:4, :] = 0.28125
assert_array_almost_equal(resized, ref)
def test_swirl():
image = img_as_float(data.checkerboard())
swirl_params = {'radius': 80, 'rotation': 0, 'order': 2, 'mode': 'reflect'}
swirled = tf.swirl(image, strength=10, **swirl_params)
unswirled = tf.swirl(swirled, strength=-10, **swirl_params)
assert np.mean(np.abs(image - unswirled)) < 0.01
def test_const_cval_out_of_range():
img = np.random.randn(100, 100)
cval = - 10
warped = warp(img, AffineTransform(translation=(10, 10)), cval=cval)
assert np.sum(warped == cval) == (2 * 100 * 10 - 10 * 10)
def test_warp_identity():
lena = img_as_float(rgb2gray(data.lena()))
assert len(lena.shape) == 2
assert np.allclose(lena, warp(lena, AffineTransform(rotation=0)))
assert not np.allclose(lena, warp(lena, AffineTransform(rotation=0.1)))
rgb_lena = np.transpose(np.asarray([lena, np.zeros_like(lena), lena]),
(1, 2, 0))
warped_rgb_lena = warp(rgb_lena, AffineTransform(rotation=0.1))
assert np.allclose(rgb_lena, warp(rgb_lena, AffineTransform(rotation=0)))
assert not np.allclose(rgb_lena, warped_rgb_lena)
# assert no cross-talk between bands
assert np.all(0 == warped_rgb_lena[:, :, 1])
def test_warp_coords_example():
image = data.lena().astype(np.float32)
assert 3 == image.shape[2]
tform = SimilarityTransform(translation=(0, -10))
coords = warp_coords(tform, (30, 30, 3))
map_coordinates(image[:, :, 0], coords[:2])
def test_downscale_local_mean():
image1 = np.arange(4 * 6).reshape(4, 6)
out1 = downscale_local_mean(image1, (2, 3))
expected1 = np.array([[ 4., 7.],
[ 16., 19.]])
assert_array_equal(expected1, out1)
image2 = np.arange(5 * 8).reshape(5, 8)
out2 = downscale_local_mean(image2, (4, 5))
expected2 = np.array([[ 14. , 10.8],
[ 8.5, 5.7]])
assert_array_equal(expected2, out2)
def test_invalid():
assert_raises(ValueError, warp, np.ones((4, )), SimilarityTransform())
assert_raises(ValueError, warp, np.ones((4, 3, 3, 3)),
SimilarityTransform())
def test_inverse():
tform = SimilarityTransform(scale=0.5, rotation=0.1)
inverse_tform = SimilarityTransform(matrix=np.linalg.inv(tform.params))
image = np.arange(10 * 10).reshape(10, 10).astype(np.double)
assert_array_equal(warp(image, inverse_tform), warp(image, tform.inverse))
if __name__ == "__main__":
run_module_suite()
|
bsd-3-clause
|
kelseyoo14/Wander
|
venv_2_7/lib/python2.7/site-packages/IPython/core/magics/basic.py
|
3
|
21611
|
"""Implementation of basic magic functions."""
from __future__ import print_function
import io
import json
import sys
from pprint import pformat
from IPython.core import magic_arguments, page
from IPython.core.error import UsageError
from IPython.core.magic import Magics, magics_class, line_magic, magic_escapes
from IPython.utils.text import format_screen, dedent, indent
from IPython.testing.skipdoctest import skip_doctest
from IPython.utils.ipstruct import Struct
from IPython.utils.path import unquote_filename
from IPython.utils.py3compat import unicode_type
from IPython.utils.warn import warn, error
class MagicsDisplay(object):
def __init__(self, magics_manager):
self.magics_manager = magics_manager
def _lsmagic(self):
"""The main implementation of the %lsmagic"""
mesc = magic_escapes['line']
cesc = magic_escapes['cell']
mman = self.magics_manager
magics = mman.lsmagic()
out = ['Available line magics:',
mesc + (' '+mesc).join(sorted(magics['line'])),
'',
'Available cell magics:',
cesc + (' '+cesc).join(sorted(magics['cell'])),
'',
mman.auto_status()]
return '\n'.join(out)
def _repr_pretty_(self, p, cycle):
p.text(self._lsmagic())
def __str__(self):
return self._lsmagic()
def _jsonable(self):
"""turn magics dict into jsonable dict of the same structure
replaces object instances with their class names as strings
"""
magic_dict = {}
mman = self.magics_manager
magics = mman.lsmagic()
for key, subdict in magics.items():
d = {}
magic_dict[key] = d
for name, obj in subdict.items():
try:
classname = obj.__self__.__class__.__name__
except AttributeError:
classname = 'Other'
d[name] = classname
return magic_dict
def _repr_json_(self):
return self._jsonable()
@magics_class
class BasicMagics(Magics):
"""Magics that provide central IPython functionality.
These are various magics that don't fit into specific categories but that
are all part of the base 'IPython experience'."""
@magic_arguments.magic_arguments()
@magic_arguments.argument(
'-l', '--line', action='store_true',
help="""Create a line magic alias."""
)
@magic_arguments.argument(
'-c', '--cell', action='store_true',
help="""Create a cell magic alias."""
)
@magic_arguments.argument(
'name',
help="""Name of the magic to be created."""
)
@magic_arguments.argument(
'target',
help="""Name of the existing line or cell magic."""
)
@line_magic
def alias_magic(self, line=''):
"""Create an alias for an existing line or cell magic.
Examples
--------
::
In [1]: %alias_magic t timeit
Created `%t` as an alias for `%timeit`.
Created `%%t` as an alias for `%%timeit`.
In [2]: %t -n1 pass
1 loops, best of 3: 954 ns per loop
In [3]: %%t -n1
...: pass
...:
1 loops, best of 3: 954 ns per loop
In [4]: %alias_magic --cell whereami pwd
UsageError: Cell magic function `%%pwd` not found.
In [5]: %alias_magic --line whereami pwd
Created `%whereami` as an alias for `%pwd`.
In [6]: %whereami
Out[6]: u'/home/testuser'
"""
args = magic_arguments.parse_argstring(self.alias_magic, line)
shell = self.shell
mman = self.shell.magics_manager
escs = ''.join(magic_escapes.values())
target = args.target.lstrip(escs)
name = args.name.lstrip(escs)
# Find the requested magics.
m_line = shell.find_magic(target, 'line')
m_cell = shell.find_magic(target, 'cell')
if args.line and m_line is None:
raise UsageError('Line magic function `%s%s` not found.' %
(magic_escapes['line'], target))
if args.cell and m_cell is None:
raise UsageError('Cell magic function `%s%s` not found.' %
(magic_escapes['cell'], target))
# If --line and --cell are not specified, default to the ones
# that are available.
if not args.line and not args.cell:
if not m_line and not m_cell:
raise UsageError(
'No line or cell magic with name `%s` found.' % target
)
args.line = bool(m_line)
args.cell = bool(m_cell)
if args.line:
mman.register_alias(name, target, 'line')
print('Created `%s%s` as an alias for `%s%s`.' % (
magic_escapes['line'], name,
magic_escapes['line'], target))
if args.cell:
mman.register_alias(name, target, 'cell')
print('Created `%s%s` as an alias for `%s%s`.' % (
magic_escapes['cell'], name,
magic_escapes['cell'], target))
@line_magic
def lsmagic(self, parameter_s=''):
"""List currently available magic functions."""
return MagicsDisplay(self.shell.magics_manager)
def _magic_docs(self, brief=False, rest=False):
"""Return docstrings from magic functions."""
mman = self.shell.magics_manager
docs = mman.lsmagic_docs(brief, missing='No documentation')
if rest:
format_string = '**%s%s**::\n\n%s\n\n'
else:
format_string = '%s%s:\n%s\n'
return ''.join(
[format_string % (magic_escapes['line'], fname,
indent(dedent(fndoc)))
for fname, fndoc in sorted(docs['line'].items())]
+
[format_string % (magic_escapes['cell'], fname,
indent(dedent(fndoc)))
for fname, fndoc in sorted(docs['cell'].items())]
)
@line_magic
def magic(self, parameter_s=''):
"""Print information about the magic function system.
Supported formats: -latex, -brief, -rest
"""
mode = ''
try:
mode = parameter_s.split()[0][1:]
if mode == 'rest':
rest_docs = []
except IndexError:
pass
brief = (mode == 'brief')
rest = (mode == 'rest')
magic_docs = self._magic_docs(brief, rest)
if mode == 'latex':
print(self.format_latex(magic_docs))
return
else:
magic_docs = format_screen(magic_docs)
out = ["""
IPython's 'magic' functions
===========================
The magic function system provides a series of functions which allow you to
control the behavior of IPython itself, plus a lot of system-type
features. There are two kinds of magics, line-oriented and cell-oriented.
Line magics are prefixed with the % character and work much like OS
command-line calls: they get as an argument the rest of the line, where
arguments are passed without parentheses or quotes. For example, this will
time the given statement::
%timeit range(1000)
Cell magics are prefixed with a double %%, and they are functions that get as
an argument not only the rest of the line, but also the lines below it in a
separate argument. These magics are called with two arguments: the rest of the
call line and the body of the cell, consisting of the lines below the first.
For example::
%%timeit x = numpy.random.randn((100, 100))
numpy.linalg.svd(x)
will time the execution of the numpy svd routine, running the assignment of x
as part of the setup phase, which is not timed.
In a line-oriented client (the terminal or Qt console IPython), starting a new
input with %% will automatically enter cell mode, and IPython will continue
reading input until a blank line is given. In the notebook, simply type the
whole cell as one entity, but keep in mind that the %% escape can only be at
the very start of the cell.
NOTE: If you have 'automagic' enabled (via the command line option or with the
%automagic function), you don't need to type in the % explicitly for line
magics; cell magics always require an explicit '%%' escape. By default,
IPython ships with automagic on, so you should only rarely need the % escape.
Example: typing '%cd mydir' (without the quotes) changes your working directory
to 'mydir', if it exists.
For a list of the available magic functions, use %lsmagic. For a description
of any of them, type %magic_name?, e.g. '%cd?'.
Currently the magic system has the following functions:""",
magic_docs,
"Summary of magic functions (from %slsmagic):" % magic_escapes['line'],
str(self.lsmagic()),
]
page.page('\n'.join(out))
@line_magic
def page(self, parameter_s=''):
"""Pretty print the object and display it through a pager.
%page [options] OBJECT
If no object is given, use _ (last output).
Options:
-r: page str(object), don't pretty-print it."""
# After a function contributed by Olivier Aubert, slightly modified.
# Process options/args
opts, args = self.parse_options(parameter_s, 'r')
raw = 'r' in opts
oname = args and args or '_'
info = self.shell._ofind(oname)
if info['found']:
txt = (raw and str or pformat)( info['obj'] )
page.page(txt)
else:
print('Object `%s` not found' % oname)
@line_magic
def profile(self, parameter_s=''):
"""Print your currently active IPython profile.
See Also
--------
prun : run code using the Python profiler
(:meth:`~IPython.core.magics.execution.ExecutionMagics.prun`)
"""
warn("%profile is now deprecated. Please use get_ipython().profile instead.")
from IPython.core.application import BaseIPythonApplication
if BaseIPythonApplication.initialized():
print(BaseIPythonApplication.instance().profile)
else:
error("profile is an application-level value, but you don't appear to be in an IPython application")
@line_magic
def pprint(self, parameter_s=''):
"""Toggle pretty printing on/off."""
ptformatter = self.shell.display_formatter.formatters['text/plain']
ptformatter.pprint = bool(1 - ptformatter.pprint)
print('Pretty printing has been turned',
['OFF','ON'][ptformatter.pprint])
@line_magic
def colors(self, parameter_s=''):
"""Switch color scheme for prompts, info system and exception handlers.
Currently implemented schemes: NoColor, Linux, LightBG.
Color scheme names are not case-sensitive.
Examples
--------
To get a plain black and white terminal::
%colors nocolor
"""
def color_switch_err(name):
warn('Error changing %s color schemes.\n%s' %
(name, sys.exc_info()[1]))
new_scheme = parameter_s.strip()
if not new_scheme:
raise UsageError(
"%colors: you must specify a color scheme. See '%colors?'")
# local shortcut
shell = self.shell
import IPython.utils.rlineimpl as readline
if not shell.colors_force and \
not readline.have_readline and \
(sys.platform == "win32" or sys.platform == "cli"):
msg = """\
Proper color support under MS Windows requires the pyreadline library.
You can find it at:
http://ipython.org/pyreadline.html
Defaulting color scheme to 'NoColor'"""
new_scheme = 'NoColor'
warn(msg)
# readline option is 0
if not shell.colors_force and not shell.has_readline:
new_scheme = 'NoColor'
# Set prompt colors
try:
shell.prompt_manager.color_scheme = new_scheme
except:
color_switch_err('prompt')
else:
shell.colors = \
shell.prompt_manager.color_scheme_table.active_scheme_name
# Set exception colors
try:
shell.InteractiveTB.set_colors(scheme = new_scheme)
shell.SyntaxTB.set_colors(scheme = new_scheme)
except:
color_switch_err('exception')
# Set info (for 'object?') colors
if shell.color_info:
try:
shell.inspector.set_active_scheme(new_scheme)
except:
color_switch_err('object inspector')
else:
shell.inspector.set_active_scheme('NoColor')
@line_magic
def xmode(self, parameter_s=''):
"""Switch modes for the exception handlers.
Valid modes: Plain, Context and Verbose.
If called without arguments, acts as a toggle."""
def xmode_switch_err(name):
warn('Error changing %s exception modes.\n%s' %
(name,sys.exc_info()[1]))
shell = self.shell
new_mode = parameter_s.strip().capitalize()
try:
shell.InteractiveTB.set_mode(mode=new_mode)
print('Exception reporting mode:',shell.InteractiveTB.mode)
except:
xmode_switch_err('user')
@line_magic
def quickref(self,arg):
""" Show a quick reference sheet """
from IPython.core.usage import quick_reference
qr = quick_reference + self._magic_docs(brief=True)
page.page(qr)
@line_magic
def doctest_mode(self, parameter_s=''):
"""Toggle doctest mode on and off.
This mode is intended to make IPython behave as much as possible like a
plain Python shell, from the perspective of how its prompts, exceptions
and output look. This makes it easy to copy and paste parts of a
session into doctests. It does so by:
- Changing the prompts to the classic ``>>>`` ones.
- Changing the exception reporting mode to 'Plain'.
- Disabling pretty-printing of output.
Note that IPython also supports the pasting of code snippets that have
leading '>>>' and '...' prompts in them. This means that you can paste
doctests from files or docstrings (even if they have leading
whitespace), and the code will execute correctly. You can then use
'%history -t' to see the translated history; this will give you the
input after removal of all the leading prompts and whitespace, which
can be pasted back into an editor.
With these features, you can switch into this mode easily whenever you
need to do testing and changes to doctests, without having to leave
your existing IPython session.
"""
# Shorthands
shell = self.shell
pm = shell.prompt_manager
meta = shell.meta
disp_formatter = self.shell.display_formatter
ptformatter = disp_formatter.formatters['text/plain']
# dstore is a data store kept in the instance metadata bag to track any
# changes we make, so we can undo them later.
dstore = meta.setdefault('doctest_mode',Struct())
save_dstore = dstore.setdefault
# save a few values we'll need to recover later
mode = save_dstore('mode',False)
save_dstore('rc_pprint',ptformatter.pprint)
save_dstore('xmode',shell.InteractiveTB.mode)
save_dstore('rc_separate_out',shell.separate_out)
save_dstore('rc_separate_out2',shell.separate_out2)
save_dstore('rc_prompts_pad_left',pm.justify)
save_dstore('rc_separate_in',shell.separate_in)
save_dstore('rc_active_types',disp_formatter.active_types)
save_dstore('prompt_templates',(pm.in_template, pm.in2_template, pm.out_template))
if mode == False:
# turn on
pm.in_template = '>>> '
pm.in2_template = '... '
pm.out_template = ''
# Prompt separators like plain python
shell.separate_in = ''
shell.separate_out = ''
shell.separate_out2 = ''
pm.justify = False
ptformatter.pprint = False
disp_formatter.active_types = ['text/plain']
shell.magic('xmode Plain')
else:
# turn off
pm.in_template, pm.in2_template, pm.out_template = dstore.prompt_templates
shell.separate_in = dstore.rc_separate_in
shell.separate_out = dstore.rc_separate_out
shell.separate_out2 = dstore.rc_separate_out2
pm.justify = dstore.rc_prompts_pad_left
ptformatter.pprint = dstore.rc_pprint
disp_formatter.active_types = dstore.rc_active_types
shell.magic('xmode ' + dstore.xmode)
# Store new mode and inform
dstore.mode = bool(1-int(mode))
mode_label = ['OFF','ON'][dstore.mode]
print('Doctest mode is:', mode_label)
@line_magic
def gui(self, parameter_s=''):
"""Enable or disable IPython GUI event loop integration.
%gui [GUINAME]
This magic replaces IPython's threaded shells that were activated
using the (pylab/wthread/etc.) command line flags. GUI toolkits
can now be enabled at runtime and keyboard
interrupts should work without any problems. The following toolkits
are supported: wxPython, PyQt4, PyGTK, Tk and Cocoa (OSX)::
%gui wx # enable wxPython event loop integration
%gui qt4|qt # enable PyQt4 event loop integration
%gui qt5 # enable PyQt5 event loop integration
%gui gtk # enable PyGTK event loop integration
%gui gtk3 # enable Gtk3 event loop integration
%gui tk # enable Tk event loop integration
%gui osx # enable Cocoa event loop integration
# (requires %matplotlib 1.1)
%gui # disable all event loop integration
WARNING: after any of these has been called you can simply create
an application object, but DO NOT start the event loop yourself, as
we have already handled that.
"""
opts, arg = self.parse_options(parameter_s, '')
if arg=='': arg = None
try:
return self.shell.enable_gui(arg)
except Exception as e:
# print simple error message, rather than traceback if we can't
# hook up the GUI
error(str(e))
@skip_doctest
@line_magic
def precision(self, s=''):
"""Set floating point precision for pretty printing.
Can set either integer precision or a format string.
If numpy has been imported and precision is an int,
numpy display precision will also be set, via ``numpy.set_printoptions``.
If no argument is given, defaults will be restored.
Examples
--------
::
In [1]: from math import pi
In [2]: %precision 3
Out[2]: u'%.3f'
In [3]: pi
Out[3]: 3.142
In [4]: %precision %i
Out[4]: u'%i'
In [5]: pi
Out[5]: 3
In [6]: %precision %e
Out[6]: u'%e'
In [7]: pi**10
Out[7]: 9.364805e+04
In [8]: %precision
Out[8]: u'%r'
In [9]: pi**10
Out[9]: 93648.047476082982
"""
ptformatter = self.shell.display_formatter.formatters['text/plain']
ptformatter.float_precision = s
return ptformatter.float_format
@magic_arguments.magic_arguments()
@magic_arguments.argument(
'-e', '--export', action='store_true', default=False,
help='Export IPython history as a notebook. The filename argument '
'is used to specify the notebook name and format. For example '
'a filename of notebook.ipynb will result in a notebook name '
'of "notebook" and a format of "json". Likewise using a ".py" '
'file extension will write the notebook as a Python script'
)
@magic_arguments.argument(
'filename', type=unicode_type,
help='Notebook name or filename'
)
@line_magic
def notebook(self, s):
"""Export and convert IPython notebooks.
This function can export the current IPython history to a notebook file.
For example, to export the history to "foo.ipynb" do "%notebook -e foo.ipynb".
To export the history to "foo.py" do "%notebook -e foo.py".
"""
args = magic_arguments.parse_argstring(self.notebook, s)
from nbformat import write, v4
args.filename = unquote_filename(args.filename)
if args.export:
cells = []
hist = list(self.shell.history_manager.get_range())
if(len(hist)<=1):
raise ValueError('History is empty, cannot export')
for session, execution_count, source in hist[:-1]:
cells.append(v4.new_code_cell(
execution_count=execution_count,
source=source
))
nb = v4.new_notebook(cells=cells)
with io.open(args.filename, 'w', encoding='utf-8') as f:
write(nb, f, version=4)
|
artistic-2.0
|
lancezlin/pylearn2
|
pylearn2/utils/image.py
|
39
|
18841
|
"""
Utility functions for working with images.
"""
import logging
import numpy as np
plt = None
axes = None
from theano.compat.six.moves import xrange
from theano.compat.six import string_types
import warnings
try:
import matplotlib.pyplot as plt
import matplotlib.axes
except (RuntimeError, ImportError, TypeError) as matplotlib_exception:
warnings.warn("Unable to import matplotlib. Some features unavailable. "
"Original exception: " + str(matplotlib_exception))
import os
try:
from PIL import Image
except ImportError:
Image = None
from pylearn2.utils import string_utils as string
from pylearn2.utils.exc import reraise_as
from tempfile import mkstemp
from multiprocessing import Process
import subprocess
logger = logging.getLogger(__name__)
def ensure_Image():
"""Makes sure Image has been imported from PIL"""
global Image
if Image is None:
raise RuntimeError("You are trying to use PIL-dependent functionality"
" but don't have PIL installed.")
def imview(*args, **kwargs):
"""
A matplotlib-based image viewer command,
wrapping `matplotlib.pyplot.imshow` but behaving more
sensibly.
Parameters
----------
figure : TODO
TODO: write parameters section using decorators to inherit
the matplotlib docstring
Notes
-----
Parameters are identical to `matplotlib.pyplot.imshow`
but this behaves somewhat differently:
* By default, it creates a new figure (unless a
      `figure` keyword argument is supplied).
* It modifies the axes of that figure to use the
full frame, without ticks or tick labels.
* It turns on `nearest` interpolation by default
(i.e., it does not antialias pixel data). This
can be overridden with the `interpolation`
argument as in `imshow`.
All other arguments and keyword arguments are passed
    on to `imshow`.
"""
if 'figure' not in kwargs:
f = plt.figure()
else:
f = kwargs['figure']
new_ax = matplotlib.axes.Axes(f,
[0, 0, 1, 1],
xticks=[],
yticks=[],
frame_on=False)
f.delaxes(f.gca())
f.add_axes(new_ax)
if len(args) < 5 and 'interpolation' not in kwargs:
kwargs['interpolation'] = 'nearest'
plt.imshow(*args, **kwargs)
def imview_async(*args, **kwargs):
"""
A version of `imview` that forks a separate process and
immediately shows the image.
Parameters
----------
window_title : str
TODO: writeme with decorators to inherit the other imviews'
docstrings
Notes
-----
Supports the `window_title` keyword argument to cope with
the title always being 'Figure 1'.
Returns the `multiprocessing.Process` handle.
"""
if 'figure' in kwargs:
raise ValueError("passing a figure argument not supported")
def fork_image_viewer():
f = plt.figure()
kwargs['figure'] = f
imview(*args, **kwargs)
if 'window_title' in kwargs:
f.set_window_title(kwargs['window_title'])
plt.show()
p = Process(None, fork_image_viewer)
p.start()
return p
def show(image):
"""
.. todo::
WRITEME
Parameters
----------
image : PIL Image object or ndarray
If ndarray, integer formats are assumed to use 0-255
and float formats are assumed to use 0-1
"""
viewer_command = string.preprocess('${PYLEARN2_VIEWER_COMMAND}')
if viewer_command == 'inline':
return imview(image)
if hasattr(image, '__array__'):
# do some shape checking because PIL just raises a tuple indexing error
# that doesn't make it very clear what the problem is
if len(image.shape) < 2 or len(image.shape) > 3:
raise ValueError('image must have either 2 or 3 dimensions but its'
' shape is ' + str(image.shape))
# The below is a temporary workaround that prevents us from crashing
# 3rd party image viewers such as eog by writing out overly large
# images.
# In the long run we should determine if this is a bug in PIL when
# producing
# such images or a bug in eog and determine a proper fix.
# Since this is hopefully just a short term workaround the
# constants below are not included in the interface to the
# function, so that 3rd party code won't start passing them.
max_height = 4096
max_width = 4096
# Display separate warnings for each direction, since it's
# common to crop only one.
if image.shape[0] > max_height:
image = image[0:max_height, :, :]
warnings.warn("Cropping image to smaller height to avoid crashing "
"the viewer program.")
        if image.shape[1] > max_width:
image = image[:, 0:max_width, :]
warnings.warn("Cropping the image to a smaller width to avoid "
"crashing the viewer program.")
# This ends the workaround
if image.dtype == 'int8':
image = np.cast['uint8'](image)
elif str(image.dtype).startswith('float'):
# don't use *=, we don't want to modify the input array
image = image * 255.
image = np.cast['uint8'](image)
# PIL is too stupid to handle single-channel arrays
if len(image.shape) == 3 and image.shape[2] == 1:
image = image[:, :, 0]
try:
ensure_Image()
image = Image.fromarray(image)
except TypeError:
reraise_as(TypeError("PIL issued TypeError on ndarray of shape " +
str(image.shape) + " and dtype " +
str(image.dtype)))
# Create a temporary file with the suffix '.png'.
fd, name = mkstemp(suffix='.png')
os.close(fd)
# Note:
# Although we can use tempfile.NamedTemporaryFile() to create
# a temporary file, the function should be used with care.
#
# In Python earlier than 2.7, a temporary file created by the
# function will be deleted just after the file is closed.
# We can re-use the name of the temporary file, but there is an
# instant where a file with the name does not exist in the file
# system before we re-use the name. This may cause a race
# condition.
#
# In Python 2.7 or later, tempfile.NamedTemporaryFile() has
# the 'delete' argument which can control whether a temporary
# file will be automatically deleted or not. With the argument,
# the above race condition can be avoided.
#
image.save(name)
if os.name == 'nt':
subprocess.Popen(viewer_command + ' ' + name + ' && del ' + name,
shell=True)
else:
subprocess.Popen(viewer_command + ' ' + name + ' ; rm ' + name,
shell=True)
def pil_from_ndarray(ndarray):
"""
Converts an ndarray to a PIL image.
Parameters
----------
ndarray : ndarray
An ndarray containing an image.
Returns
-------
pil : PIL Image
A PIL Image containing the image.
"""
try:
if ndarray.dtype == 'float32' or ndarray.dtype == 'float64':
assert ndarray.min() >= 0.0
assert ndarray.max() <= 1.0
ndarray = np.cast['uint8'](ndarray * 255)
if len(ndarray.shape) == 3 and ndarray.shape[2] == 1:
ndarray = ndarray[:, :, 0]
ensure_Image()
rval = Image.fromarray(ndarray)
return rval
except Exception as e:
logger.exception('original exception: ')
logger.exception(e)
logger.exception('ndarray.dtype: {0}'.format(ndarray.dtype))
logger.exception('ndarray.shape: {0}'.format(ndarray.shape))
raise
assert False
def ndarray_from_pil(pil, dtype='uint8'):
"""
Converts a PIL Image to an ndarray.
Parameters
----------
pil : PIL Image
An image represented as a PIL Image object
dtype : str
The dtype of ndarray to create
Returns
-------
ndarray : ndarray
The image as an ndarray.
"""
rval = np.asarray(pil)
if dtype != rval.dtype:
rval = np.cast[dtype](rval)
if str(dtype).startswith('float'):
rval /= 255.
if len(rval.shape) == 2:
rval = rval.reshape(rval.shape[0], rval.shape[1], 1)
return rval
def rescale(image, shape):
"""
Scales image to be no larger than shape. PIL might give you
unexpected results beyond that.
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3 # rows, cols, channels
assert len(shape) == 2 # rows, cols
i = pil_from_ndarray(image)
ensure_Image()
i.thumbnail([shape[1], shape[0]], Image.ANTIALIAS)
rval = ndarray_from_pil(i, dtype=image.dtype)
return rval
resize = rescale
def fit_inside(image, shape):
"""
    Scales image down to fit inside shape; preserves the proportions of the image.
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3 # rows, cols, channels
assert len(shape) == 2 # rows, cols
if image.shape[0] <= shape[0] and image.shape[1] <= shape[1]:
return image.copy()
row_ratio = float(image.shape[0]) / float(shape[0])
col_ratio = float(image.shape[1]) / float(shape[1])
if row_ratio > col_ratio:
target_shape = [shape[0], min(image.shape[1] / row_ratio, shape[1])]
else:
target_shape = [min(image.shape[0] / col_ratio, shape[0]), shape[1]]
assert target_shape[0] <= shape[0]
assert target_shape[1] <= shape[1]
assert target_shape[0] == shape[0] or target_shape[1] == shape[1]
rval = rescale(image, target_shape)
return rval
def letterbox(image, shape):
"""
Pads image with black letterboxing to bring image.shape up to shape
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3 # rows, cols, channels
assert len(shape) == 2 # rows, cols
assert image.shape[0] <= shape[0]
assert image.shape[1] <= shape[1]
if image.shape[0] == shape[0] and image.shape[1] == shape[1]:
return image.copy()
rval = np.zeros((shape[0], shape[1], image.shape[2]), dtype=image.dtype)
rstart = (shape[0] - image.shape[0]) / 2
cstart = (shape[1] - image.shape[1]) / 2
rend = rstart + image.shape[0]
cend = cstart + image.shape[1]
rval[rstart:rend, cstart:cend] = image
return rval
def make_letterboxed_thumbnail(image, shape):
"""
Scales image down to shape. Preserves proportions of image, introduces
black letterboxing if necessary.
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3
assert len(shape) == 2
shrunk = fit_inside(image, shape)
letterboxed = letterbox(shrunk, shape)
return letterboxed
def load(filepath, rescale_image=True, dtype='float64'):
"""
Load an image from a file.
Parameters
----------
filepath : str
Path to the image file to load
rescale_image : bool
Default value: True
If True, returned images have pixel values in [0, 1]. Otherwise,
values are in [0, 255].
dtype: str
The dtype to use for the returned value
Returns
-------
img : numpy ndarray
An array containing the image that was in the file.
"""
assert isinstance(filepath, string_types)
if not rescale_image and dtype == 'uint8':
ensure_Image()
rval = np.asarray(Image.open(filepath))
assert rval.dtype == 'uint8'
return rval
s = 1.0
if rescale_image:
s = 255.
try:
ensure_Image()
rval = Image.open(filepath)
except Exception:
reraise_as(Exception("Could not open " + filepath))
numpy_rval = np.array(rval)
msg = ("Tried to load an image, got an array with %d"
" dimensions. Expected 2 or 3."
"This may indicate a mildly corrupted image file. Try "
"converting it to a different image format with a different "
"editor like gimp or imagemagic. Sometimes these programs are "
"more robust to minor corruption than PIL and will emit a "
"correctly formatted image in the new format.")
if numpy_rval.ndim not in [2, 3]:
logger.error(dir(rval))
logger.error(rval)
logger.error(rval.size)
rval.show()
raise AssertionError(msg % numpy_rval.ndim)
rval = numpy_rval
rval = np.cast[dtype](rval) / s
if rval.ndim == 2:
rval = rval.reshape(rval.shape[0], rval.shape[1], 1)
if rval.ndim != 3:
raise AssertionError("Something went wrong opening " +
filepath + '. Resulting shape is ' +
str(rval.shape) +
" (it's meant to have 3 dimensions by now)")
return rval
def save(filepath, ndarray):
"""
Saves an image to a file.
Parameters
----------
filepath : str
The path to write the file to.
ndarray : ndarray
An array containing the image to be saved.
"""
pil_from_ndarray(ndarray).save(filepath)
def scale_to_unit_interval(ndar, eps=1e-8):
"""
Scales all values in the ndarray ndar to be between 0 and 1
Parameters
----------
ndar : WRITEME
eps : WRITEME
Returns
-------
WRITEME
"""
ndar = ndar.copy()
ndar -= ndar.min()
ndar *= 1.0 / (ndar.max() + eps)
return ndar
def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),
scale_rows_to_unit_interval=True,
output_pixel_vals=True):
"""
Transform an array with one flattened image per row, into an array in
    which images are reshaped and laid out like tiles on a floor.
This function is useful for visualizing datasets whose rows are images,
and also columns of matrices for transforming those rows
(such as the first layer of a neural net).
Parameters
----------
    X : numpy.ndarray
2-d ndarray or 4 tuple of 2-d ndarrays or None for channels,
in which every row is a flattened image.
    img_shape : 2-tuple of ints
The first component is the height of each image,
the second component is the width.
tile_shape : 2-tuple of ints
The number of images to tile in (row, columns) form.
scale_rows_to_unit_interval : bool
        Whether or not each row should be scaled to [0, 1] before being plotted.
output_pixel_vals : bool
Whether or not the output should be pixel values (int8) or floats.
Returns
-------
y : 2d-ndarray
The return value has the same dtype as X, and is suitable for
viewing as an image with PIL.Image.fromarray.
"""
assert len(img_shape) == 2
assert len(tile_shape) == 2
assert len(tile_spacing) == 2
# The expression below can be re-written in a more C style as
# follows :
#
# out_shape = [0,0]
# out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -
# tile_spacing[0]
# out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -
# tile_spacing[1]
out_shape = [(ishp + tsp) * tshp - tsp for ishp, tshp, tsp
in zip(img_shape, tile_shape, tile_spacing)]
if isinstance(X, tuple):
assert len(X) == 4
# Create an output np ndarray to store the image
if output_pixel_vals:
out_array = np.zeros((out_shape[0], out_shape[1], 4),
dtype='uint8')
else:
out_array = np.zeros((out_shape[0], out_shape[1], 4),
dtype=X.dtype)
# colors default to 0, alpha defaults to 1 (opaque)
if output_pixel_vals:
channel_defaults = [0, 0, 0, 255]
else:
channel_defaults = [0., 0., 0., 1.]
for i in xrange(4):
if X[i] is None:
# if channel is None, fill it with zeros of the correct
# dtype
dt = out_array.dtype
if output_pixel_vals:
dt = 'uint8'
out_array[:, :, i] = np.zeros(out_shape, dtype=dt) + \
channel_defaults[i]
else:
# use a recurrent call to compute the channel and store it
# in the output
out_array[:, :, i] = tile_raster_images(
X[i], img_shape, tile_shape, tile_spacing,
scale_rows_to_unit_interval, output_pixel_vals)
return out_array
else:
# if we are dealing with only one channel
H, W = img_shape
Hs, Ws = tile_spacing
# generate a matrix to store the output
dt = X.dtype
if output_pixel_vals:
dt = 'uint8'
out_array = np.zeros(out_shape, dtype=dt)
for tile_row in xrange(tile_shape[0]):
for tile_col in xrange(tile_shape[1]):
if tile_row * tile_shape[1] + tile_col < X.shape[0]:
this_x = X[tile_row * tile_shape[1] + tile_col]
if scale_rows_to_unit_interval:
# if we should scale values to be between 0 and 1
# do this by calling the `scale_to_unit_interval`
# function
this_img = scale_to_unit_interval(
this_x.reshape(img_shape))
else:
this_img = this_x.reshape(img_shape)
# add the slice to the corresponding position in the
# output array
c = 1
if output_pixel_vals:
c = 255
out_array[
tile_row * (H + Hs): tile_row * (H + Hs) + H,
tile_col * (W + Ws): tile_col * (W + Ws) + W
] = this_img * c
return out_array
if __name__ == '__main__':
black = np.zeros((50, 50, 3), dtype='uint8')
red = black.copy()
red[:, :, 0] = 255
green = black.copy()
green[:, :, 1] = 255
show(black)
show(green)
show(red)
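    # A brief tiling sketch (not in the original module): lay out 16 random
    # 8x8 "images", stored one flattened image per row, as a 4x4 mosaic and
    # save it with the save() helper above.
    flat = np.random.rand(16, 64)
    mosaic = tile_raster_images(flat, img_shape=(8, 8), tile_shape=(4, 4),
                                tile_spacing=(1, 1))
    save('mosaic.png', mosaic)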
|
bsd-3-clause
|
DailyActie/Surrogate-Model
|
01-codes/pyKriging-master/pyKriging/CrossValidation.py
|
1
|
10132
|
"""
@author: Giorgos
"""
import random
import scipy.stats as stats
from matplotlib import pyplot as plt
from pyKriging.krige import kriging
from pyKriging.utilities import *
class Cross_Validation():
def __init__(self, model, name=None):
"""
X- sampling plane
y- Objective function evaluations
name- the name of the model
"""
self.model = model
self.X = self.model.X
self.y = self.model.y
self.n, self.k = np.shape(self.X)
self.predict_list, self.predict_varr, self.scvr = [], [], []
self.name = name
def calculate_RMSE_Rsquared(self, optimiser, nt):
"""
        this function calculates the root mean squared error of the
        interpolated model for a sample of nt test data
Input:
optimiser- optimiser to be used
nt- the size of the sample test data
Output:
RMSE- the root mean squared error of nt sampling points
Rsquared- the correlation coefficient
"""
yi_p, yi, yi_dif, yiyi_p, yiyi, yi_pyi_p = [], [], [], [], [], []
Sample = random.sample([i for i in range(len(self.X))], nt)
Model = kriging(self.X, self.y, name='%s' % self.name)
Model.train(optimiser)
for i, j in enumerate(Sample):
yi_p.append(Model.predict(self.X[j]))
yi.append(self.y[j])
yi_dif.append(yi[i] - yi_p[i])
yiyi_p.append(yi[i] * yi_p[i])
yiyi.append(yi[i] * yi[i])
yi_pyi_p.append(yi_p[i] * yi_p[i])
        RMSE = np.sqrt(sum(np.array(yi_dif) ** 2.) / float(nt))
Rsquared = ((float(nt) * sum(yiyi_p) - sum(yi) * sum(yi_p)) /
(np.sqrt((float(nt) * sum(yiyi) - sum(yi) ** 2.) *
(float(nt) * sum(yi_pyi_p) - sum(yi_p) ** 2.)))) ** 2.
return ['RMSE = %f' % RMSE, 'Rsquared = %f' % Rsquared]
def calculate_SCVR(self, optimiser='pso', plot=0):
"""
this function calculates the standardised cross-validated residual
(SCVR)
value for each sampling point.
Return an nx1 array with the SCVR value of each sampling point. If plot
is 1, then plot scvr vs doe and y_pred vs y.
Input:
optimiser- optimiser to be used
plot- if 1 plots scvr vs doe and y_pred vs y
Output:
predict_list- list with different interpolated kriging models
excluding
each time one point of the sampling plan
predict_varr- list with the square root of the posterior variance
scvr- the scvr as proposed by Jones et al. (Journal of global
optimisation, 13: 455-492, 1998)
"""
y_normalised = (self.y - np.min(self.y)) / (np.max(self.y) -
np.min(self.y))
y_ = np.copy(self.y)
Kriging_models_i, list_arrays, list_ys, train_list = [], [], [], []
for i in range(self.n):
exclude_value = [i]
idx = list(set(range(self.n)) - set(exclude_value))
list_arrays.append(self.X[idx])
list_ys.append(y_[idx])
Kriging_models_i.append(kriging(list_arrays[i], list_ys[i],
name='%s' % self.name))
train_list.append(Kriging_models_i[i].train(optimizer=optimiser))
self.predict_list.append(Kriging_models_i[i].predict(self.X[i]))
self.predict_varr.append(Kriging_models_i[i].predict_var(
self.X[i]))
self.scvr.append((y_normalised[i] - Kriging_models_i[i].normy(
self.predict_list[i])) /
self.predict_varr[i][0, 0])
if plot == 0:
return self.predict_list, self.predict_varr, self.scvr
elif plot == 1:
fig = plt.figure(figsize=(12, 8), facecolor='w', edgecolor='k',
linewidth=2.0, frameon=True)
ax1 = fig.add_subplot(1, 2, 1)
ax1.scatter([i for i in range(1, self.n + 1)], self.scvr, alpha=0.5,
edgecolor='black', facecolor='b', linewidth=2.)
ax1.plot([i for i in range(0, self.n + 3)], [3] * (self.n + 3), 'r')
ax1.plot([i for i in range(0, self.n + 3)], [-3] * (self.n + 3), 'r')
ax1.set_xlim(0, self.n + 2)
ax1.set_ylim(-4, 4)
ax1.set_xlabel('DoE individual')
ax1.set_ylabel('SCVR')
ax2 = fig.add_subplot(1, 2, 2)
ax2.scatter(self.predict_list, self.y, alpha=0.5,
edgecolor='black', facecolor='b', linewidth=2.)
if np.max(self.y) > 0:
ax2.set_ylim(0, np.max(self.y) + 0.00001)
ax2.set_xlim(0, max(self.predict_list) + 0.00001)
else:
ax2.set_ylim(0, np.min(self.y) - 0.00001)
ax2.set_xlim(0, min(self.predict_list) - 0.00001)
ax2.plot(ax2.get_xlim(), ax2.get_ylim(), ls="-", c=".3")
ax2.set_xlabel('predicted y')
ax2.set_ylabel('y')
plt.show()
return self.predict_list, self.predict_varr, self.scvr
else:
raise ValueError('value for plot should be either 0 or 1')
def calculate_transformed_SCVR(self, transformation, optimiser='pso',
plot=0):
"""
this function calculates the transformed standardised cross-validated
residual (SCVR) value for each sampling point. This helps to improve
the model.
Return an nx1 array with the SCVR value of each sampling point. If plot
is 1, then plot scvr vs doe and y_pred vs y.
Input:
optimiser- optimiser to be used
plot- if 1 plots scvr vs doe and y_pred vs y
        transformation- the transformation of the objective function
(logarithmic or inverse)
Output:
predict_list- list with different interpolated kriging models
excluding
each time one point of the sampling plan
predict_varr- list with the square root of the posterior variance
scvr- the scvr as proposed by Jones et al. (Journal of global
optimisation, 13: 455-492, 1998)
"""
y_ = np.copy(self.y)
if transformation == 'logarithmic':
y_ = np.log(y_)
elif transformation == 'inverse':
y_ = -(1.0 / y_)
y_normalised = (y_ - np.min(y_)) / (np.max(y_) -
np.min(y_))
Kriging_models_i, list_arrays, list_ys, train_list = [], [], [], []
for i in range(self.n):
exclude_value = [i]
idx = list(set(range(self.n)) - set(exclude_value))
list_arrays.append(self.X[idx])
list_ys.append(y_[idx])
Kriging_models_i.append(kriging(list_arrays[i], list_ys[i],
name='%s' % self.name))
train_list.append(Kriging_models_i[i].train(optimizer=optimiser))
self.predict_list.append(Kriging_models_i[i].predict(self.X[i]))
self.predict_varr.append(Kriging_models_i[i].predict_var(
self.X[i]))
self.scvr.append((y_normalised[i] - Kriging_models_i[i].normy(
self.predict_list[i])) /
self.predict_varr[i][0, 0])
if plot == 0:
return self.predict_list, self.predict_varr, self.scvr
elif plot == 1:
fig = plt.figure(figsize=(12, 8), facecolor='w', edgecolor='k',
linewidth=2.0, frameon=True)
ax1 = fig.add_subplot(1, 2, 1)
ax1.scatter([i for i in range(1, self.n + 1)], self.scvr, alpha=0.5,
edgecolor='black', facecolor='b', linewidth=2.)
ax1.plot([i for i in range(0, self.n + 3)], [3] * (self.n + 3), 'r')
ax1.plot([i for i in range(0, self.n + 3)], [-3] * (self.n + 3), 'r')
ax1.set_xlim(0, self.n + 2)
ax1.set_ylim(-4, 4)
ax1.set_xlabel('DoE individual')
ax1.set_ylabel('SCVR')
ax2 = fig.add_subplot(1, 2, 2)
ax2.scatter(self.predict_list, y_, alpha=0.5,
edgecolor='black', facecolor='b', linewidth=2.)
if np.max(y_) > 0:
ax2.set_ylim(0, np.max(y_) + 0.00001)
ax2.set_xlim(0, max(self.predict_list) + 0.00001)
else:
ax2.set_ylim(0, np.min(y_) - 0.00001)
ax2.set_xlim(0, min(self.predict_list) - 0.00001)
ax2.plot(ax2.get_xlim(), ax2.get_ylim(), ls="-", c=".3")
            ax2.set_xlabel('predicted %s' % ('ln(y)' if transformation ==
                                             'logarithmic' else '-1/y'))
            ax2.set_ylabel('ln(y)' if transformation ==
                           'logarithmic' else '-1/y')
plt.show()
return self.predict_list, self.predict_varr, self.scvr
else:
raise ValueError('value for plot should be either 0 or 1')
def QQ_plot(self):
"""
returns the QQ-plot with normal distribution
"""
plt.figure(figsize=(12, 8), facecolor='w', edgecolor='k',
linewidth=2.0, frameon=True)
stats.probplot(self.scvr, dist="norm", plot=plt)
        plt.xlabel('Standard normal quantile')
        plt.ylabel('Ordered SCVR values')
plt.show()
def leave_n_out(self, q=5):
        '''
        :param q: the number of groups to split the model data into
        :return: the mean and standard deviation of the MSE over the groups
        '''
mseArray = []
        for i in splitArrays(self.model, q):
testk = kriging(i[0], i[1])
testk.train()
for j in range(len(i[2])):
mseArray.append(mse(i[3][j], testk.predict(i[2][j])))
            del testk
return np.average(mseArray), np.std(mseArray)
## Example Use Case:
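# The sketch below is illustrative only and is not part of the original
# module: it assumes the pyKriging package layout (pyKriging.krige.kriging,
# pyKriging.samplingplan.samplingplan) and that the class defined above is
# importable as CrossValidation and takes a trained kriging model; adjust the
# names to whatever this file actually exports.
#
# import numpy as np
# from pyKriging.krige import kriging
# from pyKriging.samplingplan import samplingplan
#
# sp = samplingplan(2)                       # 2-D sampling plan
# X = sp.optimallhc(20)                      # 20-point optimal Latin hypercube
# y = np.array([np.sum(x ** 2) + 1.0 for x in X])   # positive, so log() is valid
# model = kriging(X, y)
# model.train()
#
# cv = CrossValidation(model)                # hypothetical constructor name
# preds, std_devs, scvr = cv.calculate_SCVR(plot=1)
# _, _, scvr_log = cv.calculate_transformed_SCVR('logarithmic', plot=1)
# cv.QQ_plot()
# print(cv.leave_n_out(q=5))                 # (mean MSE, std of MSE) over 5 folds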
|
mit
|
MoamerEncsConcordiaCa/tensorflow
|
tensorflow/contrib/learn/python/learn/estimators/linear_test.py
|
17
|
69086
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for estimators.linear."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import linear
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.linear_optimizer.python import sdca_optimizer as sdca_optimizer_lib
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import ftrl
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import server_lib
def _prepare_iris_data_for_logistic_regression():
# Converts iris data to a logistic regression problem.
iris = base.load_iris()
ids = np.where((iris.target == 0) | (iris.target == 1))
iris = base.Dataset(data=iris.data[ids], target=iris.target[ids])
return iris
class LinearClassifierTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
exp = experiment.Experiment(
estimator=linear.LinearClassifier(
n_classes=3, feature_columns=cont_features),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self,
linear.LinearClassifier)
def testTrain(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
def testJointTrain(self):
"""Tests that loss goes down with training with joint weights."""
def input_fn():
return {
'age':
sparse_tensor.SparseTensor(
values=['1'], indices=[[0, 0]], dense_shape=[1, 1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.sparse_column_with_hash_bucket('age', 2)
classifier = linear.LinearClassifier(
_joint_weight=True, feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClass_MatrixData_Labels1D(self):
"""Same as the last test, but labels shape is [150] instead of [150, 1]."""
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = base.load_iris()
train_x = iris.data
train_y = iris.target
feature_column = feature_column_lib.real_valued_column('', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(x=train_x, y=train_y, steps=100)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
def _input_fn():
iris = _prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100, 1], dtype=dtypes.int32)
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_MatrixData_Labels1D(self):
"""Same as the last test, but labels shape is [100] instead of [100, 1]."""
def _input_fn():
iris = _prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = _prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column_lib.real_valued_column('', dimension=4)]
classifier = linear.LinearClassifier(feature_columns=feature_columns)
classifier.fit(x=train_x, y=train_y, steps=100)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testWeightAndBiasNames(self):
"""Tests that weight and bias names haven't changed."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
variable_names = classifier.get_variable_names()
self.assertIn('linear/feature/weight', variable_names)
self.assertIn('linear/bias_weight', variable_names)
self.assertEqual(
4, len(classifier.get_variable_value('linear/feature/weight')))
self.assertEqual(
3, len(classifier.get_variable_value('linear/bias_weight')))
def testCustomOptimizerByObject(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3,
optimizer=ftrl.FtrlOptimizer(learning_rate=0.1),
feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomOptimizerByString(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
def _optimizer():
return ftrl.FtrlOptimizer(learning_rate=0.1)
classifier = linear.LinearClassifier(
n_classes=3, optimizer=_optimizer, feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomOptimizerByFunction(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, optimizer='Ftrl', feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]], dtype=dtypes.float32)
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = linear.LinearClassifier(
feature_columns=[feature_column_lib.real_valued_column('x')])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Tests the case where the prediction_key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
# Tests the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaises(KeyError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={('bad_name', 'bad_type'): metric_ops.streaming_auc})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
('bad_length_name', 'classes', 'bad_length'):
metric_ops.streaming_accuracy
})
def testLogisticFractionalLabels(self):
"""Tests logistic training with fractional labels."""
def input_fn(num_epochs=None):
return {
'age':
input_lib.limit_epochs(
constant_op.constant([[1], [2]]), num_epochs=num_epochs),
}, constant_op.constant(
[[.7], [0]], dtype=dtypes.float32)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(
feature_columns=[age], config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=input_fn, steps=500)
predict_input_fn = functools.partial(input_fn, num_epochs=1)
predictions_proba = list(
classifier.predict_proba(input_fn=predict_input_fn))
# Prediction probabilities mirror the labels column, which proves that the
# classifier learns from float input.
self.assertAllClose([[.3, .7], [1., 0.]], predictions_proba, atol=.1)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[1], [0], [0]])
return features, labels
sparse_features = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = linear.LinearClassifier(
feature_columns=sparse_features, config=config)
classifier.fit(input_fn=_input_fn, steps=200)
loss = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
self.assertLess(loss, 0.07)
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def input_fn(num_epochs=None):
return {
'age':
input_lib.limit_epochs(
constant_op.constant([1]), num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1]),
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
model_dir = tempfile.mkdtemp()
classifier = linear.LinearClassifier(
model_dir=model_dir, feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=30)
predict_input_fn = functools.partial(input_fn, num_epochs=1)
out1_class = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
out1_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
del classifier
classifier2 = linear.LinearClassifier(
model_dir=model_dir, feature_columns=[age, language])
out2_class = list(
classifier2.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
out2_proba = list(
classifier2.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self.assertTrue(np.array_equal(out1_class, out2_class))
self.assertTrue(np.array_equal(out1_proba, out2_proba))
def testWeightColumn(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = linear.LinearClassifier(
weight_column_name='w',
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=3))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
# All examples in eval data set are y=x.
self.assertGreater(scores['labels/actual_label_mean'], 0.9)
# If there were no weight column, model would learn y=Not(x). Because of
# weights, it learns y=x.
self.assertGreater(scores['labels/prediction_mean'], 0.9)
# All examples in eval data set are y=x. So if weight column were ignored,
# then accuracy would be zero. Because of weights, accuracy should be close
# to 1.0.
self.assertGreater(scores['accuracy'], 0.9)
scores_train_set = classifier.evaluate(input_fn=_input_fn_train, steps=1)
# Considering weights, the mean label should be close to 1.0.
# If weights were ignored, it would be 0.25.
self.assertGreater(scores_train_set['labels/actual_label_mean'], 0.9)
# The classifier has learned y=x. If weight column were ignored in
# evaluation, then accuracy for the train set would be 0.25.
# Because weight is not ignored, accuracy is greater than 0.6.
self.assertGreater(scores_train_set['accuracy'], 0.6)
def testWeightColumnLoss(self):
"""Test ensures that you can specify per-example weights for loss."""
def _input_fn():
features = {
'age': constant_op.constant([[20], [20], [20]]),
'weights': constant_op.constant([[100], [1], [1]]),
}
labels = constant_op.constant([[1], [0], [0]])
return features, labels
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age])
classifier.fit(input_fn=_input_fn, steps=100)
loss_unweighted = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
classifier = linear.LinearClassifier(
feature_columns=[age], weight_column_name='weights')
classifier.fit(input_fn=_input_fn, steps=100)
loss_weighted = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
self.assertLess(loss_weighted, loss_unweighted)
def testExport(self):
"""Tests that export model for servo works."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(
feature_columns=[age, language], enable_centered_bias=False)
classifier.fit(input_fn=input_fn, steps=100)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(
feature_columns=[age, language], enable_centered_bias=True)
classifier.fit(input_fn=input_fn, steps=100)
self.assertIn('linear/binary_logistic_head/centered_bias_weight',
classifier.get_variable_names())
def testTrainOptimizerWithL1Reg(self):
"""Tests l1 regularized model has higher loss."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['hindi'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
classifier_no_reg = linear.LinearClassifier(feature_columns=[language])
classifier_with_reg = linear.LinearClassifier(
feature_columns=[language],
optimizer=ftrl.FtrlOptimizer(
learning_rate=1.0, l1_regularization_strength=100.))
loss_no_reg = classifier_no_reg.fit(input_fn=input_fn, steps=100).evaluate(
input_fn=input_fn, steps=1)['loss']
loss_with_reg = classifier_with_reg.fit(input_fn=input_fn,
steps=100).evaluate(
input_fn=input_fn,
steps=1)['loss']
self.assertLess(loss_no_reg, loss_with_reg)
def testTrainWithMissingFeature(self):
"""Tests that training works with missing features."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['Swahili', 'turkish'],
indices=[[0, 0], [2, 0]],
dense_shape=[3, 1])
}, constant_op.constant([[1], [1], [1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
classifier = linear.LinearClassifier(feature_columns=[language])
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.07)
def testSdcaOptimizerRealValuedFeatures(self):
"""Tests LinearClasssifier with SDCAOptimizer and real valued features."""
def input_fn():
return {
'example_id': constant_op.constant(['1', '2']),
'maintenance_cost': constant_op.constant([[500.0], [200.0]]),
'sq_footage': constant_op.constant([[800.0], [600.0]]),
'weights': constant_op.constant([[1.0], [1.0]])
}, constant_op.constant([[0], [1]])
maintenance_cost = feature_column_lib.real_valued_column('maintenance_cost')
sq_footage = feature_column_lib.real_valued_column('sq_footage')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[maintenance_cost, sq_footage],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerRealValuedFeatureWithHigherDimension(self):
"""Tests SDCAOptimizer with real valued features of higher dimension."""
# input_fn is identical to the one in testSdcaOptimizerRealValuedFeatures
# where 2 1-dimensional dense features have been replaced by 1 2-dimensional
# feature.
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2']),
'dense_feature':
constant_op.constant([[500.0, 800.0], [200.0, 600.0]])
}, constant_op.constant([[0], [1]])
dense_feature = feature_column_lib.real_valued_column(
'dense_feature', dimension=2)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[dense_feature], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerBucketizedFeatures(self):
"""Tests LinearClasssifier with SDCAOptimizer and bucketized features."""
def input_fn():
return {
'example_id': constant_op.constant(['1', '2', '3']),
'price': constant_op.constant([[600.0], [1000.0], [400.0]]),
'sq_footage': constant_op.constant([[1000.0], [600.0], [700.0]]),
'weights': constant_op.constant([[1.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('price'),
boundaries=[500.0, 700.0])
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'), boundaries=[650.0])
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id', symmetric_l2_regularization=1.0)
classifier = linear.LinearClassifier(
feature_columns=[price_bucket, sq_footage_bucket],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerSparseFeatures(self):
"""Tests LinearClasssifier with SDCAOptimizer and sparse features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([[0.4], [0.6], [0.3]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[1.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price = feature_column_lib.real_valued_column('price')
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerWeightedSparseFeatures(self):
"""LinearClasssifier with SDCAOptimizer and weighted sparse features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
sparse_tensor.SparseTensor(
values=[2., 3., 1.],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 5]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 5])
}, constant_op.constant([[1], [0], [1]])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
country_weighted_by_price = feature_column_lib.weighted_sparse_column(
country, 'price')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[country_weighted_by_price], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerCrossedFeatures(self):
"""Tests LinearClasssifier with SDCAOptimizer and crossed features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'language':
sparse_tensor.SparseTensor(
values=['english', 'italian', 'spanish'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'country':
sparse_tensor.SparseTensor(
values=['US', 'IT', 'MX'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1])
}, constant_op.constant([[0], [0], [1]])
language = feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=5)
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
country_language = feature_column_lib.crossed_column(
[language, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[country_language], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=10)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerMixedFeatures(self):
"""Tests LinearClasssifier with SDCAOptimizer and a mix of features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([[0.6], [0.8], [0.3]]),
'sq_footage':
constant_op.constant([[900.0], [700.0], [600.0]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[3.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price = feature_column_lib.real_valued_column('price')
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = feature_column_lib.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testEval(self):
"""Tests that eval produces correct metrics.
"""
def input_fn():
return {
'age':
constant_op.constant([[1], [2]]),
'language':
sparse_tensor.SparseTensor(
values=['greek', 'chinese'],
indices=[[0, 0], [1, 0]],
dense_shape=[2, 1]),
}, constant_op.constant([[1], [0]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age, language])
# Evaluate on trained model
classifier.fit(input_fn=input_fn, steps=100)
classifier.evaluate(input_fn=input_fn, steps=1)
# TODO(ispir): Enable accuracy check after resolving the randomness issue.
# self.assertLess(evaluated_values['loss/mean'], 0.3)
# self.assertGreater(evaluated_values['accuracy/mean'], .95)
class LinearRegressorTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
exp = experiment.Experiment(
estimator=linear.LinearRegressor(feature_columns=cont_features),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, linear.LinearRegressor)
def testRegression(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[10.]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearRegressor(feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.5)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
regressor = linear.LinearRegressor(
feature_columns=cont_features,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = regressor.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = linear.LinearRegressor(
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
# Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
self.assertAlmostEqual(0.1875, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = linear.LinearRegressor(
weight_column_name='w',
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
self.assertAlmostEqual(0.4125, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = linear.LinearRegressor(
weight_column_name='w',
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# The model should learn (y = x) because of the weights, so the loss should
# be close to zero.
self.assertLess(scores['loss'], 0.1)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1.0, 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
predicted_scores = regressor.predict_scores(
input_fn=_input_fn, as_iterable=False)
self.assertAllClose(labels, predicted_scores, atol=0.1)
predictions = regressor.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllClose(predicted_scores, predictions)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1.0, 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_scores = list(
regressor.predict_scores(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(labels, predicted_scores, atol=0.1)
predictions = list(
regressor.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(predicted_scores, predictions)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = linear.LinearRegressor(
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(
regressor.predict_scores(input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
# Tests the case where the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
model_dir = tempfile.mkdtemp()
regressor = linear.LinearRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor2 = linear.LinearRegressor(
model_dir=model_dir, feature_columns=feature_columns)
predictions2 = list(regressor2.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7),
feature_column_lib.real_valued_column('age')
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = linear.LinearRegressor(
feature_columns=feature_columns, config=config)
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
def testRecoverWeights(self):
rng = np.random.RandomState(67)
n = 1000
n_weights = 10
bias = 2
x = rng.uniform(-1, 1, (n, n_weights))
weights = 10 * rng.randn(n_weights)
y = np.dot(x, weights)
y += rng.randn(len(x)) * 0.05 + rng.normal(bias, 0.01)
feature_columns = estimator.infer_real_valued_columns_from_input(x)
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
optimizer=ftrl.FtrlOptimizer(learning_rate=0.8))
regressor.fit(x, y, batch_size=64, steps=2000)
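    # The feature column inferred from the raw numpy input has an empty name,
    # which is why the weight variable below ends up as 'linear//weight'
    # (nothing between the two slashes).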
self.assertIn('linear//weight', regressor.get_variable_names())
regressor_weights = regressor.get_variable_value('linear//weight')
# Have to flatten weights since they come in (x, 1) shape.
self.assertAllClose(weights, regressor_weights.flatten(), rtol=1)
# TODO(ispir): Disable centered_bias.
# assert abs(bias - regressor.bias_) < 0.1
def testSdcaOptimizerRealValuedLinearFeatures(self):
"""Tests LinearRegressor with SDCAOptimizer and real valued features."""
x = [[1.2, 2.0, -1.5], [-2.0, 3.0, -0.5], [1.0, -0.5, 4.0]]
weights = [[3.0], [-1.2], [0.5]]
y = np.dot(x, weights)
def input_fn():
return {
'example_id': constant_op.constant(['1', '2', '3']),
'x': constant_op.constant(x),
'weights': constant_op.constant([[10.0], [10.0], [10.0]])
}, constant_op.constant(y)
x_column = feature_column_lib.real_valued_column('x', dimension=3)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[x_column],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.01)
self.assertIn('linear/x/weight', regressor.get_variable_names())
regressor_weights = regressor.get_variable_value('linear/x/weight')
self.assertAllClose(
[w[0] for w in weights], regressor_weights.flatten(), rtol=0.1)
def testSdcaOptimizerMixedFeaturesArbitraryWeights(self):
"""Tests LinearRegressor with SDCAOptimizer and a mix of features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([[0.6], [0.8], [0.3]]),
'sq_footage':
constant_op.constant([[900.0], [700.0], [600.0]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[3.0], [5.0], [7.0]])
}, constant_op.constant([[1.55], [-1.25], [-3.0]])
price = feature_column_lib.real_valued_column('price')
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = feature_column_lib.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id', symmetric_l2_regularization=1.0)
regressor = linear.LinearRegressor(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerSparseFeaturesWithL1Reg(self):
"""Tests LinearClasssifier with SDCAOptimizer and sparse features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([[0.4], [0.6], [0.3]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[10.0], [10.0], [10.0]])
}, constant_op.constant([[1.4], [-0.8], [2.6]])
price = feature_column_lib.real_valued_column('price')
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
# Regressor with no L1 regularization.
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
no_l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
variable_names = regressor.get_variable_names()
self.assertIn('linear/price/weight', variable_names)
self.assertIn('linear/country/weights', variable_names)
no_l1_reg_weights = {
'linear/price/weight': regressor.get_variable_value(
'linear/price/weight'),
'linear/country/weights': regressor.get_variable_value(
'linear/country/weights'),
}
# Regressor with L1 regularization.
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id', symmetric_l1_regularization=1.0)
regressor = linear.LinearRegressor(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
l1_reg_weights = {
'linear/price/weight': regressor.get_variable_value(
'linear/price/weight'),
'linear/country/weights': regressor.get_variable_value(
'linear/country/weights'),
}
# Unregularized loss is lower when there is no L1 regularization.
self.assertLess(no_l1_reg_loss, l1_reg_loss)
self.assertLess(no_l1_reg_loss, 0.05)
# But weights returned by the regressor with L1 regularization have smaller
# L1 norm.
l1_reg_weights_norm, no_l1_reg_weights_norm = 0.0, 0.0
for var_name in sorted(l1_reg_weights):
l1_reg_weights_norm += sum(
np.absolute(l1_reg_weights[var_name].flatten()))
no_l1_reg_weights_norm += sum(
np.absolute(no_l1_reg_weights[var_name].flatten()))
print('Var name: %s, value: %s' %
(var_name, no_l1_reg_weights[var_name].flatten()))
self.assertLess(l1_reg_weights_norm, no_l1_reg_weights_norm)
def testSdcaOptimizerBiasOnly(self):
"""Tests LinearClasssifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when it's the only feature present.
      All of the instances in this input only have the bias feature, and
      1/4 of the labels are positive. This means that the expected weight for
      the bias should be close to the average prediction, i.e. 0.25.
Returns:
Training data for the test.
"""
num_examples = 40
return {
'example_id':
constant_op.constant([str(x + 1) for x in range(num_examples)]),
          # place_holder is an empty column which is always 0 (absent), because
          # the linear estimator requires at least one column.
'place_holder':
constant_op.constant([[0.0]] * num_examples),
}, constant_op.constant(
          [[1 if i % 4 == 0 else 0] for i in range(num_examples)])
place_holder = feature_column_lib.real_valued_column('place_holder')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[place_holder], optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=100)
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.25, err=0.1)
def testSdcaOptimizerBiasAndOtherColumns(self):
"""Tests LinearClasssifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when there are other features present.
1/2 of the instances in this input have feature 'a', the rest have
feature 'b', and we expect the bias to be added to each instance as well.
0.4 of all instances that have feature 'a' are positive, and 0.2 of all
instances that have feature 'b' are positive. The labels in the dataset
are ordered to appear shuffled since SDCA expects shuffled data, and
converges faster with this pseudo-random ordering.
If the bias was centered we would expect the weights to be:
bias: 0.3
a: 0.1
b: -0.1
Until b/29339026 is resolved, the bias gets regularized with the same
global value for the other columns, and so the expected weights get
shifted and are:
bias: 0.2
a: 0.2
b: 0.0
Returns:
The test dataset.
"""
num_examples = 200
half = int(num_examples / 2)
return {
'example_id':
constant_op.constant([str(x + 1) for x in range(num_examples)]),
'a':
constant_op.constant([[1]] * int(half) + [[0]] * int(half)),
'b':
constant_op.constant([[0]] * int(half) + [[1]] * int(half)),
}, constant_op.constant(
[[x]
for x in [1, 0, 0, 1, 1, 0, 0, 0, 1, 0] * int(half / 10) +
[0, 1, 0, 0, 0, 0, 0, 0, 1, 0] * int(half / 10)])
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[
feature_column_lib.real_valued_column('a'),
feature_column_lib.real_valued_column('b')
],
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=200)
variable_names = regressor.get_variable_names()
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/a/weight', variable_names)
self.assertIn('linear/b/weight', variable_names)
# TODO(b/29339026): Change the expected results to expect a centered bias.
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.2, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/a/weight')[0], 0.2, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/b/weight')[0], 0.0, err=0.05)
def testSdcaOptimizerBiasAndOtherColumnsFabricatedCentered(self):
"""Tests LinearClasssifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when there are other features present.
1/2 of the instances in this input have feature 'a', the rest have
feature 'b', and we expect the bias to be added to each instance as well.
0.1 of all instances that have feature 'a' have a label of 1, and 0.1 of
all instances that have feature 'b' have a label of -1.
We can expect the weights to be:
bias: 0.0
a: 0.1
b: -0.1
Returns:
The test dataset.
"""
num_examples = 200
half = int(num_examples / 2)
return {
'example_id':
constant_op.constant([str(x + 1) for x in range(num_examples)]),
'a':
constant_op.constant([[1]] * int(half) + [[0]] * int(half)),
'b':
constant_op.constant([[0]] * int(half) + [[1]] * int(half)),
}, constant_op.constant([[1 if x % 10 == 0 else 0] for x in range(half)] +
[[-1 if x % 10 == 0 else 0] for x in range(half)])
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[
feature_column_lib.real_valued_column('a'),
feature_column_lib.real_valued_column('b')
],
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=100)
variable_names = regressor.get_variable_names()
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/a/weight', variable_names)
self.assertIn('linear/b/weight', variable_names)
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.0, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/a/weight')[0], 0.1, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/b/weight')[0], -0.1, err=0.05)
class LinearEstimatorTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
exp = experiment.Experiment(
estimator=linear.LinearEstimator(feature_columns=cont_features,
head=head_lib.regression_head()),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self,
linear.LinearEstimator)
def testLinearRegression(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[10.]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
linear_estimator = linear.LinearEstimator(feature_columns=[age, language],
head=head_lib.regression_head())
linear_estimator.fit(input_fn=input_fn, steps=100)
loss1 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
linear_estimator.fit(input_fn=input_fn, steps=400)
loss2 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.5)
def testPoissonRegression(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[10.]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
linear_estimator = linear.LinearEstimator(
feature_columns=[age, language],
head=head_lib.poisson_regression_head())
linear_estimator.fit(input_fn=input_fn, steps=10)
loss1 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
linear_estimator.fit(input_fn=input_fn, steps=100)
loss2 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
# Here loss of 2.1 implies a prediction of ~9.9998
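    # Hand check (assuming the Poisson head uses the full log-loss including
    # the Stirling term): at label y = 10 and prediction p ~= 10 the loss is
    # p - y*log(p) + y*log(y) - y + 0.5*log(2*pi*y) ~= 0.5*log(20*pi) ~= 2.07,
    # hence the 2.1 bound.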
self.assertLess(loss2, 2.1)
def testSDCANotSupported(self):
"""Tests that we detect error for SDCA."""
maintenance_cost = feature_column_lib.real_valued_column('maintenance_cost')
sq_footage = feature_column_lib.real_valued_column('sq_footage')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
with self.assertRaises(ValueError):
linear.LinearEstimator(
head=head_lib.regression_head(label_dimension=1),
feature_columns=[maintenance_cost, sq_footage],
optimizer=sdca_optimizer,
_joint_weights=True)
def boston_input_fn():
boston = base.load_boston()
features = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.data), [-1, 13]),
dtypes.float32)
labels = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.target), [-1, 1]),
dtypes.float32)
return features, labels
class FeatureColumnTest(test.TestCase):
def testTrain(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
est = linear.LinearRegressor(feature_columns=feature_columns)
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_input_fn, steps=1)
if __name__ == '__main__':
test.main()
|
apache-2.0
|
WafaaT/spark-tk
|
regression-tests/sparktkregtests/testcases/frames/cumulative_tally_test.py
|
13
|
5020
|
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test cumulative tally functions, hand calculated baselines"""
import unittest
from sparktkregtests.lib import sparktk_test
class TestCumulativeTally(sparktk_test.SparkTKTestCase):
def setUp(self):
super(TestCumulativeTally, self).setUp()
data_tally = self.get_file("cumu_tally_seq.csv")
schema_tally = [("sequence", int),
("user_id", int),
("vertex_type", str),
("movie_id", int),
("rating", int),
("splits", str),
("count", int),
("percent_count", float)]
self.tally_frame = self.context.frame.import_csv(data_tally,
schema=schema_tally)
def test_tally_and_tally_percent(self):
"""Test tally and tally percent"""
self.tally_frame.tally("rating", '5')
self.tally_frame.tally_percent("rating", '5')
pd_frame = self.tally_frame.to_pandas(self.tally_frame.count())
for index, row in pd_frame.iterrows():
self.assertAlmostEqual(
row['percent_count'], row['rating_tally_percent'], delta=.0001)
self.assertEqual(row['count'], row['rating_tally'])
def test_tally_colname_collision(self):
"""Test tally column names collide gracefully"""
# repeatedly run tally to force collisions
self.tally_frame.tally("rating", '5')
self.tally_frame.tally_percent("rating", '5')
self.tally_frame.tally("rating", '5')
self.tally_frame.tally_percent("rating", '5')
self.tally_frame.tally("rating", '5')
self.tally_frame.tally_percent("rating", '5')
columns = [u'sequence',
u'user_id',
u'vertex_type',
u'movie_id',
u'rating',
u'splits',
u'count',
u'percent_count',
u'rating_tally',
u'rating_tally_percent',
u'rating_tally_0',
u'rating_tally_percent_0',
u'rating_tally_1',
u'rating_tally_percent_1']
self.assertItemsEqual(self.tally_frame.column_names, columns)
def test_tally_no_column(self):
"""Test errors on non-existant column"""
with self.assertRaisesRegexp(Exception, "Invalid column name"):
self.tally_frame.tally("no_such_column", '5')
def test_tally_no_column_percent(self):
with self.assertRaisesRegexp(Exception, "Invalid column name"):
self.tally_frame.tally_percent("no_such_column", '5')
def test_tally_none(self):
"""Test tally none column errors"""
with self.assertRaisesRegexp(Exception,
"column name for sample is required"):
self.tally_frame.tally(None, '5')
def test_tally_none_percent(self):
with self.assertRaisesRegexp(Exception,
"column name for sample is required"):
self.tally_frame.tally_percent(None, '5')
def test_tally_bad_type(self):
"""Test tally on incorrect type errors"""
with self.assertRaisesRegexp(Exception, "does not exist"):
self.tally_frame.tally("rating", 5)
def test_tally_bad_type_percent(self):
with self.assertRaisesRegexp(Exception, "does not exist"):
self.tally_frame.tally_percent("rating", 5)
def test_tally_value_none(self):
"""Test tally on none errors"""
with self.assertRaisesRegexp(Exception,
"count value for the sample is required"):
self.tally_frame.tally("rating", None)
def test_tally_value_none_percent(self):
with self.assertRaisesRegexp(Exception,
"count value for the sample is required"):
self.tally_frame.tally_percent("rating", None)
def test_tally_no_element(self):
"""Test tallying on non-present element is correct"""
self.tally_frame.tally_percent("rating", "12")
local_frame = self.tally_frame.to_pandas(self.tally_frame.count())
for index, row in local_frame.iterrows():
self.assertEqual(row["rating_tally_percent"], 1.0)
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
jmbeuken/abinit
|
tests/pymods/memprof.py
|
1
|
12121
|
from __future__ import print_function, division, unicode_literals
from pprint import pprint
from itertools import groupby
from functools import wraps
from collections import namedtuple, deque
# OrderedDict was added in 2.7. ibm6 still uses python2.6
try:
from collections import OrderedDict
except ImportError:
from .ordereddict import OrderedDict
def group_entries_bylocus(entries):
d = {}
for e in entries:
if e.locus not in d:
d[e.locus] = [e]
else:
d[e.locus].append(e)
return d
class Entry(namedtuple("Entry", "vname, ptr, action, size, file, func, line, tot_memory, sidx")):
@classmethod
def from_line(cls, line, sidx):
args = line.split()
args.append(sidx)
return cls(*args)
def __new__(cls, *args):
"""Extends the base class adding type conversion of arguments."""
# write(logunt,'(a,t60,a,1x,2(i0,1x),2(a,1x),2(i0,1x))')&
# trim(vname), trim(act), addr, isize, trim(basename(file)), trim(func), line, memtot_abi%memory
return super(cls, Entry).__new__(cls,
vname=args[0],
action=args[1],
ptr=int(args[2]),
size=int(args[3]),
file=args[4],
func=args[5],
line=int(args[6]),
tot_memory=int(args[7]),
sidx=args[8],
)
def __repr__(self):
return self.as_repr(with_addr=True)
def as_repr(self, with_addr=True):
if with_addr:
return "<var=%s, %s@%s:%s:%s, addr=%s, size=%d, idx=%d>" % (
self.vname, self.action, self.file, self.func, self.line, hex(self.ptr), self.size, self.sidx)
else:
return "<var=%s, %s@%s:%s:%s, size=%d, idx=%d>" % (
self.vname, self.action, self.file, self.func, self.line, self.size, self.sidx)
@property
def basename(self):
return self.vname.split("%")[-1]
@property
def isalloc(self):
"""True if entry represents an allocation."""
return self.action == "A"
@property
def isfree(self):
"""True if entry represents a deallocation."""
return self.action == "D"
@property
def iszerosized(self):
"""True if this is a zero-sized alloc/free."""
return self.size == 0
@property
def locus(self):
"""This is almost unique"""
return self.func + "@" + self.file
def frees_onheap(self, other):
if (not self.isfree) or other.isalloc: return False
if self.size + other.size != 0: return False
return True
def frees_onstack(self, other):
if (not self.isfree) or other.isalloc: return False
if self.size + other.size != 0: return False
if self.locus != other.locus: return False
return True
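# Illustration (not part of the original module; field values are made up, the line
# layout follows the Fortran write statement quoted in Entry.__new__):
#
#   e = Entry.from_line("foo%bar A 140217 800 m_foo.F90 foo_init 12 123456", sidx=1)
#   e.isalloc    # True  (action == "A")
#   e.basename   # "bar" (last "%"-separated component of vname)
#   e.locus      # "foo_init@m_foo.F90"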
class Heap(dict):
def show(self):
print("=== HEAP OF LEN %s ===" % len(self))
if not self: return
# for p, elist in self.items():
pprint(self, indent=4)
print("")
def pop_alloc(self, entry):
if not entry.isfree: return 0
        elist = self.get(entry.ptr)
        if elist is None: return 0
        for i, olde in enumerate(elist):
            # A deallocation matches an allocation when their sizes sum to zero.
            if entry.size + olde.size == 0:
                elist.pop(i)
                return 1
return 0
class Stack(dict):
def show(self):
print("=== STACK OF LEN %s ===)" % len(self))
if not self: return
pprint(self)
print("")
def catchall(method):
@wraps(method)
def wrapper(*args, **kwargs):
self = args[0]
try:
return method(*args, **kwargs)
except Exception as exc:
# Add info on file and re-raise.
msg = "Exception while parsing file: %s\n" % self.path
raise exc.__class__(msg + str(exc))
return wrapper
class AbimemParser(object):
def __init__(self, path):
self.path = path
#def __str__(self):
# lines = []
# app = lines.append
# return "\n".join(lines)
@catchall
def summarize(self):
with open(self.path, "rt") as fh:
l = fh.read()
print(l)
@catchall
def find_small_allocs(self, nbytes=160):
"""Zero sized allocations are not counted."""
smalles = []
with open(self.path, "rt") as fh:
for lineno, line in enumerate(fh):
if lineno == 0: continue
e = Entry.from_line(line, lineno)
if not e.isalloc: continue
if 0 < e.size <= nbytes: smalles.append(e)
pprint(smalles)
return smalles
@catchall
def find_intensive(self, threshold=2000):
d = {}
with open(self.path, "rt") as fh:
for lineno, line in enumerate(fh):
if lineno == 0: continue
e = Entry.from_line(line, lineno)
loc = e.locus
if loc not in d:
d[loc] = [e]
else:
d[loc].append(e)
# Remove entries below the threshold and perform DSU sort
dsu_list = [(elist, len(elist)) for _, elist in d.items() if len(elist) >= threshold]
intensive = [t[0] for t in sorted(dsu_list, key=lambda x: x[1], reverse=True)]
for elist in intensive:
loc = elist[0].locus
# assert all(e.locus == loc for e in elist)
print("[%s] has %s allocations/frees" % (loc, len(elist)))
return intensive
#def show_peaks(self):
@catchall
def find_zerosized(self):
elist = []
eapp = elist.append
for e in self.yield_all_entries():
if e.size == 0: eapp(e)
if elist:
print("Found %d zero-sized entries:" % len(elist))
pprint(elist)
else:
print("No zero-sized found")
return elist
@catchall
def find_weird_ptrs(self):
elist = []
eapp = elist.append
for e in self.yield_all_entries():
if e.ptr <= 0: eapp(e)
if elist:
print("Found %d weird entries:" % len(elist))
pprint(elist)
else:
print("No weird entries found")
return elist
def yield_all_entries(self):
with open(self.path, "rt") as fh:
for lineno, line in enumerate(fh):
if lineno == 0: continue
yield Entry.from_line(line, lineno)
@catchall
def find_peaks(self, maxlen=20):
# the deque is bounded to the specified maximum length. Once a bounded length deque is full,
# when new items are added, a corresponding number of items are discarded from the opposite end.
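        # e.g. d = deque([1, 2, 3], maxlen=3); d.append(4) leaves deque([2, 3, 4]):
        # the oldest element is silently dropped from the left.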
peaks = deque(maxlen=maxlen)
for e in self.yield_all_entries():
size = e.size
if size == 0 or not e.isalloc: continue
if len(peaks) == 0:
peaks.append(e); continue
            # TODO: Should remove redundant entries.
if size > peaks[0].size:
peaks.append(e)
peaks = deque(sorted(peaks, key=lambda x: x.size), maxlen=maxlen)
peaks = deque(sorted(peaks, key=lambda x: x.size, reverse=True), maxlen=maxlen)
for peak in peaks:
print(peak)
return peaks
@catchall
def plot_memory_usage(self, show=True):
memory = [e.tot_memory for e in self.yield_all_entries()]
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(memory)
if show: plt.show()
return fig
#def get_dataframe(self):
# import pandas as pd
# frame = pd.DataFrame()
# return frame
@catchall
def find_memleaks(self):
heap, stack = Heap(), Stack()
reallocs = []
with open(self.path, "rt") as fh:
for lineno, line in enumerate(fh):
if lineno == 0: continue
newe = Entry.from_line(line, lineno)
p = newe.ptr
if newe.size == 0: continue
                # Store the new entry if its pointer is not yet on the heap,
                # else check whether this deallocation matches a previous allocation
                # (zero-sized arrays are not included).
                # Otherwise there is a possible memory leak or some undetected problem.
if p not in heap:
if newe.isalloc:
heap[p] = [newe]
else:
# Likely reallocation
reallocs.append(newe)
else:
if newe.isfree and len(heap[p]) == 1 and heap[p][0].size + newe.size == 0:
heap.pop(p)
else:
# In principle this should never happen but there are exceptions:
#
# 1) The compiler could decide to put the allocatable on the stack
# In this case the ptr reported by gfortran is 0.
#
# 2) The allocatable variable is "reallocated" by the compiler (F2003).
# Example:
#
# allocate(foo(2,1)) ! p0 = &foo
# foo = reshape([0,0], [2,1]) ! p1 = &foo. Reallocation of the LHS.
# ! Use foo(:) to avoid that
# deallocate(foo) ! p2 = &foo
#
# In this case, p2 != p0
#print("WARN:", newe.ptr, newe, "ptr already on the heap")
#print("HEAP:", heap[newe.ptr])
locus = newe.locus
if locus not in stack:
stack[locus] = [newe]
else:
#if newe.ptr != 0: print(newe)
stack_loc = stack[locus]
ifind = -1
for i, olde in enumerate(stack_loc):
if newe.frees_onstack(olde):
ifind = i
break
if ifind != -1:
stack_loc.pop(ifind)
#else:
# print(newe)
#if p == 0:
# stack[p] = newe
#else:
# print("varname", newe.vname, "in heap with size ",newe.size)
# for weirde in heap[p]:
# print("\tweird entry:", weirde)
# heap[p].append(newe)
if False and heap:
# Possible memory leaks.
count = -1
keyfunc = lambda e: abs(e.size)
for a, entries in heap.items():
count += 1
entries = [e for e in entries if e.size != 0]
entries = sorted(entries, key=keyfunc)
#if any(int(e.size) != 0 for e in l):
#msizes = []
for key, group in groupby(entries, keyfunc):
group = list(group)
#print([e.name for e in g])
pos_size = [e for e in group if e.size >0]
neg_size = [e for e in group if e.size <0]
if len(pos_size) != len(neg_size):
print("key", key)
for e in group:
print(e)
#print(list(g))
#for i, e in enumerate(entries):
# print("\t[%d]" % i, e)
#print("Count=%d" % count, 60 * "=")
if heap: heap.show()
if stack: stack.show()
if reallocs:
print("Possible reallocations:")
pprint(reallocs)
return len(heap) + len(stack) + len(reallocs)
|
gpl-3.0
|
DonBeo/scikit-learn
|
sklearn/neighbors/tests/test_neighbors.py
|
13
|
40881
|
from itertools import product
import numpy as np
from scipy.sparse import (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix)
from sklearn.cross_validation import train_test_split
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn import neighbors, datasets
rng = np.random.RandomState(0)
# load and shuffle iris dataset
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# load and shuffle digits
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix,
lil_matrix)
SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,)
ALGORITHMS = ('ball_tree', 'brute', 'kd_tree', 'auto')
P = (1, 2, 3, 4, np.inf)
# Filter deprecation warnings.
neighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph)
neighbors.radius_neighbors_graph = ignore_warnings(
neighbors.radius_neighbors_graph)
def _weight_func(dist):
""" Weight function to replace lambda d: d ** -2.
The lambda function is not valid because:
if d==0 then 0^-2 is not valid. """
# Dist could be multidimensional, flatten it so all values
# can be looped
with np.errstate(divide='ignore'):
retval = 1. / dist
return retval ** 2
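# Quick illustration (not part of the original tests) of why the errstate guard
# matters: a zero distance yields an inf weight instead of raising, e.g.
#   _weight_func(np.array([[0.0, 2.0]]))  # -> array([[ inf, 0.25]])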
def test_unsupervised_kneighbors(n_samples=20, n_features=5,
n_query_pts=2, n_neighbors=5):
# Test unsupervised neighbors methods
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results_nodist = []
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
p=p)
neigh.fit(X)
results_nodist.append(neigh.kneighbors(test,
return_distance=False))
results.append(neigh.kneighbors(test, return_distance=True))
for i in range(len(results) - 1):
assert_array_almost_equal(results_nodist[i], results[i][1])
assert_array_almost_equal(results[i][0], results[i + 1][0])
assert_array_almost_equal(results[i][1], results[i + 1][1])
def test_unsupervised_inputs():
# test the types of valid input into NearestNeighbors
X = rng.random_sample((10, 3))
nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1)
nbrs_fid.fit(X)
dist1, ind1 = nbrs_fid.kneighbors(X)
nbrs = neighbors.NearestNeighbors(n_neighbors=1)
for input in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)):
nbrs.fit(input)
dist2, ind2 = nbrs.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
def test_unsupervised_radius_neighbors(n_samples=20, n_features=5,
n_query_pts=2, radius=0.5,
random_state=0):
# Test unsupervised radius-based query
rng = np.random.RandomState(random_state)
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm,
p=p)
neigh.fit(X)
ind1 = neigh.radius_neighbors(test, return_distance=False)
# sort the results: this is not done automatically for
# radius searches
dist, ind = neigh.radius_neighbors(test, return_distance=True)
for (d, i, i1) in zip(dist, ind, ind1):
j = d.argsort()
d[:] = d[j]
i[:] = i[j]
i1[:] = i1[j]
results.append((dist, ind))
assert_array_almost_equal(np.concatenate(list(ind)),
np.concatenate(list(ind1)))
for i in range(len(results) - 1):
assert_array_almost_equal(np.concatenate(list(results[i][0])),
np.concatenate(list(results[i + 1][0]))),
assert_array_almost_equal(np.concatenate(list(results[i][1])),
np.concatenate(list(results[i + 1][1])))
def test_kneighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
# Test prediction with y_str
knn.fit(X, y_str)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_kneighbors_classifier_float_labels(n_samples=40, n_features=5,
n_test_pts=10, n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)
knn.fit(X, y.astype(np.float))
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
def test_kneighbors_classifier_predict_proba():
# Test KNeighborsClassifier.predict_proba() method
X = np.array([[0, 2, 0],
[0, 2, 1],
[2, 0, 0],
[2, 2, 0],
[0, 0, 2],
[0, 0, 1]])
y = np.array([4, 4, 5, 5, 1, 1])
cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1) # cityblock dist
cls.fit(X, y)
y_prob = cls.predict_proba(X)
real_prob = np.array([[0, 2. / 3, 1. / 3],
[1. / 3, 2. / 3, 0],
[1. / 3, 0, 2. / 3],
[0, 1. / 3, 2. / 3],
[2. / 3, 1. / 3, 0],
[2. / 3, 1. / 3, 0]])
assert_array_equal(real_prob, y_prob)
# Check that it also works with non integer labels
cls.fit(X, y.astype(str))
y_prob = cls.predict_proba(X)
assert_array_equal(real_prob, y_prob)
# Check that it works with weights='distance'
cls = neighbors.KNeighborsClassifier(
n_neighbors=2, p=1, weights='distance')
cls.fit(X, y)
y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]]))
real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]])
assert_array_almost_equal(real_prob, y_prob)
def test_radius_neighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
neigh.fit(X, y_str)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_radius_neighbors_classifier_when_no_neighbors():
# Test radius-based classifier when no neighbors found.
    # In this case it should raise an informative exception
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
weight_func = _weight_func
for outlier_label in [0, -1, None]:
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
rnc = neighbors.RadiusNeighborsClassifier
clf = rnc(radius=radius, weights=weights, algorithm=algorithm,
outlier_label=outlier_label)
clf.fit(X, y)
assert_array_equal(np.array([1, 2]),
clf.predict(z1))
if outlier_label is None:
assert_raises(ValueError, clf.predict, z2)
elif False:
assert_array_equal(np.array([1, outlier_label]),
clf.predict(z2))
def test_radius_neighbors_classifier_outlier_labeling():
# Test radius-based classifier when no neighbors found and outliers
# are labeled.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
correct_labels1 = np.array([1, 2])
correct_labels2 = np.array([1, -1])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm,
outlier_label=-1)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
assert_array_equal(correct_labels2, clf.predict(z2))
def test_radius_neighbors_classifier_zero_distance():
# Test radius-based classifier, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.0, 2.0]])
correct_labels1 = np.array([1, 2])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
def test_neighbors_regressors_zero_distance():
# Test radius-based regressor, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]])
y = np.array([1.0, 1.5, 2.0, 0.0])
radius = 0.2
z = np.array([[1.1, 1.1], [2.0, 2.0]])
rnn_correct_labels = np.array([1.25, 2.0])
knn_correct_unif = np.array([1.25, 1.0])
knn_correct_dist = np.array([1.25, 2.0])
for algorithm in ALGORITHMS:
# we don't test for weights=_weight_func since user will be expected
# to handle zero distances themselves in the function.
for weights in ['uniform', 'distance']:
rnn = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
assert_array_almost_equal(rnn_correct_labels, rnn.predict(z))
for weights, corr_labels in zip(['uniform', 'distance'],
[knn_correct_unif, knn_correct_dist]):
knn = neighbors.KNeighborsRegressor(n_neighbors=2,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
assert_array_almost_equal(corr_labels, knn.predict(z))
def test_radius_neighbors_boundary_handling():
"""Test whether points lying on boundary are handled consistently
Also ensures that even with only one query point, an object array
is returned rather than a 2d array.
"""
X = np.array([[1.5], [3.0], [3.01]])
radius = 3.0
for algorithm in ALGORITHMS:
nbrs = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm).fit(X)
results = nbrs.radius_neighbors([0.0], return_distance=False)
assert_equal(results.shape, (1,))
assert_equal(results.dtype, object)
assert_array_equal(results[0], [0, 1])
def test_RadiusNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 2
n_samples = 40
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
for o in range(n_output):
rnn = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn.fit(X_train, y_train[:, o])
y_pred_so.append(rnn.predict(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
# Multioutput prediction
rnn_mo = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn_mo.fit(X_train, y_train)
y_pred_mo = rnn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
def test_kneighbors_classifier_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-NN classifier on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
X *= X > .2
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
for sparsev in SPARSE_TYPES + (np.asarray,):
X_eps = sparsev(X[:n_test_pts] + epsilon)
y_pred = knn.predict(X_eps)
assert_array_equal(y_pred, y[:n_test_pts])
def test_KNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 5
n_samples = 50
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
y_pred_proba_so = []
for o in range(n_output):
knn = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train[:, o])
y_pred_so.append(knn.predict(X_test))
y_pred_proba_so.append(knn.predict_proba(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
assert_equal(len(y_pred_proba_so), n_output)
# Multioutput prediction
knn_mo = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn_mo.fit(X_train, y_train)
y_pred_mo = knn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
# Check proba
y_pred_proba_mo = knn_mo.predict_proba(X_test)
assert_equal(len(y_pred_proba_mo), n_output)
for proba_mo, proba_so in zip(y_pred_proba_mo, y_pred_proba_so):
assert_array_almost_equal(proba_mo, proba_so)
def test_kneighbors_regressor(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < 0.3))
def test_KNeighborsRegressor_multioutput_uniform_weight():
# Test k-neighbors in multi-output regression with uniform weight
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
knn = neighbors.KNeighborsRegressor(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train)
neigh_idx = knn.kneighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred = knn.predict(X_test)
assert_equal(y_pred.shape, y_test.shape)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_kneighbors_regressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_radius_neighbors_regressor(n_samples=40,
n_features=3,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < radius / 2))
def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight():
# Test radius neighbors in multi-output regression (uniform weight)
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
        rnn = neighbors.RadiusNeighborsRegressor(weights=weights,
                                                 algorithm=algorithm)
rnn.fit(X_train, y_train)
neigh_idx = rnn.radius_neighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred_idx = np.array(y_pred_idx)
y_pred = rnn.predict(X_test)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_equal(y_pred.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_RadiusNeighborsRegressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression with various weight
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
rnn = neighbors.RadiusNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = rnn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_kneighbors_regressor_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test radius-based regression on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .25).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
for sparsev in SPARSE_OR_DENSE:
X2 = sparsev(X)
assert_true(np.mean(knn.predict(X2).round() == y) > 0.95)
def test_neighbors_iris():
# Sanity checks on the iris dataset
# Puts three points of each label in the plane and performs a
# nearest neighbor query on points near the decision boundary.
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=1,
algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_array_equal(clf.predict(iris.data), iris.target)
clf.set_params(n_neighbors=9, algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_true(np.mean(clf.predict(iris.data) == iris.target) > 0.95)
rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm)
rgs.fit(iris.data, iris.target)
assert_true(np.mean(rgs.predict(iris.data).round() == iris.target)
> 0.95)
def test_neighbors_digits():
# Sanity check on the digits dataset
# the 'brute' algorithm has been observed to fail if the input
# dtype is uint8 due to overflow in distance calculations.
X = digits.data.astype('uint8')
Y = digits.target
(n_samples, n_features) = X.shape
train_test_boundary = int(n_samples * 0.8)
train = np.arange(0, train_test_boundary)
test = np.arange(train_test_boundary, n_samples)
(X_train, Y_train, X_test, Y_test) = X[train], Y[train], X[test], Y[test]
clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm='brute')
score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test)
score_float = clf.fit(X_train.astype(float), Y_train).score(
X_test.astype(float), Y_test)
assert_equal(score_uint8, score_float)
def test_kneighbors_graph():
# Test kneighbors_graph to build the k-Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
# n_neighbors = 1
A = neighbors.kneighbors_graph(X, 1, mode='connectivity')
assert_array_equal(A.toarray(), np.eye(A.shape[0]))
A = neighbors.kneighbors_graph(X, 1, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0.00, 1.01, 0.],
[1.01, 0., 0.],
[0.00, 1.40716026, 0.]])
# n_neighbors = 2
A = neighbors.kneighbors_graph(X, 2, mode='connectivity')
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 0.],
[0., 1., 1.]])
A = neighbors.kneighbors_graph(X, 2, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 2.23606798],
[1.01, 0., 1.40716026],
[2.23606798, 1.40716026, 0.]])
# n_neighbors = 3
A = neighbors.kneighbors_graph(X, 3, mode='connectivity')
assert_array_almost_equal(
A.toarray(),
[[1, 1, 1], [1, 1, 1], [1, 1, 1]])
def test_kneighbors_graph_sparse(seed=36):
# Test kneighbors_graph to build the k-Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.kneighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.kneighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_radius_neighbors_graph():
# Test radius_neighbors_graph to build the Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='connectivity')
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 1.],
[0., 1., 1.]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 0.],
[1.01, 0., 1.40716026],
[0., 1.40716026, 0.]])
def test_radius_neighbors_graph_sparse(seed=36):
# Test radius_neighbors_graph to build the Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.radius_neighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.radius_neighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_neighbors_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm='blah')
X = rng.random_sample((10, 2))
Xsparse = csr_matrix(X)
y = np.ones(10)
for cls in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
assert_raises(ValueError,
cls,
weights='blah')
assert_raises(ValueError,
cls, p=-1)
assert_raises(ValueError,
cls, algorithm='blah')
nbrs = cls(algorithm='ball_tree', metric='haversine')
assert_raises(ValueError,
nbrs.predict,
X)
assert_raises(ValueError,
ignore_warnings(nbrs.fit),
Xsparse, y)
nbrs = cls()
assert_raises(ValueError,
nbrs.fit,
np.ones((0, 2)), np.ones(0))
assert_raises(ValueError,
nbrs.fit,
X[:, :, None], y)
nbrs.fit(X, y)
assert_raises(ValueError,
nbrs.predict,
[])
nbrs = neighbors.NearestNeighbors().fit(X)
assert_raises(ValueError,
nbrs.kneighbors_graph,
X, mode='blah')
assert_raises(ValueError,
nbrs.radius_neighbors_graph,
X, mode='blah')
def test_neighbors_metrics(n_samples=20, n_features=3,
n_query_pts=2, n_neighbors=5):
# Test computing the neighbors for various metrics
# create a symmetric matrix
V = rng.rand(n_features, n_features)
VI = np.dot(V, V.T)
metrics = [('euclidean', {}),
('manhattan', {}),
('minkowski', dict(p=1)),
('minkowski', dict(p=2)),
('minkowski', dict(p=3)),
('minkowski', dict(p=np.inf)),
('chebyshev', {}),
('seuclidean', dict(V=rng.rand(n_features))),
('wminkowski', dict(p=3, w=rng.rand(n_features))),
('mahalanobis', dict(VI=VI))]
algorithms = ['brute', 'ball_tree', 'kd_tree']
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for metric, metric_params in metrics:
results = []
p = metric_params.pop('p', 2)
for algorithm in algorithms:
# KD tree doesn't support all metrics
if (algorithm == 'kd_tree' and
metric not in neighbors.KDTree.valid_metrics):
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm=algorithm,
metric=metric, metric_params=metric_params)
continue
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
metric=metric, p=p,
metric_params=metric_params)
neigh.fit(X)
results.append(neigh.kneighbors(test, return_distance=True))
assert_array_almost_equal(results[0][0], results[1][0])
assert_array_almost_equal(results[0][1], results[1][1])
def test_callable_metric():
metric = lambda x1, x2: np.sqrt(np.sum(x1 ** 2 + x2 ** 2))
X = np.random.RandomState(42).rand(20, 2)
nbrs1 = neighbors.NearestNeighbors(3, algorithm='auto', metric=metric)
nbrs2 = neighbors.NearestNeighbors(3, algorithm='brute', metric=metric)
nbrs1.fit(X)
nbrs2.fit(X)
dist1, ind1 = nbrs1.kneighbors(X)
dist2, ind2 = nbrs2.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
def test_metric_params_interface():
assert_warns(DeprecationWarning, neighbors.KNeighborsClassifier,
metric='wminkowski', w=np.ones(10))
assert_warns(SyntaxWarning, neighbors.KNeighborsClassifier,
metric_params={'p': 3})
def test_predict_sparse_ball_kd_tree():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
y = rng.randint(0, 2, 5)
nbrs1 = neighbors.KNeighborsClassifier(1, algorithm='kd_tree')
nbrs2 = neighbors.KNeighborsRegressor(1, algorithm='ball_tree')
for model in [nbrs1, nbrs2]:
model.fit(X, y)
assert_raises(ValueError, model.predict, csr_matrix(X))
def test_non_euclidean_kneighbors():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Find a reasonable radius.
dist_array = pairwise_distances(X).flatten()
    dist_array = np.sort(dist_array)
radius = dist_array[15]
# Test kneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.kneighbors_graph(
X, 3, metric=metric).toarray()
nbrs1 = neighbors.NearestNeighbors(3, metric=metric).fit(X)
assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray())
# Test radiusneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.radius_neighbors_graph(
X, radius, metric=metric).toarray()
nbrs1 = neighbors.NearestNeighbors(metric=metric, radius=radius).fit(X)
assert_array_equal(nbrs_graph,
nbrs1.radius_neighbors_graph(X).toarray())
# Raise error when wrong parameters are supplied,
X_nbrs = neighbors.NearestNeighbors(3, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.kneighbors_graph, X_nbrs, 3,
metric='euclidean')
X_nbrs = neighbors.NearestNeighbors(radius=radius, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.radius_neighbors_graph, X_nbrs,
radius, metric='euclidean')
def check_object_arrays(nparray, list_check):
for ind, ele in enumerate(nparray):
assert_array_equal(ele, list_check[ind])
def test_k_and_radius_neighbors_train_is_not_query():
    # Test kneighbors et al. when the query is not the training data
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
test_data = [[2], [1]]
# Test neighbors.
dist, ind = nn.kneighbors(test_data)
assert_array_equal(dist, [[1], [0]])
assert_array_equal(ind, [[1], [1]])
dist, ind = nn.radius_neighbors([[2], [1]], radius=1.5)
check_object_arrays(dist, [[1], [1, 0]])
check_object_arrays(ind, [[1], [0, 1]])
# Test the graph variants.
assert_array_equal(
nn.kneighbors_graph(test_data).A, [[0., 1.], [0., 1.]])
assert_array_equal(
nn.kneighbors_graph([[2], [1]], mode='distance').A,
np.array([[0., 1.], [0., 0.]]))
rng = nn.radius_neighbors_graph([[2], [1]], radius=1.5)
assert_array_equal(rng.A, [[0, 1], [1, 1]])
def test_k_and_radius_neighbors_X_None():
    # Test kneighbors et al. when the query is None
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, [[1], [1]])
assert_array_equal(ind, [[1], [0]])
dist, ind = nn.radius_neighbors(None, radius=1.5)
check_object_arrays(dist, [[1], [1]])
check_object_arrays(ind, [[1], [0]])
# Test the graph variants.
rng = nn.radius_neighbors_graph(None, radius=1.5)
kng = nn.kneighbors_graph(None)
for graph in [rng, kng]:
            assert_array_equal(graph.A, [[0, 1], [1, 0]])
            assert_array_equal(graph.data, [1, 1])
            assert_array_equal(graph.indices, [1, 0])
X = [[0, 1], [0, 1], [1, 1]]
nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm)
nn.fit(X)
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0]]))
def test_k_and_radius_neighbors_duplicates():
# Test behavior of kneighbors when duplicates are present in query
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
nn.fit([[0], [1]])
# Do not do anything special to duplicates.
kng = nn.kneighbors_graph([[0], [1]], mode='distance')
assert_array_equal(
kng.A,
np.array([[0., 0.], [0., 0.]]))
assert_array_equal(kng.data, [0., 0.])
assert_array_equal(kng.indices, [0, 1])
dist, ind = nn.radius_neighbors([[0], [1]], radius=1.5)
check_object_arrays(dist, [[0, 1], [1, 0]])
check_object_arrays(ind, [[0, 1], [0, 1]])
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5)
assert_array_equal(rng.A, np.ones((2, 2)))
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5,
mode='distance')
assert_array_equal(rng.A, [[0, 1], [1, 0]])
assert_array_equal(rng.indices, [0, 1, 0, 1])
assert_array_equal(rng.data, [0, 1, 1, 0])
# Mask the first duplicates when n_duplicates > n_neighbors.
X = np.ones((3, 1))
nn = neighbors.NearestNeighbors(n_neighbors=1)
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, np.zeros((3, 1)))
assert_array_equal(ind, [[1], [0], [1]])
# Test that zeros are explicitly marked in kneighbors_graph.
kng = nn.kneighbors_graph(mode='distance')
assert_array_equal(
kng.A, np.zeros((3, 3)))
assert_array_equal(kng.data, np.zeros(3))
assert_array_equal(kng.indices, [1., 0., 1.])
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 0.], [1., 0., 0.], [0., 1., 0.]]))
def test_include_self_neighbors_graph():
# Test include_self parameter in neighbors_graph
X = [[2, 3], [4, 5]]
kng = neighbors.kneighbors_graph(X, 1, include_self=True).A
kng_not_self = neighbors.kneighbors_graph(X, 1, include_self=False).A
assert_array_equal(kng, [[1., 0.], [0., 1.]])
assert_array_equal(kng_not_self, [[0., 1.], [1., 0.]])
rng = neighbors.radius_neighbors_graph(X, 5.0, include_self=True).A
rng_not_self = neighbors.radius_neighbors_graph(
X, 5.0, include_self=False).A
assert_array_equal(rng, [[1., 1.], [1., 1.]])
assert_array_equal(rng_not_self, [[0., 1.], [1., 0.]])
if __name__ == '__main__':
import nose
nose.runmodule()
|
bsd-3-clause
|
wazeerzulfikar/scikit-learn
|
examples/neighbors/plot_lof.py
|
23
|
2013
|
"""
=================================================
Anomaly detection with Local Outlier Factor (LOF)
=================================================
This example presents the Local Outlier Factor (LOF) estimator. The LOF
algorithm is an unsupervised outlier detection method which computes the local
density deviation of a given data point with respect to its neighbors.
It considers as outliers the samples that have a substantially lower density
than their neighbors.
The number of neighbors considered (parameter n_neighbors) is typically
chosen 1) greater than the minimum number of objects a cluster has to contain,
so that other objects can be local outliers relative to this cluster, and 2)
smaller than the maximum number of close-by objects that can potentially be
local outliers.
In practice, such information is generally not available, and taking
n_neighbors=20 appears to work well in general.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import LocalOutlierFactor
np.random.seed(42)
# Generate train data
X = 0.3 * np.random.randn(100, 2)
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
X = np.r_[X + 2, X - 2, X_outliers]
# fit the model
clf = LocalOutlierFactor(n_neighbors=20)
y_pred = clf.fit_predict(X)
y_pred_outliers = y_pred[200:]
# plot the level sets of the decision function
xx, yy = np.meshgrid(np.linspace(-5, 5, 50), np.linspace(-5, 5, 50))
Z = clf._decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Local Outlier Factor (LOF)")
plt.contourf(xx, yy, Z, cmap=plt.cm.Blues_r)
a = plt.scatter(X[:200, 0], X[:200, 1], c='white',
edgecolor='k', s=20)
b = plt.scatter(X[200:, 0], X[200:, 1], c='red',
edgecolor='k', s=20)
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a, b],
["normal observations",
"abnormal observations"],
loc="upper left")
plt.show()
|
bsd-3-clause
|
xiedidan/luna-network
|
vnet/VnetDataLayer.py
|
1
|
7844
|
# -*- coding:utf-8 -*-
import sys
sys.path.append("../luna-data-pre-processing")
import NoduleCropper
import NoduleSerializer
from Plotter import Plotter
import os
from glob import glob
import multiprocessing
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
import matplotlib
import caffe
class VnetDataLayer(caffe.Layer):
def setup(self, bottom, top):
self.top_names = ["data", "label"]
params = eval(self.param_str)
# check_params(params)
self.batch_size = params["batch_size"]
self.vol_size = params["vol_size"]
self.batch_loader = BatchLoader(params)
top[0].reshape(self.batch_size, 1, self.vol_size, self.vol_size, self.vol_size)
# top[1].reshape(self.batch_size, 1, int(round(self.vol_size / 2)), int(round(self.vol_size / 2)), int(round(self.vol_size / 2)))
top[1].reshape(self.batch_size, 1, self.vol_size, self.vol_size, self.vol_size)
def forward(self, bottom, top):
for i in range(self.batch_size):
image, groundTruth = self.batch_loader.load()
top[0].data[i, 0, ...] = image
top[1].data[i, 0, ...] = groundTruth
def reshape(self, bottom, top):
pass
def backward(self, bottom, top):
pass
class BatchLoader(object):
def __init__(self, params):
self.batchSize = params["batch_size"]
self.volSize = params["vol_size"]
self.iterationCount = params["iter_count"]
self.queueSize = params["queue_size"]
self.shiftRatio = params["shift_ratio"]
self.rotateRatio = params["rotate_ratio"]
self.histogramShiftRatio = params["histogram_shift_ratio"]
self.dataPath = params["data_path"]
self.netPath = params["net_path"]
self.phase = params["phase"]
self.phaseSubPath = self.phase + "/"
self.dataQueue = multiprocessing.Queue(self.queueSize)
self.plotter = Plotter()
self.plotter.initDataAndLabel2D(interval = 20)
if self.phase == "deploy":
# scan
pass
else:
# load all nodules and groundTruths
dataProcess = multiprocessing.Process(target = self.dataProcessor, args = (self.dataQueue,))
dataProcess.daemon = True
dataProcess.start()
# interface
def load(self):
[nodule, groundTruth] = self.dataQueue.get()
return nodule, groundTruth
# helper
def loadSample(self, filename):
serializer = NoduleSerializer.NoduleSerializer(self.dataPath, self.phase)
sample = {}
image = serializer.readFromNpy("nodules/", filename)
groundTruth = serializer.readFromNpy("labels/", filename)
sample["image"] = image
sample["groundTruth"] = groundTruth
return sample
def loadAllSamples(self, filenames):
samples = []
        for filename in tqdm(filenames):
sample = self.loadSample(filename)
samples.append(sample)
return samples
def setWindow(self, image, upperBound=400.0, lowerBound=-1000.0):
image[image > upperBound] = upperBound
image[image < lowerBound] = lowerBound
return image
def normalize(self, image):
mean = np.mean(image)
std = np.std(image)
image = image.astype(np.float32)
image -= mean.astype(np.float32)
image /= std.astype(np.float32)
return image
def randomizedHistogramShift(self, sample, shiftRatio):
image = sample["image"]
if np.random.random() < shiftRatio:
# shift +- 5%
shiftPercent = (np.random.random() - 0.5) / 10.0
image = image * (1.0 + shiftPercent)
sample["image"] = image
return sample
def randomizedCrop(self, sample, rotateRatio, shiftRatio):
image = sample["image"]
groundTruth = sample["groundTruth"]
if np.random.random() < rotateRatio:
            # rotate - P(3, 3) - 1 = 5 possible axis permutations (identity excluded)
            rotateList = [[1, 0, 2],
                          [1, 2, 0],
                          [2, 0, 1],
                          [2, 1, 0],
                          [0, 2, 1]]
            # np.random.randint's upper bound is exclusive; sample over the whole list
            rotateIndex = np.random.randint(0, len(rotateList))
            rotate = rotateList[rotateIndex]
image = np.transpose(image, rotate)
groundTruth = np.transpose(groundTruth, rotate)
dataRange = np.array([32, 96])
shiftx = 0
shifty = 0
shiftz = 0
if np.random.random() < shiftRatio:
# shift - we shift +-16 max along each axis
shiftx = np.random.randint(-16, 16)
shifty = np.random.randint(-16, 16)
shiftz = np.random.randint(-16, 16)
xRange = dataRange + np.array([shiftx, shiftx])
yRange = dataRange + np.array([shifty, shifty])
zRange = dataRange + np.array([shiftz, shiftz])
else:
xRange = dataRange
yRange = dataRange
zRange = dataRange
crop = {}
crop["image"] = image[zRange[0]:zRange[1], yRange[0]:yRange[1], xRange[0]:xRange[1]]
# crop["groundTruth"] = groundTruth[(zRange[0] + 16):(zRange[1] - 16), (yRange[0] + 16):(yRange[1] - 16), (xRange[0] + 16):(xRange[1] - 16)]
crop["groundTruth"] = groundTruth[zRange[0]:zRange[1], yRange[0]:yRange[1],xRange[0]:xRange[1]]
# visually check data augment
self.plotter.plotDataAndLabel2D(crop["image"], crop["groundTruth"], 32 - shiftz)
return crop
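    # Geometry note (added for clarity, assuming 128^3 input volumes as the +-16
    # shift around the central [32, 96] block suggests): the crop is always
    # 64 voxels per axis, e.g. shiftz=+16 selects z range 48:112 and shiftx=-16
    # selects x range 16:80, so every crop stays inside the original volume and
    # the later shape check against 64 passes.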
def dataProcessor(self, dataQueue):
npyFileList = glob(self.dataPath + self.phaseSubPath + "nodules/*.npy")
npyFileList = map(lambda filePath: os.path.basename(filePath), npyFileList)
# load all samples
samples = self.loadAllSamples(npyFileList)
        for sample in tqdm(samples):
image = sample["image"]
# image = self.setWindow(image)
image = self.normalize(image)
sample["image"] = image
# crop on the fly since we want randomized input
np.random.seed()
print(self.iterationCount)
for i in range(self.iterationCount):
for j in range(self.batchSize):
# get random sample
                noduleIndex = np.random.randint(0, len(samples))  # upper bound is exclusive
sample = samples[noduleIndex]
crop = {}
# randomized cropping
if self.phase == "train":
crop = self.randomizedCrop(sample, self.rotateRatio, self.shiftRatio)
crop = self.randomizedHistogramShift(crop, self.histogramShiftRatio)
elif self.phase == "test":
# groundTruth = sample["groundTruth"]
# labelRange = np.array([int(round(self.volSize / 4)), int(round(self.volSize * 3 / 4))])
# crop["groundTruth"] = groundTruth[labelRange[0]:labelRange[1], labelRange[0]:labelRange[1], labelRange[0]:labelRange[1]]
crop["groundTruth"] = sample["groundTruth"]
crop["image"] = sample["image"]
# visually check data
self.plotter.plotDataAndLabel2D(crop["image"], crop["groundTruth"], 32)
if crop["groundTruth"].shape[0] != 64:
# print("{0}, {1}, {2}, {3}, {4}".format(self.phase, noduleIndex, labelRange, crop["image"].shape, crop["groundTruth"].shape))
print("{0}, {1}, {2}, {3}".format(self.phase, noduleIndex, crop["image"].shape,
crop["groundTruth"].shape))
else:
dataQueue.put(tuple((crop["image"], crop["groundTruth"])))
|
gpl-3.0
|
kyleabeauchamp/EnsemblePaper
|
code/model_building/fit_models_logdet_H.py
|
1
|
1350
|
import pandas as pd
import numpy as np
from fitensemble import bayesian_weighting, belt
import experiment_loader
import ALA3
ff = "amber96"
regularization_strength = 1E-6
stride = 40
thin = 500
steps = 150000
predictions, measurements, uncertainties = experiment_loader.load(ff, stride=stride)
phi, psi, ass_raw, state_ind = experiment_loader.load_rama(ff, stride)
regularization_strength = 10.0
model2 = belt.MaxEntBELT(predictions.values, measurements.values, uncertainties.values, regularization_strength, log_det_correction=True)
model2.sample(steps, thin=thin, burn=ALA3.burn)
regularization_strength = 10.0
model = belt.MaxEntBELT(predictions.values, measurements.values, uncertainties.values, regularization_strength)
model.sample(steps, thin=thin, burn=ALA3.burn)
ai = model.mcmc.trace("alpha")[:]
ai2 = model2.mcmc.trace("alpha")[:]
p = model.accumulate_populations()
p2 = model2.accumulate_populations()
mu = predictions.T.dot(p)
mu2 = predictions.T.dot(p2)
z = (mu - measurements) / uncertainties
z2 = (mu2 - measurements) / uncertainties
state_pops_trace = model.trace_observable(state_ind.T)
state_pops_trace2 = model2.trace_observable(state_ind.T)
data = []
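# For each posterior population sample p the loop below accumulates
# -0.5 * sum_i log(E_p[f_i^2] - E_p[f_i]^2), i.e. minus half the sum of the log
# prediction variances (half the log-determinant of the diagonal prediction covariance).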
for p in model.iterate_populations():
mu2s = (predictions ** 2.).T.dot(p)
mu2 = predictions.T.dot(p)
data.append(-0.5 * np.log((mu2s - mu2 ** 2)).sum())
|
gpl-3.0
|
tplusx/stdma-ns3
|
sources/src/flow-monitor/examples/wifi-olsr-flowmon.py
|
108
|
7439
|
# -*- Mode: Python; -*-
# Copyright (c) 2009 INESC Porto
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Authors: Gustavo Carneiro <[email protected]>
import sys
import ns.applications
import ns.core
import ns.flow_monitor
import ns.internet
import ns.mobility
import ns.network
import ns.olsr
import ns.wifi
try:
import ns.visualizer
except ImportError:
pass
DISTANCE = 100 # (m)
NUM_NODES_SIDE = 3
def main(argv):
cmd = ns.core.CommandLine()
cmd.NumNodesSide = None
cmd.AddValue("NumNodesSide", "Grid side number of nodes (total number of nodes will be this number squared)")
cmd.Results = None
cmd.AddValue("Results", "Write XML results to file")
cmd.Plot = None
cmd.AddValue("Plot", "Plot the results using the matplotlib python module")
cmd.Parse(argv)
wifi = ns.wifi.WifiHelper.Default()
wifiMac = ns.wifi.NqosWifiMacHelper.Default()
wifiPhy = ns.wifi.YansWifiPhyHelper.Default()
wifiChannel = ns.wifi.YansWifiChannelHelper.Default()
wifiPhy.SetChannel(wifiChannel.Create())
ssid = ns.wifi.Ssid("wifi-default")
wifi.SetRemoteStationManager("ns3::ArfWifiManager")
wifiMac.SetType ("ns3::AdhocWifiMac",
"Ssid", ns.wifi.SsidValue(ssid))
internet = ns.internet.InternetStackHelper()
list_routing = ns.internet.Ipv4ListRoutingHelper()
olsr_routing = ns.olsr.OlsrHelper()
static_routing = ns.internet.Ipv4StaticRoutingHelper()
list_routing.Add(static_routing, 0)
list_routing.Add(olsr_routing, 100)
internet.SetRoutingHelper(list_routing)
ipv4Addresses = ns.internet.Ipv4AddressHelper()
ipv4Addresses.SetBase(ns.network.Ipv4Address("10.0.0.0"), ns.network.Ipv4Mask("255.255.255.0"))
port = 9 # Discard port(RFC 863)
onOffHelper = ns.applications.OnOffHelper("ns3::UdpSocketFactory",
ns.network.Address(ns.network.InetSocketAddress(ns.network.Ipv4Address("10.0.0.1"), port)))
onOffHelper.SetAttribute("DataRate", ns.network.DataRateValue(ns.network.DataRate("100kbps")))
onOffHelper.SetAttribute("OnTime", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=1]"))
onOffHelper.SetAttribute("OffTime", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=0]"))
addresses = []
nodes = []
if cmd.NumNodesSide is None:
num_nodes_side = NUM_NODES_SIDE
else:
num_nodes_side = int(cmd.NumNodesSide)
for xi in range(num_nodes_side):
for yi in range(num_nodes_side):
node = ns.network.Node()
nodes.append(node)
internet.Install(ns.network.NodeContainer(node))
mobility = ns.mobility.ConstantPositionMobilityModel()
mobility.SetPosition(ns.core.Vector(xi*DISTANCE, yi*DISTANCE, 0))
node.AggregateObject(mobility)
devices = wifi.Install(wifiPhy, wifiMac, node)
ipv4_interfaces = ipv4Addresses.Assign(devices)
addresses.append(ipv4_interfaces.GetAddress(0))
for i, node in enumerate(nodes):
destaddr = addresses[(len(addresses) - 1 - i) % len(addresses)]
#print i, destaddr
onOffHelper.SetAttribute("Remote", ns.network.AddressValue(ns.network.InetSocketAddress(destaddr, port)))
app = onOffHelper.Install(ns.network.NodeContainer(node))
urv = ns.core.UniformRandomVariable()
app.Start(ns.core.Seconds(urv.GetValue(20, 30)))
#internet.EnablePcapAll("wifi-olsr")
flowmon_helper = ns.flow_monitor.FlowMonitorHelper()
#flowmon_helper.SetMonitorAttribute("StartTime", ns.core.TimeValue(ns.core.Seconds(31)))
monitor = flowmon_helper.InstallAll()
monitor = flowmon_helper.GetMonitor()
monitor.SetAttribute("DelayBinWidth", ns.core.DoubleValue(0.001))
monitor.SetAttribute("JitterBinWidth", ns.core.DoubleValue(0.001))
monitor.SetAttribute("PacketSizeBinWidth", ns.core.DoubleValue(20))
ns.core.Simulator.Stop(ns.core.Seconds(44.0))
ns.core.Simulator.Run()
def print_stats(os, st):
print >> os, " Tx Bytes: ", st.txBytes
print >> os, " Rx Bytes: ", st.rxBytes
print >> os, " Tx Packets: ", st.txPackets
print >> os, " Rx Packets: ", st.rxPackets
print >> os, " Lost Packets: ", st.lostPackets
if st.rxPackets > 0:
print >> os, " Mean{Delay}: ", (st.delaySum.GetSeconds() / st.rxPackets)
print >> os, " Mean{Jitter}: ", (st.jitterSum.GetSeconds() / (st.rxPackets-1))
print >> os, " Mean{Hop Count}: ", float(st.timesForwarded) / st.rxPackets + 1
if 0:
print >> os, "Delay Histogram"
for i in range(st.delayHistogram.GetNBins () ):
print >> os, " ",i,"(", st.delayHistogram.GetBinStart (i), "-", \
st.delayHistogram.GetBinEnd (i), "): ", st.delayHistogram.GetBinCount (i)
print >> os, "Jitter Histogram"
for i in range(st.jitterHistogram.GetNBins () ):
print >> os, " ",i,"(", st.jitterHistogram.GetBinStart (i), "-", \
st.jitterHistogram.GetBinEnd (i), "): ", st.jitterHistogram.GetBinCount (i)
print >> os, "PacketSize Histogram"
for i in range(st.packetSizeHistogram.GetNBins () ):
print >> os, " ",i,"(", st.packetSizeHistogram.GetBinStart (i), "-", \
st.packetSizeHistogram.GetBinEnd (i), "): ", st.packetSizeHistogram.GetBinCount (i)
for reason, drops in enumerate(st.packetsDropped):
print " Packets dropped by reason %i: %i" % (reason, drops)
#for reason, drops in enumerate(st.bytesDropped):
# print "Bytes dropped by reason %i: %i" % (reason, drops)
monitor.CheckForLostPackets()
classifier = flowmon_helper.GetClassifier()
if cmd.Results is None:
for flow_id, flow_stats in monitor.GetFlowStats():
t = classifier.FindFlow(flow_id)
proto = {6: 'TCP', 17: 'UDP'} [t.protocol]
print "FlowID: %i (%s %s/%s --> %s/%i)" % \
(flow_id, proto, t.sourceAddress, t.sourcePort, t.destinationAddress, t.destinationPort)
print_stats(sys.stdout, flow_stats)
else:
print monitor.SerializeToXmlFile(cmd.Results, True, True)
if cmd.Plot is not None:
import pylab
delays = []
for flow_id, flow_stats in monitor.GetFlowStats():
tupl = classifier.FindFlow(flow_id)
if tupl.protocol == 17 and tupl.sourcePort == 698:
continue
delays.append(flow_stats.delaySum.GetSeconds() / flow_stats.rxPackets)
pylab.hist(delays, 20)
pylab.xlabel("Delay (s)")
pylab.ylabel("Number of Flows")
pylab.show()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
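# Hedged usage note (added for illustration; the flag names come from the cmd.AddValue
# calls above). Inside an ns-3 build environment this example is typically launched via
# waf, e.g.:
#
#   ./waf --pyrun "src/flow-monitor/examples/wifi-olsr-flowmon.py --NumNodesSide=4 --Results=flows.xml"
#
# NumNodesSide sets the grid side (total nodes = side squared), Results writes the
# FlowMonitor XML report, and passing --Plot=1 additionally shows the matplotlib delay
# histogram. The exact script path depends on the local source tree.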
|
gpl-2.0
|
zfrenchee/pandas
|
setup.py
|
1
|
27220
|
#!/usr/bin/env python
"""
Parts of this file were taken from the pyzmq project
(https://github.com/zeromq/pyzmq) which have been permitted for use under the
BSD license. Parts are from lxml (https://github.com/lxml/lxml)
"""
import os
from os.path import join as pjoin
import pkg_resources
import sys
import shutil
from distutils.version import LooseVersion
from setuptools import setup, Command, find_packages
# versioning
import versioneer
cmdclass = versioneer.get_cmdclass()
def is_platform_windows():
return sys.platform == 'win32' or sys.platform == 'cygwin'
def is_platform_linux():
return sys.platform == 'linux2'
def is_platform_mac():
return sys.platform == 'darwin'
min_cython_ver = '0.24'
try:
import Cython
ver = Cython.__version__
_CYTHON_INSTALLED = ver >= LooseVersion(min_cython_ver)
except ImportError:
_CYTHON_INSTALLED = False
min_numpy_ver = '1.9.0'
setuptools_kwargs = {
'install_requires': [
'python-dateutil >= 2.5.0',
'pytz >= 2011k',
'numpy >= {numpy_ver}'.format(numpy_ver=min_numpy_ver),
],
'setup_requires': ['numpy >= {numpy_ver}'.format(numpy_ver=min_numpy_ver)],
'zip_safe': False,
}
from distutils.extension import Extension # noqa:E402
from distutils.command.build import build # noqa:E402
from distutils.command.build_ext import build_ext as _build_ext # noqa:E402
try:
if not _CYTHON_INSTALLED:
raise ImportError('No supported version of Cython installed.')
try:
from Cython.Distutils.old_build_ext import old_build_ext as _build_ext # noqa:F811,E501
except ImportError:
# Pre 0.25
from Cython.Distutils import build_ext as _build_ext
cython = True
except ImportError:
cython = False
if cython:
try:
try:
from Cython import Tempita as tempita
except ImportError:
import tempita
except ImportError:
raise ImportError('Building pandas requires Tempita: '
'pip install Tempita')
_pxi_dep_template = {
'algos': ['_libs/algos_common_helper.pxi.in',
'_libs/algos_take_helper.pxi.in',
'_libs/algos_rank_helper.pxi.in'],
'groupby': ['_libs/groupby_helper.pxi.in'],
'join': ['_libs/join_helper.pxi.in', '_libs/join_func_helper.pxi.in'],
'reshape': ['_libs/reshape_helper.pxi.in'],
'hashtable': ['_libs/hashtable_class_helper.pxi.in',
'_libs/hashtable_func_helper.pxi.in'],
'index': ['_libs/index_class_helper.pxi.in'],
'sparse': ['_libs/sparse_op_helper.pxi.in'],
'interval': ['_libs/intervaltree.pxi.in']}
_pxifiles = []
_pxi_dep = {}
for module, files in _pxi_dep_template.items():
pxi_files = [pjoin('pandas', x) for x in files]
_pxifiles.extend(pxi_files)
_pxi_dep[module] = pxi_files
class build_ext(_build_ext):
def build_extensions(self):
        # if building from c files, we don't need to
# generate template output
if cython:
for pxifile in _pxifiles:
# build pxifiles first, template extension must be .pxi.in
assert pxifile.endswith('.pxi.in')
outfile = pxifile[:-3]
if (os.path.exists(outfile) and
os.stat(pxifile).st_mtime < os.stat(outfile).st_mtime):
# if .pxi.in is not updated, no need to output .pxi
continue
with open(pxifile, "r") as f:
tmpl = f.read()
pyxcontent = tempita.sub(tmpl)
with open(outfile, "w") as f:
f.write(pyxcontent)
numpy_incl = pkg_resources.resource_filename('numpy', 'core/include')
for ext in self.extensions:
if (hasattr(ext, 'include_dirs') and
numpy_incl not in ext.include_dirs):
ext.include_dirs.append(numpy_incl)
_build_ext.build_extensions(self)
DESCRIPTION = ("Powerful data structures for data analysis, time series, "
"and statistics")
LONG_DESCRIPTION = """
**pandas** is a Python package providing fast, flexible, and expressive data
structures designed to make working with structured (tabular, multidimensional,
potentially heterogeneous) and time series data both easy and intuitive. It
aims to be the fundamental high-level building block for doing practical,
**real world** data analysis in Python. Additionally, it has the broader goal
of becoming **the most powerful and flexible open source data analysis /
manipulation tool available in any language**. It is already well on its way
toward this goal.
pandas is well suited for many different kinds of data:
- Tabular data with heterogeneously-typed columns, as in an SQL table or
Excel spreadsheet
- Ordered and unordered (not necessarily fixed-frequency) time series data.
- Arbitrary matrix data (homogeneously typed or heterogeneous) with row and
column labels
- Any other form of observational / statistical data sets. The data actually
need not be labeled at all to be placed into a pandas data structure
The two primary data structures of pandas, Series (1-dimensional) and DataFrame
(2-dimensional), handle the vast majority of typical use cases in finance,
statistics, social science, and many areas of engineering. For R users,
DataFrame provides everything that R's ``data.frame`` provides and much
more. pandas is built on top of `NumPy <http://www.numpy.org>`__ and is
intended to integrate well within a scientific computing environment with many
other 3rd party libraries.
Here are just a few of the things that pandas does well:
- Easy handling of **missing data** (represented as NaN) in floating point as
well as non-floating point data
- Size mutability: columns can be **inserted and deleted** from DataFrame and
higher dimensional objects
- Automatic and explicit **data alignment**: objects can be explicitly
aligned to a set of labels, or the user can simply ignore the labels and
let `Series`, `DataFrame`, etc. automatically align the data for you in
computations
- Powerful, flexible **group by** functionality to perform
split-apply-combine operations on data sets, for both aggregating and
transforming data
- Make it **easy to convert** ragged, differently-indexed data in other
Python and NumPy data structures into DataFrame objects
- Intelligent label-based **slicing**, **fancy indexing**, and **subsetting**
of large data sets
- Intuitive **merging** and **joining** data sets
- Flexible **reshaping** and pivoting of data sets
- **Hierarchical** labeling of axes (possible to have multiple labels per
tick)
- Robust IO tools for loading data from **flat files** (CSV and delimited),
Excel files, databases, and saving / loading data from the ultrafast **HDF5
format**
- **Time series**-specific functionality: date range generation and frequency
conversion, moving window statistics, moving window linear regressions,
date shifting and lagging, etc.
Many of these principles are here to address the shortcomings frequently
experienced using other languages / scientific research environments. For data
scientists, working with data is typically divided into multiple stages:
munging and cleaning data, analyzing / modeling it, then organizing the results
of the analysis into a form suitable for plotting or tabular display. pandas is
the ideal tool for all of these tasks.
"""
DISTNAME = 'pandas'
LICENSE = 'BSD'
AUTHOR = "The PyData Development Team"
EMAIL = "[email protected]"
URL = "http://pandas.pydata.org"
DOWNLOAD_URL = ''
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Cython',
'Topic :: Scientific/Engineering']
class CleanCommand(Command):
"""Custom distutils command to clean the .so and .pyc files."""
user_options = [("all", "a", "")]
def initialize_options(self):
self.all = True
self._clean_me = []
self._clean_trees = []
base = pjoin('pandas', '_libs', 'src')
dt = pjoin(base, 'datetime')
src = base
util = pjoin('pandas', 'util')
parser = pjoin(base, 'parser')
ujson_python = pjoin(base, 'ujson', 'python')
ujson_lib = pjoin(base, 'ujson', 'lib')
self._clean_exclude = [pjoin(dt, 'np_datetime.c'),
pjoin(dt, 'np_datetime_strings.c'),
pjoin(src, 'period_helper.c'),
pjoin(parser, 'tokenizer.c'),
pjoin(parser, 'io.c'),
pjoin(ujson_python, 'ujson.c'),
pjoin(ujson_python, 'objToJSON.c'),
pjoin(ujson_python, 'JSONtoObj.c'),
pjoin(ujson_lib, 'ultrajsonenc.c'),
pjoin(ujson_lib, 'ultrajsondec.c'),
pjoin(util, 'move.c'),
]
for root, dirs, files in os.walk('pandas'):
for f in files:
filepath = pjoin(root, f)
if filepath in self._clean_exclude:
continue
if os.path.splitext(f)[-1] in ('.pyc', '.so', '.o',
'.pyo',
'.pyd', '.c', '.orig'):
self._clean_me.append(filepath)
for d in dirs:
if d == '__pycache__':
self._clean_trees.append(pjoin(root, d))
# clean the generated pxi files
for pxifile in _pxifiles:
pxifile = pxifile.replace(".pxi.in", ".pxi")
self._clean_me.append(pxifile)
for d in ('build', 'dist'):
if os.path.exists(d):
self._clean_trees.append(d)
def finalize_options(self):
pass
def run(self):
for clean_me in self._clean_me:
try:
os.unlink(clean_me)
except Exception:
pass
for clean_tree in self._clean_trees:
try:
shutil.rmtree(clean_tree)
except Exception:
pass
# we need to inherit from the versioneer
# class as it encodes the version info
sdist_class = cmdclass['sdist']
class CheckSDist(sdist_class):
"""Custom sdist that ensures Cython has compiled all pyx files to c."""
_pyxfiles = ['pandas/_libs/lib.pyx',
'pandas/_libs/hashtable.pyx',
'pandas/_libs/tslib.pyx',
'pandas/_libs/index.pyx',
'pandas/_libs/algos.pyx',
'pandas/_libs/join.pyx',
'pandas/_libs/indexing.pyx',
'pandas/_libs/interval.pyx',
'pandas/_libs/hashing.pyx',
'pandas/_libs/missing.pyx',
'pandas/_libs/testing.pyx',
'pandas/_libs/window.pyx',
'pandas/_libs/skiplist.pyx',
'pandas/_libs/sparse.pyx',
'pandas/_libs/parsers.pyx',
'pandas/_libs/tslibs/ccalendar.pyx',
'pandas/_libs/tslibs/period.pyx',
'pandas/_libs/tslibs/strptime.pyx',
'pandas/_libs/tslibs/np_datetime.pyx',
'pandas/_libs/tslibs/timedeltas.pyx',
'pandas/_libs/tslibs/timestamps.pyx',
'pandas/_libs/tslibs/timezones.pyx',
'pandas/_libs/tslibs/conversion.pyx',
'pandas/_libs/tslibs/fields.pyx',
'pandas/_libs/tslibs/offsets.pyx',
'pandas/_libs/tslibs/frequencies.pyx',
'pandas/_libs/tslibs/resolution.pyx',
'pandas/_libs/tslibs/parsing.pyx',
'pandas/io/sas/sas.pyx']
def initialize_options(self):
sdist_class.initialize_options(self)
def run(self):
if 'cython' in cmdclass:
self.run_command('cython')
else:
for pyxfile in self._pyxfiles:
cfile = pyxfile[:-3] + 'c'
msg = ("C-source file '{source}' not found.\n"
"Run 'setup.py cython' before sdist.".format(
source=cfile))
assert os.path.isfile(cfile), msg
sdist_class.run(self)
class CheckingBuildExt(build_ext):
"""
Subclass build_ext to get clearer report if Cython is necessary.
"""
def check_cython_extensions(self, extensions):
for ext in extensions:
for src in ext.sources:
if not os.path.exists(src):
print("{}: -> [{}]".format(ext.name, ext.sources))
raise Exception("""Cython-generated file '{src}' not found.
Cython is required to compile pandas from a development branch.
Please install Cython or download a release package of pandas.
""".format(src=src))
def build_extensions(self):
self.check_cython_extensions(self.extensions)
build_ext.build_extensions(self)
class CythonCommand(build_ext):
"""Custom distutils command subclassed from Cython.Distutils.build_ext
to compile pyx->c, and stop there. All this does is override the
C-compile method build_extension() with a no-op."""
def build_extension(self, ext):
pass
class DummyBuildSrc(Command):
""" numpy's build_src command interferes with Cython's build_ext.
"""
user_options = []
def initialize_options(self):
self.py_modules_dict = {}
def finalize_options(self):
pass
def run(self):
pass
cmdclass.update({'clean': CleanCommand,
'build': build})
try:
from wheel.bdist_wheel import bdist_wheel
class BdistWheel(bdist_wheel):
def get_tag(self):
tag = bdist_wheel.get_tag(self)
repl = 'macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64'
if tag[2] == 'macosx_10_6_intel':
tag = (tag[0], tag[1], repl)
return tag
cmdclass['bdist_wheel'] = BdistWheel
except ImportError:
pass
if cython:
suffix = '.pyx'
cmdclass['build_ext'] = CheckingBuildExt
cmdclass['cython'] = CythonCommand
else:
suffix = '.c'
cmdclass['build_src'] = DummyBuildSrc
cmdclass['build_ext'] = CheckingBuildExt
lib_depends = ['reduce', 'inference']
def srcpath(name=None, suffix='.pyx', subdir='src'):
return pjoin('pandas', subdir, name + suffix)
if suffix == '.pyx':
lib_depends = [srcpath(f, suffix='.pyx', subdir='_libs/src')
for f in lib_depends]
lib_depends.append('pandas/_libs/src/util.pxd')
else:
lib_depends = []
plib_depends = []
common_include = ['pandas/_libs/src/klib', 'pandas/_libs/src']
def pxd(name):
return os.path.abspath(pjoin('pandas', name + '.pxd'))
# args to ignore warnings
if is_platform_windows():
extra_compile_args = []
else:
extra_compile_args = ['-Wno-unused-function']
lib_depends = lib_depends + ['pandas/_libs/src/numpy_helper.h',
'pandas/_libs/src/parse_helper.h',
'pandas/_libs/src/compat_helper.h']
np_datetime_headers = ['pandas/_libs/src/datetime/np_datetime.h',
'pandas/_libs/src/datetime/np_datetime_strings.h']
np_datetime_sources = ['pandas/_libs/src/datetime/np_datetime.c',
'pandas/_libs/src/datetime/np_datetime_strings.c']
tseries_depends = np_datetime_headers + ['pandas/_libs/tslibs/np_datetime.pxd']
# some linux distros require it
libraries = ['m'] if not is_platform_windows() else []
ext_data = {
'_libs.algos': {
'pyxfile': '_libs/algos',
'pxdfiles': ['_libs/src/util', '_libs/algos', '_libs/hashtable'],
'depends': _pxi_dep['algos']},
'_libs.groupby': {
'pyxfile': '_libs/groupby',
'pxdfiles': ['_libs/src/util', '_libs/algos'],
'depends': _pxi_dep['groupby']},
'_libs.hashing': {
'pyxfile': '_libs/hashing'},
'_libs.hashtable': {
'pyxfile': '_libs/hashtable',
'pxdfiles': ['_libs/hashtable', '_libs/missing', '_libs/khash'],
'depends': (['pandas/_libs/src/klib/khash_python.h'] +
_pxi_dep['hashtable'])},
'_libs.index': {
'pyxfile': '_libs/index',
'pxdfiles': ['_libs/src/util', '_libs/hashtable'],
'depends': _pxi_dep['index'],
'sources': np_datetime_sources},
'_libs.indexing': {
'pyxfile': '_libs/indexing'},
'_libs.interval': {
'pyxfile': '_libs/interval',
'pxdfiles': ['_libs/hashtable'],
'depends': _pxi_dep['interval']},
'_libs.join': {
'pyxfile': '_libs/join',
'pxdfiles': ['_libs/src/util', '_libs/hashtable'],
'depends': _pxi_dep['join']},
'_libs.lib': {
'pyxfile': '_libs/lib',
'pxdfiles': ['_libs/src/util',
'_libs/missing',
'_libs/tslibs/conversion'],
'depends': lib_depends + tseries_depends},
'_libs.missing': {
'pyxfile': '_libs/missing',
'pxdfiles': ['_libs/src/util'],
'depends': tseries_depends},
'_libs.parsers': {
'pyxfile': '_libs/parsers',
'depends': ['pandas/_libs/src/parser/tokenizer.h',
'pandas/_libs/src/parser/io.h',
'pandas/_libs/src/numpy_helper.h'],
'sources': ['pandas/_libs/src/parser/tokenizer.c',
'pandas/_libs/src/parser/io.c']},
'_libs.tslibs.period': {
'pyxfile': '_libs/tslibs/period',
'pxdfiles': ['_libs/src/util',
'_libs/lib',
'_libs/tslibs/timedeltas',
'_libs/tslibs/timezones',
'_libs/tslibs/nattype'],
'depends': tseries_depends + ['pandas/_libs/src/period_helper.h'],
'sources': np_datetime_sources + ['pandas/_libs/src/period_helper.c']},
'_libs.properties': {
'pyxfile': '_libs/properties',
'include': []},
'_libs.reshape': {
'pyxfile': '_libs/reshape',
'depends': _pxi_dep['reshape']},
'_libs.skiplist': {
'pyxfile': '_libs/skiplist',
'depends': ['pandas/_libs/src/skiplist.h']},
'_libs.sparse': {
'pyxfile': '_libs/sparse',
'depends': _pxi_dep['sparse']},
'_libs.tslib': {
'pyxfile': '_libs/tslib',
'pxdfiles': ['_libs/src/util',
'_libs/tslibs/conversion',
'_libs/tslibs/timedeltas',
'_libs/tslibs/timestamps',
'_libs/tslibs/timezones',
'_libs/tslibs/nattype'],
'depends': tseries_depends,
'sources': np_datetime_sources},
'_libs.tslibs.ccalendar': {
'pyxfile': '_libs/tslibs/ccalendar'},
'_libs.tslibs.conversion': {
'pyxfile': '_libs/tslibs/conversion',
'pxdfiles': ['_libs/src/util',
'_libs/tslibs/nattype',
'_libs/tslibs/timezones',
'_libs/tslibs/timedeltas'],
'depends': tseries_depends,
'sources': np_datetime_sources},
'_libs.tslibs.fields': {
'pyxfile': '_libs/tslibs/fields',
'pxdfiles': ['_libs/tslibs/ccalendar',
'_libs/tslibs/nattype'],
'depends': tseries_depends,
'sources': np_datetime_sources},
'_libs.tslibs.frequencies': {
'pyxfile': '_libs/tslibs/frequencies',
'pxdfiles': ['_libs/src/util']},
'_libs.tslibs.nattype': {
'pyxfile': '_libs/tslibs/nattype',
'pxdfiles': ['_libs/src/util']},
'_libs.tslibs.np_datetime': {
'pyxfile': '_libs/tslibs/np_datetime',
'depends': np_datetime_headers,
'sources': np_datetime_sources},
'_libs.tslibs.offsets': {
'pyxfile': '_libs/tslibs/offsets',
'pxdfiles': ['_libs/src/util',
'_libs/tslibs/conversion',
'_libs/tslibs/frequencies',
'_libs/tslibs/nattype'],
'depends': tseries_depends,
'sources': np_datetime_sources},
'_libs.tslibs.parsing': {
'pyxfile': '_libs/tslibs/parsing',
'pxdfiles': ['_libs/src/util']},
'_libs.tslibs.resolution': {
'pyxfile': '_libs/tslibs/resolution',
'pxdfiles': ['_libs/src/util',
'_libs/khash',
'_libs/tslibs/frequencies',
'_libs/tslibs/timezones'],
'depends': tseries_depends,
'sources': np_datetime_sources},
'_libs.tslibs.strptime': {
'pyxfile': '_libs/tslibs/strptime',
'pxdfiles': ['_libs/src/util',
'_libs/tslibs/nattype'],
'depends': tseries_depends,
'sources': np_datetime_sources},
'_libs.tslibs.timedeltas': {
'pyxfile': '_libs/tslibs/timedeltas',
'pxdfiles': ['_libs/src/util',
'_libs/tslibs/nattype'],
'depends': np_datetime_headers,
'sources': np_datetime_sources},
'_libs.tslibs.timestamps': {
'pyxfile': '_libs/tslibs/timestamps',
'pxdfiles': ['_libs/src/util',
'_libs/tslibs/ccalendar',
'_libs/tslibs/conversion',
'_libs/tslibs/nattype',
'_libs/tslibs/timedeltas',
'_libs/tslibs/timezones'],
'depends': tseries_depends,
'sources': np_datetime_sources},
'_libs.tslibs.timezones': {
'pyxfile': '_libs/tslibs/timezones',
'pxdfiles': ['_libs/src/util']},
'_libs.testing': {
'pyxfile': '_libs/testing'},
'_libs.window': {
'pyxfile': '_libs/window',
'pxdfiles': ['_libs/skiplist', '_libs/src/util']},
'io.sas._sas': {
'pyxfile': 'io/sas/sas'}}
extensions = []
for name, data in ext_data.items():
sources = [srcpath(data['pyxfile'], suffix=suffix, subdir='')]
pxds = [pxd(x) for x in data.get('pxdfiles', [])]
if suffix == '.pyx' and pxds:
sources.extend(pxds)
sources.extend(data.get('sources', []))
include = data.get('include', common_include)
obj = Extension('pandas.{name}'.format(name=name),
sources=sources,
depends=data.get('depends', []),
include_dirs=include,
extra_compile_args=extra_compile_args)
extensions.append(obj)
# ----------------------------------------------------------------------
# msgpack
if sys.byteorder == 'big':
macros = [('__BIG_ENDIAN__', '1')]
else:
macros = [('__LITTLE_ENDIAN__', '1')]
msgpack_include = ['pandas/_libs/src/msgpack'] + common_include
msgpack_suffix = suffix if suffix == '.pyx' else '.cpp'
unpacker_depends = ['pandas/_libs/src/msgpack/unpack.h',
'pandas/_libs/src/msgpack/unpack_define.h',
'pandas/_libs/src/msgpack/unpack_template.h']
packer_ext = Extension('pandas.io.msgpack._packer',
depends=['pandas/_libs/src/msgpack/pack.h',
'pandas/_libs/src/msgpack/pack_template.h'],
sources=[srcpath('_packer',
suffix=msgpack_suffix,
subdir='io/msgpack')],
language='c++',
include_dirs=msgpack_include,
define_macros=macros,
extra_compile_args=extra_compile_args)
unpacker_ext = Extension('pandas.io.msgpack._unpacker',
depends=unpacker_depends,
sources=[srcpath('_unpacker',
suffix=msgpack_suffix,
subdir='io/msgpack')],
language='c++',
include_dirs=msgpack_include,
define_macros=macros,
extra_compile_args=extra_compile_args)
extensions.append(packer_ext)
extensions.append(unpacker_ext)
# ----------------------------------------------------------------------
# ujson
if suffix == '.pyx':
# undo dumb setuptools bug clobbering .pyx sources back to .c
for ext in extensions:
if ext.sources[0].endswith(('.c', '.cpp')):
root, _ = os.path.splitext(ext.sources[0])
ext.sources[0] = root + suffix
ujson_ext = Extension('pandas._libs.json',
depends=['pandas/_libs/src/ujson/lib/ultrajson.h',
'pandas/_libs/src/numpy_helper.h'],
sources=(['pandas/_libs/src/ujson/python/ujson.c',
'pandas/_libs/src/ujson/python/objToJSON.c',
'pandas/_libs/src/ujson/python/JSONtoObj.c',
'pandas/_libs/src/ujson/lib/ultrajsonenc.c',
'pandas/_libs/src/ujson/lib/ultrajsondec.c'] +
np_datetime_sources),
include_dirs=(['pandas/_libs/src/ujson/python',
'pandas/_libs/src/ujson/lib',
'pandas/_libs/src/datetime'] +
common_include),
extra_compile_args=(['-D_GNU_SOURCE'] +
extra_compile_args))
extensions.append(ujson_ext)
# ----------------------------------------------------------------------
# util
# extension for pseudo-safely moving bytes into mutable buffers
_move_ext = Extension('pandas.util._move',
depends=[],
sources=['pandas/util/move.c'])
extensions.append(_move_ext)
# The build cache system does string matching below this point.
# if you change something, be careful.
setup(name=DISTNAME,
maintainer=AUTHOR,
version=versioneer.get_version(),
packages=find_packages(include=['pandas', 'pandas.*']),
package_data={'': ['data/*', 'templates/*'],
'pandas.tests.io': ['data/legacy_hdf/*.h5',
'data/legacy_pickle/*/*.pickle',
'data/legacy_msgpack/*/*.msgpack',
'data/html_encoding/*.html']},
ext_modules=extensions,
maintainer_email=EMAIL,
description=DESCRIPTION,
license=LICENSE,
cmdclass=cmdclass,
url=URL,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
classifiers=CLASSIFIERS,
platforms='any',
**setuptools_kwargs)
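# Hedged usage note (added for illustration): with the commands registered in cmdclass
# above, common developer invocations of this script are
#
#   python setup.py build_ext --inplace   # build the Cython/C extensions next to the sources
#   python setup.py cython                # regenerate .c files from .pyx (only when Cython is available)
#   python setup.py clean --all           # remove generated .so/.pyc/.pxi/build artifacts
#
# Flags beyond these depend on the local distutils/setuptools toolchain.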
|
bsd-3-clause
|
SanPen/GridCal
|
src/GridCal/Engine/Devices/bus.py
|
1
|
25409
|
# This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from GridCal.Engine.basic_structures import BusMode
from GridCal.Engine.Devices.editable_device import EditableDevice, DeviceType, GCProp
from GridCal.Engine.Devices.groupings import Area, Substation, Zone, Country
class Bus(EditableDevice):
"""
The Bus object is the container of all the possible devices that can be attached to
a bus bar or substation. Such objects can be loads, voltage controlled generators,
static generators, batteries, shunt elements, etc.
Arguments:
**name** (str, "Bus"): Name of the bus
**vnom** (float, 10.0): Nominal voltage in kV
**vmin** (float, 0.9): Minimum per unit voltage
**vmax** (float, 1.1): Maximum per unit voltage
**r_fault** (float, 0.0): Resistance of the fault in per unit (SC only)
**x_fault** (float, 0.0): Reactance of the fault in per unit (SC only)
**xpos** (int, 0): X position in pixels (GUI only)
**ypos** (int, 0): Y position in pixels (GUI only)
**height** (int, 0): Height of the graphic object (GUI only)
**width** (int, 0): Width of the graphic object (GUI only)
**active** (bool, True): Is the bus active?
**is_slack** (bool, False): Is this bus a slack bus?
**area** (str, "Default"): Name of the area
**zone** (str, "Default"): Name of the zone
**substation** (str, "Default"): Name of the substation
Additional Properties:
**Qmin_sum** (float, 0): Minimum reactive power of this bus (inferred from the devices)
**Qmax_sum** (float, 0): Maximum reactive power of this bus (inferred from the devices)
**loads** (list, list()): List of loads attached to this bus
**controlled_generators** (list, list()): List of controlled generators attached to this bus
**shunts** (list, list()): List of shunts attached to this bus
**batteries** (list, list()): List of batteries attached to this bus
**static_generators** (list, list()): List of static generators attached to this bus
**measurements** (list, list()): List of measurements
"""
def __init__(self, name="Bus", idtag=None, code='', vnom=10, vmin=0.9, vmax=1.1, r_fault=0.0, x_fault=0.0,
xpos=0, ypos=0, height=0, width=0, active=True, is_slack=False, is_dc=False,
area=None, zone=None, substation=None, country=None, longitude=0.0, latitude=0.0):
EditableDevice.__init__(self,
name=name,
idtag=idtag,
active=active,
code=code,
device_type=DeviceType.BusDevice,
editable_headers={'name': GCProp('', str, 'Name of the bus'),
'idtag': GCProp('', str, 'Unique ID'),
'code': GCProp('', str, 'Some code to further identify the bus'),
'active': GCProp('', bool,
'Is the bus active? used to disable the bus.'),
'is_slack': GCProp('', bool, 'Force the bus to be of slack type.'),
'is_dc': GCProp('', bool, 'Is this bus of DC type?.'),
'Vnom': GCProp('kV', float,
'Nominal line voltage of the bus.'),
'Vmin': GCProp('p.u.', float,
'Lower range of allowed voltage.'),
'Vmax': GCProp('p.u.', float,
                                                                 'Higher range of allowed voltage.'),
'r_fault': GCProp('p.u.', float,
'Resistance of the fault.\n'
'This is used for short circuit studies.'),
'x_fault': GCProp('p.u.', float, 'Reactance of the fault.\n'
'This is used for short circuit studies.'),
'x': GCProp('px', float, 'x position in pixels.'),
'y': GCProp('px', float, 'y position in pixels.'),
'h': GCProp('px', float, 'height of the bus in pixels.'),
'w': GCProp('px', float, 'Width of the bus in pixels.'),
'country': GCProp('', DeviceType.CountryDevice, 'Country of the bus'),
'area': GCProp('', DeviceType.AreaDevice, 'Area of the bus'),
'zone': GCProp('', DeviceType.ZoneDevice, 'Zone of the bus'),
'substation': GCProp('', DeviceType.SubstationDevice, 'Substation of the bus.'),
'longitude': GCProp('deg', float, 'longitude of the bus.'),
'latitude': GCProp('deg', float, 'latitude of the bus.')},
non_editable_attributes=['idtag'],
properties_with_profile={'active': 'active_prof'})
# Nominal voltage (kV)
self.Vnom = vnom
# minimum voltage limit
self.Vmin = vmin
# maximum voltage limit
self.Vmax = vmax
# summation of lower reactive power limits connected
self.Qmin_sum = 0
# summation of upper reactive power limits connected
self.Qmax_sum = 0
# short circuit impedance
self.r_fault = r_fault
self.x_fault = x_fault
# is the bus active?
self.active = active
self.active_prof = None
self.country = country
self.area = area
self.zone = zone
self.substation = substation
        # List of loads attached to this bus
self.loads = list()
# List of Controlled generators attached to this bus
self.controlled_generators = list()
        # List of shunts attached to this bus
self.shunts = list()
# List of batteries attached to this bus
self.batteries = list()
        # List of static generators attached to this bus
self.static_generators = list()
# List of External grid devices
self.external_grids = list()
# List of measurements
self.measurements = list()
# Bus type
self.type = BusMode.PQ
# Flag to determine if the bus is a slack bus or not
self.is_slack = is_slack
        # determines whether this bus is an AC or DC bus
self.is_dc = is_dc
        # if true, the presence of storage devices turns the bus into a Reference bus in practice
        # so that P + jQ are computed
self.dispatch_storage = False
# position and dimensions
self.x = xpos
self.y = ypos
self.h = height
self.w = width
self.longitude = longitude
self.latitude = latitude
def delete_children(self):
"""
Delete all the children
"""
self.batteries.clear()
self.shunts.clear()
self.static_generators.clear()
self.loads.clear()
self.controlled_generators.clear()
def add_device(self, device):
"""
Add device to the bus in the corresponding list
:param device:
:return:
"""
if device.device_type == DeviceType.BatteryDevice:
self.batteries.append(device)
elif device.device_type == DeviceType.ShuntDevice:
self.shunts.append(device)
elif device.device_type == DeviceType.StaticGeneratorDevice:
self.static_generators.append(device)
elif device.device_type == DeviceType.LoadDevice:
self.loads.append(device)
elif device.device_type == DeviceType.GeneratorDevice:
self.controlled_generators.append(device)
elif device.device_type == DeviceType.ExternalGridDevice:
self.external_grids.append(device)
else:
raise Exception('Device type not understood:' + str(device.device_type))
def determine_bus_type(self):
"""
Infer the bus type from the devices attached to it
@return: self.type
"""
if self.is_slack:
# if it is set as slack, set the bus as slack and exit
self.type = BusMode.Slack
return self.type
elif len(self.external_grids) > 0: # there are devices setting this as a slack bus
# count the number of active external grids
ext_on = 0
for elm in self.external_grids:
if elm.active:
ext_on += 1
            # if there are any active external grids, set as slack and exit
if ext_on > 0:
self.type = BusMode.Slack
return self.type
# if we got here, determine what to do...
# count the active and controlled generators
gen_on = 0
for elm in self.controlled_generators:
if elm.active and elm.is_controlled:
gen_on += 1
# count the active and controlled batteries
batt_on = 0
for elm in self.batteries:
if elm.active and elm.is_controlled:
batt_on += 1
shunt_on = 0
for elm in self.shunts:
if elm.active and elm.is_controlled:
shunt_on += 1
if (gen_on + batt_on + shunt_on) > 0:
self.type = BusMode.PV
else:
# Nothing special; set it as PQ
self.type = BusMode.PQ
return self.type
def determine_bus_type_at(self, t):
"""
Infer the bus type from the devices attached to it
:param t: time index
@return: self.type
"""
if self.is_slack:
# if it is set as slack, set the bus as slack and exit
return BusMode.Slack
elif len(self.external_grids) > 0: # there are devices setting this as a slack bus
# count the number of active external grids
ext_on = 0
for elm in self.external_grids:
if elm.active_prof[t]:
ext_on += 1
            # if there are any active external grids, set as slack and exit
if ext_on > 0:
return BusMode.Slack
else:
# if we got here, determine what to do...
# count the active and controlled generators
gen_on = 0
for elm in self.controlled_generators:
if elm.active_prof[t] and elm.is_controlled:
gen_on += 1
# count the active and controlled batteries
batt_on = 0
for elm in self.batteries:
if elm.active_prof[t] and elm.is_controlled:
batt_on += 1
shunt_on = 0
for elm in self.shunts:
if elm.active_prof[t] and elm.is_controlled:
shunt_on += 1
if (gen_on + batt_on + shunt_on) > 0:
return BusMode.PV
else:
# Nothing special; set it as PQ
return BusMode.PQ
return BusMode.PQ
def determine_bus_type_prof(self):
"""
Array of bus types according to the profile
:return: array of bus type numbers
"""
if self.active_prof is not None:
nt = self.active_prof.shape[0]
values = np.zeros(nt, dtype=int)
for t in range(nt):
values[t] = self.determine_bus_type_at(t).value
return values
else:
raise Exception('Asked the profile types with no profile!')
def get_reactive_power_limits(self):
"""
get the summation of reactive power
@return: Qmin, Qmax
"""
Qmin = 0.0
Qmax = 0.0
# count the active and controlled generators
for elm in self.controlled_generators + self.batteries:
if elm.active:
if elm.is_controlled:
Qmin += elm.Qmin
Qmax += elm.Qmax
for elm in self.shunts:
if elm.active:
if elm.is_controlled:
Qmin += elm.Bmin
Qmax += elm.Bmax
return Qmin, Qmax
def initialize_lp_profiles(self):
"""
Dimension the LP var profiles
:return: Nothing
"""
for elm in (self.controlled_generators + self.batteries):
elm.initialize_lp_vars()
def plot_profiles(self, time_profile, ax_load=None, ax_voltage=None, time_series_driver=None, my_index=0):
"""
plot the profiles of this bus
:param time_profile: Master profile of time steps (stored in the MultiCircuit)
:param time_series_driver: time series driver
:param ax_load: Load axis, if not provided one will be created
:param ax_voltage: Voltage axis, if not provided one will be created
:param my_index: index of this object in the time series results
"""
if ax_load is None:
fig = plt.figure(figsize=(12, 8))
fig.suptitle(self.name, fontsize=20)
if time_series_driver is not None:
# 2 plots: load + voltage
ax_load = fig.add_subplot(211)
ax_voltage = fig.add_subplot(212, sharex=ax_load)
else:
# only 1 plot: load
ax_load = fig.add_subplot(111)
ax_voltage = None
show_fig = True
else:
show_fig = False
if time_series_driver is not None:
v = np.abs(time_series_driver.results.voltage[:, my_index])
p = np.abs(time_series_driver.results.S[:, my_index])
t = time_series_driver.results.time
pd.DataFrame(data=v, index=t, columns=['Voltage (p.u.)']).plot(ax=ax_voltage)
pd.DataFrame(data=p, index=t, columns=['Computed power (p.u.)']).plot(ax=ax_load)
# plot the objects' active power profiles
devices = self.loads + self.controlled_generators + self.batteries + self.static_generators
if len(devices) > 0:
dta = dict()
for elm in devices:
dta[elm.name + ' defined'] = elm.P_prof
pd.DataFrame(data=dta, index=t).plot(ax=ax_load)
ax_load.set_ylabel('Power [MW]', fontsize=11)
ax_load.legend()
else:
pass
if ax_voltage is not None:
ax_voltage.set_ylabel('Voltage module [p.u.]', fontsize=11)
ax_voltage.legend()
if show_fig:
plt.show()
def get_active_injection_profiles_dictionary(self):
"""
Get the devices' profiles in a dictionary with the correct sign
:return:
"""
dta = dict()
devices = self.controlled_generators + self.batteries + self.static_generators
if len(devices) > 0:
for elm in devices:
dta[elm.name] = elm.P_prof
for elm in self.loads:
dta[elm.name] = -elm.P_prof
return dta
def copy(self):
"""
Deep copy of this object
:return: New instance of this object
"""
bus = Bus()
bus.name = self.name
# Nominal voltage (kV)
bus.Vnom = self.Vnom
        bus.Vmin = self.Vmin
bus.Vmax = self.Vmax
bus.r_fault = self.r_fault
bus.x_fault = self.x_fault
bus.Qmin_sum = self.Qmin_sum
bus.Qmax_sum = self.Qmax_sum
bus.active = self.active
        # List of loads attached to this bus
for elm in self.loads:
bus.loads.append(elm.copy())
# List of Controlled generators attached to this bus
for elm in self.controlled_generators:
bus.controlled_generators.append(elm.copy())
        # List of shunts attached to this bus
for elm in self.shunts:
bus.shunts.append(elm.copy())
# List of batteries attached to this bus
for elm in self.batteries:
bus.batteries.append(elm.copy())
        # List of static generators attached to this bus
for g in self.static_generators:
bus.static_generators.append(g.copy())
# Bus type
bus.type = self.type
# Flag to determine if the bus is a slack bus or not
bus.is_slack = self.is_slack
        # if true, the presence of storage devices turns the bus into a Reference bus in practice
        # so that P + jQ are computed
bus.dispatch_storage = self.dispatch_storage
bus.x = self.x
bus.y = self.y
bus.h = self.h
bus.w = self.w
bus.area = self.area
bus.zone = self.zone
bus.substation = self.substation
bus.measurements = self.measurements
bus.active_prof = self.active_prof
return bus
def get_properties_dict(self):
"""
Return Json-like dictionary
:return: Dictionary
"""
d = {'id': self.idtag,
'type': self.determine_bus_type().value,
'phases': 'ps',
'name': self.name,
'name_code': self.code,
'active': self.active,
'is_slack': self.is_slack,
'vnom': self.Vnom,
'vmin': self.Vmin,
'vmax': self.Vmax,
'rf': self.r_fault,
'xf': self.x_fault,
'x': self.x,
'y': self.y,
'h': self.h,
'w': self.w,
'lat': self.latitude,
'lon': self.longitude,
'alt': 0.0,
'country': self.country.idtag,
'area': self.area.idtag,
'zone': self.zone.idtag,
'substation': self.substation.idtag
}
return d
def get_profiles_dict(self):
"""
:return:
"""
if self.active_prof is not None:
active_profile = self.active_prof.tolist()
else:
active_profile = list()
return {'id': self.idtag,
'active': active_profile}
def get_units_dict(self):
"""
Get units of the values
"""
return {'vnom': 'kV',
'vmin': 'p.u.',
'vmax': 'p.u.',
'rf': 'p.u.',
'xf': 'p.u.',
'x': 'px',
'y': 'px',
'h': 'px',
'w': 'px',
'lat': 'degrees',
'lon': 'degrees',
'alt': 'm'}
def set_state(self, t):
"""
Set the profiles state of the objects in this bus to the value given in the profiles at the index t
:param t: index of the profile
:return: Nothing
"""
for elm in self.loads:
elm.P = elm.P_prof[t]
elm.Q = elm.Q_prof[t]
elm.Ir = elm.Ir_prof[t]
elm.Ii = elm.Ii_prof[t]
elm.G = elm.G_prof[t]
elm.B = elm.B_prof[t]
for elm in self.static_generators:
elm.P = elm.P_prof[t]
elm.Q = elm.Q_prof[t]
for elm in self.batteries:
elm.P = elm.P_prof[t]
elm.Vset = elm.Vset_prof[t]
for elm in self.controlled_generators:
elm.P = elm.P_prof[t]
elm.Vset = elm.Vset_prof[t]
for elm in self.shunts:
elm.G = elm.G_prof[t]
elm.B = elm.B_prof[t]
def retrieve_graphic_position(self):
"""
Get the position set by the graphic object into this object's variables
:return: Nothing
"""
if self.graphic_obj is not None:
self.x = self.graphic_obj.pos().x()
self.y = self.graphic_obj.pos().y()
self.w, self.h = self.graphic_obj.rect().getCoords()[2:4]
def delete_profiles(self):
"""
Delete all profiles
"""
for elm in self.loads:
elm.delete_profiles()
for elm in self.static_generators:
elm.delete_profiles()
for elm in self.batteries:
elm.delete_profiles()
for elm in self.controlled_generators:
elm.delete_profiles()
for elm in self.shunts:
elm.delete_profiles()
def create_profiles(self, index):
"""
Format all profiles
"""
# create the profiles of this very object
super().create_profiles(index)
for elm in self.loads:
elm.create_profiles(index)
for elm in self.static_generators:
elm.create_profiles(index)
for elm in self.batteries:
elm.create_profiles(index)
for elm in self.controlled_generators:
elm.create_profiles(index)
for elm in self.shunts:
elm.create_profiles(index)
def set_profile_values(self, t):
"""
Set the default values from the profiles at time index t
:param t: profile time index
"""
for elm in self.loads:
elm.set_profile_values(t)
for elm in self.static_generators:
elm.set_profile_values(t)
for elm in self.batteries:
elm.set_profile_values(t)
for elm in self.controlled_generators:
elm.set_profile_values(t)
for elm in self.shunts:
elm.set_profile_values(t)
def apply_lp_profiles(self, Sbase):
"""
Sets the lp solution to the regular generators profile
"""
for elm in self.batteries + self.controlled_generators:
elm.apply_lp_profile(Sbase)
def merge(self, other_bus):
"""
Add the elements of the "Other bus" to this bus
:param other_bus: Another instance of Bus
"""
        # List of loads attached to this bus
self.loads += other_bus.loads.copy()
# List of Controlled generators attached to this bus
self.controlled_generators += other_bus.controlled_generators.copy()
        # List of shunts attached to this bus
self.shunts += other_bus.shunts.copy()
# List of batteries attached to this bus
self.batteries += other_bus.batteries.copy()
        # List of static generators attached to this bus
self.static_generators += other_bus.static_generators.copy()
# List of measurements
self.measurements += other_bus.measurements.copy()
def get_fault_impedance(self):
"""
Get the fault impedance
:return: complex value of fault impedance
"""
return complex(self.r_fault, self.x_fault)
def get_coordinates(self):
"""
Get tuple of the bus coordinates (latitude, longitude)
"""
return self.latitude, self.longitude
def get_devices_list(self):
"""
Return a list of all the connected objects
:return: list of connected objects
"""
return self.loads + self.controlled_generators + self.batteries + self.static_generators + self.shunts
def get_device_number(self):
"""
        Return the number of devices connected to this bus
        :return: number of connected devices
"""
return len(self.loads) + len(self.controlled_generators) + len(self.batteries) + len(self.static_generators) + len(self.shunts)
def ensure_area_objects(self, circuit):
"""
Ensure that every grouping parameter has an object
:param circuit: MultiCircuit instance
"""
if self.area is None:
self.area = circuit.areas[0]
if self.zone is None:
self.zone = circuit.zones[0]
if self.substation is None:
self.substation = circuit.substations[0]
if self.country is None:
self.country = circuit.countries[0]
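# Minimal usage sketch (illustrative only, not part of GridCal). `my_generator` stands in
# for any device whose device_type is DeviceType.GeneratorDevice and which is active and
# controlled, e.g. an instance of GridCal's generator class:
#
#   bus = Bus(name='Bus 1', vnom=20.0)
#   bus.add_device(my_generator)            # routed into bus.controlled_generators
#   bus_type = bus.determine_bus_type()     # -> BusMode.PV while a controlled generator is attached
#   qmin, qmax = bus.get_reactive_power_limits()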
|
gpl-3.0
|
goddoe/CADL
|
session-5/libs/inception.py
|
13
|
4890
|
"""
Creative Applications of Deep Learning w/ Tensorflow.
Kadenze, Inc.
Copyright Parag K. Mital, June 2016.
"""
import os
import numpy as np
from tensorflow.python.platform import gfile
import tensorflow as tf
import matplotlib.pyplot as plt
from skimage.transform import resize as imresize
from .utils import download_and_extract_tar, download_and_extract_zip
def inception_download(data_dir='inception', version='v5'):
"""Download a pretrained inception network.
Parameters
----------
data_dir : str, optional
Location of the pretrained inception network download.
version : str, optional
Version of the model: ['v3'] or 'v5'.
"""
if version == 'v3':
download_and_extract_tar(
'https://s3.amazonaws.com/cadl/models/inception-2015-12-05.tgz',
data_dir)
return (os.path.join(data_dir, 'classify_image_graph_def.pb'),
os.path.join(data_dir, 'imagenet_synset_to_human_label_map.txt'))
else:
download_and_extract_zip(
'https://s3.amazonaws.com/cadl/models/inception5h.zip', data_dir)
return (os.path.join(data_dir, 'tensorflow_inception_graph.pb'),
os.path.join(data_dir, 'imagenet_comp_graph_label_strings.txt'))
def get_inception_model(data_dir='inception', version='v5'):
"""Get a pretrained inception network.
Parameters
----------
data_dir : str, optional
Location of the pretrained inception network download.
version : str, optional
Version of the model: ['v3'] or 'v5'.
Returns
-------
net : dict
{'graph_def': graph_def, 'labels': synsets}
where the graph_def is a tf.GraphDef and the synsets
map an integer label from 0-1000 to a list of names
"""
# Download the trained net
model, labels = inception_download(data_dir, version)
# Parse the ids and synsets
txt = open(labels).readlines()
synsets = [(key, val.strip()) for key, val in enumerate(txt)]
# Load the saved graph
with gfile.GFile(model, 'rb') as f:
graph_def = tf.GraphDef()
try:
graph_def.ParseFromString(f.read())
except:
            print('try adding PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python ' +
'to environment. e.g.:\n' +
'PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python ipython\n' +
'See here for info: ' +
'https://github.com/tensorflow/tensorflow/issues/582')
return {
'graph_def': graph_def,
'labels': synsets,
'preprocess': preprocess,
'deprocess': deprocess
}
def preprocess(img, crop=True, resize=True, dsize=(299, 299)):
if img.dtype != np.uint8:
img *= 255.0
if crop:
crop = np.min(img.shape[:2])
r = (img.shape[0] - crop) // 2
c = (img.shape[1] - crop) // 2
cropped = img[r: r + crop, c: c + crop]
else:
cropped = img
if resize:
rsz = imresize(cropped, dsize, preserve_range=True)
else:
rsz = cropped
if rsz.ndim == 2:
rsz = rsz[..., np.newaxis]
rsz = rsz.astype(np.float32)
# subtract imagenet mean
return (rsz - 117)
def deprocess(img):
return np.clip(img + 117, 0, 255).astype(np.uint8)
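# Illustrative round-trip note (not in the original module): preprocess() optionally
# center-crops, resizes to `dsize` and subtracts the ImageNet mean of 117 expected by this
# graph, while deprocess() only adds the mean back and clips to uint8. `my_rgb_image` below
# is a placeholder for any H x W x 3 image array supplied by the caller:
#
#   net_input = preprocess(my_rgb_image)[np.newaxis]   # shape (1, 299, 299, 3), float32
#   viewable = deprocess(net_input[0])                 # uint8 image suitable for plt.imshow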
def test_inception():
"""Loads the inception network and applies it to a test image.
"""
with tf.Session() as sess:
net = get_inception_model()
tf.import_graph_def(net['graph_def'], name='inception')
g = tf.get_default_graph()
names = [op.name for op in g.get_operations()]
x = g.get_tensor_by_name(names[0] + ':0')
softmax = g.get_tensor_by_name(names[-3] + ':0')
from skimage import data
img = preprocess(data.coffee())[np.newaxis]
res = np.squeeze(softmax.eval(feed_dict={x: img}))
print([(res[idx], net['labels'][idx])
for idx in res.argsort()[-5:][::-1]])
"""Let's visualize the network's gradient activation
when backpropagated to the original input image. This
is effectively telling us which pixels contribute to the
predicted class or given neuron"""
pools = [name for name in names if 'pool' in name.split('/')[-1]]
fig, axs = plt.subplots(1, len(pools))
for pool_i, poolname in enumerate(pools):
pool = g.get_tensor_by_name(poolname + ':0')
pool.get_shape()
neuron = tf.reduce_max(pool, 1)
saliency = tf.gradients(neuron, x)
neuron_idx = tf.arg_max(pool, 1)
this_res = sess.run([saliency[0], neuron_idx],
feed_dict={x: img})
grad = this_res[0][0] / np.max(np.abs(this_res[0]))
axs[pool_i].imshow((grad * 128 + 128).astype(np.uint8))
axs[pool_i].set_title(poolname)
|
apache-2.0
|
akrherz/iem
|
htdocs/plotting/auto/scripts/p38.py
|
1
|
5635
|
"""Radiation Plot"""
import calendar
import datetime
from collections import OrderedDict
import numpy as np
from pandas.io.sql import read_sql
from pyiem.plot.use_agg import plt
from pyiem.util import get_autoplot_context, get_dbconn
from pyiem.exceptions import NoDataFound
PDICT = OrderedDict(
[
("best", "Use NARR, then MERRA, then HRRR"),
("narr_srad", "Use NARR (1979-2015)"),
("merra_srad", "Use MERRA v2"),
("hrrr_srad", "Use HRRR (2013+)"),
]
)
def get_description():
""" Return a dict describing how to call this plotter """
desc = dict()
desc["data"] = True
desc[
"description"
] = """This plot presents yearly estimates of daily
solar radiation for the 'climodat' stations tracked by the IEM. These
stations only report temperature, precipitation, and snowfall, but many
users are interested in solar radiation data as well. So estimates
are pulled from various reanalysis and forecast model analyses to generate
the numbers presented. There are three sources of solar radiation made
available for this plot. The HRRR data is the only one in 'real-time',
the MERRAv2 lags by about a month, and the NARR is no longer produced."""
desc["arguments"] = [
dict(
type="station",
name="station",
default="IA0200",
label="Select Station:",
network="IACLIMATE",
),
dict(
type="select",
options=PDICT,
default="best",
name="var",
label="Select Radiation Source",
),
dict(
type="year",
name="year",
default=datetime.date.today().year,
min=1979,
label="Select Year to Plot:",
),
]
return desc
def plotter(fdict):
""" Go """
pgconn = get_dbconn("coop")
ctx = get_autoplot_context(fdict, get_description())
station = ctx["station"]
year = ctx["year"]
varname = ctx["var"]
table = "alldata_%s" % (station[:2],)
df = read_sql(
f"""
WITH agg as (
SELECT sday, max(coalesce(narr_srad, 0))
from {table} where
station = %s and year > 1978 GROUP by sday),
obs as (
SELECT sday, day, narr_srad, merra_srad, hrrr_srad
from {table} WHERE
station = %s and year = %s)
SELECT a.sday, a.max as max_narr, o.day, o.narr_srad, o.merra_srad,
o.hrrr_srad from agg a LEFT JOIN obs o on (a.sday = o.sday)
ORDER by a.sday ASC
""",
pgconn,
params=(station, station, year),
index_col="sday",
)
if df.empty:
raise NoDataFound("No Data Found.")
df["max_narr_smooth"] = (
df["max_narr"].rolling(window=7, min_periods=1, center=True).mean()
)
df["best"] = (
df["narr_srad"].fillna(df["merra_srad"]).fillna(df["hrrr_srad"])
)
# hack for leap day here
if df["best"].loc["0229"] is None:
df = df.drop("0229")
fig = plt.figure(figsize=(8, 6))
ax = plt.axes([0.1, 0.1, 0.6, 0.8])
ax.fill_between(
range(len(df.index)),
0,
df["max_narr_smooth"],
color="tan",
label="Max",
)
if not np.isnan(df[varname].max()):
ax.bar(
range(len(df.index)),
df[varname],
fc="g",
ec="g",
label="%s" % (year,),
)
ax.set_xticks((1, 32, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335))
ax.set_xticklabels(calendar.month_abbr[1:])
ax.set_xlim(0, 366)
lyear = datetime.date.today().year - 1
ax.set_title(
("[%s] %s Daily Solar Radiation\n" "1979-%s NARR Climatology w/ %s ")
% (station, ctx["_nt"].sts[station]["name"], lyear, year)
)
ax.legend()
ax.grid(True)
ax.set_ylabel("Shortwave Solar Radiation $MJ$ $d^{-1}$")
# Do the x,y scatter plots
for i, combo in enumerate(
[
("narr_srad", "merra_srad"),
("narr_srad", "hrrr_srad"),
("hrrr_srad", "merra_srad"),
]
):
ax3 = plt.axes([0.78, 0.1 + (0.3 * i), 0.2, 0.2])
xmax = df[combo[0]].max()
xlabel = combo[0].replace("_srad", "").upper()
ylabel = combo[1].replace("_srad", "").upper()
ymax = df[combo[1]].max()
if np.isnan(xmax) or np.isnan(ymax):
ax3.text(
0.5,
0.5,
"%s or %s\nis missing" % (xlabel, ylabel),
ha="center",
va="center",
)
ax3.get_xaxis().set_visible(False)
ax3.get_yaxis().set_visible(False)
continue
c = df[[combo[0], combo[1]]].corr()
ax3.text(
0.5,
1.01,
"Pearson Corr: %.2f" % (c.iat[1, 0],),
fontsize=10,
ha="center",
transform=ax3.transAxes,
)
ax3.scatter(
df[combo[0]], df[combo[1]], edgecolor="None", facecolor="green"
)
maxv = max([ax3.get_ylim()[1], ax3.get_xlim()[1]])
ax3.set_ylim(0, maxv)
ax3.set_xlim(0, maxv)
ax3.plot([0, maxv], [0, maxv], color="k")
ax3.set_xlabel(
r"%s $\mu$=%.1f" % (xlabel, df[combo[0]].mean()),
labelpad=0,
fontsize=12,
)
ax3.set_ylabel(
r"%s $\mu$=%.1f" % (ylabel, df[combo[1]].mean()), fontsize=12
)
return fig, df
if __name__ == "__main__":
plotter(dict(year=2010, network="TNCLIMATE", station="TN6402"))
|
mit
|
gamaanderson/artview
|
artview/components/correlation.py
|
1
|
26276
|
"""
correlation.py
Class instance used to make a correlation Display.
"""
# Load the needed packages
import numpy as np
import scipy
import os
import pyart
from matplotlib.backends.qt_compat import is_pyqt5
if is_pyqt5():
from matplotlib.backends.backend_qt5agg import (
FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
else:
from matplotlib.backends.backend_qt4agg import (
FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
from matplotlib.figure import Figure
from matplotlib.colors import Normalize as mlabNormalize
from matplotlib.colorbar import ColorbarBase as mlabColorbarBase
from matplotlib.pyplot import cm
from ..core import (Variable, Component, common, VariableChoose, QtCore,
QtGui, QtWidgets)
from ..core.points import Points
# Save image file type and DPI (resolution)
IMAGE_EXT = 'png'
DPI = 200
# ========================================================================
class Correlation(Component):
'''
    Class to create a correlation (scatter) plot between two fields of a
    Py-ART Radar structure.
'''
Vradar = None #: see :ref:`shared_variable`
VfieldVertical = None #: see :ref:`shared_variable`
VfieldHorizontal = None #: see :ref:`shared_variable`
Vtilt = None #: see :ref:`shared_variable`
Vgatefilter = None #: see :ref:`shared_variable`
VplotAxes = None #: see :ref:`shared_variable` (no internal use)
@classmethod
def guiStart(self, parent=None):
'''Graphical interface for starting this class'''
kwargs, independent = \
common._SimplePluginStart("Correlation").startDisplay()
kwargs['parent'] = parent
return self(**kwargs), independent
def __init__(self, Vradar=None, VfieldVertical=None, VfieldHorizontal=None,
Vgatefilter=None, name="Correlation", parent=None):
'''
Initialize the class to create display.
Parameters
----------
[Optional]
Vradar : :py:class:`~artview.core.core.Variable` instance
Radar signal variable. If None start new one with None.
VfieldVertical, \
VfieldHorizontal : :py:class:`~artview.core.core.Variable` instance
Field signal variable. If None start new one with empty string.
Vgatefilter : :py:class:`~artview.core.core.Variable` instance
Gatefilter signal variable.
            A value of None will instantiate an empty variable.
name : string
Display window name.
parent : PyQt instance
Parent instance to associate to Display window.
If None, then Qt owns, otherwise associated with parent PyQt
instance.
'''
super(Correlation, self).__init__(name=name, parent=parent)
self.setFocusPolicy(QtCore.Qt.ClickFocus)
# Set up signal, so that DISPLAY can react to
# external (or internal) changes in radar, field,
# lims and tilt (expected to be Core.Variable instances)
# The capital V so people remember using ".value"
if Vradar is None:
self.Vradar = Variable(None)
else:
self.Vradar = Vradar
if VfieldVertical is None:
self.VfieldVertical = Variable('')
else:
self.VfieldVertical = VfieldVertical
if VfieldHorizontal is None:
self.VfieldHorizontal = Variable('')
else:
self.VfieldHorizontal = VfieldHorizontal
if Vgatefilter is None:
self.Vgatefilter = Variable(None)
else:
self.Vgatefilter = Vgatefilter
self.VplotAxes = Variable(None)
self.sharedVariables = {"Vradar": self.NewRadar,
"VfieldVertical": self.NewField,
"VfieldHorizontal": self.NewField,
"Vgatefilter": self.NewGatefilter,
"VplotAxes": None,}
# Connect the components
self.connectAllVariables()
self.parameters = {
"marker": 'o',
"facecolors": "blue",
"edgecolors": "none",
"s": 20,
"color": "red",
"xmin": None,
"xmax": None,
"ymin": None,
"ymax": None,
}
self.parameters_type = [
("marker", str, "marker type"),
("facecolors", str, "marker color"),
("edgecolors", str, "marker edge color"),
("s", int, "marker size"),
("color", str, "line color"),
("xmin", common.float_or_none, "Min X Value"),
("xmax", common.float_or_none, "Max X Value"),
("ymin", common.float_or_none, "Min Y Value"),
("ymax", common.float_or_none, "Max Y Value"),
]
# Set plot title and colorbar units to defaults
self.title = self._get_default_title()
self.unitsVertical, self.unitsHorizontal = self._get_default_units()
# Create display image text dictionary
self.disp_text = {}
# Create a figure for output
self._set_fig_ax()
# Launch the GUI interface
self.LaunchGUI()
# Initialize radar variable
self.NewRadar(None, True)
self.show()
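    # Hedged usage sketch (illustrative only): the component is normally created through
    # the plugin start dialog, e.g.
    #
    #     comp, independent = Correlation.guiStart()
    #
    # or directly as Correlation(Vradar=my_vradar), where my_vradar is a hypothetical
    # artview Variable wrapping a Py-ART radar object.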
####################
# GUI methods #
####################
def LaunchGUI(self):
'''Launches a GUI interface.'''
# Create layout
self.layout = QtWidgets.QGridLayout()
self.layout.setSpacing(8)
# Create the widget
self.central_widget = QtWidgets.QWidget()
self.setCentralWidget(self.central_widget)
self._set_figure_canvas()
self.central_widget.setLayout(self.layout)
# Add buttons along display for user control
self.addButtons()
# Set the status bar to display messages
self.statusbar = self.statusBar()
def setParameters(self):
'''Open set parameters dialog.'''
parm = common.get_options(self.parameters_type, self.parameters)
for key in parm.keys():
self.parameters[key] = parm[key]
self._update_plot()
##################################
# User display interface methods #
##################################
def addButtons(self):
'''Add a series of buttons for user control over display.'''
# Create the Display controls
self._add_displayBoxUI()
# Create the Field controls
self._add_fieldBoxUI()
# Create the Tools controls
#self._add_toolsBoxUI()
# Create the Informational label at top
#self._add_infolabel()
self.layout.addWidget(self.fieldVerticalBox, 0, 0, 1, 2)
label = QtWidgets.QLabel("VS.")
self.layout.addWidget(label, 0, 2, 1, 1)
self.layout.setAlignment(label, QtCore.Qt.AlignHCenter)
self.layout.addWidget(self.fieldHorizontalBox, 0, 3, 1, 2)
self.layout.addWidget(self.dispButton, 0, 6)
self.layout.setAlignment(self.dispButton, QtCore.Qt.AlignRight)
#self.layout.addWidget(self.toolsButton, 0, 3)
#self.layout.addWidget(self.infolabel, 0, 4)
#############################
# Functionality methods #
#############################
def _fillFieldBox(self):
'''Fill in the Field Window Box with current variable names.'''
for box in (self.fieldVerticalBox, self.fieldHorizontalBox):
box.clear()
box.addItem("Field Select")
# Loop through and create each field button
for field in self.fieldnames:
box.addItem(field)
def _fieldVerticalAction(self, text):
'''Define action for Field Button selection.'''
if text == "Field Select":
from .field import FieldButtonWindow
self.fieldbuttonwindow = FieldButtonWindow(
self.Vradar, self.VfieldVertical,
name=self.name+"Vertical Field Selection", parent=self.parent)
else:
self.VfieldVertical.change(str(text))
def _fieldHorizontalAction(self, text):
'''Define action for Field Button selection.'''
if text == "Field Select":
from .field import FieldButtonWindow
self.fieldbuttonwindow = FieldButtonWindow(
self.Vradar, self.VfieldHorizontal,
name=self.name+"Horizontal Field Selection", parent=self.parent)
else:
self.VfieldHorizontal.change(str(text))
def _GateFilterToggleAction(self):
'''Define action for GateFilterToggle menu selection.'''
if self.gatefilterToggle.isChecked():
self.gatefilterToggle.setText("GateFilter On")
else:
self.gatefilterToggle.setText("GateFilter Off")
self._update_plot()
def _title_input(self):
'''Retrieve new plot title.'''
val, entry = common.string_dialog_with_reset(
self.title, "Plot Title", "Title:", self._get_default_title())
if entry is True:
self.title = val
self._update_plot()
def _units_input(self):
'''Retrieve new plot units.'''
val0, entry0 = common.string_dialog_with_reset(
self.unitsVertical, "Plot Units", "Vertical Units:",
self._get_default_units())
val1, entry1 = common.string_dialog_with_reset(
self.unitsHorizontal, "Plot Units", "Horizontal Units:",
self._get_default_units())
if entry0 is True:
self.unitsVertical = val0
if entry1 is True:
self.unitsHorizontal = val1
if entry0 is True or entry1 is True:
self._update_plot()
def _add_ImageText(self):
'''Add a text box to display.'''
from .image_text import ImageTextBox
itext = ImageTextBox(self, parent=self.parent)
return itext
def _add_displayBoxUI(self):
'''Create the Display Options Button menu.'''
parentdir = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir))
config_icon = QtGui.QIcon(os.sep.join(
[parentdir, 'icons', "categories-applications-system-icon.png"]))
self.dispButton = QtWidgets.QPushButton(config_icon, "", self)
self.dispButton.setToolTip("Adjust display properties")
self.dispButton.setFocusPolicy(QtCore.Qt.NoFocus)
dispmenu = QtWidgets.QMenu(self)
self.sweepMenu = dispmenu.addMenu("sweep")
vertical_scale_menu = dispmenu.addMenu("vertical scale")
self.vertical_scale_menu_group = QtWidgets.QActionGroup(self, exclusive=True)
self.vertical_scale_menu_group.triggered.connect(self._update_plot)
# linear
action = self.vertical_scale_menu_group.addAction("linear")
action.setCheckable(True)
action.setChecked(True)
vertical_scale_menu.addAction(action)
# log
action = self.vertical_scale_menu_group.addAction("log")
action.setCheckable(True)
vertical_scale_menu.addAction(action)
horizontal_scale_menu = dispmenu.addMenu("horizontal scale")
self.horizontal_scale_menu_group = QtWidgets.QActionGroup(self, exclusive=True)
self.horizontal_scale_menu_group.triggered.connect(self._update_plot)
# linear
action = self.horizontal_scale_menu_group.addAction("linear")
action.setCheckable(True)
action.setChecked(True)
horizontal_scale_menu.addAction(action)
# log
action = self.horizontal_scale_menu_group.addAction("log")
action.setCheckable(True)
horizontal_scale_menu.addAction(action)
dispTitle = dispmenu.addAction("Change Title")
dispTitle.setToolTip("Change plot title")
dispUnit = dispmenu.addAction("Change Units")
dispUnit.setToolTip("Change units string")
self.gatefilterToggle = QtWidgets.QAction(
'GateFilter On', dispmenu, checkable=True,
triggered=self._GateFilterToggleAction)
dispmenu.addAction(self.gatefilterToggle)
self.gatefilterToggle.setChecked(True)
self.regressionLineToggle = QtWidgets.QAction(
'Linear Regression', dispmenu, checkable=True,
triggered=self._update_plot)
dispmenu.addAction(self.regressionLineToggle)
self.dispImageText = dispmenu.addAction("Add Text to Image")
self.dispImageText.setToolTip("Add Text Box to Image")
dispQuickSave = dispmenu.addAction("Quick Save Image")
dispQuickSave.setShortcut("Ctrl+D")
dispQuickSave.setToolTip(
"Save Image to local directory with default name")
dispSaveFile = dispmenu.addAction("Save Image")
dispSaveFile.setShortcut("Ctrl+S")
dispSaveFile.setStatusTip("Save Image using dialog")
dispmenu.addAction(QtWidgets.QAction("Set Parameters", self,
triggered=self.setParameters))
dispTitle.triggered.connect(self._title_input)
dispUnit.triggered.connect(self._units_input)
self.dispImageText.triggered.connect(self._add_ImageText)
dispQuickSave.triggered.connect(self._quick_savefile)
dispSaveFile.triggered.connect(self._savefile)
self.dispButton.setMenu(dispmenu)
def _add_tiltBoxUI(self):
'''Create the Tilt Selection ComboBox.'''
self.tiltBox = QtWidgets.QComboBox()
self.tiltBox.setFocusPolicy(QtCore.Qt.NoFocus)
self.tiltBox.setToolTip("Select tilt elevation angle to display.\n"
"'Tilt Window' will launch popup.\n"
"Up/Down arrow keys Increase/Decrease tilt.")
self.tiltBox.activated[str].connect(self._tiltAction)
def _add_fieldBoxUI(self):
'''Create the Field Selection ComboBox.'''
self.fieldVerticalBox = QtWidgets.QComboBox()
self.fieldVerticalBox.setFocusPolicy(QtCore.Qt.NoFocus)
self.fieldVerticalBox.setToolTip("Select variable/field in data file.\n"
"'Field Window' will launch popup.\n")
self.fieldVerticalBox.activated[str].connect(self._fieldVerticalAction)
self.fieldHorizontalBox = QtWidgets.QComboBox()
self.fieldHorizontalBox.setFocusPolicy(QtCore.Qt.NoFocus)
self.fieldHorizontalBox.setToolTip("Select variable/field in data file.\n"
"'Field Window' will launch popup.\n")
self.fieldHorizontalBox.activated[str].connect(self._fieldHorizontalAction)
def _add_toolsBoxUI(self):
'''Create the Tools Button menu.'''
self.toolsButton = QtWidgets.QPushButton("Toolbox")
self.toolsButton.setFocusPolicy(QtCore.Qt.NoFocus)
self.toolsButton.setToolTip("Choose a tool to apply")
toolmenu = QtWidgets.QMenu(self)
toolZoomPan = toolmenu.addAction("Zoom/Pan")
toolValueClick = toolmenu.addAction("Click for Value")
toolSelectRegion = toolmenu.addAction("Select a Region of Interest")
toolReset = toolmenu.addAction("Reset Tools")
toolDefault = toolmenu.addAction("Reset File Defaults")
toolZoomPan.triggered.connect(self.toolZoomPanCmd)
toolValueClick.triggered.connect(self.toolValueClickCmd)
toolSelectRegion.triggered.connect(self.toolSelectRegionCmd)
toolReset.triggered.connect(self.toolResetCmd)
toolDefault.triggered.connect(self.toolDefaultCmd)
self.toolsButton.setMenu(toolmenu)
def _select_all_sweeps(self):
''' Check all sweeps if action 'all sweeps' is checked.'''
check = self.sweep_actions[0].isChecked()
for action in self.sweep_actions[1:]:
action.setChecked(check)
self._update_plot()
def _sweep_checked(self, checked):
if checked is False:
self.sweep_actions[0].setChecked(False)
self._update_plot()
########################
# Selection methods #
########################
def NewRadar(self, variable, strong):
'''
Slot for 'ValueChanged' signal of
:py:class:`Vradar <artview.core.core.Variable>`.
This will:
* Update fields and tilts lists and MenuBoxes
* Check radar scan type and reset limits if needed
* Reset units and title
* If strong update: update plot
'''
if self.Vradar.value is None:
self.fieldVerticalBox.clear()
self.fieldHorizontalBox.clear()
self.sweepMenu.clear()
return
# Get the tilt angles
self.rTilts = self.Vradar.value.sweep_number['data'][:]
# Get field names
self.fieldnames = self.Vradar.value.fields.keys()
# Update field and tilt MenuBox
self._fillFieldBox()
self.sweepMenu.clear()
self.sweep_actions = []
action = self.sweepMenu.addAction("all sweeps")
self.sweep_actions.append(action)
action.triggered.connect(self._select_all_sweeps)
action.setCheckable(True)
action.setChecked(True)
self.sweepMenu.addAction(action)
for sweep in range(len(self.rTilts)):
action = self.sweepMenu.addAction("sweep " + str(sweep))
self.sweep_actions.append(action)
action.triggered.connect(self._sweep_checked)
action.setCheckable(True)
action.setChecked(True)
self.sweepMenu.addAction(action)
self.unitsVertical, self.unitsHorizontal = self._get_default_units()
self.title = self._get_default_title()
if strong:
self._update_plot()
def NewField(self, variable, strong):
'''
Slot for 'ValueChanged' signal of
:py:class:`Vfield <artview.core.core.Variable>`.
This will:
* Reset colormap
* Reset units
* Update fields MenuBox
* If strong update: update plot
'''
# XXX differentiate between vertical and horizontal
self.unitsVertical, self.unitsHorizontal = self._get_default_units()
self.title = self._get_default_title()
#idx = self.fieldBox.findText(variable.value)
#self.fieldBox.setCurrentIndex(idx)
if strong:
self._update_plot()
def NewGatefilter(self, variable, strong):
'''
Slot for 'ValueChanged' signal of
:py:class:`Vgatefilter <artview.core.core.Variable>`.
This will:
* If strong update: update plot
'''
if strong:
self._update_plot()
####################
# Plotting methods #
####################
def _set_fig_ax(self):
'''Set the figure and axis to plot.'''
self.XSIZE = 8
self.YSIZE = 8
self.fig = Figure(figsize=(self.XSIZE, self.YSIZE))
self.ax = self.fig.add_axes([0.1, 0.1, 0.8, 0.8])
self.VplotAxes.change(self.ax)
def _set_figure_canvas(self):
'''Set the figure canvas to draw in window area.'''
self.canvas = FigureCanvasQTAgg(self.fig)
# Add the widget to the canvas
self.layout.addWidget(self.canvas, 1, 0, 8, 7)
@staticmethod
def _get_xy_values(radar, field_horizontal, field_vertical,
sweeps, gatefilter):
xvalues = radar.fields[field_horizontal]['data']
yvalues = radar.fields[field_vertical]['data']
if gatefilter is None:
gates = np.ma.getmaskarray(xvalues) | np.ma.getmaskarray(yvalues)
else:
gates = gatefilter.gate_excluded
if sweeps is not None:
sweep_filter = gates | True
for sweep, (start, end) in enumerate(radar.iter_start_end()):
if sweep in sweeps:
sweep_filter[start:end+1,:] = False
gates = gates | sweep_filter
xvalues = np.ma.MaskedArray(xvalues, mask=gates)
yvalues = np.ma.MaskedArray(yvalues, mask=gates)
return xvalues, yvalues
@staticmethod
def plot_correlation(radar, field_horizontal, field_vertical,
sweeps, gatefilter, ax, title, **kwargs):
xvalues, yvalues = Correlation._get_xy_values(
radar, field_horizontal, field_vertical, sweeps, gatefilter)
ax.scatter(xvalues, yvalues, **kwargs)
ax.set_title(title)
@staticmethod
def plot_regression(radar, field_horizontal, field_vertical,
sweeps, gatefilter, ax, vmin, vmax, xscale="linear",
yscale="linear", **kwargs):
xvalues, yvalues = Correlation._get_xy_values(
radar, field_horizontal, field_vertical, sweeps, gatefilter)
if xscale=="log":
xvalues = np.ma.masked_where( xvalues <= 0, xvalues)
xvalues = np.ma.log10(xvalues)
if yscale=="log":
yvalues = np.ma.masked_where( yvalues <= 0, yvalues)
yvalues = np.ma.log10(yvalues)
m, b, r, _, _ = scipy.stats.linregress(xvalues[~xvalues.mask],
yvalues[~xvalues.mask])
if xscale=="log":
x = np.linspace(max(vmin,0.0001),vmax,50)
y = m * np.log10(x) + b
else:
x = np.linspace(vmin,vmax,50)
y = m * x + b
if yscale=="log":
y=10**y
line = ax.plot(x,y, linestyle="--",
label='y = %f x + %f\n'%(m,b) +
'r value = %f'%(r), **kwargs)
ax.legend()
return (m,b)
def _update_plot(self):
'''Draw/Redraw the plot.'''
if self.Vradar.value is None:
return
# Rebuild the correlation scatter plot
self.ax.cla() # Clear the plot axes
self.VplotAxes.update()
if ((self.VfieldVertical.value not in self.fieldnames) or
(self.VfieldHorizontal.value not in self.fieldnames)):
self.canvas.draw()
self.statusbar.setStyleSheet("QStatusBar{padding-left:8px;" +
"background:rgba(255,0,0,255);" +
"color:black;font-weight:bold;}")
self.statusbar.showMessage("Field not Found in Radar", msecs=5000)
return
else:
self.statusbar.setStyleSheet("QStatusBar{padding-left:8px;" +
"background:rgba(0,0,0,0);" +
"color:black;font-weight:bold;}")
self.statusbar.clearMessage()
if self.gatefilterToggle.isChecked():
gatefilter = self.Vgatefilter.value
else:
gatefilter = None
if self.sweep_actions[0].isChecked():
sweeps = None
else:
sweeps = []
for sweep, action in enumerate(self.sweep_actions[1:]):
if action.isChecked():
sweeps.append(sweep)
self.plot_correlation(
self.Vradar.value, self.VfieldHorizontal.value,
self.VfieldVertical.value, sweeps, gatefilter, self.ax, self.title,
**{k: self.parameters[k] for k in
('s','facecolors', 'edgecolors', 'marker')}
)
self.ax.set_xscale(
str(self.horizontal_scale_menu_group.checkedAction().text()))
self.ax.set_yscale(
str(self.vertical_scale_menu_group.checkedAction().text()))
self.ax.set_xlabel(self.unitsHorizontal)
self.ax.set_ylabel(self.unitsVertical)
self.ax.set_xlim(self.parameters["xmin"], self.parameters["xmax"])
self.ax.set_ylim(self.parameters["ymin"], self.parameters["ymax"])
if self.regressionLineToggle.isChecked():
vmin, vmax = self.ax.get_xlim()
self.plot_regression(
self.Vradar.value, self.VfieldHorizontal.value,
self.VfieldVertical.value, sweeps, gatefilter, self.ax,
vmin + 0.05 * (vmax-vmin), vmax - 0.05 * (vmax-vmin),
str(self.horizontal_scale_menu_group.checkedAction().text()),
str(self.vertical_scale_menu_group.checkedAction().text()),
color=self.parameters["color"])
self.canvas.draw()
#########################
# Check methods #
#########################
def _get_default_title(self):
'''Get default plot title.'''
return 'Correlation'
def _get_default_units(self):
'''Get default units for current radar and field.'''
vertical = ' '
horizontal = ' '
if self.Vradar.value is not None:
try:
vertical += self.Vradar.value.fields[
self.VfieldVertical.value]['units']
except:
pass
try:
horizontal += self.Vradar.value.fields[
self.VfieldHorizontal.value]['units']
except:
pass
return (self.VfieldVertical.value + vertical,
self.VfieldHorizontal.value + horizontal)
########################
# Image save methods #
########################
def _quick_savefile(self, PTYPE=IMAGE_EXT):
'''Save the current display to the working directory with a default name.'''
imagename = (str(self.VfieldVertical.value) + "VS." +
str(self.VfieldHorizontal.value) + ".png")
self.canvas.print_figure(os.path.join(os.getcwd(), imagename), dpi=DPI)
self.statusbar.showMessage(
'Saved to %s' % os.path.join(os.getcwd(), imagename))
def _savefile(self, PTYPE=IMAGE_EXT):
'''Save the current display using PyQt dialog interface.'''
imagename = (str(self.VfieldVertical.value) + "VS." +
str(self.VfieldHorizontal.value) + ".png")
file_choices = "PNG (*.png)|*.png"
path = unicode(QtWidgets.QFileDialog.getSaveFileName(
self, 'Save file', imagename, file_choices))
if path:
self.canvas.print_figure(path, dpi=DPI)
self.statusbar.showMessage('Saved to %s' % path)
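# Minimal non-GUI usage sketch for the two static helpers above
# (plot_correlation / plot_regression). It assumes pyart and matplotlib are
# installed; the file name and field names below are placeholders only, not
# values used anywhere else in this plugin.
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    import pyart

    radar = pyart.io.read("example_radar_volume.nc")  # placeholder input file
    fig, ax = plt.subplots()
    # Scatter one field against another over all sweeps, no gate filtering.
    Correlation.plot_correlation(
        radar, "reflectivity", "differential_reflectivity",
        sweeps=None, gatefilter=None, ax=ax, title="ZDR vs. Z", s=5)
    # Overlay a least-squares fit across a chosen x range.
    Correlation.plot_regression(
        radar, "reflectivity", "differential_reflectivity",
        sweeps=None, gatefilter=None, ax=ax, vmin=0.0, vmax=60.0, color="red")
    plt.show()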
|
bsd-3-clause
|
rmcgibbo/scipy
|
scipy/signal/windows.py
|
32
|
53971
|
"""The suite of window functions."""
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy import special, linalg
from scipy.fftpack import fft
from scipy._lib.six import string_types
__all__ = ['boxcar', 'triang', 'parzen', 'bohman', 'blackman', 'nuttall',
'blackmanharris', 'flattop', 'bartlett', 'hanning', 'barthann',
'hamming', 'kaiser', 'gaussian', 'general_gaussian', 'chebwin',
'slepian', 'cosine', 'hann', 'exponential', 'tukey', 'get_window']
def boxcar(M, sym=True):
"""Return a boxcar or rectangular window.
Included for completeness, this is equivalent to no window at all.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
Whether the window is symmetric. (Has no effect for boxcar.)
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.boxcar(51)
>>> plt.plot(window)
>>> plt.title("Boxcar window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the boxcar window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
return np.ones(M, float)
def triang(M, sym=True):
"""Return a triangular window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.triang(51)
>>> plt.plot(window)
>>> plt.title("Triangular window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the triangular window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(1, (M + 1) // 2 + 1)
if M % 2 == 0:
w = (2 * n - 1.0) / M
w = np.r_[w, w[::-1]]
else:
w = 2 * n / (M + 1.0)
w = np.r_[w, w[-2::-1]]
if not sym and not odd:
w = w[:-1]
return w
def parzen(M, sym=True):
"""Return a Parzen window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.parzen(51)
>>> plt.plot(window)
>>> plt.title("Parzen window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Parzen window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(-(M - 1) / 2.0, (M - 1) / 2.0 + 0.5, 1.0)
na = np.extract(n < -(M - 1) / 4.0, n)
nb = np.extract(abs(n) <= (M - 1) / 4.0, n)
wa = 2 * (1 - np.abs(na) / (M / 2.0)) ** 3.0
wb = (1 - 6 * (np.abs(nb) / (M / 2.0)) ** 2.0 +
6 * (np.abs(nb) / (M / 2.0)) ** 3.0)
w = np.r_[wa, wb, wa[::-1]]
if not sym and not odd:
w = w[:-1]
return w
def bohman(M, sym=True):
"""Return a Bohman window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.bohman(51)
>>> plt.plot(window)
>>> plt.title("Bohman window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Bohman window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
fac = np.abs(np.linspace(-1, 1, M)[1:-1])
w = (1 - fac) * np.cos(np.pi * fac) + 1.0 / np.pi * np.sin(np.pi * fac)
w = np.r_[0, w, 0]
if not sym and not odd:
w = w[:-1]
return w
def blackman(M, sym=True):
r"""
Return a Blackman window.
The Blackman window is a taper formed by using the first three terms of
a summation of cosines. It was designed to have close to the minimal
leakage possible. It is close to optimal, only slightly worse than a
Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \cos(2\pi n/(M-1)) + 0.08 \cos(4\pi n/(M-1))
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the Kaiser window.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.blackman(51)
>>> plt.plot(window)
>>> plt.title("Blackman window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Blackman window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's blackman function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = (0.42 - 0.5 * np.cos(2.0 * np.pi * n / (M - 1)) +
0.08 * np.cos(4.0 * np.pi * n / (M - 1)))
if not sym and not odd:
w = w[:-1]
return w
def nuttall(M, sym=True):
"""Return a minimum 4-term Blackman-Harris window according to Nuttall.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.nuttall(51)
>>> plt.plot(window)
>>> plt.title("Nuttall window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Nuttall window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
a = [0.3635819, 0.4891775, 0.1365995, 0.0106411]
n = np.arange(0, M)
fac = n * 2 * np.pi / (M - 1.0)
w = (a[0] - a[1] * np.cos(fac) +
a[2] * np.cos(2 * fac) - a[3] * np.cos(3 * fac))
if not sym and not odd:
w = w[:-1]
return w
def blackmanharris(M, sym=True):
"""Return a minimum 4-term Blackman-Harris window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.blackmanharris(51)
>>> plt.plot(window)
>>> plt.title("Blackman-Harris window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Blackman-Harris window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
a = [0.35875, 0.48829, 0.14128, 0.01168]
n = np.arange(0, M)
fac = n * 2 * np.pi / (M - 1.0)
w = (a[0] - a[1] * np.cos(fac) +
a[2] * np.cos(2 * fac) - a[3] * np.cos(3 * fac))
if not sym and not odd:
w = w[:-1]
return w
def flattop(M, sym=True):
"""Return a flat top window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.flattop(51)
>>> plt.plot(window)
>>> plt.title("Flat top window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the flat top window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
a = [0.2156, 0.4160, 0.2781, 0.0836, 0.0069]
n = np.arange(0, M)
fac = n * 2 * np.pi / (M - 1.0)
w = (a[0] - a[1] * np.cos(fac) +
a[2] * np.cos(2 * fac) - a[3] * np.cos(3 * fac) +
a[4] * np.cos(4 * fac))
if not sym and not odd:
w = w[:-1]
return w
def bartlett(M, sym=True):
r"""
Return a Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The triangular window, with the first and last samples equal to zero
and the maximum value normalized to 1 (though the value 1 does not
appear if `M` is even and `sym` is True).
Notes
-----
The Bartlett window is defined as
.. math:: w(n) = \frac{2}{M-1} \left(
\frac{M-1}{2} - \left|n - \frac{M-1}{2}\right|
\right)
Most references to the Bartlett window come from the signal
processing literature, where it is used as one of many windowing
functions for smoothing values. Note that convolution with this
window produces linear interpolation. It is also known as an
apodization (which means "removing the foot", i.e. smoothing
discontinuities at the beginning and end of the sampled signal) or
tapering function. The Fourier transform of the Bartlett is the product
of two sinc functions.
Note the excellent discussion in Kanasewich.
References
----------
.. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 109-110.
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.bartlett(51)
>>> plt.plot(window)
>>> plt.title("Bartlett window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Bartlett window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's bartlett function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = np.where(np.less_equal(n, (M - 1) / 2.0),
2.0 * n / (M - 1), 2.0 - 2.0 * n / (M - 1))
if not sym and not odd:
w = w[:-1]
return w
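# Quick numerical contrast with `triang` (illustrative note): `bartlett` pins
# the first and last samples to zero, while `triang` keeps them non-zero, e.g.
# bartlett(5) gives [0, 0.5, 1, 0.5, 0] whereas triang(5) gives approximately
# [0.33, 0.67, 1.0, 0.67, 0.33].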
def hann(M, sym=True):
r"""
Return a Hann window.
The Hann window is a taper formed by using a raised cosine or sine-squared
with ends that touch zero.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Hann window is defined as
.. math:: w(n) = 0.5 - 0.5 \cos\left(\frac{2\pi{n}}{M-1}\right)
\qquad 0 \leq n \leq M-1
The window was named for Julius von Hann, an Austrian meteorologist. It is
also known as the Cosine Bell. It is sometimes erroneously referred to as
the "Hanning" window, from the use of "hann" as a verb in the original
paper and confusion with the very similar Hamming window.
Most references to the Hann window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.hann(51)
>>> plt.plot(window)
>>> plt.title("Hann window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Hann window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's hanning function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = 0.5 - 0.5 * np.cos(2.0 * np.pi * n / (M - 1))
if not sym and not odd:
w = w[:-1]
return w
hanning = hann
def tukey(M, alpha=0.5, sym=True):
r"""Return a Tukey window, also known as a tapered cosine window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
alpha : float, optional
Shape parameter of the Tukey window, representing the fraction of the
window inside the cosine tapered region.
If zero, the Tukey window is equivalent to a rectangular window.
If one, the Tukey window is equivalent to a Hann window.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
References
----------
.. [1] Harris, Fredric J. (Jan 1978). "On the use of Windows for Harmonic
Analysis with the Discrete Fourier Transform". Proceedings of the
IEEE 66 (1): 51-83. doi:10.1109/PROC.1978.10837
.. [2] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function#Tukey_window
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.tukey(51)
>>> plt.plot(window)
>>> plt.title("Tukey window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.ylim([0, 1.1])
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Tukey window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
if alpha <= 0:
return np.ones(M, 'd')
elif alpha >= 1.0:
return hann(M, sym=sym)
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
width = int(np.floor(alpha*(M-1)/2.0))
n1 = n[0:width+1]
n2 = n[width+1:M-width-1]
n3 = n[M-width-1:]
w1 = 0.5 * (1 + np.cos(np.pi * (-1 + 2.0*n1/alpha/(M-1))))
w2 = np.ones(n2.shape)
w3 = 0.5 * (1 + np.cos(np.pi * (-2.0/alpha + 1 + 2.0*n3/alpha/(M-1))))
w = np.concatenate((w1, w2, w3))
if not sym and not odd:
w = w[:-1]
return w
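# Sanity check of the alpha limits documented above (illustrative, run manually):
#
#     import numpy as np
#     assert np.allclose(tukey(51, alpha=0), boxcar(51))   # rectangular limit
#     assert np.allclose(tukey(51, alpha=1), hann(51))     # Hann limit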
def barthann(M, sym=True):
"""Return a modified Bartlett-Hann window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.barthann(51)
>>> plt.plot(window)
>>> plt.title("Bartlett-Hann window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Bartlett-Hann window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
fac = np.abs(n / (M - 1.0) - 0.5)
w = 0.62 - 0.48 * fac + 0.38 * np.cos(2 * np.pi * fac)
if not sym and not odd:
w = w[:-1]
return w
def hamming(M, sym=True):
r"""Return a Hamming window.
The Hamming window is a taper formed by using a raised cosine with
non-zero endpoints, optimized to minimize the nearest side lobe.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46 \cos\left(\frac{2\pi{n}}{M-1}\right)
\qquad 0 \leq n \leq M-1
The Hamming was named for R. W. Hamming, an associate of J. W. Tukey and
is described in Blackman and Tukey. It was recommended for smoothing the
truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.hamming(51)
>>> plt.plot(window)
>>> plt.title("Hamming window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Hamming window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's hamming function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = 0.54 - 0.46 * np.cos(2.0 * np.pi * n / (M - 1))
if not sym and not odd:
w = w[:-1]
return w
def kaiser(M, beta, sym=True):
r"""Return a Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
beta : float
Shape parameter, determines trade-off between main-lobe width and
side lobe level. As beta gets large, the window narrows.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Kaiser window is defined as
.. math:: w(n) = I_0\left( \beta \sqrt{1-\frac{4n^2}{(M-1)^2}}
\right)/I_0(\beta)
with
.. math:: \quad -\frac{M-1}{2} \leq n \leq \frac{M-1}{2},
where :math:`I_0` is the modified zeroth-order Bessel function.
The Kaiser was named for Jim Kaiser, who discovered a simple approximation
to the DPSS window based on Bessel functions.
The Kaiser window is a very good approximation to the Digital Prolate
Spheroidal Sequence, or Slepian window, which is the transform which
maximizes the energy in the main lobe of the window relative to total
energy.
The Kaiser can approximate many other windows by varying the beta
parameter.
==== =======================
beta Window shape
==== =======================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hann
8.6 Similar to a Blackman
==== =======================
A beta value of 14 is probably a good starting point. Note that as beta
gets large, the window narrows, and so the number of samples needs to be
large enough to sample the increasingly narrow spike, otherwise NaNs will
get returned.
Most references to the Kaiser window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
John Wiley and Sons, New York, (1966).
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.kaiser(51, beta=14)
>>> plt.plot(window)
>>> plt.title(r"Kaiser window ($\beta$=14)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Frequency response of the Kaiser window ($\beta$=14)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's kaiser function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
alpha = (M - 1) / 2.0
w = (special.i0(beta * np.sqrt(1 - ((n - alpha) / alpha) ** 2.0)) /
special.i0(beta))
if not sym and not odd:
w = w[:-1]
return w
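# Sanity check of the beta table above (illustrative): with beta = 0 the Bessel
# ratio i0(0)/i0(0) is identically 1, so the Kaiser window reduces to a
# rectangle:
#
#     import numpy as np
#     assert np.allclose(kaiser(51, beta=0), boxcar(51))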
def gaussian(M, std, sym=True):
r"""Return a Gaussian window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
std : float
The standard deviation, sigma.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Gaussian window is defined as
.. math:: w(n) = e^{ -\frac{1}{2}\left(\frac{n}{\sigma}\right)^2 }
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.gaussian(51, std=7)
>>> plt.plot(window)
>>> plt.title(r"Gaussian window ($\sigma$=7)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Frequency response of the Gaussian window ($\sigma$=7)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M) - (M - 1.0) / 2.0
sig2 = 2 * std * std
w = np.exp(-n ** 2 / sig2)
if not sym and not odd:
w = w[:-1]
return w
def general_gaussian(M, p, sig, sym=True):
r"""Return a window with a generalized Gaussian shape.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
p : float
Shape parameter. p = 1 is identical to `gaussian`, p = 0.5 is
the same shape as the Laplace distribution.
sig : float
The standard deviation, sigma.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The generalized Gaussian window is defined as
.. math:: w(n) = e^{ -\frac{1}{2}\left|\frac{n}{\sigma}\right|^{2p} }
the half-power point is at
.. math:: (2 \log(2))^{1/(2 p)} \sigma
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.general_gaussian(51, p=1.5, sig=7)
>>> plt.plot(window)
>>> plt.title(r"Generalized Gaussian window (p=1.5, $\sigma$=7)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Freq. resp. of the gen. Gaussian window (p=1.5, $\sigma$=7)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M) - (M - 1.0) / 2.0
w = np.exp(-0.5 * np.abs(n / sig) ** (2 * p))
if not sym and not odd:
w = w[:-1]
return w
# `chebwin` contributed by Kumar Appaiah.
def chebwin(M, at, sym=True):
r"""Return a Dolph-Chebyshev window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
at : float
Attenuation (in dB).
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value always normalized to 1
Notes
-----
This window optimizes for the narrowest main lobe width for a given order
`M` and sidelobe equiripple attenuation `at`, using Chebyshev
polynomials. It was originally developed by Dolph to optimize the
directionality of radio antenna arrays.
Unlike most windows, the Dolph-Chebyshev is defined in terms of its
frequency response:
.. math:: W(k) = \frac
{\cos\{M \cos^{-1}[\beta \cos(\frac{\pi k}{M})]\}}
{\cosh[M \cosh^{-1}(\beta)]}
where
.. math:: \beta = \cosh \left [\frac{1}{M}
\cosh^{-1}(10^\frac{A}{20}) \right ]
and 0 <= abs(k) <= M-1. A is the attenuation in decibels (`at`).
The time domain window is then generated using the IFFT, so
power-of-two `M` are the fastest to generate, and prime number `M` are
the slowest.
The equiripple condition in the frequency domain creates impulses in the
time domain, which appear at the ends of the window.
References
----------
.. [1] C. Dolph, "A current distribution for broadside arrays which
optimizes the relationship between beam width and side-lobe level",
Proceedings of the IEEE, Vol. 34, Issue 6
.. [2] Peter Lynch, "The Dolph-Chebyshev Window: A Simple Optimal Filter",
American Meteorological Society (April 1997)
http://mathsci.ucd.ie/~plynch/Publications/Dolph.pdf
.. [3] F. J. Harris, "On the use of windows for harmonic analysis with the
discrete Fourier transforms", Proceedings of the IEEE, Vol. 66,
No. 1, January 1978
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.chebwin(51, at=100)
>>> plt.plot(window)
>>> plt.title("Dolph-Chebyshev window (100 dB)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Dolph-Chebyshev window (100 dB)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if np.abs(at) < 45:
warnings.warn("This window is not suitable for spectral analysis "
"for attenuation values lower than about 45dB because "
"the equivalent noise bandwidth of a Chebyshev window "
"does not grow monotonically with increasing sidelobe "
"attenuation when the attenuation is smaller than "
"about 45 dB.")
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
# compute the parameter beta
order = M - 1.0
beta = np.cosh(1.0 / order * np.arccosh(10 ** (np.abs(at) / 20.)))
k = np.r_[0:M] * 1.0
x = beta * np.cos(np.pi * k / M)
# Find the window's DFT coefficients
# Use analytic definition of Chebyshev polynomial instead of expansion
# from scipy.special. Using the expansion in scipy.special leads to errors.
p = np.zeros(x.shape)
p[x > 1] = np.cosh(order * np.arccosh(x[x > 1]))
p[x < -1] = (1 - 2 * (order % 2)) * np.cosh(order * np.arccosh(-x[x < -1]))
p[np.abs(x) <= 1] = np.cos(order * np.arccos(x[np.abs(x) <= 1]))
# Appropriate IDFT and filling up
# depending on even/odd M
if M % 2:
w = np.real(fft(p))
n = (M + 1) // 2
w = w[:n]
w = np.concatenate((w[n - 1:0:-1], w))
else:
p = p * np.exp(1.j * np.pi / M * np.r_[0:M])
w = np.real(fft(p))
n = M // 2 + 1
w = np.concatenate((w[n - 1:0:-1], w[1:n]))
w = w / max(w)
if not sym and not odd:
w = w[:-1]
return w
def slepian(M, width, sym=True):
"""Return a digital Slepian (DPSS) window.
Used to maximize the energy concentration in the main lobe. Also called
the digital prolate spheroidal sequence (DPSS).
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
width : float
Bandwidth
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value always normalized to 1
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.slepian(51, width=0.3)
>>> plt.plot(window)
>>> plt.title("Slepian (DPSS) window (BW=0.3)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Slepian window (BW=0.3)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
# our width is the full bandwidth
width = width / 2
# to match the old version
width = width / 2
m = np.arange(M, dtype='d')
H = np.zeros((2, M))
H[0, 1:] = m[1:] * (M - m[1:]) / 2
H[1, :] = ((M - 1 - 2 * m) / 2)**2 * np.cos(2 * np.pi * width)
_, win = linalg.eig_banded(H, select='i', select_range=(M-1, M-1))
win = win.ravel() / win.max()
if not sym and not odd:
win = win[:-1]
return win
def cosine(M, sym=True):
"""Return a window with a simple cosine shape.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
.. versionadded:: 0.13.0
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.cosine(51)
>>> plt.plot(window)
>>> plt.title("Cosine window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the cosine window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
>>> plt.show()
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
w = np.sin(np.pi / M * (np.arange(0, M) + .5))
if not sym and not odd:
w = w[:-1]
return w
def exponential(M, center=None, tau=1., sym=True):
r"""Return an exponential (or Poisson) window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
center : float, optional
Parameter defining the center location of the window function.
The default value if not given is ``center = (M-1) / 2``. This
parameter must take its default value for symmetric windows.
tau : float, optional
Parameter defining the decay. For ``center = 0`` use
``tau = -(M-1) / ln(x)`` if ``x`` is the fraction of the window
remaining at the end.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Exponential window is defined as
.. math:: w(n) = e^{-|n-center| / \tau}
References
----------
S. Gade and H. Herlufsen, "Windows to FFT analysis (Part I)",
Technical Review 3, Bruel & Kjaer, 1987.
Examples
--------
Plot the symmetric window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> M = 51
>>> tau = 3.0
>>> window = signal.exponential(M, tau=tau)
>>> plt.plot(window)
>>> plt.title("Exponential Window (tau=3.0)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -35, 0])
>>> plt.title("Frequency response of the Exponential window (tau=3.0)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
This function can also generate non-symmetric windows:
>>> tau2 = -(M-1) / np.log(0.01)
>>> window2 = signal.exponential(M, 0, tau2, False)
>>> plt.figure()
>>> plt.plot(window2)
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
"""
if sym and center is not None:
raise ValueError("If sym==True, center must be None.")
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
if center is None:
center = (M-1) / 2
n = np.arange(0, M)
w = np.exp(-np.abs(n-center) / tau)
if not sym and not odd:
w = w[:-1]
return w
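# A quick sanity sketch (hypothetical helper, not part of the module) of the
# tau guidance in the docstring above: with center=0 and tau = -(M-1)/ln(x),
# the window decays to the fraction x at its last sample.
def _check_exponential_tau(M=51, x=0.01):
    tau = -(M - 1) / np.log(x)
    w = exponential(M, center=0, tau=tau, sym=False)
    return np.isclose(w[-1], x)              # expected: True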
_win_equiv_raw = {
('barthann', 'brthan', 'bth'): (barthann, False),
('bartlett', 'bart', 'brt'): (bartlett, False),
('blackman', 'black', 'blk'): (blackman, False),
('blackmanharris', 'blackharr', 'bkh'): (blackmanharris, False),
('bohman', 'bman', 'bmn'): (bohman, False),
('boxcar', 'box', 'ones',
'rect', 'rectangular'): (boxcar, False),
('chebwin', 'cheb'): (chebwin, True),
('cosine', 'halfcosine'): (cosine, False),
('exponential', 'poisson'): (exponential, True),
('flattop', 'flat', 'flt'): (flattop, False),
('gaussian', 'gauss', 'gss'): (gaussian, True),
('general gaussian', 'general_gaussian',
'general gauss', 'general_gauss', 'ggs'): (general_gaussian, True),
('hamming', 'hamm', 'ham'): (hamming, False),
('hanning', 'hann', 'han'): (hann, False),
('kaiser', 'ksr'): (kaiser, True),
('nuttall', 'nutl', 'nut'): (nuttall, False),
('parzen', 'parz', 'par'): (parzen, False),
('slepian', 'slep', 'optimal', 'dpss', 'dss'): (slepian, True),
('triangle', 'triang', 'tri'): (triang, False),
('tukey', 'tuk'): (tukey, True),
}
# Fill dict with all valid window name strings
_win_equiv = {}
for k, v in _win_equiv_raw.items():
for key in k:
_win_equiv[key] = v[0]
# Keep track of which windows need additional parameters
_needs_param = set()
for k, v in _win_equiv_raw.items():
if v[1]:
_needs_param.update(k)
def get_window(window, Nx, fftbins=True):
"""
Return a window.
Parameters
----------
window : string, float, or tuple
The type of window to create. See below for more details.
Nx : int
The number of samples in the window.
fftbins : bool, optional
        If True, create a "periodic" window ready to use with ifftshift
        and to be multiplied by the result of an FFT (see also fftfreq).
Returns
-------
get_window : ndarray
Returns a window of length `Nx` and type `window`
Notes
-----
Window types:
boxcar, triang, blackman, hamming, hann, bartlett, flattop, parzen,
bohman, blackmanharris, nuttall, barthann, kaiser (needs beta),
gaussian (needs std), general_gaussian (needs power, width),
slepian (needs width), chebwin (needs attenuation)
exponential (needs decay scale), tukey (needs taper fraction)
If the window requires no parameters, then `window` can be a string.
If the window requires parameters, then `window` must be a tuple
with the first argument the string name of the window, and the next
arguments the needed parameters.
If `window` is a floating point number, it is interpreted as the beta
parameter of the kaiser window.
Each of the window types listed above is also the name of
a function that can be called directly to create a window of
that type.
Examples
--------
>>> from scipy import signal
>>> signal.get_window('triang', 7)
array([ 0.25, 0.5 , 0.75, 1. , 0.75, 0.5 , 0.25])
>>> signal.get_window(('kaiser', 4.0), 9)
array([ 0.08848053, 0.32578323, 0.63343178, 0.89640418, 1. ,
0.89640418, 0.63343178, 0.32578323, 0.08848053])
>>> signal.get_window(4.0, 9)
array([ 0.08848053, 0.32578323, 0.63343178, 0.89640418, 1. ,
0.89640418, 0.63343178, 0.32578323, 0.08848053])
"""
sym = not fftbins
try:
beta = float(window)
except (TypeError, ValueError):
args = ()
if isinstance(window, tuple):
winstr = window[0]
if len(window) > 1:
args = window[1:]
elif isinstance(window, string_types):
if window in _needs_param:
raise ValueError("The '" + window + "' window needs one or "
"more parameters -- pass a tuple.")
else:
winstr = window
else:
raise ValueError("%s as window type is not supported." %
str(type(window)))
try:
winfunc = _win_equiv[winstr]
except KeyError:
raise ValueError("Unknown window type.")
params = (Nx,) + args + (sym,)
else:
winfunc = kaiser
params = (Nx, beta, sym)
return winfunc(*params)
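# A small consistency sketch (hypothetical helper, not part of the module):
# get_window with fftbins=True dispatches to the named window function with
# sym=False, so both paths below should produce the same periodic window.
def _check_get_window_dispatch(Nx=8):
    direct = hann(Nx, sym=False)
    via_lookup = get_window('hann', Nx, fftbins=True)
    return np.allclose(direct, via_lookup)   # expected: True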
|
bsd-3-clause
|
PatrickChrist/scikit-learn
|
sklearn/utils/tests/test_extmath.py
|
130
|
16270
|
# Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.extmath import density
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import norm, squared_norm
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import row_norms
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.utils.extmath import log_logistic
from sklearn.utils.extmath import fast_dot, _fast_dot
from sklearn.utils.extmath import svd_flip
from sklearn.utils.extmath import _batch_mean_variance_update
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.datasets.samples_generator import make_low_rank_matrix
def test_density():
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 5))
X[1, 2] = 0
X[5, 3] = 0
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_coo = sparse.coo_matrix(X)
X_lil = sparse.lil_matrix(X)
for X_ in (X_csr, X_csc, X_coo, X_lil):
assert_equal(density(X_), density(X))
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis)
assert_true(np.all(mode == mode2))
assert_true(np.all(score == score2))
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_array_equal(mode, mode_result)
assert_array_almost_equal(score.ravel(), w[:, :5].sum(1))
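# A reference sketch of what weighted_mode computes along axis=1 (this is not
# the sklearn implementation): for each row, accumulate the weights per value
# and report the value with the largest total weight, plus that total.
def _naive_weighted_mode_rows(x, w):
    modes, scores = [], []
    for row_x, row_w in zip(x, w):
        totals = {}
        for value, weight in zip(row_x, row_w):
            totals[value] = totals.get(value, 0.0) + weight
        best = max(totals, key=totals.get)
        modes.append(best)
        scores.append(totals[best])
    return np.array(modes), np.array(scores)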
def test_logsumexp():
# Try to add some smallish numbers in logspace
x = np.array([1e-40] * 1000000)
logx = np.log(x)
assert_almost_equal(np.exp(logsumexp(logx)), x.sum())
X = np.vstack([x, x])
logX = np.vstack([logx, logx])
assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
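# The numerically stable trick behind logsumexp, sketched for the 1-D case
# exercised above (a reference only, not sklearn's implementation): subtract
# the maximum before exponentiating so the exp() terms cannot all underflow.
def _naive_logsumexp(logx):
    m = logx.max()
    return m + np.log(np.exp(logx - m).sum())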
def test_randomized_svd_low_rank():
# Check that extmath.randomized_svd is consistent with linalg.svd
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
U, s, V = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_equal(Ua.shape, (n_samples, k))
assert_equal(sa.shape, (k,))
assert_equal(Va.shape, (k, n_features))
# ensure that the singular values of both methods are equal up to the real
# rank of the matrix
assert_almost_equal(s[:k], sa)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va))
# check the sparse matrix representation
X = sparse.csr_matrix(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_almost_equal(s[:rank], sa[:rank])
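# A bare-bones reference sketch of the randomized SVD idea exercised by these
# tests (after Halko et al.); sklearn's randomized_svd adds refinements on top
# of this, e.g. the transpose heuristic and sign flipping used further below.
# Dense X is assumed for simplicity.
def _naive_randomized_svd(X, k, n_iter=0, random_state=0):
    rng = np.random.RandomState(random_state)
    Q = rng.normal(size=(X.shape[1], k))
    Y = X.dot(Q)                         # sample the range of X
    for _ in range(n_iter):              # optional power iterations sharpen
        Y = X.dot(X.T.dot(Y))            # the spectrum against noise
    Q, _ = np.linalg.qr(Y)               # orthonormal basis for that range
    B = Q.T.dot(X)                       # project X onto the basis
    Uhat, s, V = np.linalg.svd(B, full_matrices=False)
    return Q.dot(Uhat), s, V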
def test_norm_squared_norm():
X = np.random.RandomState(42).randn(50, 63)
X *= 100 # check stability
X += 200
assert_almost_equal(np.linalg.norm(X.ravel()), norm(X))
assert_almost_equal(norm(X) ** 2, squared_norm(X), decimal=6)
assert_almost_equal(np.linalg.norm(X), np.sqrt(squared_norm(X)), decimal=6)
def test_row_norms():
X = np.random.RandomState(42).randn(100, 100)
sq_norm = (X ** 2).sum(axis=1)
assert_array_almost_equal(sq_norm, row_norms(X, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X))
Xcsr = sparse.csr_matrix(X, dtype=np.float32)
assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr))
def test_randomized_svd_low_rank_with_noise():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
    # generate a matrix X with approximate effective rank `rank` and an
# important noisy component
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.05)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
    # the iterated power method helps to get rid of the noise:
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
    # let us try again without a low-rank component: just regularly but slowly
# decreasing singular values: the rank of the data matrix is infinite
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=1.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.1)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is still managing to get most of the structure
# at the requested rank
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
    # Check that transposing the design matrix has limited impact
n_samples = 100
n_features = 500
rank = 4
k = 10
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
random_state=0)
U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
random_state=0)
U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
random_state=0)
U4, s4, V4 = linalg.svd(X, full_matrices=False)
assert_almost_equal(s1, s4[:k], decimal=3)
assert_almost_equal(s2, s4[:k], decimal=3)
assert_almost_equal(s3, s4[:k], decimal=3)
assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
# in this case 'auto' is equivalent to transpose
assert_almost_equal(s2, s3)
def test_svd_flip():
# Check that svd_flip works in both situations, and reconstructs input.
rs = np.random.RandomState(1999)
n_samples = 20
n_features = 10
X = rs.randn(n_samples, n_features)
# Check matrix reconstruction
U, S, V = linalg.svd(X, full_matrices=False)
U1, V1 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)
# Check transposed matrix reconstruction
XT = X.T
U, S, V = linalg.svd(XT, full_matrices=False)
U2, V2 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)
# Check that different flip methods are equivalent under reconstruction
U_flip1, V_flip1 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6)
U_flip2, V_flip2 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6)
def test_randomized_svd_sign_flip():
a = np.array([[2.0, 0.0], [0.0, 1.0]])
u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41)
for seed in range(10):
u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed)
assert_almost_equal(u1, u2)
assert_almost_equal(v1, v2)
assert_almost_equal(np.dot(u2 * s2, v2), a)
assert_almost_equal(np.dot(u2.T, u2), np.eye(2))
assert_almost_equal(np.dot(v2.T, v2), np.eye(2))
def test_cartesian():
# Check if cartesian product delivers the right results
axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
true_out = np.array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
out = cartesian(axes)
assert_array_equal(true_out, out)
# check single axis
x = np.arange(3)
assert_array_equal(x[:, np.newaxis], cartesian((x,)))
def test_logistic_sigmoid():
# Check correctness and robustness of logistic sigmoid implementation
naive_logistic = lambda x: 1 / (1 + np.exp(-x))
naive_log_logistic = lambda x: np.log(naive_logistic(x))
x = np.linspace(-2, 2, 50)
assert_array_almost_equal(log_logistic(x), naive_log_logistic(x))
extreme_x = np.array([-100., 100.])
assert_array_almost_equal(log_logistic(extreme_x), [-100, 0])
def test_fast_dot():
# Check fast dot blas wrapper function
if fast_dot is np.dot:
return
rng = np.random.RandomState(42)
A = rng.random_sample([2, 10])
B = rng.random_sample([2, 10])
try:
linalg.get_blas_funcs(['gemm'])[0]
has_blas = True
except (AttributeError, ValueError):
has_blas = False
if has_blas:
# Test _fast_dot for invalid input.
# Maltyped data.
for dt1, dt2 in [['f8', 'f4'], ['i4', 'i4']]:
assert_raises(ValueError, _fast_dot, A.astype(dt1),
B.astype(dt2).T)
# Malformed data.
        ## zero-sized array
E = np.empty(0)
assert_raises(ValueError, _fast_dot, E, E)
## ndim == 1
assert_raises(ValueError, _fast_dot, A, A[0])
## ndim > 2
assert_raises(ValueError, _fast_dot, A.T, np.array([A, A]))
## min(shape) == 1
assert_raises(ValueError, _fast_dot, A, A[0, :][None, :])
# test for matrix mismatch error
assert_raises(ValueError, _fast_dot, A, A)
# Test cov-like use case + dtypes.
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
# col < row
C = np.dot(A.T, A)
C_ = fast_dot(A.T, A)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A, B.T)
C_ = fast_dot(A, B.T)
assert_almost_equal(C, C_, decimal=5)
# Test square matrix * rectangular use case.
A = rng.random_sample([2, 2])
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
C = np.dot(A, B)
C_ = fast_dot(A, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
if has_blas:
for x in [np.array([[d] * 10] * 2) for d in [np.inf, np.nan]]:
assert_raises(ValueError, _fast_dot, x, x.T)
def test_incremental_variance_update_formulas():
# Test Youngs and Cramer incremental variance formulas.
# Doggie data from http://www.mathsisfun.com/data/standard-deviation.html
A = np.array([[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300]]).T
idx = 2
X1 = A[:idx, :]
X2 = A[idx:, :]
old_means = X1.mean(axis=0)
old_variances = X1.var(axis=0)
old_sample_count = X1.shape[0]
final_means, final_variances, final_count = _batch_mean_variance_update(
X2, old_means, old_variances, old_sample_count)
assert_almost_equal(final_means, A.mean(axis=0), 6)
assert_almost_equal(final_variances, A.var(axis=0), 6)
assert_almost_equal(final_count, A.shape[0])
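# A reference sketch of the incremental update being tested (not the sklearn
# implementation): merge the old running mean/variance with a new batch using
# the standard pairwise / Youngs-Cramer combination of sums of squared
# deviations.
def _naive_mean_variance_update(X_new, old_mean, old_var, old_n):
    new_n = X_new.shape[0]
    n = old_n + new_n
    new_mean = X_new.mean(axis=0)
    merged_mean = (old_n * old_mean + new_n * new_mean) / float(n)
    old_ssd = old_var * old_n                       # sums of squared deviations
    new_ssd = ((X_new - new_mean) ** 2).sum(axis=0)
    correction = (old_n * new_n / float(n)) * (old_mean - new_mean) ** 2
    merged_var = (old_ssd + new_ssd + correction) / float(n)
    return merged_mean, merged_var, n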
def test_incremental_variance_ddof():
# Test that degrees of freedom parameter for calculations are correct.
rng = np.random.RandomState(1999)
X = rng.randn(50, 10)
n_samples, n_features = X.shape
for batch_size in [11, 20, 37]:
steps = np.arange(0, X.shape[0], batch_size)
if steps[-1] != X.shape[0]:
steps = np.hstack([steps, n_samples])
for i, j in zip(steps[:-1], steps[1:]):
batch = X[i:j, :]
if i == 0:
incremental_means = batch.mean(axis=0)
incremental_variances = batch.var(axis=0)
# Assign this twice so that the test logic is consistent
incremental_count = batch.shape[0]
sample_count = batch.shape[0]
else:
result = _batch_mean_variance_update(
batch, incremental_means, incremental_variances,
sample_count)
(incremental_means, incremental_variances,
incremental_count) = result
sample_count += batch.shape[0]
calculated_means = np.mean(X[:j], axis=0)
calculated_variances = np.var(X[:j], axis=0)
assert_almost_equal(incremental_means, calculated_means, 6)
assert_almost_equal(incremental_variances,
calculated_variances, 6)
assert_equal(incremental_count, sample_count)
def test_vector_sign_flip():
# Testing that sign flip is working & largest value has positive sign
data = np.random.RandomState(36).randn(5, 5)
max_abs_rows = np.argmax(np.abs(data), axis=1)
data_flipped = _deterministic_vector_sign_flip(data)
max_rows = np.argmax(data_flipped, axis=1)
assert_array_equal(max_abs_rows, max_rows)
signs = np.sign(data[range(data.shape[0]), max_abs_rows])
assert_array_equal(data, data_flipped * signs[:, np.newaxis])
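# A reference sketch consistent with the assertions above (not the sklearn
# implementation): flip the sign of each row so that its largest-magnitude
# entry becomes positive, which makes decomposition output deterministic.
def _naive_vector_sign_flip(data):
    max_abs_cols = np.argmax(np.abs(data), axis=1)
    signs = np.sign(data[np.arange(data.shape[0]), max_abs_cols])
    return data * signs[:, np.newaxis]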
|
bsd-3-clause
|
QuantSoftware/QuantSoftwareToolkit
|
Examples/FeatureSearch/code.py
|
6
|
6374
|
''' Python imports '''
import datetime as dt
''' 3rd party imports '''
import numpy as np
import pandas as pand
import matplotlib.pyplot as plt
''' QSTK imports '''
from QSTK.qstkutil import DataAccess as da
from QSTK.qstkutil import qsdateutil as du
from QSTK.qstkfeat.features import *
from QSTK.qstkfeat.classes import class_fut_ret
import QSTK.qstkfeat.featutil as ftu
import sys
import time
from functions import *
if __name__ == '__main__':
''' Use Dow 30 '''
#lsSym = ['AA', 'AXP', 'BA', 'BAC', 'CAT', 'CSCO', 'CVX', 'DD', 'DIS', 'GE', 'HD', 'HPQ', 'IBM', 'INTC', 'JNJ', \
# 'JPM', 'KFT', 'KO', 'MCD', 'MMM', 'MRK', 'MSFT', 'PFE', 'PG', 'T', 'TRV', 'UTX', 'WMT', 'XOM' ]
#lsSymTrain = lsSym[0:4] + ['$SPX']
#lsSymTest = lsSym[4:8] + ['$SPX']
f = open('2008Dow30.txt')
lsSymTrain = f.read().splitlines() + ['$SPX']
f.close()
f = open('2010Dow30.txt')
lsSymTest = f.read().splitlines() + ['$SPX']
f.close()
lsSym = list(set(lsSymTrain).union(set(lsSymTest)))
    dtStart = dt.datetime(2008, 1, 1)
    dtEnd = dt.datetime(2010, 12, 31)
norObj = da.DataAccess('Norgate')
ldtTimestamps = du.getNYSEdays( dtStart, dtEnd, dt.timedelta(hours=16) )
lsKeys = ['open', 'high', 'low', 'close', 'volume']
    ldfData = norObj.get_data( ldtTimestamps, lsSym, lsKeys )  # this call is important even though the return value is not used
    for temp in ldfData:
        temp.fillna(method="ffill", inplace=True)
        temp.fillna(method="bfill", inplace=True)
ldfDataTrain = norObj.get_data( ldtTimestamps, lsSymTrain, lsKeys )
ldfDataTest = norObj.get_data( ldtTimestamps, lsSymTest, lsKeys)
    for temp in ldfDataTrain:
        temp.fillna(method="ffill", inplace=True)
        temp.fillna(method="bfill", inplace=True)
    for temp in ldfDataTest:
        temp.fillna(method="ffill", inplace=True)
        temp.fillna(method="bfill", inplace=True)
dDataTrain = dict(zip(lsKeys, ldfDataTrain))
dDataTest = dict(zip(lsKeys, ldfDataTest))
''' Imported functions from qstkfeat.features, NOTE: last function is classification '''
lfcFeatures = [ featMA, featMA, featMA, featMA, featMA, featMA, \
featRSI, featRSI, featRSI, featRSI, featRSI, featRSI, \
featDrawDown, featDrawDown, featDrawDown, featDrawDown, featDrawDown, featDrawDown, \
featRunUp, featRunUp, featRunUp, featRunUp, featRunUp, featRunUp, \
featVolumeDelta, featVolumeDelta, featVolumeDelta, featVolumeDelta, featVolumeDelta, featVolumeDelta, \
featAroon, featAroon, featAroon, featAroon, featAroon, featAroon, featAroon, featAroon, featAroon, featAroon, featAroon, featAroon, \
#featStochastic, featStochastic, featStochastic, featStochastic, featStochastic, featStochastic,featStochastic, featStochastic, featStochastic, featStochastic, featStochastic, featStochastic, \
featBeta, featBeta, featBeta, featBeta, featBeta, featBeta,\
featBollinger, featBollinger, featBollinger, featBollinger, featBollinger, featBollinger,\
featCorrelation, featCorrelation, featCorrelation, featCorrelation, featCorrelation, featCorrelation,\
featPrice, \
featVolume, \
class_fut_ret]
ldArgs = [ {'lLookback':5},{'lLookback':10},{'lLookback':20}, {'lLookback':5,'MR':True},{'lLookback':10,'MR':True},{'lLookback':20,'MR':True},\
{'lLookback':5},{'lLookback':10},{'lLookback':20}, {'lLookback':5,'MR':True},{'lLookback':10,'MR':True},{'lLookback':20,'MR':True},\
{'lLookback':5},{'lLookback':10},{'lLookback':20}, {'lLookback':5,'MR':True},{'lLookback':10,'MR':True},{'lLookback':20,'MR':True},\
{'lLookback':5},{'lLookback':10},{'lLookback':20}, {'lLookback':5,'MR':True},{'lLookback':10,'MR':True},{'lLookback':20,'MR':True},\
{'lLookback':5},{'lLookback':10},{'lLookback':20}, {'lLookback':5,'MR':True},{'lLookback':10,'MR':True},{'lLookback':20,'MR':True},\
{'lLookback':5,'bDown':True},{'lLookback':10,'bDown':True},{'lLookback':20,'bDown':True},{'lLookback':5,'bDown':False},{'lLookback':10,'bDown':False},{'lLookback':20,'bDown':False},{'lLookback':5,'bDown':True,'MR':True},{'lLookback':10,'bDown':True,'MR':True},{'lLookback':20,'bDown':True,'MR':True},{'lLookback':5,'bDown':False,'MR':True},{'lLookback':10,'bDown':False,'MR':True},{'lLookback':20,'bDown':False,'MR':True},\
#{'lLookback':5,'bFast':True},{'lLookback':10,'bFast':True},{'lLookback':20,'bFast':True},{'lLookback':5,'bFast':False},{'lLookback':10,'bFast':False},{'lLookback':20,'bFast':False},{'lLookback':5,'bFast':True,'MR':True},{'lLookback':10,'bFast':True,'MR':True},{'lLookback':20,'bFast':True,'MR':True},{'lLookback':5,'bFast':False,'MR':True},{'lLookback':10,'bFast':False,'MR':True},{'lLookback':20,'bFast':False,'MR':True},\
{'lLookback':5},{'lLookback':10},{'lLookback':20}, {'lLookback':5,'MR':True},{'lLookback':10,'MR':True},{'lLookback':20,'MR':True},\
{'lLookback':5},{'lLookback':10},{'lLookback':20}, {'lLookback':5,'MR':True},{'lLookback':10,'MR':True},{'lLookback':20,'MR':True},\
{'lLookback':5},{'lLookback':10},{'lLookback':20}, {'lLookback':5,'MR':True},{'lLookback':10,'MR':True},{'lLookback':20,'MR':True},\
{},\
{},\
{'i_lookforward':5}
]
''' Generate a list of DataFrames, one for each feature, with the same index/column structure as price data '''
ldfFeaturesTrain = ftu.applyFeatures( dDataTrain, lfcFeatures, ldArgs, '$SPX')
ldfFeaturesTest = ftu.applyFeatures( dDataTest, lfcFeatures, ldArgs, '$SPX')
''' Pick Test and Training Points '''
    dtStartTrain = dt.datetime(2008, 1, 1)
    dtEndTrain = dt.datetime(2009, 12, 31)
    dtStartTest = dt.datetime(2010, 1, 1)
    dtEndTest = dt.datetime(2010, 12, 31)
''' Stack all information into one Numpy array '''
naFeatTrain = ftu.stackSyms( ldfFeaturesTrain, dtStartTrain, dtEndTrain )
naFeatTest = ftu.stackSyms( ldfFeaturesTest, dtStartTest, dtEndTest )
''' Normalize features, use same normalization factors for testing data as training data '''
ltWeights = ftu.normFeatures( naFeatTrain, -1.0, 1.0, False )
    ''' Normalize query points with the same weights that come from the training data '''
ftu.normQuery( naFeatTest[:,:-1], ltWeights )
lFeatures = range(0,len(lfcFeatures)-1)
classLabelIndex = len(lfcFeatures) - 1
funccall = sys.argv[1] + '(naFeatTrain,naFeatTest,lFeatures,classLabelIndex)'
timestart = time.time()
clockstart = time.clock()
eval(funccall)
clockend = time.clock()
timeend = time.time()
sys.stdout.write('\n\nclock diff: '+str(clockend-clockstart)+'sec\n')
sys.stdout.write('time diff: '+str(timeend-timestart)+'sec\n')
|
bsd-3-clause
|