repo_name | path | copies | size | content | license
---|---|---|---|---|---
nanophotonics/nplab | nplab/analysis/SPC_to_h5.py | 1 | 12364 | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 03 13:04:50 2018
@author: car72
Gathers Raman data and attributes from a directory full of .spc files and turns it into an h5 file.
Put this script in the same folder as a list of .spc files (must be exported directly from WiRE at time of measurement), set the cwd and run.
"""
from __future__ import division
from __future__ import print_function
from builtins import range
from past.utils import old_div
from builtins import object
import os
import spc
import h5py
import numpy as np
import matplotlib.pyplot as plt
class Raman_Spectrum(object):
#Object class containing spectral data and metadata for single Raman spectrum
def __init__(self, filename, timestamp, metadata, laserWl, laserPower, absLaserPower, integrationTime,
accumulations, nScans, wavenumbers, ramanIntensities, absRamanIntensities):
self.filename = filename
self.timestamp = timestamp
self.metadata = metadata
self.laserWl = laserWl
self.laserPower = laserPower
self.absLaserPower = absLaserPower
self.integrationTime = integrationTime
self.accumulations = accumulations
self.nScans = nScans
self.wavenumbers = wavenumbers
self.ramanIntensities = ramanIntensities
self.absRamanIntensities = absRamanIntensities
def extractRamanSpc(path, bg_path = False, combine_statics = False):
    '''Takes all .spc files from a directory, creates a Raman_Spectrum object for each and background subtracts, if specified.
    .spc files must be directly exported at time of measurement.
    Also plots a table for background files if the user specifies bg_table = True.'''
'''Actual power values for each % laser power in μW. Measured on 09/05/2017.'''
print('Gathering .spc (meta)data from %s...' % path.split('\\')[-1])
p532 = { 0.0001 : 0.01,
0.05 : 4.75,
0.1 : 12.08,
0.5 : 49.6 ,
1.0 : 88.1 ,
5.0 : 666. ,
10.0 : 1219. ,
50.0 : 5360. ,
100.0 : 9650. }
p633 = { 0.0001 : 0.01,
0.05 : 1. ,
0.1 : 2. ,
0.5 : 10. ,
1.0 : 20. ,
5.0 : 112. ,
10.0 : 226. ,
50.0 : 1130. ,
100.0 : 2200. }
p785 = { 0.0001 : 0.17,
0.05 : 8.8 ,
0.1 : 19.1 ,
0.5 : 47.8 ,
1.0 : 104. ,
5.0 : 243. ,
10.0 : 537. ,
50.0 : 1210. ,
100.0 : 2130. }
powerConverter = {532 : p532, 633 : p633, 785 : p785} #Assigns each laser power dictionary to the appropriate wavelength.
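    # Hedged worked example (editor's addition): the dictionaries above map
    # percentage laser power to absolute laser power in uW for each wavelength,
    # e.g. powerConverter[633][10.0] -> 226. uW and powerConverter[532][1.0] -> 88.1 uW.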
os.chdir(path)
spcFiles = sorted([f for f in os.listdir('.') if f.endswith('.spc')])
spectra = []
for n, spcFile in enumerate(spcFiles):
filename = spcFile[:-4] #Removes extension from filename string
#print(filename)
f = spc.File(spcFile)
plt.show()
#try:
# f = spc.File(spcFile) #Create File object from .spc file
#except:
# print(filename)
# f = spc.File(spcFile) #Create File object from .spc file
metadata = {}
fLogDict = {}
fDicts = [f.__dict__, f.log_dict]#main dictionaries containing spectral metadata
newFDicts = [metadata, fLogDict]
for dictN, fDict in enumerate(fDicts):
for k in list(fDict.keys()):
i = fDict[k]
#print('%s (%s) = %s (%s)' % (k, type(k), i, type(i)))
if type(k) == bytes:
k = k.decode()#spc module is written in python 2 and hasn't been updated yet; this ensures all strings are in the same (unicode) format
if type(i) == bytes:
try:
i = i.decode()
except:
continue
if k.startswith(' '):#spc module imperfectly pulls data from some files and includes extra spaces in the dict keys
k = k[1:]
if k.endswith(' '):
k = k[:-1]
if k in ['log_content', 'log_other', 'x']:
continue
newFDicts[dictN][k] = i
#print('%s (%s) = %s (%s)' % (k, type(k), i, type(i)))
metadata.update(fLogDict)
tStamp = []
for unit in ['year', 'month', 'day', 'hour', 'minute']:#the timestamp values are actually arbitrary, so this is obsolete
if unit == 'year':
zFill = 4
else:
zFill = 2
try:
metadata[unit]
tStamp.append(str(metadata[unit]).zfill(zFill))
except:
tStamp.append(str(0).zfill(zFill))
try:
timestamp = np.datetime64('%s-%s-%sT%s:%s' % tuple(tStamp))
except:
timestamp = 'N/A'
try:
laserWl = int(fLogDict['Laser'][7:10]) #Grabs appropriate part of laser wavelength entry from log and converts to integer (must be 3 characters long)
except:
laserWl = 'N/A'
if 'Laser_power' in list(fLogDict.keys()):
laserPower = float(fLogDict['Laser_power'][13:-1]) #Grabs numeric part of string containing laser power info and converts to float
elif 'ND Transmission' in list(fLogDict.keys()):
laserPower = float(('').join([char for char in fLogDict['ND Transmission'].split(' ')[1] if char == '.' or char.isdigit()]))
else:
print(fLogDict.keys())
laserPower = 'Undefined'
if laserPower in [0.0001, 0.05, 0.1, 0.5, 1.0, 5.0, 10.0, 50.0, 100.0]:
absLaserPower = float(powerConverter[laserWl][laserPower]) #Returns absolute laser power (in uW), given laser wavelength and % laser power.
else:
absLaserPower = 'Undefined' #To avoid errors if laser power is not recorded correctly
integrationTime = float(fLogDict['Exposure_time'][6:])
accumulations = int(fLogDict['Accumulations'].split(': ')[-1])#number of scans
wavenumbers = f.x #Pulls x data from spc file
nScans = int(metadata['fnsub']) #Number of Raman spectra contained within the spc file (>1 if file contains a kinetic scan)
ramanIntensities = np.array([f.sub[i].y for i in range(nScans)]) #Builds list of y data arrays
if absLaserPower != 'Undefined':
#print(filename, absLaserPower)
absRamanIntensities = np.array([spectrum*1000/(absLaserPower*integrationTime*float(accumulations/1000)) for spectrum in ramanIntensities])
else:
absRamanIntensities = ['N/A'] * nScans
if nScans == 1:
ramanIntensities = ramanIntensities[0] #Reduces to single array if not a kinetic scan
absRamanIntensities = absRamanIntensities[0] #Also for this
spectra.append(Raman_Spectrum(filename, timestamp, metadata, laserWl, laserPower, absLaserPower, integrationTime, accumulations, nScans,
wavenumbers, ramanIntensities, absRamanIntensities))
#except Exception as e:
# print 'Something went wrong with %s:' % filename
# print e
# continue
return spectra
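# Hedged usage sketch (editor's addition, not part of the original workflow):
# a small helper showing how the output of extractRamanSpc() can be inspected;
# the default directory path below is hypothetical.
def _example_inspect_spectra(path=r'C:\Raman\my_measurement'):
    spectra = extractRamanSpc(path)
    for s in spectra[:3]:
        print(s.filename, s.laserWl, s.laserPower, s.nScans)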
def populateH5(spectra, h5File):
print('\nPopulating h5 file...')
with h5py.File(h5File, 'a') as f:
gSpectra = f.create_group('Spectra')
for n, spectrum in enumerate(spectra):
if len(spectra) < 10:
name = 'Spectrum %01d: %s' % (n, spectrum.filename)
elif len(spectra) < 100:
name = 'Spectrum %02d: %s' % (n, spectrum.filename)
elif len(spectra) < 1000:
name = 'Spectrum %03d: %s' % (n, spectrum.filename)
            elif len(spectra) < 10000:
                name = 'Spectrum %04d: %s' % (n, spectrum.filename)
            else:
                name = 'Spectrum %05d: %s' % (n, spectrum.filename)
gSpectrum = gSpectra.create_group(name)
attrs = spectrum.metadata
mainAttrs = {'Original Filename' : spectrum.filename,
'Laser Wavelength' : spectrum.laserWl,
'Laser Power (%)' : spectrum.laserPower,
'Laser Power (uW)' : spectrum.absLaserPower,
'Integration Time' : spectrum.integrationTime,
'Accumulations' : spectrum.accumulations,
'Number of Scans' : spectrum.nScans,
'Wavenumbers' : spectrum.wavenumbers,
'Timestamp' : str(spectrum.timestamp)}
attrs.update(mainAttrs)
for key in attrs:
try:
gSpectrum.attrs[key] = attrs[key]
except:
continue
x = spectrum.wavenumbers
yRaw = spectrum.ramanIntensities
yAbs = spectrum.absRamanIntensities
if type(yAbs) == str or type(yAbs[0]) == str:
yAbs = np.zeros(len(x))
if spectrum.nScans == 1:
if spectrum.ramanIntensities.max() != 0:
yNorm = spectrum.ramanIntensities/spectrum.ramanIntensities.max()
else:
yNorm = spectrum.ramanIntensities
else:
yNorm = []
for yData in spectrum.ramanIntensities:
if np.count_nonzero(yData) > 0:
yDataNorm = yData/yData.max()
else:
yDataNorm = yData
yNorm.append(yDataNorm)
yNorm = np.array(yNorm)
dRaw = gSpectrum.create_dataset('Raman (cts)', data = yRaw)
dRaw.attrs['wavelengths'] = x
dAbs = gSpectrum.create_dataset('Raman (cts mw^-1 s^-1)', data = np.array(yAbs))
dAbs.attrs['wavelengths'] = dRaw.attrs['wavelengths']
dNorm = gSpectrum.create_dataset('Raman (normalised)', data = yNorm)
dNorm.attrs['wavelengths'] = dRaw.attrs['wavelengths']
gRaw = f.create_group('All Raw')
gAbs = f.create_group('All Abs')
gNorm = f.create_group('All Norm')
spectraNames = sorted(list(f['Spectra'].keys()), key = lambda spectrumName: int(spectrumName.split(':')[0][9:]))
for spectrumName in spectraNames:
dRaw = f['Spectra'][spectrumName]['Raman (cts)']
dRaw = gRaw.create_dataset(spectrumName, data = dRaw)
dRaw.attrs.update(f['Spectra'][spectrumName].attrs)
dRaw.attrs.update(f['Spectra'][spectrumName]['Raman (cts)'].attrs)
dAbs = f['Spectra'][spectrumName]['Raman (cts mw^-1 s^-1)']
dAbs = gAbs.create_dataset(spectrumName, data = dAbs)
dAbs.attrs.update(f['Spectra'][spectrumName].attrs)
dAbs.attrs.update(f['Spectra'][spectrumName]['Raman (cts mw^-1 s^-1)'].attrs)
dNorm = f['Spectra'][spectrumName]['Raman (normalised)']
dNorm = gNorm.create_dataset(spectrumName, data = dNorm)
dNorm.attrs.update(f['Spectra'][spectrumName].attrs)
dNorm.attrs.update(f['Spectra'][spectrumName]['Raman (normalised)'].attrs)
print('\th5 file populated\n')
def createOutputFile(filename):
'''Auto-increments new filename if file exists'''
print('\nCreating output file...')
outputFile = '%s.h5' % filename
if outputFile in os.listdir('.'):
print('\n%s already exists' % outputFile)
n = 0
outputFile = '%s_%s.h5' % (filename, n)
while outputFile in os.listdir('.'):
print('%s already exists' % outputFile)
n += 1
outputFile = '%s_%s.h5' % (filename, n)
print('\tOutput file %s created' % outputFile)
return outputFile
def run():
rootDir = os.getcwd()
spectra = extractRamanSpc(rootDir)
dirName = '%s Raman Data' % rootDir.split('\\')[-1]
h5FileName = createOutputFile(dirName)
populateH5(spectra, h5FileName)
if __name__ == '__main__':
run() | gpl-3.0 |
mattilyra/scikit-learn | sklearn/base.py | 11 | 18381 | """Base classes for all estimators."""
# Author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import copy
import warnings
import numpy as np
from scipy import sparse
from .externals import six
from .utils.fixes import signature
from .utils.deprecation import deprecated
from .exceptions import ChangedBehaviorWarning as _ChangedBehaviorWarning
@deprecated("ChangedBehaviorWarning has been moved into the sklearn.exceptions"
" module. It will not be available here from version 0.19")
class ChangedBehaviorWarning(_ChangedBehaviorWarning):
pass
##############################################################################
def clone(estimator, safe=True):
"""Constructs a new estimator with the same parameters.
Clone does a deep copy of the model in an estimator
without actually copying attached data. It yields a new estimator
with the same parameters that has not been fit on any data.
Parameters
----------
estimator: estimator object, or list, tuple or set of objects
The estimator or group of estimators to be cloned
safe: boolean, optional
If safe is false, clone will fall back to a deepcopy on objects
that are not estimators.
"""
estimator_type = type(estimator)
# XXX: not handling dictionaries
if estimator_type in (list, tuple, set, frozenset):
return estimator_type([clone(e, safe=safe) for e in estimator])
elif not hasattr(estimator, 'get_params'):
if not safe:
return copy.deepcopy(estimator)
else:
raise TypeError("Cannot clone object '%s' (type %s): "
"it does not seem to be a scikit-learn estimator "
"as it does not implement a 'get_params' methods."
% (repr(estimator), type(estimator)))
klass = estimator.__class__
new_object_params = estimator.get_params(deep=False)
for name, param in six.iteritems(new_object_params):
new_object_params[name] = clone(param, safe=False)
new_object = klass(**new_object_params)
params_set = new_object.get_params(deep=False)
# quick sanity check of the parameters of the clone
for name in new_object_params:
param1 = new_object_params[name]
param2 = params_set[name]
if isinstance(param1, np.ndarray):
# For most ndarrays, we do not test for complete equality
if not isinstance(param2, type(param1)):
equality_test = False
elif (param1.ndim > 0
and param1.shape[0] > 0
and isinstance(param2, np.ndarray)
and param2.ndim > 0
and param2.shape[0] > 0):
equality_test = (
param1.shape == param2.shape
and param1.dtype == param2.dtype
# We have to use '.flat' for 2D arrays
and param1.flat[0] == param2.flat[0]
and param1.flat[-1] == param2.flat[-1]
)
else:
equality_test = np.all(param1 == param2)
elif sparse.issparse(param1):
# For sparse matrices equality doesn't work
if not sparse.issparse(param2):
equality_test = False
elif param1.size == 0 or param2.size == 0:
equality_test = (
param1.__class__ == param2.__class__
and param1.size == 0
and param2.size == 0
)
else:
equality_test = (
param1.__class__ == param2.__class__
and param1.data[0] == param2.data[0]
and param1.data[-1] == param2.data[-1]
and param1.nnz == param2.nnz
and param1.shape == param2.shape
)
else:
new_obj_val = new_object_params[name]
params_set_val = params_set[name]
# The following construct is required to check equality on special
# singletons such as np.nan that are not equal to them-selves:
equality_test = (new_obj_val == params_set_val or
new_obj_val is params_set_val)
if not equality_test:
raise RuntimeError('Cannot clone object %s, as the constructor '
'does not seem to set parameter %s' %
(estimator, name))
return new_object
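# Hedged usage sketch (editor's illustration, not part of scikit-learn): clone()
# copies constructor parameters but none of the fitted state. Wrapped in a
# function with a deferred import so nothing runs at module import time;
# LogisticRegression is just an example estimator.
def _example_clone():
    from sklearn.linear_model import LogisticRegression
    est = LogisticRegression(C=0.5)
    est_copy = clone(est)
    assert est_copy is not est                  # a distinct object...
    assert est_copy.get_params()['C'] == 0.5    # ...with the same parameters
    return est_copy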
###############################################################################
def _pprint(params, offset=0, printer=repr):
"""Pretty print the dictionary 'params'
Parameters
----------
params: dict
The dictionary to pretty print
offset: int
The offset in characters to add at the begin of each line.
printer:
The function to convert entries to strings, typically
the builtin str or repr
"""
# Do a multi-line justified repr:
options = np.get_printoptions()
np.set_printoptions(precision=5, threshold=64, edgeitems=2)
params_list = list()
this_line_length = offset
line_sep = ',\n' + (1 + offset // 2) * ' '
for i, (k, v) in enumerate(sorted(six.iteritems(params))):
if type(v) is float:
# use str for representing floating point numbers
# this way we get consistent representation across
# architectures and versions.
this_repr = '%s=%s' % (k, str(v))
else:
# use repr of the rest
this_repr = '%s=%s' % (k, printer(v))
if len(this_repr) > 500:
this_repr = this_repr[:300] + '...' + this_repr[-100:]
if i > 0:
if (this_line_length + len(this_repr) >= 75 or '\n' in this_repr):
params_list.append(line_sep)
this_line_length = len(line_sep)
else:
params_list.append(', ')
this_line_length += 2
params_list.append(this_repr)
this_line_length += len(this_repr)
np.set_printoptions(**options)
lines = ''.join(params_list)
# Strip trailing space to avoid nightmare in doctests
lines = '\n'.join(l.rstrip(' ') for l in lines.split('\n'))
return lines
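# Hedged usage sketch (editor's illustration): _pprint renders a parameter dict
# the same way BaseEstimator.__repr__ does; the offset is typically the length
# of the class name being printed.
def _example_pprint():
    params = {'alpha': 0.5, 'fit_intercept': True, 'solver': 'auto'}
    return _pprint(params, offset=len('Ridge'))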
###############################################################################
class BaseEstimator(object):
"""Base class for all estimators in scikit-learn
Notes
-----
All estimators should specify all the parameters that can be set
at the class level in their ``__init__`` as explicit keyword
arguments (no ``*args`` or ``**kwargs``).
"""
@classmethod
def _get_param_names(cls):
"""Get parameter names for the estimator"""
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
init_signature = signature(init)
# Consider the constructor parameters excluding 'self'
parameters = [p for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD]
for p in parameters:
if p.kind == p.VAR_POSITIONAL:
raise RuntimeError("scikit-learn estimators should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s with constructor %s doesn't "
" follow this convention."
% (cls, init_signature))
# Extract and sort argument names excluding 'self'
return sorted([p.name for p in parameters])
def get_params(self, deep=True):
"""Get parameters for this estimator.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
out = dict()
for key in self._get_param_names():
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as pipelines). The former have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Returns
-------
self
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
def __repr__(self):
class_name = self.__class__.__name__
return '%s(%s)' % (class_name, _pprint(self.get_params(deep=False),
offset=len(class_name),),)
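# Hedged usage sketch (editor's illustration): nested parameters are addressed
# with the <component>__<parameter> syntax described in set_params(). Wrapped in
# a function with deferred imports so nothing runs at module import time;
# Pipeline, StandardScaler and LogisticRegression are example components.
def _example_get_set_params():
    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import StandardScaler
    from sklearn.linear_model import LogisticRegression
    pipe = Pipeline([('scale', StandardScaler()), ('clf', LogisticRegression())])
    pipe.set_params(clf__C=10.0)                         # set a nested parameter
    assert pipe.get_params(deep=True)['clf__C'] == 10.0
    return pipe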
###############################################################################
class ClassifierMixin(object):
"""Mixin class for all classifiers in scikit-learn."""
_estimator_type = "classifier"
def score(self, X, y, sample_weight=None):
"""Returns the mean accuracy on the given test data and labels.
In multi-label classification, this is the subset accuracy
which is a harsh metric since you require for each sample that
each label set be correctly predicted.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Test samples.
y : array-like, shape = (n_samples) or (n_samples, n_outputs)
True labels for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
Mean accuracy of self.predict(X) wrt. y.
"""
from .metrics import accuracy_score
return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
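# Hedged illustration (editor's addition): the score() above is the mean
# accuracy, i.e. accuracy_score(y_true, predictions), computed here directly
# on a toy label vector.
def _example_accuracy():
    from .metrics import accuracy_score
    y_true = np.array([0, 1, 1, 0])
    y_pred = np.array([0, 1, 0, 0])
    return accuracy_score(y_true, y_pred)  # -> 0.75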
###############################################################################
class RegressorMixin(object):
"""Mixin class for all regression estimators in scikit-learn."""
_estimator_type = "regressor"
def score(self, X, y, sample_weight=None):
"""Returns the coefficient of determination R^2 of the prediction.
        The coefficient R^2 is defined as (1 - u/v), where u is the residual
        sum of squares ((y_true - y_pred) ** 2).sum() and v is the total
        sum of squares ((y_true - y_true.mean()) ** 2).sum().
Best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
would get a R^2 score of 0.0.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Test samples.
y : array-like, shape = (n_samples) or (n_samples, n_outputs)
True values for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
R^2 of self.predict(X) wrt. y.
"""
from .metrics import r2_score
return r2_score(y, self.predict(X), sample_weight=sample_weight,
multioutput='variance_weighted')
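# Hedged worked example (editor's addition): R^2 as defined in the docstring
# above, computed by hand and cross-checked against r2_score on toy data.
def _example_r2():
    from .metrics import r2_score
    y_true = np.array([3.0, -0.5, 2.0, 7.0])
    y_pred = np.array([2.5, 0.0, 2.0, 8.0])
    u = ((y_true - y_pred) ** 2).sum()           # residual sum of squares
    v = ((y_true - y_true.mean()) ** 2).sum()    # total sum of squares
    assert abs((1 - u / v) - r2_score(y_true, y_pred)) < 1e-12
    return 1 - u / v                             # ~0.949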
###############################################################################
class ClusterMixin(object):
"""Mixin class for all cluster estimators in scikit-learn."""
_estimator_type = "clusterer"
def fit_predict(self, X, y=None):
"""Performs clustering on X and returns cluster labels.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Input data.
Returns
-------
y : ndarray, shape (n_samples,)
cluster labels
"""
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
self.fit(X)
return self.labels_
class BiclusterMixin(object):
"""Mixin class for all bicluster estimators in scikit-learn"""
@property
def biclusters_(self):
"""Convenient way to get row and column indicators together.
Returns the ``rows_`` and ``columns_`` members.
"""
return self.rows_, self.columns_
def get_indices(self, i):
"""Row and column indices of the i'th bicluster.
Only works if ``rows_`` and ``columns_`` attributes exist.
Returns
-------
row_ind : np.array, dtype=np.intp
Indices of rows in the dataset that belong to the bicluster.
col_ind : np.array, dtype=np.intp
Indices of columns in the dataset that belong to the bicluster.
"""
rows = self.rows_[i]
columns = self.columns_[i]
return np.nonzero(rows)[0], np.nonzero(columns)[0]
def get_shape(self, i):
"""Shape of the i'th bicluster.
Returns
-------
shape : (int, int)
Number of rows and columns (resp.) in the bicluster.
"""
indices = self.get_indices(i)
return tuple(len(i) for i in indices)
def get_submatrix(self, i, data):
"""Returns the submatrix corresponding to bicluster `i`.
Works with sparse matrices. Only works if ``rows_`` and
``columns_`` attributes exist.
"""
from .utils.validation import check_array
data = check_array(data, accept_sparse='csr')
row_ind, col_ind = self.get_indices(i)
return data[row_ind[:, np.newaxis], col_ind]
###############################################################################
class TransformerMixin(object):
"""Mixin class for all transformers in scikit-learn."""
def fit_transform(self, X, y=None, **fit_params):
"""Fit to data, then transform it.
Fits transformer to X and y with optional parameters fit_params
and returns a transformed version of X.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Training set.
y : numpy array of shape [n_samples]
Target values.
Returns
-------
X_new : numpy array of shape [n_samples, n_features_new]
Transformed array.
"""
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
if y is None:
# fit method of arity 1 (unsupervised transformation)
return self.fit(X, **fit_params).transform(X)
else:
# fit method of arity 2 (supervised transformation)
return self.fit(X, y, **fit_params).transform(X)
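# Hedged usage sketch (editor's illustration): for a transformer,
# fit_transform(X) is equivalent to fit(X).transform(X); StandardScaler is used
# as an example and imported lazily to avoid import-time side effects.
def _example_fit_transform():
    from sklearn.preprocessing import StandardScaler
    X = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
    X_a = StandardScaler().fit_transform(X)
    X_b = StandardScaler().fit(X).transform(X)
    assert np.allclose(X_a, X_b)
    return X_a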
class DensityMixin(object):
"""Mixin class for all density estimators in scikit-learn."""
_estimator_type = "DensityEstimator"
def score(self, X, y=None):
"""Returns the score of the model on the data X
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
score: float
"""
pass
###############################################################################
class MetaEstimatorMixin(object):
"""Mixin class for all meta estimators in scikit-learn."""
# this is just a tag for the moment
###############################################################################
def is_classifier(estimator):
"""Returns True if the given estimator is (probably) a classifier."""
return getattr(estimator, "_estimator_type", None) == "classifier"
def is_regressor(estimator):
"""Returns True if the given estimator is (probably) a regressor."""
return getattr(estimator, "_estimator_type", None) == "regressor"
| bsd-3-clause |
gtesei/fast-furious | competitions/jigsaw-toxic-comment-classification-challenge/gru4.py | 1 | 5596 | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from subprocess import check_output
from keras.models import Model
from keras.layers import Dense, Embedding, Input , Activation
from keras.layers import LSTM, Bidirectional, GlobalMaxPool1D, Dropout, GRU
from keras.preprocessing import text, sequence
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers.normalization import BatchNormalization
from keras.layers import Flatten , Conv1D , GlobalMaxPooling1D , GlobalAveragePooling1D, MaxPooling1D
from keras.models import Sequential
import re
# conf
max_features = 20000
maxlen = 100
# load data
train = pd.read_csv("data/train.csv")
#train = train[:1000]
test = pd.read_csv("data/test.csv")
#test = test[:1000]
train = train.sample(frac=1)
# pre-processing
print(">> pre-processing ... ")
list_classes = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"]
list_sentences_train = train["comment_text"].fillna("__NA__").values
y = train[list_classes].values
list_sentences_test = test["comment_text"].fillna("__NA__").values
tokenizer = text.Tokenizer(num_words=max_features)
tokenizer.fit_on_texts(list(list_sentences_train) + list(list_sentences_test))
list_tokenized_train = tokenizer.texts_to_sequences(list(list_sentences_train))
list_tokenized_test = tokenizer.texts_to_sequences(list(list_sentences_test))
X_t = sequence.pad_sequences(list_tokenized_train, maxlen=maxlen)
X_te = sequence.pad_sequences(list_tokenized_test, maxlen=maxlen)
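# Hedged note (editor's addition): after padding, X_t and X_te are integer
# matrices of shape (n_samples, maxlen); words outside the max_features most
# frequent ones were dropped by the tokenizer.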
def get_model_bidirectional_avg(embed_size = 200 ,
num_lstm = 50 ,
rate_drop_lstm = 0,
rate_drop_dense = 0.1,
num_dense = 50):
print(">> get_model_bidirectional_avg <<")
inp = Input(shape=(maxlen, ))
x = Embedding(max_features, embed_size)(inp)
x = Bidirectional(GRU(num_lstm, return_sequences=True))(x)
x = Dropout(rate_drop_dense)(x)
#add a GlobalAveragePooling1D, which will average the embeddings of all words in the document
x = GlobalAveragePooling1D()(x)
x = Dense(num_dense, activation="relu")(x)
x = Dropout(rate_drop_dense)(x)
#x = BatchNormalization()(x)
x = Dense(6, activation="sigmoid")(x)
model = Model(inputs=inp, outputs=x)
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
return model
def get_model_conv(embed_size = 200 ,
rate_drop_dense = 0.2,
filters = 250,
kernel_size = 3,
num_dense = 50):
print(">> Conv1D <<")
model = Sequential()
model.add(Embedding(max_features, embed_size, input_length=maxlen))
model.add(Dropout(rate_drop_dense))
model.add(Conv1D(filters,kernel_size,padding='valid',activation='relu',strides=1))
# we use max pooling:
model.add(GlobalMaxPooling1D())
# We add a vanilla hidden layer:
model.add(Dense(num_dense))
model.add(Dropout(rate_drop_dense))
model.add(Activation('relu'))
# output layer
model.add(Dense(6, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
return model
def get_model_bidirectional(embed_size = 200 ,
num_lstm = 64 ,
rate_drop_lstm = 0,
rate_drop_dense = 0.1,
num_dense = 50):
model = Sequential()
model.add(Embedding(max_features, embed_size, input_length=maxlen))
model.add(Bidirectional(GRU(num_lstm)))
model.add(Dropout(rate_drop_dense))
model.add(Dense(num_dense, activation='relu'))
model.add(Dropout(rate_drop_dense))
model.add(Dense(6, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
return model
def get_cnn_lstm(embed_size = 200 ,
rate_drop_dense = 0.2,
filters = 64,
lstm_output_size = 70,
kernel_size = 5,
num_dense = 50):
print(">>> cnn + gru <<")
model = Sequential()
model.add(Embedding(max_features, embed_size, input_length=maxlen))
model.add(Dropout(rate_drop_dense))
model.add(Conv1D(filters,
kernel_size,
padding='valid',
activation='relu',
strides=1))
model.add(MaxPooling1D(pool_size=4))
model.add(Bidirectional(GRU(lstm_output_size)))
model.add(Dense(num_dense, activation='relu'))
model.add(Dropout(rate_drop_dense))
model.add(Dense(6, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
return model
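# Hedged illustration (editor's addition): each builder above returns a compiled
# Keras model with the same input/output contract, so they can be swapped
# through a simple registry; this dict is local to the sketch and unused below.
MODEL_BUILDERS = {
    'bidirectional_avg': get_model_bidirectional_avg,
    'conv': get_model_conv,
    'bidirectional': get_model_bidirectional,
    'cnn_gru': get_cnn_lstm,
}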
# train
batch_size = 32
epochs = 10
model = get_cnn_lstm()
print(model.summary())
file_path="weights_base.best.hdf5"
checkpoint = ModelCheckpoint(file_path, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
early = EarlyStopping(monitor="val_loss", mode="min", patience=0)
callbacks_list = [checkpoint, early] #early
model.fit(X_t, y, batch_size=batch_size, epochs=epochs, validation_split=0.1, callbacks=callbacks_list)
# predict
print(">>> predicting on test set ... ")
model.load_weights(file_path)
y_test = model.predict(X_te)
#sub
sample_submission = pd.read_csv("data/sample_submission.csv")
sample_submission[list_classes] = y_test
sample_submission.to_csv("sub_gru5_.csv.gz", index=False , compression='gzip') | mit |
shanot/imp | modules/pmi/pyext/src/io/crosslink.py | 1 | 79188 | """@namespace IMP.pmi.io.crosslink
Handles cross-link data sets.
Utilities are also provided to help in the analysis of models that
contain cross-links.
"""
from __future__ import print_function
import IMP
import IMP.pmi
import IMP.pmi.output
import IMP.atom
import IMP.core
import IMP.algebra
import IMP.rmf
import RMF
import IMP.display
import operator
import math
import sys
# json default serializations
def set_json_default(obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, IMP.pmi.topology.Molecule):
return str(obj)
raise TypeError
# Handle and return data that must be a string
if sys.version_info[0] >= 3:
def _handle_string_input(inp):
if not isinstance(inp, str):
raise TypeError("expecting a string")
return inp
else:
def _handle_string_input(inp):
if not isinstance(inp, (str, unicode)):
raise TypeError("expecting a string or unicode")
# Coerce to non-unicode representation (str)
if isinstance(inp, unicode):
return str(inp)
else:
return inp
class _CrossLinkDataBaseStandardKeys(object):
'''
This class setup all the standard keys needed to
identify the crosslink features from the data sets
'''
def __init__(self):
self.type={}
self.protein1_key="Protein1"
self.type[self.protein1_key]=str
self.protein2_key="Protein2"
self.type[self.protein2_key]=str
self.residue1_key="Residue1"
self.type[self.residue1_key]=int
self.residue2_key="Residue2"
self.type[self.residue2_key]=int
self.residue1_amino_acid_key="Residue1AminoAcid"
self.type[self.residue1_amino_acid_key]=str
self.residue2_amino_acid_key="Residue2AminoAcid"
self.type[self.residue2_amino_acid_key]=str
self.residue1_moiety_key="Residue1Moiety"
self.type[self.residue1_moiety_key]=str
self.residue2_moiety_key="Residue2Moiety"
self.type[self.residue2_moiety_key]=str
self.site_pairs_key="SitePairs"
self.type[self.site_pairs_key]=str
self.unique_id_key="XLUniqueID"
self.type[self.unique_id_key]=str
self.unique_sub_index_key="XLUniqueSubIndex"
self.type[self.unique_sub_index_key]=int
self.unique_sub_id_key="XLUniqueSubID"
self.type[self.unique_sub_id_key]=str
self.data_set_name_key="DataSetName"
self.type[self.data_set_name_key]=str
self.cross_linker_chemical_key="CrossLinkerChemical"
self.type[self.cross_linker_chemical_key]=str
self.id_score_key="IDScore"
self.type[self.id_score_key]=float
self.fdr_key="FDR"
self.type[self.fdr_key]=float
self.quantitation_key="Quantitation"
self.type[self.quantitation_key]=float
self.redundancy_key="Redundancy"
self.type[self.redundancy_key]=int
self.redundancy_list_key="RedundancyList"
        self.type[self.redundancy_list_key]=list
self.ambiguity_key="Ambiguity"
self.type[self.ambiguity_key]=int
self.residue1_links_number_key="Residue1LinksNumber"
self.type[self.residue1_links_number_key]=int
self.residue2_links_number_key="Residue2LinksNumber"
self.type[self.residue2_links_number_key]=int
self.type[self.ambiguity_key]=int
self.state_key="State"
self.type[self.state_key]=int
self.sigma1_key="Sigma1"
self.type[self.sigma1_key]=str
self.sigma2_key="Sigma2"
self.type[self.sigma2_key]=str
self.psi_key="Psi"
self.type[self.psi_key]=str
self.distance_key="Distance"
self.type[self.distance_key]=float
self.min_ambiguous_distance_key="MinAmbiguousDistance"
        self.type[self.min_ambiguous_distance_key]=float
#link types are either Monolink, Intralink or Interlink
self.link_type_key="LinkType"
self.type[self.link_type_key]=str
self.ordered_key_list =[self.data_set_name_key,
self.unique_id_key,
self.unique_sub_index_key,
self.unique_sub_id_key,
self.protein1_key,
self.protein2_key,
self.residue1_key,
self.residue2_key,
self.residue1_amino_acid_key,
self.residue2_amino_acid_key,
self.residue1_moiety_key,
self.residue2_moiety_key,
self.cross_linker_chemical_key,
self.id_score_key,
self.fdr_key,
self.quantitation_key,
self.redundancy_key,
self.redundancy_list_key,
self.state_key,
self.sigma1_key,
self.sigma2_key,
self.psi_key,
self.distance_key,
self.min_ambiguous_distance_key,
self.link_type_key]
class _ProteinsResiduesArray(tuple):
'''
    This class inherits from tuple, and it is a shorthand for a cross-link
(p1,p2,r1,r2) or a monolink (p1,r1) where p1 and p2 are protein1 and protein2, r1 and r2 are
residue1 and residue2.
'''
def __new__(self,input_data):
'''
@input_data can be a dict or a tuple
'''
self.cldbsk=_CrossLinkDataBaseStandardKeys()
if type(input_data) is dict:
monolink=False
p1=input_data[self.cldbsk.protein1_key]
try:
p2=input_data[self.cldbsk.protein2_key]
except KeyError:
monolink=True
r1=input_data[self.cldbsk.residue1_key]
try:
r2=input_data[self.cldbsk.residue2_key]
except KeyError:
monolink=True
if not monolink:
t=(p1,p2,r1,r2)
else:
t=(p1,"",r1,None)
elif type(input_data) is tuple:
if len(input_data)>4 or len(input_data)==3 or len(input_data)==1:
raise TypeError("_ProteinsResiduesArray: must have only 4 elements")
if len(input_data)==4:
p1 = _handle_string_input(input_data[0])
p2 = _handle_string_input(input_data[1])
r1=input_data[2]
r2=input_data[3]
if (not (type(r1) is int)) and (not (r1 is None)):
raise TypeError("_ProteinsResiduesArray: residue1 must be a integer")
if (not (type(r2) is int)) and (not (r1 is None)):
raise TypeError("_ProteinsResiduesArray: residue2 must be a integer")
t=(p1,p2,r1,r2)
if len(input_data) == 2:
p1 = _handle_string_input(input_data[0])
r1 = input_data[1]
if type(r1) is not int:
raise TypeError("_ProteinsResiduesArray: residue1 must be a integer")
t = (p1,"",r1,None)
else:
raise TypeError("_ProteinsResiduesArray: input must be a dict or tuple")
return tuple.__new__(_ProteinsResiduesArray, t)
def get_inverted(self):
'''
Returns a _ProteinsResiduesArray instance with protein1 and protein2 inverted
'''
return _ProteinsResiduesArray((self[1],self[0],self[3],self[2]))
def __str__(self):
outstr=self.cldbsk.protein1_key+" "+str(self[0])
outstr+=" "+self.cldbsk.protein2_key+" "+str(self[1])
outstr+=" "+self.cldbsk.residue1_key+" "+str(self[2])
outstr+=" "+self.cldbsk.residue2_key+" "+str(self[3])
return outstr
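# Hedged usage sketch (editor's illustration): _ProteinsResiduesArray accepts
# either a 4-tuple cross-link (protein1, protein2, residue1, residue2) or a
# 2-tuple monolink (protein1, residue1); the protein names are made up.
def _example_proteins_residues_array():
    pra = _ProteinsResiduesArray(("ProtA", "ProtB", 10, 20))
    print(pra)                  # keyword-annotated string form
    print(pra.get_inverted())   # ProtB/ProtA with the residues swapped
    mono = _ProteinsResiduesArray(("ProtA", 10))
    print(mono)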
class FilterOperator(object):
'''
This class allows to create filter functions that can be passed to the CrossLinkDataBase
in this way:
fo=FilterOperator(cldb.protein1_key,operator.eq,"AAA")|FilterOperator(cldb.protein2_key,operator.eq,"BBB")
where cldb is CrossLinkDataBase instance and it is only needed to get the standard keywords
A filter operator can be evaluate on a CrossLinkDataBase item xl and returns a boolean
fo.evaluate(xl)
and it is used to filter the database
'''
def __init__(self, argument1, operator, argument2):
'''
(argument1,operator,argument2) can be either a (keyword,operator.eq|lt|gt...,value)
or (FilterOperator1,operator.or|and...,FilterOperator2)
'''
if isinstance(argument1, FilterOperator):
self.operations = [argument1, operator, argument2]
else:
self.operations = []
self.values = (argument1, operator, argument2)
def __or__(self, FilterOperator2):
return FilterOperator(self, operator.or_, FilterOperator2)
def __and__(self, FilterOperator2):
return FilterOperator(self, operator.and_, FilterOperator2)
def __invert__(self):
return FilterOperator(self, operator.not_, None)
def evaluate(self, xl_item):
if len(self.operations) == 0:
keyword, operator, value = self.values
return operator(xl_item[keyword], value)
FilterOperator1, op, FilterOperator2 = self.operations
if FilterOperator2 is None:
return op(FilterOperator1.evaluate(xl_item))
else:
return op(FilterOperator1.evaluate(xl_item), FilterOperator2.evaluate(xl_item))
'''
def filter_factory(xl_):
class FilterOperator(object):
import operator
xl = xl_
def __new__(self,key,value,oper=operator.eq):
return oper(self.xl[key],value)
return FilterOperator
'''
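# Hedged usage sketch (editor's illustration): FilterOperator objects compare a
# cross-link dictionary entry against a value and can be combined with | and &;
# the keys come from the standard keywords and the protein names are made up.
def _example_filter_operator():
    keys = _CrossLinkDataBaseStandardKeys()
    xl = {keys.protein1_key: "AAA", keys.protein2_key: "BBB"}
    fo = (FilterOperator(keys.protein1_key, operator.eq, "AAA")
          | FilterOperator(keys.protein2_key, operator.eq, "CCC"))
    return fo.evaluate(xl)  # True: the first condition matches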
class CrossLinkDataBaseKeywordsConverter(_CrossLinkDataBaseStandardKeys):
'''
This class is needed to convert the keywords from a generic database
to the standard ones
'''
def __init__(self, list_parser=None):
'''
@param list_parser an instance of ResiduePairListParser, if any is needed
'''
self.converter={}
self.backward_converter={}
_CrossLinkDataBaseStandardKeys.__init__(self)
self.rplp = list_parser
if self.rplp is None:
# either you have protein1, protein2, residue1, residue2
self.compulsory_keys=set([self.protein1_key,
self.protein2_key,
self.residue1_key,
self.residue2_key])
else:
self.compulsory_keys=self.rplp.get_compulsory_keys()
self.setup_keys=set()
def check_keys(self):
'''
        Checks whether all the necessary keys have been set up
'''
setup_keys=set(self.get_setup_keys())
if self.compulsory_keys & setup_keys != self.compulsory_keys:
raise KeyError("CrossLinkDataBaseKeywordsConverter: must setup all necessary keys")
def get_setup_keys(self):
'''
Returns the keys that have been setup so far
'''
return self.backward_converter.keys()
def set_standard_keys(self):
"""
This sets up the standard compulsory keys as defined by
_CrossLinkDataBaseStandardKeys
"""
for ck in self.compulsory_keys:
self.converter[ck]=ck
self.backward_converter[ck]=ck
def set_unique_id_key(self,origin_key):
self.converter[origin_key]=self.unique_id_key
self.backward_converter[self.unique_id_key]=origin_key
def set_protein1_key(self,origin_key):
self.converter[origin_key]=self.protein1_key
self.backward_converter[self.protein1_key]=origin_key
def set_protein2_key(self,origin_key):
self.converter[origin_key]=self.protein2_key
self.backward_converter[self.protein2_key]=origin_key
def set_residue1_key(self,origin_key):
self.converter[origin_key]=self.residue1_key
self.backward_converter[self.residue1_key]=origin_key
def set_residue2_key(self,origin_key):
self.converter[origin_key]=self.residue2_key
self.backward_converter[self.residue2_key]=origin_key
def set_residue1_amino_acid_key(self, origin_key):
self.converter[origin_key] = self.residue1_amino_acid_key
self.backward_converter[self.residue1_amino_acid_key] = origin_key
def set_residue2_amino_acid_key(self, origin_key):
self.converter[origin_key] = self.residue2_amino_acid_key
self.backward_converter[self.residue2_amino_acid_key] = origin_key
def set_residue1_moiety_key(self, origin_key):
self.converter[origin_key] = self.residue1_moiety_key
self.backward_converter[self.residue1_moiety_key] = origin_key
def set_residue2_moiety_key(self, origin_key):
self.converter[origin_key] = self.residue2_moiety_key
self.backward_converter[self.residue2_moiety_key] = origin_key
def set_site_pairs_key(self,origin_key):
self.converter[origin_key]=self.site_pairs_key
self.backward_converter[self.site_pairs_key]=origin_key
def set_id_score_key(self,origin_key):
self.converter[origin_key]=self.id_score_key
self.backward_converter[self.id_score_key]=origin_key
def set_fdr_key(self,origin_key):
self.converter[origin_key]=self.fdr_key
self.backward_converter[self.fdr_key]=origin_key
def set_quantitation_key(self,origin_key):
self.converter[origin_key]=self.quantitation_key
self.backward_converter[self.quantitation_key]=origin_key
def set_psi_key(self,origin_key):
self.converter[origin_key]=self.psi_key
self.backward_converter[self.psi_key]=origin_key
def set_link_type_key(self,link_type_key):
self.converter[link_type_key]=self.link_type_key
self.backward_converter[self.link_type_key]=link_type_key
def get_converter(self):
'''
Returns the dictionary that convert the old keywords to the new ones
'''
self.check_keys()
return self.converter
def get_backward_converter(self):
'''
Returns the dictionary that convert the new keywords to the old ones
'''
self.check_keys()
return self.backward_converter
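# Hedged usage sketch (editor's illustration): map the column headers of a
# hypothetical csv cross-link file onto the standard keywords before loading;
# the header names in quotes are examples only.
def _example_keywords_converter():
    cldbkc = CrossLinkDataBaseKeywordsConverter()
    cldbkc.set_protein1_key("Protein 1")
    cldbkc.set_protein2_key("Protein 2")
    cldbkc.set_residue1_key("Residue 1")
    cldbkc.set_residue2_key("Residue 2")
    return cldbkc.get_converter()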
class ResiduePairListParser(_CrossLinkDataBaseStandardKeys):
'''
A class to handle different styles of site pairs parsers.
Implemented styles:
MSSTUDIO: [Y3-S756;Y3-K759;K4-S756;K4-K759] for crosslinks
[Y3-;K4-] for dead-ends
QUANTITATION: sp|P33298|PRS6B_YEAST:280:x:sp|P33298|PRS6B_YEAST:337
QUANTITATION (with ambiguity separator :|:): Fbw7:107:|:StrepII2x-Fbw7fl:408:x:Nedd8:48
'''
import re
def __init__(self,style):
_CrossLinkDataBaseStandardKeys.__init__(self)
        if style == "MSSTUDIO":
self.style=style
self.compulsory_keys= set([self.protein1_key,
self.protein2_key,
self.site_pairs_key])
        elif style == "XTRACT" or style == "QUANTITATION":
self.style=style
self.compulsory_keys= set([self.site_pairs_key])
        else:
            raise ValueError("ResiduePairListParser: unknown list parser style")
def get_compulsory_keys(self):
return self.compulsory_keys
def get_list(self,input_string):
'''
This function returns a list of cross-linked residues and the corresponding list of
cross-linked chains. The latter list can be empty, if the style doesn't have the
corresponding information.
'''
if self.style == "MSSTUDIO":
input_string=input_string.replace("[","")
input_string=input_string.replace("]","")
input_string_pairs=input_string.split(";")
residue_pair_indexes=[]
chain_pair_indexes=[]
for s in input_string_pairs:
m1=self.re.search(r'^(A|C|D|E|F|G|H|I|K|L|M|N|O|P|Q|R|S|T|Y|X|W)(\d+)-(A|C|D|E|F|G|H|I|K|L|M|N|O|P|Q|R|S|T|Y|X|W)(\d+)$',s)
m2=self.re.search(r'^(A|C|D|E|F|G|H|I|K|L|M|N|O|P|Q|R|S|T|Y|X|W)(\d+)-$',s)
if m1:
# cross link
residue_type_1,residue_index_1,residue_type_2,residue_index_2=m1.group(1,2,3,4)
residue_pair_indexes.append((residue_index_1,residue_index_2))
elif m2:
# dead end
residue_type_1,residue_index_1=m2.group(1,2)
# at this stage chain_pair_indexes is empty
return residue_pair_indexes,chain_pair_indexes
        if self.style == "XTRACT" or self.style == "QUANTITATION":
if ":x:" in input_string:
# if it is a crosslink....
input_strings=input_string.split(":x:")
first_peptides=input_strings[0].split(":|:")
second_peptides=input_strings[1].split(":|:")
first_peptides_indentifiers=[(f.split(":")[0],f.split(":")[1]) for f in first_peptides]
second_peptides_indentifiers=[(f.split(":")[0],f.split(":")[1]) for f in second_peptides]
residue_pair_indexes=[]
chain_pair_indexes=[]
for fpi in first_peptides_indentifiers:
for spi in second_peptides_indentifiers:
chain1=fpi[0]
chain2=spi[0]
residue1=fpi[1]
residue2=spi[1]
residue_pair_indexes.append((residue1,residue2))
chain_pair_indexes.append((chain1,chain2))
return residue_pair_indexes, chain_pair_indexes
else:
# if it is a monolink....
first_peptides = input_string.split(":|:")
first_peptides_indentifiers = [(f.split(":")[0], f.split(":")[1]) for f in first_peptides]
residue_indexes = []
chain_indexes = []
for fpi in first_peptides_indentifiers:
chain1=fpi[0]
residue1=fpi[1]
residue_indexes.append((residue1,))
chain_indexes.append((chain1,))
return residue_indexes, chain_indexes
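# Hedged usage sketch (editor's illustration): parsing a QUANTITATION-style
# site-pair string (taken from the class docstring above) yields parallel lists
# of residue pairs and chain pairs.
def _example_residue_pair_list_parser():
    rplp = ResiduePairListParser("QUANTITATION")
    pairs, chains = rplp.get_list(
        "sp|P33298|PRS6B_YEAST:280:x:sp|P33298|PRS6B_YEAST:337")
    return pairs, chains  # [('280', '337')], [('sp|P33298|PRS6B_YEAST', ...)]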
class FixedFormatParser(_CrossLinkDataBaseStandardKeys):
'''
A class to handle different XL format with fixed format
currently support ProXL
'''
def __init__(self,format):
_CrossLinkDataBaseStandardKeys.__init__(self)
        if format == "PROXL":
self.format=format
        else:
            raise ValueError("FixedFormatParser: unknown list format name")
def get_data(self,input_string):
        if self.format == "PROXL":
tockens=input_string.split("\t")
xl={}
if tockens[0]=="SEARCH ID(S)":
return None
else:
xl[self.protein1_key]=tockens[2]
xl[self.protein2_key]=tockens[4]
xl[self.residue1_key]=int(tockens[3])
xl[self.residue2_key]=int(tockens[5])
return xl
class CrossLinkDataBase(_CrossLinkDataBaseStandardKeys):
import operator
'''
    this class handles a cross-link dataset and does filtering
    operations, adding cross-links, merging datasets, etc.
'''
def __init__(self, converter=None, data_base=None, fasta_seq=None, linkable_aa=('K',)):
'''
Constructor.
@param converter an instance of CrossLinkDataBaseKeywordsConverter
@param data_base an instance of CrossLinkDataBase to build the new database on
@param fasta_seq an instance of IMP.pmi.topology.Sequences containing protein fasta sequences to check
crosslink consistency. If not given consistency will not be checked
@param linkable_aa a tuple containing one-letter amino acids which are linkable by the crosslinker;
only used if the database DOES NOT provide a value for a certain residueX_amino_acid_key
and if a fasta_seq is given
'''
if data_base is None:
self.data_base = {}
else:
self.data_base=data_base
_CrossLinkDataBaseStandardKeys.__init__(self)
if converter is not None:
self.cldbkc = converter #type: CrossLinkDataBaseKeywordsConverter
self.list_parser=self.cldbkc.rplp
self.converter = converter.get_converter()
else:
self.cldbkc = None #type: CrossLinkDataBaseKeywordsConverter
self.list_parser=None
self.converter = None
# default amino acids considered to be 'linkable' if none are given
self.def_aa_tuple = linkable_aa
self.fasta_seq = fasta_seq #type: IMP.pmi.topology.Sequences
self._update()
def _update(self):
'''
Update the whole dataset after changes
'''
self.update_cross_link_unique_sub_index()
self.update_cross_link_redundancy()
self.update_residues_links_number()
self.check_cross_link_consistency()
def __iter__(self):
sorted_ids=sorted(self.data_base.keys())
for k in sorted_ids:
for xl in self.data_base[k]:
yield xl
def xlid_iterator(self):
sorted_ids=sorted(self.data_base.keys())
for xlid in sorted_ids:
yield xlid
def __getitem__(self,xlid):
return self.data_base[xlid]
def __len__(self):
return len([xl for xl in self])
def get_name(self):
return self.name
def set_name(self,name):
new_data_base={}
for k in self.data_base:
new_data_base[k+"."+name]=self.data_base[k]
self.data_base=new_data_base
self.name=name
self._update()
def get_number_of_xlid(self):
return len(self.data_base)
def create_set_from_file(self,file_name,converter=None,FixedFormatParser=None):
'''
if FixedFormatParser is not specified, the file is comma-separated-values
@param file_name a txt file to be parsed
@param converter an instance of CrossLinkDataBaseKeywordsConverter
@param FixedFormatParser a parser for a fixed format
'''
if not FixedFormatParser:
xl_list=IMP.pmi.tools.get_db_from_csv(file_name)
if converter is not None:
self.cldbkc = converter
self.list_parser=self.cldbkc.rplp
self.converter = converter.get_converter()
if not self.list_parser:
# normal procedure without a list_parser
# each line is a cross-link
new_xl_dict={}
for nxl,xl in enumerate(xl_list):
new_xl={}
for k in xl:
if k in self.converter:
new_xl[self.converter[k]]=self.type[self.converter[k]](xl[k])
else:
new_xl[k]=xl[k]
if self.unique_id_key in self.cldbkc.get_setup_keys():
if new_xl[self.unique_id_key] not in new_xl_dict:
new_xl_dict[new_xl[self.unique_id_key]]=[new_xl]
else:
new_xl_dict[new_xl[self.unique_id_key]].append(new_xl)
else:
if str(nxl) not in new_xl_dict:
new_xl_dict[str(nxl)]=[new_xl]
else:
new_xl_dict[str(nxl)].append(new_xl)
else:
# with a list_parser, a line can be a list of ambiguous crosslinks
new_xl_dict={}
for nxl,entry in enumerate(xl_list):
# first get the translated keywords
new_dict={}
if self.site_pairs_key not in self.cldbkc.get_setup_keys():
raise Error("CrossLinkDataBase: expecting a site_pairs_key for the site pair list parser")
for k in entry:
if k in self.converter:
new_dict[self.converter[k]]=self.type[self.converter[k]](entry[k])
else:
new_dict[k]=entry[k]
residue_pair_list,chain_pair_list=self.list_parser.get_list(new_dict[self.site_pairs_key])
# then create the crosslinks
for n,p in enumerate(residue_pair_list):
is_monolink=False
if len(p)==1:
is_monolink=True
new_xl={}
for k in new_dict:
new_xl[k]=new_dict[k]
new_xl[self.residue1_key]=self.type[self.residue1_key](p[0])
if not is_monolink:
new_xl[self.residue2_key]=self.type[self.residue2_key](p[1])
if len(chain_pair_list)==len(residue_pair_list):
new_xl[self.protein1_key]=self.type[self.protein1_key](chain_pair_list[n][0])
if not is_monolink:
new_xl[self.protein2_key]=self.type[self.protein2_key](chain_pair_list[n][1])
if not is_monolink:
new_xl[self.link_type_key]="CROSSLINK"
else:
new_xl[self.link_type_key]="MONOLINK"
if self.unique_id_key in self.cldbkc.get_setup_keys():
if new_xl[self.unique_id_key] not in new_xl_dict:
new_xl_dict[new_xl[self.unique_id_key]]=[new_xl]
else:
new_xl_dict[new_xl[self.unique_id_key]].append(new_xl)
else:
if str(nxl) not in new_xl_dict:
new_xl[self.unique_id_key]=str(nxl+1)
new_xl_dict[str(nxl)]=[new_xl]
else:
new_xl[self.unique_id_key]=str(nxl+1)
new_xl_dict[str(nxl)].append(new_xl)
else:
'''
if FixedFormatParser is defined
'''
new_xl_dict={}
f=open(file_name,"r")
nxl=0
for line in f:
xl=FixedFormatParser.get_data(line)
if xl:
xl[self.unique_id_key]=str(nxl+1)
new_xl_dict[str(nxl)]=[xl]
nxl+=1
self.data_base=new_xl_dict
self.name=file_name
self._update()
def update_cross_link_unique_sub_index(self):
for k in self.data_base:
for n,xl in enumerate(self.data_base[k]):
xl[self.ambiguity_key]=len(self.data_base[k])
xl[self.unique_sub_index_key]=n+1
xl[self.unique_sub_id_key]=k+"."+str(n+1)
def update_cross_link_redundancy(self):
redundancy_data_base={}
for xl in self:
pra=_ProteinsResiduesArray(xl)
if pra not in redundancy_data_base:
redundancy_data_base[pra]=[xl[self.unique_sub_id_key]]
redundancy_data_base[pra.get_inverted()]=[xl[self.unique_sub_id_key]]
else:
redundancy_data_base[pra].append(xl[self.unique_sub_id_key])
redundancy_data_base[pra.get_inverted()].append(xl[self.unique_sub_id_key])
for xl in self:
pra=_ProteinsResiduesArray(xl)
xl[self.redundancy_key]=len(redundancy_data_base[pra])
xl[self.redundancy_list_key]=redundancy_data_base[pra]
def update_residues_links_number(self):
residue_links={}
for xl in self:
(p1,p2,r1,r2)=_ProteinsResiduesArray(xl)
if (p1,r1) not in residue_links:
residue_links[(p1,r1)]=set([(p2,r2)])
else:
residue_links[(p1,r1)].add((p2,r2))
if (p2,r2) not in residue_links:
residue_links[(p2,r2)]=set([(p1,r1)])
else:
residue_links[(p2,r2)].add((p1,r1))
for xl in self:
(p1,p2,r1,r2)=_ProteinsResiduesArray(xl)
xl[self.residue1_links_number_key]=len(residue_links[(p1,r1)])
xl[self.residue2_links_number_key]=len(residue_links[(p2,r2)])
def check_cross_link_consistency(self):
"""This function checks the consistency of the dataset with the amino acid sequence"""
if self.cldbkc and self.fasta_seq:
cnt_matched, cnt_matched_file = 0, 0
matched = {}
non_matched = {}
for xl in self:
(p1, p2, r1, r2) = _ProteinsResiduesArray(xl)
b_matched_file = False
if self.residue1_amino_acid_key in xl:
# either you know the residue type and aa_tuple is a single entry
aa_from_file = (xl[self.residue1_amino_acid_key].upper(),)
b_matched = self._match_xlinks(p1, r1, aa_from_file)
b_matched_file = b_matched
else:
# or pass the possible list of types that can be crosslinked
b_matched = self._match_xlinks(p1, r1, self.def_aa_tuple)
matched, non_matched = self._update_matched_xlinks(b_matched, p1, r1, matched, non_matched)
if self.residue2_amino_acid_key in xl:
aa_from_file = (xl[self.residue2_amino_acid_key].upper(), )
b_matched = self._match_xlinks(p2, r2, aa_from_file)
b_matched_file = b_matched
else:
b_matched = self._match_xlinks(p2, r2, self.def_aa_tuple)
matched, non_matched = self._update_matched_xlinks(b_matched, p2, r2, matched, non_matched)
if b_matched: cnt_matched += 1
if b_matched_file: cnt_matched_file += 1
if len(self) > 0:
percentage_matched = round(100*cnt_matched/len(self),1)
percentage_matched_file = round(100 * cnt_matched_file / len(self), 1)
#if matched: print "Matched xlinks:", matched
if matched or non_matched: print("check_cross_link_consistency: Out of %d crosslinks "
"%d were matched to the fasta sequence (%f %%).\n "
"%d were matched by using the crosslink file (%f %%)."%
(len(self),cnt_matched,percentage_matched,cnt_matched_file,
percentage_matched_file) )
if non_matched: print("check_cross_link_consistency: Warning: Non matched xlinks:",
[(prot_name, sorted(list(non_matched[prot_name]))) for prot_name in non_matched])
return matched,non_matched
def _match_xlinks(self, prot_name, res_index, aa_tuple):
# returns Boolean whether given aa matches a position in the fasta file
# cross link files usually start counting at 1 and not 0; therefore subtract -1 to compare with fasta
amino_dict = IMP.pmi.tools.ThreeToOneConverter()
res_index -= 1
for amino_acid in aa_tuple:
if len(amino_acid) == 3:
amino_acid = amino_dict[amino_acid.upper()]
if prot_name in self.fasta_seq.sequences:
seq = self.fasta_seq.sequences[prot_name]
# if we are looking at the first amino acid in a given sequence always return a match
# the first aa should always be the n-terminal aa
# which may form a crosslink in any case (for BS3-like crosslinkers)
# for some data sets the first aa is at position 1; todo: check why this is the case
if res_index == 0 or res_index == 1:
return True
if res_index < len(seq):
if amino_acid == seq[res_index]:
return True
# else:
# print "Could not match", prot, res+1, amino_acid, seq[res]
return False
def _update_matched_xlinks(self, b_matched, prot, res, matched, non_matched):
if b_matched:
if prot in matched:
matched[prot].add(res)
else:
matched[prot] = set([res])
else:
if prot in non_matched:
non_matched[prot].add(res)
else:
non_matched[prot] = set([res])
return matched, non_matched
def get_cross_link_string(self,xl):
string='|'
for k in self.ordered_key_list:
try:
string+=str(k)+":"+str(xl[k])+"|"
except KeyError:
continue
for k in xl:
if k not in self.ordered_key_list:
string+=str(k)+":"+str(xl[k])+"|"
return string
def get_short_cross_link_string(self,xl):
string='|'
list_of_keys=[self.data_set_name_key,
self.unique_sub_id_key,
self.protein1_key,
self.residue1_key,
self.protein2_key,
self.residue2_key,
self.state_key,
self.psi_key]
for k in list_of_keys:
try:
string+=str(xl[k])+"|"
except KeyError:
continue
return string
def filter(self,FilterOperator):
new_xl_dict={}
for id in self.data_base.keys():
for xl in self.data_base[id]:
if FilterOperator.evaluate(xl):
if id not in new_xl_dict:
new_xl_dict[id]=[xl]
else:
new_xl_dict[id].append(xl)
self._update()
return CrossLinkDataBase(self.cldbkc,new_xl_dict)
def merge(self,CrossLinkDataBase1,CrossLinkDataBase2):
'''
This function merges two cross-link datasets so that if two conflicting crosslinks have the same
cross-link UniqueIDS, the cross-links will be appended under the same UniqueID slots
with different SubIDs
'''
pass
def append_database(self,CrossLinkDataBase2):
'''
        This function appends one cross-link dataset to another. Unique ids will be renamed to avoid
conflicts.
'''
name1=self.get_name()
name2=CrossLinkDataBase2.get_name()
if name1 == name2:
name1=id(self)
name2=id(CrossLinkDataBase2)
#rename first database:
new_data_base={}
for k in self.data_base:
new_data_base[k]=self.data_base[k]
for k in CrossLinkDataBase2.data_base:
new_data_base[k]=CrossLinkDataBase2.data_base[k]
self.data_base=new_data_base
self._update()
def set_value(self,key,new_value,FilterOperator=None):
'''
This function changes the value for a given key in the database
For instance one can change the name of a protein
@param key: the key in the database that must be changed
@param new_value: the new value of the key
@param FilterOperator: optional FilterOperator to change the value to
a subset of the database
example: `cldb1.set_value(cldb1.protein1_key,'FFF',FO(cldb.protein1_key,operator.eq,"AAA"))`
'''
for xl in self:
if FilterOperator is not None:
if FilterOperator.evaluate(xl):
xl[key]=new_value
else:
xl[key]=new_value
self._update()
def get_values(self,key):
'''
this function returns the list of values for a given key in the database
alphanumerically sorted
'''
values=set()
for xl in self:
values.add(xl[key])
return sorted(list(values))
def offset_residue_index(self,protein_name,offset):
'''
        This function offsets the residue indexes of a given protein by a specified value
@param protein_name: the protein name that need to be changed
@param offset: the offset value
'''
for xl in self:
if xl[self.protein1_key] == protein_name:
xl[self.residue1_key]=xl[self.residue1_key]+offset
if xl[self.protein2_key] == protein_name:
xl[self.residue2_key]=xl[self.residue2_key]+offset
self._update()
def create_new_keyword(self,keyword,values_from_keyword=None):
'''
        This function creates a new keyword for the whole database and sets the values from
        an existing keyword (optional), otherwise the values are set to None
@param keyword the new keyword name:
@param values_from_keyword the keyword from which we are copying the values:
'''
for xl in self:
if values_from_keyword is not None:
xl[keyword] = xl[values_from_keyword]
else:
xl[keyword] = None
self._update()
def rename_proteins(self,old_to_new_names_dictionary):
'''
This function renames all proteins contained in the input dictionary
        from the old names (keys) to the new names (values)
'''
for old_name in old_to_new_names_dictionary:
new_name=old_to_new_names_dictionary[old_name]
fo2=FilterOperator(self.protein1_key,operator.eq,old_name)
self.set_value(self.protein1_key,new_name,fo2)
fo2=FilterOperator(self.protein2_key,operator.eq,old_name)
self.set_value(self.protein2_key,new_name,fo2)
def clone_protein(self,protein_name,new_protein_name):
new_xl_dict={}
for id in self.data_base.keys():
new_data_base=[]
for xl in self.data_base[id]:
new_data_base.append(xl)
if xl[self.protein1_key]==protein_name and xl[self.protein2_key]!=protein_name:
new_xl=dict(xl)
new_xl[self.protein1_key]=new_protein_name
new_data_base.append(new_xl)
elif xl[self.protein1_key]!=protein_name and xl[self.protein2_key]==protein_name:
new_xl=dict(xl)
new_xl[self.protein2_key]=new_protein_name
new_data_base.append(new_xl)
elif xl[self.protein1_key]==protein_name and xl[self.protein2_key]==protein_name:
new_xl=dict(xl)
new_xl[self.protein1_key]=new_protein_name
new_data_base.append(new_xl)
new_xl=dict(xl)
new_xl[self.protein2_key]=new_protein_name
new_data_base.append(new_xl)
new_xl=dict(xl)
new_xl[self.protein1_key]=new_protein_name
new_xl[self.protein2_key]=new_protein_name
new_data_base.append(new_xl)
self.data_base[id]=new_data_base
self._update()
def filter_out_same_residues(self):
'''
        This function removes cross-links applied to the same residue
        (i.e., same chain name and residue number)
'''
new_xl_dict={}
for id in self.data_base.keys():
new_data_base=[]
for xl in self.data_base[id]:
if xl[self.protein1_key]==xl[self.protein2_key] and xl[self.residue1_key]==xl[self.residue2_key]:
continue
else:
new_data_base.append(xl)
self.data_base[id]=new_data_base
self._update()
def jackknife(self,percentage):
'''
        This method returns a CrossLinkDataBase object containing
        a random subsample of the original cross-link database.
        @param percentage: float between 0 and 1, the fraction of
        spectra taken from the original list
'''
import random
if percentage > 1.0 or percentage < 0.0:
raise ValueError('the percentage of random cross-link spectra should be between 0 and 1')
nspectra=self.get_number_of_xlid()
nrandom_spectra=int(nspectra*percentage)
        random_keys=random.sample(list(self.data_base.keys()),nrandom_spectra)
new_data_base={}
for k in random_keys:
new_data_base[k]=self.data_base[k]
return CrossLinkDataBase(self.cldbkc,new_data_base)
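    # Hedged usage sketch (editorial addition, not part of the original module):
    # assuming `cldb` is an already-populated CrossLinkDataBase, a random half of
    # the spectra can be kept with
    #
    #     half_cldb = cldb.jackknife(0.5)
    #
    # The subsampling acts on whole spectra (unique IDs), not on individual cross-links.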
def __str__(self):
outstr=''
sorted_ids=sorted(self.data_base.keys())
for id in sorted_ids:
outstr+=id+"\n"
for xl in self.data_base[id]:
for k in self.ordered_key_list:
try:
outstr+="--- "+str(k)+" "+str(xl[k])+"\n"
except KeyError:
continue
for k in xl:
if k not in self.ordered_key_list:
outstr+="--- "+str(k)+" "+str(xl[k])+"\n"
outstr+="-------------\n"
return outstr
def plot(self,filename,**kwargs):
        import math
        import matplotlib
        matplotlib.use('Agg')
        import matplotlib.pyplot as plt
        import matplotlib.colors
if kwargs["type"] == "scatter":
cmap=plt.get_cmap("rainbow")
norm=matplotlib.colors.Normalize(vmin=0.0, vmax=1.0)
xkey=kwargs["xkey"]
ykey=kwargs["ykey"]
if "colorkey" in kwargs:
colorkey=kwargs["colorkey"]
if "sizekey" in kwargs:
sizekey=kwargs["sizekey"]
if "logyscale" in kwargs:
logyscale=kwargs["logyscale"]
else:
logyscale=False
xs=[]
ys=[]
colors=[]
for xl in self:
try:
xs.append(float(xl[xkey]))
if logyscale:
ys.append(math.log10(float(xl[ykey])))
else:
ys.append(float(xl[ykey]))
colors.append(float(xl[colorkey]))
except ValueError:
print("CrossLinkDataBase.plot: Value error for cross-link %s" % (xl[self.unique_id_key]))
continue
cs=[]
for color in colors:
cindex=(color-min(colors))/(max(colors)-min(colors))
cs.append(cmap(cindex))
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(xs, ys, s=50.0, c=cs, alpha=0.8,marker="o")
ax.set_xlabel(xkey)
ax.set_ylabel(ykey)
plt.savefig(filename)
plt.show()
plt.close()
if kwargs["type"] == "residue_links":
#plot the number of distinct links to a residue
#in an histogram
#molecule name
molecule=kwargs["molecule"]
if type(molecule) is IMP.pmi.topology.Molecule:
length=len(molecule.sequence)
molecule=molecule.get_name()
else:
#you need a IMP.pmi.topology.Sequences object
sequences_object=kwargs["sequences_object"]
sequence=sequences_object.sequences[molecule]
length=len(sequence)
histogram=[0]*length
for xl in self:
if xl[self.protein1_key]==molecule:
histogram[xl[self.residue1_key]-1]=xl[self.residue1_links_number_key]
if xl[self.protein2_key]==molecule:
histogram[xl[self.residue2_key]-1]=xl[self.residue2_links_number_key]
rects = plt.bar(range(1,length+1), histogram)
#bar_width,
#alpha=opacity,
#color='b',
#yerr=std_men,
#error_kw=error_config,
#label='Men')
plt.savefig(filename)
plt.show()
plt.close()
if kwargs["type"] == "histogram":
valuekey=kwargs["valuekey"]
reference_xline=kwargs["reference_xline"]
valuename=valuekey
bins=kwargs["bins"]
values_list=[]
for xl in self:
try:
values_list.append(float(xl[valuekey]))
except ValueError:
print("CrossLinkDataBase.plot: Value error for cross-link %s" % (xl[self.unique_id_key]))
continue
            IMP.pmi.output.plot_field_histogram(
                filename, [values_list], valuename=valuename, bins=bins,
                colors=None, format="pdf",
                reference_xline=reference_xline, yplotrange=None,
                xplotrange=None, normalized=True,
                leg_names=None)
def dump(self,json_filename):
import json
with open(json_filename, 'w') as fp:
json.dump(self.data_base, fp, sort_keys=True, indent=2, default=set_json_default)
def load(self,json_filename):
import json
with open(json_filename, 'r') as fp:
self.data_base = json.load(fp)
self._update()
#getting rid of unicode
# (can't do this in Python 3, since *everything* is Unicode there)
if sys.version_info[0] < 3:
for xl in self:
for k,v in xl.iteritems():
if type(k) is unicode: k=str(k)
if type(v) is unicode: v=str(v)
xl[k]=v
def save_csv(self,filename):
import csv
data=[]
sorted_ids=None
sorted_group_ids=sorted(self.data_base.keys())
for group in sorted_group_ids:
group_block=[]
for xl in self.data_base[group]:
if not sorted_ids:
sorted_ids=sorted(xl.keys())
values=[xl[k] for k in sorted_ids]
group_block.append(values)
data+=group_block
with open(filename, 'w') as fp:
a = csv.writer(fp, delimiter=',')
a.writerow(sorted_ids)
a.writerows(data)
def get_number_of_unique_crosslinked_sites(self):
"""
        Returns the number of non-redundant cross-linked sites
"""
praset=set()
for xl in self:
pra=_ProteinsResiduesArray(xl)
prai=pra.get_inverted()
praset.add(pra)
praset.add(prai)
return len(list(praset))
class JaccardDistanceMatrix(object):
"""This class allows to compute and plot the distance between datasets"""
def __init__(self,cldb_dictionary):
"""Input a dictionary where keys are cldb names and values are cldbs"""
import scipy.spatial.distance
self.dbs=cldb_dictionary
self.keylist=list(self.dbs.keys())
self.distances=list()
for i,key1 in enumerate(self.keylist):
for key2 in self.keylist[i+1:]:
distance=self.get_distance(key1,key2)
self.distances.append(distance)
self.distances=scipy.spatial.distance.squareform(self.distances)
def get_distance(self,key1,key2):
return 1.0-self.jaccard_index(self.dbs[key1],self.dbs[key2])
def jaccard_index(self,CrossLinkDataBase1,CrossLinkDataBase2):
"""Similarity index between two datasets
https://en.wikipedia.org/wiki/Jaccard_index"""
set1=set()
set2=set()
for xl1 in CrossLinkDataBase1:
a1f=_ProteinsResiduesArray(xl1)
a1b=a1f.get_inverted()
set1.add(a1f)
set1.add(a1b)
for xl2 in CrossLinkDataBase2:
a2f=_ProteinsResiduesArray(xl2)
a2b=a2f.get_inverted()
set2.add(a2f)
set2.add(a2b)
return float(len(set1&set2)/2)/(len(set1)/2+len(set2)/2-len(set1&set2)/2)
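    # Worked example (editorial sketch): if the symmetrised residue-pair sets of two
    # databases correspond to {A, B, C} and {B, C, D} (each pair is stored forward and
    # inverted, hence the divisions by two above), the Jaccard index is
    # |{B, C}| / |{A, B, C, D}| = 2/4 = 0.5, and get_distance() returns 1 - 0.5 = 0.5.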
def plot_matrix(self,figurename="clustermatrix.pdf"):
import matplotlib as mpl
import numpy
mpl.use('Agg')
import matplotlib.pylab as pl
from scipy.cluster import hierarchy as hrc
raw_distance_matrix=self.distances
labels=self.keylist
fig = pl.figure()
#fig.autolayout=True
ax = fig.add_subplot(1,1,1)
dendrogram = hrc.dendrogram(
hrc.linkage(raw_distance_matrix),
color_threshold=7,
no_labels=True)
leaves_order = dendrogram['leaves']
ax.set_xlabel('Dataset')
ax.set_ylabel('Jaccard Distance')
pl.tight_layout()
pl.savefig("dendrogram."+figurename, dpi=300)
pl.close(fig)
fig = pl.figure()
#fig.autolayout=True
ax = fig.add_subplot(1,1,1)
cax = ax.imshow(
raw_distance_matrix[leaves_order,
:][:,
leaves_order],
interpolation='nearest')
cb = fig.colorbar(cax)
cb.set_label('Jaccard Distance')
ax.set_xlabel('Dataset')
ax.set_ylabel('Dataset')
ax.set_xticks(range(len(labels)))
ax.set_xticklabels(numpy.array(labels)[leaves_order], rotation='vertical')
ax.set_yticks(range(len(labels)))
ax.set_yticklabels(numpy.array(labels)[leaves_order], rotation='horizontal')
pl.tight_layout()
pl.savefig("matrix."+figurename, dpi=300)
pl.close(fig)
class MapCrossLinkDataBaseOnStructure(object):
'''
This class maps a CrossLinkDataBase on a given structure
and save an rmf file with color-coded crosslinks
'''
def __init__(self,CrossLinkDataBase,rmf_or_stat_handler):
self.CrossLinkDataBase=CrossLinkDataBase
if type(rmf_or_stat_handler) is IMP.pmi.output.RMFHierarchyHandler or \
type(rmf_or_stat_handler) is IMP.pmi.output.StatHierarchyHandler:
self.prots=rmf_or_stat_handler
def compute_distances(self):
data=[]
sorted_ids=None
sorted_group_ids=sorted(self.CrossLinkDataBase.data_base.keys())
for group in sorted_group_ids:
#group_block=[]
group_dists=[]
for xl in self.CrossLinkDataBase.data_base[group]:
if not sorted_ids:
sorted_ids=sorted(xl.keys())
data.append(sorted_ids+["UniqueID","Distance","MinAmbiguousDistance"])
(c1,c2,r1,r2)=_ProteinsResiduesArray(xl)
try:
(mdist,p1,p2),(state1,copy1,state2,copy2)=self._get_distance_and_particle_pair(r1,c1,r2,c2)
except:
mdist="None"
state1="None"
copy1="None"
state2="None"
copy2="None"
values=[xl[k] for k in sorted_ids]
values+=[group,mdist]
group_dists.append(mdist)
#group_block.append(values)
xl["Distance"]=mdist
xl["State1"]=state1
xl["Copy1"]=copy1
xl["State2"]=state2
xl["Copy2"]=copy2
for xl in self.CrossLinkDataBase.data_base[group]:
xl["MinAmbiguousDistance"]=min(group_dists)
def _get_distance_and_particle_pair(self,r1,c1,r2,c2):
'''more robust and slower version of above'''
sel=IMP.atom.Selection(self.prots,molecule=c1,residue_index=r1,resolution=1)
selpart_1=sel.get_selected_particles()
if len(selpart_1)==0:
print("MapCrossLinkDataBaseOnStructure: Warning: no particle selected for first site")
return None
sel=IMP.atom.Selection(self.prots,molecule=c2,residue_index=r2,resolution=1)
selpart_2=sel.get_selected_particles()
if len(selpart_2)==0:
print("MapCrossLinkDataBaseOnStructure: Warning: no particle selected for second site")
return None
results=[]
for p1 in selpart_1:
for p2 in selpart_2:
if p1 == p2 and r1 == r2: continue
d1=IMP.core.XYZ(p1)
d2=IMP.core.XYZ(p2)
#round distance to second decimal
dist=float(int(IMP.core.get_distance(d1,d2)*100.0))/100.0
h1=IMP.atom.Hierarchy(p1)
while not IMP.atom.Molecule.get_is_setup(h1.get_particle()):
h1=h1.get_parent()
copy_index1=IMP.atom.Copy(h1).get_copy_index()
while not IMP.atom.State.get_is_setup(h1.get_particle()):
h1=h1.get_parent()
state_index1=IMP.atom.State(h1).get_state_index()
h2=IMP.atom.Hierarchy(p2)
while not IMP.atom.Molecule.get_is_setup(h2.get_particle()):
h2=h2.get_parent()
copy_index2=IMP.atom.Copy(h2).get_copy_index()
while not IMP.atom.State.get_is_setup(h2.get_particle()):
h2=h2.get_parent()
state_index2=IMP.atom.State(h2).get_state_index()
results.append((dist,state_index1,copy_index1,state_index2,copy_index2,p1,p2))
if len(results)==0: return None
results_sorted = sorted(results, key=operator.itemgetter(0,1,2,3,4))
return (results_sorted[0][0],results_sorted[0][5],results_sorted[0][6]),(results_sorted[0][1],results_sorted[0][2],results_sorted[0][3],results_sorted[0][4])
def save_rmf_snapshot(self,filename,color_id=None):
if color_id is None:
color_id=self.CrossLinkDataBase.id_score_key
sorted_group_ids=sorted(self.CrossLinkDataBase.data_base.keys())
list_of_pairs=[]
color_scores=[]
for group in sorted_group_ids:
group_dists_particles=[]
for xl in self.CrossLinkDataBase.data_base[group]:
xllabel=self.CrossLinkDataBase.get_short_cross_link_string(xl)
(c1,c2,r1,r2)=_ProteinsResiduesArray(xl)
try:
(mdist,p1,p2),(state1,copy1,state2,copy2)=self._get_distance_and_particle_pair(r1,c1,r2,c2)
except TypeError:
print("TypeError or missing chain/residue ",r1,c1,r2,c2)
continue
group_dists_particles.append((mdist,p1,p2,xllabel,float(xl[color_id])))
if group_dists_particles:
(minmdist,minp1,minp2,minxllabel,mincolor_score)=min(group_dists_particles, key = lambda t: t[0])
color_scores.append(mincolor_score)
list_of_pairs.append((minp1,minp2,minxllabel,mincolor_score))
else:
continue
linear = IMP.core.Linear(0, 0.0)
linear.set_slope(1.0)
dps2 = IMP.core.DistancePairScore(linear)
rslin = IMP.RestraintSet(self.model, 'linear_dummy_restraints')
sgs=[]
offset=min(color_scores)
maxvalue=max(color_scores)
for pair in list_of_pairs:
pr = IMP.core.PairRestraint(self.model, dps2, (pair[0], pair[1]))
pr.set_name(pair[2])
factor=(pair[3]-offset)/(maxvalue-offset)
print(factor)
c=IMP.display.get_rgb_color(factor)
seg=IMP.algebra.Segment3D(IMP.core.XYZ(pair[0]).get_coordinates(),IMP.core.XYZ(pair[1]).get_coordinates())
rslin.add_restraint(pr)
sgs.append(IMP.display.SegmentGeometry(seg,c,pair[2]))
rh = RMF.create_rmf_file(filename)
IMP.rmf.add_hierarchies(rh, self.prots)
IMP.rmf.add_restraints(rh,[rslin])
IMP.rmf.add_geometries(rh, sgs)
IMP.rmf.save_frame(rh)
del rh
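    # Hedged usage sketch (editorial addition): given a populated CrossLinkDataBase
    # `cldb` and an IMP.pmi.output.RMFHierarchyHandler `rmfh` for a sampled model,
    #
    #     mapper = MapCrossLinkDataBaseOnStructure(cldb, rmfh)
    #     mapper.compute_distances()
    #     mapper.save_rmf_snapshot("xlinks_on_structure.rmf")
    #
    # writes an RMF file with one colored segment per minimum-distance cross-link.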
class CrossLinkDataBaseFromStructure(object):
'''
This class generates a CrossLinkDataBase from a given structure
'''
def __init__(self,representation=None,
system=None,
residue_types_1=["K"],
residue_types_2=["K"],
reactivity_range=[0,1],
kt=1.0):
import numpy.random
import math
cldbkc=CrossLinkDataBaseKeywordsConverter()
cldbkc.set_protein1_key("Protein1")
cldbkc.set_protein2_key("Protein2")
cldbkc.set_residue1_key("Residue1")
cldbkc.set_residue2_key("Residue2")
self.cldb=CrossLinkDataBase(cldbkc)
if representation is not None:
#PMI 1.0 mode
self.mode="pmi1"
self.representation=representation
self.model=self.representation.m
elif system is not None:
#PMI 2.0 mode
self.system=system
self.model=self.system.mdl
self.mode="pmi2"
else:
print("Argument error: please provide either a representation object or a IMP.Hierarchy")
raise
self.residue_types_1=residue_types_1
self.residue_types_2=residue_types_2
self.kt=kt
self.indexes_dict1={}
self.indexes_dict2={}
self.protein_residue_dict={}
self.reactivity_dictionary={}
self.euclidean_interacting_pairs=None
self.xwalk_interacting_pairs=None
import random
if self.mode=="pmi1":
for protein in self.representation.sequence_dict.keys():
# we are saving a dictionary with protein name, residue number and random reactivity of the residue
seq=self.representation.sequence_dict[protein]
residues=[i for i in range(1,len(seq)+1) if ((seq[i-1] in self.residue_types_1) or (seq[i-1] in self.residue_types_2))]
for r in residues:
# uniform random reactivities
#self.reactivity_dictionary[(protein,r)]=random.uniform(reactivity_range[0],reactivity_range[1])
# getting reactivities from the CDF of an exponential distribution
rexp=numpy.random.exponential(0.1)
prob=1.0-math.exp(-rexp)
self.reactivity_dictionary[(protein,r)]=prob
residues1=[i for i in range(1,len(seq)+1) if seq[i-1] in self.residue_types_1]
residues2=[i for i in range(1,len(seq)+1) if seq[i-1] in self.residue_types_2]
for r in residues1:
h=IMP.pmi.tools.select_by_tuple(self.representation,(r,r,protein),resolution=1)[0]
p=h.get_particle()
index=p.get_index()
self.indexes_dict1[index]=(protein,r)
self.protein_residue_dict[(protein,r)]=index
for r in residues2:
h=IMP.pmi.tools.select_by_tuple(self.representation,(r,r,protein),resolution=1)[0]
p=h.get_particle()
index=p.get_index()
self.indexes_dict2[index]=(protein,r)
self.protein_residue_dict[(protein,r)]=index
if self.mode=="pmi2":
for state in self.system.get_states():
for moleculename,molecules in state.get_molecules().items():
for molecule in molecules:
# we are saving a dictionary with protein name, residue number and random reactivity of the residue
seq=molecule.sequence
residues=[i for i in range(1,len(seq)+1) if ((seq[i-1] in self.residue_types_1) or (seq[i-1] in self.residue_types_2))]
for r in residues:
# uniform random reactivities
#self.reactivity_dictionary[(protein,r)]=random.uniform(reactivity_range[0],reactivity_range[1])
# getting reactivities from the CDF of an exponential distribution
rexp=numpy.random.exponential(0.00000001)
prob=1.0-math.exp(-rexp)
self.reactivity_dictionary[(molecule,r)]=prob
residues1=[i for i in range(1,len(seq)+1) if seq[i-1] in self.residue_types_1]
residues2=[i for i in range(1,len(seq)+1) if seq[i-1] in self.residue_types_2]
for r in residues1:
s=IMP.atom.Selection(molecule.hier,residue_index=r,resolution=1)
ps=s.get_selected_particles()
for p in ps:
index=p.get_index()
self.indexes_dict1[index]=(molecule,r)
self.protein_residue_dict[(molecule,r)]=index
for r in residues2:
s=IMP.atom.Selection(molecule.hier,residue_index=r,resolution=1)
ps=s.get_selected_particles()
for p in ps:
index=p.get_index()
self.indexes_dict2[index]=(molecule,r)
self.protein_residue_dict[(molecule,r)]=index
def get_all_possible_pairs(self):
n=float(len(self.protein_residue_dict.keys()))
return n*(n-1.0)/2.0
def get_all_feasible_pairs(self,distance=21):
import itertools
particle_index_pairs=[]
nxl=0
for a,b in itertools.combinations(self.protein_residue_dict.keys(),2):
new_xl={}
index1=self.protein_residue_dict[a]
index2=self.protein_residue_dict[b]
particle_distance=IMP.core.get_distance(IMP.core.XYZ(IMP.get_particles(self.model,[index1])[0]),IMP.core.XYZ(IMP.get_particles(self.model,[index2])[0]))
if particle_distance <= distance:
particle_index_pairs.append((index1,index2))
if self.mode=="pmi1":
new_xl[self.cldb.protein1_key]=a[0]
new_xl[self.cldb.protein2_key]=b[0]
elif self.mode=="pmi2":
new_xl[self.cldb.protein1_key]=a[0].get_name()
new_xl[self.cldb.protein2_key]=b[0].get_name()
new_xl["molecule_object1"]=a[0]
new_xl["molecule_object2"]=b[0]
new_xl[self.cldb.residue1_key]=a[1]
new_xl[self.cldb.residue2_key]=b[1]
self.cldb.data_base[str(nxl)]=[new_xl]
nxl+=1
self.cldb._update()
return self.cldb
def get_data_base(self,total_number_of_spectra,
ambiguity_probability=0.1,
noise=0.01,
distance=21,
max_delta_distance=10.0,
xwalk_bin_path=None,
confidence_false=0.75,
confidence_true=0.75):
import math
from random import random,uniform
import numpy as np
number_of_spectra=1
self.beta_true=-1.4427*math.log(0.5*(1.0-confidence_true))
self.beta_false=-1.4427*math.log(0.5*(1.0-confidence_false))
self.cldb.data_base[str(number_of_spectra)]=[]
self.sites_weighted=None
while number_of_spectra<total_number_of_spectra:
if random() > ambiguity_probability and len(self.cldb.data_base[str(number_of_spectra)]) != 0:
# new spectrum
number_of_spectra+=1
self.cldb.data_base[str(number_of_spectra)]=[]
noisy=False
if random() > noise:
# not noisy crosslink
pra,dist=self.get_random_residue_pair(distance,xwalk_bin_path,max_delta_distance)
else:
# noisy crosslink
pra,dist=self.get_random_residue_pair(None,None,None)
noisy=True
new_xl={}
if self.mode=="pmi1":
new_xl[self.cldb.protein1_key]=pra[0]
new_xl[self.cldb.protein2_key]=pra[1]
elif self.mode=="pmi2":
new_xl[self.cldb.protein1_key]=pra[0].get_name()
new_xl[self.cldb.protein2_key]=pra[1].get_name()
new_xl["molecule_object1"]=pra[0]
new_xl["molecule_object2"]=pra[1]
new_xl[self.cldb.residue1_key]=pra[2]
new_xl[self.cldb.residue2_key]=pra[3]
new_xl["Noisy"]=noisy
# the reactivity is defined as r=1-exp(-k*Delta t)
new_xl["Reactivity_Residue1"]=self.reactivity_dictionary[(pra[0],pra[2])]
new_xl["Reactivity_Residue2"]=self.reactivity_dictionary[(pra[1],pra[3])]
r1=new_xl["Reactivity_Residue1"]
r2=new_xl["Reactivity_Residue2"]
#combined reactivity 1-exp(-k12*Delta t),
# k12=k1*k2/(k1+k2)
#new_xl["Reactivity"]=1.0-math.exp(-math.log(1.0/(1.0-r1))*math.log(1.0/(1.0-r2))/math.log(1.0/(1.0-r1)*1.0/(1.0-r2)))
if noisy:
#new_xl["Score"]=uniform(-1.0,1.0)
new_xl["Score"]=np.random.beta(1.0,self.beta_false)
else:
#new_xl["Score"]=new_xl["Reactivity"]+uniform(0.0,2.0)
new_xl["Score"]=1.0-np.random.beta(1.0,self.beta_true)
new_xl["TargetDistance"]=dist
new_xl["NoiseProbability"]=noise
new_xl["AmbiguityProbability"]=ambiguity_probability
# getting if it is intra or inter rigid body
(p1,p2)=IMP.get_particles(self.model,[self.protein_residue_dict[(pra[0],pra[2])],
self.protein_residue_dict[(pra[1],pra[3])]])
if(IMP.core.RigidMember.get_is_setup(p1) and
IMP.core.RigidMember.get_is_setup(p2) and
IMP.core.RigidMember(p1).get_rigid_body() ==
IMP.core.RigidMember(p2).get_rigid_body()):
new_xl["InterRigidBody"] = False
elif (IMP.core.RigidMember.get_is_setup(p1) and
IMP.core.RigidMember.get_is_setup(p2) and
IMP.core.RigidMember(p1).get_rigid_body() !=
IMP.core.RigidMember(p2).get_rigid_body()):
new_xl["InterRigidBody"] = True
else:
new_xl["InterRigidBody"] = None
self.cldb.data_base[str(number_of_spectra)].append(new_xl)
self.cldb._update()
return self.cldb
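    # Hedged usage sketch (editorial addition): assuming `sys` is an
    # IMP.pmi.topology.System with coordinates already built,
    #
    #     cldbfs = CrossLinkDataBaseFromStructure(system=sys)
    #     cldb = cldbfs.get_data_base(100, ambiguity_probability=0.1,
    #                                 noise=0.01, distance=21)
    #
    # generates synthetic spectra; scores of noisy links are drawn from
    # Beta(1, beta_false) and true links from 1 - Beta(1, beta_true), as set up above.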
def get_random_residue_pair(self,distance=None,xwalk_bin_path=None,max_delta_distance=None):
import IMP.pmi.tools
import math
from random import choice,uniform
if distance is None:
# get a random pair
while True:
if self.mode=="pmi1":
                    protein1=choice(list(self.representation.sequence_dict.keys()))
                    protein2=choice(list(self.representation.sequence_dict.keys()))
seq1=self.representation.sequence_dict[protein1]
seq2=self.representation.sequence_dict[protein2]
residue1=choice([i for i in range(1,len(seq1)+1) if seq1[i-1] in self.residue_types_1])
residue2=choice([i for i in range(1,len(seq2)+1) if seq2[i-1] in self.residue_types_2])
h1=IMP.pmi.tools.select_by_tuple(self.representation,(residue1,residue1,protein1),resolution=1)[0]
h2=IMP.pmi.tools.select_by_tuple(self.representation,(residue2,residue2,protein2),resolution=1)[0]
particle_distance=IMP.core.get_distance(IMP.core.XYZ(h1.get_particle()),IMP.core.XYZ(h2.get_particle()))
if (protein1,residue1) != (protein2,residue2):
break
elif self.mode=="pmi2":
                    (protein1,residue1)=choice(list(self.protein_residue_dict.keys()))
                    (protein2,residue2)=choice(list(self.protein_residue_dict.keys()))
index1=self.protein_residue_dict[(protein1,residue1)]
index2=self.protein_residue_dict[(protein2,residue2)]
particle_distance=IMP.core.get_distance(IMP.core.XYZ(IMP.get_particles(self.model,[index1])[0]),IMP.core.XYZ(IMP.get_particles(self.model,[index2])[0]))
if (protein1,residue1) != (protein2,residue2):
break
else:
# get a pair of residues whose distance is below the threshold
if not xwalk_bin_path:
gcpf = IMP.core.GridClosePairsFinder()
gcpf.set_distance(distance+max_delta_distance)
while True:
#setup the reaction rates lists
if not self.sites_weighted:
self.sites_weighted=[]
for key in self.reactivity_dictionary:
r=self.reactivity_dictionary[key]
self.sites_weighted.append((key,r))
#get a random reaction site
first_site=self.weighted_choice(self.sites_weighted)
#get all distances
if not self.euclidean_interacting_pairs:
self.euclidean_interacting_pairs=gcpf.get_close_pairs(self.model,
self.indexes_dict1.keys(),
self.indexes_dict2.keys())
#get the partner for the first reacted site
first_site_pairs = [pair for pair in self.euclidean_interacting_pairs
if self.indexes_dict1[pair[0]] == first_site or
self.indexes_dict2[pair[1]] == first_site]
if len(first_site_pairs)==0: continue
#build the list of second reaction sites
second_sites_weighted=[]
for pair in first_site_pairs:
if self.indexes_dict1[pair[0]] == first_site: second_site = self.indexes_dict2[pair[1]]
if self.indexes_dict2[pair[1]] == first_site: second_site = self.indexes_dict1[pair[0]]
r=self.reactivity_dictionary[second_site]
second_sites_weighted.append((second_site,r))
second_site=self.weighted_choice(second_sites_weighted)
"""
interacting_pairs_weighted=[]
for pair in self.euclidean_interacting_pairs:
r1=self.reactivity_dictionary[self.indexes_dict1[pair[0]]]
r2=self.reactivity_dictionary[self.indexes_dict2[pair[1]]]
#combined reactivity 1-exp(-k12*Delta t),
# k12=k1*k2/(k1+k2)
#print(r1,r2,dist)
r12=1.0-math.exp(-math.log(1.0/(1.0-r1))*math.log(1.0/(1.0-r2))/math.log(1.0/(1.0-r1)*1.0/(1.0-r2)))
interacting_pairs_weighted.append((pair,r12))
#weight1=math.exp(-self.reactivity_dictionary[self.indexes_dict1[pair[0]]]/self.kt)
#weight2=math.exp(-self.reactivity_dictionary[self.indexes_dict2[pair[1]]]/self.kt)
#interacting_pairs_weighted.append((pair,weight1*weight2))
while True:
pair=self.weighted_choice(interacting_pairs_weighted)
protein1,residue1=self.indexes_dict1[pair[0]]
protein2,residue2=self.indexes_dict2[pair[1]]
particle_pair=IMP.get_particles(self.model,pair)
particle_distance=IMP.core.get_distance(IMP.core.XYZ(particle_pair[0]),IMP.core.XYZ(particle_pair[1]))
if particle_distance<distance and (protein1,residue1) != (protein2,residue2):
break
elif particle_distance>=distance and (protein1,residue1) != (protein2,residue2) and max_delta_distance:
#allow some flexibility
prob=1.0-((particle_distance-distance)/max_delta_distance)**(0.3)
if uniform(0.0,1.0)<prob: break
"""
protein1,residue1=first_site
protein2,residue2=second_site
print("CrossLinkDataBaseFromStructure.get_random_residue_pair:",
"First site",first_site,self.reactivity_dictionary[first_site],
"Second site",second_site,self.reactivity_dictionary[second_site])
particle_pair=IMP.get_particles(self.model,[self.protein_residue_dict[first_site],self.protein_residue_dict[second_site]])
particle_distance=IMP.core.get_distance(IMP.core.XYZ(particle_pair[0]),IMP.core.XYZ(particle_pair[1]))
if particle_distance<distance and (protein1,residue1) != (protein2,residue2):
break
elif particle_distance>=distance and (protein1,residue1) != (protein2,residue2) and max_delta_distance:
#allow some flexibility
#prob=1.0-((particle_distance-distance)/max_delta_distance)**(0.3)
#if uniform(0.0,1.0)<prob: break
if particle_distance-distance < max_delta_distance: break
else:
if not self.xwalk_interacting_pairs:
self.xwalk_interacting_pairs=self.get_xwalk_distances(xwalk_bin_path,distance)
interacting_pairs_weighted=[]
for pair in self.xwalk_interacting_pairs:
protein1=pair[0]
protein2=pair[1]
residue1=pair[2]
residue2=pair[3]
weight1=math.exp(-self.reactivity_dictionary[(protein1,residue1)]/self.kt)
weight2=math.exp(-self.reactivity_dictionary[(protein2,residue2)]/self.kt)
interacting_pairs_weighted.append((pair,weight1*weight2))
pair=self.weighted_choice(interacting_pairs_weighted)
protein1=pair[0]
protein2=pair[1]
residue1=pair[2]
residue2=pair[3]
particle_distance=float(pair[4])
return ((protein1,protein2,residue1,residue2)),particle_distance
def get_xwalk_distances(self,xwalk_bin_path,distance):
import IMP.pmi.output
import os
o=IMP.pmi.output.Output(atomistic=True)
o.init_pdb("xwalk.pdb",self.representation.prot)
o.write_pdb("xwalk.pdb")
namechainiddict=o.dictchain["xwalk.pdb"]
chainiddict={}
for key in namechainiddict: chainiddict[namechainiddict[key]]=key
xwalkout=os.popen('java -Xmx256m -cp ' + xwalk_bin_path +' Xwalk -infile xwalk.pdb -aa1 lys -aa2 lys -a1 cb -a2 cb -max '+str(distance)+' -bb').read()
output_list_of_distance=[]
for line in xwalkout.split("\n")[0:-2]:
            tokens=line.split()
            first=tokens[2]
            second=tokens[3]
            distance=float(tokens[6])
fs=first.split("-")
ss=second.split("-")
chainid1=fs[2]
chainid2=ss[2]
protein1=chainiddict[chainid1]
protein2=chainiddict[chainid2]
residue1=int(fs[1])
residue2=int(ss[1])
output_list_of_distance.append((protein1,protein2,residue1,residue2,distance))
return output_list_of_distance
def weighted_choice(self,choices):
import random
# from http://stackoverflow.com/questions/3679694/a-weighted-version-of-random-choice
total = sum(w for c, w in choices)
r = random.uniform(0, total)
upto = 0
for c, w in choices:
if upto + w > r:
return c
upto += w
assert False, "Shouldn't get here"
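    # Hedged usage sketch (editorial addition): weighted_choice draws one element with
    # probability proportional to its weight, e.g.
    #
    #     self.weighted_choice([(("protA", 10), 0.9), (("protB", 4), 0.1)])
    #
    # returns ("protA", 10) roughly 90% of the time.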
def save_rmf_snapshot(self,filename,color_id=None):
import IMP.rmf
import RMF
if color_id is None:
color_id="Reactivity"
sorted_ids=None
sorted_group_ids=sorted(self.cldb.data_base.keys())
list_of_pairs=[]
color_scores=[]
for group in sorted_group_ids:
group_xls=[]
group_dists_particles=[]
for xl in self.cldb.data_base[group]:
xllabel=self.cldb.get_short_cross_link_string(xl)
(c1,c2,r1,r2)=(xl["molecule_object1"],xl["molecule_object2"],xl[self.cldb.residue1_key],xl[self.cldb.residue2_key])
try:
index1=self.protein_residue_dict[(c1,r1)]
index2=self.protein_residue_dict[(c2,r2)]
p1,p2=IMP.get_particles(self.model,[index1])[0],IMP.get_particles(self.model,[index2])[0]
mdist=xl["TargetDistance"]
except TypeError:
print("TypeError or missing chain/residue ",r1,c1,r2,c2)
continue
group_dists_particles.append((mdist,p1,p2,xllabel,float(xl[color_id])))
if group_dists_particles:
(minmdist,minp1,minp2,minxllabel,mincolor_score)=min(group_dists_particles, key = lambda t: t[0])
color_scores.append(mincolor_score)
list_of_pairs.append((minp1,minp2,minxllabel,mincolor_score))
else:
continue
m=self.model
linear = IMP.core.Linear(0, 0.0)
linear.set_slope(1.0)
dps2 = IMP.core.DistancePairScore(linear)
rslin = IMP.RestraintSet(m, 'linear_dummy_restraints')
sgs=[]
offset=min(color_scores)
maxvalue=max(color_scores)
for pair in list_of_pairs:
pr = IMP.core.PairRestraint(m, dps2, (pair[0], pair[1]))
pr.set_name(pair[2])
factor=(pair[3]-offset)/(maxvalue-offset)
c=IMP.display.get_rgb_color(factor)
seg=IMP.algebra.Segment3D(IMP.core.XYZ(pair[0]).get_coordinates(),IMP.core.XYZ(pair[1]).get_coordinates())
rslin.add_restraint(pr)
sgs.append(IMP.display.SegmentGeometry(seg,c,pair[2]))
rh = RMF.create_rmf_file(filename)
IMP.rmf.add_hierarchies(rh, [self.system.hier])
IMP.rmf.add_restraints(rh,[rslin])
IMP.rmf.add_geometries(rh, sgs)
IMP.rmf.save_frame(rh)
del rh
| gpl-3.0 |
bgroveben/python3_machine_learning_projects | learn_kaggle/deep_learning/packages/learntools/advanced_pandas/grouping_and_sorting.py | 1 | 1704 | import pandas as pd
import seaborn as sns
reviews = pd.read_csv("../input/wine-reviews/winemag-data-130k-v2.csv", index_col=0)
def check_q1(ans):
expected = reviews.groupby('taster_twitter_handle').taster_twitter_handle.count()
return ans.equals(expected)
def answer_q1():
print("""reviews.groupby('taster_twitter_handle').taster_twitter_handle.count()""")
def check_q2(ans):
expected = reviews.groupby('price').points.max().sort_index()
return ans.equals(expected)
def answer_q2():
print("""reviews.groupby('price').points.max().sort_index()""")
def check_q3(ans):
expected = reviews.groupby('variety').price.agg([min, max])
return ans.equals(expected)
def answer_q3():
print("""reviews.groupby('variety').price.agg([min, max])""")
def check_q4(ans):
expected = reviews.groupby('taster_name').points.mean()
return ans.plot.bar() if ans.equals(expected) else False
def answer_q4():
print("""reviews.groupby('taster_name').points.mean()""")
def check_q5(ans):
expected = reviews.groupby('variety').price.agg([min, max]).sort_values(by=['min', 'max'], ascending=False)
return ans.head().plot.bar() if ans.head(10).equals(expected.head(10)) else False
def answer_q5():
print("""reviews.groupby('variety').price.agg([min, max]).sort_values(by=['min', 'max'], ascending=False)""")
def check_q6(ans):
expected = reviews.assign(n=0).groupby(['country', 'variety']).n.count().sort_values(ascending=False)
return ans.head(10).plot.bar() if ans.head(10).equals(expected.head(10)) else False
def answer_q6():
print("""reviews['n'] = 0
reviews.groupby(['country', 'variety']).n.count().sort_values(ascending=False)""")
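# Editorial usage sketch (not part of the original exercise helpers): in the notebook,
# a learner's answer is checked and the reference solution revealed like
#
#     my_answer = reviews.groupby('taster_twitter_handle').taster_twitter_handle.count()
#     check_q1(my_answer)   # -> True
#     answer_q1()           # prints the reference one-liner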
| mit |
bikong2/scikit-learn | sklearn/cluster/mean_shift_.py | 96 | 15434 | """Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <[email protected]>
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Martino Sorbaro <[email protected]>
import numpy as np
import warnings
from collections import defaultdict
from ..externals import six
from ..utils.validation import check_is_fitted
from ..utils import extmath, check_random_state, gen_batches, check_array
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..metrics.pairwise import pairwise_distances_argmin
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0):
"""Estimate the bandwidth to use with the mean-shift algorithm.
    Note that this function takes time at least quadratic in n_samples. For large
    datasets, it is wise to set the n_samples parameter to a small value.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points.
quantile : float, default 0.3
should be between [0, 1]
0.5 means that the median of all pairwise distances is used.
n_samples : int, optional
The number of samples to use. If not given, all samples are used.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Returns
-------
bandwidth : float
The bandwidth parameter.
"""
random_state = check_random_state(random_state)
if n_samples is not None:
idx = random_state.permutation(X.shape[0])[:n_samples]
X = X[idx]
nbrs = NearestNeighbors(n_neighbors=int(X.shape[0] * quantile))
nbrs.fit(X)
bandwidth = 0.
for batch in gen_batches(len(X), 500):
d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
bandwidth += np.max(d, axis=1).sum()
return bandwidth / X.shape[0]
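# Hedged usage sketch (editorial addition, not part of the library source):
#
#     from sklearn.datasets import make_blobs
#     X, _ = make_blobs(n_samples=500, centers=3, random_state=0)
#     bw = estimate_bandwidth(X, quantile=0.2, n_samples=200)
#
# Subsampling via n_samples keeps the otherwise quadratic cost manageable.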
# separate function for each seed's iterative loop
def _mean_shift_single_seed(my_mean, X, nbrs, max_iter):
# For each seed, climb gradient until convergence or max_iter
bandwidth = nbrs.get_params()['radius']
stop_thresh = 1e-3 * bandwidth # when mean has converged
completed_iterations = 0
while True:
# Find mean of points within bandwidth
i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth,
return_distance=False)[0]
points_within = X[i_nbrs]
if len(points_within) == 0:
break # Depending on seeding strategy this condition may occur
my_old_mean = my_mean # save the old mean
my_mean = np.mean(points_within, axis=0)
# If converged or at max_iter, adds the cluster
if (extmath.norm(my_mean - my_old_mean) < stop_thresh or
completed_iterations == max_iter):
return tuple(my_mean), len(points_within)
completed_iterations += 1
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, max_iter=300,
max_iterations=None, n_jobs=1):
"""Perform mean shift clustering of data using a flat kernel.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input data.
bandwidth : float, optional
Kernel bandwidth.
If bandwidth is not given, it is determined using a heuristic based on
the median of all pairwise distances. This will take quadratic time in
the number of samples. The sklearn.cluster.estimate_bandwidth function
can be used to do this more efficiently.
seeds : array-like, shape=[n_seeds, n_features] or None
Point used as initial kernel locations. If None and bin_seeding=False,
each data point is used as a seed. If None and bin_seeding=True,
see bin_seeding.
bin_seeding : boolean, default=False
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
Ignored if seeds argument is not None.
min_bin_freq : int, default=1
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
max_iter : int, default 300
Maximum number of iterations, per seed point before the clustering
operation terminates (for that seed point), if has not converged yet.
n_jobs : int
The number of jobs to use for the computation. This works by computing
        each seed's hill-climbing run in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
Returns
-------
cluster_centers : array, shape=[n_clusters, n_features]
Coordinates of cluster centers.
labels : array, shape=[n_samples]
Cluster labels for each point.
Notes
-----
See examples/cluster/plot_meanshift.py for an example.
"""
# FIXME To be removed in 0.18
if max_iterations is not None:
warnings.warn("The `max_iterations` parameter has been renamed to "
"`max_iter` from version 0.16. The `max_iterations` "
"parameter will be removed in 0.18", DeprecationWarning)
max_iter = max_iterations
if bandwidth is None:
bandwidth = estimate_bandwidth(X)
elif bandwidth <= 0:
raise ValueError("bandwidth needs to be greater than zero or None,\
got %f" % bandwidth)
if seeds is None:
if bin_seeding:
seeds = get_bin_seeds(X, bandwidth, min_bin_freq)
else:
seeds = X
n_samples, n_features = X.shape
center_intensity_dict = {}
nbrs = NearestNeighbors(radius=bandwidth).fit(X)
# execute iterations on all seeds in parallel
all_res = Parallel(n_jobs=n_jobs)(
delayed(_mean_shift_single_seed)
(seed, X, nbrs, max_iter) for seed in seeds)
# copy results in a dictionary
for i in range(len(seeds)):
if all_res[i] is not None:
center_intensity_dict[all_res[i][0]] = all_res[i][1]
if not center_intensity_dict:
# nothing near seeds
raise ValueError("No point was within bandwidth=%f of any seed."
" Try a different seeding strategy \
or increase the bandwidth."
% bandwidth)
# POST PROCESSING: remove near duplicate points
# If the distance between two kernels is less than the bandwidth,
# then we have to remove one because it is a duplicate. Remove the
# one with fewer points.
sorted_by_intensity = sorted(center_intensity_dict.items(),
key=lambda tup: tup[1], reverse=True)
sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
unique = np.ones(len(sorted_centers), dtype=np.bool)
nbrs = NearestNeighbors(radius=bandwidth).fit(sorted_centers)
for i, center in enumerate(sorted_centers):
if unique[i]:
neighbor_idxs = nbrs.radius_neighbors([center],
return_distance=False)[0]
unique[neighbor_idxs] = 0
unique[i] = 1 # leave the current point as unique
cluster_centers = sorted_centers[unique]
# ASSIGN LABELS: a point belongs to the cluster that it is closest to
nbrs = NearestNeighbors(n_neighbors=1).fit(cluster_centers)
labels = np.zeros(n_samples, dtype=np.int)
distances, idxs = nbrs.kneighbors(X)
if cluster_all:
labels = idxs.flatten()
else:
labels.fill(-1)
bool_selector = distances.flatten() <= bandwidth
labels[bool_selector] = idxs.flatten()[bool_selector]
return cluster_centers, labels
def get_bin_seeds(X, bin_size, min_bin_freq=1):
"""Finds seeds for mean_shift.
Finds seeds by first binning data onto a grid whose lines are
spaced bin_size apart, and then choosing those bins with at least
min_bin_freq points.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points, the same points that will be used in mean_shift.
bin_size : float
Controls the coarseness of the binning. Smaller values lead
to more seeding (which is computationally more expensive). If you're
not sure how to set this, set it to the value of the bandwidth used
in clustering.mean_shift.
min_bin_freq : integer, optional
Only bins with at least min_bin_freq will be selected as seeds.
Raising this value decreases the number of seeds found, which
makes mean_shift computationally cheaper.
Returns
-------
bin_seeds : array-like, shape=[n_samples, n_features]
Points used as initial kernel positions in clustering.mean_shift.
"""
# Bin points
bin_sizes = defaultdict(int)
for point in X:
binned_point = np.round(point / bin_size)
bin_sizes[tuple(binned_point)] += 1
# Select only those bins as seeds which have enough members
bin_seeds = np.array([point for point, freq in six.iteritems(bin_sizes) if
freq >= min_bin_freq], dtype=np.float32)
if len(bin_seeds) == len(X):
warnings.warn("Binning data failed with provided bin_size=%f,"
" using data points as seeds." % bin_size)
return X
bin_seeds = bin_seeds * bin_size
return bin_seeds
class MeanShift(BaseEstimator, ClusterMixin):
"""Mean shift clustering using a flat kernel.
Mean shift clustering aims to discover "blobs" in a smooth density of
samples. It is a centroid-based algorithm, which works by updating
candidates for centroids to be the mean of the points within a given
region. These candidates are then filtered in a post-processing stage to
eliminate near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
bandwidth : float, optional
Bandwidth used in the RBF kernel.
If not given, the bandwidth is estimated using
sklearn.cluster.estimate_bandwidth; see the documentation for that
function for hints on scalability (see also the Notes, below).
seeds : array, shape=[n_samples, n_features], optional
Seeds used to initialize kernels. If not set,
the seeds are calculated by clustering.get_bin_seeds
with bandwidth as the grid size and default values for
other parameters.
bin_seeding : boolean, optional
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
default value: False
Ignored if seeds argument is not None.
min_bin_freq : int, optional
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds. If not defined, set to 1.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
n_jobs : int
The number of jobs to use for the computation. This works by computing
        each seed's hill-climbing run in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers.
labels_ :
Labels of each point.
Notes
-----
Scalability:
Because this implementation uses a flat kernel and
    a Ball Tree to look up members of each kernel, the complexity tends
to O(T*n*log(n)) in lower dimensions, with n the number of samples
and T the number of points. In higher dimensions the complexity will
tend towards O(T*n^2).
Scalability can be boosted by using fewer seeds, for example by using
a higher value of min_bin_freq in the get_bin_seeds function.
Note that the estimate_bandwidth function is much less scalable than the
mean shift algorithm and will be the bottleneck if it is used.
References
----------
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
def __init__(self, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, n_jobs=1):
self.bandwidth = bandwidth
self.seeds = seeds
self.bin_seeding = bin_seeding
self.cluster_all = cluster_all
self.min_bin_freq = min_bin_freq
self.n_jobs = n_jobs
def fit(self, X, y=None):
"""Perform clustering.
Parameters
-----------
X : array-like, shape=[n_samples, n_features]
Samples to cluster.
"""
X = check_array(X)
self.cluster_centers_, self.labels_ = \
mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds,
min_bin_freq=self.min_bin_freq,
bin_seeding=self.bin_seeding,
cluster_all=self.cluster_all, n_jobs=self.n_jobs)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_")
return pairwise_distances_argmin(X, self.cluster_centers_)
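# Hedged usage sketch (editorial addition, not part of the library source):
#
#     from sklearn.datasets import make_blobs
#     X, _ = make_blobs(n_samples=300, centers=3, cluster_std=0.6, random_state=0)
#     ms = MeanShift(bin_seeding=True)
#     ms.fit(X)
#     print(ms.cluster_centers_)
#     print(ms.labels_[:10])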
| bsd-3-clause |
ankonzoid/Deep-Reinforcement-Learning-Tutorials | advanced_ML/tree_ensembles/src/RandomForest.py | 1 | 1785 | """
RandomForestRegressor.py (author: Anson Wong / git: ankonzoid)
"""
import numpy as np
from sklearn.tree import DecisionTreeRegressor
class RandomForestTreeRegressor:
def __init__(self, n_estimators=20, max_depth=5,
min_samples_split=2, min_samples_leaf=1):
self.n_estimators = n_estimators
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
def fit(self, X, y):
"""
Method:
1) Create n_estimator tree estimators with random splitting
on d/3 max features
2) For each estimator i, sample (X, y) randomly N times with replacement
and train the estimator on this sampled dataset (X_i, y_i)
3) Predict by taking the mean predictions of each estimator
"""
self.models = []
N, d = X.shape
for i in range(self.n_estimators):
# Create tree with random splitting on d/3 max features
            model = DecisionTreeRegressor(max_depth=self.max_depth,
                                          min_samples_split=self.min_samples_split,
                                          min_samples_leaf=self.min_samples_leaf,
                                          splitter="random",
                                          max_features=max(1, d // 3))
# Bagging procedure
idx_sample = np.random.choice(N, N) # random sampling of length N
model.fit(X[idx_sample], y[idx_sample]) # fit on random sampling
self.models.append(model)
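    # Hedged usage sketch (editorial addition): with a feature matrix X of shape (N, d)
    # and targets y of shape (N,), the ensemble is used like any scikit-learn regressor:
    #
    #     model = RandomForestTreeRegressor(n_estimators=20, max_depth=5)
    #     model.fit(X, y)
    #     y_hat = model.predict(X)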
def predict(self, X):
y_pred = np.zeros(len(X))
for model in self.models:
y_pred += model.predict(X)
y_pred /= self.n_estimators
return y_pred | mit |
cybernet14/scikit-learn | examples/plot_kernel_approximation.py | 262 | 8004 | """
==================================================
Explicit feature map approximation for RBF kernels
==================================================
An example illustrating the approximation of the feature map
of an RBF kernel.
.. currentmodule:: sklearn.kernel_approximation
It shows how to use :class:`RBFSampler` and :class:`Nystroem` to
approximate the feature map of an RBF kernel for classification with an SVM on
the digits dataset. Results using a linear SVM in the original space, a linear
SVM using the approximate mappings and using a kernelized SVM are compared.
Timings and accuracy for varying amounts of Monte Carlo samplings (in the case
of :class:`RBFSampler`, which uses random Fourier features) and different sized
subsets of the training set (for :class:`Nystroem`) for the approximate mapping
are shown.
Please note that the dataset here is not large enough to show the benefits
of kernel approximation, as the exact SVM is still reasonably fast.
Sampling more dimensions clearly leads to better classification results, but
comes at a greater cost. This means there is a tradeoff between runtime and
accuracy, given by the parameter n_components. Note that solving the Linear
SVM and also the approximate kernel SVM could be greatly accelerated by using
stochastic gradient descent via :class:`sklearn.linear_model.SGDClassifier`.
This is not easily possible for the case of the kernelized SVM.
The second plot visualizes the decision surfaces of the RBF kernel SVM and
the linear SVM with approximate kernel maps.
The plot shows decision surfaces of the classifiers projected onto
the first two principal components of the data. This visualization should
be taken with a grain of salt since it is just an interesting slice through
the decision surface in 64 dimensions. In particular note that
a datapoint (represented as a dot) is not necessarily classified
into the region it is lying in, since it will not lie on the plane
that the first two principal components span.
The usage of :class:`RBFSampler` and :class:`Nystroem` is described in detail
in :ref:`kernel_approximation`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
import numpy as np
from time import time
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, pipeline
from sklearn.kernel_approximation import (RBFSampler,
Nystroem)
from sklearn.decomposition import PCA
# The digits dataset
digits = datasets.load_digits(n_class=9)
# To apply a classifier to this data, we need to flatten the images, to
# turn the data into a (samples, feature) matrix:
n_samples = len(digits.data)
data = digits.data / 16.
data -= data.mean(axis=0)
# We learn the digits on the first half of the digits
data_train, targets_train = data[:n_samples // 2], digits.target[:n_samples // 2]
# Now predict the value of the digit on the second half:
data_test, targets_test = data[n_samples // 2:], digits.target[n_samples // 2:]
# Create a classifier: a support vector classifier
kernel_svm = svm.SVC(gamma=.2)
linear_svm = svm.LinearSVC()
# create pipeline from kernel approximation
# and linear svm
feature_map_fourier = RBFSampler(gamma=.2, random_state=1)
feature_map_nystroem = Nystroem(gamma=.2, random_state=1)
fourier_approx_svm = pipeline.Pipeline([("feature_map", feature_map_fourier),
("svm", svm.LinearSVC())])
nystroem_approx_svm = pipeline.Pipeline([("feature_map", feature_map_nystroem),
("svm", svm.LinearSVC())])
# fit and predict using linear and kernel svm:
kernel_svm_time = time()
kernel_svm.fit(data_train, targets_train)
kernel_svm_score = kernel_svm.score(data_test, targets_test)
kernel_svm_time = time() - kernel_svm_time
linear_svm_time = time()
linear_svm.fit(data_train, targets_train)
linear_svm_score = linear_svm.score(data_test, targets_test)
linear_svm_time = time() - linear_svm_time
sample_sizes = 30 * np.arange(1, 10)
fourier_scores = []
nystroem_scores = []
fourier_times = []
nystroem_times = []
for D in sample_sizes:
fourier_approx_svm.set_params(feature_map__n_components=D)
nystroem_approx_svm.set_params(feature_map__n_components=D)
start = time()
nystroem_approx_svm.fit(data_train, targets_train)
nystroem_times.append(time() - start)
start = time()
fourier_approx_svm.fit(data_train, targets_train)
fourier_times.append(time() - start)
fourier_score = fourier_approx_svm.score(data_test, targets_test)
nystroem_score = nystroem_approx_svm.score(data_test, targets_test)
nystroem_scores.append(nystroem_score)
fourier_scores.append(fourier_score)
# plot the results:
plt.figure(figsize=(8, 8))
accuracy = plt.subplot(211)
# second y axis for timings
timescale = plt.subplot(212)
accuracy.plot(sample_sizes, nystroem_scores, label="Nystroem approx. kernel")
timescale.plot(sample_sizes, nystroem_times, '--',
label='Nystroem approx. kernel')
accuracy.plot(sample_sizes, fourier_scores, label="Fourier approx. kernel")
timescale.plot(sample_sizes, fourier_times, '--',
label='Fourier approx. kernel')
# horizontal lines for exact rbf and linear kernels:
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_score, linear_svm_score], label="linear svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_time, linear_svm_time], '--', label='linear svm')
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_score, kernel_svm_score], label="rbf svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_time, kernel_svm_time], '--', label='rbf svm')
# vertical line for dataset dimensionality = 64
accuracy.plot([64, 64], [0.7, 1], label="n_features")
# legends and labels
accuracy.set_title("Classification accuracy")
timescale.set_title("Training times")
accuracy.set_xlim(sample_sizes[0], sample_sizes[-1])
accuracy.set_xticks(())
accuracy.set_ylim(np.min(fourier_scores), 1)
timescale.set_xlabel("Sampling steps = transformed feature dimension")
accuracy.set_ylabel("Classification accuracy")
timescale.set_ylabel("Training time in seconds")
accuracy.legend(loc='best')
timescale.legend(loc='best')
# visualize the decision surface, projected down to the first
# two principal components of the dataset
pca = PCA(n_components=8).fit(data_train)
X = pca.transform(data_train)
# Generate grid along first two principal components
multiples = np.arange(-2, 2, 0.1)
# steps along first component
first = multiples[:, np.newaxis] * pca.components_[0, :]
# steps along second component
second = multiples[:, np.newaxis] * pca.components_[1, :]
# combine
grid = first[np.newaxis, :, :] + second[:, np.newaxis, :]
flat_grid = grid.reshape(-1, data.shape[1])
# title for the plots
titles = ['SVC with rbf kernel',
'SVC (linear kernel)\n with Fourier rbf feature map\n'
'n_components=100',
'SVC (linear kernel)\n with Nystroem rbf feature map\n'
'n_components=100']
plt.tight_layout()
plt.figure(figsize=(12, 5))
# predict and plot
for i, clf in enumerate((kernel_svm, nystroem_approx_svm,
fourier_approx_svm)):
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(1, 3, i + 1)
Z = clf.predict(flat_grid)
# Put the result into a color plot
Z = Z.reshape(grid.shape[:-1])
plt.contourf(multiples, multiples, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=targets_train, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.tight_layout()
plt.show()
| bsd-3-clause |
jkarnows/scikit-learn | sklearn/decomposition/dict_learning.py | 83 | 44062 | """ Dictionary learning
"""
from __future__ import print_function
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import sys
import itertools
from math import sqrt, ceil
import numpy as np
from scipy import linalg
from numpy.lib.stride_tricks import as_strided
from ..base import BaseEstimator, TransformerMixin
from ..externals.joblib import Parallel, delayed, cpu_count
from ..externals.six.moves import zip
from ..utils import (check_array, check_random_state, gen_even_slices,
gen_batches, _get_n_jobs)
from ..utils.extmath import randomized_svd, row_norms
from ..utils.validation import check_is_fitted
from ..linear_model import Lasso, orthogonal_mp_gram, LassoLars, Lars
def _sparse_encode(X, dictionary, gram, cov=None, algorithm='lasso_lars',
regularization=None, copy_cov=True,
init=None, max_iter=1000):
"""Generic sparse coding
    Each row of the result is the solution to a Lasso problem.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows.
gram: None | array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
gram can be None if method is 'threshold'.
cov: array, shape=(n_components, n_samples)
Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than regularization
from the projection dictionary * data'
regularization : int | float
The regularization parameter. It corresponds to alpha when
algorithm is 'lasso_lars', 'lasso_cd' or 'threshold'.
Otherwise it corresponds to n_nonzero_coefs.
init: array of shape (n_samples, n_components)
Initialization value of the sparse code. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
Returns
-------
    code: array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
if cov is None and algorithm != 'lasso_cd':
# overwriting cov is safe
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm == 'lasso_lars':
alpha = float(regularization) / n_features # account for scaling
try:
err_mgt = np.seterr(all='ignore')
lasso_lars = LassoLars(alpha=alpha, fit_intercept=False,
verbose=False, normalize=False,
precompute=gram, fit_path=False)
lasso_lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lasso_lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'lasso_cd':
alpha = float(regularization) / n_features # account for scaling
clf = Lasso(alpha=alpha, fit_intercept=False, precompute=gram,
max_iter=max_iter, warm_start=True)
clf.coef_ = init
clf.fit(dictionary.T, X.T)
new_code = clf.coef_
elif algorithm == 'lars':
try:
err_mgt = np.seterr(all='ignore')
lars = Lars(fit_intercept=False, verbose=False, normalize=False,
precompute=gram, n_nonzero_coefs=int(regularization),
fit_path=False)
lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'threshold':
new_code = ((np.sign(cov) *
np.maximum(np.abs(cov) - regularization, 0)).T)
elif algorithm == 'omp':
new_code = orthogonal_mp_gram(gram, cov, regularization, None,
row_norms(X, squared=True),
copy_Xy=copy_cov).T
else:
        raise ValueError('Sparse coding method must be "lasso_lars", '
                         '"lasso_cd", "lars", "threshold" or "omp", got %s.'
% algorithm)
return new_code
# XXX : could be moved to the linear_model module
def sparse_encode(X, dictionary, gram=None, cov=None, algorithm='lasso_lars',
n_nonzero_coefs=None, alpha=None, copy_cov=True, init=None,
max_iter=1000, n_jobs=1):
"""Sparse coding
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows for meaningful
output.
gram: array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
cov: array, shape=(n_components, n_samples)
        Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
n_nonzero_coefs: int, 0.1 * n_features by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
alpha: float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
        If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
init: array of shape (n_samples, n_components)
Initialization value of the sparse codes. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
n_jobs: int, optional
Number of parallel jobs to run.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
dictionary = check_array(dictionary)
X = check_array(X)
n_samples, n_features = X.shape
n_components = dictionary.shape[0]
if gram is None and algorithm != 'threshold':
gram = np.dot(dictionary, dictionary.T)
if cov is None:
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm in ('lars', 'omp'):
regularization = n_nonzero_coefs
if regularization is None:
regularization = min(max(n_features / 10, 1), n_components)
else:
regularization = alpha
if regularization is None:
regularization = 1.
if n_jobs == 1 or algorithm == 'threshold':
return _sparse_encode(X, dictionary, gram, cov=cov,
algorithm=algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init, max_iter=max_iter)
# Enter parallel code block
code = np.empty((n_samples, n_components))
slices = list(gen_even_slices(n_samples, _get_n_jobs(n_jobs)))
code_views = Parallel(n_jobs=n_jobs)(
delayed(_sparse_encode)(
X[this_slice], dictionary, gram, cov[:, this_slice], algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init[this_slice] if init is not None else None,
max_iter=max_iter)
for this_slice in slices)
for this_slice, this_view in zip(slices, code_views):
code[this_slice] = this_view
return code
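# Illustrative usage sketch (not part of the original module; the names below
# are made up for the example): encode a few samples against a row-normalized
# random dictionary with OMP, targeting 5 non-zero coefficients per sample.
#
#     rng = np.random.RandomState(0)
#     D_demo = rng.randn(15, 30)                               # (n_components, n_features)
#     D_demo /= np.sqrt((D_demo ** 2).sum(axis=1))[:, np.newaxis]  # normalize rows
#     X_demo = rng.randn(10, 30)                               # (n_samples, n_features)
#     code_demo = sparse_encode(X_demo, D_demo, algorithm='omp', n_nonzero_coefs=5)
#     # code_demo.shape == (10, 15)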
def _update_dict(dictionary, Y, code, verbose=False, return_r2=False,
random_state=None):
"""Update the dense dictionary factor in place.
Parameters
----------
dictionary: array of shape (n_features, n_components)
Value of the dictionary at the previous iteration.
Y: array of shape (n_features, n_samples)
Data matrix.
code: array of shape (n_components, n_samples)
Sparse coding of the data against which to optimize the dictionary.
verbose:
Degree of output the procedure will print.
return_r2: bool
Whether to compute and return the residual sum of squares corresponding
to the computed solution.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
Returns
-------
dictionary: array of shape (n_features, n_components)
Updated dictionary.
"""
n_components = len(code)
n_samples = Y.shape[0]
random_state = check_random_state(random_state)
# Residuals, computed 'in-place' for efficiency
R = -np.dot(dictionary, code)
R += Y
R = np.asfortranarray(R)
ger, = linalg.get_blas_funcs(('ger',), (dictionary, code))
for k in range(n_components):
# R <- 1.0 * U_k * V_k^T + R
R = ger(1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
dictionary[:, k] = np.dot(R, code[k, :].T)
# Scale k'th atom
atom_norm_square = np.dot(dictionary[:, k], dictionary[:, k])
if atom_norm_square < 1e-20:
if verbose == 1:
sys.stdout.write("+")
sys.stdout.flush()
elif verbose:
print("Adding new random atom")
dictionary[:, k] = random_state.randn(n_samples)
# Setting corresponding coefs to 0
code[k, :] = 0.0
dictionary[:, k] /= sqrt(np.dot(dictionary[:, k],
dictionary[:, k]))
else:
dictionary[:, k] /= sqrt(atom_norm_square)
# R <- -1.0 * U_k * V_k^T + R
R = ger(-1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
if return_r2:
R **= 2
# R is fortran-ordered. For numpy version < 1.6, sum does not
# follow the quick striding first, and is thus inefficient on
# fortran ordered data. We take a flat view of the data with no
# striding
R = as_strided(R, shape=(R.size, ), strides=(R.dtype.itemsize,))
R = np.sum(R)
return dictionary, R
return dictionary
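# Illustrative sketch (hypothetical arrays): the dictionary is updated in place,
# so pass a Fortran-ordered (n_features, n_components) array together with
# Y of shape (n_features, n_samples) and code of shape (n_components, n_samples).
#
#     rng = np.random.RandomState(0)
#     D_demo = np.asfortranarray(rng.randn(8, 4))
#     Y_demo = rng.randn(8, 20)
#     C_demo = rng.randn(4, 20)
#     D_demo, r2 = _update_dict(D_demo, Y_demo, C_demo, return_r2=True,
#                               random_state=rng)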
def dict_learning(X, n_components, alpha, max_iter=100, tol=1e-8,
method='lars', n_jobs=1, dict_init=None, code_init=None,
callback=None, verbose=False, random_state=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components: int,
Number of dictionary atoms to extract.
alpha: int,
Sparsity controlling parameter.
max_iter: int,
Maximum number of iterations to perform.
tol: float,
Tolerance for the stopping condition.
method: {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
n_jobs: int,
Number of parallel jobs to run, or -1 to autodetect.
dict_init: array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
code_init: array of shape (n_samples, n_components),
Initial value for the sparse code for warm restart scenarios.
callback:
Callable that gets invoked every five iterations.
verbose:
Degree of output the procedure will print.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse code factor in the matrix factorization.
dictionary: array of shape (n_components, n_features),
The dictionary factor in the matrix factorization.
errors: array
Vector of errors at each iteration.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to True.
See also
--------
dict_learning_online
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if method not in ('lars', 'cd'):
raise ValueError('Coding method %r not supported as a fit algorithm.'
% method)
method = 'lasso_' + method
t0 = time.time()
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init the code and the dictionary with SVD of Y
if code_init is not None and dict_init is not None:
code = np.array(code_init, order='F')
# Don't copy V, it will happen below
dictionary = dict_init
else:
code, S, dictionary = linalg.svd(X, full_matrices=False)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r: # True even if n_components=None
code = code[:, :n_components]
dictionary = dictionary[:n_components, :]
else:
code = np.c_[code, np.zeros((len(code), n_components - r))]
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
# Fortran-order dict, as we are going to access its row vectors
dictionary = np.array(dictionary, order='F')
residuals = 0
errors = []
current_cost = np.nan
if verbose == 1:
print('[dict_learning]', end=' ')
# If max_iter is 0, number of iterations returned should be zero
ii = -1
for ii in range(max_iter):
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
print ("Iteration % 3i "
"(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)"
% (ii, dt, dt / 60, current_cost))
# Update code
code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha,
init=code, n_jobs=n_jobs)
# Update dictionary
dictionary, residuals = _update_dict(dictionary.T, X.T, code.T,
verbose=verbose, return_r2=True,
random_state=random_state)
dictionary = dictionary.T
# Cost function
current_cost = 0.5 * residuals + alpha * np.sum(np.abs(code))
errors.append(current_cost)
if ii > 0:
dE = errors[-2] - errors[-1]
# assert(dE >= -tol * errors[-1])
if dE < tol * errors[-1]:
if verbose == 1:
# A line return
print("")
elif verbose:
print("--- Convergence reached after %d iterations" % ii)
break
if ii % 5 == 0 and callback is not None:
callback(locals())
if return_n_iter:
return code, dictionary, errors, ii + 1
else:
return code, dictionary, errors
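# Illustrative usage sketch (hypothetical data): learn a 5-atom dictionary with
# the coordinate-descent solver and inspect the cost trajectory.
#
#     rng = np.random.RandomState(0)
#     X_demo = rng.randn(40, 16)
#     code_demo, D_demo, errors_demo = dict_learning(X_demo, n_components=5,
#                                                    alpha=1.0, max_iter=20,
#                                                    method='cd', random_state=0)
#     # code_demo.shape == (40, 5), D_demo.shape == (5, 16);
#     # errors_demo should decrease until the tolerance is reached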
def dict_learning_online(X, n_components=2, alpha=1, n_iter=100,
return_code=True, dict_init=None, callback=None,
batch_size=3, verbose=False, shuffle=True, n_jobs=1,
method='lars', iter_offset=0, random_state=None,
return_inner_stats=False, inner_stats=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem online.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code. This is
accomplished by repeatedly iterating over mini-batches by slicing
the input data.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components : int,
Number of dictionary atoms to extract.
alpha : float,
Sparsity controlling parameter.
n_iter : int,
Number of iterations to perform.
return_code : boolean,
Whether to also return the code U or just the dictionary V.
dict_init : array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
callback :
Callable that gets invoked every five iterations.
batch_size : int,
The number of samples to take in each batch.
verbose :
Degree of output the procedure will print.
shuffle : boolean,
Whether to shuffle the data before splitting it in batches.
n_jobs : int,
Number of parallel jobs to run, or -1 to autodetect.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
iter_offset : int, default 0
Number of previous iterations completed on the dictionary used for
initialization.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
return_inner_stats : boolean, optional
Return the inner statistics A (dictionary covariance) and B
(data approximation). Useful to restart the algorithm in an
online setting. If return_inner_stats is True, return_code is
ignored
inner_stats : tuple of (A, B) ndarrays
Inner sufficient statistics that are kept by the algorithm.
Passing them at initialization is useful in online settings, to
        avoid losing the history of the evolution.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code : array of shape (n_samples, n_components),
the sparse code (only returned if `return_code=True`)
dictionary : array of shape (n_components, n_features),
the solutions to the dictionary learning problem
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to `True`.
See also
--------
dict_learning
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if n_components is None:
n_components = X.shape[1]
if method not in ('lars', 'cd'):
raise ValueError('Coding method not supported as a fit algorithm.')
method = 'lasso_' + method
t0 = time.time()
n_samples, n_features = X.shape
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init V with SVD of X
if dict_init is not None:
dictionary = dict_init
else:
_, S, dictionary = randomized_svd(X, n_components,
random_state=random_state)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r:
dictionary = dictionary[:n_components, :]
else:
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
dictionary = np.ascontiguousarray(dictionary.T)
if verbose == 1:
print('[dict_learning]', end=' ')
if shuffle:
X_train = X.copy()
random_state.shuffle(X_train)
else:
X_train = X
batches = gen_batches(n_samples, batch_size)
batches = itertools.cycle(batches)
# The covariance of the dictionary
if inner_stats is None:
A = np.zeros((n_components, n_components))
# The data approximation
B = np.zeros((n_features, n_components))
else:
A = inner_stats[0].copy()
B = inner_stats[1].copy()
    # If n_iter is zero, the number of iterations returned should be zero.
ii = iter_offset - 1
for ii, batch in zip(range(iter_offset, iter_offset + n_iter), batches):
this_X = X_train[batch]
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
if verbose > 10 or ii % ceil(100. / verbose) == 0:
print ("Iteration % 3i (elapsed time: % 3is, % 4.1fmn)"
% (ii, dt, dt / 60))
this_code = sparse_encode(this_X, dictionary.T, algorithm=method,
alpha=alpha, n_jobs=n_jobs).T
# Update the auxiliary variables
if ii < batch_size - 1:
theta = float((ii + 1) * batch_size)
else:
theta = float(batch_size ** 2 + ii + 1 - batch_size)
beta = (theta + 1 - batch_size) / (theta + 1)
A *= beta
A += np.dot(this_code, this_code.T)
B *= beta
B += np.dot(this_X.T, this_code.T)
# Update dictionary
dictionary = _update_dict(dictionary, B, A, verbose=verbose,
random_state=random_state)
# XXX: Can the residuals be of any use?
# Maybe we need a stopping criteria based on the amount of
# modification in the dictionary
if callback is not None:
callback(locals())
if return_inner_stats:
if return_n_iter:
return dictionary.T, (A, B), ii - iter_offset + 1
else:
return dictionary.T, (A, B)
if return_code:
if verbose > 1:
print('Learning code...', end=' ')
elif verbose == 1:
print('|', end=' ')
code = sparse_encode(X, dictionary.T, algorithm=method, alpha=alpha,
n_jobs=n_jobs)
if verbose > 1:
dt = (time.time() - t0)
print('done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60))
if return_n_iter:
return code, dictionary.T, ii - iter_offset + 1
else:
return code, dictionary.T
if return_n_iter:
return dictionary.T, ii - iter_offset + 1
else:
return dictionary.T
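# Illustrative usage sketch (hypothetical data): run the online solver for a few
# mini-batch iterations and keep only the dictionary.
#
#     rng = np.random.RandomState(0)
#     X_demo = rng.randn(60, 16)
#     D_demo = dict_learning_online(X_demo, n_components=5, alpha=1.0, n_iter=25,
#                                   batch_size=5, return_code=False,
#                                   random_state=0)
#     # D_demo.shape == (5, 16)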
class SparseCodingMixin(TransformerMixin):
"""Sparse coding mixin"""
def _set_sparse_coding_params(self, n_components,
transform_algorithm='omp',
transform_n_nonzero_coefs=None,
transform_alpha=None, split_sign=False,
n_jobs=1):
self.n_components = n_components
self.transform_algorithm = transform_algorithm
self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
self.transform_alpha = transform_alpha
self.split_sign = split_sign
self.n_jobs = n_jobs
def transform(self, X, y=None):
"""Encode the data as a sparse combination of the dictionary atoms.
Coding method is determined by the object parameter
`transform_algorithm`.
Parameters
----------
X : array of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data
"""
check_is_fitted(self, 'components_')
# XXX : kwargs is not documented
X = check_array(X)
n_samples, n_features = X.shape
code = sparse_encode(
X, self.components_, algorithm=self.transform_algorithm,
n_nonzero_coefs=self.transform_n_nonzero_coefs,
alpha=self.transform_alpha, n_jobs=self.n_jobs)
if self.split_sign:
# feature vector is split into a positive and negative side
n_samples, n_features = code.shape
split_code = np.empty((n_samples, 2 * n_features))
split_code[:, :n_features] = np.maximum(code, 0)
split_code[:, n_features:] = -np.minimum(code, 0)
code = split_code
return code
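# Illustrative sketch (hypothetical coder): with split_sign=True the encoded
# features double in width, holding the positive part and the negated negative
# part side by side.
#
#     coder_demo = SparseCoder(dictionary=D_demo, transform_algorithm='omp',
#                              transform_n_nonzero_coefs=3, split_sign=True)
#     # coder_demo.transform(X_demo).shape == (X_demo.shape[0], 2 * D_demo.shape[0])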
class SparseCoder(BaseEstimator, SparseCodingMixin):
"""Sparse coding
Finds a sparse representation of data against a fixed, precomputed
dictionary.
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
dictionary : array, [n_components, n_features]
The dictionary atoms used for sparse coding. Lines are assumed to be
normalized to unit norm.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data:
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
Attributes
----------
components_ : array, [n_components, n_features]
The unchanged dictionary atoms
See also
--------
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
sparse_encode
"""
def __init__(self, dictionary, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
split_sign=False, n_jobs=1):
self._set_sparse_coding_params(dictionary.shape[0],
transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.components_ = dictionary
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
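# Illustrative usage sketch (hypothetical dictionary): sparse-code data against
# a fixed dictionary; fit() is a no-op, so the coder can be used immediately.
#
#     rng = np.random.RandomState(0)
#     D_demo = rng.randn(15, 30)
#     D_demo /= np.sqrt((D_demo ** 2).sum(axis=1))[:, np.newaxis]
#     coder_demo = SparseCoder(dictionary=D_demo,
#                              transform_algorithm='lasso_lars',
#                              transform_alpha=0.1)
#     codes_demo = coder_demo.transform(rng.randn(10, 30))   # shape (10, 15)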
class DictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
max_iter : int,
maximum number of iterations to perform
tol : float,
tolerance for numerical error
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
code_init : array of shape (n_samples, n_components),
initial value for the code, for warm restart
dict_init : array of shape (n_components, n_features),
initial values for the dictionary, for warm restart
verbose :
degree of verbosity of the printed output
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
dictionary atoms extracted from the data
error_ : array
vector of errors at each iteration
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, max_iter=1000, tol=1e-8,
fit_algorithm='lars', transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
n_jobs=1, code_init=None, dict_init=None, verbose=False,
split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.max_iter = max_iter
self.tol = tol
self.fit_algorithm = fit_algorithm
self.code_init = code_init
self.dict_init = dict_init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self: object
Returns the object itself
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
V, U, E, self.n_iter_ = dict_learning(
X, n_components, self.alpha,
tol=self.tol, max_iter=self.max_iter,
method=self.fit_algorithm,
n_jobs=self.n_jobs,
code_init=self.code_init,
dict_init=self.dict_init,
verbose=self.verbose,
random_state=random_state,
return_n_iter=True)
self.components_ = U
self.error_ = E
return self
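# Illustrative usage sketch (hypothetical data): fit a small dictionary and
# transform new samples with OMP (the default transform_algorithm).
#
#     rng = np.random.RandomState(0)
#     X_demo = rng.randn(50, 16)
#     dico_demo = DictionaryLearning(n_components=8, alpha=1.0, max_iter=10,
#                                    random_state=0).fit(X_demo)
#     # dico_demo.components_.shape == (8, 16)
#     codes_demo = dico_demo.transform(X_demo[:5])           # shape (5, 8)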
class MiniBatchDictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Mini-batch dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
n_iter : int,
total number of iterations to perform
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data.
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
dict_init : array of shape (n_components, n_features),
initial value of the dictionary for warm restart scenarios
verbose :
degree of verbosity of the printed output
batch_size : int,
number of samples in each mini-batch
shuffle : bool,
whether to shuffle the samples before forming batches
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
components extracted from the data
inner_stats_ : tuple of (A, B) ndarrays
Internal sufficient statistics that are kept by the algorithm.
        Keeping them is useful in online settings, to avoid losing the
history of the evolution, but they shouldn't have any use for the
end user.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
DictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, n_iter=1000,
fit_algorithm='lars', n_jobs=1, batch_size=3,
shuffle=True, dict_init=None, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
verbose=False, split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.n_iter = n_iter
self.fit_algorithm = fit_algorithm
self.dict_init = dict_init
self.verbose = verbose
self.shuffle = shuffle
self.batch_size = batch_size
self.split_sign = split_sign
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
U, (A, B), self.n_iter_ = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, return_code=False,
method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=self.dict_init,
batch_size=self.batch_size, shuffle=self.shuffle,
verbose=self.verbose, random_state=random_state,
return_inner_stats=True,
return_n_iter=True)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = self.n_iter
return self
def partial_fit(self, X, y=None, iter_offset=None):
"""Updates the model using the data in X as a mini-batch.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
iter_offset: integer, optional
            The number of iterations on data batches that have been
performed before this call to partial_fit. This is optional:
if no number is passed, the memory of the object is
used.
Returns
-------
self : object
Returns the instance itself.
"""
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
X = check_array(X)
if hasattr(self, 'components_'):
dict_init = self.components_
else:
dict_init = self.dict_init
inner_stats = getattr(self, 'inner_stats_', None)
if iter_offset is None:
iter_offset = getattr(self, 'iter_offset_', 0)
U, (A, B) = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=dict_init,
batch_size=len(X), shuffle=False,
verbose=self.verbose, return_code=False,
iter_offset=iter_offset, random_state=self.random_state_,
return_inner_stats=True, inner_stats=inner_stats)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = iter_offset + self.n_iter
return self
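# Illustrative usage sketch (hypothetical stream of batches): partial_fit keeps
# the inner statistics (A, B) between calls, so the dictionary can be refined
# incrementally as new mini-batches arrive.
#
#     rng = np.random.RandomState(0)
#     mbdl_demo = MiniBatchDictionaryLearning(n_components=8, alpha=1.0,
#                                             n_iter=5, random_state=0)
#     for batch in np.array_split(rng.randn(100, 16), 10):
#         mbdl_demo.partial_fit(batch)
#     # mbdl_demo.components_.shape == (8, 16)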
| bsd-3-clause |
davidgbe/scikit-learn | sklearn/covariance/tests/test_graph_lasso.py | 272 | 5245 | """ Test the graph_lasso module.
"""
import sys
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.covariance import (graph_lasso, GraphLasso, GraphLassoCV,
empirical_covariance)
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.externals.six.moves import StringIO
from sklearn.utils import check_random_state
from sklearn import datasets
def test_graph_lasso(random_state=0):
# Sample data from a sparse multivariate normal
dim = 20
n_samples = 100
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.95,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
emp_cov = empirical_covariance(X)
for alpha in (0., .1, .25):
covs = dict()
icovs = dict()
for method in ('cd', 'lars'):
cov_, icov_, costs = graph_lasso(emp_cov, alpha=alpha, mode=method,
return_costs=True)
covs[method] = cov_
icovs[method] = icov_
costs, dual_gap = np.array(costs).T
# Check that the costs always decrease (doesn't hold if alpha == 0)
if not alpha == 0:
assert_array_less(np.diff(costs), 0)
# Check that the 2 approaches give similar results
assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
assert_array_almost_equal(icovs['cd'], icovs['lars'], decimal=4)
# Smoke test the estimator
model = GraphLasso(alpha=.25).fit(X)
model.score(X)
assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)
# For a centered matrix, assume_centered could be chosen True or False
# Check that this returns indeed the same result for centered data
Z = X - X.mean(0)
precs = list()
for assume_centered in (False, True):
prec_ = GraphLasso(assume_centered=assume_centered).fit(Z).precision_
precs.append(prec_)
assert_array_almost_equal(precs[0], precs[1])
def test_graph_lasso_iris():
# Hard-coded solution from R glasso package for alpha=1.0
# The iris datasets in R and sklearn do not match in a few places, these
# values are for the sklearn version
cov_R = np.array([
[0.68112222, 0.0, 0.2651911, 0.02467558],
[0.00, 0.1867507, 0.0, 0.00],
[0.26519111, 0.0, 3.0924249, 0.28774489],
[0.02467558, 0.0, 0.2877449, 0.57853156]
])
icov_R = np.array([
[1.5188780, 0.0, -0.1302515, 0.0],
[0.0, 5.354733, 0.0, 0.0],
[-0.1302515, 0.0, 0.3502322, -0.1686399],
[0.0, 0.0, -0.1686399, 1.8123908]
])
X = datasets.load_iris().data
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=1.0, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R)
assert_array_almost_equal(icov, icov_R)
def test_graph_lasso_iris_singular():
# Small subset of rows to test the rank-deficient case
# Need to choose samples such that none of the variances are zero
indices = np.arange(10, 13)
# Hard-coded solution from R glasso package for alpha=0.01
cov_R = np.array([
[0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
[0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
[0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
[0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222]
])
icov_R = np.array([
[24.42244057, -16.831679593, 0.0, 0.0],
[-16.83168201, 24.351841681, -6.206896552, -12.5],
[0.0, -6.206896171, 153.103448276, 0.0],
[0.0, -12.499999143, 0.0, 462.5]
])
X = datasets.load_iris().data[indices, :]
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=0.01, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R, decimal=5)
assert_array_almost_equal(icov, icov_R, decimal=5)
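# Illustrative usage sketch (hypothetical data), mirroring what the tests above
# exercise: estimate a covariance/precision pair from an empirical covariance.
#
#     rng = check_random_state(0)
#     X_demo = rng.randn(50, 5)
#     cov_demo, prec_demo = graph_lasso(empirical_covariance(X_demo), alpha=0.1,
#                                       mode='cd', return_costs=False)
#     # cov_demo and prec_demo are both (5, 5); prec_demo becomes sparser
#     # as alpha increases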
def test_graph_lasso_cv(random_state=1):
# Sample data from a sparse multivariate normal
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
# Capture stdout, to smoke test the verbose mode
orig_stdout = sys.stdout
try:
sys.stdout = StringIO()
# We need verbose very high so that Parallel prints on stdout
GraphLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
finally:
sys.stdout = orig_stdout
# Smoke test with specified alphas
GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
| bsd-3-clause |
sandeep-n/incubator-systemml | src/main/python/systemml/defmatrix.py | 3 | 46772 | #-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
__all__ = [ 'setSparkContext', 'matrix', 'eval', 'solve', 'DMLOp', 'set_lazy', 'debug_array_conversion', 'load', 'full', 'seq' ]
import numpy as np
import pandas as pd
from scipy.sparse import coo_matrix, spmatrix
try:
import py4j.java_gateway
from py4j.java_gateway import JavaObject
from pyspark import SparkContext
from pyspark.sql import DataFrame, SparkSession
import pyspark.mllib.common
except ImportError:
raise ImportError('Unable to import `pyspark`. Hint: Make sure you are running with PySpark.')
from . import MLContext, pydml, _java2py, Matrix
from .converters import *
def setSparkContext(sc):
"""
    Before using the matrix class, the user needs to invoke this function if a SparkContext has not previously been created in the session.
Parameters
----------
sc: SparkContext
SparkContext
"""
matrix.sc = sc
matrix.sparkSession = SparkSession.builder.getOrCreate()
matrix.ml = MLContext(matrix.sc)
def check_MLContext():
if matrix.ml is None:
if SparkContext._active_spark_context is not None:
setSparkContext(SparkContext._active_spark_context)
else:
raise Exception('Expected setSparkContext(sc) to be called, where sc is active SparkContext.')
########################## AST related operations ##################################
class DMLOp(object):
"""
Represents an intermediate node of Abstract syntax tree created to generate the PyDML script
"""
def __init__(self, inputs, dml=None):
self.inputs = inputs
self.dml = dml
self.ID = None
self.depth = 1
for m in self.inputs:
m.referenced = m.referenced + [ self ]
if isinstance(m, matrix) and m.op is not None:
self.depth = max(self.depth, m.op.depth + 1)
MAX_DEPTH = 0
def _visit(self, execute=True):
matrix.dml = matrix.dml + self.dml
def _print_ast(self, numSpaces):
ret = []
for m in self.inputs:
ret = [ m._print_ast(numSpaces+2) ]
return ''.join(ret)
# Special object used internally to specify the placeholder which will be replaced by output ID
# This helps to provide dml containing output ID in construct_intermediate_node
OUTPUT_ID = '$$OutputID$$'
def set_lazy(isLazy):
"""
    This method allows users to set whether matrix operations should be executed in a lazy manner.
Parameters
----------
isLazy: True if matrix operations should be evaluated in lazy manner.
"""
if isLazy:
DMLOp.MAX_DEPTH = 0
else:
DMLOp.MAX_DEPTH = 1
def construct_intermediate_node(inputs, dml):
"""
Convenient utility to create an intermediate node of AST.
Parameters
----------
inputs = list of input matrix objects and/or DMLOp
    dml = list of DML strings (which will eventually be joined before execution). To refer to the output ID, please use the OUTPUT_ID placeholder.
"""
dmlOp = DMLOp(inputs)
out = matrix(None, op=dmlOp)
dmlOp.dml = [out.ID if x==OUTPUT_ID else x for x in dml]
if DMLOp.MAX_DEPTH > 0 and out.op.depth >= DMLOp.MAX_DEPTH:
out.eval()
return out
def load(file, format='csv'):
"""
Allows user to load a matrix from filesystem
Parameters
----------
file: filepath
format: can be csv, text or binary or mm
"""
return construct_intermediate_node([], [OUTPUT_ID, ' = load(\"', file, '\", format=\"', format, '\")\n'])
def full(shape, fill_value):
"""
Return a new array of given shape filled with fill_value.
Parameters
----------
shape: tuple of length 2
fill_value: float or int
"""
return construct_intermediate_node([], [OUTPUT_ID, ' = full(', str(fill_value), ', rows=', str(shape[0]), ', cols=', str(shape[1]), ')\n'])
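# Illustrative usage sketch (requires an active SparkContext registered via
# setSparkContext): create a constant matrix lazily and materialize it.
#
#     m_demo = full((3, 2), 7.0)
#     m_demo.toNumPy()      # 3x2 NumPy array filled with 7.0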
def reset():
"""
Resets the visited status of matrix and the operators in the generated AST.
"""
for m in matrix.visited:
m.visited = False
matrix.visited = []
matrix.ml = MLContext(matrix.sc)
matrix.dml = []
matrix.script = pydml('')
def perform_dfs(outputs, execute):
"""
Traverses the forest of nodes rooted at outputs nodes and returns the DML script to execute
"""
for m in outputs:
m.output = True
m._visit(execute=execute)
return ''.join(matrix.dml)
###############################################################################
########################## Utility functions ##################################
def _log_base(val, base):
if not isinstance(val, str):
raise ValueError('The val to _log_base should be of type string')
return '(log(' + val + ')/log(' + str(base) + '))'
def _matricize(lhs, inputs):
"""
Utility fn to convert the supported types to matrix class or to string (if float or int)
and return the string to be passed to DML as well as inputs
"""
if isinstance(lhs, SUPPORTED_TYPES):
lhs = matrix(lhs)
if isinstance(lhs, matrix):
lhsStr = lhs.ID
inputs = inputs + [lhs]
elif isinstance(lhs, float) or isinstance(lhs, int):
lhsStr = str(lhs)
else:
raise TypeError('Incorrect type')
return lhsStr, inputs
def binary_op(lhs, rhs, opStr):
"""
Common function called by all the binary operators in matrix class
"""
inputs = []
lhsStr, inputs = _matricize(lhs, inputs)
rhsStr, inputs = _matricize(rhs, inputs)
return construct_intermediate_node(inputs, [OUTPUT_ID, ' = ', lhsStr, opStr, rhsStr, '\n'])
def binaryMatrixFunction(X, Y, fnName):
"""
Common function called by supported PyDML built-in function that has two arguments.
"""
inputs = []
lhsStr, inputs = _matricize(X, inputs)
rhsStr, inputs = _matricize(Y, inputs)
return construct_intermediate_node(inputs, [OUTPUT_ID, ' = ', fnName,'(', lhsStr, ', ', rhsStr, ')\n'])
def unaryMatrixFunction(X, fnName):
"""
Common function called by supported PyDML built-in function that has one argument.
"""
inputs = []
lhsStr, inputs = _matricize(X, inputs)
return construct_intermediate_node(inputs, [OUTPUT_ID, ' = ', fnName,'(', lhsStr, ')\n'])
def seq(start=None, stop=None, step=1):
"""
Creates a single column vector with values starting from <start>, to <stop>, in increments of <step>.
    Note: Unlike NumPy's arange, which returns a 1-D array, this returns a column vector.
    Also, unlike NumPy's arange, which does not include stop, this method includes stop in the interval.
Parameters
----------
start: int or float [Optional: default = 0]
stop: int or float
step : int float [Optional: default = 1]
"""
if start is None and stop is None:
raise ValueError('Both start and stop cannot be None')
elif start is not None and stop is None:
stop = start
start = 0
return construct_intermediate_node([], [OUTPUT_ID, ' = seq(', str(start), ',', str(stop), ',', str(step), ')\n'])
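# Illustrative usage sketch (requires an active SparkContext): seq returns a
# column vector and, unlike numpy.arange, includes the stop value.
#
#     v_demo = seq(0, 10, 2)
#     v_demo.toNumPy()      # column vector [[0.], [2.], [4.], [6.], [8.], [10.]]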
# utility function that converts 1:3 into DML string
def convert_seq_to_dml(s):
ret = []
if s is None:
return ''
elif isinstance(s, slice):
if s.step is not None:
raise ValueError('Slicing with step is not supported.')
if s.start is None:
ret = ret + [ '0 : ' ]
else:
ret = ret + [ getValue(s.start), ':' ]
        if s.stop is None:
ret = ret + [ '' ]
else:
ret = ret + [ getValue(s.stop) ]
else:
ret = ret + [ getValue(s) ]
return ''.join(ret)
# utility function that converts index (such as [1, 2:3]) into DML string
def getIndexingDML(index):
ret = [ '[' ]
if isinstance(index, tuple) and len(index) == 1:
ret = ret + [ convert_seq_to_dml(index[0]), ',' ]
elif isinstance(index, tuple) and len(index) == 2:
ret = ret + [ convert_seq_to_dml(index[0]), ',', convert_seq_to_dml(index[1]) ]
else:
raise TypeError('matrix indexes can only be tuple of length 2. For example: m[1,1], m[0:1,], m[:, 0:1]')
return ret + [ ']' ]
def convert_outputs_to_list(outputs):
if isinstance(outputs, matrix):
return [ outputs ]
elif isinstance(outputs, list):
for o in outputs:
if not isinstance(o, matrix):
raise TypeError('Only matrix or list of matrix allowed')
return outputs
else:
raise TypeError('Only matrix or list of matrix allowed')
def reset_output_flag(outputs):
for m in outputs:
m.output = False
###############################################################################
########################## Global user-facing functions #######################
def solve(A, b):
"""
Computes the least squares solution for system of linear equations A %*% x = b
Examples
--------
>>> import numpy as np
>>> from sklearn import datasets
>>> import SystemML as sml
>>> from pyspark.sql import SparkSession
>>> diabetes = datasets.load_diabetes()
>>> diabetes_X = diabetes.data[:, np.newaxis, 2]
>>> X_train = diabetes_X[:-20]
>>> X_test = diabetes_X[-20:]
>>> y_train = diabetes.target[:-20]
>>> y_test = diabetes.target[-20:]
>>> sml.setSparkContext(sc)
>>> X = sml.matrix(X_train)
>>> y = sml.matrix(y_train)
>>> A = X.transpose().dot(X)
>>> b = X.transpose().dot(y)
>>> beta = sml.solve(A, b).toNumPy()
>>> y_predicted = X_test.dot(beta)
>>> print('Residual sum of squares: %.2f' % np.mean((y_predicted - y_test) ** 2))
Residual sum of squares: 25282.12
"""
return binaryMatrixFunction(A, b, 'solve')
def eval(outputs, execute=True):
"""
Executes the unevaluated DML script and computes the matrices specified by outputs.
Parameters
----------
outputs: list of matrices or a matrix object
execute: specified whether to execute the unevaluated operation or just return the script.
"""
check_MLContext()
reset()
outputs = convert_outputs_to_list(outputs)
matrix.script.scriptString = perform_dfs(outputs, execute)
if not execute:
reset_output_flag(outputs)
return matrix.script.scriptString
results = matrix.ml.execute(matrix.script)
for m in outputs:
m.eval_data = results._java_results.get(m.ID)
reset_output_flag(outputs)
def debug_array_conversion(throwError):
matrix.THROW_ARRAY_CONVERSION_ERROR = throwError
def _get_new_var_id():
matrix.systemmlVarID += 1
return 'mVar' + str(matrix.systemmlVarID)
###############################################################################
class matrix(object):
"""
matrix class is a python wrapper that implements basic matrix operators, matrix functions
as well as converters to common Python types (for example: Numpy arrays, PySpark DataFrame
and Pandas DataFrame).
The operators supported are:
1. Arithmetic operators: +, -, *, /, //, %, ** as well as dot (i.e. matrix multiplication)
2. Indexing in the matrix
3. Relational/Boolean operators: <, <=, >, >=, ==, !=, &, |
In addition, following functions are supported for matrix:
1. transpose
2. Aggregation functions: sum, mean, var, sd, max, min, argmin, argmax, cumsum
3. Global statistical built-In functions: exp, log, abs, sqrt, round, floor, ceil, sin, cos, tan, asin, acos, atan, sign, solve
For all the above functions, we always return a two dimensional matrix, especially for aggregation functions with axis.
For example: Assuming m1 is a matrix of (3, n), NumPy returns a 1d vector of dimension (3,) for operation m1.sum(axis=1)
whereas SystemML returns a 2d matrix of dimension (3, 1).
Note: an evaluated matrix contains a data field computed by eval method as DataFrame or NumPy array.
Examples
--------
>>> import SystemML as sml
>>> import numpy as np
>>> sml.setSparkContext(sc)
Welcome to Apache SystemML!
>>> m1 = sml.matrix(np.ones((3,3)) + 2)
>>> m2 = sml.matrix(np.ones((3,3)) + 3)
>>> m2 = m1 * (m2 + m1)
>>> m4 = 1.0 - m2
>>> m4
# This matrix (mVar5) is backed by below given PyDML script (which is not yet evaluated). To fetch the data of this matrix, invoke toNumPy() or toDF() or toPandas() methods.
mVar1 = load(" ", format="csv")
mVar2 = load(" ", format="csv")
mVar3 = mVar2 + mVar1
mVar4 = mVar1 * mVar3
mVar5 = 1.0 - mVar4
save(mVar5, " ")
>>> m2.eval()
>>> m2
# This matrix (mVar4) is backed by NumPy array. To fetch the NumPy array, invoke toNumPy() method.
>>> m4
# This matrix (mVar5) is backed by below given PyDML script (which is not yet evaluated). To fetch the data of this matrix, invoke toNumPy() or toDF() or toPandas() methods.
mVar4 = load(" ", format="csv")
mVar5 = 1.0 - mVar4
save(mVar5, " ")
>>> m4.sum(axis=1).toNumPy()
array([[-60.],
[-60.],
[-60.]])
Design Decisions:
1. Until eval() method is invoked, we create an AST (not exposed to the user) that consist of unevaluated operations and data required by those operations.
    As an analogy, a Spark user can treat the eval() method as similar to calling RDD.persist() followed by RDD.count().
    2. The AST consists of two kinds of nodes: either of type matrix or of type DMLOp.
Both these classes expose _visit method, that helps in traversing the AST in DFS manner.
3. A matrix object can either be evaluated or not.
If evaluated, the attribute 'data' is set to one of the supported types (for example: NumPy array or DataFrame). In this case, the attribute 'op' is set to None.
    If not evaluated, the attribute 'op' refers to one of the intermediate nodes of the AST and is of type DMLOp. In this case, the attribute 'data' is set to None.
5. DMLOp has an attribute 'inputs' which contains list of matrix objects or DMLOp.
    6. To simplify the traversal, every matrix object is considered immutable and a matrix operation creates a new matrix object.
As an example:
`m1 = sml.matrix(np.ones((3,3)))` creates a matrix object backed by 'data=(np.ones((3,3))'.
`m1 = m1 * 2` will create a new matrix object which is now backed by 'op=DMLOp( ... )' whose input is earlier created matrix object.
7. Left indexing (implemented in __setitem__ method) is a special case, where Python expects the existing object to be mutated.
To ensure the above property, we make deep copy of existing object and point any references to the left-indexed matrix to the newly created object.
Then the left-indexed matrix is set to be backed by DMLOp consisting of following pydml:
left-indexed-matrix = new-deep-copied-matrix
left-indexed-matrix[index] = value
8. Please use m.print_ast() and/or type `m` for debugging. Here is a sample session:
>>> npm = np.ones((3,3))
>>> m1 = sml.matrix(npm + 3)
>>> m2 = sml.matrix(npm + 5)
>>> m3 = m1 + m2
>>> m3
mVar2 = load(" ", format="csv")
mVar1 = load(" ", format="csv")
mVar3 = mVar1 + mVar2
save(mVar3, " ")
>>> m3.print_ast()
- [mVar3] (op).
- [mVar1] (data).
- [mVar2] (data).
"""
# Global variable that is used to keep track of intermediate matrix variables in the DML script
systemmlVarID = 0
# Since joining of string is expensive operation, we collect the set of strings into list and then join
# them before execution: See matrix.script.scriptString = ''.join(matrix.dml) in eval() method
dml = []
# Represents MLContext's script object
script = None
# Represents MLContext object
ml = None
# Contains list of nodes visited in Abstract Syntax Tree. This helps to avoid computation of matrix objects
# that have been previously evaluated.
visited = []
def __init__(self, data, op=None):
"""
Constructs a lazy matrix
Parameters
----------
data: NumPy ndarray, Pandas DataFrame, scipy sparse matrix or PySpark DataFrame. (data cannot be None for external users, 'data=None' is used internally for lazy evaluation).
"""
self.dtype = np.double
check_MLContext()
self.visited = False
self.output = False
self.ID = _get_new_var_id()
self.referenced = []
# op refers to the node of Abstract Syntax Tree created internally for lazy evaluation
self.op = op
self.eval_data = data
self._shape = None
if isinstance(data, SUPPORTED_TYPES):
self._shape = data.shape
if not (isinstance(data, SUPPORTED_TYPES) or hasattr(data, '_jdf') or (data is None and op is not None)):
raise TypeError('Unsupported input type')
def eval(self):
"""
This is a convenience function that calls the global eval method
"""
eval([self])
def toPandas(self):
"""
This is a convenience function that calls the global eval method and then converts the matrix object into Pandas DataFrame.
"""
self.eval()
if isinstance(self.eval_data, py4j.java_gateway.JavaObject):
self.eval_data = _java2py(SparkContext._active_spark_context, self.eval_data)
if isinstance(self.eval_data, Matrix):
self.eval_data = self.eval_data.toNumPy()
self.eval_data = convertToPandasDF(self.eval_data)
return self.eval_data
def toNumPy(self):
"""
This is a convenience function that calls the global eval method and then converts the matrix object into NumPy array.
"""
self.eval()
if isinstance(self.eval_data, py4j.java_gateway.JavaObject):
self.eval_data = _java2py(SparkContext._active_spark_context, self.eval_data)
if isinstance(self.eval_data, Matrix):
self.eval_data = self.eval_data.toNumPy()
return self.eval_data
if isinstance(self.eval_data, pd.DataFrame):
self.eval_data = self.eval_data.as_matrix()
elif isinstance(self.eval_data, DataFrame):
self.eval_data = self.eval_data.toPandas().as_matrix()
elif isinstance(self.eval_data, spmatrix):
self.eval_data = self.eval_data.toarray()
elif isinstance(self.eval_data, Matrix):
self.eval_data = self.eval_data.toNumPy()
# Always keep default format as NumPy array if possible
return self.eval_data
def toDF(self):
"""
This is a convenience function that calls the global eval method and then converts the matrix object into DataFrame.
"""
if isinstance(self.eval_data, DataFrame):
return self.eval_data
if isinstance(self.eval_data, py4j.java_gateway.JavaObject):
self.eval_data = _java2py(SparkContext._active_spark_context, self.eval_data)
if isinstance(self.eval_data, Matrix):
self.eval_data = self.eval_data.toDF()
return self.eval_data
self.eval_data = matrix.sparkSession.createDataFrame(self.toPandas())
return self.eval_data
def save(self, file, format='csv'):
"""
Allows user to save a matrix to filesystem
Parameters
----------
file: filepath
format: can be csv, text or binary or mm
"""
tmp = construct_intermediate_node([self], ['save(', self.ID , ',\"', file, '\", format=\"', format, '\")\n'])
construct_intermediate_node([tmp], [OUTPUT_ID, ' = full(0, rows=1, cols=1)\n']).eval()
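    # Illustrative usage sketch (hypothetical path, requires an active
    # SparkContext): round-trip a matrix through the filesystem via the
    # module-level load() helper.
    #
    #     m_demo = matrix(np.ones((3, 3)))
    #     m_demo.save('/tmp/demo_matrix.csv', format='csv')
    #     m_roundtrip = load('/tmp/demo_matrix.csv', format='csv')
    #     m_roundtrip.toNumPy()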
def _mark_as_visited(self):
self.visited = True
# for cleanup
matrix.visited = matrix.visited + [ self ]
return self
def _register_as_input(self, execute):
# TODO: Remove this when automatic registration of frame is resolved
matrix.dml = [ self.ID, ' = load(\" \", format=\"csv\")\n'] + matrix.dml
if isinstance(self.eval_data, SUPPORTED_TYPES) and execute:
matrix.script.input(self.ID, convertToMatrixBlock(matrix.sc, self.eval_data))
elif execute:
matrix.script.input(self.ID, self.toDF())
return self
def _register_as_output(self, execute):
# TODO: Remove this when automatic registration of frame is resolved
matrix.dml = matrix.dml + ['save(', self.ID, ', \" \")\n']
if execute:
matrix.script.output(self.ID)
def _visit(self, execute=True):
"""
This function is called for two scenarios:
1. For printing the PyDML script which has not yet been evaluated (execute=False). See '__repr__' method.
2. Called as part of 'eval' method (execute=True). In this scenario, it builds the PyDML script by visiting itself
and its child nodes. Also, it does appropriate registration as input or output that is required by MLContext.
"""
if self.visited:
return self
self._mark_as_visited()
if self.eval_data is not None:
self._register_as_input(execute)
elif self.op is not None:
# Traverse the AST
for m in self.op.inputs:
m._visit(execute=execute)
self.op._visit(execute=execute)
else:
raise Exception('Expected either op or data to be set')
if self.eval_data is None and self.output:
self._register_as_output(execute)
return self
def print_ast(self):
"""
Please use m.print_ast() and/or type `m` for debugging. Here is a sample session:
>>> npm = np.ones((3,3))
>>> m1 = sml.matrix(npm + 3)
>>> m2 = sml.matrix(npm + 5)
>>> m3 = m1 + m2
>>> m3
mVar2 = load(" ", format="csv")
mVar1 = load(" ", format="csv")
mVar3 = mVar1 + mVar2
save(mVar3, " ")
>>> m3.print_ast()
- [mVar3] (op).
- [mVar1] (data).
- [mVar2] (data).
"""
return self._print_ast(0)
def _print_ast(self, numSpaces):
head = ''.join([ ' ' ]*numSpaces + [ '- [', self.ID, '] ' ])
if self.eval_data is not None:
out = head + '(data).\n'
elif self.op is not None:
ret = [ head, '(op).\n' ]
for m in self.op.inputs:
ret = ret + [ m._print_ast(numSpaces + 2) ]
out = ''.join(ret)
else:
raise ValueError('Either op or data needs to be set')
if numSpaces == 0:
print(out)
else:
return out
def __repr__(self):
"""
This function helps to debug matrix class and also examine the generated PyDML script
"""
if self.eval_data is None:
print('# This matrix (' + self.ID + ') is backed by below given PyDML script (which is not yet evaluated). To fetch the data of this matrix, invoke toNumPy() or toDF() or toPandas() methods.\n' + eval([self], execute=False))
else:
print('# This matrix (' + self.ID + ') is backed by ' + str(type(self.eval_data)) + '. To fetch the DataFrame or NumPy array, invoke toDF() or toNumPy() method respectively.')
return ''
######################### NumPy related methods ######################################
__array_priority__ = 10.2
ndim = 2
THROW_ARRAY_CONVERSION_ERROR = False
def __array__(self, dtype=np.double):
"""
As per NumPy from Python,
This method is called to obtain an ndarray object when needed. You should always guarantee this returns an actual ndarray object.
        Using this method, you get back an ndarray object, and subsequent operations on the returned ndarray object will be single-node (plain NumPy) operations.
"""
if not isinstance(self.eval_data, SUPPORTED_TYPES):
# Only warn if there is an unevaluated operation (which could potentially generate large matrix or if data is non-supported singlenode formats)
import inspect
frame,filename,line_number,function_name,lines,index = inspect.stack()[1]
msg = 'Conversion from SystemML matrix to NumPy array (occurs in ' + str(filename) + ':' + str(line_number) + ' ' + function_name + ")"
if matrix.THROW_ARRAY_CONVERSION_ERROR:
raise Exception('[ERROR]:' + msg)
else:
print('[WARN]:' + msg)
return np.array(self.toNumPy(), dtype)
def astype(self, t):
# TODO: Throw error if incorrect type
return self
def asfptype(self):
return self
def set_shape(self,shape):
raise NotImplementedError('Reshaping is not implemented')
def get_shape(self):
if self._shape is None:
lhsStr, inputs = _matricize(self, [])
rlen_ID = _get_new_var_id()
clen_ID = _get_new_var_id()
multiline_dml = [rlen_ID, ' = ', lhsStr, '.shape(0)\n']
multiline_dml = multiline_dml + [clen_ID, ' = ', lhsStr, '.shape(1)\n']
multiline_dml = multiline_dml + [OUTPUT_ID, ' = full(0, rows=2, cols=1)\n']
multiline_dml = multiline_dml + [ OUTPUT_ID, '[0,0] = ', rlen_ID, '\n' ]
multiline_dml = multiline_dml + [ OUTPUT_ID, '[1,0] = ', clen_ID, '\n' ]
ret = construct_intermediate_node(inputs, multiline_dml).toNumPy()
self._shape = tuple(np.array(ret, dtype=int).flatten())
return self._shape
shape = property(fget=get_shape, fset=set_shape)
def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs):
"""
This function enables systemml matrix to be compatible with NumPy's ufuncs.
Parameters
----------
func: ufunc object that was called.
method: string indicating which Ufunc method was called (one of "__call__", "reduce", "reduceat", "accumulate", "outer", "inner").
pos: index of self in inputs.
inputs: tuple of the input arguments to the ufunc
kwargs: dictionary containing the optional input arguments of the ufunc.
"""
if method != '__call__' or kwargs:
return NotImplemented
if func in matrix._numpy_to_systeml_mapping:
fn = matrix._numpy_to_systeml_mapping[func]
else:
return NotImplemented
if len(inputs) == 2:
return fn(inputs[0], inputs[1])
elif len(inputs) == 1:
return fn(inputs[0])
else:
raise ValueError('Unsupported number of inputs')
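    # --- Illustrative note (editor's sketch; not part of the upstream source) ---
    # __numpy_ufunc__ lets NumPy ufuncs dispatch back to the lazy SystemML operators via
    # _numpy_to_systeml_mapping (defined at the end of this class), e.g.:
    #
    #   >>> m = sml.matrix(np.ones((2, 2)))
    #   >>> np.add(m, m)    # routed to matrix.__add__, stays lazy on the SystemML side
    #   >>> np.log1p(m)     # routed to matrix.log1p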
def hstack(self, other):
"""
Stack matrices horizontally (column wise). Invokes cbind internally.
"""
return binaryMatrixFunction(self, other, 'cbind')
def vstack(self, other):
"""
Stack matrices vertically (row wise). Invokes rbind internally.
"""
return binaryMatrixFunction(self, other, 'rbind')
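    # --- Illustrative usage (editor's sketch; not part of the upstream source) ---
    # hstack/vstack map onto DML's cbind/rbind. With `sml` and `np` as in the sketch above:
    #
    #   >>> a = sml.matrix(np.ones((2, 2)))
    #   >>> b = sml.matrix(np.zeros((2, 2)))
    #   >>> a.hstack(b).toNumPy().shape   # (2, 4): columns of b appended to a
    #   >>> a.vstack(b).toNumPy().shape   # (4, 2): rows of b appended to a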
######################### Arithmetic operators ######################################
def negative(self):
lhsStr, inputs = _matricize(self, [])
return construct_intermediate_node(inputs, [OUTPUT_ID, ' = -', lhsStr, '\n'])
def remainder(self, other):
inputs = []
lhsStr, inputs = _matricize(self, inputs)
rhsStr, inputs = _matricize(other, inputs)
        return construct_intermediate_node(inputs, [OUTPUT_ID, ' = ', lhsStr, ' - floor(', lhsStr, '/', rhsStr, ') * ', rhsStr, '\n'])  # elementwise remainder: x - floor(x/y)*y, matching np.remainder
def ldexp(self, other):
inputs = []
lhsStr, inputs = _matricize(self, inputs)
rhsStr, inputs = _matricize(other, inputs)
return construct_intermediate_node(inputs, [OUTPUT_ID, ' = ', lhsStr, '* (2**', rhsStr, ')\n'])
def mod(self, other):
inputs = []
lhsStr, inputs = _matricize(self, inputs)
rhsStr, inputs = _matricize(other, inputs)
return construct_intermediate_node(inputs, [OUTPUT_ID, ' = ', lhsStr, ' - floor(', lhsStr, '/', rhsStr, ') * ', rhsStr, '\n'])
def logaddexp(self, other):
inputs = []
lhsStr, inputs = _matricize(self, inputs)
rhsStr, inputs = _matricize(other, inputs)
return construct_intermediate_node(inputs, [OUTPUT_ID, ' = log(exp(', lhsStr, ') + exp(', rhsStr, '))\n'])
def logaddexp2(self, other):
inputs = []
lhsStr, inputs = _matricize(self, inputs)
rhsStr, inputs = _matricize(other, inputs)
        opStr = _log_base('2**' + lhsStr + ' + 2**' + rhsStr, 2)  # log2(2**x + 2**y)
return construct_intermediate_node(inputs, [OUTPUT_ID, ' = ', opStr, '\n'])
def log1p(self):
inputs = []
lhsStr, inputs = _matricize(self, inputs)
return construct_intermediate_node(inputs, [OUTPUT_ID, ' = log(1 + ', lhsStr, ')\n'])
def exp(self):
return unaryMatrixFunction(self, 'exp')
def exp2(self):
inputs = []
lhsStr, inputs = _matricize(self, inputs)
return construct_intermediate_node(inputs, [OUTPUT_ID, ' = 2**', lhsStr, '\n'])
def square(self):
inputs = []
lhsStr, inputs = _matricize(self, inputs)
return construct_intermediate_node(inputs, [OUTPUT_ID, ' = ', lhsStr, '**2\n'])
def reciprocal(self):
inputs = []
lhsStr, inputs = _matricize(self, inputs)
return construct_intermediate_node(inputs, [OUTPUT_ID, ' = 1/', lhsStr, '\n'])
def expm1(self):
inputs = []
lhsStr, inputs = _matricize(self, inputs)
return construct_intermediate_node(inputs, [OUTPUT_ID, ' = exp(', lhsStr, ') - 1\n'])
def ones_like(self):
inputs = []
lhsStr, inputs = _matricize(self, inputs)
rlen = lhsStr + '.shape(axis=0)'
clen = lhsStr + '.shape(axis=1)'
return construct_intermediate_node(inputs, [OUTPUT_ID, ' = full(1, rows=', rlen, ', cols=', clen, ')\n'])
def zeros_like(self):
inputs = []
lhsStr, inputs = _matricize(self, inputs)
rlen = lhsStr + '.shape(axis=0)'
clen = lhsStr + '.shape(axis=1)'
return construct_intermediate_node(inputs, [OUTPUT_ID, ' = full(0, rows=', rlen, ', cols=', clen, ')\n'])
def log2(self):
return self.log(2)
def log10(self):
return self.log(10)
def log(self, y=None):
if y is None:
return unaryMatrixFunction(self, 'log')
else:
return binaryMatrixFunction(self, y, 'log')
def abs(self):
return unaryMatrixFunction(self, 'abs')
def sqrt(self):
return unaryMatrixFunction(self, 'sqrt')
def round(self):
return unaryMatrixFunction(self, 'round')
def floor(self):
return unaryMatrixFunction(self, 'floor')
def ceil(self):
return unaryMatrixFunction(self, 'ceil')
def sin(self):
return unaryMatrixFunction(self, 'sin')
def cos(self):
return unaryMatrixFunction(self, 'cos')
def tan(self):
return unaryMatrixFunction(self, 'tan')
def arcsin(self):
return self.asin()
def arccos(self):
return self.acos()
def arctan(self):
return self.atan()
def asin(self):
return unaryMatrixFunction(self, 'asin')
def acos(self):
return unaryMatrixFunction(self, 'acos')
def atan(self):
return unaryMatrixFunction(self, 'atan')
def rad2deg(self):
"""
Convert angles from radians to degrees.
"""
inputs = []
lhsStr, inputs = _matricize(self, inputs)
# 180/pi = 57.2957795131
return construct_intermediate_node(inputs, [OUTPUT_ID, ' = ', lhsStr, '*57.2957795131\n'])
def deg2rad(self):
"""
Convert angles from degrees to radians.
"""
inputs = []
lhsStr, inputs = _matricize(self, inputs)
# pi/180 = 0.01745329251
return construct_intermediate_node(inputs, [OUTPUT_ID, ' = ', lhsStr, '*0.01745329251\n'])
def sign(self):
return unaryMatrixFunction(self, 'sign')
def __add__(self, other):
return binary_op(self, other, ' + ')
def __sub__(self, other):
return binary_op(self, other, ' - ')
def __mul__(self, other):
return binary_op(self, other, ' * ')
def __floordiv__(self, other):
return binary_op(self, other, ' // ')
def __div__(self, other):
"""
Performs division (Python 2 way).
"""
return binary_op(self, other, ' / ')
def __truediv__(self, other):
"""
Performs division (Python 3 way).
"""
return binary_op(self, other, ' / ')
def __mod__(self, other):
return binary_op(self, other, ' % ')
def __pow__(self, other):
return binary_op(self, other, ' ** ')
def __radd__(self, other):
return binary_op(other, self, ' + ')
def __rsub__(self, other):
return binary_op(other, self, ' - ')
def __rmul__(self, other):
return binary_op(other, self, ' * ')
def __rfloordiv__(self, other):
return binary_op(other, self, ' // ')
def __rdiv__(self, other):
return binary_op(other, self, ' / ')
def __rtruediv__(self, other):
"""
Performs division (Python 3 way).
"""
return binary_op(other, self, ' / ')
def __rmod__(self, other):
return binary_op(other, self, ' % ')
def __rpow__(self, other):
return binary_op(other, self, ' ** ')
def dot(self, other):
"""
Numpy way of performing matrix multiplication
"""
return binaryMatrixFunction(self, other, 'dot')
def __matmul__(self, other):
"""
        Performs matrix multiplication (infix operator: @; see PEP 465).
"""
return binaryMatrixFunction(self, other, 'dot')
######################### Relational/Boolean operators ######################################
def __lt__(self, other):
return binary_op(self, other, ' < ')
def __le__(self, other):
return binary_op(self, other, ' <= ')
def __gt__(self, other):
return binary_op(self, other, ' > ')
def __ge__(self, other):
return binary_op(self, other,' >= ')
def __eq__(self, other):
return binary_op(self, other, ' == ')
def __ne__(self, other):
return binary_op(self, other, ' != ')
# TODO: Cast the output back into scalar and return boolean results
def __and__(self, other):
return binary_op(other, self, ' & ')
def __or__(self, other):
return binary_op(other, self, ' | ')
def logical_not(self):
inputs = []
lhsStr, inputs = _matricize(self, inputs)
return construct_intermediate_node(inputs, [OUTPUT_ID, ' = !', lhsStr, '\n'])
def remove_empty(self, axis=None):
"""
Removes all empty rows or columns from the input matrix target X according to specified axis.
Parameters
----------
axis : int (0 or 1)
"""
if axis is None:
raise ValueError('axis is a mandatory argument for remove_empty')
        # Parameters are passed as keyword arguments; _parameterized_helper_fn collects them via **kwargs.
        if axis == 0:
            return self._parameterized_helper_fn('removeEmpty', target=self, margin='rows')
        elif axis == 1:
            return self._parameterized_helper_fn('removeEmpty', target=self, margin='cols')
        else:
            raise ValueError('axis for remove_empty needs to be either 0 or 1.')
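    # --- Illustrative usage (editor's sketch; not part of the upstream source) ---
    # remove_empty drops all-zero rows (axis=0) or columns (axis=1) via DML's removeEmpty:
    #
    #   >>> m = sml.matrix(np.array([[1., 0.], [0., 0.]]))
    #   >>> m.remove_empty(axis=0).toNumPy()   # forces evaluation; the all-zero row is dropped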
def replace(self, pattern=None, replacement=None):
"""
        Replaces all occurrences of the given pattern value in the input matrix X with the given replacement value.
Parameters
----------
pattern : float or int
replacement : float or int
"""
if pattern is None or not isinstance(pattern, (float, int)):
raise ValueError('pattern should be of type float or int')
if replacement is None or not isinstance(replacement, (float, int)):
raise ValueError('replacement should be of type float or int')
        # Parameters are passed as keyword arguments; _parameterized_helper_fn collects them via **kwargs.
        return self._parameterized_helper_fn('replace', target=self, pattern=pattern, replacement=replacement)
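    # --- Illustrative usage (editor's sketch; not part of the upstream source) ---
    # replace substitutes every cell equal to `pattern` with `replacement`:
    #
    #   >>> m = sml.matrix(np.array([[1., 0.], [0., 2.]]))
    #   >>> m.replace(pattern=0, replacement=-1).toNumPy()   # zeros become -1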
def _parameterized_helper_fn(self, fnName, **kwargs):
"""
Helper to invoke parameterized builtin function
"""
dml_script = ''
lhsStr, inputs = _matricize(self, [])
dml_script = [OUTPUT_ID, ' = ', fnName, '(', lhsStr ]
first_arg = True
for key in kwargs:
if first_arg:
first_arg = False
else:
dml_script = dml_script + [ ', ' ]
v = kwargs[key]
if isinstance(v, str):
dml_script = dml_script + [key, '=\"', v, '\"' ]
elif isinstance(v, matrix):
dml_script = dml_script + [key, '=', v.ID]
else:
dml_script = dml_script + [key, '=', str(v) ]
dml_script = dml_script + [ ')\n' ]
return construct_intermediate_node(inputs, dml_script)
######################### Aggregation functions ######################################
def prod(self):
"""
Return the product of all cells in matrix
"""
return self._aggFn('prod', None)
def sum(self, axis=None):
"""
Compute the sum along the specified axis
Parameters
----------
axis : int, optional
"""
return self._aggFn('sum', axis)
def mean(self, axis=None):
"""
Compute the arithmetic mean along the specified axis
Parameters
----------
axis : int, optional
"""
return self._aggFn('mean', axis)
def var(self, axis=None):
"""
Compute the variance along the specified axis.
        We assume that the delta degrees of freedom (ddof) is 1 (unlike NumPy, which defaults to ddof=0).
Parameters
----------
axis : int, optional
"""
return self._aggFn('var', axis)
def moment(self, moment=1, axis=None):
"""
Calculates the nth moment about the mean
Parameters
----------
moment : int
can be 1, 2, 3 or 4
axis : int, optional
"""
if moment == 1:
return self.mean(axis)
elif moment == 2:
return self.var(axis)
elif moment == 3 or moment == 4:
return self._moment_helper(moment, axis)
else:
raise ValueError('The specified moment is not supported:' + str(moment))
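    # --- Illustrative usage (editor's sketch; not part of the upstream source) ---
    # moment() dispatches to mean/var for the first two moments and to the DML moment()
    # builtin (via _moment_helper below) for the 3rd and 4th central moments:
    #
    #   >>> m = sml.matrix(np.random.rand(100, 4))
    #   >>> m.moment(moment=2)              # overall variance (ddof=1, see var above)
    #   >>> m.moment(moment=4, axis=None)   # 4th central moment over all cells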
def _moment_helper(self, k, axis=0):
dml_script = ''
lhsStr, inputs = _matricize(self, [])
dml_script = [OUTPUT_ID, ' = moment(', lhsStr, ', ', str(k), ')\n' ]
if axis is None:
dml_script = [OUTPUT_ID, ' = moment(full(', lhsStr, ', rows=length(', lhsStr, '), cols=1), ', str(k), ')\n' ]
elif axis == 0:
dml_script = [OUTPUT_ID, ' = full(0, rows=nrow(', lhsStr, '), cols=1)\n' ]
dml_script = dml_script + [ 'parfor(i in 1:nrow(', lhsStr, '), check=0):\n' ]
dml_script = dml_script + [ '\t', OUTPUT_ID, '[i-1, 0] = moment(full(', lhsStr, '[i-1,], rows=ncol(', lhsStr, '), cols=1), ', str(k), ')\n\n' ]
elif axis == 1:
dml_script = [OUTPUT_ID, ' = full(0, rows=1, cols=ncol(', lhsStr, '))\n' ]
dml_script = dml_script + [ 'parfor(i in 1:ncol(', lhsStr, '), check=0):\n' ]
dml_script = dml_script + [ '\t', OUTPUT_ID, '[0, i-1] = moment(', lhsStr, '[,i-1], ', str(k), ')\n\n' ]
else:
            raise ValueError('Incorrect axis: ' + str(axis))
return construct_intermediate_node(inputs, dml_script)
def sd(self, axis=None):
"""
Compute the standard deviation along the specified axis
Parameters
----------
axis : int, optional
"""
return self._aggFn('sd', axis)
def max(self, other=None, axis=None):
"""
Compute the maximum value along the specified axis
Parameters
----------
other: matrix or numpy array (& other supported types) or scalar
axis : int, optional
"""
if other is not None and axis is not None:
            raise ValueError("Only one of 'other' and 'axis' may be specified")
elif other is None and axis is not None:
return self._aggFn('max', axis)
else:
return binaryMatrixFunction(self, other, 'max')
def min(self, other=None, axis=None):
"""
Compute the minimum value along the specified axis
Parameters
----------
other: matrix or numpy array (& other supported types) or scalar
axis : int, optional
"""
if other is not None and axis is not None:
            raise ValueError("Only one of 'other' and 'axis' may be specified")
elif other is None and axis is not None:
return self._aggFn('min', axis)
else:
return binaryMatrixFunction(self, other, 'min')
def argmin(self, axis=None):
"""
Returns the indices of the minimum values along an axis.
Parameters
----------
        axis : int, optional (only axis=1, i.e. rowIndexMin is supported in this version)
"""
return self._aggFn('argmin', axis)
def argmax(self, axis=None):
"""
Returns the indices of the maximum values along an axis.
Parameters
----------
axis : int, optional (only axis=1, i.e. rowIndexMax is supported in this version)
"""
return self._aggFn('argmax', axis)
def cumsum(self, axis=None):
"""
        Computes the cumulative sum along the specified axis.
Parameters
----------
axis : int, optional (only axis=0, i.e. cumsum along the rows is supported in this version)
"""
return self._aggFn('cumsum', axis)
def transpose(self):
"""
Transposes the matrix.
"""
return self._aggFn('transpose', None)
def trace(self):
"""
        Return the sum of the cells on the main diagonal of a square matrix
"""
return self._aggFn('trace', None)
def _aggFn(self, fnName, axis):
"""
Common function that is called for functions that have axis as parameter.
"""
dml_script = ''
lhsStr, inputs = _matricize(self, [])
if axis is None:
dml_script = [OUTPUT_ID, ' = ', fnName, '(', lhsStr, ')\n']
else:
dml_script = [OUTPUT_ID, ' = ', fnName, '(', lhsStr, ', axis=', str(axis) ,')\n']
return construct_intermediate_node(inputs, dml_script)
######################### Indexing operators ######################################
def __getitem__(self, index):
"""
Implements evaluation of right indexing operations such as m[1,1], m[0:1,], m[:, 0:1]
"""
return construct_intermediate_node([self], [OUTPUT_ID, ' = ', self.ID ] + getIndexingDML(index) + [ '\n' ])
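    # --- Illustrative usage (editor's sketch; not part of the upstream source) ---
    # Right indexing builds a lazy slice node; left indexing (__setitem__ below) first
    # rewires the AST through _prepareForInPlaceUpdate so the update is recorded as DML:
    #
    #   >>> m = sml.matrix(np.arange(9.).reshape(3, 3))
    #   >>> m[0:2, :].toNumPy()   # first two rows
    #   >>> m[1, 1] = 42.         # in-place style update, appended as a new DML assignment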
# Performs deep copy if the matrix is backed by data
def _prepareForInPlaceUpdate(self):
temp = matrix(self.eval_data, op=self.op)
for op in self.referenced:
op.inputs = [temp if x.ID==self.ID else x for x in op.inputs]
self.ID, temp.ID = temp.ID, self.ID # Copy even the IDs as the IDs might be used to create DML
self.op = DMLOp([temp], dml=[self.ID, " = ", temp.ID])
self.eval_data = None
temp.referenced = self.referenced + [ self.op ]
self.referenced = []
def __setitem__(self, index, value):
"""
Implements evaluation of left indexing operations such as m[1,1]=2
"""
self._prepareForInPlaceUpdate()
if isinstance(value, matrix) or isinstance(value, DMLOp):
self.op.inputs = self.op.inputs + [ value ]
if isinstance(value, matrix):
value.referenced = value.referenced + [ self.op ]
self.op.dml = self.op.dml + [ '\n', self.ID ] + getIndexingDML(index) + [ ' = ', getValue(value), '\n']
# Not implemented: conj, hyperbolic/inverse-hyperbolic functions(i.e. sinh, arcsinh, cosh, ...), bitwise operator, xor operator, isreal, iscomplex, isfinite, isinf, isnan, copysign, nextafter, modf, frexp, trunc
_numpy_to_systeml_mapping = {np.add: __add__, np.subtract: __sub__, np.multiply: __mul__, np.divide: __div__, np.logaddexp: logaddexp, np.true_divide: __truediv__, np.floor_divide: __floordiv__, np.negative: negative, np.power: __pow__, np.remainder: remainder, np.mod: mod, np.fmod: __mod__, np.absolute: abs, np.rint: round, np.sign: sign, np.exp: exp, np.exp2: exp2, np.log: log, np.log2: log2, np.log10: log10, np.expm1: expm1, np.log1p: log1p, np.sqrt: sqrt, np.square: square, np.reciprocal: reciprocal, np.ones_like: ones_like, np.zeros_like: zeros_like, np.sin: sin, np.cos: cos, np.tan: tan, np.arcsin: arcsin, np.arccos: arccos, np.arctan: arctan, np.deg2rad: deg2rad, np.rad2deg: rad2deg, np.greater: __gt__, np.greater_equal: __ge__, np.less: __lt__, np.less_equal: __le__, np.not_equal: __ne__, np.equal: __eq__, np.logical_not: logical_not, np.logical_and: __and__, np.logical_or: __or__, np.maximum: max, np.minimum: min, np.signbit: sign, np.ldexp: ldexp, np.dot:dot}
| apache-2.0 |
pompiduskus/scikit-learn | examples/decomposition/plot_incremental_pca.py | 244 | 1878 | """
===============
Incremental PCA
===============
Incremental principal component analysis (IPCA) is typically used as a
replacement for principal component analysis (PCA) when the dataset to be
decomposed is too large to fit in memory. IPCA builds a low-rank approximation
for the input data using an amount of memory which is independent of the
number of input data samples. It is still dependent on the input data features,
but changing the batch size allows for control of memory usage.
This example serves as a visual check that IPCA is able to find a similar
projection of the data to PCA (up to a sign flip), while only processing a
few samples at a time. This can be considered a "toy example", as IPCA is
intended for large datasets which do not fit in main memory, requiring
incremental approaches.
"""
print(__doc__)
# Authors: Kyle Kastner
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA, IncrementalPCA
iris = load_iris()
X = iris.data
y = iris.target
n_components = 2
ipca = IncrementalPCA(n_components=n_components, batch_size=10)
X_ipca = ipca.fit_transform(X)
pca = PCA(n_components=n_components)
X_pca = pca.fit_transform(X)
for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]:
plt.figure(figsize=(8, 8))
for c, i, target_name in zip("rgb", [0, 1, 2], iris.target_names):
plt.scatter(X_transformed[y == i, 0], X_transformed[y == i, 1],
c=c, label=target_name)
if "Incremental" in title:
err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean()
plt.title(title + " of iris dataset\nMean absolute unsigned error "
"%.6f" % err)
else:
plt.title(title + " of iris dataset")
plt.legend(loc="best")
plt.axis([-4, 4, -1.5, 1.5])
plt.show()
| bsd-3-clause |
sinhrks/scikit-learn | examples/linear_model/plot_polynomial_interpolation.py | 168 | 2088 | #!/usr/bin/env python
"""
========================
Polynomial interpolation
========================
This example demonstrates how to approximate a function with a polynomial of
degree n_degree by using ridge regression. Concretely, from n_samples 1d
points, it suffices to build the Vandermonde matrix, which is n_samples x
n_degree+1 and has the following form:
[[1, x_1, x_1 ** 2, x_1 ** 3, ...],
[1, x_2, x_2 ** 2, x_2 ** 3, ...],
...]
Intuitively, this matrix can be interpreted as a matrix of pseudo features (the
points raised to some power). The matrix is akin to (but different from) the
matrix induced by a polynomial kernel.
This example shows that you can do non-linear regression with a linear model,
using a pipeline to add non-linear features. Kernel methods extend this idea
and can induce very high (even infinite) dimensional feature spaces.
"""
print(__doc__)
# Author: Mathieu Blondel
# Jake Vanderplas
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
def f(x):
""" function to approximate by polynomial interpolation"""
return x * np.sin(x)
# generate points used to plot
x_plot = np.linspace(0, 10, 100)
# generate points and keep a subset of them
x = np.linspace(0, 10, 100)
rng = np.random.RandomState(0)
rng.shuffle(x)
x = np.sort(x[:20])
y = f(x)
# create matrix versions of these arrays
X = x[:, np.newaxis]
X_plot = x_plot[:, np.newaxis]
colors = ['teal', 'yellowgreen', 'gold']
lw = 2
plt.plot(x_plot, f(x_plot), color='cornflowerblue', linewidth=lw,
label="ground truth")
plt.scatter(x, y, color='navy', s=30, marker='o', label="training points")
for count, degree in enumerate([3, 4, 5]):
model = make_pipeline(PolynomialFeatures(degree), Ridge())
model.fit(X, y)
y_plot = model.predict(X_plot)
plt.plot(x_plot, y_plot, color=colors[count], linewidth=lw,
label="degree %d" % degree)
plt.legend(loc='lower left')
plt.show()
| bsd-3-clause |
low-sky/simscript | postproc/makefits.py | 1 | 3190 | #----------------------------------------------------------
# Python Routine to produce FITS files from RADMC output
#
# two fits files are produced: 1) PPV, and 2) the mom0 map
# units of the PPV pixels are 'erg/s/cm^2/Hz/ster',
# and that of the mom0 map is in K km/s (e.g. Wco)
#
# Created June 7, 2013
#
# toK -- Binary flag to write out a PPV data cube in K
#----------------------------------------------------------
import matplotlib
matplotlib.use('Agg')
import numpy as np
import readRADMC as RADMC
import astropy.io.fits as pyfits
import math
import sys
pi = math.pi
def makefits (fitsfile='image.fits', dpc = None, mom0fitsfile='mom0.fits', lambda0=2600.7576, toK = True):
Boltz = 1.380700e-16
ckms = 2.9979e5
a = RADMC.readimage()
lambdanum=a.nrfr
xnum=a.nx
ynum=a.ny
xsize=a.sizepix_x
ysize=a.sizepix_y
a.image[a.image==0.0]=np.nan
    ind = 1  # wavelength index used for the brightness-temperature scale factor below (was commented out, leaving 'ind' undefined)
# dpc = 1
x1 = a.x[0]
y1 = a.y[0]
firstlambda = a.wavelength[0]
if (lambdanum > 1):
deltalam = float(a.wavelength[1]) - float(a.wavelength[0])
v0 = ckms * (1 - lambda0/a.wavelength[0])
v1 = ckms * (1 - lambda0/a.wavelength[1])
delta = v1 - v0
else:
deltalam = a.wavelength[0]
if(toK == True):
scalefac = (0.5*(a.wavelength[ind]*1e-4)**2)/Boltz # Uses a single wavelength for the sake of memory
else:
scalefac = 1
hdu = pyfits.PrimaryHDU(data = (scalefac*a.image))
hd = hdu.header
if(dpc != None):
if (dpc == 0):
print('0 pc to object?')
hd['CDELT1'] = -xsize/(dpc*3.086e18)*(2*pi/360)
hd['CRPIX1'] = xnum/2
hd['CRVAL1'] = 180e0
hd['CTYPE1'] = 'RA---CAR'
hd['CDELT2'] = ysize/(dpc*3.086e18)*(2*pi/360)
hd['CRVAL2'] = 0.0
hd['CTYPE2'] = 'DEC--CAR'
hd['EQUINOX'] = 2000.0
hd['radesys'] = 'FK5'
hd['TELESCOP'] = 'RADMC3D'
hd['object'] = 'SIM@ %.0f PC'%dpc
else:
hd['CDELT1'] = xsize
hd['CRPIX1'] = 1.0
hd['CRVAL1'] = x1
hd['CTYPE1'] = 'position - cm'
hd['CDELT2'] = ysize
hd['CRPIX2'] = 1.0
hd['CRVAL2'] = y1
hd['CTYPE2'] = 'position - cm'
if (toK == True):
lam0 = 2.99792458e8/110.2013542798e9*1e6
vaxis = 2.99792458e8*(lam0-a.wavelength)/lam0
dv = np.median(vaxis-Shift(vaxis,1))
ind = np.argmin(abs(vaxis))
hd['CDELT3'] = dv
hd['CRPIX3'] = ind + 1
hd['CRVAL3'] = vaxis[ind+1]
hd['CTYPE3'] = 'V0PT'
hd.update('BUNIT','K','Tmb')
else:
scalefac = 1
hd['CDELT3'] = deltalam
hd['CRPIX3'] = 1.0
hd['CRVAL3'] = firstlambda
hd['CTYPE3'] = 'Wavelength - micron'
hd['BUNIT'] = 'erg/s/cm^2/Hz/ster'
# print(type(a.image))
hdu.writeto(fitsfile,clobber=True)
# mom = pyfits.PrimaryHDU(data = mom0fitsfile)
# momhd = mom.header
# if len(mom0fitsfile) > 0 and lambdanum > 1:
# mom0arr = np.zeros((xnum,ynum), 'f')
# for i in range(xnum - 1):
# for j in range(ynum - 1):
# for k in range(lambdanum - 1):
# #print(mom0arr[1,1,1])
# #print(a.image[1,1])
# mom0arr[i,j] = mom0arr[i,j] + 0.5 * (a.wavelength(k)*1.0e-4)**(2*a.image[i,j,k]*deltav/Boltz)
# hd['BUNIT'] = 'K km/s'
# momhd = hd
# mom.writeto(mom0arr)
def Shift(l,n):
l = list(l)
n = int(n)
n = n % len(l)
n = -n
shifted = l[n:] + l[:n]
return shifted
#print(sys.argv)
#makefits(sys.argv[1][1:-1],sys.argv[2])
#makefits('image.out')
| gpl-2.0 |
bmassman/fake_news | fake_news/summary_stats.py | 1 | 3707 | #!/usr/bin/env python3
"""
Script to produce summary statistics for news articles.
"""
import pandas as pd
import seaborn as sns
from .article_db import ArticleDB
def print_full(x):
"""Print all rows in Pandas DataFrame x."""
pd.set_option('display.max_rows', len(x))
print(x)
pd.reset_option('display.max_rows')
def global_stats(articles: pd.DataFrame):
"""Calculate global stats on article db."""
print(f'Number of articles: {len(articles):,}')
num_sources = len(pd.value_counts(articles['base_url'], sort=False))
print(f'Number of news sources: {num_sources}')
mean_wc = articles['word_count'].mean()
print(f'Global mean word count: {mean_wc:.1f}')
missing_authors = (articles['authors'] == '').sum()
print(f'Missing authors: {missing_authors:,}')
missing_titles = (articles['title'] == '').sum()
print(f'Missing titles: {missing_titles}')
missing_texts = (articles['text'] == '').sum()
print(f'Missing texts: {missing_texts:,}')
def calculate_word_count_stats(articles: pd.DataFrame):
"""Calculate aggregate word count statistics on each source's articles."""
by_source = articles.groupby(['base_url'])['word_count']
by_source = by_source.agg(['count', 'mean', 'std'])
by_source.sort_values('count', ascending=False, inplace=True)
print_full(by_source)
top_sources = by_source.head(10).index
top_counts = by_source.reset_index()[by_source.index.isin(top_sources)]
sns.barplot(x='base_url', y='count', data=top_counts)
sns.plt.show()
sns.boxplot(x='base_url', y='word_count',
data=articles[articles['base_url'].isin(top_sources)])
sns.plt.show()
def show_articles_by_source(articles: pd.DataFrame):
"""Show boxplot comparing articles by source for fake and true news."""
by_source = (articles.groupby(['base_url', 'labels'])
.size()
.reset_index(name='count'))
by_source = by_source[by_source['count'] > 100]
sns.boxplot(x='labels', y='count', data=by_source)
sns.plt.show()
def calculate_missing_values(articles: pd.DataFrame):
"""Calculate count of nulls in each column."""
def null_fields(x: pd.Series) -> pd.Series:
return pd.Series({'no_author': (x['authors'] == '').sum(),
'no_text': (x['text'] == '').sum(),
'no_title': (x['title'] == '').sum()})
null_field_count = articles.groupby('base_url').apply(null_fields)
null_field_count = null_field_count[(null_field_count.T != 0).any()]
print_full(null_field_count)
def word_count_by_label(articles: pd.DataFrame):
"""Show graph of word counts by article label."""
palette = sns.color_palette(palette='hls', n_colors=2)
true_news_wc = articles[articles['labels'] == 0]['word_count']
fake_news_wc = articles[articles['labels'] == 1]['word_count']
sns.kdeplot(true_news_wc, bw=3, color=palette[0], label='True News')
sns.kdeplot(fake_news_wc, bw=3, color=palette[1], label='Fake News')
sns.plt.legend()
sns.plt.show()
def show_stats():
"""Display statistics on articles."""
articles = ArticleDB(tfidf=False, author=False, tags=False, title=False,
domain_endings=False, grammar_mistakes=False,
word_count=True, misspellings=False, lshash=False,
source_count=False, sentiment=False)
articles.X
articles = articles.df
global_stats(articles)
calculate_word_count_stats(articles)
calculate_missing_values(articles)
word_count_by_label(articles)
show_articles_by_source(articles)
if __name__ == '__main__':
show_stats()
| mit |
rexshihaoren/scikit-learn | benchmarks/bench_plot_parallel_pairwise.py | 297 | 1247 | # Author: Mathieu Blondel <[email protected]>
# License: BSD 3 clause
import time
import pylab as pl
from sklearn.utils import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
def plot(func):
random_state = check_random_state(0)
one_core = []
multi_core = []
sample_sizes = range(1000, 6000, 1000)
for n_samples in sample_sizes:
X = random_state.rand(n_samples, 300)
start = time.time()
func(X, n_jobs=1)
one_core.append(time.time() - start)
start = time.time()
func(X, n_jobs=-1)
multi_core.append(time.time() - start)
pl.figure('scikit-learn parallel %s benchmark results' % func.__name__)
pl.plot(sample_sizes, one_core, label="one core")
pl.plot(sample_sizes, multi_core, label="multi core")
pl.xlabel('n_samples')
pl.ylabel('Time (s)')
pl.title('Parallel %s' % func.__name__)
pl.legend()
def euclidean_distances(X, n_jobs):
return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs)
def rbf_kernels(X, n_jobs):
return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1)
plot(euclidean_distances)
plot(rbf_kernels)
pl.show()
| bsd-3-clause |
cjayb/mne-python | mne/utils/config.py | 2 | 19152 | # -*- coding: utf-8 -*-
"""The config functions."""
# Authors: Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import atexit
from functools import partial
import json
import os
import os.path as op
import platform
import shutil
import sys
import tempfile
import numpy as np
from .check import _validate_type, _check_pyqt5_version
from ._logging import warn, logger
_temp_home_dir = None
def set_cache_dir(cache_dir):
"""Set the directory to be used for temporary file storage.
This directory is used by joblib to store memmapped arrays,
which reduces memory requirements and speeds up parallel
computation.
Parameters
----------
cache_dir : str or None
Directory to use for temporary file storage. None disables
temporary file storage.
"""
if cache_dir is not None and not op.exists(cache_dir):
raise IOError('Directory %s does not exist' % cache_dir)
set_config('MNE_CACHE_DIR', cache_dir, set_env=False)
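# --- Illustrative usage (editor's sketch; not part of the upstream MNE source) ---
# The path below is hypothetical and must already exist, otherwise an IOError is raised.
# Assuming the usual top-level re-export (``mne.set_cache_dir``):
#
#   >>> import mne
#   >>> mne.set_cache_dir('/tmp/mne_cache')   # persisted as MNE_CACHE_DIR
#   >>> mne.set_cache_dir(None)               # disable joblib memmapping again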
def set_memmap_min_size(memmap_min_size):
"""Set the minimum size for memmaping of arrays for parallel processing.
Parameters
----------
memmap_min_size : str or None
Threshold on the minimum size of arrays that triggers automated memory
mapping for parallel processing, e.g., '1M' for 1 megabyte.
Use None to disable memmaping of large arrays.
"""
if memmap_min_size is not None:
if not isinstance(memmap_min_size, str):
raise ValueError('\'memmap_min_size\' has to be a string.')
if memmap_min_size[-1] not in ['K', 'M', 'G']:
raise ValueError('The size has to be given in kilo-, mega-, or '
'gigabytes, e.g., 100K, 500M, 1G.')
set_config('MNE_MEMMAP_MIN_SIZE', memmap_min_size, set_env=False)
# List the known configuration values
known_config_types = (
'MNE_3D_OPTION_ANTIALIAS',
'MNE_BROWSE_RAW_SIZE',
'MNE_CACHE_DIR',
'MNE_COREG_ADVANCED_RENDERING',
'MNE_COREG_COPY_ANNOT',
'MNE_COREG_GUESS_MRI_SUBJECT',
'MNE_COREG_HEAD_HIGH_RES',
'MNE_COREG_HEAD_OPACITY',
'MNE_COREG_INTERACTION',
'MNE_COREG_MARK_INSIDE',
'MNE_COREG_PREPARE_BEM',
'MNE_COREG_PROJECT_EEG',
'MNE_COREG_ORIENT_TO_SURFACE',
'MNE_COREG_SCALE_LABELS',
'MNE_COREG_SCALE_BY_DISTANCE',
'MNE_COREG_SCENE_SCALE',
'MNE_COREG_WINDOW_HEIGHT',
'MNE_COREG_WINDOW_WIDTH',
'MNE_COREG_SUBJECTS_DIR',
'MNE_CUDA_DEVICE',
'MNE_CUDA_IGNORE_PRECISION',
'MNE_DATA',
'MNE_DATASETS_BRAINSTORM_PATH',
'MNE_DATASETS_EEGBCI_PATH',
'MNE_DATASETS_HF_SEF_PATH',
'MNE_DATASETS_MEGSIM_PATH',
'MNE_DATASETS_MISC_PATH',
'MNE_DATASETS_MTRF_PATH',
'MNE_DATASETS_SAMPLE_PATH',
'MNE_DATASETS_SOMATO_PATH',
'MNE_DATASETS_MULTIMODAL_PATH',
'MNE_DATASETS_FNIRS_MOTOR_PATH',
'MNE_DATASETS_OPM_PATH',
'MNE_DATASETS_SPM_FACE_DATASETS_TESTS',
'MNE_DATASETS_SPM_FACE_PATH',
'MNE_DATASETS_TESTING_PATH',
'MNE_DATASETS_VISUAL_92_CATEGORIES_PATH',
'MNE_DATASETS_KILOWORD_PATH',
'MNE_DATASETS_FIELDTRIP_CMC_PATH',
'MNE_DATASETS_PHANTOM_4DBTI_PATH',
'MNE_DATASETS_LIMO_PATH',
'MNE_DATASETS_REFMEG_NOISE_PATH',
'MNE_FORCE_SERIAL',
'MNE_KIT2FIFF_STIM_CHANNELS',
'MNE_KIT2FIFF_STIM_CHANNEL_CODING',
'MNE_KIT2FIFF_STIM_CHANNEL_SLOPE',
'MNE_KIT2FIFF_STIM_CHANNEL_THRESHOLD',
'MNE_LOGGING_LEVEL',
'MNE_MEMMAP_MIN_SIZE',
'MNE_SKIP_FTP_TESTS',
'MNE_SKIP_NETWORK_TESTS',
'MNE_SKIP_TESTING_DATASET_TESTS',
'MNE_STIM_CHANNEL',
'MNE_USE_CUDA',
'MNE_USE_NUMBA',
'SUBJECTS_DIR',
)
# These allow for partial matches, e.g. 'MNE_STIM_CHANNEL_1' is okay key
known_config_wildcards = (
'MNE_STIM_CHANNEL',
)
def _load_config(config_path, raise_error=False):
"""Safely load a config file."""
with open(config_path, 'r') as fid:
try:
config = json.load(fid)
except ValueError:
# No JSON object could be decoded --> corrupt file?
msg = ('The MNE-Python config file (%s) is not a valid JSON '
'file and might be corrupted' % config_path)
if raise_error:
raise RuntimeError(msg)
warn(msg)
config = dict()
return config
def get_config_path(home_dir=None):
r"""Get path to standard mne-python config file.
Parameters
----------
home_dir : str | None
The folder that contains the .mne config folder.
If None, it is found automatically.
Returns
-------
config_path : str
The path to the mne-python configuration file. On windows, this
will be '%USERPROFILE%\.mne\mne-python.json'. On every other
system, this will be ~/.mne/mne-python.json.
"""
val = op.join(_get_extra_data_path(home_dir=home_dir),
'mne-python.json')
return val
def get_config(key=None, default=None, raise_error=False, home_dir=None,
use_env=True):
"""Read MNE-Python preferences from environment or config file.
Parameters
----------
key : None | str
The preference key to look for. The os environment is searched first,
then the mne-python config file is parsed.
If None, all the config parameters present in environment variables or
the path are returned. If key is an empty string, a list of all valid
keys (but not values) is returned.
default : str | None
Value to return if the key is not found.
raise_error : bool
If True, raise an error if the key is not found (instead of returning
default).
home_dir : str | None
The folder that contains the .mne config folder.
If None, it is found automatically.
use_env : bool
If True, consider env vars, if available.
If False, only use MNE-Python configuration file values.
.. versionadded:: 0.18
Returns
-------
value : dict | str | None
The preference key value.
See Also
--------
set_config
"""
_validate_type(key, (str, type(None)), "key", 'string or None')
if key == '':
return known_config_types
# first, check to see if key is in env
if use_env and key is not None and key in os.environ:
return os.environ[key]
# second, look for it in mne-python config file
config_path = get_config_path(home_dir=home_dir)
if not op.isfile(config_path):
config = {}
else:
config = _load_config(config_path)
if key is None:
# update config with environment variables
if use_env:
env_keys = (set(config).union(known_config_types).
intersection(os.environ))
config.update({key: os.environ[key] for key in env_keys})
return config
elif raise_error is True and key not in config:
loc_env = 'the environment or in the ' if use_env else ''
meth_env = ('either os.environ["%s"] = VALUE for a temporary '
'solution, or ' % key) if use_env else ''
extra_env = (' You can also set the environment variable before '
'running python.' if use_env else '')
meth_file = ('mne.utils.set_config("%s", VALUE, set_env=True) '
'for a permanent one' % key)
raise KeyError('Key "%s" not found in %s'
'the mne-python config file (%s). '
'Try %s%s.%s'
% (key, loc_env, config_path, meth_env, meth_file,
extra_env))
else:
return config.get(key, default)
def set_config(key, value, home_dir=None, set_env=True):
"""Set a MNE-Python preference key in the config file and environment.
Parameters
----------
key : str
The preference key to set.
value : str | None
The value to assign to the preference key. If None, the key is
deleted.
home_dir : str | None
The folder that contains the .mne config folder.
If None, it is found automatically.
set_env : bool
If True (default), update :data:`os.environ` in addition to
updating the MNE-Python config file.
See Also
--------
get_config
"""
_validate_type(key, 'str', "key")
# While JSON allow non-string types, we allow users to override config
# settings using env, which are strings, so we enforce that here
_validate_type(value, (str, 'path-like', type(None)), 'value')
if value is not None:
value = str(value)
if key not in known_config_types and not \
any(k in key for k in known_config_wildcards):
warn('Setting non-standard config type: "%s"' % key)
# Read all previous values
config_path = get_config_path(home_dir=home_dir)
if op.isfile(config_path):
config = _load_config(config_path, raise_error=True)
else:
config = dict()
logger.info('Attempting to create new mne-python configuration '
'file:\n%s' % config_path)
if value is None:
config.pop(key, None)
if set_env and key in os.environ:
del os.environ[key]
else:
config[key] = value
if set_env:
os.environ[key] = value
# Write all values. This may fail if the default directory is not
# writeable.
directory = op.dirname(config_path)
if not op.isdir(directory):
os.mkdir(directory)
with open(config_path, 'w') as fid:
json.dump(config, fid, sort_keys=True, indent=0)
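# --- Illustrative usage (editor's sketch; not part of the upstream MNE source) ---
# get_config/set_config round-trip a key through ~/.mne/mne-python.json (and os.environ
# when set_env=True). MNE_LOGGING_LEVEL is one of the known_config_types listed above:
#
#   >>> set_config('MNE_LOGGING_LEVEL', 'INFO')
#   >>> get_config('MNE_LOGGING_LEVEL')
#   'INFO'
#   >>> set_config('MNE_LOGGING_LEVEL', None)   # remove the key again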
def _get_extra_data_path(home_dir=None):
"""Get path to extra data (config, tables, etc.)."""
global _temp_home_dir
if home_dir is None:
home_dir = os.environ.get('_MNE_FAKE_HOME_DIR')
if home_dir is None:
# this has been checked on OSX64, Linux64, and Win32
if 'nt' == os.name.lower():
if op.isdir(op.join(os.getenv('APPDATA'), '.mne')):
home_dir = os.getenv('APPDATA')
else:
home_dir = os.getenv('USERPROFILE')
else:
# This is a more robust way of getting the user's home folder on
# Linux platforms (not sure about OSX, Unix or BSD) than checking
# the HOME environment variable. If the user is running some sort
# of script that isn't launched via the command line (e.g. a script
# launched via Upstart) then the HOME environment variable will
# not be set.
if os.getenv('MNE_DONTWRITE_HOME', '') == 'true':
if _temp_home_dir is None:
_temp_home_dir = tempfile.mkdtemp()
atexit.register(partial(shutil.rmtree, _temp_home_dir,
ignore_errors=True))
home_dir = _temp_home_dir
else:
home_dir = os.path.expanduser('~')
if home_dir is None:
raise ValueError('mne-python config file path could '
'not be determined, please report this '
'error to mne-python developers')
return op.join(home_dir, '.mne')
def get_subjects_dir(subjects_dir=None, raise_error=False):
"""Safely use subjects_dir input to return SUBJECTS_DIR.
Parameters
----------
subjects_dir : str | None
If a value is provided, return subjects_dir. Otherwise, look for
SUBJECTS_DIR config and return the result.
raise_error : bool
If True, raise a KeyError if no value for SUBJECTS_DIR can be found
(instead of returning None).
Returns
-------
value : str | None
The SUBJECTS_DIR value.
"""
if subjects_dir is None:
subjects_dir = get_config('SUBJECTS_DIR', raise_error=raise_error)
return subjects_dir
def _get_stim_channel(stim_channel, info, raise_error=True):
"""Determine the appropriate stim_channel.
First, 'MNE_STIM_CHANNEL', 'MNE_STIM_CHANNEL_1', 'MNE_STIM_CHANNEL_2', etc.
are read. If these are not found, it will fall back to 'STI 014' if
present, then fall back to the first channel of type 'stim', if present.
Parameters
----------
stim_channel : str | list of str | None
The stim channel selected by the user.
info : instance of Info
An information structure containing information about the channels.
Returns
-------
stim_channel : str | list of str
The name of the stim channel(s) to use
"""
if stim_channel is not None:
if not isinstance(stim_channel, list):
_validate_type(stim_channel, 'str', "Stim channel")
stim_channel = [stim_channel]
for channel in stim_channel:
_validate_type(channel, 'str', "Each provided stim channel")
return stim_channel
stim_channel = list()
ch_count = 0
ch = get_config('MNE_STIM_CHANNEL')
while(ch is not None and ch in info['ch_names']):
stim_channel.append(ch)
ch_count += 1
ch = get_config('MNE_STIM_CHANNEL_%d' % ch_count)
if ch_count > 0:
return stim_channel
if 'STI101' in info['ch_names']: # combination channel for newer systems
return ['STI101']
if 'STI 014' in info['ch_names']: # for older systems
return ['STI 014']
from ..io.pick import pick_types
stim_channel = pick_types(info, meg=False, ref_meg=False, stim=True)
if len(stim_channel) > 0:
stim_channel = [info['ch_names'][ch_] for ch_ in stim_channel]
elif raise_error:
raise ValueError("No stim channels found. Consider specifying them "
"manually using the 'stim_channel' parameter.")
return stim_channel
def _get_root_dir():
"""Get as close to the repo root as possible."""
root_dir = op.abspath(op.join(op.dirname(__file__), '..'))
up_dir = op.join(root_dir, '..')
if op.isfile(op.join(up_dir, 'setup.py')) and all(
op.isdir(op.join(up_dir, x)) for x in ('mne', 'examples', 'doc')):
root_dir = op.abspath(up_dir)
return root_dir
def _get_numpy_libs():
from ._testing import SilenceStdout
with SilenceStdout(close=False) as capture:
np.show_config()
lines = capture.getvalue().split('\n')
capture.close()
libs = []
for li, line in enumerate(lines):
for key in ('lapack', 'blas'):
if line.startswith('%s_opt_info' % key):
lib = lines[li + 1]
if 'NOT AVAILABLE' in lib:
lib = 'unknown'
else:
try:
lib = lib.split('[')[1].split("'")[1]
except IndexError:
pass # keep whatever it was
libs += ['%s=%s' % (key, lib)]
libs = ', '.join(libs)
return libs
def sys_info(fid=None, show_paths=False):
"""Print the system information for debugging.
This function is useful for printing system information
to help triage bugs.
Parameters
----------
fid : file-like | None
The file to write to. Will be passed to :func:`print()`.
Can be None to use :data:`sys.stdout`.
show_paths : bool
If True, print paths for each module.
Examples
--------
Running this function with no arguments prints an output that is
useful when submitting bug reports::
>>> import mne
>>> mne.sys_info() # doctest: +SKIP
Platform: Linux-4.15.0-1067-aws-x86_64-with-glibc2.2.5
Python: 3.8.1 (default, Feb 2 2020, 08:37:37) [GCC 8.3.0]
Executable: /usr/local/bin/python
CPU: : 36 cores
Memory: 68.7 GB
mne: 0.21.dev0
numpy: 1.19.0 {blas=openblas, lapack=openblas}
scipy: 1.5.1
matplotlib: 3.2.2 {backend=Qt5Agg}
sklearn: 0.23.1
numba: 0.50.1
nibabel: 3.1.1
cupy: Not found
pandas: 1.0.5
dipy: 1.1.1
mayavi: Not found
pyvista: 0.25.3 {pyvistaqt=0.1.1, OpenGL 3.3 (Core Profile) Mesa 18.3.6 via llvmpipe (LLVM 7.0, 256 bits)}
vtk: 9.0.1
PyQt5: 5.15.0
""" # noqa: E501
ljust = 15
out = 'Platform:'.ljust(ljust) + platform.platform() + '\n'
out += 'Python:'.ljust(ljust) + str(sys.version).replace('\n', ' ') + '\n'
out += 'Executable:'.ljust(ljust) + sys.executable + '\n'
out += 'CPU:'.ljust(ljust) + ('%s: ' % platform.processor())
try:
import multiprocessing
except ImportError:
out += ('number of processors unavailable ' +
'(requires "multiprocessing" package)\n')
else:
out += '%s cores\n' % multiprocessing.cpu_count()
out += 'Memory:'.ljust(ljust)
try:
import psutil
except ImportError:
out += 'Unavailable (requires "psutil" package)'
else:
out += '%0.1f GB\n' % (psutil.virtual_memory().total / float(2 ** 30),)
out += '\n'
libs = _get_numpy_libs()
has_3d = False
for mod_name in ('mne', 'numpy', 'scipy', 'matplotlib', '', 'sklearn',
'numba', 'nibabel', 'cupy', 'pandas', 'dipy',
'mayavi', 'pyvista', 'vtk', 'PyQt5'):
if mod_name == '':
out += '\n'
continue
if mod_name == 'PyQt5' and not has_3d:
continue
out += ('%s:' % mod_name).ljust(ljust)
try:
mod = __import__(mod_name)
if mod_name == 'mayavi':
# the real test
from mayavi import mlab # noqa, analysis:ignore
except Exception:
out += 'Not found\n'
else:
extra = (' (%s)' % op.dirname(mod.__file__)) if show_paths else ''
if mod_name == 'numpy':
extra += ' {%s}%s' % (libs, extra)
elif mod_name == 'matplotlib':
extra += ' {backend=%s}%s' % (mod.get_backend(), extra)
elif mod_name == 'pyvista':
extras = list()
try:
from pyvistaqt import __version__
except Exception:
pass
else:
extras += [f'pyvistaqt={__version__}']
try:
from pyvista import GPUInfo
except ImportError:
pass
else:
gi = GPUInfo()
extras += [f'OpenGL {gi.version} via {gi.renderer}']
if extras:
extra += f' {{{", ".join(extras)}}}'
elif mod_name in ('mayavi', 'vtk'):
has_3d = True
if mod_name == 'vtk':
version = mod.VTK_VERSION
elif mod_name == 'PyQt5':
version = _check_pyqt5_version()
else:
version = mod.__version__
out += '%s%s\n' % (version, extra)
print(out, end='', file=fid)
| bsd-3-clause |
LivSim2017/LivSim-Codes | Postprocessing Statistics/PostTransplantEstimator_2.py | 1 | 6833 | #This code prepares and estimates post-transplant deaths
import numpy as nump
import time
import csv
import scipy as scip
import datetime
import operator
import sys
import queue
import pandas as pd
from copy import deepcopy
from matplotlib.dates import strpdate2num
#Prepare Converters
def tf_convert(s):
#print(s)
if s ==b'True':
return 1
else:
return 0
def missing_convert(s):
#print(s)
if s ==b'.':
return nump.nan
else:
return float(s)
def estimate_post_transplant_death(txids, doids):
"""
    This function estimates the number of post-transplant deaths.
@Input:
@txids: list of patients who received transplants
@doids: list of donated organs
@Output:
@output_totals: number of post transplant deaths for each replication
"""
#set seed
nump.random.seed(7777)
patcols = (2,5,11,71,72,74,79,82,86,88,95,97,104,106,107,108,109,110,135,138) #patient columns
statuscols = (1,4,5,8,9,14,15) #status columns
istatuscols = {2,120,121,124,125,134,98} #
donorcols =(4,15,54,55,57,60,70,76,82,85) #donor columns
waitlist = nump.loadtxt("waitlist.txt",delimiter ="|",skiprows=3,usecols=patcols,converters={11: tf_convert}) #upload waitlist file
patients = nump.loadtxt("patients.txt",delimiter ="|",skiprows=3,usecols=patcols,converters={11: tf_convert}) #upload patient file
patients = nump.vstack((waitlist,patients)) #concatenate patient to waitlist
#do the same as before
is_waitlist = nump.loadtxt("waitlist.txt",delimiter ="|",skiprows=3,usecols=istatuscols,converters={120: missing_convert,121: missing_convert,124: missing_convert,125: missing_convert, 134: missing_convert,98: missing_convert })
is_patients = nump.loadtxt("patients.txt",delimiter ="|",skiprows=3,usecols=istatuscols,converters={120: missing_convert,121: missing_convert,124: missing_convert,125: missing_convert, 134: missing_convert,98: missing_convert })
is_patients = nump.vstack((is_waitlist,is_patients))
#upload status and status time
status= nump.loadtxt("status.txt",delimiter ="|",skiprows=3,usecols=statuscols,converters={4: missing_convert,5: missing_convert,8: missing_convert,9: missing_convert,14: missing_convert,15: missing_convert })
statustimes = nump.loadtxt("status_times.txt")
#upload donors
donors = nump.loadtxt("donor.txt",delimiter ="|",skiprows=3,usecols=donorcols)
#survival coefficients and step survival function
survcoeff = nump.loadtxt("survivalcoefficients.txt")
stepsurv = nump.loadtxt("stepsurvival.txt")
#Setting
nreps = 1 # number of replications
maxtime = 5 #maxtime of survival
output_totals = []
for i in range(0,nreps):
for y in range(0, maxtime):
#print iteration
print('Replication %d, Year %d' %(i,y))
#Form Survival Dataset
survdata = nump.empty([1,50])
txtimes = nump.empty([1,1])
#get donors of replication i and by year y
donor_subset = doids[doids.iloc[:,1] == i+1]
donor_subset = donor_subset[donor_subset.iloc[:,0] == y]
#get transplant patient of replication i
tx_subset = txids[txids.iloc[:,1] == i+1]
tx_subset = tx_subset[tx_subset.iloc[:,0] == y]
for n in range(0, len(donor_subset)):
lsampatid = int(donor_subset.iloc[n,3]) #lsam patient id
lsamdonid = int(donor_subset.iloc[n,4]) -1 #lsam donor id
lsamtxtime = donor_subset.iloc[n,2] #lsam transplant time
#Create categorical age variables
page = [1*(patients[lsampatid][1] < 18), 1*((patients[lsampatid][1] >= 18 and patients[lsampatid][1] <25)),
1*((patients[lsampatid][1] >= 25 and patients[lsampatid][1] <35)),
1*((patients[lsampatid][1] >= 45 and patients[lsampatid][1] <55)),
1*((patients[lsampatid][1] >= 55 and patients[lsampatid][1] <65)),
1*((patients[lsampatid][1] >= 65))] #patient age
dage = [1*(donors[lsamdonid][0] < 18), 1*((donors[lsamdonid][0] >= 40 and donors[lsamdonid][0] <50)),
1*((donors[lsamdonid][0] >= 50 and donors[lsamdonid][0] <60)),
1*((donors[lsamdonid][0] >= 60 and donors[lsamdonid][0] <70)),
1*((donors[lsamdonid][0] >= 70))] #donor age
#Obtain last status record before transplant
statuspat = is_patients[lsampatid]
for j in range(1,len(statustimes)):
if statustimes[j][1] > lsamtxtime:
"""
skip the patient whose status time is later than transplant time
"""
break
if statustimes[j][0] == lsampatid and nump.isnan(status[j]).any() == False:
statuspat = status[j]
record = nump.hstack((patients[lsampatid],donors[lsamdonid],statuspat,tx_subset.iloc[n,4],tx_subset.iloc[n,5],page,dage))
survdata = nump.vstack((survdata,record)) #append the observation to the survival data
txtimes = nump.vstack((txtimes,lsamtxtime)) #append the transplant time
#get rid of the first row that is just zeroes
survdata = survdata[1:]
txtimes = txtimes[1:]
#Compute survival
values = nump.zeros(nump.shape(survdata)[0])
for l in range(0, nump.shape(survdata)[0]):
values[l] = nump.exp(nump.dot(survdata[l],survcoeff))
mobs = nump.shape(survdata)[0] #obtain number of observations
svalues = deepcopy(values)
deaths = deepcopy(values)
mu = nump.random.uniform(0,1,mobs) #create a vector of probability values from [0,1]
for m in range(0,mobs):
svalues[m] = nump.exp(nump.log(mu[m])/values[m])
#Calculate death
for k in range(1,nump.shape(stepsurv)[0]):
if svalues[m] < stepsurv[-1,0]:
svalues[m] = stepsurv[-1,1]
#deaths[m] = int(bool( nump.random.uniform(0,1,1) <=stepsurv[-1,2] and svalues[m]/365 + txtimes[m] <=maxtime))
deaths[m] = int(bool( svalues[m]/365 + txtimes[m] <=maxtime))
break
elif svalues[m] < stepsurv[k-1,0] and svalues[m] >= stepsurv[k,0]:
svalues[m] = stepsurv[k,1]
#deaths[m] = int(bool( nump.random.uniform(0,1,1) <=stepsurv[k,2] and svalues[m]/365 + txtimes[m] <=maxtime))
deaths[m] = int(bool( svalues[m]/365 + txtimes[m] <=maxtime))
break
#Total Deaths
output_totals.append(nump.sum(deaths))
return output_totals
def estimate_post_transplant_outcome(directory):
"""
    This function estimates the number of deaths among patients who received a transplant.
    Results are written to files in the given directory.
@Input:
@directory: directory where the files RawOutput_TxID.csv and RawOutput_DoID.csv are located. Also where the output
files will be written to.
"""
txids = pd.read_csv(directory + 'RawOutput_TxID.csv')
doids = pd.read_csv(directory + 'RawOutput_DoID.csv')
#estimate post transplant deaths
total_transplant_death = estimate_post_transplant_death(txids, doids)
#write to csv file
total_transplant_death = pd.DataFrame(total_transplant_death)
total_transplant_death.columns = ["Number of Transplant Death"]
total_transplant_death.to_csv(directory + 'Output_post_transplant_deaths.csv')
| mit |
glennq/scikit-learn | examples/linear_model/plot_sgd_separating_hyperplane.py | 84 | 1221 | """
=========================================
SGD: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a linear Support Vector Machines classifier
trained using SGD.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i, j), val in np.ndenumerate(X1):
x1 = val
x2 = X2[i, j]
p = clf.decision_function([[x1, x2]])
Z[i, j] = p[0]
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python-packages/mne-python-0.10/mne/viz/tests/test_decoding.py | 10 | 3823 | # Authors: Denis Engemann <[email protected]>
# Jean-Remi King <[email protected]>
#
# License: Simplified BSD
import os.path as op
import warnings
from nose.tools import assert_raises, assert_equals
import numpy as np
from mne.epochs import equalize_epoch_counts, concatenate_epochs
from mne.decoding import GeneralizationAcrossTime
from mne import io, Epochs, read_events, pick_types
from mne.utils import requires_sklearn, run_tests_if_main
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(data_dir, 'test_raw.fif')
event_name = op.join(data_dir, 'test-eve.fif')
warnings.simplefilter('always') # enable b/c these tests throw warnings
def _get_data(tmin=-0.2, tmax=0.5, event_id=dict(aud_l=1, vis_l=3),
event_id_gen=dict(aud_l=2, vis_l=4), test_times=None):
"""Aux function for testing GAT viz"""
gat = GeneralizationAcrossTime()
raw = io.Raw(raw_fname, preload=False)
events = read_events(event_name)
picks = pick_types(raw.info, meg='mag', stim=False, ecg=False,
eog=False, exclude='bads')
picks = picks[1:13:3]
decim = 30
# Test on time generalization within one condition
with warnings.catch_warnings(record=True):
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True, decim=decim)
epochs_list = [epochs[k] for k in event_id]
equalize_epoch_counts(epochs_list)
epochs = concatenate_epochs(epochs_list)
# Test default running
gat = GeneralizationAcrossTime(test_times=test_times)
gat.fit(epochs)
gat.score(epochs)
return gat
@requires_sklearn
def test_gat_plot_matrix():
"""Test GAT matrix plot"""
gat = _get_data()
gat.plot()
del gat.scores_
assert_raises(RuntimeError, gat.plot)
@requires_sklearn
def test_gat_plot_diagonal():
"""Test GAT diagonal plot"""
gat = _get_data()
gat.plot_diagonal()
del gat.scores_
assert_raises(RuntimeError, gat.plot)
@requires_sklearn
def test_gat_plot_times():
"""Test GAT times plot"""
gat = _get_data()
# test one line
gat.plot_times(gat.train_times_['times'][0])
# test multiple lines
gat.plot_times(gat.train_times_['times'])
# test multiple colors
n_times = len(gat.train_times_['times'])
colors = np.tile(['r', 'g', 'b'],
int(np.ceil(n_times / 3)))[:n_times]
gat.plot_times(gat.train_times_['times'], color=colors)
# test invalid time point
assert_raises(ValueError, gat.plot_times, -1.)
# test float type
assert_raises(ValueError, gat.plot_times, 1)
assert_raises(ValueError, gat.plot_times, 'diagonal')
del gat.scores_
assert_raises(RuntimeError, gat.plot)
def chance(ax):
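"""Read back the y-value of the chance-level line drawn on the axes."""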
return ax.get_children()[1].get_lines()[0].get_ydata()[0]
@requires_sklearn
def test_gat_chance_level():
"""Test GAT plot_times chance level"""
gat = _get_data()
ax = gat.plot_diagonal(chance=False)
ax = gat.plot_diagonal()
assert_equals(chance(ax), .5)
gat = _get_data(event_id=dict(aud_l=1, vis_l=3, aud_r=2, vis_r=4))
ax = gat.plot_diagonal()
assert_equals(chance(ax), .25)
ax = gat.plot_diagonal(chance=1.234)
assert_equals(chance(ax), 1.234)
assert_raises(ValueError, gat.plot_diagonal, chance='foo')
del gat.scores_
assert_raises(RuntimeError, gat.plot)
@requires_sklearn
def test_gat_plot_nonsquared():
"""Test GAT diagonal plot"""
gat = _get_data(test_times=dict(start=0.))
gat.plot()
ax = gat.plot_diagonal()
scores = ax.get_children()[1].get_lines()[2].get_ydata()
assert_equals(len(scores), len(gat.estimators_))
run_tests_if_main()
| bsd-3-clause |
yhat/ggplot | ggplot/themes/themes.py | 1 | 2119 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import matplotlib as mpl
import matplotlib.pyplot as plt
from copy import copy, deepcopy
from cycler import cycler
class theme(object):
def __init__(self):
self._rcParams = {}
def __radd__(self, other):
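# support "ggplot_object + theme": attach this theme to the plot (when the left operand is a ggplot) and return it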
if other.__class__.__name__=="ggplot":
other.theme = self
return other
return self
def get_rcParams(self):
return self._rcParams
def apply_final_touches(self, ax):
pass
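# theme_gray (the grey-background base theme) is assumed to be defined earlier in this module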
class theme_bw(theme_gray):
"""
White background w/ black gridlines
"""
def __init__(self):
super(theme_bw, self).__init__()
self._rcParams['axes.facecolor'] = 'white'
class theme_xkcd(theme):
"""
xkcd theme
The theme internally uses the settings from pyplot.xkcd().
"""
def __init__(self, scale=1, length=100, randomness=2):
super(theme_xkcd, self).__init__()
with plt.xkcd(scale=scale, length=length, randomness=randomness):
_xkcd = mpl.rcParams.copy()
# no need to a get a deprecate warning for nothing...
for key in mpl._deprecated_map:
if key in _xkcd:
del _xkcd[key]
if 'tk.pythoninspect' in _xkcd:
del _xkcd['tk.pythoninspect']
self._rcParams.update(_xkcd)
def __deepcopy__(self, memo):
class _empty(object):
pass
result = _empty()
result.__class__ = self.__class__
result.__dict__["_rcParams"] = {}
for k, v in self._rcParams.items():
try:
result.__dict__["_rcParams"][k] = deepcopy(v, memo)
except NotImplementedError:
# deepcopy raises an error for objects that are derived from or
# composed of matplotlib.transform.TransformNode.
# Not desirable, but probably requires an upstream fix.
# In particular, XKCD uses matplotlib.patheffects.withStroke
# -gdowding
result.__dict__["_rcParams"][k] = copy(v)
return result
| bsd-2-clause |
tkerola/pipeline_grid_search | examples/example.py | 1 | 1145 | import time
from sklearn.datasets import make_classification
from sklearn.decomposition import PCA
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
from pipeline_grid_search import PipelineGridSearchCV
pipe = Pipeline([
("pca", PCA()),
("svm", SVC()),
])
cv_params = dict([
('pca__n_components', [100,200,300]),
('svm__C', [1,10,100,1000]),
])
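# 3 x 4 = 12 candidate settings per CV fold; PipelineGridSearchCV is expected to reuse the fitted PCA step across SVC settings, which is where the speed-up measured below comes from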
X, y = make_classification(n_samples=1000, n_features=1000)
n_folds = 5
n_jobs = 4
verbose = 1
start = time.time()
model = PipelineGridSearchCV(pipe, cv_params, cv=n_folds, verbose=verbose, n_jobs=n_jobs)
model.fit(X,y) # This will run much faster than ordinary GridSearchCV
elapsed_pipeline_grid_search = time.time() - start
start = time.time()
model = GridSearchCV(pipe, cv_params, cv=n_folds, verbose=verbose, n_jobs=n_jobs)
model.fit(X,y)
elapsed_grid_search = time.time() - start
print("-----------------------------------")
print("Elapsed time for doing grid search:")
print("PipelineGridSearchCV: {} secs".format(elapsed_pipeline_grid_search))
print("GridSearchCV: {} secs".format(elapsed_grid_search))
| bsd-3-clause |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/pandas/tseries/tests/test_converter.py | 7 | 6554 | from datetime import datetime, date
import nose
import numpy as np
from pandas import Timestamp, Period
from pandas.compat import u
import pandas.util.testing as tm
from pandas.tseries.offsets import Second, Milli, Micro
from pandas.compat.numpy import np_datetime64_compat
try:
import pandas.tseries.converter as converter
except ImportError:
raise nose.SkipTest("no pandas.tseries.converter, skipping")
def test_timtetonum_accepts_unicode():
assert (converter.time2num("00:01") == converter.time2num(u("00:01")))
class TestDateTimeConverter(tm.TestCase):
def setUp(self):
self.dtc = converter.DatetimeConverter()
self.tc = converter.TimeFormatter(None)
def test_convert_accepts_unicode(self):
r1 = self.dtc.convert("12:22", None, None)
r2 = self.dtc.convert(u("12:22"), None, None)
assert (r1 == r2), "DatetimeConverter.convert should accept unicode"
def test_conversion(self):
rs = self.dtc.convert(['2012-1-1'], None, None)[0]
xp = datetime(2012, 1, 1).toordinal()
self.assertEqual(rs, xp)
rs = self.dtc.convert('2012-1-1', None, None)
self.assertEqual(rs, xp)
rs = self.dtc.convert(date(2012, 1, 1), None, None)
self.assertEqual(rs, xp)
rs = self.dtc.convert(datetime(2012, 1, 1).toordinal(), None, None)
self.assertEqual(rs, xp)
rs = self.dtc.convert('2012-1-1', None, None)
self.assertEqual(rs, xp)
rs = self.dtc.convert(Timestamp('2012-1-1'), None, None)
self.assertEqual(rs, xp)
# also testing datetime64 dtype (GH8614)
rs = self.dtc.convert(np_datetime64_compat('2012-01-01'), None, None)
self.assertEqual(rs, xp)
rs = self.dtc.convert(np_datetime64_compat(
'2012-01-01 00:00:00+0000'), None, None)
self.assertEqual(rs, xp)
rs = self.dtc.convert(np.array([
np_datetime64_compat('2012-01-01 00:00:00+0000'),
np_datetime64_compat('2012-01-02 00:00:00+0000')]), None, None)
self.assertEqual(rs[0], xp)
def test_conversion_float(self):
decimals = 9
rs = self.dtc.convert(
Timestamp('2012-1-1 01:02:03', tz='UTC'), None, None)
xp = converter.dates.date2num(Timestamp('2012-1-1 01:02:03', tz='UTC'))
tm.assert_almost_equal(rs, xp, decimals)
rs = self.dtc.convert(
Timestamp('2012-1-1 09:02:03', tz='Asia/Hong_Kong'), None, None)
tm.assert_almost_equal(rs, xp, decimals)
rs = self.dtc.convert(datetime(2012, 1, 1, 1, 2, 3), None, None)
tm.assert_almost_equal(rs, xp, decimals)
def test_conversion_outofbounds_datetime(self):
# GH 2579: dates outside the Timestamp-representable range (here year 1677) should still convert
values = [date(1677, 1, 1), date(1677, 1, 2)]
rs = self.dtc.convert(values, None, None)
xp = converter.dates.date2num(values)
tm.assert_numpy_array_equal(rs, xp)
rs = self.dtc.convert(values[0], None, None)
xp = converter.dates.date2num(values[0])
self.assertEqual(rs, xp)
values = [datetime(1677, 1, 1, 12), datetime(1677, 1, 2, 12)]
rs = self.dtc.convert(values, None, None)
xp = converter.dates.date2num(values)
tm.assert_numpy_array_equal(rs, xp)
rs = self.dtc.convert(values[0], None, None)
xp = converter.dates.date2num(values[0])
self.assertEqual(rs, xp)
def test_time_formatter(self):
self.tc(90000)
def test_dateindex_conversion(self):
decimals = 9
for freq in ('B', 'L', 'S'):
dateindex = tm.makeDateIndex(k=10, freq=freq)
rs = self.dtc.convert(dateindex, None, None)
xp = converter.dates.date2num(dateindex._mpl_repr())
tm.assert_almost_equal(rs, xp, decimals)
def test_resolution(self):
def _assert_less(ts1, ts2):
val1 = self.dtc.convert(ts1, None, None)
val2 = self.dtc.convert(ts2, None, None)
if not val1 < val2:
raise AssertionError('{0} is not less than {1}.'.format(val1,
val2))
# Matplotlib's time representation using floats cannot distinguish
# intervals smaller than ~10 microsecond in the common range of years.
ts = Timestamp('2012-1-1')
_assert_less(ts, ts + Second())
_assert_less(ts, ts + Milli())
_assert_less(ts, ts + Micro(50))
class TestPeriodConverter(tm.TestCase):
def setUp(self):
self.pc = converter.PeriodConverter()
class Axis(object):
pass
self.axis = Axis()
self.axis.freq = 'D'
def test_convert_accepts_unicode(self):
r1 = self.pc.convert("2012-1-1", None, self.axis)
r2 = self.pc.convert(u("2012-1-1"), None, self.axis)
self.assert_equal(r1, r2,
"PeriodConverter.convert should accept unicode")
def test_conversion(self):
rs = self.pc.convert(['2012-1-1'], None, self.axis)[0]
xp = Period('2012-1-1').ordinal
self.assertEqual(rs, xp)
rs = self.pc.convert('2012-1-1', None, self.axis)
self.assertEqual(rs, xp)
rs = self.pc.convert([date(2012, 1, 1)], None, self.axis)[0]
self.assertEqual(rs, xp)
rs = self.pc.convert(date(2012, 1, 1), None, self.axis)
self.assertEqual(rs, xp)
rs = self.pc.convert([Timestamp('2012-1-1')], None, self.axis)[0]
self.assertEqual(rs, xp)
rs = self.pc.convert(Timestamp('2012-1-1'), None, self.axis)
self.assertEqual(rs, xp)
# FIXME
# rs = self.pc.convert(
# np_datetime64_compat('2012-01-01'), None, self.axis)
# self.assertEqual(rs, xp)
#
# rs = self.pc.convert(
# np_datetime64_compat('2012-01-01 00:00:00+0000'),
# None, self.axis)
# self.assertEqual(rs, xp)
#
# rs = self.pc.convert(np.array([
# np_datetime64_compat('2012-01-01 00:00:00+0000'),
# np_datetime64_compat('2012-01-02 00:00:00+0000')]),
# None, self.axis)
# self.assertEqual(rs[0], xp)
def test_integer_passthrough(self):
# GH9012
rs = self.pc.convert([0, 1], None, self.axis)
xp = [0, 1]
self.assertEqual(rs, xp)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| mit |
easonlius/fingercode | unused/test.py | 1 | 2002 | # _*_ encoding: utf-8 _*_
import math
import cv2
import numpy as np
import matplotlib.pyplot as plt
from Gabor import *
from fingercode import *
"""
if (4 > 2)&(7 > 4):
print "ok"
else:
print "NO"
a = -1.0
b = 1
print math.degrees(math.atan(-1.0 / 1.0)) + 180.0
print math.degrees(math.atan(1.0/1.0))
print math.degrees(math.atan(1.0/-1.0)) + 360.0
print math.degrees(math.atan(-1.0/-1.0)) + 180.0
img = cv2.imread('1/101_7.tif', 0)
print type(img[10][10])
a = numpy.uint8([1])
print type(a)
print math.sqrt(4)
img = cv2.imread('1/101_7.tif', 0)
result = getGabor(img)
plt.figure()
for i in range(len(result)):
plt.subplot(1, 8, i+1)
plt.imshow(result[i], cmap='gray')
plt.show()
img = cv2.imread('1/101_7.tif')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
sift = cv2.xfeatures2d.SIFT_create()
kp = sift.detect(img, None)
kk, des = sift.compute(gray, kp)
img = cv2.drawKeypoints(gray, kp, img)
plt.figure()
plt.subplot(1, 1, 1)
plt.imshow(img)
plt.show()
"""
img = cv2.imread('2/101_5.tif')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, binary = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
image, contours, hi = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(img, contours, -1, (0, 0, 255), 1)
"""
plt.figure()
plt.subplot(1, 1, 1)
plt.imshow(img, 'gray')
plt.show()
"""
y = contours[0]
MAX = 0.0
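# scan the longer contours for the point whose neighbouring segments give the largest dot product (the sharpest convex turn) -- presumably used as the fingerprint reference point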
for contour in contours:
if len(contour) > 20:
for i in range(len(contour)-2):
q = contour[i-2][0]
#print q
p = contour[i][0]
r = contour[i+2][0]
cross = (q[1] - p[1]) * (r[1] - p[1]) + (q[0] - p[0]) * (r[0] - p[0])
if cross > MAX:
MAX = cross
point = p
print MAX
print point
plt.figure()
plt.subplot(1, 1, 1)
plt.imshow(img, 'gray')
plt.show()
"""
sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=13)
sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=13)
sobelxy = cv2.Sobel(img, cv2.CV_64F, 1, 1, ksize=13)
"""
| mit |
MatthieuBizien/scikit-learn | examples/applications/topics_extraction_with_nmf_lda.py | 38 | 3869 | """
=======================================================================================
Topic extraction with Non-negative Matrix Factorization and Latent Dirichlet Allocation
=======================================================================================
This is an example of applying Non-negative Matrix Factorization
and Latent Dirichlet Allocation on a corpus of documents and
extract additive models of the topic structure of the corpus.
The output is a list of topics, each represented as a list of terms
(weights are not shown).
The default parameters (n_samples / n_features / n_topics) should make
the example runnable in a couple of tens of seconds. You can try to
increase the dimensions of the problem, but be aware that the time
complexity is polynomial in NMF. In LDA, the time complexity is
proportional to (n_samples * iterations).
"""
# Author: Olivier Grisel <[email protected]>
# Lars Buitinck
# Chyi-Kwei Yau <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation
from sklearn.datasets import fetch_20newsgroups
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
def print_top_words(model, feature_names, n_top_words):
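"""Print the n_top_words highest-weighted terms for each topic of a fitted model."""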
for topic_idx, topic in enumerate(model.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
# Load the 20 newsgroups dataset and vectorize it. We use a few heuristics
# to filter out useless terms early on: the posts are stripped of headers,
# footers and quoted replies, and common English words, words occurring in
# only one document or in at least 95% of the documents are removed.
print("Loading dataset...")
t0 = time()
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
data_samples = dataset.data[:n_samples]
print("done in %0.3fs." % (time() - t0))
# Use tf-idf features for NMF.
print("Extracting tf-idf features for NMF...")
tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2,
max_features=n_features,
stop_words='english')
t0 = time()
tfidf = tfidf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
# Use tf (raw term count) features for LDA.
print("Extracting tf features for LDA...")
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2,
max_features=n_features,
stop_words='english')
t0 = time()
tf = tf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
# Fit the NMF model
print("Fitting the NMF model with tf-idf features, "
"n_samples=%d and n_features=%d..."
% (n_samples, n_features))
t0 = time()
nmf = NMF(n_components=n_topics, random_state=1,
alpha=.1, l1_ratio=.5).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in NMF model:")
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
print_top_words(nmf, tfidf_feature_names, n_top_words)
print("Fitting LDA models with tf features, "
"n_samples=%d and n_features=%d..."
% (n_samples, n_features))
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=5,
learning_method='online',
learning_offset=50.,
random_state=0)
t0 = time()
lda.fit(tf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in LDA model:")
tf_feature_names = tf_vectorizer.get_feature_names()
print_top_words(lda, tf_feature_names, n_top_words)
| bsd-3-clause |
tomlof/scikit-learn | doc/datasets/mldata_fixture.py | 367 | 1183 | """Fixture module to skip the datasets loading when offline
Mock urllib2 access to mldata.org and create a temporary data folder.
"""
from os import makedirs
from os.path import join
import numpy as np
import tempfile
import shutil
from sklearn import datasets
from sklearn.utils.testing import install_mldata_mock
from sklearn.utils.testing import uninstall_mldata_mock
def globs(globs):
# Create a temporary folder for the data fetcher
global custom_data_home
custom_data_home = tempfile.mkdtemp()
makedirs(join(custom_data_home, 'mldata'))
globs['custom_data_home'] = custom_data_home
return globs
def setup_module():
# setup mock urllib2 module to avoid downloading from mldata.org
install_mldata_mock({
'mnist-original': {
'data': np.empty((70000, 784)),
'label': np.repeat(np.arange(10, dtype='d'), 7000),
},
'iris': {
'data': np.empty((150, 4)),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
})
def teardown_module():
uninstall_mldata_mock()
shutil.rmtree(custom_data_home)
| bsd-3-clause |
MohammedWasim/scikit-learn | sklearn/neighbors/tests/test_neighbors.py | 76 | 45197 | from itertools import product
import pickle
import numpy as np
from scipy.sparse import (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix)
from sklearn import metrics
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn import neighbors, datasets
rng = np.random.RandomState(0)
# load and shuffle iris dataset
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# load and shuffle digits
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix,
lil_matrix)
SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,)
ALGORITHMS = ('ball_tree', 'brute', 'kd_tree', 'auto')
P = (1, 2, 3, 4, np.inf)
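# Minkowski p-norms exercised by the tests (p=np.inf corresponds to the Chebyshev metric)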
# Filter deprecation warnings.
neighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph)
neighbors.radius_neighbors_graph = ignore_warnings(
neighbors.radius_neighbors_graph)
def _weight_func(dist):
""" Weight function to replace lambda d: d ** -2.
The lambda function is not valid because:
if d==0 then 0^-2 is not valid. """
# Dist could be multidimensional, flatten it so all values
# can be looped
with np.errstate(divide='ignore'):
retval = 1. / dist
return retval ** 2
def test_unsupervised_kneighbors(n_samples=20, n_features=5,
n_query_pts=2, n_neighbors=5):
# Test unsupervised neighbors methods
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results_nodist = []
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
p=p)
neigh.fit(X)
results_nodist.append(neigh.kneighbors(test,
return_distance=False))
results.append(neigh.kneighbors(test, return_distance=True))
for i in range(len(results) - 1):
assert_array_almost_equal(results_nodist[i], results[i][1])
assert_array_almost_equal(results[i][0], results[i + 1][0])
assert_array_almost_equal(results[i][1], results[i + 1][1])
def test_unsupervised_inputs():
# test the types of valid input into NearestNeighbors
X = rng.random_sample((10, 3))
nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1)
nbrs_fid.fit(X)
dist1, ind1 = nbrs_fid.kneighbors(X)
nbrs = neighbors.NearestNeighbors(n_neighbors=1)
for input in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)):
nbrs.fit(input)
dist2, ind2 = nbrs.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
def test_precomputed(random_state=42):
"""Tests unsupervised NearestNeighbors with a distance matrix."""
# Note: smaller samples may result in spurious test success
rng = np.random.RandomState(random_state)
X = rng.random_sample((10, 4))
Y = rng.random_sample((3, 4))
DXX = metrics.pairwise_distances(X, metric='euclidean')
DYX = metrics.pairwise_distances(Y, X, metric='euclidean')
for method in ['kneighbors']:
# TODO: also test radius_neighbors, but requires different assertion
# As a feature matrix (n_samples by n_features)
nbrs_X = neighbors.NearestNeighbors(n_neighbors=3)
nbrs_X.fit(X)
dist_X, ind_X = getattr(nbrs_X, method)(Y)
# As a dense distance matrix (n_samples by n_samples)
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='brute',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check auto works too
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check X=None in prediction
dist_X, ind_X = getattr(nbrs_X, method)(None)
dist_D, ind_D = getattr(nbrs_D, method)(None)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Must raise a ValueError if the matrix is not of correct shape
assert_raises(ValueError, getattr(nbrs_D, method), X)
target = np.arange(X.shape[0])
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
print(Est)
est = Est(metric='euclidean')
est.radius = est.n_neighbors = 1
pred_X = est.fit(X, target).predict(Y)
est.metric = 'precomputed'
pred_D = est.fit(DXX, target).predict(DYX)
assert_array_almost_equal(pred_X, pred_D)
def test_precomputed_cross_validation():
# Ensure array is split correctly
rng = np.random.RandomState(0)
X = rng.rand(20, 2)
D = pairwise_distances(X, metric='euclidean')
y = rng.randint(3, size=20)
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
metric_score = cross_val_score(Est(), X, y)
precomp_score = cross_val_score(Est(metric='precomputed'), D, y)
assert_array_equal(metric_score, precomp_score)
def test_unsupervised_radius_neighbors(n_samples=20, n_features=5,
n_query_pts=2, radius=0.5,
random_state=0):
# Test unsupervised radius-based query
rng = np.random.RandomState(random_state)
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm,
p=p)
neigh.fit(X)
ind1 = neigh.radius_neighbors(test, return_distance=False)
# sort the results: this is not done automatically for
# radius searches
dist, ind = neigh.radius_neighbors(test, return_distance=True)
for (d, i, i1) in zip(dist, ind, ind1):
j = d.argsort()
d[:] = d[j]
i[:] = i[j]
i1[:] = i1[j]
results.append((dist, ind))
assert_array_almost_equal(np.concatenate(list(ind)),
np.concatenate(list(ind1)))
for i in range(len(results) - 1):
assert_array_almost_equal(np.concatenate(list(results[i][0])),
np.concatenate(list(results[i + 1][0]))),
assert_array_almost_equal(np.concatenate(list(results[i][1])),
np.concatenate(list(results[i + 1][1])))
def test_kneighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
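# binary target: 1 inside the sphere of squared radius 0.5, 0 outside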
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
# Test prediction with y_str
knn.fit(X, y_str)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_kneighbors_classifier_float_labels(n_samples=40, n_features=5,
n_test_pts=10, n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)
knn.fit(X, y.astype(np.float))
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
def test_kneighbors_classifier_predict_proba():
# Test KNeighborsClassifier.predict_proba() method
X = np.array([[0, 2, 0],
[0, 2, 1],
[2, 0, 0],
[2, 2, 0],
[0, 0, 2],
[0, 0, 1]])
y = np.array([4, 4, 5, 5, 1, 1])
cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1) # cityblock dist
cls.fit(X, y)
y_prob = cls.predict_proba(X)
real_prob = np.array([[0, 2. / 3, 1. / 3],
[1. / 3, 2. / 3, 0],
[1. / 3, 0, 2. / 3],
[0, 1. / 3, 2. / 3],
[2. / 3, 1. / 3, 0],
[2. / 3, 1. / 3, 0]])
assert_array_equal(real_prob, y_prob)
# Check that it also works with non integer labels
cls.fit(X, y.astype(str))
y_prob = cls.predict_proba(X)
assert_array_equal(real_prob, y_prob)
# Check that it works with weights='distance'
cls = neighbors.KNeighborsClassifier(
n_neighbors=2, p=1, weights='distance')
cls.fit(X, y)
y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]]))
real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]])
assert_array_almost_equal(real_prob, y_prob)
def test_radius_neighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
neigh.fit(X, y_str)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_radius_neighbors_classifier_when_no_neighbors():
# Test radius-based classifier when no neighbors found.
# In this case it should raise an informative exception
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
weight_func = _weight_func
for outlier_label in [0, -1, None]:
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
rnc = neighbors.RadiusNeighborsClassifier
clf = rnc(radius=radius, weights=weights, algorithm=algorithm,
outlier_label=outlier_label)
clf.fit(X, y)
assert_array_equal(np.array([1, 2]),
clf.predict(z1))
if outlier_label is None:
assert_raises(ValueError, clf.predict, z2)
elif False:
assert_array_equal(np.array([1, outlier_label]),
clf.predict(z2))
def test_radius_neighbors_classifier_outlier_labeling():
# Test radius-based classifier when no neighbors found and outliers
# are labeled.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
correct_labels1 = np.array([1, 2])
correct_labels2 = np.array([1, -1])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm,
outlier_label=-1)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
assert_array_equal(correct_labels2, clf.predict(z2))
def test_radius_neighbors_classifier_zero_distance():
# Test radius-based classifier, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.0, 2.0]])
correct_labels1 = np.array([1, 2])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
def test_neighbors_regressors_zero_distance():
# Test radius-based regressor, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]])
y = np.array([1.0, 1.5, 2.0, 0.0])
radius = 0.2
z = np.array([[1.1, 1.1], [2.0, 2.0]])
rnn_correct_labels = np.array([1.25, 2.0])
knn_correct_unif = np.array([1.25, 1.0])
knn_correct_dist = np.array([1.25, 2.0])
for algorithm in ALGORITHMS:
# we don't test weights=_weight_func since the user is expected
# to handle zero distances themselves in the function.
for weights in ['uniform', 'distance']:
rnn = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
assert_array_almost_equal(rnn_correct_labels, rnn.predict(z))
for weights, corr_labels in zip(['uniform', 'distance'],
[knn_correct_unif, knn_correct_dist]):
knn = neighbors.KNeighborsRegressor(n_neighbors=2,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
assert_array_almost_equal(corr_labels, knn.predict(z))
def test_radius_neighbors_boundary_handling():
"""Test whether points lying on boundary are handled consistently
Also ensures that even with only one query point, an object array
is returned rather than a 2d array.
"""
X = np.array([[1.5], [3.0], [3.01]])
radius = 3.0
for algorithm in ALGORITHMS:
nbrs = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm).fit(X)
results = nbrs.radius_neighbors([[0.0]], return_distance=False)
assert_equal(results.shape, (1,))
assert_equal(results.dtype, object)
assert_array_equal(results[0], [0, 1])
def test_RadiusNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 2
n_samples = 40
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
for o in range(n_output):
rnn = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn.fit(X_train, y_train[:, o])
y_pred_so.append(rnn.predict(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
# Multioutput prediction
rnn_mo = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn_mo.fit(X_train, y_train)
y_pred_mo = rnn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
def test_kneighbors_classifier_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-NN classifier on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
X *= X > .2
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
for sparsev in SPARSE_TYPES + (np.asarray,):
X_eps = sparsev(X[:n_test_pts] + epsilon)
y_pred = knn.predict(X_eps)
assert_array_equal(y_pred, y[:n_test_pts])
def test_KNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 5
n_samples = 50
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
y_pred_proba_so = []
for o in range(n_output):
knn = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train[:, o])
y_pred_so.append(knn.predict(X_test))
y_pred_proba_so.append(knn.predict_proba(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
assert_equal(len(y_pred_proba_so), n_output)
# Multioutput prediction
knn_mo = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn_mo.fit(X_train, y_train)
y_pred_mo = knn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
# Check proba
y_pred_proba_mo = knn_mo.predict_proba(X_test)
assert_equal(len(y_pred_proba_mo), n_output)
for proba_mo, proba_so in zip(y_pred_proba_mo, y_pred_proba_so):
assert_array_almost_equal(proba_mo, proba_so)
def test_kneighbors_regressor(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < 0.3))
def test_KNeighborsRegressor_multioutput_uniform_weight():
# Test k-neighbors in multi-output regression with uniform weight
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
knn = neighbors.KNeighborsRegressor(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train)
neigh_idx = knn.kneighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred = knn.predict(X_test)
assert_equal(y_pred.shape, y_test.shape)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_kneighbors_regressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_radius_neighbors_regressor(n_samples=40,
n_features=3,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < radius / 2))
def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight():
# Test radius neighbors in multi-output regression (uniform weight)
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
rnn = neighbors.RadiusNeighborsRegressor(weights=weights,
algorithm=algorithm)
rnn.fit(X_train, y_train)
neigh_idx = rnn.radius_neighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred_idx = np.array(y_pred_idx)
y_pred = rnn.predict(X_test)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_equal(y_pred.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_RadiusNeighborsRegressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression with various weight
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
rnn = neighbors.RadiusNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = rnn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_kneighbors_regressor_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test radius-based regression on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .25).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
for sparsev in SPARSE_OR_DENSE:
X2 = sparsev(X)
assert_true(np.mean(knn.predict(X2).round() == y) > 0.95)
def test_neighbors_iris():
# Sanity checks on the iris dataset
# Puts three points of each label in the plane and performs a
# nearest neighbor query on points near the decision boundary.
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=1,
algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_array_equal(clf.predict(iris.data), iris.target)
clf.set_params(n_neighbors=9, algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_true(np.mean(clf.predict(iris.data) == iris.target) > 0.95)
rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm)
rgs.fit(iris.data, iris.target)
assert_true(np.mean(rgs.predict(iris.data).round() == iris.target)
> 0.95)
def test_neighbors_digits():
# Sanity check on the digits dataset
# the 'brute' algorithm has been observed to fail if the input
# dtype is uint8 due to overflow in distance calculations.
X = digits.data.astype('uint8')
Y = digits.target
(n_samples, n_features) = X.shape
train_test_boundary = int(n_samples * 0.8)
train = np.arange(0, train_test_boundary)
test = np.arange(train_test_boundary, n_samples)
(X_train, Y_train, X_test, Y_test) = X[train], Y[train], X[test], Y[test]
clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm='brute')
score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test)
score_float = clf.fit(X_train.astype(float), Y_train).score(
X_test.astype(float), Y_test)
assert_equal(score_uint8, score_float)
def test_kneighbors_graph():
# Test kneighbors_graph to build the k-Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
# n_neighbors = 1
A = neighbors.kneighbors_graph(X, 1, mode='connectivity')
assert_array_equal(A.toarray(), np.eye(A.shape[0]))
A = neighbors.kneighbors_graph(X, 1, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0.00, 1.01, 0.],
[1.01, 0., 0.],
[0.00, 1.40716026, 0.]])
# n_neighbors = 2
A = neighbors.kneighbors_graph(X, 2, mode='connectivity')
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 0.],
[0., 1., 1.]])
A = neighbors.kneighbors_graph(X, 2, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 2.23606798],
[1.01, 0., 1.40716026],
[2.23606798, 1.40716026, 0.]])
# n_neighbors = 3
A = neighbors.kneighbors_graph(X, 3, mode='connectivity')
assert_array_almost_equal(
A.toarray(),
[[1, 1, 1], [1, 1, 1], [1, 1, 1]])
def test_kneighbors_graph_sparse(seed=36):
# Test kneighbors_graph to build the k-Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.kneighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.kneighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_radius_neighbors_graph():
# Test radius_neighbors_graph to build the Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='connectivity')
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 1.],
[0., 1., 1.]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 0.],
[1.01, 0., 1.40716026],
[0., 1.40716026, 0.]])
def test_radius_neighbors_graph_sparse(seed=36):
# Test radius_neighbors_graph to build the Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.radius_neighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.radius_neighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_neighbors_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm='blah')
X = rng.random_sample((10, 2))
Xsparse = csr_matrix(X)
y = np.ones(10)
for cls in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
assert_raises(ValueError,
cls,
weights='blah')
assert_raises(ValueError,
cls, p=-1)
assert_raises(ValueError,
cls, algorithm='blah')
nbrs = cls(algorithm='ball_tree', metric='haversine')
assert_raises(ValueError,
nbrs.predict,
X)
assert_raises(ValueError,
ignore_warnings(nbrs.fit),
Xsparse, y)
nbrs = cls()
assert_raises(ValueError,
nbrs.fit,
np.ones((0, 2)), np.ones(0))
assert_raises(ValueError,
nbrs.fit,
X[:, :, None], y)
nbrs.fit(X, y)
assert_raises(ValueError,
nbrs.predict,
[[]])
if (isinstance(cls, neighbors.KNeighborsClassifier) or
isinstance(cls, neighbors.KNeighborsRegressor)):
nbrs = cls(n_neighbors=-1)
assert_raises(ValueError, nbrs.fit, X, y)
nbrs = neighbors.NearestNeighbors().fit(X)
assert_raises(ValueError, nbrs.kneighbors_graph, X, mode='blah')
assert_raises(ValueError, nbrs.radius_neighbors_graph, X, mode='blah')
def test_neighbors_metrics(n_samples=20, n_features=3,
n_query_pts=2, n_neighbors=5):
# Test computing the neighbors for various metrics
# create a symmetric matrix
V = rng.rand(n_features, n_features)
VI = np.dot(V, V.T)
metrics = [('euclidean', {}),
('manhattan', {}),
('minkowski', dict(p=1)),
('minkowski', dict(p=2)),
('minkowski', dict(p=3)),
('minkowski', dict(p=np.inf)),
('chebyshev', {}),
('seuclidean', dict(V=rng.rand(n_features))),
('wminkowski', dict(p=3, w=rng.rand(n_features))),
('mahalanobis', dict(VI=VI))]
algorithms = ['brute', 'ball_tree', 'kd_tree']
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for metric, metric_params in metrics:
results = []
p = metric_params.pop('p', 2)
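# 'p' is passed via the dedicated keyword below rather than through metric_params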
for algorithm in algorithms:
# KD tree doesn't support all metrics
if (algorithm == 'kd_tree' and
metric not in neighbors.KDTree.valid_metrics):
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm=algorithm,
metric=metric, metric_params=metric_params)
continue
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
metric=metric, p=p,
metric_params=metric_params)
neigh.fit(X)
results.append(neigh.kneighbors(test, return_distance=True))
assert_array_almost_equal(results[0][0], results[1][0])
assert_array_almost_equal(results[0][1], results[1][1])
def test_callable_metric():
metric = lambda x1, x2: np.sqrt(np.sum(x1 ** 2 + x2 ** 2))
X = np.random.RandomState(42).rand(20, 2)
nbrs1 = neighbors.NearestNeighbors(3, algorithm='auto', metric=metric)
nbrs2 = neighbors.NearestNeighbors(3, algorithm='brute', metric=metric)
nbrs1.fit(X)
nbrs2.fit(X)
dist1, ind1 = nbrs1.kneighbors(X)
dist2, ind2 = nbrs2.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
def test_metric_params_interface():
assert_warns(DeprecationWarning, neighbors.KNeighborsClassifier,
metric='wminkowski', w=np.ones(10))
assert_warns(SyntaxWarning, neighbors.KNeighborsClassifier,
metric_params={'p': 3})
def test_predict_sparse_ball_kd_tree():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
y = rng.randint(0, 2, 5)
nbrs1 = neighbors.KNeighborsClassifier(1, algorithm='kd_tree')
nbrs2 = neighbors.KNeighborsRegressor(1, algorithm='ball_tree')
for model in [nbrs1, nbrs2]:
model.fit(X, y)
assert_raises(ValueError, model.predict, csr_matrix(X))
def test_non_euclidean_kneighbors():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Find a reasonable radius.
dist_array = pairwise_distances(X).flatten()
dist_array = np.sort(dist_array)  # np.sort returns a copy, so keep it: dist_array[15] is then the 15th smallest pairwise distance
radius = dist_array[15]
# Test kneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.kneighbors_graph(
X, 3, metric=metric).toarray()
nbrs1 = neighbors.NearestNeighbors(3, metric=metric).fit(X)
assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray())
# Test radiusneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.radius_neighbors_graph(
X, radius, metric=metric).toarray()
nbrs1 = neighbors.NearestNeighbors(metric=metric, radius=radius).fit(X)
assert_array_equal(nbrs_graph, nbrs1.radius_neighbors_graph(X).A)
# Raise an error when a conflicting metric is supplied to the graph functions.
X_nbrs = neighbors.NearestNeighbors(3, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.kneighbors_graph, X_nbrs, 3,
metric='euclidean')
X_nbrs = neighbors.NearestNeighbors(radius=radius, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.radius_neighbors_graph, X_nbrs,
radius, metric='euclidean')
def check_object_arrays(nparray, list_check):
for ind, ele in enumerate(nparray):
assert_array_equal(ele, list_check[ind])
def test_k_and_radius_neighbors_train_is_not_query():
# Test kneighbors et.al when query is not training data
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
test_data = [[2], [1]]
# Test neighbors.
dist, ind = nn.kneighbors(test_data)
assert_array_equal(dist, [[1], [0]])
assert_array_equal(ind, [[1], [1]])
dist, ind = nn.radius_neighbors([[2], [1]], radius=1.5)
check_object_arrays(dist, [[1], [1, 0]])
check_object_arrays(ind, [[1], [0, 1]])
# Test the graph variants.
assert_array_equal(
nn.kneighbors_graph(test_data).A, [[0., 1.], [0., 1.]])
assert_array_equal(
nn.kneighbors_graph([[2], [1]], mode='distance').A,
np.array([[0., 1.], [0., 0.]]))
rng = nn.radius_neighbors_graph([[2], [1]], radius=1.5)
assert_array_equal(rng.A, [[0, 1], [1, 1]])
def test_k_and_radius_neighbors_X_None():
# Test kneighbors et.al when query is None
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, [[1], [1]])
assert_array_equal(ind, [[1], [0]])
dist, ind = nn.radius_neighbors(None, radius=1.5)
check_object_arrays(dist, [[1], [1]])
check_object_arrays(ind, [[1], [0]])
# Test the graph variants.
rng = nn.radius_neighbors_graph(None, radius=1.5)
kng = nn.kneighbors_graph(None)
for graph in [rng, kng]:
assert_array_equal(rng.A, [[0, 1], [1, 0]])
assert_array_equal(rng.data, [1, 1])
assert_array_equal(rng.indices, [1, 0])
X = [[0, 1], [0, 1], [1, 1]]
nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm)
nn.fit(X)
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0]]))
def test_k_and_radius_neighbors_duplicates():
# Test behavior of kneighbors when duplicates are present in query
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
nn.fit([[0], [1]])
# Do not do anything special to duplicates.
kng = nn.kneighbors_graph([[0], [1]], mode='distance')
assert_array_equal(
kng.A,
np.array([[0., 0.], [0., 0.]]))
assert_array_equal(kng.data, [0., 0.])
assert_array_equal(kng.indices, [0, 1])
dist, ind = nn.radius_neighbors([[0], [1]], radius=1.5)
check_object_arrays(dist, [[0, 1], [1, 0]])
check_object_arrays(ind, [[0, 1], [0, 1]])
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5)
assert_array_equal(rng.A, np.ones((2, 2)))
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5,
mode='distance')
assert_array_equal(rng.A, [[0, 1], [1, 0]])
assert_array_equal(rng.indices, [0, 1, 0, 1])
assert_array_equal(rng.data, [0, 1, 1, 0])
# Mask the first duplicates when n_duplicates > n_neighbors.
X = np.ones((3, 1))
nn = neighbors.NearestNeighbors(n_neighbors=1)
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, np.zeros((3, 1)))
assert_array_equal(ind, [[1], [0], [1]])
# Test that zeros are explicitly marked in kneighbors_graph.
kng = nn.kneighbors_graph(mode='distance')
assert_array_equal(
kng.A, np.zeros((3, 3)))
assert_array_equal(kng.data, np.zeros(3))
assert_array_equal(kng.indices, [1., 0., 1.])
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 0.], [1., 0., 0.], [0., 1., 0.]]))
def test_include_self_neighbors_graph():
# Test include_self parameter in neighbors_graph
X = [[2, 3], [4, 5]]
kng = neighbors.kneighbors_graph(X, 1, include_self=True).A
kng_not_self = neighbors.kneighbors_graph(X, 1, include_self=False).A
assert_array_equal(kng, [[1., 0.], [0., 1.]])
assert_array_equal(kng_not_self, [[0., 1.], [1., 0.]])
rng = neighbors.radius_neighbors_graph(X, 5.0, include_self=True).A
rng_not_self = neighbors.radius_neighbors_graph(
X, 5.0, include_self=False).A
assert_array_equal(rng, [[1., 1.], [1., 1.]])
assert_array_equal(rng_not_self, [[0., 1.], [1., 0.]])
def test_kneighbors_parallel():
X, y = datasets.make_classification(n_samples=10, n_features=2,
n_redundant=0, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y)
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=3,
algorithm=algorithm)
clf.fit(X_train, y_train)
y_1 = clf.predict(X_test)
dist_1, ind_1 = clf.kneighbors(X_test)
A_1 = clf.kneighbors_graph(X_test, mode='distance').toarray()
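# predictions, neighbor indices and graphs must not depend on the number of parallel jobs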
for n_jobs in [-1, 2, 5]:
clf.set_params(n_jobs=n_jobs)
y = clf.predict(X_test)
dist, ind = clf.kneighbors(X_test)
A = clf.kneighbors_graph(X_test, mode='distance').toarray()
assert_array_equal(y_1, y)
assert_array_almost_equal(dist_1, dist)
assert_array_equal(ind_1, ind)
assert_array_almost_equal(A_1, A)
def test_dtype_convert():
classifier = neighbors.KNeighborsClassifier(n_neighbors=1)
CLASSES = 15
X = np.eye(CLASSES)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]]
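# single-character string labels must survive fit/predict without being cast to a numeric dtype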
result = classifier.fit(X, y).predict(X)
assert_array_equal(result, y)
| bsd-3-clause |
PatrickOReilly/scikit-learn | examples/cluster/plot_ward_structured_vs_unstructured.py | 320 | 3369 | """
===========================================================
Hierarchical clustering: structured vs unstructured ward
===========================================================
This example builds a swiss roll dataset and runs
hierarchical clustering on the positions of its points.
For more information, see :ref:`hierarchical_clustering`.
In a first step, the hierarchical clustering is performed without connectivity
constraints on the structure and is solely based on distance, whereas in
a second step the clustering is restricted to the k-Nearest Neighbors
graph: it's a hierarchical clustering with structure prior.
Some of the clusters learned without connectivity constraints do not
respect the structure of the swiss roll and extend across different folds of
the manifolds. By contrast, when connectivity constraints are imposed,
the clusters form a nice parcellation of the swiss roll.
"""
# Authors : Vincent Michel, 2010
# Alexandre Gramfort, 2010
# Gael Varoquaux, 2010
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets.samples_generator import make_swiss_roll
###############################################################################
# Generate data (swiss roll dataset)
n_samples = 1500
noise = 0.05
X, _ = make_swiss_roll(n_samples, noise)
# Make it thinner
X[:, 1] *= .5
###############################################################################
# Compute clustering
print("Compute unstructured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(np.float(l) / np.max(label + 1)))
plt.title('Without connectivity constraints (time %.2fs)' % elapsed_time)
###############################################################################
# Define the structure A of the data. Here a 10 nearest neighbors
from sklearn.neighbors import kneighbors_graph
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, connectivity=connectivity,
linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('With connectivity constraints (time %.2fs)' % elapsed_time)
plt.show()
| bsd-3-clause |
parekhmitchell/Machine-Learning | Machine Learning A-Z Template Folder/Part 3 - Classification/Section 20 - Random Forest Classification/classification_template.py | 37 | 2538 | # Classification template
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Social_Network_Ads.csv')
X = dataset.iloc[:, [2, 3]].values
y = dataset.iloc[:, 4].values
# Splitting the dataset into the Training set and Test set
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Fitting classifier to the Training set
# Create your classifier here
# Predicting the Test set results
y_pred = classifier.predict(X_test)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
# Visualising the Training set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Classifier (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
# Visualising the Test set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Classifier (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show() | mit |
glennq/scikit-learn | examples/cluster/plot_birch_vs_minibatchkmeans.py | 333 | 3694 | """
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
time_ = time() - t
print("Birch %s as the final step took %0.2f seconds" % (
info, (time() - t)))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
| bsd-3-clause |
zhouyao1994/incubator-superset | setup.py | 1 | 4310 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import io
import json
import os
import subprocess
import sys
from setuptools import find_packages, setup
if sys.version_info < (3, 6):
sys.exit("Sorry, Python < 3.6 is not supported")
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
PACKAGE_JSON = os.path.join(BASE_DIR, "superset", "assets", "package.json")
with open(PACKAGE_JSON, "r") as package_file:
version_string = json.load(package_file)["version"]
with io.open("README.md", "r", encoding="utf-8") as f:
long_description = f.read()
def get_git_sha():
try:
s = subprocess.check_output(["git", "rev-parse", "HEAD"])
return s.decode().strip()
except Exception:
return ""
GIT_SHA = get_git_sha()
version_info = {"GIT_SHA": GIT_SHA, "version": version_string}
print("-==-" * 15)
print("VERSION: " + version_string)
print("GIT SHA: " + GIT_SHA)
print("-==-" * 15)
VERSION_INFO_FILE = os.path.join(
BASE_DIR, "superset", "static", "assets", "version_info.json"
)
with open(VERSION_INFO_FILE, "w") as version_file:
json.dump(version_info, version_file)
setup(
name="apache-superset",
description=("A modern, enterprise-ready business intelligence web application"),
long_description=long_description,
long_description_content_type="text/markdown",
version=version_string,
packages=find_packages(),
include_package_data=True,
zip_safe=False,
scripts=["superset/bin/superset"],
install_requires=[
"backoff>=1.8.0",
"bleach>=3.0.2, <4.0.0",
"celery>=4.3.0, <5.0.0",
"click>=6.0, <7.0.0", # `click`>=7 forces "-" instead of "_"
"colorama",
"contextlib2",
"croniter>=0.3.28",
"cryptography>=2.4.2",
"flask>=1.1.0, <2.0.0",
"flask-appbuilder>=2.2.0, <2.3.0",
"flask-caching",
"flask-compress",
"flask-talisman",
"flask-migrate",
"flask-wtf",
"geopy",
"gunicorn<19.9.0", # deprecated
"humanize",
"isodate",
"markdown>=3.0",
"msgpack>=0.6.1, <0.7.0",
"pandas>=0.24.2, <0.25.0",
"parsedatetime",
"pathlib2",
"polyline",
"python-dateutil",
"python-dotenv",
"python-geohash",
"pyarrow>=0.15.1, <0.16.0",
"pyyaml>=5.1",
"retry>=0.9.2",
"selenium>=3.141.0",
"simplejson>=3.15.0",
"sqlalchemy>=1.3.5,<2.0",
"sqlalchemy-utils>=0.33.2",
"sqlparse>=0.3.0,<0.4",
"wtforms-json",
],
extras_require={
"bigquery": ["pybigquery>=0.4.10", "pandas_gbq>=0.10.0"],
"cors": ["flask-cors>=2.0.0"],
"gsheets": ["gsheetsdb>=0.1.9"],
"hive": ["pyhive[hive]>=0.6.1", "tableschema", "thrift>=0.11.0, <1.0.0"],
"mysql": ["mysqlclient==1.4.2.post1"],
"postgres": ["psycopg2-binary==2.7.5"],
"presto": ["pyhive[presto]>=0.4.0"],
"elasticsearch": ["elasticsearch-dbapi>=0.1.0, <0.2.0"],
"druid": ["pydruid==0.5.7", "requests==2.22.0"],
"hana": ["hdbcli==2.4.162", "sqlalchemy_hana==0.4.0"],
},
python_requires="~=3.6",
author="Apache Software Foundation",
author_email="[email protected]",
url="https://superset.apache.org/",
download_url="https://www.apache.org/dist/incubator/superset/" + version_string,
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
tests_require=["flask-testing==0.7.1"],
)
| apache-2.0 |
mitschabaude/nanopores | scripts/pughpore/randomwalk/attempt/test.py | 1 | 2870 | import sys
#import matplotlib.pyplot as plt
from random import gauss
from math import sqrt, ceil, floor, pi
import numpy as np
import nanopores as nano
import nanopores.geometries.pughpore as pughpore
#eps = 5e-2
#L = 1.
geop = nano.Params(pughpore.params)
physp = nano.Physics(name="pore_mol")
kT = physp.kT
eta = physp.eta
l3 = geop.l3
rMolecule = geop.rMolecule
Dmol = kT/(6.*pi*eta*rMolecule*1e-9) # [m^2/s]
D = Dmol*1e9 # from [m^2/s] to [nm^2/ns]
fac = sqrt(2*D) # correction factor
L = l3-rMolecule
eps = L*1e-1
taufacinv = int(sys.argv[1])
tau = eps**2/(2*D)*5e-2*(1./taufacinv)
iter = 50000000
len=tau*iter
sqrttau = sqrt(tau)
attempt = 0
try:
np.load('does_not_exist.npy')
X = np.load('X.npy')
Y = np.load('Y.npy')
except:
Y = np.zeros(iter)
for i in range(iter-1):
if i%int(iter/100.)==0:
print '%.0f %%'%(100*float(i)/float(iter))
xi = gauss(0.,1.)
Y[i+1]=Y[i]+sqrttau*fac*xi
X = np.linspace(0.,len,iter)
np.save('X',X)
np.save('Y',Y)
maxL = ceil(np.max(Y))
minL = floor(np.min(Y))
L_ = np.concatenate((-np.arange(L,abs(minL)+L,L)[::-1],np.arange(0.,maxL+L,L)))
#iold = 0
sig = 0.
ffa = False
try:
np.load('does_not_exist.npy')
Exp=np.load('Exp.npy')
XA = np.load('XA.npy')
YA = np.load('YA.npy')
except:
XA = np.array([])
YA = np.array([])
Exp = np.zeros(100)
exptau=len/100.
j=1
for i in range(1,X.shape[0]):
if i%int(X.shape[0]/100.)==0 and i!=0:
print '%.0f %%'%(100*float(i)/float(X.shape[0]))
dist = np.min(np.absolute(L_-Y[i]))
Li = np.where(abs(L_-Y[i])==dist)[0][0]
if dist<=eps and np.sign(Y[i]-L_[Li])!=sig and ffa:
attempt+=1
XA = np.append(XA,X[i])
YA = np.append(YA,Y[i])
ffa = False
if dist>eps:
sig = np.sign(Y[i]-L_[Li])
# if np.sign(Y[iold]-L_[Li])!=sig:
# attempt+=1
# plt.plot([X[iold],X[i]],[Y[iold],Y[i]],color='#00ff00',linewidth=2.)
ffa = True
# iold = i
if X[i]>=exptau*j:
Exp[j-1]=attempt/(j*exptau)
j+=1
Exp[-1]=attempt/(len)
np.save('Exp',Exp)
np.save('XA',XA)
np.save('YA',YA)
attemptrate = Exp[-1]
print 'L = %.2e; eps = %.2e, D = %.2e'%(L,eps,D)
print 'tau = %.3e'%tau
theo = 2*D/(L*eps)
print 'analytic attempt rate = %.2e'%theo
print 'numeric attemptrate = %.3e'%attemptrate
# plot stuff
#for i in L_:
# plt.plot([0.,iter*tau],[i,i],'r--')
# plt.fill_between([0.,iter*tau],[float(i)+eps,float(i)+eps],[float(i)-eps,float(i)-eps],color='#ff0000',alpha=.2)
#plt.plot(X,Y,color='#000000')
#plt.tight_layout()
#plt.plot(XA,YA,'ro')
#plt.show()
#plt.plot(np.linspace(1,len,100),Exp,color='#0000ff')
#plt.plot([0.,len],[attemptrate,attemptrate],color='#0000ff')
#plt.show()
| mit |
AICP/external_chromium_org | chrome/test/nacl_test_injection/buildbot_nacl_integration.py | 94 | 3083 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
def Main(args):
pwd = os.environ.get('PWD', '')
is_integration_bot = 'nacl-chrome' in pwd
# This environment variable check mimics what
# buildbot_chrome_nacl_stage.py does.
is_win64 = (sys.platform in ('win32', 'cygwin') and
('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')))
# On the main Chrome waterfall, we may need to control where the tests are
# run.
# If there is serious skew in the PPAPI interface that causes all of
# the NaCl integration tests to fail, you can uncomment the
# following block. (Make sure you comment it out when the issues
# are resolved.) *However*, it is much preferred to add tests to
# the 'tests_to_disable' list below.
#if not is_integration_bot:
# return
tests_to_disable = []
# In general, you should disable tests inside this conditional. This turns
# them off on the main Chrome waterfall, but not on NaCl's integration bots.
# This makes it easier to see when things have been fixed NaCl side.
if not is_integration_bot:
# http://code.google.com/p/nativeclient/issues/detail?id=2511
tests_to_disable.append('run_ppapi_ppb_image_data_browser_test')
if sys.platform == 'darwin':
# TODO(mseaborn) fix
# http://code.google.com/p/nativeclient/issues/detail?id=1835
tests_to_disable.append('run_ppapi_crash_browser_test')
if sys.platform in ('win32', 'cygwin'):
# This one is only failing for nacl_glibc on x64 Windows but it is not
# clear how to disable only that limited case.
# See http://crbug.com/132395
tests_to_disable.append('run_inbrowser_test_runner')
# run_breakpad_browser_process_crash_test is flaky.
# See http://crbug.com/317890
tests_to_disable.append('run_breakpad_browser_process_crash_test')
# See http://crbug.com/332301
tests_to_disable.append('run_breakpad_crash_in_syscall_test')
# It appears that crash_service.exe is not being reliably built by
# default in the CQ. See: http://crbug.com/380880
tests_to_disable.append('run_breakpad_untrusted_crash_test')
tests_to_disable.append('run_breakpad_trusted_crash_in_startup_test')
script_dir = os.path.dirname(os.path.abspath(__file__))
nacl_integration_script = os.path.join(script_dir,
'buildbot_chrome_nacl_stage.py')
cmd = [sys.executable,
nacl_integration_script,
# TODO(ncbray) re-enable.
# https://code.google.com/p/chromium/issues/detail?id=133568
'--disable_glibc',
'--disable_tests=%s' % ','.join(tests_to_disable)]
cmd += args
sys.stdout.write('Running %s\n' % ' '.join(cmd))
sys.stdout.flush()
return subprocess.call(cmd)
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
| bsd-3-clause |
heli522/scikit-learn | sklearn/utils/fixes.py | 133 | 12882 | """Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Lars Buitinck
#
# License: BSD 3 clause
import inspect
import warnings
import sys
import functools
import os
import errno
import numpy as np
import scipy.sparse as sp
import scipy
def _parse_version(version_string):
version = []
for x in version_string.split('.'):
try:
version.append(int(x))
except ValueError:
# x may be of the form dev-1ea1592
version.append(x)
return tuple(version)
np_version = _parse_version(np.__version__)
sp_version = _parse_version(scipy.__version__)
try:
from scipy.special import expit # SciPy >= 0.10
with np.errstate(invalid='ignore', over='ignore'):
if np.isnan(expit(1000)): # SciPy < 0.14
raise ImportError("no stable expit in scipy.special")
except ImportError:
def expit(x, out=None):
"""Logistic sigmoid function, ``1 / (1 + exp(-x))``.
See sklearn.utils.extmath.log_logistic for the log of this function.
"""
if out is None:
out = np.empty(np.atleast_1d(x).shape, dtype=np.float64)
out[:] = x
# 1 / (1 + exp(-x)) = (1 + tanh(x / 2)) / 2
# This way of computing the logistic is both fast and stable.
out *= .5
np.tanh(out, out)
out += 1
out *= .5
return out.reshape(np.shape(x))
# little dance to see if np.copy has an 'order' keyword argument
if 'order' in inspect.getargspec(np.copy)[0]:
def safe_copy(X):
# Copy, but keep the order
return np.copy(X, order='K')
else:
# Before an 'order' argument was introduced, numpy wouldn't muck with
# the ordering
safe_copy = np.copy
try:
if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
np.divide(.4, 1, casting="unsafe", dtype=np.float))
or not np.allclose(np.divide(.4, 1), .4)):
raise TypeError('Divide not working with dtype: '
'https://github.com/numpy/numpy/issues/3484')
divide = np.divide
except TypeError:
# Compat for old versions of np.divide that do not provide support for
# the dtype args
def divide(x1, x2, out=None, dtype=None):
out_orig = out
if out is None:
out = np.asarray(x1, dtype=dtype)
if out is x1:
out = x1.copy()
else:
if out is not x1:
out[:] = x1
if dtype is not None and out.dtype != dtype:
out = out.astype(dtype)
out /= x2
if out_orig is None and np.isscalar(x1):
out = np.asscalar(out)
return out
try:
np.array(5).astype(float, copy=False)
except TypeError:
# Compat where astype accepted no copy argument
def astype(array, dtype, copy=True):
if not copy and array.dtype == dtype:
return array
return array.astype(dtype)
else:
astype = np.ndarray.astype
try:
with warnings.catch_warnings(record=True):
# Don't raise the numpy deprecation warnings that appear in
# 1.9, but avoid Python bug due to simplefilter('ignore')
warnings.simplefilter('always')
sp.csr_matrix([1.0, 2.0, 3.0]).max(axis=0)
except (TypeError, AttributeError):
# in scipy < 14.0, sparse matrix min/max doesn't accept an `axis` argument
# the following code is taken from the scipy 0.14 codebase
def _minor_reduce(X, ufunc):
major_index = np.flatnonzero(np.diff(X.indptr))
if X.data.size == 0 and major_index.size == 0:
# Numpy < 1.8.0 don't handle empty arrays in reduceat
value = np.zeros_like(X.data)
else:
value = ufunc.reduceat(X.data, X.indptr[major_index])
return major_index, value
def _min_or_max_axis(X, axis, min_or_max):
N = X.shape[axis]
if N == 0:
raise ValueError("zero-size array to reduction operation")
M = X.shape[1 - axis]
mat = X.tocsc() if axis == 0 else X.tocsr()
mat.sum_duplicates()
major_index, value = _minor_reduce(mat, min_or_max)
not_full = np.diff(mat.indptr)[major_index] < N
value[not_full] = min_or_max(value[not_full], 0)
mask = value != 0
major_index = np.compress(mask, major_index)
value = np.compress(mask, value)
from scipy.sparse import coo_matrix
if axis == 0:
res = coo_matrix((value, (np.zeros(len(value)), major_index)),
dtype=X.dtype, shape=(1, M))
else:
res = coo_matrix((value, (major_index, np.zeros(len(value)))),
dtype=X.dtype, shape=(M, 1))
return res.A.ravel()
def _sparse_min_or_max(X, axis, min_or_max):
if axis is None:
if 0 in X.shape:
raise ValueError("zero-size array to reduction operation")
zero = X.dtype.type(0)
if X.nnz == 0:
return zero
m = min_or_max.reduce(X.data.ravel())
if X.nnz != np.product(X.shape):
m = min_or_max(zero, m)
return m
if axis < 0:
axis += 2
if (axis == 0) or (axis == 1):
return _min_or_max_axis(X, axis, min_or_max)
else:
raise ValueError("invalid axis, use 0 for rows, or 1 for columns")
def sparse_min_max(X, axis):
return (_sparse_min_or_max(X, axis, np.minimum),
_sparse_min_or_max(X, axis, np.maximum))
else:
def sparse_min_max(X, axis):
return (X.min(axis=axis).toarray().ravel(),
X.max(axis=axis).toarray().ravel())
try:
from numpy import argpartition
except ImportError:
# numpy.argpartition was introduced in v 1.8.0
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
return np.argsort(a, axis=axis, order=order)
try:
from itertools import combinations_with_replacement
except ImportError:
# Backport of itertools.combinations_with_replacement for Python 2.6,
# from Python 3.4 documentation (http://tinyurl.com/comb-w-r), copyright
# Python Software Foundation (https://docs.python.org/3/license.html)
def combinations_with_replacement(iterable, r):
# combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
try:
from numpy import isclose
except ImportError:
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within
a tolerance.
This function was added to numpy v1.7.0, and the version you are
running has been backported from numpy v1.8.1. See its documentation
for more details.
"""
def within_tol(x, y, atol, rtol):
with np.errstate(invalid='ignore'):
result = np.less_equal(abs(x - y), atol + rtol * abs(y))
if np.isscalar(a) and np.isscalar(b):
result = bool(result)
return result
x = np.array(a, copy=False, subok=True, ndmin=1)
y = np.array(b, copy=False, subok=True, ndmin=1)
xfin = np.isfinite(x)
yfin = np.isfinite(y)
if all(xfin) and all(yfin):
return within_tol(x, y, atol, rtol)
else:
finite = xfin & yfin
cond = np.zeros_like(finite, subok=True)
# Since we're using boolean indexing, x & y must be the same shape.
# Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
# lib.stride_tricks, though, so we can't import it here.
x = x * np.ones_like(cond)
y = y * np.ones_like(cond)
# Avoid subtraction with infinite/nan values...
cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
# Check for equality of infinite values...
cond[~finite] = (x[~finite] == y[~finite])
if equal_nan:
# Make NaN == NaN
cond[np.isnan(x) & np.isnan(y)] = True
return cond
if np_version < (1, 7):
# Prior to 1.7.0, np.frombuffer wouldn't work for empty first arg.
def frombuffer_empty(buf, dtype):
if len(buf) == 0:
return np.empty(0, dtype=dtype)
else:
return np.frombuffer(buf, dtype=dtype)
else:
frombuffer_empty = np.frombuffer
if np_version < (1, 8):
def in1d(ar1, ar2, assume_unique=False, invert=False):
# Backport of numpy function in1d 1.8.1 to support numpy 1.6.2
# Ravel both arrays, behavior for the first array could be different
ar1 = np.asarray(ar1).ravel()
ar2 = np.asarray(ar2).ravel()
# This code is significantly faster when the condition is satisfied.
if len(ar2) < 10 * len(ar1) ** 0.145:
if invert:
mask = np.ones(len(ar1), dtype=np.bool)
for a in ar2:
mask &= (ar1 != a)
else:
mask = np.zeros(len(ar1), dtype=np.bool)
for a in ar2:
mask |= (ar1 == a)
return mask
# Otherwise use sorting
if not assume_unique:
ar1, rev_idx = np.unique(ar1, return_inverse=True)
ar2 = np.unique(ar2)
ar = np.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = np.concatenate((bool_ar, [invert]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
else:
from numpy import in1d
if sp_version < (0, 15):
# Backport fix for scikit-learn/scikit-learn#2986 / scipy/scipy#4142
from ._scipy_sparse_lsqr_backport import lsqr as sparse_lsqr
else:
from scipy.sparse.linalg import lsqr as sparse_lsqr
if sys.version_info < (2, 7, 0):
# partial cannot be pickled in Python 2.6
# http://bugs.python.org/issue1398
class partial(object):
def __init__(self, func, *args, **keywords):
functools.update_wrapper(self, func)
self.func = func
self.args = args
self.keywords = keywords
def __call__(self, *args, **keywords):
args = self.args + args
kwargs = self.keywords.copy()
kwargs.update(keywords)
return self.func(*args, **kwargs)
else:
from functools import partial
if np_version < (1, 6, 2):
# Allow bincount to accept empty arrays
# https://github.com/numpy/numpy/commit/40f0844846a9d7665616b142407a3d74cb65a040
def bincount(x, weights=None, minlength=None):
if len(x) > 0:
return np.bincount(x, weights, minlength)
else:
if minlength is None:
minlength = 0
minlength = np.asscalar(np.asarray(minlength, dtype=np.intp))
return np.zeros(minlength, dtype=np.intp)
else:
from numpy import bincount
if 'exist_ok' in inspect.getargspec(os.makedirs).args:
makedirs = os.makedirs
else:
def makedirs(name, mode=0o777, exist_ok=False):
"""makedirs(name [, mode=0o777][, exist_ok=False])
Super-mkdir; create a leaf directory and all intermediate ones. Works
like mkdir, except that any intermediate path segment (not just the
rightmost) will be created if it does not exist. If the target
directory already exists, raise an OSError if exist_ok is False.
Otherwise no exception is raised. This is recursive.
"""
try:
os.makedirs(name, mode=mode)
except OSError as e:
if (not exist_ok or e.errno != errno.EEXIST
or not os.path.isdir(name)):
raise
| bsd-3-clause |
GetsDrawn/getsdrawndotcom | GetsDrawnDotCom.py | 1 | 19166 |
# coding: utf-8
# <h1>GetsDrawn DotCom</h1>
# This is a python script to generate the website GetsDrawn. It takes data from /r/RedditGetsDrawn and makes something awesome.
#
# The script has evolved and been rewritten several times.
#
# The first script for rgdsnatch was written after I got banned from posting my artwork on /r/RedditGetsDrawn. The plan was to create a new site that displayed stuff from /r/RedditGetsDrawn.
#
# Currently it gets the most recent 25 items on redditgetsdrawn and saves them to a folder. The script looks at the newest 25 reference photos on RedditGetsDrawn. It focuses only on jpeg/png images and ignores links to files that don't end in .jpg or .png.
# Instead of ignoring those files, it needs to get the image (or images, in some cases) from the link.
# The photos are always submitted from imgur.
# Still filter out the i.imgur files, but take the links and filter them through a python imgur module returning the .jpeg or .png files.
#
#
# This is moving forward from rgdsnatch.py because I am stuck on it.
#
# TODO
#
# Fix the links that don't link to png/jpeg and link to webaddress.
# Needs to get the images that are at that web address and embed them.
#
# Display artwork submitted under the images.
#
# Upload artwork to user. Sends them a message on redditgetsdrawn with links.
#
# More pandas
#
# Saves reference images to imgs/year/month/day/reference/username-reference.png
#
# Saves art images to imgs/year/month/day/art/username-line-bw-colour.png
#
# Creates index.html file with:
# Title of site and logo: GetsDrawn
# Last updated date and time.
#
# Path of image file /imgs/year/month/day/username-reference.png.
# (This needs to be changed to just their username.)
#
# Save off .meta data from reddit of each photo, saving it to reference folder.
# username-yrmnthday.meta - contains info such as author, title, upvotes, downvotes.
# Currently saving .meta files to a meta folder - along side art and reference.
#
# Folder sorting system of files.
# websitename/index.html-style.css-imgs/YEAR(15)-MONTH(2)-DAY(4)/art-reference-meta
# Inside art folder
# Currently it generates USERNAME-line/bw/colour.png 50/50 white files. Maybe should be getting art replies from reddit?
#
# Inside reference folder
# Reference fold is working decent.
# it creates USERNAME-reference.png / jpeg files.
#
# Currently saves username-line-bw-colour.png to imgs folder. Instead get it to save to imgs/year/month/day/usernames.png.
# The script checks the year/month/day and creates the folder if it doesn't exist; if the folder is already there, it skips it.
# Maybe get the reference image and save it with the line/bw/color.pngs
#
# The script now filters the jpeg and png image and skips links to imgur pages. This needs to be fixed by getting the images from the imgur pages.
# It renames the image files to the redditor username followed by a -reference tag (and ending with png of course).
# It opens these files up with PIL and checks the sizes.
# It needs to resize the images that are larger than 800px to 800px.
# These images need to be linked in the index.html instead of the imgur alternatives.
#
# Instead of the jpeg/png files on imgur they are downloaded to the server with this script.
#
# Filter images as they are downloaded: skip an image if it was submitted less than a certain time ago or has been submitted before.
#
# Extend the subreddits it gets data from: cycle through a list and run the script over each subreddit in the list.
#
# Browse certain days - Current day by default but option to scroll through other days.
#
# Filters - male/female/animals/couples etc
# Function that returns only male portraits.
# tags to add to photos.
# Filter images with tags
#
#
#
# In[1]:
import os
import requests
from bs4 import BeautifulSoup
import re
import json
import time
import praw
import dominate
from dominate.tags import *
from time import gmtime, strftime
#import nose
#import unittest
import numpy as np
import pandas as pd
from pandas import *
from PIL import Image
from pprint import pprint
#import pyttsx
import shutil
import getpass
import random
from TwitterFollowBot import TwitterBot
# In[2]:
my_bot = TwitterBot()
# In[3]:
hosnam = getpass.getuser()
# In[4]:
gtsdrndir = ('/home/' + hosnam + '/getsdrawndotcom/')
# In[5]:
gtsdrndir
# In[6]:
if os.path.isdir(gtsdrndir) == True:
print ('its true')
else:
print ('its false')
os.mkdir(gtsdrndir)
# In[7]:
os.chdir(gtsdrndir)
# In[8]:
r = praw.Reddit(user_agent='getsdrawndotcom')
# In[9]:
#getmin = r.get_redditor('itwillbemine')
# In[10]:
#mincom = getmin.get_comments()
# In[11]:
#engine = pyttsx.init()
#engine.say('The quick brown fox jumped over the lazy dog.')
#engine.runAndWait()
# In[12]:
#shtweet = []
# In[13]:
#for mi in mincom:
# print mi
# shtweet.append(mi)
# In[14]:
bodycom = []
bodyicv = dict()
# In[15]:
#beginz = pyttsx.init()
# In[16]:
#for shtz in shtweet:
# print shtz.downs
# print shtz.ups
# print shtz.body
# print shtz.replies
#beginz.say(shtz.author)
#beginz.say(shtz.body)
#beginz.runAndWait()
# bodycom.append(shtz.body)
#bodyic
# In[17]:
#bodycom
# In[18]:
getnewr = r.get_subreddit('redditgetsdrawn')
# In[19]:
rdnew = getnewr.get_new()
# In[20]:
lisrgc = []
lisauth = []
# In[21]:
for uz in rdnew:
#print uz
lisrgc.append(uz)
# In[22]:
gtdrndic = dict()
# In[23]:
imgdir = (gtsdrndir + 'imgs')
# In[24]:
imgdir
# In[25]:
if os.path.isdir(imgdir) == True:
print ('its true')
else:
print ('its false')
os.mkdir(imgdir)
# In[26]:
artlist = os.listdir(imgdir)
# In[27]:
from time import time
# In[28]:
yearz = strftime("%y", gmtime())
monthz = strftime("%m", gmtime())
dayz = strftime("%d", gmtime())
#strftime("%y %m %d", gmtime())
# In[29]:
yrzpat = (imgdir + '/' + yearz)
monzpath = (yrzpat + '/' + monthz)
dayzpath = (monzpath + '/' + dayz)
rmgzdays = (dayzpath + '/reference')
imgzdays = (dayzpath + '/art')
metzdays = (dayzpath + '/meta')
repathz = (imgdir + '/' + yearz + '/' + monthz + '/' + dayz + '/')
# In[30]:
repathz
# In[31]:
dayzpath
# In[32]:
imgzdays
# In[33]:
repathz
# In[34]:
def ospacheck():
if os.path.isdir(imgdir + yearz) == True:
print ('its true')
else:
print ('its false')
os.mkdir(imgdir + yearz)
# In[35]:
ospacheck()
# In[36]:
#if os.path.isdir(imgzdir + yearz) == True:
# print 'its true'
#else:
# print 'its false'
# os.mkdir(imgzdir + yearz)
# In[37]:
lizmon = ['monzpath', 'dayzpath', 'imgzdays', 'rmgzdays', 'metzdays']
# Something is wrong with the script and it's no longer creating these dirs in the correct folder. How did this break?
# Fixed that, but there are still problems with it.
# Getting error:
# OSError: [Errno 17] File exists: '/home/wcmckee/getsdrawndotcom/imgs/15/01'
# If the folder exists it should be skipping over it; that's why it has the os.path.isdir(...) == True check:
# print its true
# else
# print its false, and make the dir
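# Minimal sketch of one way to avoid the "File exists" error described above
# (assumption: Python 3, where os.makedirs takes exist_ok). Creating the whole
# path in one call would also remove the need for the per-level isdir checks.
def ensure_dir(path):
    # Create path and any missing parents; do nothing if it already exists.
    os.makedirs(path, exist_ok=True)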
# In[38]:
if os.path.isdir(monzpath) == True:
print ('its true')
else:
print ('its false')
#os.mkdir('/home/wcmckee/getsdrawndotcom/' + monzpath)
# In[39]:
if os.path.isdir(imgzdays) == True:
print ('its true')
else:
print ('its false')
os.mkdir(imgzdays)
if os.path.isdir(rmgzdays) == True:
print ('its true')
else:
print ('its false')
os.mkdir(rmgzdays)
if os.path.isdir(metzdays) == True:
print ('its true')
else:
print ('its false')
os.mkdir(metzdays)
if os.path.isdir(dayzpath) == True:
print ('its true')
else:
print ('its false')
os.mkdir(dayzpath)
# Need to fix dir to just have /imgs/15/02/reference/imgnam-reference.jpg
# In[40]:
monzpath
# In[ ]:
# In[41]:
iwcpath = 'imgs/' + yearz + '/' + monthz + '/' + dayz + '/reference'
#monzpath = (yrzpat + '/' + monthz)
#dayzpath = (monzpath + '/' + dayz)
#rmgzdays = (dayzpath + '/reference')
# In[42]:
#for liz in lizmon:
# if os.path.isdir(liz) == True:
## print 'its true'
# else:
# print 'its false'
# os.mkdir(liz)
# In[43]:
fullhom = ('/home/wcmckee/getsdrawndotcom/')
# In[44]:
#artlist
# In[45]:
httpad = ('http://getsdrawn.com/imgs')
# In[46]:
#im = Image.new("RGB", (512, 512), "white")
#im.save(file + ".thumbnail", "JPEG")
# In[47]:
rmgzdays = (dayzpath + '/reference')
imgzdays = (dayzpath + '/art')
metzdays = (dayzpath + '/meta')
# In[48]:
os.chdir(metzdays)
# In[49]:
metadict = dict()
# If I save the data to the file, how am I going to get it to update as the post is archived? E.g. up and down votes.
# In[50]:
rgde = len(lisrgc)
# In[51]:
rgde
# In[52]:
alrgds = dict()
# In[53]:
#for lisr in lisrgc:
# print(lisr.author)
# print(lisr.title[0:30])
# In[54]:
for lisz in lisrgc:
metadict.update({'up': lisz.ups})
metadict.update({'down': lisz.downs})
metadict.update({'title': lisz.title})
metadict.update({'created': lisz.created})
#metadict.update({'createdutc': lisz.created_utc})
#print lisz.ups
#print lisz.downs
#print lisz.created
#print lisz.comments
# In[55]:
import random
# In[56]:
ranchor = random.choice(lisrgc)
# In[57]:
titshort = ranchor.title[0:30]
# In[58]:
titsre =titshort.replace(' ', '')
# In[59]:
titsre
# In[60]:
ranchor.url
# In[61]:
ranautr = (ranchor.author)
# In[62]:
hasra = ('#') + str(ranautr)
# In[63]:
hasra
# In[64]:
hasgd = ('#getsdrawn')
# In[ ]:
# In[ ]:
# In[ ]:
# In[65]:
urlfin = ('http://getsdrawn.com/' + iwcpath + '/' + str(ranautr) + '-reference.png')
# In[66]:
(urlfin)
# In[67]:
twez = (titsre + ' ' + urlfin + ' ' + hasra + ' ' + hasgd)
# In[68]:
len(twez)
# In[ ]:
# Need to save json object.
#
# The dict is created but it isn't saving. Looping through lisrgc twice; it should only require the one loop.
#
# Cycle through lisr, append to a dict / convert to json, and also cycle through the lisr.author meta folders saving the json that was created (a sketch follows).
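# Minimal sketch of that idea (assumption: one <username>.meta json file per
# submission inside metzdays; the helper is not wired into the loop below).
def write_meta(submission, meta_dir):
    meta = {'author': str(submission.author),
            'title': submission.title,
            'up': submission.ups,
            'down': submission.downs,
            'created': submission.created}
    with open(os.path.join(meta_dir, str(submission.author) + '.meta'), 'w') as f:
        json.dump(meta, f)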
# In[69]:
for lisr in lisrgc:
gtdrndic.update({'title': lisr.title})
lisauth.append(str(lisr.author))
for osliz in os.listdir(metzdays):
with open(str(lisr.author) + '.meta', "w") as f:
rstrin = lisr.title.encode('ascii', 'ignore').decode('ascii')
#print matdict
#metadict = dict()
#for lisz in lisrgc:
# metadict.update({'up': lisz.ups})
# metadict.update({'down': lisz.downs})
# metadict.update({'title': lisz.title})
# metadict.update({'created': lisz.created})
f.write(rstrin)
# In[70]:
#matdict
# I have it creating a meta folder and creating/writing username.meta files. It wrote 'test' in each folder, but now it writes the post title for each author: the username/image data. It should be writing more than the author and title - maybe upvotes/downvotes, subreddit, time published etc.
#
# In[71]:
#os.listdir(dayzpath)
# Instead of creating these white images, why not download the art replies of the reference photo.
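# Sketch of that idea: pull i.imgur links out of the replies to each reference
# post instead of writing blank placeholders (assumption: the regex is
# illustrative, and MoreComments objects without a body are skipped).
def art_reply_links(submission):
    links = []
    for comment in submission.comments:
        body = getattr(comment, 'body', '')
        links.extend(re.findall(r'http://i\.imgur\.com/\S+?\.(?:png|jpg)', body))
    return links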
# In[72]:
#for lisa in lisauth:
# #print lisa + '-line.png'
# im = Image.new("RGB", (512, 512), "white")
# im.save(lisa + '-line.png')
# im = Image.new("RGB", (512, 512), "white")
# im.save(lisa + '-bw.png')
#print lisa + '-bw.png'
# im = Image.new("RGB", (512, 512), "white")
# im.save(lisa + '-colour.png')
#print lisa + '-colour.png'
# In[73]:
#lisauth
# I want to save the list of usernames that submit images as png files in a dir.
# Currently when I call the list of authors it returns Redditor(user_name='theusername'). I want to return 'theusername'.
# Once this is resolved I can add '-line.png' '-bw.png' '-colour.png' to each folder.
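# Short sketch for the note above: str() on a praw Redditor gives the bare
# username, so the three per-user filenames can be built directly from it.
def author_filenames(redditor):
    name = str(redditor)  # 'theusername' rather than Redditor(user_name='theusername')
    return [name + suffix for suffix in ('-line.png', '-bw.png', '-colour.png')]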
# In[74]:
#lisr.author
# In[75]:
namlis = []
# In[76]:
#opsinz = open('/home/wcmckee/visignsys/index.meta', 'r')
#panz = opsinz.read()
# In[77]:
os.chdir(rmgzdays)
# Filter out the non-jpeg/png links. Need to perform a request or use the imgur api to get the jpeg/png files from the link. Hey, maybe bs4?
# In[ ]:
# In[78]:
#from imgurpython import ImgurClient
# In[79]:
#opps = open('/home/wcmckee/ps.txt', 'r')
#opzs = open('/home/wcmckee/ps2.txt', 'r')
#oprd = opps.read()
#opzrd = opzs.read()
# In[80]:
#client = ImgurClient(oprd, opzrd)
# Example request
#items = client.gallery()
#for item in items:
# print(item.link)
#itz = client.get_album_images()
# In[81]:
#galim = client.get_image('SBaV275')
# In[82]:
#galim.size
# In[83]:
#gelim = client.get_album_images('LTDJ9')
# In[84]:
#gelim
# In[85]:
#from urlparse import urlparse
# In[86]:
#linklis = []
# I need to get the image ids from each url. Strip the http://imgur.com/ from the string. The gallery id is the random characters after; if it's an album, 'a/' is added, and if there are multiple images a ',' is used to separate them.
#
# Doesn't currently work.
#
# Having problems with mixed /a/etwet and wetfwet urls. Using .strip('/') to remove forward slash in front of path.
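# Sketch of the id stripping described above (assumption: Python 3, so urlparse
# lives in urllib.parse; album paths start with 'a/' and multi-image galleries
# separate ids with commas).
from urllib.parse import urlparse

def imgur_ids(url):
    path = urlparse(url).path.strip('/')
    is_album = path.startswith('a/')
    if is_album:
        path = path[2:]
    return is_album, path.split(',')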
# In[87]:
#pathlis = []
# In[88]:
#for rdz in lisrgc:
# if 'http://imgur.com/' in rdz.url:
# print rdz.url
# parsed = urlparse(rdz.url)
## print parsed.path.strip('/')
# pathlis.append(parsed.path.strip('/'))
#for pared in parsed.path:
# print pared.strip('/')
#itgar = client.gallery_item(parsed.path.strip('/'))
#itz = client.get_album_images(parsed.path.strip('a/'))
# reimg = requests.get(rdz.url)
## retxt = reimg.text
# souptxt = BeautifulSoup(''.join(retxt))
# soupurz = souptxt.findAll('img')
# for soupuz in soupurz:
# imgurl = soupuz['src']
# print imgurl
# linklis.append(imgurl)
#try:
# imzdata = requests.get(imgurl)
# In[89]:
#pathlis
# In[90]:
#noalis = []
# In[91]:
#for pathl in pathlis:
# if 'a/' in pathl:
# print 'a found'
# else:
# noalis.append(pathl)
# In[92]:
#if 'a/' in pathlis:
# print 'a found'
#else:
# noalis.append(pathlis)
# In[93]:
#for noaz in noalis:
# print noaz
#itgar = client.gallery_item()
# In[94]:
#linklis
# In[95]:
#if '.jpg' in linklis:
# print 'yes'
#else:
# print 'no'
# In[96]:
#panz()
for rdz in lisrgc:
(rdz.title)
#a(rdz.url)
if 'http://i.imgur.com' in rdz.url:
#print rdz.url
print (rdz.url)
url = rdz.url
response = requests.get(url, stream=True)
with open(str(rdz.author) + '-reference.png', 'wb') as out_file:
shutil.copyfileobj(response.raw, out_file)
del response
# In[97]:
apsize = []
# In[98]:
aptype = []
# In[99]:
basewidth = 600
# In[100]:
imgdict = dict()
# In[101]:
for rmglis in os.listdir(rmgzdays):
#print rmglis
im = Image.open(rmglis)
#print im.size
imgdict.update({rmglis : im.size})
#im.thumbnail(size, Image.ANTIALIAS)
#im.save(file + ".thumbnail", "JPEG")
apsize.append(im.size)
aptype.append(rmglis)
# In[102]:
#for imdva in imgdict.values():
#print imdva
#for deva in imdva:
#print deva
# if deva < 1000:
# print 'omg less than 1000'
# else:
# print 'omg more than 1000'
# print deva / 2
#print imgdict.values
# Needs to update imgdict.values() with the new width. The height must be scaled in proportion as well (see the working sketch below).
# In[103]:
#basewidth = 300
#img = Image.open('somepic.jpg')
#wpercent = (basewidth/float(img.size[0]))
#hsize = int((float(img.size[1])*float(wpercent)))
#img = img.resize((basewidth,hsize), PIL.Image.ANTIALIAS)
#img.save('sompic.jpg')
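# Working version of the commented-out snippet above, hedged as a sketch
# (assumption: overwriting the file in place is acceptable and 800px is the
# target width from the TODO notes). Height is scaled in proportion rather
# than halved outright.
def shrink_to_width(path, basewidth=800):
    img = Image.open(path)
    if img.size[0] <= basewidth:
        return img.size
    wpercent = basewidth / float(img.size[0])
    hsize = int(float(img.size[1]) * wpercent)
    img = img.resize((basewidth, hsize), Image.ANTIALIAS)
    img.save(path)
    return img.size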
# In[104]:
#os.chdir(metzdays)
# In[ ]:
# In[105]:
#for numz in apsize:
# print numz[0]
# if numz[0] > 800:
# print ('greater than 800')
# else:
# print ('less than 800!')
# In[106]:
reliz = []
# In[107]:
for refls in os.listdir(rmgzdays):
#print rmgzdays + refls
reliz.append(iwcpath + '/' + refls)
# In[108]:
len(reliz)
# Tweet each reference img in the list, removing the item once it's tweeted so that the same item isn't tweeted twice.
# Make a new list of items to tweet, appending new items when the site is updated (a sketch follows).
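# Sketch of that bookkeeping (assumption: a plain text log of already-tweeted
# paths is enough state between runs; 'tweeted.log' is an illustrative name).
def next_untweeted(ref_paths, log_path='tweeted.log'):
    tweeted = set()
    if os.path.isfile(log_path):
        with open(log_path) as f:
            tweeted = set(f.read().split())
    for path in ref_paths:
        if path not in tweeted:
            with open(log_path, 'a') as f:
                f.write(path + '\n')
            return path
    return None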
# In[109]:
for apt in aptype:
print (apt)
# In[ ]:
# In[ ]:
# In[110]:
#opad = open('/home/wcmckee/ad.html', 'r')
# In[111]:
#opred = opad.read()
# In[112]:
#str2 = opred.replace("\n", "")
# In[113]:
#str2
# In[ ]:
# In[114]:
doc = dominate.document(title='GetsDrawn')
with doc.head:
link(rel='stylesheet', href='style.css')
script(type ='text/javascript', src='script.js')
#str(str2)
with div():
attr(cls='header')
h1('GetsDrawn')
p(img('imgs/getsdrawn-bw.png', src='imgs/getsdrawn-bw.png'))
#p(img('imgs/15/01/02/ReptileLover82-reference.png', src= 'imgs/15/01/02/ReptileLover82-reference.png'))
h1('Updated ', strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime()))
#p(panz)
p(bodycom)
with doc:
with div(id='body').add(ol()):
for rdz in reliz:
#h1(rdz.title)
#a(rdz.url)
#p(img(rdz, src='%s' % rdz))
#print rdz
p(img(rdz, src = rdz))
p(rdz)
#print rdz.url
#if '.jpg' in rdz.url:
# img(rdz.urlz)
#else:
# a(rdz.urlz)
#h1(str(rdz.author))
#li(img(i.lower(), src='%s' % i))
with div():
attr(cls='body')
p('GetsDrawn is open source')
a('https://github.com/getsdrawn/getsdrawndotcom')
a('https://reddit.com/r/redditgetsdrawn')
#print doc
# In[115]:
docre = doc.render()
#s = docre.decode('ascii', 'ignore')
yourstring = docre.encode('ascii', 'ignore').decode('ascii')
indfil = ('/home/wcmckee/getsdrawndotcom/index.html')
mkind = open(indfil, 'w')
mkind.write(yourstring)
mkind.close()
# In[116]:
mkind = open(indfil, 'w')
mkind.write(yourstring)
mkind.close()
# In[110]:
#os.system('scp -r /home/wcmckee/getsdrawndotcom/ [email protected]:/home/wcmckee/getsdrawndotcom')
# In[111]:
#rsync -azP source destination
# In[112]:
#updatehtm = raw_input('Update index? Y/n')
#updateref = raw_input('Update reference? Y/n')
#if 'y' or '' in updatehtm:
# os.system('scp -r /home/wcmckee/getsdrawndotcom/index.html [email protected]:/home/wcmckee/getsdrawndotcom/index.html')
#elif 'n' in updatehtm:
# print 'not uploading'
#if 'y' or '' in updateref:
# os.system('rsync -azP /home/wcmckee/getsdrawndotcom/ [email protected]:/home/wcmckee/getsdrawndotcom/')
# In[113]:
#os.system('scp -r /home/wcmckee/getsdrawndotcom/index.html [email protected]:/home/wcmckee/getsdrawndotcom/index.html')
# In[105]:
#os.system('scp -r /home/wcmckee/getsdrawndotcom/style.css [email protected]:/home/wcmckee/getsdrawndotcom/style.css')
# In[118]:
my_bot.send_tweet(twez)
# In[ ]:
# In[321]:
# In[138]:
# In[138]:
# In[ ]:
| mit |
JackKelly/neuralnilm_prototype | scripts/e278.py | 2 | 51343 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.objectives import scaled_cost
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 500
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=1500,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.7,
n_seq_per_batch=10,
subsample_target=3,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=False,
standardise_input=True,
standardise_targets=True,
input_padding=0,
lag=0,
input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
'std': np.array([ 0.12636775], dtype=np.float32)},
target_stats={
'mean': np.array([ 0.04066789, 0.01881946,
0.24639061, 0.17608672, 0.10273963],
dtype=np.float32),
'std': np.array([ 0.11449792, 0.07338708,
0.26608968, 0.33463112, 0.21250485],
dtype=np.float32)}
)
def change_learning_rate(net, epoch):
net.updates = partial(nesterov_momentum, learning_rate=0.001)
net.compile()
def change_subsample(net, epoch):
net.source.subsample_target = 3
net.generate_validation_data_and_set_shapes()
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=scaled_cost,
updates=partial(nesterov_momentum, learning_rate=0.01),
do_save_activations=True,
epoch_callbacks={501: change_learning_rate}
)
def exp_a(name):
# avg valid cost = 0.5296852589
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net = Net(**net_dict_copy)
return net
def exp_b(name):
# 1 layer, pool after RNN
# avg valid cost = 0.8358715773
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net = Net(**net_dict_copy)
return net
def exp_c(name):
# 2 layers, pool in between
# avg valid cost = 0.5183933973
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=(1/sqrt(N))),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net = Net(**net_dict_copy)
return net
def exp_d(name):
# 3 layers, pool after first layer
# avg valid cost = 0.5396855474
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=(1/sqrt(N))),
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=(1/sqrt(N))),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net = Net(**net_dict_copy)
return net
def exp_e(name):
# layerwise pre-training
# avg valid cost = 0.6081719398
source_dict_copy = deepcopy(source_dict)
source_dict_copy['subsample_target'] = 1
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net_dict_copy['layer_changes'] = {
501: {
'remove_from': -3,
'new_layers': [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
},
1001: {
'remove_from': -3,
'callback': change_subsample,
'new_layers': [
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
}
}
net = Net(**net_dict_copy)
return net
def exp_f(name):
# layerwise pre-training (pre-train with pool)
# need to re-run
source_dict_copy = deepcopy(source_dict)
source_dict_copy['subsample_target'] = 1
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net_dict_copy['layer_changes'] = {
501: {
'remove_from': -3,
'callback': change_subsample,
'new_layers': [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
}
]
},
1001: {
'remove_from': -3,
'new_layers': [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
}
}
net = Net(**net_dict_copy)
return net
def exp_g(name):
# sigmoid
# avg valid cost = 1.5114594698
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': sigmoid
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': sigmoid
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': sigmoid
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net = Net(**net_dict_copy)
return net
def exp_h(name):
# ReLU
# doesn't train: training error is 0
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': rectify
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': rectify
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net = Net(**net_dict_copy)
return net
def exp_i(name):
# 2 x dense layers at end (both with 5 units)
# avg valid cost = 0.5794851780
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(source.n_outputs)))
}
]
net = Net(**net_dict_copy)
return net
def exp_j(name):
# 2 x dense layers at end (penultimate with 50 units)
# avg valid cost = 0.5457109213
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': N,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net = Net(**net_dict_copy)
return net
def exp_k(name):
# RNN output
# doesn't train (0 error)
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': source.n_outputs,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': None
}
]
net = Net(**net_dict_copy)
return net
def exp_l(name):
# RNN output, 2 layers
# NaNs
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': source.n_outputs,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': None
},
{
'type': BidirectionalRecurrentLayer,
'num_units': source.n_outputs,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(source.n_outputs)),
'nonlinearity': None
}
]
net = Net(**net_dict_copy)
return net
def exp_m(name):
# Conv AND pool, with 50 filters, filter_length=10
# avg valid cost = 0.4936202168
source_dict_copy = deepcopy(source_dict)
source_dict_copy.update(dict(
subsample_target=3,
input_padding=9
))
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 50,
'filter_length': 10,
'stride': 1,
'nonlinearity': tanh,
'W': Normal(std=1/sqrt(N))
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net = Net(**net_dict_copy)
return net
def exp_n(name):
# Conv AND pool, with 10 filters
# avg valid cost = 0.4842122793 (run on 277)
# avg valid cost = 0.5845696926 (run on 278)
# need to re-run with correct initialisations
source_dict_copy = deepcopy(source_dict)
source_dict_copy.update(dict(
subsample_target=3,
input_padding=2
))
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 10,
'filter_length': 3,
'stride': 1,
'nonlinearity': tanh,
'W': Normal(std=1/sqrt(N))
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net = Net(**net_dict_copy)
return net
def exp_o(name):
# 2 lots of conv, then pool
# need to re-run with correct initialisations
source_dict_copy = deepcopy(source_dict)
source_dict_copy.update(dict(
subsample_target=3,
input_padding=4
))
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 32,
'filter_length': 3,
'stride': 1,
'nonlinearity': tanh,
'W': Normal(std=1/sqrt(N))
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 3,
'stride': 1,
'nonlinearity': tanh,
'W': Normal(std=1/sqrt(N))
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net = Net(**net_dict_copy)
return net
def exp_p(name):
# 2 lots of conv, then pool
# a re-run of O but with correct initialisations
source_dict_copy = deepcopy(source_dict)
source_dict_copy.update(dict(
subsample_target=3,
input_padding=4
))
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 32,
'filter_length': 3,
'stride': 1,
'nonlinearity': tanh,
'W': Normal(std=1/sqrt(N))
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(32)),
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 3,
'stride': 1,
'nonlinearity': tanh,
'W': Normal(std=1/sqrt(50))
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(16)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net = Net(**net_dict_copy)
return net
def exp_q(name):
# Conv AND pool, with 10 filters
# avg valid cost = 0.4842122793 (run on 277)
# avg valid cost = 0.5845696926 (run on 278)
# re-run of P but with correct initialisations
source_dict_copy = deepcopy(source_dict)
source_dict_copy.update(dict(
subsample_target=3,
input_padding=2
))
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 10,
'filter_length': 3,
'stride': 1,
'nonlinearity': tanh,
'W': Normal(std=1/sqrt(N))
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(10)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net = Net(**net_dict_copy)
net.load_params(iteration=1139)
return net
def exp_r(name):
# 2 lots of conv, then pool
# a re-run of O but with 50 filters and correct init
source_dict_copy = deepcopy(source_dict)
source_dict_copy.update(dict(
subsample_target=3,
input_padding=4
))
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': N,
'filter_length': 3,
'stride': 1,
'nonlinearity': tanh,
'W': Normal(std=1/sqrt(N))
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': N,
'filter_length': 3,
'stride': 1,
'nonlinearity': tanh,
'W': Normal(std=1/sqrt(N))
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net = Net(**net_dict_copy)
return net
def exp_s(name):
# Conv AND pool, with 50 filters
# avg valid cost = 0.4842122793 (run on 277)
# avg valid cost = 0.5845696926 (run on 278)
# re-run of P but with correct initialisations and 50 filters
source_dict_copy = deepcopy(source_dict)
source_dict_copy.update(dict(
subsample_target=3,
input_padding=2
))
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': N,
'filter_length': 3,
'stride': 1,
'nonlinearity': tanh,
'W': Normal(std=1/sqrt(N))
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net = Net(**net_dict_copy)
return net
def exp_t(name):
# a but with no random seed
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source,
seed=None
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net = Net(**net_dict_copy)
return net
def exp_u(name):
# a but with no random seed
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source,
seed=None
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net = Net(**net_dict_copy)
return net
def exp_v(name):
# a
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net = Net(**net_dict_copy)
return net
def exp_w(name):
# 2x2x pool
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source,
downsample_target=4
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 2, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 2, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net = Net(**net_dict_copy)
return net
def exp_x(name):
# 3x2x pool
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source,
downsample_target=6
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 2, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net = Net(**net_dict_copy)
return net
def exp_y(name):
# 5-way RNN as penultimate layer
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 5,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(5)))
}
]
net = Net(**net_dict_copy)
return net
def exp_z(name):
# layerwise pre-training
# avg valid cost = 0.6081719398
source_dict_copy = deepcopy(source_dict)
source_dict_copy['subsample_target'] = 1
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source,
epoch_callbacks={4501: change_learning_rate}
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net_dict_copy['layer_changes'] = {
2001: {
'remove_from': -3,
'new_layers': [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
},
4001: {
'remove_from': -3,
'callback': change_subsample,
'new_layers': [
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
}
}
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('qrstuvwxyz')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=6000 if experiment == 'z' else 2000)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
if __name__ == "__main__":
main()
| mit |
xyguo/scikit-learn | examples/manifold/plot_compare_methods.py | 39 | 4036 | """
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`example_manifold_plot_manifold_sphere.py`
Note that the purpose of the MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space. Unlike other
manifold-learning algorithms, it does not seek an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <[email protected]>
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(251, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.view_init(4, -72)
except:
ax = fig.add_subplot(251, projection='3d')
plt.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
eigen_solver='auto',
method=method).fit_transform(X)
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
t1 = time()
print("Isomap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
mds = manifold.MDS(n_components, max_iter=100, n_init=1)
Y = mds.fit_transform(X)
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
se = manifold.SpectralEmbedding(n_components=n_components,
n_neighbors=n_neighbors)
Y = se.fit_transform(X)
t1 = time()
print("SpectralEmbedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='pca', random_state=0)
Y = tsne.fit_transform(X)
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
BhallaLab/moose-full | moose-gui/mplot.py | 2 | 7910 | # mplot.py ---
#
# Filename: mplot.py
# Description:
# Author:
# Maintainer:
# Created: Mon Mar 11 20:24:26 2013 (+0530)
# Version:
# Last-Updated: Wed Jul 3 10:32:35 2013 (+0530)
# By: subha
# Update #: 309
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
# Moose plot widget default implementation. This should be rich enough
# to suffice for most purposes.
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
"""
*TODO*
1) Option for default colors, markers, etc.
2) Option for configuring number of rows and columns of
subplots. (I think matplotlib grids will be a bit too much to
implement). Problem is this has to be done before actual axes are
created (as far as I know). Idea: can we do something like movable
widgets example in Qt?
3) Option for selecting any line or set of lines and change its
configuration (as in dataviz).
4) Association between plots and the data source.
5) Lots and lots of scipy/numpy/scikits/statsmodels utilities can be added. To
start with, we should have
   a) digital filters
b) fft
c) curve fitting
6) For (5), think of another layer of plugins. Think of this as a
standalone program. All these facilities should again be
pluggable. We do not want to overwhelm novice users with fancy
machine-learning stuff. They should be made available only on
request.
- There is a proposal for data analysis library by Andrew Davison ...
"""
__author__ = "Subhasis Ray"
import sys
import numpy as np
from PyQt4 import QtGui, QtCore
from PyQt4.Qt import Qt
from matplotlib import mlab
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
#from moose import utils
import moose
from PyQt4.QtCore import *
class CanvasWidget(FigureCanvas):
"""Widget to draw plots on.
This class keep track of all the axes in a dictionary. The key for
an axis is its index number in sequence of creation.
next_id: The key for the next axis.
    current_id: Key for current axis (any plotting will happen on
this).
"""
updateSignal = pyqtSignal()
def __init__(self, model, graph, index, *args, **kwargs):
self.model = model
self.graph = graph
self.index = index
# QColor(243, 239, 238, 255)
self.figure = Figure(facecolor = '#F3EFEE')#figsize=(1,1))
FigureCanvas.__init__(self, self.figure, *args, **kwargs)
self.figure.set_canvas(self)
# self.set_xlabel('Time (s)')
# self.set_ylabel('Concentration (mM)')
if len(args) > 0 and isinstance(args[0], QtGui.QWidget):
self.reparent(args[0])
elif (kwargs is not None) and ('parent' in kwargs):
self.reparent(kwargs['parent'])
#self.setAcceptDrops(True)
# self.setMaximumSize(100, 100)
FigureCanvas.updateGeometry(self)
self.axes = {}
self.next_id = 0
self.current_id = -1
tabList = []
self.addTabletoPlot = ''
self.setAcceptDrops(True)
self.gridMode = False
def dragEnterEvent(self, event):
if event.mimeData().hasFormat('text/plain'):
event.acceptProposedAction()
def dragMoveEvent(self, event):
if event.mimeData().hasFormat('text/plain'):
event.acceptProposedAction()
def eventFilter(self, source, event):
if (event.type() == QtCore.QEvent.Drop):
pass
def dropEvent(self, event):
"""Insert an element of the specified class in drop location"""
if not event.mimeData().hasFormat('text/plain'):
return
# print " active window ", self.isActiveWindow()
# print "Mouse : ", self.mouse
# pos = self.mapFromGlobal(QCursor.pos())
# print "Mouse Position : ", pos
modelRoot, element = event.mimeData().data
if isinstance (element,moose.PoolBase):
tablePath = moose.utils.create_table_path(self.model, self.graph, element, "Conc")
table = moose.utils.create_table(tablePath, element, "Conc","Table2")
# moose.connect(table, 'requestOut', element, 'getConc')
self.updateSignal.emit()
elif isinstance(element, moose.CompartmentBase):
tablePath = moose.utils.create_table_path(self.model, self.graph, element, "Vm")
table = moose.utils.create_table(tablePath, element, "Vm","Table")
self.updateSignal.emit()
else:
QtGui.QMessageBox.question(self, 'Message',"This element's properties cannot be plotted.", QtGui.QMessageBox.Ok)
def addSubplot(self, rows, cols):
"""Add a subplot to figure and set it as current axes."""
assert(self.next_id <= rows * cols)
axes = self.figure.add_subplot(rows, cols, self.next_id+1)
axes.set_xlabel("Time (s)")
axes.set_ylabel("Concentration (mM)")
axes.set_xlim(left=0.0)
axes.set_ylim(bottom=0.0)
self.axes[self.next_id] = axes
axes.set_title("Graph " + str(self.index + 1))
self.current_id = self.next_id
self.next_id += 1
labelList = []
axes.legend(loc='upper center')
return axes
def plot(self, *args, **kwargs):
#self.callAxesFn('legend',loc='lower center',bbox_to_anchor=(0.5, -0.03),fancybox=True, shadow=True, ncol=3)
return self.callAxesFn('plot', *args, **kwargs)
def callAxesFn(self, fname, *args, **kwargs):
"""Call any arbitrary function of current axes object."""
if self.current_id < 0:
self.addSubplot(1,1)
        fn = getattr(self.axes[self.current_id], fname)
return fn(*args, **kwargs)
def resize_event(self, event):
print("Resize event called ", event)
def toggleGrid(self):
self.gridMode = not self.gridMode
for key in self.axes:
self.axes[key].grid(self.gridMode)
self.draw()
def setXLimit(self, minX, maxX):
for key in self.axes:
self.axes[key].set_xlim([minX, maxX])
self.draw()
import sys
import os
import config
import unittest
from PyQt4.QtTest import QTest
class CanvasWidgetTests(unittest.TestCase):
def setUp(self):
self.app = QtGui.QApplication([])
QtGui.qApp = self.app
icon = QtGui.QIcon(os.path.join(config.KEY_ICON_DIR,'moose_icon.png'))
self.app.setWindowIcon(icon)
self.window = QtGui.QMainWindow()
self.cwidget = CanvasWidget()
self.window.setCentralWidget(self.cwidget)
self.window.show()
def testPlot(self):
"""Test plot function"""
self.cwidget.addSubplot(1,1)
self.cwidget.plot(np.arange(1000), mlab.normpdf(np.arange(1000), 500, 150))
def testCallAxesFn(self):
self.cwidget.addSubplot(1,1)
self.cwidget.callAxesFn('scatter', np.random.randint(0, 100, 100), np.random.randint(0, 100,100))
def tearDown(self):
self.app.exec_()
if __name__ == '__main__':
unittest.main()
#
# mplot.py ends here
| gpl-2.0 |
GGoussar/scikit-image | doc/examples/xx_applications/plot_morphology.py | 9 | 8867 | """
=======================
Morphological Filtering
=======================
Morphological image processing is a collection of non-linear operations related
to the shape or morphology of features in an image, such as boundaries,
skeletons, etc. In any given technique, we probe an image with a small shape or
template called a structuring element, which defines the region of interest or
neighborhood around a pixel.
In this document we outline the following basic morphological operations:
1. Erosion
2. Dilation
3. Opening
4. Closing
5. White Tophat
6. Black Tophat
7. Skeletonize
8. Convex Hull
To get started, let's load an image using ``io.imread``. Note that morphology
functions only work on gray-scale or binary images, so we set ``as_grey=True``.
"""
import os
import matplotlib.pyplot as plt
from skimage.data import data_dir
from skimage.util import img_as_ubyte
from skimage import io
orig_phantom = img_as_ubyte(io.imread(os.path.join(data_dir, "phantom.png"),
as_grey=True))
fig, ax = plt.subplots()
ax.imshow(orig_phantom, cmap=plt.cm.gray)
######################################################################
# Let's also define a convenience function for plotting comparisons:
def plot_comparison(original, filtered, filter_name):
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(8, 4), sharex=True,
sharey=True)
ax1.imshow(original, cmap=plt.cm.gray)
ax1.set_title('original')
ax1.axis('off')
ax1.set_adjustable('box-forced')
ax2.imshow(filtered, cmap=plt.cm.gray)
ax2.set_title(filter_name)
ax2.axis('off')
ax2.set_adjustable('box-forced')
######################################################################
# Erosion
# =======
#
# Morphological ``erosion`` sets a pixel at (i, j) to the *minimum over all
# pixels in the neighborhood centered at (i, j)*. The structuring element,
# ``selem``, passed to ``erosion`` is a boolean array that describes this
# neighborhood. Below, we use ``disk`` to create a circular structuring
# element, which we use for most of the following examples.
from skimage.morphology import erosion, dilation, opening, closing, white_tophat
from skimage.morphology import black_tophat, skeletonize, convex_hull_image
from skimage.morphology import disk
selem = disk(6)
eroded = erosion(orig_phantom, selem)
plot_comparison(orig_phantom, eroded, 'erosion')
######################################################################
# Notice how the white boundary of the image disappears or gets eroded as we
# increase the size of the disk. Also notice the increase in size of the two
# black ellipses in the center and the disappearance of the 3 light grey
# patches in the lower part of the image.
#
#Dilation
#========
#
#Morphological ``dilation`` sets a pixel at (i, j) to the *maximum over all
#pixels in the neighborhood centered at (i, j)*. Dilation enlarges bright
#regions and shrinks dark regions.
dilated = dilation(orig_phantom, selem)
plot_comparison(orig_phantom, dilated, 'dilation')
######################################################################
# Notice how the white boundary of the image thickens, or gets dilated, as we
#increase the size of the disk. Also notice the decrease in size of the two
#black ellipses in the centre, and the thickening of the light grey circle
#in the center and the 3 patches in the lower part of the image.
#
#Opening
#=======
#
#Morphological ``opening`` on an image is defined as an *erosion followed by
#a dilation*. Opening can remove small bright spots (i.e. "salt") and
#connect small dark cracks.
opened = opening(orig_phantom, selem)
plot_comparison(orig_phantom, opened, 'opening')
######################################################################
#Since ``opening`` an image starts with an erosion operation, light regions
#that are *smaller* than the structuring element are removed. The dilation
#operation that follows ensures that light regions that are *larger* than
#the structuring element retain their original size. Notice how the light
#and dark shapes in the center retain their original thickness but the 3 lighter
#patches in the bottom get completely eroded. The size dependence is
#highlighted by the outer white ring: The parts of the ring thinner than the
#structuring element were completely erased, while the thicker region at the
#top retains its original thickness.
#
#Closing
#=======
#
#Morphological ``closing`` on an image is defined as a *dilation followed by
#an erosion*. Closing can remove small dark spots (i.e. "pepper") and
#connect small bright cracks.
#
#To illustrate this more clearly, let's add a small crack to the white
#border:
phantom = orig_phantom.copy()
phantom[10:30, 200:210] = 0
closed = closing(phantom, selem)
plot_comparison(phantom, closed, 'closing')
######################################################################
# Since ``closing`` an image starts with a dilation operation, dark regions
# that are *smaller* than the structuring element are removed. The erosion
# operation that follows ensures that dark regions that are *larger* than the
# structuring element retain their original size. Notice how the white
# ellipses at the bottom get connected because of dilation, but other dark
# region retain their original sizes. Also notice how the crack we added is
# mostly removed.
#
# White tophat
# ============
#
# The ``white_tophat`` of an image is defined as the *image minus its
# morphological opening*. This operation returns the bright spots of the
# image that are smaller than the structuring element.
#
# To make things interesting, we'll add bright and dark spots to the image:
phantom = orig_phantom.copy()
phantom[340:350, 200:210] = 255
phantom[100:110, 200:210] = 0
w_tophat = white_tophat(phantom, selem)
plot_comparison(phantom, w_tophat, 'white tophat')
######################################################################
# As you can see, the 10-pixel wide white square is highlighted since it is
# smaller than the structuring element. Also, the thin, white edges around
# most of the ellipse are retained because they're smaller than the
# structuring element, but the thicker region at the top disappears.
#
# Black tophat
# ============
#
# The ``black_tophat`` of an image is defined as its morphological **closing
# minus the original image**. This operation returns the *dark spots of the
# image that are smaller than the structuring element*.
b_tophat = black_tophat(phantom, selem)
plot_comparison(phantom, b_tophat, 'black tophat')
######################################################################
#As you can see, the 10-pixel wide black square is highlighted since
#it is smaller than the structuring element.
#
#**Duality**
#
#As you should have noticed, many of these operations are simply the reverse
#of another operation. This duality can be summarized as follows:
#
# 1. Erosion <-> Dilation
#
# 2. Opening <-> Closing
#
# 3. White tophat <-> Black tophat
#
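# A quick, hedged illustration of the first duality: dilating an image gives
# (up to boundary handling, which is assumed to match here) the same result
# as eroding the complemented image and then complementing the output.

dual = 255 - erosion(255 - orig_phantom, selem)
plot_comparison(dilated, dual, 'dual of erosion')

######################################################################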
#Skeletonize
#===========
#
#Thinning is used to reduce each connected component in a binary image to a
#*single-pixel wide skeleton*. It is important to note that this is
#performed on binary images only.
horse = io.imread(os.path.join(data_dir, "horse.png"), as_grey=True)
sk = skeletonize(horse == 0)
plot_comparison(horse, sk, 'skeletonize')
######################################################################
#
# As the name suggests, this technique is used to thin the image to a 1-pixel
# wide skeleton by applying thinning successively.
#
# Convex hull
# ===========
#
# The ``convex_hull_image`` is the *set of pixels included in the smallest
# convex polygon that surround all white pixels in the input image*. Again
# note that this is also performed on binary images.
hull1 = convex_hull_image(horse == 0)
plot_comparison(horse, hull1, 'convex hull')
######################################################################
# As the figure illustrates, ``convex_hull_image`` gives the smallest polygon
# which covers the white or True completely in the image.
#
# If we add a small grain to the image, we can see how the convex hull adapts
# to enclose that grain:
import numpy as np
horse_mask = horse == 0
horse_mask[45:50, 75:80] = 1
hull2 = convex_hull_image(horse_mask)
plot_comparison(horse_mask, hull2, 'convex hull')
######################################################################
#
# Additional Resources
# ====================
#
# 1. `MathWorks tutorial on morphological processing
# <http://www.mathworks.com/help/images/morphology-fundamentals-dilation-and-
# erosion.html>`_
#
# 2. `Auckland university's tutorial on Morphological Image
# Processing <http://www.cs.auckland.ac.nz/courses/compsci773s1c/lectures
# /ImageProcessing-html/topic4.htm>`_
#
# 3. http://en.wikipedia.org/wiki/Mathematical_morphology
| bsd-3-clause |
Nikolay-Lysenko/dsawl | dsawl/active_learning/utils.py | 1 | 1176 | """
This file contains some auxiliaries.
@author: Nikolay Lysenko
"""
from typing import List, Optional
import numpy as np
from sklearn.base import BaseEstimator, clone
from sklearn.model_selection import StratifiedKFold
from dsawl.stacking.stackers import FoldType
def make_committee(
est: BaseEstimator,
X_train: np.ndarray, y_train: np.ndarray,
splitter: Optional[FoldType] = None
) -> List[BaseEstimator]:
"""
Make committee from a single estimator by fitting it to
various folds.
:param est:
estimator instance that has method `fit`
:param X_train:
feature representation of training objects
:param y_train:
target label
:param splitter:
instance that can split data into folds
:return:
list of fitted instances
"""
committee = []
splitter = splitter or StratifiedKFold()
for train_index, test_index in splitter.split(X_train, y_train):
X_curr_train = X_train[train_index]
y_curr_train = y_train[train_index]
curr_est = clone(est).fit(X_curr_train, y_curr_train)
committee.append(curr_est)
return committee
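
# Usage sketch: build a committee from a single scikit-learn classifier on a
# toy dataset. The choice of ``LogisticRegression`` and the synthetic data
# below are illustrative assumptions only.
if __name__ == '__main__':
    from sklearn.linear_model import LogisticRegression

    rng = np.random.RandomState(0)
    X_demo = rng.normal(size=(60, 3))
    y_demo = (X_demo[:, 0] > 0).astype(int)

    committee = make_committee(LogisticRegression(), X_demo, y_demo)
    # Disagreement between the members' predictions can be used to rank
    # unlabelled objects for active-learning queries.
    votes = np.column_stack([member.predict(X_demo) for member in committee])
    print(votes.shape)  # (60, number of folds)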
| mit |
ndingwall/scikit-learn | examples/linear_model/plot_logistic.py | 17 | 1571 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic function
=========================================================
Shown in the plot is how the logistic regression would, in this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logistic curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from scipy.special import expit
# Generate a toy dataset: it's just a straight line with some Gaussian noise:
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# Fit the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
loss = expit(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='red', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(range(-5, 10))
plt.yticks([0, 0.5, 1])
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.legend(('Logistic Regression Model', 'Linear Regression Model'),
loc="lower right", fontsize='small')
plt.tight_layout()
plt.show()
| bsd-3-clause |
justincely/cos_monitoring | cosmo/monitors/dark_monitors.py | 1 | 22621 |
import os
import json
import datetime
import numpy as np
import pandas as pd
import plotly.io as pio
import plotly.express as px
import plotly.graph_objs as go
# from tqdm import tqdm
from typing import Any
from urllib import request
from itertools import repeat
from plotly.subplots import make_subplots
from monitorframe.monitor import BaseMonitor
from astropy.convolution import Box1DKernel, convolve
from .. import SETTINGS
from .data_models import DarkDataModel
from ..monitor_helpers import explode_df, absolute_time
COS_MONITORING = SETTINGS['output']
NOAA_URL = 'https://services.swpc.noaa.gov/json/solar-cycle/observed-solar-cycle-indices.json'
# ----------------------------------------------------------------------------#
# def run_all_dark_monitors():
# fuva_bottom_monitor = FUVABottomDarkMonitor()
# fuva_left_monitor = FUVALeftDarkMonitor()
# fuva_top_monitor = FUVATopDarkMonitor()
# fuva_right_monitor = FUVARightDarkMonitor()
# fuva_inner_monitor = FUVAInnerDarkMonitor()
# fuvb_bottom_monitor = FUVBBottomDarkMonitor()
# fuvb_left_monitor = FUVBLeftDarkMonitor()
# fuvb_top_monitor = FUVBTopDarkMonitor()
# fuvb_right_monitor = FUVBRightDarkMonitor()
# fuvb_inner_monitor = FUVBInnerDarkMonitor()
# nuv_monitor = NUVDarkMonitor()
# for monitor in tqdm([fuva_bottom_monitor, fuva_left_monitor,
# fuva_top_monitor, fuva_right_monitor,
# fuva_inner_monitor, fuvb_bottom_monitor,
# fuvb_left_monitor, fuvb_top_monitor,
# fuvb_right_monitor, fuvb_inner_monitor,
# nuv_monitor]):
# monitor.monitor()
def dark_filter(df_row, filter_pha, location):
"""Given a row corresponding to a dark corrtag file, filter it based on
the location and PHA (if FUV), and calculate dark rate information. Will
return the exploded dataframe with the correct dark information for that
one file."""
good_pha = (2, 23)
# time step stuff
time_step = 25
time_bins = df_row['TIME_3'][::time_step]
lat = df_row['LATITUDE'][::time_step][:-1]
lon = df_row['LONGITUDE'][::time_step][:-1]
# filtering pha
if filter_pha:
event_df = df_row[
['SEGMENT', 'XCORR', 'YCORR', 'PHA', 'TIME']].to_frame().T
event_df = explode_df(event_df, ['XCORR', 'YCORR', 'PHA', 'TIME'])
else:
event_df = df_row[['SEGMENT', 'XCORR', 'YCORR', 'TIME']].to_frame().T
event_df = explode_df(event_df, ['XCORR', 'YCORR', 'TIME'])
# creating event dataframe and filtering it by location on the detector
npix = (location[1] - location[0]) * (location[3] - location[2])
index = np.where((event_df['XCORR'] > location[0]) & (
event_df['XCORR'] < location[1]) & (
event_df['YCORR'] > location[2]) & (
event_df['YCORR'] < location[3]))
filtered_row = event_df.iloc[index].reset_index(drop=True)
# filtered events only need to be further filtered by PHA if not NUV
if filter_pha:
filtered_row = filtered_row[(filtered_row['PHA'] > good_pha[0]) & (
filtered_row['PHA'] < good_pha[1])]
counts = np.histogram(filtered_row.TIME, bins=time_bins)[0]
date = absolute_time(
expstart=list(repeat(df_row['EXPSTART'], len(time_bins))),
time=time_bins.tolist()).to_datetime()[:-1]
dark_rate = counts / npix / time_step
return pd.DataFrame({
'segment': df_row['SEGMENT'], 'darks': [dark_rate], 'date': [date],
'rootname': df_row['ROOTNAME'], 'latitude': [lat], "longitude": [lon]
})
def get_solar_data(url, datemin, datemax, box=4):
"""Download the most recent solar data, save as file and dataframe,
    filter the dataframe to the requested date range, and add a
    boxcar-smoothed copy of the F10.7 radio flux."""
response = request.urlopen(url)
if response.status == 200:
data = json.loads(response.read())
else:
        raise ValueError(
            "Invalid response! HTTP Status Code: {}".format(response.status))
df = pd.DataFrame(data)
dates = [datetime.datetime.strptime(val, '%Y-%m') for val in
df['time-tag']]
df.index = pd.DatetimeIndex(dates)
todays_date = datetime.datetime.today().strftime('%b%d_%Y')
outfile = os.path.join(COS_MONITORING,
"noaa_solar_indices_{}.txt".format(todays_date))
# print("Saving outfile: {}".format(outfile))
df.to_csv(outfile, header=True, index=True)
# print("Filtering the dataframe to the date range: {}, {}".format(datemin,
# datemax))
df = df.loc[datemin:datemax]
# smoothing the f10.7 data
kernel = Box1DKernel(box)
smoothed_107 = convolve(df["f10.7"], kernel)
df["box_convolved_f10.7"] = smoothed_107
return df
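
# Example (sketch): fetch the observed indices for an arbitrary window and
# inspect the smoothed F10.7 flux. The date range below is purely
# illustrative.
#
#     solar = get_solar_data(NOAA_URL, datetime.datetime(2019, 1, 1),
#                            datetime.datetime(2020, 1, 1))
#     solar[["f10.7", "box_convolved_f10.7"]].plot()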
class DarkMonitor(BaseMonitor):
"""Abstracted Dark Monitor. Not meant to be used directly but rather
inherited by specific segment and region dark monitors"""
labels = ['rootname']
output = COS_MONITORING
docs = "https://spacetelescope.github.io/cosmo/monitors.html#dark-rate" \
"-monitors"
segment = None
location = None
data_model = DarkDataModel
plottype = 'scatter'
x = 'date'
y = 'darks'
# # # defaults, can change these but optional definitions in the Monitor
# objects # # #
multi = False
sub_names = None
filter_saa = True
inner_region = 4 # number region that corresponds to the inner_region,
# for FUV use only.
def get_data(self): # -> Any: fix this later,
"""Required method to get the data necessary for plotting in the
correct format. Takes care of all data organization and "explosion"
of dataframes. Returns full dataframe with correct calculations and
columns."""
# should be fine in the monitor, just not in jupyter notebook
if self.multi:
# prime the pump
exploded_df = self.filter_data(self.location[0])
exploded_df["region"] = 0
for index, location in enumerate(self.location[1:]):
sub_exploded_df = self.filter_data(location)
sub_exploded_df["region"] = index + 1
exploded_df = exploded_df.append(sub_exploded_df)
else:
exploded_df = self.filter_data(self.location)
return exploded_df
def filter_data(self, location):
"""Given a location (region) on the detector, filter down the new
data from the DataModel as appropriate and perform dark filtering
and "explosion" of dataframe as necessary. Return the fully
"exploded" data for that location."""
filtered_rows = []
for _, row in self.model.new_data.iterrows():
if row.EXPSTART == 0:
continue
if row.SEGMENT == self.segment:
if row.SEGMENT == "N/A": # NUV
filtered_rows.append(dark_filter(row, False, location))
else: # Any of the FUV situations
filtered_rows.append(dark_filter(row, True, location))
filtered_df = pd.concat(filtered_rows).reset_index(drop=True)
exploded_df = explode_df(filtered_df,
['darks', 'date', 'latitude', 'longitude'])
# after exploding, add SAA filtering if required
if self.filter_saa:
exploded_df["no_saa"] = np.where(
exploded_df.eval("latitude > 10 or longitude < 260"), 1, 0)
return exploded_df
def plot(self):
"""Make the interactive subplots, including the solar plot, based on
how many locations (regions) are given. Write out the file to the
correct outpath."""
# make the interactive plots with sub-solar plots
if self.multi:
rows = len(self.location) + 1
self.sub_names += ["Solar Radio Flux"]
titles = tuple(self.sub_names)
else:
# only one region means two subplots
rows = 2
titles = (self.name, "Solar Radio Flux")
fig_height = 750
delta = 250
if rows > 3:
fig_height = delta * rows
pio.templates.default = "simple_white"
self.figure = make_subplots(rows=rows, cols=1, shared_xaxes=True,
subplot_titles=titles, x_title="Year",
vertical_spacing=0.05)
self.figure.update_layout(height=fig_height, width=1200,
title_text=self.name)
if self.multi:
# prime the pump again
region_x_data = self.data[self.x].where(self.data["region"] == 0)
region_y_data = self.data[self.y].where(self.data["region"] == 0)
self.figure.add_trace(
go.Scatter(x=region_x_data, y=region_y_data, mode="markers",
marker=dict(color="black", size=5),
hovertext=self.labels, hoverinfo="x+y+text",
name="Mean Dark Rate"), row=1, col=1)
self.figure.update_yaxes(
title_text="Mean Dark Rate<br>(counts/pix/sec)", row=1, col=1)
for index, location in enumerate(self.location[1:]):
index = index + 1
region_x_data = self.data[self.x].where(
self.data["region"] == index)
region_y_data = self.data[self.y].where(
self.data["region"] == index)
self.figure.add_trace(
go.Scatter(x=region_x_data, y=region_y_data,
showlegend=False, mode="markers",
marker=dict(color="black", size=5),
hovertext=self.labels, hoverinfo="x+y+text",
name="Mean Dark Rate"), row=index + 1, col=1)
self.figure.update_yaxes(
title_text="Mean Dark Rate<br>(counts/pix/sec)",
row=index + 1, col=1)
else:
# single plot
self.figure.add_trace(
go.Scatter(x=self.data[self.x], y=self.data[self.y],
mode="markers", marker=dict(color="black", size=5),
hovertext=self.labels, hoverinfo="x+y+text",
name="Mean Dark Rate"), row=1, col=1)
self.figure.update_yaxes(
title_text="Mean Dark Rate<br>(counts/pix/sec)", row=1, col=1)
## this is solar stuff only until the next ##
datemin = self.data[self.x].min()
datemax = self.data[self.x].max()
# sunpy_data = sunpy_retriever(date_min, date_max)
solar_data = get_solar_data(NOAA_URL, datemin, datemax)
solar_time = solar_data.index
solar_flux = solar_data["f10.7"]
solar_flux_smooth = solar_data["box_convolved_f10.7"]
self.figure.add_trace(
go.Scatter(x=solar_time, y=solar_flux, mode="lines",
line=dict(dash="longdash", color="#0F2080"),
name="10.7 cm"), row=rows, col=1)
self.figure.add_trace(
go.Scatter(x=solar_time, y=solar_flux_smooth, mode="lines",
line=dict(color="#85C0F9"), name="10.7 cm Smoothed"),
row=rows, col=1)
self.figure.update_yaxes(title_text="Solar Radio Flux", row=rows,
col=1)
##
self.figure.update_xaxes(showgrid=True, showline=True, mirror=True)
self.figure.update_yaxes(showgrid=True, showline=True, mirror=True)
def plot_histogram(self, nbins=100):
"""Make the interactive histogram which displays the distribution of
the data and the ETC dark rates. Write out the file to the
correct outpath."""
if self.data is None:
self.data = self.get_data()
dist995, dark_column, lines = self.calculate_histogram(nbins)
        full_names = [f"Mean: {lines[0]:.2e}", f"Median: {lines[1]:.2e}",
                      f"1 sigma: {lines[2]:.2e}", f"2 sigma: {lines[3]:.2e}",
                      f"3 sigma: {lines[4]:.2e}", f"95%: {lines[5]:.2e}"]
# histogram
fig = go.Figure(
data=[go.Histogram(x=dark_column, nbinsx=nbins, showlegend=False)])
# value lines--have to do a shape and trace for both of them until
# plotly adds vertical line plotting features (because shapes can't
# be in the legend, only traces)
# also f strings and latex together is super annoying. have to
# triple bracket the latex part
fig.add_trace(
go.Scatter(x=[lines[0], lines[0]], y=[0, 1], mode="lines",
line=dict(color="#DC267F"), name=full_names[0]))
fig.add_shape(
dict(type="line", xref="x", yref="paper", x0=lines[0], y0=0,
x1=lines[0], y1=1, line=dict(color="#DC267F")))
fig.add_trace(
go.Scatter(x=[lines[1], lines[1]], y=[0, 1], mode="lines",
line=dict(color="#DC267F", dash="dash"),
name=full_names[1]))
fig.add_shape(
dict(type="line", xref="x", yref="paper", x0=lines[1], y0=0,
x1=lines[1], y1=1, line=dict(color="#DC267F", dash="dash")))
fig.add_trace(
go.Scatter(x=[lines[2], lines[2]], y=[0, 1], mode="lines",
line=dict(color="#FE6100"), name=full_names[2]))
fig.add_shape(
dict(type="line", xref="x", yref="paper", x0=lines[2], y0=0,
x1=lines[2], y1=1, line=dict(color="#FE6100")))
fig.add_trace(
go.Scatter(x=[lines[3], lines[3]], y=[0, 1], mode="lines",
line=dict(color="#FE6100", dash="dash"),
name=full_names[3]))
fig.add_shape(
dict(type="line", xref="x", yref="paper", x0=lines[3], y0=0,
x1=lines[3], y1=1, line=dict(color="#FE6100", dash="dash")))
fig.add_trace(
go.Scatter(x=[lines[4], lines[4]], y=[0, 1], mode="lines",
line=dict(color="#FFB000"), name=full_names[4]))
fig.add_shape(
dict(type="line", xref="x", yref="paper", x0=lines[4], y0=0,
x1=lines[4], y1=1, line=dict(color="#FFB000")))
fig.add_trace(
go.Scatter(x=[lines[5], lines[5]], y=[0, 1], mode="lines",
line=dict(color="#FFB000", dash="dash"),
name=full_names[5]))
fig.add_shape(
dict(type="line", xref="x", yref="paper", x0=lines[5], y0=0,
x1=lines[5], y1=1, line=dict(color="#FFB000", dash="dash")))
datemin = self.data[self.x].min()
datemax = self.data[self.x].max()
fig.update_xaxes(range=[0, dist995], title_text="Counts/Pix/Sec",
showline=True)
fig.update_yaxes(rangemode="tozero", title_text="Frequency",
showline=True)
fig.update_layout(
title_text=self.name + f" Histogram: {datemin:%Y-%m-%d} - "
f"{datemax:%Y-%m-%d}")
fig.update_layout(xaxis=dict(showexponent='all', exponentformat='e'))
fig.update_layout(yaxis_showgrid=True)
# fix this naming convention later
if not self.output:
            output = os.path.join(os.getcwd(), f"{self._filename}_hist.html")
else:
# you would think you could use self.output but that gets
# updated somewhere
# in the superclass to include the full plot name so we can't
# use that.
# kind of a bug
output = os.path.join(COS_MONITORING,
f"{self._filename}_hist.html")
fig.write_html(output)
def calculate_histogram(self, nbins=100):
"""Calculate the histogram distribution for the important plot and
ETC values."""
if self.data is None:
self.data = self.get_data()
# filter out the flag == 0 / grab the flag == 1 from self.data[self.y]
if self.filter_saa:
dark_column = self.data[self.y].loc[self.data["no_saa"] == 1]
if "FUV" in self.segment:
dark_column = self.data[self.y].loc[
(self.data["no_saa"] == 1) & (
self.data["region"] == self.inner_region)]
else:
dark_column = self.data[self.y]
counts, bins = np.histogram(dark_column, bins=nbins)
cuml_dist = np.cumsum(counts)
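        # The percentile dark rates (e.g. the 95% and 99% values returned for
        # plotting and for track()) are taken as the bin edges at which the
        # normalised cumulative histogram is closest to the target fraction.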
count_99 = abs(cuml_dist / float(cuml_dist.max()) - .99).argmin()
count_95 = abs(cuml_dist / float(cuml_dist.max()) - .95).argmin()
# only used for plotting
count995 = abs(cuml_dist / float(cuml_dist.max()) - .995).argmin()
mean = dark_column.mean()
med = np.median(dark_column)
std = dark_column.std()
onesig = med + std
twosig = med + (2 * std)
threesig = med + (3 * std)
dist95 = bins[count_95]
dist99 = bins[count_99]
dist995 = bins[count995]
values = [mean, med, onesig, twosig, threesig, dist95, dist99]
return dist995, dark_column, values
def track(self, nbins=100):
_, _, track_list = self.calculate_histogram(nbins)
return track_list
def plot_orbital_variation(self):
"""Make the orbital variation plot and write out the file to the
correct outpath."""
if self.data is None:
self.data = self.get_data()
colormin = self.data["darks"].min()
colormax = self.data["darks"].max()
fig = go.Figure(data=[
go.Scatter(x=self.data["longitude"], y=self.data["latitude"],
mode="markers",
marker=dict(color=self.data["darks"], size=2,
colorscale='Viridis', opacity=0.5,
colorbar=dict(thickness=20,
exponentformat="e",
title=dict(text="Dark Rate")),
cmin=colormin, cmax=colormax))])
datemin = self.data[self.x].min()
datemax = self.data[self.x].max()
fig.update_layout(
title_text=self.name + f" Orbital Variation: {datemin:%Y-%m-%d} - "
f"{datemax:%Y-%m-%d}")
fig.update_xaxes(title_text="Longitude", showline=True)
fig.update_yaxes(title_text="Latitude", showline=True)
if not self.output:
output = os.path.join(os.getcwd(),
f"{self._filename}_orbital.html")
else:
# you would think you could use self.output but that gets
# updated somewhere
# in the superclass to include the full plot name so we can't
# use that.
# kind of a bug
output = os.path.join(COS_MONITORING,
f"{self._filename}_orbital.html")
fig.write_html(output)
def store_results(self):
# TODO: Define results to store
pass
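# Illustrative sketch (not part of the original monitor module): the percentile
# logic used in ``calculate_histogram`` above, reduced to plain numpy.  It bins
# the dark rates, cumulates the counts, and returns the bin edge closest to the
# requested fraction of the total.  The helper name and the synthetic data are
# hypothetical and exist only for illustration.
def _example_dark_rate_percentile(dark_rates=None, fraction=0.95, nbins=100):
    import numpy as np
    if dark_rates is None:
        # synthetic stand-in for a column of dark counts/pix/sec
        dark_rates = np.random.RandomState(0).exponential(scale=1e-6, size=10000)
    counts, bins = np.histogram(dark_rates, bins=nbins)
    cuml = np.cumsum(counts)
    idx = np.abs(cuml / float(cuml.max()) - fraction).argmin()
    return bins[idx]  # dark rate below which ~fraction of the samples fall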
# ----------------------------------------------------------------------------#
class FUVADarkMonitor(DarkMonitor):
"""FUVA Dark Monitor for all edges and inner region."""
name = 'FUVA Dark Monitor'
segment = 'FUVA'
multi = True
location = [(1060, 15250, 296, 375), (1060, 1260, 296, 734),
(1060, 15250, 660, 734), (15119, 15250, 296, 734),
(1260, 15119, 375, 660)]
sub_names = ["FUVA Dark Monitor - Bottom", "FUVA Dark Monitor - Left",
"FUVA Dark Monitor - Top", "FUVA Dark Monitor - Right",
"FUVA Dark Monitor - Inner"]
class FUVBDarkMonitor(DarkMonitor):
"""FUVB Dark Monitor for all edges and inner region."""
name = 'FUVB Dark Monitor'
segment = 'FUVB'
multi = True
location = [(809, 15182, 360, 405), (809, 1000, 360, 785),
(809, 15182, 740, 785), (14990, 15182, 360, 785),
(1000, 14990, 405, 740)]
sub_names = ["FUVB Dark Monitor - Bottom", "FUVB Dark Monitor - Left",
"FUVB Dark Monitor - Top", "FUVB Dark Monitor - Right",
"FUVB Dark Monitor - Inner"]
class FUVABottomDarkMonitor(DarkMonitor):
"""FUVA Dark Monitor for bottom edge."""
segment = 'FUVA'
location = (1060, 15250, 296, 375)
name = 'FUVA Dark Monitor - Bottom'
class FUVALeftDarkMonitor(DarkMonitor):
"""FUVA Dark Monitor for left edge."""
name = 'FUVA Dark Monitor - Left'
segment = 'FUVA'
location = (1060, 1260, 296, 734)
class FUVATopDarkMonitor(DarkMonitor):
"""FUVA Dark Monitor for top edge."""
name = 'FUVA Dark Monitor - Top'
segment = 'FUVA'
location = (1060, 15250, 660, 734)
class FUVARightDarkMonitor(DarkMonitor):
"""FUVA Dark Monitor for right edge."""
name = 'FUVA Dark Monitor - Right'
segment = 'FUVA'
location = (15119, 15250, 296, 734)
class FUVAInnerDarkMonitor(DarkMonitor):
"""FUVA Dark Monitor for inner region."""
name = 'FUVA Dark Monitor - Inner'
segment = 'FUVA'
location = (1260, 15119, 375, 660)
class FUVBBottomDarkMonitor(DarkMonitor):
"""FUVB Dark Monitor for bottom edge."""
name = 'FUVB Dark Monitor - Bottom'
segment = 'FUVB'
location = (809, 15182, 360, 405)
class FUVBLeftDarkMonitor(DarkMonitor):
"""FUVB Dark Monitor for left edge."""
name = 'FUVB Dark Monitor - Left'
segment = 'FUVB'
location = (809, 1000, 360, 785)
class FUVBTopDarkMonitor(DarkMonitor):
"""FUVB Dark Monitor for top edge."""
name = 'FUVB Dark Monitor - Top'
segment = 'FUVB'
location = (809, 15182, 740, 785)
class FUVBRightDarkMonitor(DarkMonitor):
"""FUVB Dark Monitor for right edge."""
name = 'FUVB Dark Monitor - Right'
segment = 'FUVB'
location = (14990, 15182, 360, 785)
class FUVBInnerDarkMonitor(DarkMonitor):
"""FUVB Dark Monitor for inner region."""
name = 'FUVB Dark Monitor - Inner'
segment = 'FUVB'
location = (1000, 14990, 405, 740)
class NUVDarkMonitor(DarkMonitor):
"""NUV Dark Monitor for full detector."""
name = "NUV Dark Monitor"
segment = "N/A"
location = (0, 1024, 0, 1024)
| bsd-3-clause |
kdebrab/pandas | pandas/tests/extension/decimal/array.py | 2 | 3360 | import decimal
import numbers
import random
import sys
import numpy as np
import pandas as pd
from pandas.core.arrays import (ExtensionArray,
ExtensionScalarOpsMixin)
from pandas.core.dtypes.base import ExtensionDtype
class DecimalDtype(ExtensionDtype):
type = decimal.Decimal
name = 'decimal'
na_value = decimal.Decimal('NaN')
@classmethod
def construct_array_type(cls):
"""Return the array type associated with this dtype
Returns
-------
type
"""
return DecimalArray
@classmethod
def construct_from_string(cls, string):
if string == cls.name:
return cls()
else:
raise TypeError("Cannot construct a '{}' from "
"'{}'".format(cls, string))
class DecimalArray(ExtensionArray, ExtensionScalarOpsMixin):
dtype = DecimalDtype()
def __init__(self, values, copy=False):
for val in values:
if not isinstance(val, self.dtype.type):
raise TypeError("All values must be of type " +
str(self.dtype.type))
values = np.asarray(values, dtype=object)
self._data = values
# Some aliases for common attribute names to ensure pandas supports
# these
self._items = self.data = self._data
# those aliases are currently not working due to assumptions
# in internal code (GH-20735)
# self._values = self.values = self.data
@classmethod
def _from_sequence(cls, scalars, copy=False):
return cls(scalars)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def __getitem__(self, item):
if isinstance(item, numbers.Integral):
return self._data[item]
else:
return type(self)(self._data[item])
def take(self, indexer, allow_fill=False, fill_value=None):
from pandas.api.extensions import take
data = self._data
if allow_fill and fill_value is None:
fill_value = self.dtype.na_value
result = take(data, indexer, fill_value=fill_value,
allow_fill=allow_fill)
return self._from_sequence(result)
def copy(self, deep=False):
if deep:
return type(self)(self._data.copy())
return type(self)(self)
def __setitem__(self, key, value):
if pd.api.types.is_list_like(value):
value = [decimal.Decimal(v) for v in value]
else:
value = decimal.Decimal(value)
self._data[key] = value
def __len__(self):
return len(self._data)
def __repr__(self):
return 'DecimalArray({!r})'.format(self._data)
@property
def nbytes(self):
n = len(self)
if n:
return n * sys.getsizeof(self[0])
return 0
def isna(self):
return np.array([x.is_nan() for x in self._data], dtype=bool)
@property
def _na_value(self):
return decimal.Decimal('NaN')
@classmethod
def _concat_same_type(cls, to_concat):
return cls(np.concatenate([x._data for x in to_concat]))
DecimalArray._add_arithmetic_ops()
DecimalArray._add_comparison_ops()
def make_data():
return [decimal.Decimal(random.random()) for _ in range(100)]
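# Illustrative sketch (not part of the original test module): one way this
# extension array might be exercised.  Wrapping it in ``pd.Series`` is how the
# surrounding pandas extension tests typically consume it; the helper name
# below is hypothetical and serves only as a usage example.
def _example_decimal_series():
    values = [decimal.Decimal(x) for x in ("1.1", "2.2", "3.3")]
    arr = DecimalArray(values)
    ser = pd.Series(arr)
    # element-wise access returns decimal.Decimal scalars
    return ser, ser.iloc[0], arr.isna()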
| bsd-3-clause |
Axelrod-Python/axelrod-moran | src/validate.py | 1 | 4812 | """
A script to draw the validation plots.
"""
import pandas as pd
import axelrod as axl
import matplotlib.pyplot as plt
import csv
import generate_cache
import theoretic
import functools
import multiprocessing
import itertools
def simulated_fixation(strategy_pair, N, i=1, repetitions=10,
cachefile=None):
"""Run an approximate Moran process and obtain the fixation probabilities"""
if cachefile is None:
cachefile = "../data/outcomes.csv"
cache = generate_cache.read_csv(cachefile)
for k, v in cache.items():
cache[k] = axl.Pdf(v)
players = []
for _ in range(i):
players.append(strategy_pair[0])
for _ in range(N - i):
players.append(strategy_pair[1])
mp = axl.ApproximateMoranProcess(players, cached_outcomes=cache)
win_count = 0
for seed in range(repetitions):
axl.seed(seed)
mp.reset()
mp.play()
if mp.winning_strategy_name == str(players[0]):
win_count += 1
return win_count / repetitions
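# Illustrative sketch (not part of the original script): a minimal call to
# ``simulated_fixation`` for a single pair of strategies.  The population size
# and repetition count are placeholders; the cache path shown is the default
# used elsewhere in this script.
def _example_simulated_fixation():
    pair = (axl.Cooperator(), axl.Defector())
    # probability that a single Cooperator invading N - 1 Defectors fixates
    return simulated_fixation(pair, N=6, i=1, repetitions=10,
                              cachefile="../data/outcomes.csv")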
def theoretic_vs_simulated(repetitions, utilities, filename,
N, player1, player2):
"""
Return the theoretic values and the simulated values
"""
players = (player1, player2)
starting_pop = [1, N // 2, N - 1] if N != 2 else [1]
for i in starting_pop:
player_names = [str(p) for p in players]
t = theoretic.fixation(player_names, N, i, utilities=utilities)
s = simulated_fixation(players, N, i, repetitions=repetitions)
with open(filename, "a") as f:
writer = csv.writer(f)
writer.writerow([repetitions, N, i, *player_names, t, s])
if __name__ == "__main__":
outcomes_file = "../data/outcomes.csv"
output_file = "../data/fixation_validation.csv"
with open(output_file, "w") as f:
f.write("Repetitions,N,i,Player 1,Player 2,Theoretic,Simulated\n")
player_pairs = [(axl.Defector(), axl.Defector()),
(axl.Defector(), axl.Alternator()),
(axl.Defector(), axl.Cooperator()),
(axl.Defector(), axl.TitForTat()),
(axl.Defector(), axl.WinStayLoseShift()),
(axl.Random(), axl.Random()),
(axl.Random(), axl.ZDExtort2()),
(axl.Random(), axl.GTFT()),
(axl.Random(), axl.ALLCorALLD()),
(axl.Random(), axl.PSOGambler2_2_2()),
(axl.Cooperator(), axl.Random()),
(axl.Cooperator(), axl.ZDExtort2()),
(axl.Cooperator(), axl.GTFT()),
(axl.Cooperator(), axl.ALLCorALLD()),
(axl.Cooperator(), axl.PSOGambler2_2_2()),
(axl.Alternator(), axl.Random()),
(axl.Alternator(), axl.ZDExtort2()),
(axl.Alternator(), axl.GTFT()),
(axl.Alternator(), axl.ALLCorALLD()),
(axl.Alternator(), axl.PSOGambler2_2_2()),
(axl.ALLCorALLD(), axl.Cooperator()),
(axl.ALLCorALLD(), axl.Defector()),
(axl.ALLCorALLD(), axl.TitForTat()),
(axl.Alternator(), axl.Cooperator()),
(axl.Alternator(), axl.Defector()),
(axl.Alternator(), axl.TitForTat()),
(axl.Alternator(), axl.WinStayLoseShift()),
(axl.Defector(), axl.WinStayLoseShift()),
(axl.Calculator(), axl.ALLCorALLD()),
(axl.Calculator(), axl.ArrogantQLearner()),
(axl.Calculator(), axl.Random()),
(axl.Cooperator(), axl.TitForTat()),
(axl.Defector(), axl.Cooperator()),
(axl.Defector(), axl.TitForTat()),
(axl.Random(), axl.Cooperator()),
(axl.Random(), axl.Defector()),
(axl.Random(), axl.TitForTat()),
(axl.WinStayLoseShift(), axl.TitForTat())]
max_N = 20
repetitions = 1000
df = pd.read_csv(outcomes_file, header=None,
names=["Player 1", "Player 2",
"Score 1", "Score 2", "Iteration"])
utilities = {pair: (f["Score 1"].mean(), f["Score 2"].mean())
for pair, f in df.groupby(["Player 1", "Player 2"])}
processes = multiprocessing.cpu_count()
func = functools.partial(theoretic_vs_simulated, repetitions,
utilities, output_file)
p = multiprocessing.Pool(processes)
args = ((N, *players)
for N, players in itertools.product(range(2, max_N + 1, 2),
player_pairs))
p.starmap(func, args)
| mit |
yuanagain/seniorthesis | venv/lib/python2.7/site-packages/matplotlib/backends/backend_ps.py | 4 | 62877 | """
A PostScript backend, which can produce both PostScript .ps and .eps
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import StringIO
import glob, math, os, shutil, sys, time
def _fn_name(): return sys._getframe(1).f_code.co_name
import io
try:
from hashlib import md5
except ImportError:
from md5 import md5 #Deprecated in 2.5
from tempfile import mkstemp
from matplotlib import verbose, __version__, rcParams, checkdep_ghostscript
from matplotlib._pylab_helpers import Gcf
from matplotlib.afm import AFM
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.cbook import is_string_like, get_realpath_and_stat, \
is_writable_file_like, maxdict, file_requires_unicode
from matplotlib.figure import Figure
from matplotlib.font_manager import findfont, is_opentype_cff_font
from matplotlib.ft2font import FT2Font, KERNING_DEFAULT, LOAD_NO_HINTING
from matplotlib.ttconv import convert_ttf_to_ps
from matplotlib.mathtext import MathTextParser
from matplotlib._mathtext_data import uni2type1
from matplotlib.text import Text
from matplotlib.path import Path
from matplotlib import _path
from matplotlib.transforms import Affine2D
from matplotlib.backends.backend_mixed import MixedModeRenderer
import numpy as np
import binascii
import re
try:
set
except NameError:
from sets import Set as set
if sys.platform.startswith('win'): cmd_split = '&'
else: cmd_split = ';'
backend_version = 'Level II'
debugPS = 0
class PsBackendHelper(object):
def __init__(self):
self._cached = {}
@property
def gs_exe(self):
"""
        executable name of ghostscript.
"""
try:
return self._cached["gs_exe"]
except KeyError:
pass
gs_exe, gs_version = checkdep_ghostscript()
if gs_exe is None:
gs_exe = 'gs'
self._cached["gs_exe"] = gs_exe
return gs_exe
@property
def gs_version(self):
"""
version of ghostscript.
"""
try:
return self._cached["gs_version"]
except KeyError:
pass
from matplotlib.compat.subprocess import Popen, PIPE
s = Popen(self.gs_exe + " --version",
shell=True, stdout=PIPE)
pipe, stderr = s.communicate()
if six.PY3:
ver = pipe.decode('ascii')
else:
ver = pipe
try:
gs_version = tuple(map(int, ver.strip().split(".")))
except ValueError:
# if something went wrong parsing return null version number
gs_version = (0, 0)
self._cached["gs_version"] = gs_version
return gs_version
@property
def supports_ps2write(self):
"""
        True if the installed ghostscript supports the ps2write device.
"""
return self.gs_version[0] >= 9
ps_backend_helper = PsBackendHelper()
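# Illustrative sketch (not part of the original backend): later code queries
# this module-level helper for ghostscript capabilities, e.g. when choosing
# between the ps2write and pswrite devices in ``gs_distill`` below.  The helper
# name is hypothetical.
def _example_query_gs_helper():
    return (ps_backend_helper.gs_exe,
            ps_backend_helper.gs_version,
            ps_backend_helper.supports_ps2write)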
papersize = {'letter': (8.5,11),
'legal': (8.5,14),
'ledger': (11,17),
'a0': (33.11,46.81),
'a1': (23.39,33.11),
'a2': (16.54,23.39),
'a3': (11.69,16.54),
'a4': (8.27,11.69),
'a5': (5.83,8.27),
'a6': (4.13,5.83),
'a7': (2.91,4.13),
'a8': (2.07,2.91),
'a9': (1.457,2.05),
'a10': (1.02,1.457),
'b0': (40.55,57.32),
'b1': (28.66,40.55),
'b2': (20.27,28.66),
'b3': (14.33,20.27),
'b4': (10.11,14.33),
'b5': (7.16,10.11),
'b6': (5.04,7.16),
'b7': (3.58,5.04),
'b8': (2.51,3.58),
'b9': (1.76,2.51),
'b10': (1.26,1.76)}
def _get_papertype(w, h):
keys = list(six.iterkeys(papersize))
keys.sort()
keys.reverse()
for key in keys:
if key.startswith('l'): continue
pw, ph = papersize[key]
if (w < pw) and (h < ph): return key
else:
return 'a0'
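# Illustrative sketch (not part of the original backend): ``_get_papertype``
# walks the reversed, lexically sorted key list, skips the US sizes (keys
# starting with 'l'), and returns the first ISO size that fully contains the
# figure, falling back to 'a0'.  With the table above, an 8x10 inch figure
# should resolve to 'b4' and a 12x18 inch figure to 'b3'; the helper name is
# hypothetical.
def _example_papertype_lookup():
    return _get_papertype(8, 10), _get_papertype(12, 18)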
def _num_to_str(val):
if is_string_like(val): return val
ival = int(val)
if val==ival: return str(ival)
s = "%1.3f"%val
s = s.rstrip("0")
s = s.rstrip(".")
return s
def _nums_to_str(*args):
return ' '.join(map(_num_to_str,args))
def quote_ps_string(s):
"Quote dangerous characters of S for use in a PostScript string constant."
s = s.replace(b"\\", b"\\\\")
s = s.replace(b"(", b"\\(")
s = s.replace(b")", b"\\)")
s = s.replace(b"'", b"\\251")
s = s.replace(b"`", b"\\301")
s = re.sub(br"[^ -~\n]", lambda x: br"\%03o" % ord(x.group()), s)
return s.decode('ascii')
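# Illustrative sketch (not part of the original backend): ``quote_ps_string``
# escapes backslashes and parentheses and turns non-ASCII bytes into octal
# escapes so the result can sit inside a PostScript ``( ... )`` string literal.
# The input below is hypothetical.
def _example_quote_ps_string():
    # '(' and ')' come back escaped and the 0xE9 byte becomes the octal \351
    return quote_ps_string(b"(caf\xe9)")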
def seq_allequal(seq1, seq2):
"""
seq1 and seq2 are either None or sequences or arrays
Return True if both are None or both are seqs with identical
elements
"""
if seq1 is None:
return seq2 is None
if seq2 is None:
return False
#ok, neither are None:, assuming iterable
if len(seq1) != len(seq2): return False
return np.alltrue(np.equal(seq1, seq2))
class RendererPS(RendererBase):
"""
The renderer handles all the drawing primitives using a graphics
context instance that controls the colors/styles.
"""
fontd = maxdict(50)
afmfontd = maxdict(50)
def __init__(self, width, height, pswriter, imagedpi=72):
"""
Although postscript itself is dpi independent, we need to
        inform the image code about a requested dpi to generate high-res
        images and then scale them before embedding them.
"""
RendererBase.__init__(self)
self.width = width
self.height = height
self._pswriter = pswriter
if rcParams['text.usetex']:
self.textcnt = 0
self.psfrag = []
self.imagedpi = imagedpi
# current renderer state (None=uninitialised)
self.color = None
self.linewidth = None
self.linejoin = None
self.linecap = None
self.linedash = None
self.fontname = None
self.fontsize = None
self._hatches = {}
self.image_magnification = imagedpi/72.0
self._clip_paths = {}
self._path_collection_id = 0
self.used_characters = {}
self.mathtext_parser = MathTextParser("PS")
self._afm_font_dir = os.path.join(
rcParams['datapath'], 'fonts', 'afm')
def track_characters(self, font, s):
"""Keeps track of which characters are required from
each font."""
realpath, stat_key = get_realpath_and_stat(font.fname)
used_characters = self.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update([ord(x) for x in s])
def merge_used_characters(self, other):
for stat_key, (realpath, charset) in six.iteritems(other):
used_characters = self.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update(charset)
def set_color(self, r, g, b, store=1):
if (r,g,b) != self.color:
if r==g and r==b:
self._pswriter.write("%1.3f setgray\n"%r)
else:
self._pswriter.write("%1.3f %1.3f %1.3f setrgbcolor\n"%(r,g,b))
if store: self.color = (r,g,b)
def set_linewidth(self, linewidth, store=1):
linewidth = float(linewidth)
if linewidth != self.linewidth:
self._pswriter.write("%1.3f setlinewidth\n"%linewidth)
if store: self.linewidth = linewidth
def set_linejoin(self, linejoin, store=1):
if linejoin != self.linejoin:
self._pswriter.write("%d setlinejoin\n"%linejoin)
if store: self.linejoin = linejoin
def set_linecap(self, linecap, store=1):
if linecap != self.linecap:
self._pswriter.write("%d setlinecap\n"%linecap)
if store: self.linecap = linecap
def set_linedash(self, offset, seq, store=1):
if self.linedash is not None:
oldo, oldseq = self.linedash
if seq_allequal(seq, oldseq): return
if seq is not None and len(seq):
s="[%s] %d setdash\n"%(_nums_to_str(*seq), offset)
self._pswriter.write(s)
else:
self._pswriter.write("[] 0 setdash\n")
if store: self.linedash = (offset,seq)
def set_font(self, fontname, fontsize, store=1):
if rcParams['ps.useafm']: return
if (fontname,fontsize) != (self.fontname,self.fontsize):
out = ("/%s findfont\n"
"%1.3f scalefont\n"
"setfont\n" % (fontname, fontsize))
self._pswriter.write(out)
if store: self.fontname = fontname
if store: self.fontsize = fontsize
def create_hatch(self, hatch):
sidelen = 72
if hatch in self._hatches:
return self._hatches[hatch]
name = 'H%d' % len(self._hatches)
self._pswriter.write("""\
<< /PatternType 1
/PaintType 2
/TilingType 2
/BBox[0 0 %(sidelen)d %(sidelen)d]
/XStep %(sidelen)d
/YStep %(sidelen)d
/PaintProc {
pop
0 setlinewidth
""" % locals())
self._pswriter.write(
self._convert_path(Path.hatch(hatch), Affine2D().scale(72.0),
simplify=False))
self._pswriter.write("""\
stroke
} bind
>>
matrix
makepattern
/%(name)s exch def
""" % locals())
self._hatches[hatch] = name
return name
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return self.width, self.height
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height in display coords of the string s
        with FontProperties prop
"""
if rcParams['text.usetex']:
texmanager = self.get_texmanager()
fontsize = prop.get_size_in_points()
w, h, d = texmanager.get_text_width_height_descent(s, fontsize,
renderer=self)
return w, h, d
if ismath:
width, height, descent, pswriter, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
return width, height, descent
if rcParams['ps.useafm']:
if ismath: s = s[1:-1]
font = self._get_font_afm(prop)
l,b,w,h,d = font.get_str_bbox_and_descent(s)
fontsize = prop.get_size_in_points()
scale = 0.001*fontsize
w *= scale
h *= scale
d *= scale
return w, h, d
font = self._get_font_ttf(prop)
font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
w, h = font.get_width_height()
w /= 64.0 # convert from subpixels
h /= 64.0
d = font.get_descent()
d /= 64.0
#print s, w, h
return w, h, d
def flipy(self):
'return true if small y numbers are top for renderer'
return False
def _get_font_afm(self, prop):
key = hash(prop)
font = self.afmfontd.get(key)
if font is None:
fname = findfont(prop, fontext='afm', directory=self._afm_font_dir)
if fname is None:
fname = findfont(
"Helvetica", fontext='afm', directory=self._afm_font_dir)
font = self.afmfontd.get(fname)
if font is None:
with io.open(fname, 'rb') as fh:
font = AFM(fh)
self.afmfontd[fname] = font
self.afmfontd[key] = font
return font
def _get_font_ttf(self, prop):
key = hash(prop)
font = self.fontd.get(key)
if font is None:
fname = findfont(prop)
font = self.fontd.get(fname)
if font is None:
font = FT2Font(fname)
self.fontd[fname] = font
self.fontd[key] = font
font.clear()
size = prop.get_size_in_points()
font.set_size(size, 72.0)
return font
def _rgba(self, im):
return im.as_rgba_str()
def _rgb(self, im):
h,w,s = im.as_rgba_str()
rgba = np.fromstring(s, np.uint8)
rgba.shape = (h, w, 4)
rgb = rgba[::-1,:,:3]
return h, w, rgb.tostring()
def _gray(self, im, rc=0.3, gc=0.59, bc=0.11):
rgbat = im.as_rgba_str()
rgba = np.fromstring(rgbat[2], np.uint8)
rgba.shape = (rgbat[0], rgbat[1], 4)
rgba = rgba[::-1]
rgba_f = rgba.astype(np.float32)
r = rgba_f[:,:,0]
g = rgba_f[:,:,1]
b = rgba_f[:,:,2]
gray = (r*rc + g*gc + b*bc).astype(np.uint8)
return rgbat[0], rgbat[1], gray.tostring()
def _hex_lines(self, s, chars_per_line=128):
s = binascii.b2a_hex(s)
nhex = len(s)
lines = []
for i in range(0,nhex,chars_per_line):
limit = min(i+chars_per_line, nhex)
lines.append(s[i:limit])
return lines
def get_image_magnification(self):
"""
Get the factor by which to magnify images passed to draw_image.
Allows a backend to have images at a different resolution to other
artists.
"""
return self.image_magnification
def option_scale_image(self):
"""
        The ps backend supports arbitrary scaling of images.
"""
return True
def option_image_nocomposite(self):
"""
return whether to generate a composite image from multiple images on
a set of axes
"""
return not rcParams['image.composite_image']
def _get_image_h_w_bits_command(self, im):
if im.is_grayscale:
h, w, bits = self._gray(im)
imagecmd = "image"
else:
h, w, bits = self._rgb(im)
imagecmd = "false 3 colorimage"
return h, w, bits, imagecmd
def draw_image(self, gc, x, y, im, dx=None, dy=None, transform=None):
"""
Draw the Image instance into the current axes; x is the
distance in pixels from the left hand side of the canvas and y
        is the distance from the bottom.
        dx, dy are the width and height of the image. If a transform
        (which must be an affine transform) is given, x, y, dx, dy are
        interpreted in the coordinate system of the transform.
"""
h, w, bits, imagecmd = self._get_image_h_w_bits_command(im)
hexlines = b'\n'.join(self._hex_lines(bits)).decode('ascii')
if dx is None:
xscale = w / self.image_magnification
else:
xscale = dx
if dy is None:
yscale = h/self.image_magnification
else:
yscale = dy
if transform is None:
matrix = "1 0 0 1 0 0"
else:
matrix = " ".join(map(str, transform.to_values()))
figh = self.height*72
#print 'values', origin, flipud, figh, h, y
bbox = gc.get_clip_rectangle()
clippath, clippath_trans = gc.get_clip_path()
clip = []
if bbox is not None:
clipx,clipy,clipw,cliph = bbox.bounds
clip.append('%s clipbox' % _nums_to_str(clipw, cliph, clipx, clipy))
if clippath is not None:
id = self._get_clip_path(clippath, clippath_trans)
clip.append('%s' % id)
clip = '\n'.join(clip)
#y = figh-(y+h)
ps = """gsave
%(clip)s
[%(matrix)s] concat
%(x)s %(y)s translate
%(xscale)s %(yscale)s scale
/DataString %(w)s string def
%(w)s %(h)s 8 [ %(w)s 0 0 -%(h)s 0 %(h)s ]
{
currentfile DataString readhexstring pop
} bind %(imagecmd)s
%(hexlines)s
grestore
""" % locals()
self._pswriter.write(ps)
def _convert_path(self, path, transform, clip=False, simplify=None):
ps = []
last_points = None
if clip:
clip = (0.0, 0.0, self.width * 72.0,
self.height * 72.0)
else:
clip = None
return _path.convert_to_string(
path, transform, clip, simplify, None,
6, [b'm', b'l', b'', b'c', b'cl'], True).decode('ascii')
def _get_clip_path(self, clippath, clippath_transform):
key = (clippath, id(clippath_transform))
pid = self._clip_paths.get(key)
if pid is None:
pid = 'c%x' % len(self._clip_paths)
ps_cmd = ['/%s {' % pid]
ps_cmd.append(self._convert_path(clippath, clippath_transform,
simplify=False))
ps_cmd.extend(['clip', 'newpath', '} bind def\n'])
self._pswriter.write('\n'.join(ps_cmd))
self._clip_paths[key] = pid
return pid
def draw_path(self, gc, path, transform, rgbFace=None):
"""
Draws a Path instance using the given affine transform.
"""
clip = (rgbFace is None and gc.get_hatch_path() is None)
simplify = path.should_simplify and clip
ps = self._convert_path(
path, transform, clip=clip, simplify=simplify)
self._draw_ps(ps, gc, rgbFace)
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
"""
Draw the markers defined by path at each of the positions in x
and y. path coordinates are points, x and y coords will be
transformed by the transform
"""
if debugPS: self._pswriter.write('% draw_markers \n')
write = self._pswriter.write
if rgbFace:
if rgbFace[0]==rgbFace[1] and rgbFace[0]==rgbFace[2]:
ps_color = '%1.3f setgray' % rgbFace[0]
else:
ps_color = '%1.3f %1.3f %1.3f setrgbcolor' % rgbFace[:3]
# construct the generic marker command:
        ps_cmd = ['/o {', 'gsave', 'newpath', 'translate'] # don't want the translate to be global
lw = gc.get_linewidth()
stroke = lw != 0.0
if stroke:
ps_cmd.append('%.1f setlinewidth' % lw)
jint = gc.get_joinstyle()
ps_cmd.append('%d setlinejoin' % jint)
cint = gc.get_capstyle()
ps_cmd.append('%d setlinecap' % cint)
ps_cmd.append(self._convert_path(marker_path, marker_trans,
simplify=False))
if rgbFace:
if stroke:
ps_cmd.append('gsave')
ps_cmd.extend([ps_color, 'fill'])
if stroke:
ps_cmd.append('grestore')
if stroke:
ps_cmd.append('stroke')
ps_cmd.extend(['grestore', '} bind def'])
for vertices, code in path.iter_segments(
trans,
clip=(0, 0, self.width*72, self.height*72),
simplify=False):
if len(vertices):
x, y = vertices[-2:]
ps_cmd.append("%g %g o" % (x, y))
ps = '\n'.join(ps_cmd)
self._draw_ps(ps, gc, rgbFace, fill=False, stroke=False)
def draw_path_collection(self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position):
# Is the optimization worth it? Rough calculation:
# cost of emitting a path in-line is
# (len_path + 2) * uses_per_path
# cost of definition+use is
# (len_path + 3) + 3 * uses_per_path
len_path = len(paths[0].vertices) if len(paths) > 0 else 0
uses_per_path = self._iter_collection_uses_per_path(
paths, all_transforms, offsets, facecolors, edgecolors)
should_do_optimization = \
len_path + 3 * uses_per_path + 3 < (len_path + 2) * uses_per_path
if not should_do_optimization:
return RendererBase.draw_path_collection(
self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position)
write = self._pswriter.write
path_codes = []
for i, (path, transform) in enumerate(self._iter_collection_raw_paths(
master_transform, paths, all_transforms)):
name = 'p%x_%x' % (self._path_collection_id, i)
ps_cmd = ['/%s {' % name,
'newpath', 'translate']
ps_cmd.append(self._convert_path(path, transform, simplify=False))
ps_cmd.extend(['} bind def\n'])
write('\n'.join(ps_cmd))
path_codes.append(name)
for xo, yo, path_id, gc0, rgbFace in self._iter_collection(
gc, master_transform, all_transforms, path_codes, offsets,
offsetTrans, facecolors, edgecolors, linewidths, linestyles,
antialiaseds, urls, offset_position):
ps = "%g %g %s" % (xo, yo, path_id)
self._draw_ps(ps, gc0, rgbFace)
self._path_collection_id += 1
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!', mtext=None):
"""
draw a Text instance
"""
w, h, bl = self.get_text_width_height_descent(s, prop, ismath)
fontsize = prop.get_size_in_points()
thetext = 'psmarker%d' % self.textcnt
color = '%1.3f,%1.3f,%1.3f'% gc.get_rgb()[:3]
fontcmd = {'sans-serif' : r'{\sffamily %s}',
'monospace' : r'{\ttfamily %s}'}.get(
rcParams['font.family'][0], r'{\rmfamily %s}')
s = fontcmd % s
tex = r'\color[rgb]{%s} %s' % (color, s)
corr = 0#w/2*(fontsize-10)/10
if rcParams['text.latex.preview']:
# use baseline alignment!
pos = _nums_to_str(x-corr, y)
self.psfrag.append(r'\psfrag{%s}[Bl][Bl][1][%f]{\fontsize{%f}{%f}%s}'%(thetext, angle, fontsize, fontsize*1.25, tex))
else:
            # stick to the bottom alignment, but this may give an incorrect baseline sometimes.
pos = _nums_to_str(x-corr, y-bl)
self.psfrag.append(r'\psfrag{%s}[bl][bl][1][%f]{\fontsize{%f}{%f}%s}'%(thetext, angle, fontsize, fontsize*1.25, tex))
ps = """\
gsave
%(pos)s moveto
(%(thetext)s)
show
grestore
""" % locals()
self._pswriter.write(ps)
self.textcnt += 1
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
"""
draw a Text instance
"""
# local to avoid repeated attribute lookups
write = self._pswriter.write
if debugPS:
write("% text\n")
if ismath=='TeX':
            return self.draw_tex(gc, x, y, s, prop, angle)
elif ismath:
return self.draw_mathtext(gc, x, y, s, prop, angle)
elif rcParams['ps.useafm']:
self.set_color(*gc.get_rgb())
font = self._get_font_afm(prop)
fontname = font.get_fontname()
fontsize = prop.get_size_in_points()
scale = 0.001*fontsize
thisx = 0
thisy = font.get_str_bbox_and_descent(s)[4] * scale
last_name = None
lines = []
for c in s:
name = uni2type1.get(ord(c), 'question')
try:
width = font.get_width_from_char_name(name)
except KeyError:
name = 'question'
width = font.get_width_char('?')
if last_name is not None:
kern = font.get_kern_dist_from_name(last_name, name)
else:
kern = 0
last_name = name
thisx += kern * scale
lines.append('%f %f m /%s glyphshow'%(thisx, thisy, name))
thisx += width * scale
thetext = "\n".join(lines)
ps = """\
gsave
/%(fontname)s findfont
%(fontsize)s scalefont
setfont
%(x)f %(y)f translate
%(angle)f rotate
%(thetext)s
grestore
""" % locals()
self._pswriter.write(ps)
else:
font = self._get_font_ttf(prop)
font.set_text(s, 0, flags=LOAD_NO_HINTING)
self.track_characters(font, s)
self.set_color(*gc.get_rgb())
sfnt = font.get_sfnt()
try:
ps_name = sfnt[(1,0,0,6)].decode('macroman')
except KeyError:
ps_name = sfnt[(3,1,0x0409,6)].decode(
'utf-16be')
ps_name = ps_name.encode('ascii', 'replace').decode('ascii')
self.set_font(ps_name, prop.get_size_in_points())
cmap = font.get_charmap()
lastgind = None
#print 'text', s
lines = []
thisx = 0
thisy = 0
for c in s:
ccode = ord(c)
gind = cmap.get(ccode)
if gind is None:
ccode = ord('?')
name = '.notdef'
gind = 0
else:
name = font.get_glyph_name(gind)
glyph = font.load_char(ccode, flags=LOAD_NO_HINTING)
if lastgind is not None:
kern = font.get_kerning(lastgind, gind, KERNING_DEFAULT)
else:
kern = 0
lastgind = gind
thisx += kern/64.0
lines.append('%f %f m /%s glyphshow'%(thisx, thisy, name))
thisx += glyph.linearHoriAdvance/65536.0
thetext = '\n'.join(lines)
ps = """gsave
%(x)f %(y)f translate
%(angle)f rotate
%(thetext)s
grestore
""" % locals()
self._pswriter.write(ps)
def new_gc(self):
return GraphicsContextPS()
def draw_mathtext(self, gc,
x, y, s, prop, angle):
"""
Draw the math text using matplotlib.mathtext
"""
if debugPS:
self._pswriter.write("% mathtext\n")
width, height, descent, pswriter, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
self.merge_used_characters(used_characters)
self.set_color(*gc.get_rgb())
thetext = pswriter.getvalue()
ps = """gsave
%(x)f %(y)f translate
%(angle)f rotate
%(thetext)s
grestore
""" % locals()
self._pswriter.write(ps)
def draw_gouraud_triangle(self, gc, points, colors, trans):
self.draw_gouraud_triangles(gc, points.reshape((1, 3, 2)),
colors.reshape((1, 3, 4)), trans)
def draw_gouraud_triangles(self, gc, points, colors, trans):
assert len(points) == len(colors)
assert points.ndim == 3
assert points.shape[1] == 3
assert points.shape[2] == 2
assert colors.ndim == 3
assert colors.shape[1] == 3
assert colors.shape[2] == 4
shape = points.shape
flat_points = points.reshape((shape[0] * shape[1], 2))
flat_points = trans.transform(flat_points)
flat_colors = colors.reshape((shape[0] * shape[1], 4))
points_min = np.min(flat_points, axis=0) - (1 << 12)
points_max = np.max(flat_points, axis=0) + (1 << 12)
factor = np.ceil(float(2 ** 32 - 1) / (points_max - points_min))
xmin, ymin = points_min
xmax, ymax = points_max
streamarr = np.empty(
(shape[0] * shape[1],),
dtype=[('flags', 'u1'),
('points', '>u4', (2,)),
('colors', 'u1', (3,))])
streamarr['flags'] = 0
streamarr['points'] = (flat_points - points_min) * factor
streamarr['colors'] = flat_colors[:, :3] * 255.0
stream = quote_ps_string(streamarr.tostring())
self._pswriter.write("""
gsave
<< /ShadingType 4
/ColorSpace [/DeviceRGB]
/BitsPerCoordinate 32
/BitsPerComponent 8
/BitsPerFlag 8
/AntiAlias true
/Decode [ %(xmin)f %(xmax)f %(ymin)f %(ymax)f 0 1 0 1 0 1 ]
/DataSource (%(stream)s)
>>
shfill
grestore
""" % locals())
def _draw_ps(self, ps, gc, rgbFace, fill=True, stroke=True, command=None):
"""
        Emit the PostScript snippet 'ps' with all the attributes from 'gc'
applied. 'ps' must consist of PostScript commands to construct a path.
The fill and/or stroke kwargs can be set to False if the
'ps' string already includes filling and/or stroking, in
which case _draw_ps is just supplying properties and
clipping.
"""
# local variable eliminates all repeated attribute lookups
write = self._pswriter.write
if debugPS and command:
write("% "+command+"\n")
mightstroke = gc.shouldstroke()
stroke = stroke and mightstroke
fill = (fill and rgbFace is not None and
(len(rgbFace) <= 3 or rgbFace[3] != 0.0))
if mightstroke:
self.set_linewidth(gc.get_linewidth())
jint = gc.get_joinstyle()
self.set_linejoin(jint)
cint = gc.get_capstyle()
self.set_linecap(cint)
self.set_linedash(*gc.get_dashes())
self.set_color(*gc.get_rgb()[:3])
write('gsave\n')
cliprect = gc.get_clip_rectangle()
if cliprect:
x,y,w,h=cliprect.bounds
write('%1.4g %1.4g %1.4g %1.4g clipbox\n' % (w,h,x,y))
clippath, clippath_trans = gc.get_clip_path()
if clippath:
id = self._get_clip_path(clippath, clippath_trans)
write('%s\n' % id)
# Jochen, is the strip necessary? - this could be a honking big string
write(ps.strip())
write("\n")
if fill:
if stroke:
write("gsave\n")
self.set_color(store=0, *rgbFace[:3])
write("fill\n")
if stroke:
write("grestore\n")
hatch = gc.get_hatch()
if hatch:
hatch_name = self.create_hatch(hatch)
write("gsave\n")
write("[/Pattern [/DeviceRGB]] setcolorspace %f %f %f " % gc.get_rgb()[:3])
write("%s setcolor fill grestore\n" % hatch_name)
if stroke:
write("stroke\n")
write("grestore\n")
class GraphicsContextPS(GraphicsContextBase):
def get_capstyle(self):
return {'butt':0,
'round':1,
'projecting':2}[GraphicsContextBase.get_capstyle(self)]
def get_joinstyle(self):
return {'miter':0,
'round':1,
'bevel':2}[GraphicsContextBase.get_joinstyle(self)]
def shouldstroke(self):
return (self.get_linewidth() > 0.0 and
(len(self.get_rgb()) <= 3 or self.get_rgb()[3] != 0.0))
def new_figure_manager(num, *args, **kwargs):
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasPS(figure)
manager = FigureManagerPS(canvas, num)
return manager
class FigureCanvasPS(FigureCanvasBase):
_renderer_class = RendererPS
fixed_dpi = 72
def draw(self):
pass
filetypes = {'ps' : 'Postscript',
'eps' : 'Encapsulated Postscript'}
def get_default_filetype(self):
return 'ps'
def print_ps(self, outfile, *args, **kwargs):
return self._print_ps(outfile, 'ps', *args, **kwargs)
def print_eps(self, outfile, *args, **kwargs):
return self._print_ps(outfile, 'eps', *args, **kwargs)
def _print_ps(self, outfile, format, *args, **kwargs):
papertype = kwargs.pop("papertype", rcParams['ps.papersize'])
papertype = papertype.lower()
if papertype == 'auto':
pass
elif papertype not in papersize:
raise RuntimeError( '%s is not a valid papertype. Use one \
of %s'% (papertype, ', '.join(six.iterkeys(papersize))))
orientation = kwargs.pop("orientation", "portrait").lower()
if orientation == 'landscape': isLandscape = True
elif orientation == 'portrait': isLandscape = False
else: raise RuntimeError('Orientation must be "portrait" or "landscape"')
self.figure.set_dpi(72) # Override the dpi kwarg
imagedpi = kwargs.pop("dpi", 72)
facecolor = kwargs.pop("facecolor", "w")
edgecolor = kwargs.pop("edgecolor", "w")
if rcParams['text.usetex']:
self._print_figure_tex(outfile, format, imagedpi, facecolor, edgecolor,
orientation, isLandscape, papertype,
**kwargs)
else:
self._print_figure(outfile, format, imagedpi, facecolor, edgecolor,
orientation, isLandscape, papertype,
**kwargs)
def _print_figure(self, outfile, format, dpi=72, facecolor='w', edgecolor='w',
orientation='portrait', isLandscape=False, papertype=None,
**kwargs):
"""
Render the figure to hardcopy. Set the figure patch face and
edge colors. This is useful because some of the GUIs have a
gray figure face color background and you'll probably want to
override this on hardcopy
If outfile is a string, it is interpreted as a file name.
If the extension matches .ep* write encapsulated postscript,
otherwise write a stand-alone PostScript file.
If outfile is a file object, a stand-alone PostScript file is
written into this file object.
"""
isEPSF = format == 'eps'
passed_in_file_object = False
if is_string_like(outfile):
title = outfile
elif is_writable_file_like(outfile):
title = None
passed_in_file_object = True
else:
raise ValueError("outfile must be a path or a file-like object")
# find the appropriate papertype
width, height = self.figure.get_size_inches()
if papertype == 'auto':
if isLandscape: papertype = _get_papertype(height, width)
else: papertype = _get_papertype(width, height)
if isLandscape: paperHeight, paperWidth = papersize[papertype]
else: paperWidth, paperHeight = papersize[papertype]
if rcParams['ps.usedistiller'] and not papertype == 'auto':
# distillers will improperly clip eps files if the pagesize is
# too small
if width>paperWidth or height>paperHeight:
if isLandscape:
papertype = _get_papertype(height, width)
paperHeight, paperWidth = papersize[papertype]
else:
papertype = _get_papertype(width, height)
paperWidth, paperHeight = papersize[papertype]
# center the figure on the paper
xo = 72*0.5*(paperWidth - width)
yo = 72*0.5*(paperHeight - height)
l, b, w, h = self.figure.bbox.bounds
llx = xo
lly = yo
urx = llx + w
ury = lly + h
rotation = 0
if isLandscape:
llx, lly, urx, ury = lly, llx, ury, urx
xo, yo = 72*paperHeight - yo, xo
rotation = 90
bbox = (llx, lly, urx, ury)
# generate PostScript code for the figure and store it in a string
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
dryrun = kwargs.get("dryrun", False)
if dryrun:
class NullWriter(object):
def write(self, *kl, **kwargs):
pass
self._pswriter = NullWriter()
else:
self._pswriter = io.StringIO()
# mixed mode rendering
_bbox_inches_restore = kwargs.pop("bbox_inches_restore", None)
ps_renderer = self._renderer_class(width, height, self._pswriter,
imagedpi=dpi)
renderer = MixedModeRenderer(self.figure,
width, height, dpi, ps_renderer,
bbox_inches_restore=_bbox_inches_restore)
self.figure.draw(renderer)
if dryrun: # return immediately if dryrun (tightbbox=True)
return
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
def print_figure_impl():
# write the PostScript headers
if isEPSF: print("%!PS-Adobe-3.0 EPSF-3.0", file=fh)
else: print("%!PS-Adobe-3.0", file=fh)
if title: print("%%Title: "+title, file=fh)
print(("%%Creator: matplotlib version "
+__version__+", http://matplotlib.org/"), file=fh)
print("%%CreationDate: "+time.ctime(time.time()), file=fh)
print("%%Orientation: " + orientation, file=fh)
if not isEPSF: print("%%DocumentPaperSizes: "+papertype, file=fh)
print("%%%%BoundingBox: %d %d %d %d" % bbox, file=fh)
if not isEPSF: print("%%Pages: 1", file=fh)
print("%%EndComments", file=fh)
Ndict = len(psDefs)
print("%%BeginProlog", file=fh)
if not rcParams['ps.useafm']:
Ndict += len(ps_renderer.used_characters)
print("/mpldict %d dict def"%Ndict, file=fh)
print("mpldict begin", file=fh)
for d in psDefs:
d=d.strip()
for l in d.split('\n'):
print(l.strip(), file=fh)
if not rcParams['ps.useafm']:
for font_filename, chars in six.itervalues(ps_renderer.used_characters):
if len(chars):
font = FT2Font(font_filename)
cmap = font.get_charmap()
glyph_ids = []
for c in chars:
gind = cmap.get(c) or 0
glyph_ids.append(gind)
fonttype = rcParams['ps.fonttype']
# Can not use more than 255 characters from a
# single font for Type 3
if len(glyph_ids) > 255:
fonttype = 42
# The ttf to ps (subsetting) support doesn't work for
# OpenType fonts that are Postscript inside (like the
# STIX fonts). This will simply turn that off to avoid
# errors.
if is_opentype_cff_font(font_filename):
raise RuntimeError("OpenType CFF fonts can not be saved using the internal Postscript backend at this time.\nConsider using the Cairo backend.")
else:
fh.flush()
convert_ttf_to_ps(
font_filename.encode(sys.getfilesystemencoding()),
fh, fonttype, glyph_ids)
print("end", file=fh)
print("%%EndProlog", file=fh)
if not isEPSF: print("%%Page: 1 1", file=fh)
print("mpldict begin", file=fh)
#print >>fh, "gsave"
print("%s translate"%_nums_to_str(xo, yo), file=fh)
if rotation: print("%d rotate"%rotation, file=fh)
print("%s clipbox"%_nums_to_str(width*72, height*72, 0, 0), file=fh)
# Disable any sort of miter limit
print("%s setmiterlimit" % 100000, file=fh)
# write the figure
content = self._pswriter.getvalue()
if not isinstance(content, six.text_type):
content = content.decode('ascii')
print(content, file=fh)
# write the trailer
#print >>fh, "grestore"
print("end", file=fh)
print("showpage", file=fh)
if not isEPSF: print("%%EOF", file=fh)
fh.flush()
if rcParams['ps.usedistiller']:
# We are going to use an external program to process the output.
# Write to a temporary file.
fd, tmpfile = mkstemp()
with io.open(fd, 'w', encoding='latin-1') as fh:
print_figure_impl()
else:
# Write directly to outfile.
if passed_in_file_object:
requires_unicode = file_requires_unicode(outfile)
if (not requires_unicode and
(six.PY3 or not isinstance(outfile, StringIO))):
fh = io.TextIOWrapper(outfile, encoding="latin-1")
# Prevent the io.TextIOWrapper from closing the
# underlying file
def do_nothing():
pass
fh.close = do_nothing
else:
fh = outfile
print_figure_impl()
else:
with io.open(outfile, 'w', encoding='latin-1') as fh:
print_figure_impl()
if rcParams['ps.usedistiller']:
if rcParams['ps.usedistiller'] == 'ghostscript':
gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
elif rcParams['ps.usedistiller'] == 'xpdf':
xpdf_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
if passed_in_file_object:
if file_requires_unicode(outfile):
with io.open(tmpfile, 'rb') as fh:
outfile.write(fh.read().decode('latin-1'))
else:
with io.open(tmpfile, 'rb') as fh:
outfile.write(fh.read())
else:
with io.open(outfile, 'w') as fh:
pass
mode = os.stat(outfile).st_mode
shutil.move(tmpfile, outfile)
os.chmod(outfile, mode)
def _print_figure_tex(self, outfile, format, dpi, facecolor, edgecolor,
orientation, isLandscape, papertype,
**kwargs):
"""
If text.usetex is True in rc, a temporary pair of tex/eps files
are created to allow tex to manage the text layout via the PSFrags
package. These files are processed to yield the final ps or eps file.
"""
isEPSF = format == 'eps'
if is_string_like(outfile):
title = outfile
elif is_writable_file_like(outfile):
title = None
else:
raise ValueError("outfile must be a path or a file-like object")
self.figure.dpi = 72 # ignore the dpi kwarg
width, height = self.figure.get_size_inches()
xo = 0
yo = 0
l, b, w, h = self.figure.bbox.bounds
llx = xo
lly = yo
urx = llx + w
ury = lly + h
bbox = (llx, lly, urx, ury)
# generate PostScript code for the figure and store it in a string
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
dryrun = kwargs.get("dryrun", False)
if dryrun:
class NullWriter(object):
def write(self, *kl, **kwargs):
pass
self._pswriter = NullWriter()
else:
self._pswriter = io.StringIO()
# mixed mode rendering
_bbox_inches_restore = kwargs.pop("bbox_inches_restore", None)
ps_renderer = self._renderer_class(width, height,
self._pswriter, imagedpi=dpi)
renderer = MixedModeRenderer(self.figure,
width, height, dpi, ps_renderer,
bbox_inches_restore=_bbox_inches_restore)
self.figure.draw(renderer)
if dryrun: # return immediately if dryrun (tightbbox=True)
return
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
# write to a temp file, we'll move it to outfile when done
fd, tmpfile = mkstemp()
with io.open(fd, 'w', encoding='latin-1') as fh:
# write the Encapsulated PostScript headers
print("%!PS-Adobe-3.0 EPSF-3.0", file=fh)
if title: print("%%Title: "+title, file=fh)
print(("%%Creator: matplotlib version "
+__version__+", http://matplotlib.org/"), file=fh)
print("%%CreationDate: "+time.ctime(time.time()), file=fh)
print("%%%%BoundingBox: %d %d %d %d" % bbox, file=fh)
print("%%EndComments", file=fh)
Ndict = len(psDefs)
print("%%BeginProlog", file=fh)
print("/mpldict %d dict def"%Ndict, file=fh)
print("mpldict begin", file=fh)
for d in psDefs:
d=d.strip()
for l in d.split('\n'):
print(l.strip(), file=fh)
print("end", file=fh)
print("%%EndProlog", file=fh)
print("mpldict begin", file=fh)
#print >>fh, "gsave"
print("%s translate"%_nums_to_str(xo, yo), file=fh)
print("%s clipbox"%_nums_to_str(width*72, height*72, 0, 0), file=fh)
# Disable any sort of miter limit
print("%d setmiterlimit" % 100000, file=fh)
# write the figure
print(self._pswriter.getvalue(), file=fh)
# write the trailer
#print >>fh, "grestore"
print("end", file=fh)
print("showpage", file=fh)
fh.flush()
if isLandscape: # now we are ready to rotate
isLandscape = True
width, height = height, width
bbox = (lly, llx, ury, urx)
# set the paper size to the figure size if isEPSF. The
# resulting ps file has the given size with correct bounding
# box so that there is no need to call 'pstoeps'
if isEPSF:
paperWidth, paperHeight = self.figure.get_size_inches()
if isLandscape:
paperWidth, paperHeight = paperHeight, paperWidth
else:
temp_papertype = _get_papertype(width, height)
if papertype=='auto':
papertype = temp_papertype
paperWidth, paperHeight = papersize[temp_papertype]
else:
paperWidth, paperHeight = papersize[papertype]
if (width>paperWidth or height>paperHeight) and isEPSF:
paperWidth, paperHeight = papersize[temp_papertype]
verbose.report('Your figure is too big to fit on %s paper. %s \
paper will be used to prevent clipping.'%(papertype, temp_papertype), 'helpful')
texmanager = ps_renderer.get_texmanager()
font_preamble = texmanager.get_font_preamble()
custom_preamble = texmanager.get_custom_preamble()
psfrag_rotated = convert_psfrags(tmpfile, ps_renderer.psfrag,
font_preamble,
custom_preamble, paperWidth, paperHeight,
orientation)
if rcParams['ps.usedistiller'] == 'ghostscript':
gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox,
rotated=psfrag_rotated)
elif rcParams['ps.usedistiller'] == 'xpdf':
xpdf_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox,
rotated=psfrag_rotated)
elif rcParams['text.usetex']:
if False: pass # for debugging
else: gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox,
rotated=psfrag_rotated)
if is_writable_file_like(outfile):
if file_requires_unicode(outfile):
with io.open(tmpfile, 'rb') as fh:
outfile.write(fh.read().decode('latin-1'))
else:
with io.open(tmpfile, 'rb') as fh:
outfile.write(fh.read())
else:
with io.open(outfile, 'wb') as fh:
pass
mode = os.stat(outfile).st_mode
shutil.move(tmpfile, outfile)
os.chmod(outfile, mode)
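# Illustrative sketch (not part of the original backend): saving a figure
# through this canvas.  The ``papertype`` and ``orientation`` keywords are the
# ones handled by ``_print_ps`` above; the file names are placeholders.
def _example_save_postscript():
    import matplotlib
    matplotlib.use("PS")
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot([0, 1, 2], [0, 1, 4])
    fig.savefig("figure.eps")                      # encapsulated PostScript
    fig.savefig("figure.ps", papertype="a4",       # stand-alone PostScript
                orientation="landscape")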
def convert_psfrags(tmpfile, psfrags, font_preamble, custom_preamble,
paperWidth, paperHeight, orientation):
"""
When we want to use the LaTeX backend with postscript, we write PSFrag tags
to a temporary postscript file, each one marking a position for LaTeX to
render some text. convert_psfrags generates a LaTeX document containing the
commands to convert those tags to text. LaTeX/dvips produces the postscript
file that includes the actual text.
"""
tmpdir = os.path.split(tmpfile)[0]
epsfile = tmpfile+'.eps'
shutil.move(tmpfile, epsfile)
latexfile = tmpfile+'.tex'
outfile = tmpfile+'.output'
dvifile = tmpfile+'.dvi'
psfile = tmpfile+'.ps'
if orientation=='landscape': angle = 90
else: angle = 0
if rcParams['text.latex.unicode']:
unicode_preamble = """\\usepackage{ucs}
\\usepackage[utf8x]{inputenc}"""
else:
unicode_preamble = ''
s = """\\documentclass{article}
%s
%s
%s
\\usepackage[dvips, papersize={%sin,%sin}, body={%sin,%sin}, margin={0in,0in}]{geometry}
\\usepackage{psfrag}
\\usepackage[dvips]{graphicx}
\\usepackage{color}
\\pagestyle{empty}
\\begin{document}
\\begin{figure}
\\centering
\\leavevmode
%s
\\includegraphics*[angle=%s]{%s}
\\end{figure}
\\end{document}
"""% (font_preamble, unicode_preamble, custom_preamble, paperWidth, paperHeight,
paperWidth, paperHeight,
'\n'.join(psfrags), angle, os.path.split(epsfile)[-1])
with io.open(latexfile, 'wb') as latexh:
if rcParams['text.latex.unicode']:
latexh.write(s.encode('utf8'))
else:
try:
latexh.write(s.encode('ascii'))
except UnicodeEncodeError:
verbose.report("You are using unicode and latex, but have "
"not enabled the matplotlib 'text.latex.unicode' "
"rcParam.", 'helpful')
raise
    # the split drive part of the command is necessary for windows users with
    # multiple drives
if sys.platform == 'win32': precmd = '%s &&'% os.path.splitdrive(tmpdir)[0]
else: precmd = ''
#Replace \\ for / so latex does not think there is a function call
latexfile = latexfile.replace("\\", "/")
# Replace ~ so Latex does not think it is line break
latexfile = latexfile.replace("~", "\\string~")
command = '%s cd "%s" && latex -interaction=nonstopmode "%s" > "%s"'\
%(precmd, tmpdir, latexfile, outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
with io.open(outfile, 'rb') as fh:
if exit_status:
raise RuntimeError('LaTeX was not able to process your file:\
\nHere is the full report generated by LaTeX: \n\n%s'% fh.read())
else:
verbose.report(fh.read(), 'debug')
os.remove(outfile)
command = '%s cd "%s" && dvips -q -R0 -o "%s" "%s" > "%s"'%(precmd, tmpdir,
os.path.split(psfile)[-1], os.path.split(dvifile)[-1], outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
with io.open(outfile, 'rb') as fh:
if exit_status:
raise RuntimeError('dvips was not able to \
process the following file:\n%s\nHere is the full report generated by dvips: \
\n\n'% dvifile + fh.read())
else:
verbose.report(fh.read(), 'debug')
os.remove(outfile)
os.remove(epsfile)
shutil.move(psfile, tmpfile)
# check if the dvips created a ps in landscape paper. Somehow,
# above latex+dvips results in a ps file in a landscape mode for a
# certain figure sizes (e.g., 8.3in,5.8in which is a5). And the
    # bounding box of the final output got messed up. We check to see if
# the generated ps file is in landscape and return this
# information. The return value is used in pstoeps step to recover
# the correct bounding box. 2010-06-05 JJL
with io.open(tmpfile) as fh:
if "Landscape" in fh.read(1000):
psfrag_rotated = True
else:
psfrag_rotated = False
if not debugPS:
for fname in glob.glob(tmpfile+'.*'):
os.remove(fname)
return psfrag_rotated
def gs_distill(tmpfile, eps=False, ptype='letter', bbox=None, rotated=False):
"""
Use ghostscript's pswrite or epswrite device to distill a file.
This yields smaller files without illegal encapsulated postscript
operators. The output is low-level, converting text to outlines.
"""
if eps: paper_option = "-dEPSCrop"
else: paper_option = "-sPAPERSIZE=%s" % ptype
psfile = tmpfile + '.ps'
outfile = tmpfile + '.output'
dpi = rcParams['ps.distiller.res']
gs_exe = ps_backend_helper.gs_exe
if ps_backend_helper.supports_ps2write: # gs version >= 9
device_name = "ps2write"
else:
device_name = "pswrite"
command = '%s -dBATCH -dNOPAUSE -r%d -sDEVICE=%s %s -sOutputFile="%s" \
"%s" > "%s"'% (gs_exe, dpi, device_name,
paper_option, psfile, tmpfile, outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
with io.open(outfile, 'rb') as fh:
if exit_status:
raise RuntimeError('ghostscript was not able to process \
your image.\nHere is the full report generated by ghostscript:\n\n' + fh.read())
else:
verbose.report(fh.read(), 'debug')
os.remove(outfile)
os.remove(tmpfile)
shutil.move(psfile, tmpfile)
# While it is best if above steps preserve the original bounding
# box, there seem to be cases when it is not. For those cases,
# the original bbox can be restored during the pstoeps step.
if eps:
# For some versions of gs, above steps result in an ps file
# where the original bbox is no more correct. Do not adjust
# bbox for now.
if ps_backend_helper.supports_ps2write:
            # for gs version >= 9 w/ ps2write device
pstoeps(tmpfile, bbox, rotated=rotated)
else:
pstoeps(tmpfile)
def xpdf_distill(tmpfile, eps=False, ptype='letter', bbox=None, rotated=False):
"""
Use ghostscript's ps2pdf and xpdf's/poppler's pdftops to distill a file.
This yields smaller files without illegal encapsulated postscript
operators. This distiller is preferred, generating high-level postscript
output that treats text as text.
"""
pdffile = tmpfile + '.pdf'
psfile = tmpfile + '.ps'
outfile = tmpfile + '.output'
if eps: paper_option = "-dEPSCrop"
else: paper_option = "-sPAPERSIZE=%s" % ptype
command = 'ps2pdf -dAutoFilterColorImages=false \
-dAutoFilterGrayImages=false -sGrayImageFilter=FlateEncode \
-sColorImageFilter=FlateEncode %s "%s" "%s" > "%s"'% \
(paper_option, tmpfile, pdffile, outfile)
if sys.platform == 'win32': command = command.replace('=', '#')
verbose.report(command, 'debug')
exit_status = os.system(command)
with io.open(outfile, 'rb') as fh:
if exit_status:
raise RuntimeError('ps2pdf was not able to process your \
image.\nHere is the report generated by ghostscript:\n\n' + fh.read())
else:
verbose.report(fh.read(), 'debug')
os.remove(outfile)
command = 'pdftops -paper match -level2 "%s" "%s" > "%s"'% \
(pdffile, psfile, outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
with io.open(outfile, 'rb') as fh:
if exit_status:
raise RuntimeError('pdftops was not able to process your \
image.\nHere is the full report generated by pdftops: \n\n' + fh.read())
else:
verbose.report(fh.read(), 'debug')
os.remove(outfile)
os.remove(tmpfile)
shutil.move(psfile, tmpfile)
if eps:
pstoeps(tmpfile)
for fname in glob.glob(tmpfile+'.*'):
os.remove(fname)
def get_bbox_header(lbrt, rotated=False):
"""
    Return a postscript header string for the given bbox lbrt=(l, b, r, t).
Optionally, return rotate command.
"""
l, b, r, t = lbrt
if rotated:
rotate = "%.2f %.2f translate\n90 rotate" % (l+r, 0)
else:
rotate = ""
bbox_info = '%%%%BoundingBox: %d %d %d %d' % (l, b, np.ceil(r), np.ceil(t))
hires_bbox_info = '%%%%HiResBoundingBox: %.6f %.6f %.6f %.6f' % (l, b, r, t)
return '\n'.join([bbox_info, hires_bbox_info]), rotate
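# Illustrative sketch (not part of the original backend): for a hypothetical
# bounding box, ``get_bbox_header`` returns a ``%%BoundingBox`` line with the
# upper edges rounded up to integers plus a ``%%HiResBoundingBox`` line with
# the exact floats, and an empty rotate command when ``rotated`` is False.
def _example_bbox_header():
    header, rotate = get_bbox_header((18.0, 18.0, 594.5, 414.25))
    # header splits into:
    #   %%BoundingBox: 18 18 595 415
    #   %%HiResBoundingBox: 18.000000 18.000000 594.500000 414.250000
    return header, rotate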
# get_bbox is deprecated. I don't see any reason to use ghostscript to
# find the bounding box, as the required bounding box is already known.
def get_bbox(tmpfile, bbox):
"""
Use ghostscript's bbox device to find the center of the bounding box. Return
an appropriately sized bbox centered around that point. A bit of a hack.
"""
outfile = tmpfile + '.output'
gs_exe = ps_backend_helper.gs_exe
command = '%s -dBATCH -dNOPAUSE -sDEVICE=bbox "%s"' %\
(gs_exe, tmpfile)
verbose.report(command, 'debug')
stdin, stdout, stderr = os.popen3(command)
verbose.report(stdout.read(), 'debug-annoying')
bbox_info = stderr.read()
verbose.report(bbox_info, 'helpful')
bbox_found = re.search('%%HiResBoundingBox: .*', bbox_info)
if bbox_found:
bbox_info = bbox_found.group()
else:
raise RuntimeError('Ghostscript was not able to extract a bounding box.\
Here is the Ghostscript output:\n\n%s'% bbox_info)
l, b, r, t = [float(i) for i in bbox_info.split()[-4:]]
# this is a hack to deal with the fact that ghostscript does not return the
# intended bbox, but a tight bbox. For now, we just center the ink in the
# intended bbox. This is not ideal, users may intend the ink to not be
# centered.
if bbox is None:
l, b, r, t = (l-1, b-1, r+1, t+1)
else:
x = (l+r)/2
y = (b+t)/2
dx = (bbox[2]-bbox[0])/2
dy = (bbox[3]-bbox[1])/2
l,b,r,t = (x-dx, y-dy, x+dx, y+dy)
bbox_info = '%%%%BoundingBox: %d %d %d %d' % (l, b, np.ceil(r), np.ceil(t))
hires_bbox_info = '%%%%HiResBoundingBox: %.6f %.6f %.6f %.6f' % (l, b, r, t)
return '\n'.join([bbox_info, hires_bbox_info])
def pstoeps(tmpfile, bbox=None, rotated=False):
"""
Convert the postscript to encapsulated postscript. The bbox of
the eps file will be replaced with the given *bbox* argument. If
None, original bbox will be used.
"""
# if rotated==True, the output eps file need to be rotated
if bbox:
bbox_info, rotate = get_bbox_header(bbox, rotated=rotated)
else:
bbox_info, rotate = None, None
epsfile = tmpfile + '.eps'
with io.open(epsfile, 'wb') as epsh:
write = epsh.write
with io.open(tmpfile, 'rb') as tmph:
line = tmph.readline()
# Modify the header:
while line:
if line.startswith(b'%!PS'):
write(b"%!PS-Adobe-3.0 EPSF-3.0\n")
if bbox:
write(bbox_info.encode('ascii') + b'\n')
elif line.startswith(b'%%EndComments'):
write(line)
write(b'%%BeginProlog\n')
write(b'save\n')
write(b'countdictstack\n')
write(b'mark\n')
write(b'newpath\n')
write(b'/showpage {} def\n')
write(b'/setpagedevice {pop} def\n')
write(b'%%EndProlog\n')
write(b'%%Page 1 1\n')
if rotate:
write(rotate.encode('ascii') + b'\n')
break
elif bbox and (line.startswith(b'%%Bound') \
or line.startswith(b'%%HiResBound') \
or line.startswith(b'%%DocumentMedia') \
or line.startswith(b'%%Pages')):
pass
else:
write(line)
line = tmph.readline()
# Now rewrite the rest of the file, and modify the trailer.
# This is done in a second loop such that the header of the embedded
# eps file is not modified.
line = tmph.readline()
while line:
if line.startswith(b'%%EOF'):
write(b'cleartomark\n')
write(b'countdictstack\n')
write(b'exch sub { end } repeat\n')
write(b'restore\n')
write(b'showpage\n')
write(b'%%EOF\n')
elif line.startswith(b'%%PageBoundingBox'):
pass
else:
write(line)
line = tmph.readline()
os.remove(tmpfile)
shutil.move(epsfile, tmpfile)
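# Hedged usage sketch (illustrative; the file path below is hypothetical):
#
#     pstoeps('/tmp/figure.ps', bbox=(0, 0, 612, 792), rotated=False)
#
# rewrites '/tmp/figure.ps' in place as an EPSF-3.0 file, replacing its
# bounding-box comments and wrapping the page in the save/restore prolog
# emitted above.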
class FigureManagerPS(FigureManagerBase):
pass
# The following Python dictionary psDefs contains the entries for the
# PostScript dictionary mpldict. This dictionary implements most of
# the matplotlib primitives and some abbreviations.
#
# References:
# http://www.adobe.com/products/postscript/pdfs/PLRM.pdf
# http://www.mactech.com/articles/mactech/Vol.09/09.04/PostscriptTutorial/
# http://www.math.ubc.ca/people/faculty/cass/graphics/text/www/
#
# The usage comments use the notation of the operator summary
# in the PostScript Language reference manual.
psDefs = [
# x y *m* -
"/m { moveto } bind def",
# x y *l* -
"/l { lineto } bind def",
# x y *r* -
"/r { rlineto } bind def",
# x1 y1 x2 y2 x y *c* -
"/c { curveto } bind def",
# *closepath* -
"/cl { closepath } bind def",
# w h x y *box* -
"""/box {
m
1 index 0 r
0 exch r
neg 0 r
cl
} bind def""",
# w h x y *clipbox* -
"""/clipbox {
box
clip
newpath
} bind def""",
]
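# Illustrative only (an assumed snippet, not emitted verbatim by this
# backend): once mpldict is installed, a simple stroked path and clipping
# rectangle could be written in the generated PostScript as
#
#     10 10 m
#     100 10 l
#     stroke
#     80 40 10 10 clipbox
#
# using the abbreviations defined above (x y m, x y l, w h x y clipbox).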
FigureCanvas = FigureCanvasPS
FigureManager = FigureManagerPS
| mit |
jblackburne/scikit-learn | sklearn/ensemble/weight_boosting.py | 12 | 40740 | """Weight Boosting
This module contains weight boosting estimators for both classification and
regression.
The module structure is the following:
- The ``BaseWeightBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ from each other in the loss function that is optimized.
- ``AdaBoostClassifier`` implements adaptive boosting (AdaBoost-SAMME) for
classification problems.
- ``AdaBoostRegressor`` implements adaptive boosting (AdaBoost.R2) for
regression problems.
"""
# Authors: Noel Dawe <[email protected]>
# Gilles Louppe <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# Arnaud Joly <[email protected]>
#
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from numpy.core.umath_tests import inner1d
from .base import BaseEnsemble
from ..base import ClassifierMixin, RegressorMixin, is_regressor
from ..externals import six
from ..externals.six.moves import zip
from ..externals.six.moves import xrange as range
from .forest import BaseForest
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..tree.tree import BaseDecisionTree
from ..tree._tree import DTYPE
from ..utils import check_array, check_X_y, check_random_state
from ..metrics import accuracy_score, r2_score
from sklearn.utils.validation import has_fit_parameter, check_is_fitted
__all__ = [
'AdaBoostClassifier',
'AdaBoostRegressor',
]
class BaseWeightBoosting(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Base class for AdaBoost estimators.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator=None,
n_estimators=50,
estimator_params=tuple(),
learning_rate=1.,
random_state=None):
super(BaseWeightBoosting, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.learning_rate = learning_rate
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier/regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR. The dtype is
forced to DTYPE from tree._tree if the base classifier of this
ensemble weighted boosting classifier is a tree or forest.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check parameters
if self.learning_rate <= 0:
raise ValueError("learning_rate must be greater than zero")
if (self.base_estimator is None or
isinstance(self.base_estimator, (BaseDecisionTree,
BaseForest))):
dtype = DTYPE
accept_sparse = 'csc'
else:
dtype = None
accept_sparse = ['csr', 'csc']
X, y = check_X_y(X, y, accept_sparse=accept_sparse, dtype=dtype,
y_numeric=is_regressor(self))
if sample_weight is None:
# Initialize weights to 1 / n_samples
sample_weight = np.empty(X.shape[0], dtype=np.float64)
sample_weight[:] = 1. / X.shape[0]
else:
# Normalize existing weights
sample_weight = sample_weight / sample_weight.sum(dtype=np.float64)
# Check that the sample weights sum is positive
if sample_weight.sum() <= 0:
raise ValueError(
"Attempting to fit with a non-positive "
"weighted number of samples.")
# Check parameters
self._validate_estimator()
# Clear any previous fit results
self.estimators_ = []
self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float64)
self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float64)
for iboost in range(self.n_estimators):
# Boosting step
sample_weight, estimator_weight, estimator_error = self._boost(
iboost,
X, y,
sample_weight)
# Early termination
if sample_weight is None:
break
self.estimator_weights_[iboost] = estimator_weight
self.estimator_errors_[iboost] = estimator_error
# Stop if error is zero
if estimator_error == 0:
break
sample_weight_sum = np.sum(sample_weight)
# Stop if the sum of sample weights has become non-positive
if sample_weight_sum <= 0:
break
if iboost < self.n_estimators - 1:
# Normalize
sample_weight /= sample_weight_sum
return self
@abstractmethod
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
Warning: This method needs to be overridden by subclasses.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
pass
def staged_score(self, X, y, sample_weight=None):
"""Return staged scores for X, y.
This generator method yields the ensemble score after each iteration of
boosting and therefore allows monitoring, such as to determine the
score on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like, shape = [n_samples]
Labels for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
z : float
"""
for y_pred in self.staged_predict(X):
if isinstance(self, ClassifierMixin):
yield accuracy_score(y, y_pred, sample_weight=sample_weight)
else:
yield r2_score(y, y_pred, sample_weight=sample_weight)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
try:
norm = self.estimator_weights_.sum()
return (sum(weight * clf.feature_importances_ for weight, clf
in zip(self.estimator_weights_, self.estimators_))
/ norm)
except AttributeError:
raise AttributeError(
"Unable to compute feature importances "
"since base_estimator does not have a "
"feature_importances_ attribute")
def _validate_X_predict(self, X):
"""Ensure that X is in the proper format"""
if (self.base_estimator is None or
isinstance(self.base_estimator,
(BaseDecisionTree, BaseForest))):
X = check_array(X, accept_sparse='csr', dtype=DTYPE)
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
return X
def _samme_proba(estimator, n_classes, X):
"""Calculate algorithm 4, step 2, equation c) of Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
proba = estimator.predict_proba(X)
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps
log_proba = np.log(proba)
return (n_classes - 1) * (log_proba - (1. / n_classes)
* log_proba.sum(axis=1)[:, np.newaxis])
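# Numeric sketch of the transform above (assumed values, for illustration
# only): with n_classes=3 and predict_proba returning [0.2, 0.3, 0.5] for a
# sample, the returned row is
#     2 * (log([0.2, 0.3, 0.5]) - mean(log([0.2, 0.3, 0.5]))),
# a zero-sum vector whose largest entry corresponds to the most probable
# class, as required by algorithm 4, step 2, equation c) of Zhu et al.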
class AdaBoostClassifier(BaseWeightBoosting, ClassifierMixin):
"""An AdaBoost classifier.
An AdaBoost [1] classifier is a meta-estimator that begins by fitting a
classifier on the original dataset and then fits additional copies of the
classifier on the same dataset but where the weights of incorrectly
classified instances are adjusted such that subsequent classifiers focus
more on difficult cases.
This class implements the algorithm known as AdaBoost-SAMME [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeClassifier)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required, as well as proper `classes_`
and `n_classes_` attributes.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each classifier by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
algorithm : {'SAMME', 'SAMME.R'}, optional (default='SAMME.R')
If 'SAMME.R' then use the SAMME.R real boosting algorithm.
``base_estimator`` must support calculation of class probabilities.
If 'SAMME' then use the SAMME discrete boosting algorithm.
The SAMME.R algorithm typically converges faster than SAMME,
achieving a lower test error with fewer boosting iterations.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes]
The classes labels.
n_classes_ : int
The number of classes.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Classification error for each estimator in the boosted
ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostRegressor, GradientBoostingClassifier, DecisionTreeClassifier
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
algorithm='SAMME.R',
random_state=None):
super(AdaBoostClassifier, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.algorithm = algorithm
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
``1 / n_samples``.
Returns
-------
self : object
Returns self.
"""
# Check that algorithm is supported
if self.algorithm not in ('SAMME', 'SAMME.R'):
raise ValueError("algorithm %s is not supported" % self.algorithm)
# Fit
return super(AdaBoostClassifier, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostClassifier, self)._validate_estimator(
default=DecisionTreeClassifier(max_depth=1))
# SAMME-R requires predict_proba-enabled base estimators
if self.algorithm == 'SAMME.R':
if not hasattr(self.base_estimator_, 'predict_proba'):
raise TypeError(
"AdaBoostClassifier with algorithm='SAMME.R' requires "
"that the weak learner supports the calculation of class "
"probabilities with a predict_proba method.\n"
"Please change the base estimator or set "
"algorithm='SAMME' instead.")
if not has_fit_parameter(self.base_estimator_, "sample_weight"):
raise ValueError("%s doesn't support sample_weight."
% self.base_estimator_.__class__.__name__)
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
Perform a single boost according to the real multi-class SAMME.R
algorithm or to the discrete SAMME algorithm and return the updated
sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
if self.algorithm == 'SAMME.R':
return self._boost_real(iboost, X, y, sample_weight)
else: # elif self.algorithm == "SAMME":
return self._boost_discrete(iboost, X, y, sample_weight)
def _boost_real(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME.R real algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict_proba = estimator.predict_proba(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1),
axis=0)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
# Construct y coding as described in Zhu et al [2]:
#
# y_k = 1 if c == k else -1 / (K - 1)
#
# where K == n_classes_ and c, k in [0, K) are indices along the second
# axis of the y coding with c being the index corresponding to the true
# class label.
n_classes = self.n_classes_
classes = self.classes_
y_codes = np.array([-1. / (n_classes - 1), 1.])
y_coding = y_codes.take(classes == y[:, np.newaxis])
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba = y_predict_proba # alias for readability
proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps
# Boost weight using multi-class AdaBoost SAMME.R alg
estimator_weight = (-1. * self.learning_rate
* (((n_classes - 1.) / n_classes) *
inner1d(y_coding, np.log(y_predict_proba))))
# Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, 1., estimator_error
def _boost_discrete(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME discrete algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict = estimator.predict(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
n_classes = self.n_classes_
# Stop if the error is at least as bad as random guessing
if estimator_error >= 1. - (1. / n_classes):
self.estimators_.pop(-1)
if len(self.estimators_) == 0:
raise ValueError('BaseClassifier in AdaBoostClassifier '
'ensemble is worse than random, ensemble '
'can not be fit.')
return None, None, None
# Boost weight using multi-class AdaBoost SAMME alg
estimator_weight = self.learning_rate * (
np.log((1. - estimator_error) / estimator_error) +
np.log(n_classes - 1.))
        # Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight * incorrect *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, estimator_weight, estimator_error
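    # Illustrative numbers (assumed, not taken from the references): with
    # n_classes=3 and estimator_error=0.25, the SAMME boost weight above is
    # learning_rate * (log(0.75 / 0.25) + log(2)) ~= 1.79 * learning_rate,
    # and only the misclassified samples with positive weight get their
    # sample weights multiplied by exp(estimator_weight).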
def predict(self, X):
"""Predict classes for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted classes.
"""
pred = self.decision_function(X)
if self.n_classes_ == 2:
return self.classes_.take(pred > 0, axis=0)
return self.classes_.take(np.argmax(pred, axis=1), axis=0)
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted classes.
"""
n_classes = self.n_classes_
classes = self.classes_
if n_classes == 2:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(pred > 0, axis=0))
else:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(
np.argmax(pred, axis=1), axis=0))
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : array, shape = [n_samples, k]
The decision function of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
            Binary classification is a special case with ``k == 1``,
otherwise ``k==n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
pred = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
pred = sum((estimator.predict(X) == classes).T * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
pred /= self.estimator_weights_.sum()
if n_classes == 2:
pred[:, 0] *= -1
return pred.sum(axis=1)
return pred
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each boosting iteration.
This method allows monitoring (i.e. determine error on testing set)
after each boosting iteration.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
            Binary classification is a special case with ``k == 1``,
otherwise ``k==n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_pred = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_pred = estimator.predict(X)
current_pred = (current_pred == classes).T * weight
if pred is None:
pred = current_pred
else:
pred += current_pred
if n_classes == 2:
tmp_pred = np.copy(pred)
tmp_pred[:, 0] *= -1
yield (tmp_pred / norm).sum(axis=1)
else:
yield pred / norm
def predict_proba(self, X):
"""Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : array of shape = [n_samples, n_classes]
            The class probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
check_is_fitted(self, "n_classes_")
n_classes = self.n_classes_
X = self._validate_X_predict(X)
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
proba = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
proba = sum(estimator.predict_proba(X) * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
proba /= self.estimator_weights_.sum()
proba = np.exp((1. / (n_classes - 1)) * proba)
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
def staged_predict_proba(self, X):
"""Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
This generator method yields the ensemble predicted class probabilities
after each iteration of boosting and therefore allows monitoring, such
as to determine the predicted class probabilities on a test set after
each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : generator of array, shape = [n_samples, n_classes]
            The class probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
X = self._validate_X_predict(X)
n_classes = self.n_classes_
proba = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_proba = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_proba = estimator.predict_proba(X) * weight
if proba is None:
proba = current_proba
else:
proba += current_proba
real_proba = np.exp((1. / (n_classes - 1)) * (proba / norm))
normalizer = real_proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
real_proba /= normalizer
yield real_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
        The predicted class log-probabilities of an input sample are computed
the weighted mean predicted class log-probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : array of shape = [n_samples, n_classes]
            The class log-probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
return np.log(self.predict_proba(X))
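# Hedged usage sketch (illustrative; the toy dataset below is an assumption,
# not part of this module):
#
#     >>> from sklearn.datasets import make_classification
#     >>> X, y = make_classification(n_samples=100, random_state=0)
#     >>> clf = AdaBoostClassifier(n_estimators=50, random_state=0)
#     >>> clf.fit(X, y).predict(X[:2])
#     >>> clf.predict_proba(X[:2]).shape   # (2, 2)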
class AdaBoostRegressor(BaseWeightBoosting, RegressorMixin):
"""An AdaBoost regressor.
An AdaBoost [1] regressor is a meta-estimator that begins by fitting a
regressor on the original dataset and then fits additional copies of the
regressor on the same dataset but where the weights of instances are
adjusted according to the error of the current prediction. As such,
subsequent regressors focus more on difficult cases.
This class implements the algorithm known as AdaBoost.R2 [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeRegressor)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each regressor by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
loss : {'linear', 'square', 'exponential'}, optional (default='linear')
The loss function to use when updating the weights after each
boosting iteration.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Regression error for each estimator in the boosted ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostClassifier, GradientBoostingRegressor, DecisionTreeRegressor
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
loss='linear',
random_state=None):
super(AdaBoostRegressor, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.loss = loss
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (real numbers).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check loss
if self.loss not in ('linear', 'square', 'exponential'):
raise ValueError(
"loss must be 'linear', 'square', or 'exponential'")
# Fit
return super(AdaBoostRegressor, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostRegressor, self)._validate_estimator(
default=DecisionTreeRegressor(max_depth=3))
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost for regression
Perform a single boost according to the AdaBoost.R2 algorithm and
return the updated sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The regression error for the current boost.
If None then boosting has terminated early.
"""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
generator = check_random_state(self.random_state)
# Weighted sampling of the training set with replacement
# For NumPy >= 1.7.0 use np.random.choice
cdf = sample_weight.cumsum()
cdf /= cdf[-1]
uniform_samples = generator.random_sample(X.shape[0])
bootstrap_idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
bootstrap_idx = np.array(bootstrap_idx, copy=False)
# Fit on the bootstrapped sample and obtain a prediction
# for all samples in the training set
estimator.fit(X[bootstrap_idx], y[bootstrap_idx])
y_predict = estimator.predict(X)
error_vect = np.abs(y_predict - y)
error_max = error_vect.max()
if error_max != 0.:
error_vect /= error_max
if self.loss == 'square':
error_vect **= 2
elif self.loss == 'exponential':
error_vect = 1. - np.exp(- error_vect)
# Calculate the average loss
estimator_error = (sample_weight * error_vect).sum()
if estimator_error <= 0:
# Stop if fit is perfect
return sample_weight, 1., 0.
elif estimator_error >= 0.5:
# Discard current estimator only if it isn't the only one
if len(self.estimators_) > 1:
self.estimators_.pop(-1)
return None, None, None
beta = estimator_error / (1. - estimator_error)
# Boost weight using AdaBoost.R2 alg
estimator_weight = self.learning_rate * np.log(1. / beta)
if not iboost == self.n_estimators - 1:
sample_weight *= np.power(
beta,
(1. - error_vect) * self.learning_rate)
return sample_weight, estimator_weight, estimator_error
def _get_median_predict(self, X, limit):
# Evaluate predictions of all estimators
predictions = np.array([
est.predict(X) for est in self.estimators_[:limit]]).T
# Sort the predictions
sorted_idx = np.argsort(predictions, axis=1)
# Find index of median prediction for each sample
weight_cdf = self.estimator_weights_[sorted_idx].cumsum(axis=1)
median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis]
median_idx = median_or_above.argmax(axis=1)
median_estimators = sorted_idx[np.arange(X.shape[0]), median_idx]
# Return median predictions
return predictions[np.arange(X.shape[0]), median_estimators]
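    # Tiny illustration of the weighted-median selection above (assumed
    # numbers): for one sample whose sorted predictions are [1.0, 2.0, 5.0]
    # with matching estimator weights [0.2, 0.5, 0.3], the weight cdf is
    # [0.2, 0.7, 1.0]; the first entry >= 0.5 * 1.0 has index 1, so the
    # returned prediction is 2.0.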
def predict(self, X):
"""Predict regression value for X.
The predicted regression value of an input sample is computed
as the weighted median prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
return self._get_median_predict(X, len(self.estimators_))
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted regression value of an input sample is computed
as the weighted median prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
for i, _ in enumerate(self.estimators_, 1):
yield self._get_median_predict(X, limit=i)
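# Hedged usage sketch (illustrative; the toy dataset is an assumption):
#
#     >>> from sklearn.datasets import make_regression
#     >>> X, y = make_regression(n_samples=100, n_features=4, random_state=0)
#     >>> reg = AdaBoostRegressor(n_estimators=50, random_state=0)
#     >>> reg.fit(X, y).predict(X[:2])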
| bsd-3-clause |
TuKo/brainiak | brainiak/factoranalysis/tfa.py | 7 | 30560 | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Topographical Factor Analysis (TFA)
This implementation is based on the work in [Manning2014]_ and
[AndersonM2016]_.
.. [Manning2014] "Topographic factor analysis: a bayesian model for inferring
brain networks from neural data", J. R. Manning, R. Ranganath, K. A. Norman,
and D. M. Blei.PLoS One, vol. 9, no. 5, 2014.
.. [AndersonM2016] "Scaling Up Multi-Subject Neuroimaging Factor Analysis"
Michael J. Anderson, Mihai Capota, Javier S. Turek, Xia Zhu,
Theodore L. Willke, Yida Wang, Po-Hsuan Chen, Jeremy R. Manning,
Peter J. Ramadge, and Kenneth A. Norman
2016.
"""
# Authors: Xia Zhu (Intel Labs), Jeremy Manning (Dartmouth College) 2015~2016
from sklearn.base import BaseEstimator
from sklearn.metrics import mean_squared_error
from sklearn.cluster import KMeans
from scipy.optimize import least_squares
from scipy.optimize import linear_sum_assignment
from scipy.spatial import distance
from ..utils.utils import from_tri_2_sym, from_sym_2_tri
from . import tfa_extension # type: ignore
import numpy as np
import math
import gc
import logging
__all__ = [
"TFA",
]
logger = logging.getLogger(__name__)
class TFA(BaseEstimator):
"""Topographical Factor Analysis (TFA)
Given data from one subject, factorize it as a spatial factor F and
a weight matrix W.
Parameters
----------
max_iter : int, default: 10
Number of iterations to run the algorithm.
threshold : float, default: 1.0
Tolerance for terminating the parameter estimation
K : int, default: 50
Number of factors to compute
nlss_method : {'trf', 'dogbox', 'lm'}, default: 'trf'
Non-Linear Least Square (NLSS) algorithm used by scipy.least_suqares to
perform minimization. More information at
http://docs.scipy.org/doc/scipy-0.17.0/reference/generated/scipy.optimize.least_squares.html
nlss_loss: str or callable, default: 'linear'
Loss function used by scipy.least_squares.
More information at
http://docs.scipy.org/doc/scipy-0.17.0/reference/generated/scipy.optimize.least_squares.html
jac : {'2-point', '3-point', 'cs', callable}, default: '2-point'
Method of computing the Jacobian matrix.
More information at
http://docs.scipy.org/doc/scipy-0.17.0/reference/generated/scipy.optimize.least_squares.html
x_scale : float or array_like or 'jac', default: 1.0
Characteristic scale of each variable for scipy.least_suqares.
More information at
http://docs.scipy.org/doc/scipy-0.17.0/reference/generated/scipy.optimize.least_squares.html
tr_solver: {None, 'exact', 'lsmr'}, default: None
Method for solving trust-region subproblems, relevant only for 'trf'
and 'dogbox' methods.
More information at
http://docs.scipy.org/doc/scipy-0.17.0/reference/generated/scipy.optimize.least_squares.html
weight_method : {'rr','ols'}, default: 'rr'
Method for estimating weight matrix W given X and F.
'rr' means ridge regression, 'ols' means ordinary least square.
upper_ratio : float, default: 1.8
The upper bound of the ratio between factor's width
and maximum sigma of scanner coordinates.
lower_ratio : float, default: 0.02
The lower bound of the ratio between factor's width
and maximum sigma of scanner coordinates.
max_num_voxel : int, default: 5000
The maximum number of voxels to subsample.
max_num_tr : int, default: 500
The maximum number of TRs to subsample.
seed : int, default: 100
Seed for subsampling voxels and trs.
verbose : boolean, default: False
Verbose mode flag.
Attributes
----------
local_posterior_ : 1D array
Local posterior on subject's centers and widths
F_ : 2D array, in shape [n_voxel, K]
Latent factors of the subject
W_ : 2D array, in shape [K, n_tr]
Weight matrix of the subject
"""
def __init__(
self,
max_iter=10,
threshold=1.0,
K=50,
nlss_method='trf',
nlss_loss='soft_l1',
jac='2-point',
x_scale='jac',
tr_solver=None,
weight_method='rr',
upper_ratio=1.8,
lower_ratio=0.02,
max_num_tr=500,
max_num_voxel=5000,
seed=100,
verbose=False):
self.miter = max_iter
self.threshold = threshold
self.K = K
self.nlss_method = nlss_method
self.nlss_loss = nlss_loss
self.jac = jac
self.x_scale = x_scale
self.tr_solver = tr_solver
self.weight_method = weight_method
self.upper_ratio = upper_ratio
self.lower_ratio = lower_ratio
self.max_num_tr = max_num_tr
self.max_num_voxel = max_num_voxel
self.seed = seed
self.verbose = verbose
def set_K(self, K):
"""set K for the subject
Parameters
----------
K : integer
Number of latent factor.
Returns
-------
TFA
Returns the instance itself.
"""
self.K = K
return self
def set_prior(self, prior):
"""set prior for the subject
Parameters
----------
prior : 1D array, with K*(n_dim+1) elements
Subject prior of centers and widths.
Returns
-------
TFA
Returns the instance itself.
"""
self.local_prior = prior
return self
def set_seed(self, seed):
"""set seed for the subject
Parameters
----------
seed : int
Seed for subsampling voxels and trs
Returns
-------
TFA
Returns the instance itself.
"""
self.seed = seed
return self
def init_prior(self, R):
"""initialize prior for the subject
Returns
-------
TFA
Returns the instance itself.
"""
centers, widths = self.init_centers_widths(R)
# update prior
prior = np.zeros(self.K * (self.n_dim + 1))
self.set_centers(prior, centers)
self.set_widths(prior, widths)
self.set_prior(prior)
return self
def _assign_posterior(self):
"""assign posterior to prior based on Hungarian algorithm
Returns
-------
TFA
Returns the instance itself.
"""
prior_centers = self.get_centers(self.local_prior)
posterior_centers = self.get_centers(self.local_posterior_)
posterior_widths = self.get_widths(self.local_posterior_)
# linear assignment on centers
cost = distance.cdist(prior_centers, posterior_centers, 'euclidean')
_, col_ind = linear_sum_assignment(cost)
# reorder centers/widths based on cost assignment
self.set_centers(self.local_posterior_, posterior_centers[col_ind])
self.set_widths(self.local_posterior_, posterior_widths[col_ind])
return self
def _converged(self):
"""Check convergence based on maximum absolute difference
Returns
-------
converged : boolean
Whether the parameter estimation converged.
max_diff : float
Maximum absolute difference between prior and posterior.
"""
diff = self.local_prior - self.local_posterior_
max_diff = np.max(np.fabs(diff))
if self.verbose:
_, mse = self._mse_converged()
diff_ratio = np.sum(diff ** 2) / np.sum(self.local_posterior_ ** 2)
logger.info(
'tfa prior posterior max diff %f mse %f diff_ratio %f' %
((max_diff, mse, diff_ratio)))
if max_diff > self.threshold:
return False, max_diff
else:
return True, max_diff
def _mse_converged(self):
"""Check convergence based on mean squared error
Returns
-------
converged : boolean
Whether the parameter estimation converged.
mse : float
Mean squared error between prior and posterior.
"""
mse = mean_squared_error(self.local_prior, self.local_posterior_,
multioutput='uniform_average')
if mse > self.threshold:
return False, mse
else:
return True, mse
def get_map_offset(self):
"""Compute offset of prior/posterior
Returns
-------
map_offest : 1D array
The offset to different fields in prior/posterior
"""
nfield = 4
self.map_offset = np.zeros(nfield).astype(int)
field_size = self.K * np.array([self.n_dim, 1, self.cov_vec_size, 1])
for i in np.arange(nfield - 1) + 1:
self.map_offset[i] = self.map_offset[i - 1] + field_size[i - 1]
return self.map_offset
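    # Layout sketch for the offsets above (assumed K=10, n_dim=3, hence
    # cov_vec_size=6): map_offset == [0, 30, 40, 100], so a prior/posterior
    # vector stores centers in [0:30], widths in [30:40], the covariance of
    # the centers' mean in [40:100], and the variance of the widths' mean in
    # the remaining entries.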
def init_centers_widths(self, R):
"""Initialize prior of centers and widths
Returns
-------
centers : 2D array, with shape [K, n_dim]
Prior of factors' centers.
widths : 1D array, with shape [K, 1]
Prior of factors' widths.
"""
kmeans = KMeans(
init='k-means++',
n_clusters=self.K,
n_init=10,
random_state=100)
kmeans.fit(R)
centers = kmeans.cluster_centers_
widths = self._get_max_sigma(R) * np.ones((self.K, 1))
return centers, widths
def get_template(self, R):
"""Compute a template on latent factors
Parameters
----------
R : 2D array, in format [n_voxel, n_dim]
The scanner coordinate matrix of one subject's fMRI data
Returns
-------
template_prior : 1D array
The template prior.
template_centers_cov: 2D array, in shape [n_dim, n_dim]
The template on centers' covariance.
template_widths_var: float
The template on widths' variance
"""
centers, widths = self.init_centers_widths(R)
template_prior =\
np.zeros(self.K * (self.n_dim + 2 + self.cov_vec_size))
# template centers cov and widths var are const
template_centers_cov = np.cov(R.T) * math.pow(self.K, -2 / 3.0)
template_widths_var = self._get_max_sigma(R)
centers_cov_all = np.tile(from_sym_2_tri(template_centers_cov), self.K)
widths_var_all = np.tile(template_widths_var, self.K)
# initial mean of centers' mean
self.set_centers(template_prior, centers)
self.set_widths(template_prior, widths)
self.set_centers_mean_cov(template_prior, centers_cov_all)
self.set_widths_mean_var(template_prior, widths_var_all)
return template_prior, template_centers_cov, template_widths_var
def set_centers(self, estimation, centers):
"""Set estimation on centers
Parameters
----------
        estimation : 1D array
Either prior or posterior estimation
centers : 2D array, in shape [K, n_dim]
Estimation on centers
"""
estimation[0:self.map_offset[1]] = centers.ravel()
def set_widths(self, estimation, widths):
"""Set estimation on widths
Parameters
----------
        estimation : 1D array
            Either prior or posterior estimation
widths : 2D array, in shape [K, 1]
Estimation on widths
"""
estimation[self.map_offset[1]:self.map_offset[2]] = widths.ravel()
def set_centers_mean_cov(self, estimation, centers_mean_cov):
"""Set estimation on centers
Parameters
----------
estimation : 1D arrary
Either prior of posterior estimation
centers : 2D array, in shape [K, n_dim]
Estimation on centers
"""
estimation[self.map_offset[2]:self.map_offset[3]] =\
centers_mean_cov.ravel()
def set_widths_mean_var(self, estimation, widths_mean_var):
"""Set estimation on centers
Parameters
----------
estimation : 1D arrary
Either prior of posterior estimation
centers : 2D array, in shape [K, n_dim]
Estimation on centers
"""
estimation[self.map_offset[3]:] = widths_mean_var.ravel()
def get_centers(self, estimation):
"""Get estimation on centers
Parameters
----------
        estimation : 1D array
            Either prior or posterior estimation
Returns
-------
centers : 2D array, in shape [K, n_dim]
Estimation on centers
"""
centers = estimation[0:self.map_offset[1]]\
.reshape(self.K, self.n_dim)
return centers
def get_widths(self, estimation):
"""Get estimation on widths
Parameters
----------
        estimation : 1D array
            Either prior or posterior estimation
Returns
-------
        widths : 2D array, in shape [K, 1]
            Estimation on widths
"""
widths = estimation[self.map_offset[1]:self.map_offset[2]]\
.reshape(self.K, 1)
return widths
def get_centers_mean_cov(self, estimation):
"""Get estimation on the covariance of centers' mean
Parameters
----------
        estimation : 1D array
            Either prior or posterior estimation
Returns
-------
centers_mean_cov : 2D array, in shape [K, cov_vec_size]
Estimation of the covariance of centers' mean
"""
centers_mean_cov = estimation[self.map_offset[2]:self.map_offset[3]]\
.reshape(self.K, self.cov_vec_size)
return centers_mean_cov
def get_widths_mean_var(self, estimation):
"""Get estimation on the variance of widths' mean
Parameters
----------
        estimation : 1D array
            Either prior or posterior estimation
Returns
-------
widths_mean_var : 2D array, in shape [K, 1]
Estimation on variance of widths' mean
"""
widths_mean_var = \
estimation[self.map_offset[3]:].reshape(self.K, 1)
return widths_mean_var
def get_factors(self, unique_R, inds, centers, widths):
"""Calculate factors based on centers and widths
Parameters
----------
unique_R : a list of array,
Each element contains unique value in one dimension of
scanner coordinate matrix R.
inds : a list of array,
Each element contains the indices to reconstruct one
            dimension of original coordinate matrix from the unique
array.
centers : 2D array, with shape [K, n_dim]
The centers of factors.
widths : 1D array, with shape [K, 1]
The widths of factors.
Returns
-------
F : 2D array, with shape [n_voxel,self.K]
The latent factors from fMRI data.
"""
F = np.zeros((len(inds[0]), self.K))
tfa_extension.factor(
F,
centers,
widths,
unique_R[0],
unique_R[1],
unique_R[2],
inds[0],
inds[1],
inds[2])
return F
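    # Sketch of what tfa_extension.factor computes (stated as an assumption
    # about the compiled extension): each factor is an RBF over voxel
    # coordinates, roughly F[v, k] = exp(-||R[v] - centers[k]||^2 / widths[k]),
    # evaluated from the per-dimension unique coordinates and their indices.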
def get_weights(self, data, F):
"""Calculate weight matrix based on fMRI data and factors
Parameters
----------
data : 2D array, with shape [n_voxel, n_tr]
fMRI data from one subject
F : 2D array, with shape [n_voxel,self.K]
The latent factors from fMRI data.
Returns
-------
W : 2D array, with shape [K, n_tr]
The weight matrix from fMRI data.
"""
beta = np.var(data)
trans_F = F.T.copy()
W = np.zeros((self.K, data.shape[1]))
if self.weight_method == 'rr':
W = np.linalg.solve(trans_F.dot(F) + beta * np.identity(self.K),
trans_F.dot(data))
else:
W = np.linalg.solve(trans_F.dot(F), trans_F.dot(data))
return W
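    # In matrix form, the 'rr' branch above solves
    #     W = (F^T F + beta * I_K)^{-1} F^T X,  with beta = var(X),
    # while the 'ols' branch drops the beta * I_K ridge term.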
def _get_max_sigma(self, R):
"""Calculate maximum sigma of scanner RAS coordinates
Parameters
----------
R : 2D array, with shape [n_voxel, n_dim]
The coordinate matrix of fMRI data from one subject
Returns
-------
max_sigma : float
The maximum sigma of scanner coordinates.
"""
max_sigma = 2.0 * math.pow(np.nanmax(np.std(R, axis=0)), 2)
return max_sigma
def get_bounds(self, R):
"""Calculate lower and upper bounds for centers and widths
Parameters
----------
R : 2D array, with shape [n_voxel, n_dim]
The coordinate matrix of fMRI data from one subject
Returns
-------
bounds : 2-tuple of array_like, default: None
The lower and upper bounds on factor's centers and widths.
"""
max_sigma = self._get_max_sigma(R)
final_lower = np.zeros(self.K * (self.n_dim + 1))
final_lower[0:self.K * self.n_dim] =\
np.tile(np.nanmin(R, axis=0), self.K)
final_lower[self.K * self.n_dim:] =\
np.repeat(self.lower_ratio * max_sigma, self.K)
final_upper = np.zeros(self.K * (self.n_dim + 1))
final_upper[0:self.K * self.n_dim] =\
np.tile(np.nanmax(R, axis=0), self.K)
final_upper[self.K * self.n_dim:] =\
np.repeat(self.upper_ratio * max_sigma, self.K)
bounds = (final_lower, final_upper)
return bounds
def _residual_multivariate(
self,
estimate,
unique_R,
inds,
X,
W,
template_centers,
template_centers_mean_cov,
template_widths,
template_widths_mean_var_reci,
data_sigma):
"""Residual function for estimating centers and widths
Parameters
----------
estimate : 1D array
            Initial estimation on centers and widths
unique_R : a list of array,
Each element contains unique value in one dimension of
coordinate matrix R.
inds : a list of array,
Each element contains the indices to reconstruct one
            dimension of original coordinate matrix from the unique
array.
X : 2D array, with shape [n_voxel, n_tr]
fMRI data from one subject.
W : 2D array, with shape [K, n_tr]
The weight matrix.
template_centers: 2D array, with shape [K, n_dim]
The template prior on centers
template_centers_mean_cov: 2D array, with shape [K, cov_size]
The template prior on covariance of centers' mean
template_widths: 1D array
The template prior on widths
template_widths_mean_var_reci: 1D array
The reciprocal of template prior on variance of widths' mean
data_sigma: float
The variance of X.
Returns
-------
final_err : 1D array
The residual function for estimating centers.
"""
centers = self.get_centers(estimate)
widths = self.get_widths(estimate)
recon = X.size
other_err = 0 if template_centers is None else (2 * self.K)
final_err = np.zeros(recon + other_err)
F = self.get_factors(unique_R, inds, centers, widths)
sigma = np.zeros((1,))
sigma[0] = data_sigma
tfa_extension.recon(final_err[0:recon], X, F, W, sigma)
if other_err > 0:
# center error
for k in np.arange(self.K):
diff = (centers[k] - template_centers[k])
cov = from_tri_2_sym(template_centers_mean_cov[k], self.n_dim)
final_err[recon + k] = math.sqrt(
self.sample_scaling *
diff.dot(np.linalg.solve(cov, diff.T)))
# width error
base = recon + self.K
dist = template_widths_mean_var_reci *\
(widths - template_widths) ** 2
final_err[base:] = np.sqrt(self.sample_scaling * dist).ravel()
return final_err
def _estimate_centers_widths(
self,
unique_R,
inds,
X,
W,
init_centers,
init_widths,
template_centers,
template_widths,
template_centers_mean_cov,
template_widths_mean_var_reci):
"""Estimate centers and widths
Parameters
----------
unique_R : a list of array,
Each element contains unique value in one dimension of
coordinate matrix R.
inds : a list of array,
Each element contains the indices to reconstruct one
            dimension of original coordinate matrix from the unique
array.
X : 2D array, with shape [n_voxel, n_tr]
fMRI data from one subject.
W : 2D array, with shape [K, n_tr]
The weight matrix.
init_centers : 2D array, with shape [K, n_dim]
The initial values of centers.
init_widths : 1D array
The initial values of widths.
template_centers: 1D array
The template prior on centers
template_widths: 1D array
The template prior on widths
template_centers_mean_cov: 2D array, with shape [K, cov_size]
The template prior on centers' mean
template_widths_mean_var_reci: 1D array
The reciprocal of template prior on variance of widths' mean
Returns
-------
final_estimate.x: 1D array
The newly estimated centers and widths.
final_estimate.cost: float
The cost value.
"""
# least_squares only accept x in 1D format
init_estimate = np.hstack(
(init_centers.ravel(), init_widths.ravel())) # .copy()
data_sigma = 1.0 / math.sqrt(2.0) * np.std(X)
final_estimate = least_squares(
self._residual_multivariate,
init_estimate,
args=(
unique_R,
inds,
X,
W,
template_centers,
template_widths,
template_centers_mean_cov,
template_widths_mean_var_reci,
data_sigma),
method=self.nlss_method,
loss=self.nlss_loss,
bounds=self.bounds,
verbose=0,
x_scale=self.x_scale,
tr_solver=self.tr_solver)
return final_estimate.x, final_estimate.cost
def _fit_tfa(self, data, R, template_prior=None):
"""TFA main algorithm
Parameters
----------
data: 2D array, in shape [n_voxel, n_tr]
The fMRI data from one subject.
R : 2D array, in shape [n_voxel, n_dim]
The voxel coordinate matrix of fMRI data
template_prior : 1D array,
The template prior on centers and widths.
Returns
-------
TFA
Returns the instance itself.
"""
if template_prior is None:
template_centers = None
template_widths = None
template_centers_mean_cov = None
template_widths_mean_var_reci = None
else:
template_centers = self.get_centers(template_prior)
template_widths = self.get_widths(template_prior)
template_centers_mean_cov =\
self.get_centers_mean_cov(template_prior)
template_widths_mean_var_reci = 1.0 /\
self.get_widths_mean_var(template_prior)
inner_converged = False
np.random.seed(self.seed)
n = 0
while n < self.miter and not inner_converged:
self._fit_tfa_inner(
data,
R,
template_centers,
template_widths,
template_centers_mean_cov,
template_widths_mean_var_reci)
self._assign_posterior()
inner_converged, _ = self._converged()
if not inner_converged:
self.local_prior = self.local_posterior_
else:
logger.info("TFA converged at %d iteration." % (n))
n += 1
gc.collect()
return self
def get_unique_R(self, R):
"""Get unique vlaues from coordinate matrix
Parameters
----------
R : 2D array
The coordinate matrix of a subject's fMRI data
        Returns
        -------
unique_R : a list of array,
Each element contains unique value in one dimension of
coordinate matrix R.
inds : a list of array,
Each element contains the indices to reconstruct one
            dimension of original coordinate matrix from the unique
array.
"""
unique_R = []
inds = []
for d in np.arange(self.n_dim):
tmp_unique, tmp_inds = np.unique(R[:, d], return_inverse=True)
unique_R.append(tmp_unique)
inds.append(tmp_inds)
return unique_R, inds
def _fit_tfa_inner(
self,
data,
R,
template_centers,
template_widths,
template_centers_mean_cov,
template_widths_mean_var_reci):
"""Fit TFA model, the inner loop part
Parameters
----------
data: 2D array, in shape [n_voxel, n_tr]
The fMRI data of a subject
R : 2D array, in shape [n_voxel, n_dim]
The voxel coordinate matrix of fMRI data
template_centers: 1D array
The template prior on centers
template_widths: 1D array
The template prior on widths
template_centers_mean_cov: 2D array, with shape [K, cov_size]
The template prior on covariance of centers' mean
template_widths_mean_var_reci: 1D array
The reciprocal of template prior on variance of widths' mean
Returns
-------
TFA
Returns the instance itself.
"""
nfeature = data.shape[0]
nsample = data.shape[1]
feature_indices =\
np.random.choice(nfeature, self.max_num_voxel, replace=False)
sample_features = np.zeros(nfeature).astype(bool)
sample_features[feature_indices] = True
samples_indices =\
np.random.choice(nsample, self.max_num_tr, replace=False)
curr_data = np.zeros((self.max_num_voxel, self.max_num_tr))\
.astype(float)
curr_data = data[feature_indices]
curr_data = curr_data[:, samples_indices].copy()
curr_R = R[feature_indices].copy()
centers = self.get_centers(self.local_prior)
widths = self.get_widths(self.local_prior)
unique_R, inds = self.get_unique_R(curr_R)
F = self.get_factors(unique_R, inds, centers, widths)
W = self.get_weights(curr_data, F)
self.local_posterior_, self.total_cost = self._estimate_centers_widths(
unique_R, inds, curr_data, W, centers, widths,
template_centers, template_centers_mean_cov,
template_widths, template_widths_mean_var_reci)
return self
def fit(self, X, R, template_prior=None):
""" Topographical Factor Analysis (TFA)[Manning2014]
Parameters
----------
X : 2D array, in shape [n_voxel, n_sample]
The fMRI data of one subject
R : 2D array, in shape [n_voxel, n_dim]
The voxel coordinate matrix of fMRI data
template_prior : None or 1D array
The template prior as an extra constraint
None when fitting TFA alone
"""
if self.verbose:
logger.info('Start to fit TFA ')
if not isinstance(X, np.ndarray):
raise TypeError("Input data should be an array")
if X.ndim != 2:
raise TypeError("Input data should be 2D array")
if not isinstance(R, np.ndarray):
raise TypeError("Input coordinate matrix should be an array")
if R.ndim != 2:
raise TypeError("Input coordinate matrix should be 2D array")
if X.shape[0] != R.shape[0]:
raise TypeError(
"The number of voxels should be the same in X and R!")
if self.weight_method != 'rr' and self.weight_method != 'ols':
raise ValueError(
"only 'rr' and 'ols' are accepted as weight_method!")
# main algorithm
self.n_dim = R.shape[1]
self.cov_vec_size = np.sum(np.arange(self.n_dim) + 1)
self.map_offset = self.get_map_offset()
self.bounds = self.get_bounds(R)
n_voxel = X.shape[0]
n_tr = X.shape[1]
self.sample_scaling = 0.5 * float(
self.max_num_voxel * self.max_num_tr) / float(n_voxel * n_tr)
if template_prior is None:
self.init_prior(R)
else:
self.local_prior = template_prior[0: self.map_offset[2]]
self._fit_tfa(X, R, template_prior)
if template_prior is None:
centers = self.get_centers(self.local_posterior_)
widths = self.get_widths(self.local_posterior_)
unique_R, inds = self.get_unique_R(R)
self.F_ = self.get_factors(unique_R, inds, centers, widths)
self.W_ = self.get_weights(X, self.F_)
return self
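# Hedged usage sketch (illustrative; shapes and values are assumptions, and
# the subsampling limits are lowered so they do not exceed the toy data):
#
#     >>> import numpy as np
#     >>> X = np.random.rand(1000, 200)        # [n_voxel, n_tr]
#     >>> R = np.random.rand(1000, 3) * 50.0   # [n_voxel, n_dim] coordinates
#     >>> tfa = TFA(K=5, max_num_voxel=500, max_num_tr=100)
#     >>> tfa = tfa.fit(X, R)
#     >>> tfa.F_.shape, tfa.W_.shape           # ((1000, 5), (5, 200))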
| apache-2.0 |
mlds-lab/egk | fastfood.py | 1 | 6837 | from __future__ import division
import numpy as np
from numpy.linalg import norm
from scipy.linalg import eigh
from sklearn.decomposition import randomized_svd
import fht
def low_rank_cov_root(covs, rank, implementation='randomized_svd'):
"""
return X: (n_data, n_dim, rank) matrix so that X[i].dot(X[i].T) ~ covs[i]
"""
n_data, n_dim = covs.shape[:2]
if implementation == 'randomized_svd':
X = np.empty((n_data, n_dim, rank))
for i in xrange(n_data):
U, s, V = randomized_svd(covs[i], rank)
X[i] = U * np.sqrt(s)
elif implementation == 'scipy':
X = np.empty((n_data, n_dim, rank))
for i in xrange(n_data):
eigval, eigvec = eigh(covs[i],
eigvals=(n_dim - rank, n_dim - 1))
X[i] = eigvec * np.sqrt(eigval)
elif implementation == 'numpy':
eigval, eigvec = np.linalg.eigh(covs)
idx = np.argsort(eigval, axis=-1)[:, -rank:]
val_idx = np.ogrid[0:n_data, 0:n_dim]
vec_idx = np.ogrid[0:n_data, 0:n_dim, 0:n_dim]
X = (eigvec[vec_idx[0], vec_idx[1], idx[:, np.newaxis]] *
np.sqrt(eigval[val_idx[0], idx][:, np.newaxis]))
return X
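# Illustrative check (an addition, not in the original module): the roots returned
# by low_rank_cov_root() should satisfy X[i].dot(X[i].T) ~ covs[i] whenever the
# requested rank is at least the true rank of the covariance matrices.
def _example_low_rank_cov_root(n_data=3, n_dim=8, rank=4, seed=0):
    rng = np.random.RandomState(seed)
    A = rng.randn(n_data, n_dim, rank)
    covs = np.einsum('nik,njk->nij', A, A)       # exactly rank-`rank` PSD matrices
    X = low_rank_cov_root(covs, rank, implementation='scipy')
    recon = np.einsum('nik,njk->nij', X, X)
    return np.max(np.abs(recon - covs))          # ~0 up to floating point error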
class FastfoodEGK(object):
def __init__(self, gamma, n_sample=None, normalize=False, rank=0,
random_seed=1):
"""
Apply low-rank approximation with rank > 0
"""
self.gamma = gamma
self.n_sample = n_sample
self.normalize = normalize
self.rank = rank
self.random_seed = random_seed
def fit(self, means, covs):
"""
n: number of data cases
m: number of features
d: dimension of Gaussians
means: (n, d)
covs: (n, d, d)
"""
rnd = np.random.RandomState(self.random_seed)
n_dim = means.shape[1]
n_dim_pow2 = 2**int(np.ceil(np.log2(n_dim)))
if self.n_sample is None:
self.n_sample = n_dim_pow2
n_sample = self.n_sample
n_block = int(np.ceil(n_sample / n_dim_pow2))
# Generate fastfood components
# B: diagonal binary scaling matrix
# Pi: permutation matrix
# G: diagonal Gaussian matrix, G_{ii} ~ N(0, 1)
# S: diagonal scaling matrix
B = rnd.choice([-1, 1], size=(n_block, n_dim_pow2))
G = rnd.normal(0, 1, size=(n_block, n_dim_pow2))
Pi = np.empty((n_block, n_dim_pow2), dtype=int)
S = np.sqrt(rnd.chisquare(n_dim_pow2, size=(n_block, n_dim_pow2)))
for i in xrange(n_block):
S[i] /= np.linalg.norm(G[i], 2)
Pi[i] = rnd.permutation(n_dim_pow2)
self.B = B
self.G = G
self.Pi = Pi
self.S = S
self.random_offset = rnd.uniform(0, 2 * np.pi, size=self.n_sample)
self.n_dim = n_dim
self.n_dim_pow2 = n_dim_pow2
self.n_block = n_block
return self
def fastfood_2d(self, X):
n_data, n_dim = X.shape
B = self.B
G = self.G
Pi = self.Pi
S = self.S
n_block = self.n_block
# Fastfood
V = np.empty((n_data, n_dim * n_block))
idx_lo = 0
for i in xrange(n_block):
BX = B[i] * X
HBX = fht.fht2(BX, 1)
PiHBX = HBX[:, Pi[i]]
GPiHBX = PiHBX * G[i]
HGPiHBX = fht.fht2(GPiHBX, 1)
SHGPiHBX = HGPiHBX * S[i]
idx_hi = idx_lo + n_dim
V[:, idx_lo:idx_hi] = SHGPiHBX
idx_lo = idx_hi
V *= np.sqrt(n_dim) / self.gamma
if self.n_sample != V.shape[1]:
V = V[:, :self.n_sample]
features = np.sqrt(2 / self.n_sample) * np.cos(V + self.random_offset)
return features
def exp_quadratic(self, X):
n_data, n_dim = X.shape[:2]
B = self.B
G = self.G
Pi = self.Pi
S = self.S
n_block = self.n_block
# Fastfood
V = np.empty((n_data, n_dim * n_block))
idx_lo = 0
for i in xrange(n_block):
BX = B[i] * X
HBX = fht.fht3(BX, 2)
PiHBX = HBX[:, :, Pi[i]]
GPiHBX = PiHBX * G[i]
HGPiHBX = fht.fht3(GPiHBX, 2)
SHGPiHBX = HGPiHBX * S[i]
BX = B[i, :, np.newaxis] * SHGPiHBX
HBX = fht.fht3(BX, 1)
PiHBX = HBX[:, Pi[i]]
GPiHBX = PiHBX * G[i, :, np.newaxis]
HGPiHBX = fht.fht3(GPiHBX, 1)
diag = HGPiHBX.diagonal(axis1=1, axis2=2)
idx_hi = idx_lo + n_dim
V[:, idx_lo:idx_hi] = diag * S[i]
idx_lo = idx_hi
if self.n_sample != V.shape[1]:
V = V[:, :self.n_sample]
return np.exp(-0.5 * V * n_dim / self.gamma**2)
def exp_low_rank(self, X):
n_data, n_dim = X.shape[:2]
B = self.B[..., np.newaxis]
G = self.G[..., np.newaxis]
Pi = self.Pi
S = self.S[..., np.newaxis]
n_block = self.n_block
# Fastfood
V = np.empty((n_data, n_dim * n_block))
idx_lo = 0
for i in xrange(n_block):
BX = B[i] * X
HBX = fht.fht3(BX, 1)
PiHBX = HBX[:, Pi[i]]
GPiHBX = PiHBX * G[i]
HGPiHBX = fht.fht3(GPiHBX, 1)
SHGPiHBX = HGPiHBX * S[i]
idx_hi = idx_lo + n_dim
V[:, idx_lo:idx_hi] = np.power(SHGPiHBX, 2).sum(axis=2)
idx_lo = idx_hi
if self.n_sample != V.shape[1]:
V = V[:, :self.n_sample]
return np.exp(-0.5 * V * n_dim / self.gamma**2)
def transform(self, means, covs):
n_data, n_dim = means.shape
n_dim_pow2 = self.n_dim_pow2
if self.rank > 0:
covs = low_rank_cov_root(covs, self.rank)
root_cov = True
else:
root_cov = False
if n_dim == n_dim_pow2:
means_padded = means
covs_padded = covs
else:
means_padded = np.zeros((n_data, n_dim_pow2))
means_padded[:, :n_dim] = means
if root_cov:
covs_padded = np.zeros((n_data, n_dim_pow2, self.rank))
covs_padded[:, :n_dim] = covs
else:
covs_padded = np.zeros((n_data, n_dim_pow2, n_dim_pow2))
covs_padded[:, :n_dim, :n_dim] = covs
cos = self.fastfood_2d(means_padded)
if root_cov:
exp_quad = self.exp_low_rank(covs_padded)
else:
exp_quad = self.exp_quadratic(covs_padded)
features = exp_quad * cos
if self.normalize:
return features / norm(features, 2, axis=1)[:, np.newaxis]
return features
def fit_transform(self, means, covs):
return self.fit(means, covs).transform(means, covs)
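# Illustrative usage sketch (added for clarity; not part of the original file).
# FastfoodEGK consumes per-case Gaussian means of shape (n, d) and covariances of
# shape (n, d, d).  The transform depends on the external `fht` fast Hadamard
# transform package imported above, so this sketch only runs where fht is installed.
def _example_fastfood_egk(n_data=10, n_dim=6, seed=0):
    rng = np.random.RandomState(seed)
    means = rng.randn(n_data, n_dim)
    A = rng.randn(n_data, n_dim, n_dim)
    covs = np.einsum('nik,njk->nij', A, A) / n_dim     # PSD covariance per case
    ff = FastfoodEGK(gamma=1.0, n_sample=16, normalize=True)
    return ff.fit_transform(means, covs)               # (n_data, n_sample) features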
| mit |
qrqiuren/sms-tools | lectures/08-Sound-transformations/plots-code/hps-transformation.py | 24 | 3018 | # function call to the transformation functions of relevance for the hpsModel
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import get_window
import sys, os
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/transformations/'))
import hpsModel as HPS
import hpsTransformations as HPST
import harmonicTransformations as HT
import utilFunctions as UF
inputFile='../../../sounds/sax-phrase-short.wav'
window='blackman'
M=601
N=1024
t=-100
minSineDur=0.1
nH=100
minf0=350
maxf0=700
f0et=5
harmDevSlope=0.01
stocf=0.1
Ns = 512
H = 128
(fs, x) = UF.wavread(inputFile)
w = get_window(window, M)
hfreq, hmag, hphase, mYst = HPS.hpsModelAnal(x, fs, w, N, H, t, nH, minf0, maxf0, f0et, harmDevSlope, minSineDur, Ns, stocf)
timeScaling = np.array([0, 0, 2.138, 2.138-1.5, 3.146, 3.146])
yhfreq, yhmag, ystocEnv = HPST.hpsTimeScale(hfreq, hmag, mYst, timeScaling)
y, yh, yst = HPS.hpsModelSynth(yhfreq, yhmag, np.array([]), ystocEnv, Ns, H, fs)
UF.wavwrite(y,fs, 'hps-transformation.wav')
plt.figure(figsize=(12, 9))
maxplotfreq = 14900.0
# plot the input sound
plt.subplot(4,1,1)
plt.plot(np.arange(x.size)/float(fs), x)
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.title('x (sax-phrase-short.wav)')
# plot spectrogram of stochastic component
plt.subplot(4,1,2)
numFrames = int(mYst[:,0].size)
sizeEnv = int(mYst[0,:].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = (.5*fs)*np.arange(sizeEnv*maxplotfreq/(.5*fs))/sizeEnv
plt.pcolormesh(frmTime, binFreq, np.transpose(mYst[:,:sizeEnv*maxplotfreq/(.5*fs)+1]))
plt.autoscale(tight=True)
# plot harmonic on top of stochastic spectrogram
harms = hfreq*np.less(hfreq,maxplotfreq)
harms[harms==0] = np.nan
numFrames = int(harms[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.plot(frmTime, harms, color='k', ms=3, alpha=1)
plt.autoscale(tight=True)
plt.title('harmonics + stochastic residual')
# plot spectrogram of transformed stochastic component
plt.subplot(4,1,3)
numFrames = int(ystocEnv[:,0].size)
sizeEnv = int(ystocEnv[0,:].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = (.5*fs)*np.arange(sizeEnv*maxplotfreq/(.5*fs))/sizeEnv
plt.pcolormesh(frmTime, binFreq, np.transpose(ystocEnv[:,:sizeEnv*maxplotfreq/(.5*fs)+1]))
plt.autoscale(tight=True)
# plot transformed harmonic on top of stochastic spectrogram
harms = yhfreq*np.less(yhfreq,maxplotfreq)
harms[harms==0] = np.nan
numFrames = int(harms[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.plot(frmTime, harms, color='k', ms=3, alpha=1)
plt.autoscale(tight=True)
plt.title('timescaled harmonics + stochastic residual')
# plot the output sound
plt.subplot(4,1,4)
plt.plot(np.arange(y.size)/float(fs), y)
plt.axis([0, y.size/float(fs), min(y), max(y)])
plt.title('output sound: y')
plt.tight_layout()
plt.savefig('hps-transformation.png')
plt.show()
| agpl-3.0 |
mutaphore/ML-CTR | lr.py | 1 | 1152 | #!/usr/local/bin/python
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.cross_validation import train_test_split
from clean import parse_data
if __name__ == '__main__':
f_train = 'train1M'
f_test = 'test'
print "Parsing data..."
YX = parse_data(f_train, combine=True)
YX_train, YX_test = train_test_split(YX, test_size=0.1,
random_state=42)
# random_state=np.random.random_integers(100000))
X_train = np.array(YX_train)[:,1:]
Y_train = np.array(YX_train)[:,0]
X_test = np.array(YX_test)[:,1:]
Y_test = np.array(YX_test)[:,0]
print "Training Logistic Regression classifier..."
clf = LogisticRegression(penalty='l2')
clf.fit(X_train, Y_train)
print "Cross validating..."
score = clf.score(X_test, Y_test)
print "CV score on test data %r " % score
# print "Parsing test data..."
# X_test = parse_data(f_test)[0]
# print "Predicting..."
# prob = clf.predict_proba(X_test)
# print "Writing probs..."
# f_out = open("lr_prob", 'w')
# for row in prob:
# f_out.write(str(row[0]) + '\n')
# f_out.close()
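# Possible extension (a sketch, not part of the original script): CTR models are
# usually judged on their predicted probabilities, so alongside the accuracy score
# above one would often report log loss on the held-out split.  The names clf,
# X_test and Y_test refer to the variables created in the __main__ block.
def evaluate_log_loss(clf, X_test, Y_test):
    from sklearn.metrics import log_loss
    prob = clf.predict_proba(X_test)        # per-class click probabilities
    return log_loss(Y_test, prob)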
| gpl-2.0 |
lbdreyer/cartopy | docs/make_projection.py | 1 | 4088 | # (C) British Crown Copyright 2011 - 2012, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <http://www.gnu.org/licenses/>.
import itertools
import os
import numpy as np
import cartopy.crs as ccrs
def find_projections():
for obj_name, o in vars(ccrs).copy().items():
# o = getattr(ccrs, obj_name)
if (isinstance(o, type) and issubclass(o, ccrs.Projection) and
not obj_name.startswith('_') and obj_name not in ['Projection']):
# yield the projections
yield o
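# Small illustration (added): find_projections() yields the cartopy CRS classes
# that the projection tables below are built from.
def _example_projection_names():
    return sorted(p.__name__ for p in find_projections())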
def projection_rst(projection_cls):
name = projection_cls.__name__
print(name)
SPECIAL_CASES = {ccrs.PlateCarree: ['PlateCarree()', 'PlateCarree(central_longitude=180)'],
ccrs.RotatedPole: ['RotatedPole(pole_longitude=177.5, pole_latitude=37.5)'],
}
COASTLINE_RESOLUTION = {ccrs.OSNI: '10m',
ccrs.OSGB: '50m',
ccrs.EuroPP: '50m'}
PRJ_SORT_ORDER = {'PlateCarree': 1, 'Mercator': 2, 'Mollweide': 2, 'Robinson': 2,
'TransverseMercator': 2, 'LambertCylindrical': 2,
'LambertConformal': 2, 'Stereographic': 2, 'Miller': 2,
'Orthographic': 2, 'InterruptedGoodeHomolosine': 3,
'RotatedPole': 3, 'OSGB': 4}
groups = [('cylindrical', [ccrs.PlateCarree, ccrs.Mercator, ccrs.TransverseMercator,
ccrs.OSGB, ccrs.LambertCylindrical, ccrs.Miller, ccrs.RotatedPole]),
('pseudo-cylindrical', [ccrs.Mollweide, ccrs.Robinson]),
# ('conic', [ccrs.aed]),
('azimuthal', [ccrs.Stereographic, ccrs.NorthPolarStereo,
ccrs.SouthPolarStereo, ccrs.Gnomonic, ccrs.Orthographic
]),
('misc', [ccrs.InterruptedGoodeHomolosine]),
]
all_projections_in_groups = list(itertools.chain.from_iterable([g[1] for g in groups]))
if __name__ == '__main__':
fname = os.path.join(os.path.dirname(__file__), 'source',
'crs', 'projections.rst')
table = open(fname, 'w')
table.write('.. _cartopy_projections:\n\n')
table.write('Cartopy projection list\n')
table.write('=======================\n\n\n')
prj_class_sorter = lambda cls: (PRJ_SORT_ORDER.get(cls.__name__, []), cls.__name__)
for prj in sorted(find_projections(), key=prj_class_sorter):
name = prj.__name__
# print prj in SPECIAL_CASES, prj in all_projections_in_groups, prj
# put the class documentation on the left, and a sidebar on the right.
aspect = (np.diff(prj().x_limits) / np.diff(prj().y_limits))[0]
width = 3 * aspect
if width == int(width):
width = int(width)
table.write(name + '\n')
table.write('-' * len(name) + '\n\n')
table.write('.. autoclass:: cartopy.crs.%s\n' % name)
# table.write('Ipsum lorum....')
# table.write("""\n\n
#
#.. sidebar:: Example
#""")
for instance_creation_code in SPECIAL_CASES.get(prj, ['%s()' % name]):
code = """
.. plot::
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
plt.figure(figsize=({width}, 3))
ax = plt.axes(projection=ccrs.{proj_constructor})
ax.coastlines(resolution={coastline_resolution!r})
ax.gridlines()
\n""".format(width=width, proj_constructor=instance_creation_code,
coastline_resolution=COASTLINE_RESOLUTION.get(prj, '110m'))
table.write(code)
| lgpl-3.0 |
j0r1/simpactcyan | python/pysimpactcyan.py | 1 | 35174 | """ Python bindings for running programs from the SimpactCyan package.
This package supplies a class `PySimpactCyan`, which can be used to run
simulations using the SimpactCyan programs. When used as a standalone
python program, it can be used to list a full config file based on just$
a few fields which override the defaults.
"""
from __future__ import print_function
import sys
import pprint
import copy
import subprocess
import json
import os
import tempfile
import shutil
import random
import time
import platform
# This helper function executes 'executable', for example
#
# [ 'simpact-cyan-opt', '--showconfigoptions' ]
#
# to retrieve all config options in JSON format. It then expands the
# options which are of type 'distTypes' into the available 1D distribution
# types listed in the JSON config
def _getExpandedSettingsOptions(executable):
with open(os.devnull, "w") as nullFile:
proc = subprocess.Popen(executable, stdout=subprocess.PIPE, stderr=nullFile)
jsonData, unusedErr = proc.communicate()
proc.wait()
jsonData = jsonData.decode("utf-8") # Needed for python3
configOptions = json.loads(jsonData)
configNames = configOptions["configNames"]
distTypesNames = [ ]
for n in configOptions:
if n != "configNames":
distTypesNames.append(n)
possibleDistNames = { }
for n in distTypesNames:
possibleDistNames[n] = [ t for t in configOptions[n] ]
# Change the config entries which have a 'distTypes' etc setting
newConfig = copy.deepcopy(configNames)
for n in configNames:
params = configNames[n]['params']
for i in range(len(params)):
p = params[i]
pName = p[0]
pValue = p[1]
if pValue in distTypesNames:
distTypes = configOptions[pValue]
defaultDistName = "fixed"
defaultDistParams = None
if len(p) == 3: # The third parameter are the defaults
defaultDistOptions = p[2]
defaultDistName = defaultDistOptions[0]
defaultDistParams = defaultDistOptions[1]
# Adjust the entry in 'newConfig' to reflect the default distribution name
newConfig[n]['params'][i] = [ pName + ".type", defaultDistName, possibleDistNames[pValue] ]
# Add entries to newConfig for all possible distributions
for distName in distTypes:
distParams = distTypes[distName]['params']
newConfName = n + "_" + str(i) + "_" + distName
newConfig[newConfName] = { 'depends': [ n, pName + ".type", distName ] }
if defaultDistName == distName and defaultDistParams:
# Default parameters are present for this distribution,
# merge them
defaultParamMap = { }
for dp,dv in defaultDistParams:
defaultParamMap[dp] = dv
modParams = copy.deepcopy(distParams)
for paramPair in modParams:
dp = paramPair[0]
if dp in defaultParamMap:
paramPair[1] = defaultParamMap[dp]
newConfig[newConfName]['params'] = [ [ pName + "." + distName + "." + p[0] ] + p[1:] for p in modParams ]
else:
# No specific defaults, just use the info from the "distTypes" data structure
newConfig[newConfName]['params'] = [ [ pName + "." + distName + "." + p[0] ] + p[1:] for p in distParams ]
newConfig[newConfName]['info'] = distTypes[distName]['info']
#pprint.pprint(newConfig)
#sys.exit(-1)
return newConfig
# userConfig is a map of key/value pairs, for each configuration option as
# will be present in the config file
#
# cfg is actually an entry of configNames, and configNames is present as
# well in case this entry has a dependency, which then needs to be checked
# first
#
# requiredKeys will be a map of keys than need to be set in the final config
# file, either mapped to None just to indicate that it needs to be present,
# or mapped to a list of possible values
def _processConfigPart(cfg, userConfig, configNames, requiredKeys):
params = cfg['params']
deps = cfg['depends']
# Check if we need to process dependencies first
if deps is not None:
depObjName = deps[0]
depObj = configNames[depObjName]
depKey = deps[1]
depVal = deps[2]
#print "processConfigPart", depObjName
#pprint.pprint(depObj)
if not _processConfigPart(depObj, userConfig, configNames, requiredKeys):
# Parent dependency not fulfilled, so this one isn't either
return False
#print "done: processConfigPart", depObjName
if not depKey in userConfig:
pprint.pprint(userConfig)
raise Exception("Key %s was not set" % depKey)
if userConfig[depKey] != depVal:
return False # Dependency not fulfilled
for k in params:
if len(k) == 3:
requiredKeys[k[0]] = k[2]
else:
requiredKeys[k[0]] = None
for p in params:
key = p[0]
val = p[1]
if len(p) == 3:
requiredKeys[key] = p[2]
else:
requiredKeys[key] = None
# See if we should check defaults
if not key in userConfig:
#if val is None:
# raise Exception("Key %s is not set" % key)
userConfig[key] = val
return True
def createConfigLines(executable, inputConfig, checkNone = True, ignoreKeys = [ ]):
def getValidNames(strList):
r = "['" + strList[0] + "'"
for i in range(1,len(strList)):
r += ", '" + strList[i] + "'"
r += "]"
return r
userConfig = copy.deepcopy(inputConfig)
configNames = _getExpandedSettingsOptions(executable)
requiredKeys = { }
for n in configNames:
cfg = configNames[n]
_processConfigPart(cfg, userConfig, configNames, requiredKeys)
for k in userConfig:
if not k in requiredKeys:
raise Exception("Encountered unknown key %s" % k)
val = userConfig[k]
possibleValues = requiredKeys[k]
if possibleValues is not None:
if not val in possibleValues:
raise Exception("Value '%s' for key %s is not allowed, should be one of %s" % (val, k, possibleValues))
if checkNone:
if val is None:
raise Exception("Key %s is not set" % k)
# Display the final config file
lines = [ ]
unusedlines = [ ]
# In principle this should contain the same info as userConfig at the end,
# but we'll introduce some ordering here so we can feed it back to R in a better
# way
resultingConfig = [ ]
names = [ key for key in configNames ]
names.sort()
for key in names:
deps = configNames[key]["depends"]
params = configNames[key]["params"]
info = configNames[key]["info"]
if info:
info = "\n".join(info)
usedparams = [ ]
unusedparams = [ ]
for p in params:
k = p[0]
if k in requiredKeys:
v = userConfig[k]
ns = 60-len(k)
k += " "*ns
if len(p) == 3: # Limited number of possibilities
usedparams.append("# Valid values are: " + getValidNames(p[2]))
if v is None:
usedparams.append("%s = " % k)
elif type(v) == float:
usedparams.append("%s = %.15g" % (k, v))
elif type(v) == int:
usedparams.append("%s = %d" % (k, v))
else:
usedparams.append("%s = %s" % (k, str(v)))
idx = len(resultingConfig)+1
if v is None:
resultingConfig.append((idx, p[0], ""))
else:
resultingConfig.append((idx, p[0], v))
else:
unusedparams.append("# " + p[0])
if usedparams:
if deps:
lines += [ "# The following depends on %s = %s" % (deps[1], deps[2]) ]
if info:
lines += [ "# " + l for l in info.splitlines() ]
lines += usedparams
lines += [ "" ]
if unusedparams:
if deps:
unusedlines += [ "# The following depends on %s = %s" % (deps[1], deps[2]) ]
unusedlines += unusedparams
unusedlines += [ "#" ]
introlines = [ "# The configuration file format is quite straightforward, it is just a set of",
"# 'key = value' lines. Lines that start with '#' are treated as comments and",
"# are ignored.",
"#",
"# If the key starts with a dollar ('$') sign, the rest of the key is ",
"# considered to be the name of a variable, which may be used later on in the",
"# file. To use such a variable in a specified value, you need to surround",
"# the variable name with '${' and '}'. For example, one could write:",
"#",
"# $PREFIX = MyCustomPrefix",
"# logsystem.outfile.logevents = ${PREFIX}-output.log",
"#",
"# and the file used will have the name 'MyCustomPrefix-output.log'.",
"#",
"# In the same way, environment variables can be used, and, in fact, ",
"# environment variables will take precedence over these internal variables.",
"# This way, it is easy to change the content of these variables on the command",
"# line",
"#",
"# Note that no calculations can be performed in this file anymore, so instead",
"# of writing 1.0/2.0, you'd need to write 0.5 for example.",
"" ]
return (userConfig, introlines + unusedlines + [ "" ] + lines, resultingConfig)
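# Hedged usage sketch (an addition, not part of the original module).  In normal
# use createConfigLines() is driven by PySimpactCyan below, but it can be called
# directly with a simpact executable; the binary name and the two configuration
# keys used here are assumptions about a typical Simpact Cyan installation.
def _example_create_config_lines(executable_path="simpact-cyan-release"):
    executable = [executable_path, "--showconfigoptions"]
    userConfig = {"population.nummen": 200, "population.numwomen": 200}
    finalConfig, lines, orderedConfig = createConfigLines(executable, userConfig,
                                                          checkNone=False)
    return "\n".join(lines)     # full config file text with defaults filled in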
def _replaceVariables(value, variables):
newValue = ""
done = False
prevIdx = 0
while not done:
idx = value.find("${", prevIdx)
if idx < 0:
done = True
else:
nextIdx = value.find("}", idx)
if nextIdx < 0:
done = True
else:
key = value[idx+2:nextIdx]
if key in variables:
newValue += variables[key]
prevIdx = nextIdx+1
else:
newValue += value[prevIdx:nextIdx+1]
prevIdx = nextIdx + 1
newValue += value[prevIdx:]
return newValue.strip()
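# Small illustration (added; not in the original file) of the substitution rule
# implemented above: "${NAME}" placeholders are expanded from the variables dict
# and unknown placeholders are left untouched.  Note that literal text in front of
# a matched placeholder is not copied by the branch above, so the module always
# places the placeholder at the start of the value.
def _example_replace_variables():
    variables = {"SIMPACT_OUTPUT_PREFIX": "run1-"}
    expanded = _replaceVariables("${SIMPACT_OUTPUT_PREFIX}eventlog.csv", variables)
    untouched = _replaceVariables("${UNKNOWN}/path", variables)
    return expanded, untouched   # ("run1-eventlog.csv", "${UNKNOWN}/path")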
def _getSimpactPathBasedOnModule():
def endSlash(dirName):
if not dirName.endswith(os.sep): # Make sure it ends with "/" or "\"
dirName += os.sep
return dirName
possiblePaths = [ ]
moduleName = __name__
paths = sys.path
for p in paths:
if os.path.isdir(p):
full = os.path.join(p, moduleName + ".py")
if os.path.exists(full): # Ok, our simpact module exists in this directory
full = os.path.abspath(full)
dirName = os.path.dirname(full)
baseName = os.path.basename(dirName) # should be 'python'
if baseName.lower() == "python":
dirName = endSlash(os.path.dirname(dirName))
possiblePaths.append(dirName)
if baseName.lower() == "site-packages":
dirName = os.path.dirname(os.path.dirname(dirName))
possiblePaths.append(endSlash(os.path.join(dirName, "Library", "share", "simpact-cyan")))
dirName = os.path.dirname(dirName)
possiblePaths.append(endSlash(os.path.join(dirName, "share", "simpact-cyan")))
#print("Possible paths:")
#print(possiblePaths)
return possiblePaths
class PySimpactCyan(object):
""" This class is used to run SimpactCyan based simulations."""
def __init__(self):
self._execPrefix = "simpact-cyan"
self._dataDirectory = self._findSimpactDataDirectory()
self._execDir = self._findSimpactDirectory()
def setSimpactDirectory(self, dirName):
""" Sets the directory in which the simpact binaries were installed to `dirName`"""
self._execDir = os.path.abspath(dirName)
def setSimpactDataDirectory(self, dirName):
self._dataDirectory = dirName
def _findSimpactDataDirectory(self):
paths = [ ]
if "SIMPACT_DATA_DIR" in os.environ:
paths += [ os.environ["SIMPACT_DATA_DIR"] ]
if platform.system() == "Windows":
paths += [ "C:\\Program Files (x86)\\SimpactCyan\\data\\", "C:\\Program Files\\SimpactCyan\\data\\" ]
else:
paths += [ "/usr/share/simpact-cyan/", "/usr/local/share/simpact-cyan/" ]
paths += [ "/Applications/SimpactCyan.app/Contents/data/" ]
for p in _getSimpactPathBasedOnModule():
paths.append(p)
p2 = os.path.join(p, "data")
if not p2.endswith(os.sep):
p2 += os.sep
paths.append(p2)
for p in paths:
f = os.path.join(p, "sa_2003.csv")
if os.path.exists(f):
print("Setting data directory to", p)
return p
print("Warning: can't seem to find the simpact data directory")
return None
def _findSimpactDirectory(self):
with open(os.devnull, "w") as DEVNULL:
paths = [ ]
if platform.system() == "Windows":
paths += [ "C:\\Program Files (x86)\\SimpactCyan", "C:\\Program Files\\SimpactCyan" ]
else:
paths += [ "/Applications/SimpactCyan.app/Contents/bin" ]
for p in _getSimpactPathBasedOnModule():
paths.append(p)
p2 = os.path.join(p, "bin")
if not p2.endswith(os.sep):
p2 += os.sep
paths.append(p2)
exe = "simpact-cyan-release" # This should always exist
# First see if we can run the executable without a full path
try:
subprocess.call([ exe ], stderr=DEVNULL, stdout=DEVNULL)
return None
except:
pass
# Then try some predefined paths
for p in paths:
try:
subprocess.call([ os.path.join(p,exe) ], stderr=DEVNULL, stdout=DEVNULL)
print("Simpact executables found in %s" % p)
return p
except:
pass
print("Warning: can't seem to find a way to run the simpact executables")
return None
def setSimulationPrefix(self, prefix):
if not prefix:
raise Exception("No valid simulation prefix specified")
with open(os.devnull, "w") as DEVNULL:
try:
p = self._getExecPath(testPrefix = prefix)
subprocess.call( [ p ], stderr=DEVNULL, stdout=DEVNULL)
self._execPrefix = prefix
except Exception as e:
raise Exception("Unable to use specified prefix '%s' (can't run '%s')" % (prefix, p))
def _getExecPath(self, opt = True, release = True, testPrefix = None):
fullPath = testPrefix if testPrefix else self._execPrefix
fullPath += "-"
fullPath += "release" if release else "debug"
if self._execDir is not None:
fullPath = os.path.join(self._execDir, fullPath)
return fullPath
def runDirect(self, configFile, parallel = False, opt = True, release = True, outputFile = None, seed = -1, destDir = None, quiet = False):
fullPath = self._getExecPath(opt, release)
parallelStr = "1" if parallel else "0"
if type(opt) == bool:
algoStr = "opt" if opt else "simple"
else:
algoStr = str(opt)
if destDir is None:
destDir = os.path.abspath(os.path.dirname(configFile))
closeOutput = False
origDir = os.getcwd()
try:
os.chdir(destDir)
if outputFile is not None:
if os.path.exists(outputFile):
raise Exception("Want to write to output file '%s', but this already exists" % outputFile)
f = open(outputFile, "w+t")
closeOutput = True
else:
f = tempfile.TemporaryFile(mode='w+t')
newEnv = copy.deepcopy(os.environ)
if seed >= 0:
newEnv["MNRM_DEBUG_SEED"] = str(seed)
if self._dataDirectory is not None:
newEnv["SIMPACT_DATA_DIR"] = str(self._dataDirectory)
if not quiet:
print("Results will be stored in directory '%s'" % os.getcwd())
print("Running simpact executable '{}' ...".format(fullPath))
proc = subprocess.Popen([fullPath, configFile, parallelStr, algoStr], stdout=f, stderr=f, cwd=os.getcwd(), env=newEnv)
try:
proc.wait() # Wait for the process to finish
except:
try:
proc.kill()
except:
pass
raise
f.flush()
f.seek(0)
lines = f.readlines()
if not quiet:
print("Done.")
print()
# Show contents of output file or temporary file on screen
for l in lines:
sys.stdout.write(l)
sys.stdout.flush()
if proc.returncode != 0:
raise Exception(self._getProgramExitError(lines, proc.returncode))
finally:
os.chdir(origDir)
def _getProgramExitError(self, lines, code):
lines = [ l.strip() for l in lines if l.strip() ]
# Look for 'FATAL ERROR'
for i in range(len(lines)):
l = lines[i]
if l == "FATAL ERROR:" and i+1 < len(lines):
return lines[i+1]
# Look for last line before 'UNEXPECTED TERMINATION OF PROGRAM!'
for i in range(len(lines)):
l = lines[i]
if l == "UNEXPECTED TERMINATION OF PROGRAM!" and i > 0:
return lines[i-1]
return "Program exited with an error code ({})".format(code)
def _createConfigLines(self, inputConfig, checkNone = True, ignoreKeys = []):
executable = [ self._getExecPath(), "--showconfigoptions" ]
return createConfigLines(executable, inputConfig, checkNone, ignoreKeys)
def _checkKnownKeys(self, keyList):
executable = [ self._getExecPath(), "--showconfigoptions" ]
configNames = _getExpandedSettingsOptions(executable)
allKnownKeys = [ ]
for n in configNames:
paramList = configNames[n]["params"]
paramKeys = [ ]
for p in paramList:
paramKeys.append(p[0])
allKnownKeys += paramKeys
for k in keyList:
if not k in allKnownKeys:
raise Exception("Encountered unknown key '%s'" % k)
def getConfiguration(self, config, show = False):
# Make sure config is a dict
if not config:
config = { }
ignoreKeys = [ "population.agedistfile" ]
finalConfig, lines, sortedConfig = self._createConfigLines(config, False, ignoreKeys)
lines.append('')
if show:
sys.stdout.write('\n'.join(lines))
return sortedConfig
def showConfiguration(self, config):
self.getConfiguration(config, True)
def _getID(self, identifierFormat):
def getRandomChar():
chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
pos = int(random.random()*len(chars)) % len(chars)
return chars[pos]
t = time.gmtime()
pid = os.getpid()
simType = self._execPrefix
identifier = ""
prevPos = 0
pos = identifierFormat.find("%")
while pos >= 0:
if pos < len(identifierFormat)-1 and identifierFormat[pos+1] in [ '%', 'T', 'y', 'm', 'd', 'H', 'M', 'S', 'p', 'r' ]:
identifier += identifierFormat[prevPos:pos]
prevPos = pos+2
n = identifierFormat[pos+1]
if n == '%':
identifier += "%"
elif n == 'T':
identifier += simType
elif n == 'y':
identifier += "%d" % t.tm_year
elif n == 'm':
identifier += "%02d" % t.tm_mon
elif n == 'd':
identifier += "%02d" % t.tm_mday
elif n == 'H':
identifier += "%02d" % t.tm_hour
elif n == 'M':
identifier += "%02d" % t.tm_min
elif n == 'S':
identifier += "%02d" % t.tm_sec
elif n == 'p':
identifier += "%d" % pid
elif n == 'r':
identifier += getRandomChar()
else:
raise Exception("Internal error: unexpected identifier format '%s'" % n)
pos = identifierFormat.find("%", pos+2)
else:
# No need to adjust prevPos
pos = identifierFormat.find("%", pos+1)
identifier += identifierFormat[prevPos:]
return identifier
def _getFromConfigOrSetDefault(self, config, key, defaultValue):
try:
v = config[key]
except:
v = defaultValue
config[key] = defaultValue
return v
def _writeDataFrame(self, f, data, dataName):
nrows, ncols = data.shape
colNames = data.columns
if len(colNames) == ncols:
f.write(",".join(['"{}"'.format(colNames[c]) for c in range(ncols)]))
f.write("\n")
matrix = data.as_matrix()
self._writeMatrix(f, matrix, dataName, False)
def _writeMatrix(self, f, data, dataName, writeColumnNames = True):
nrows = len(data)
ncols = -1
for r in range(nrows):
if ncols < 0:
ncols = len(data[r])
if len(data[r]) != ncols:
raise Exception("Error processing row %d for data '%s': expecting %d columns, but got %d" % (r+1, dataName, ncols, len(data[r])))
if writeColumnNames and r == 0:
colNames = [ '"Col{}"'.format(c+1) for c in range(ncols) ]
colNamesStr = ",".join(colNames)
f.write(colNamesStr + "\n")
entries = [ "%.15g" % data[r][c] for c in range(ncols) ]
entriesStr = ",".join(entries)
f.write(entriesStr)
f.write("\n")
def _writeDataFile(self, destDir, fileName, dataName, data):
fullPath = os.path.join(destDir, fileName)
if os.path.exists(fullPath):
raise Exception("Error while writing data file for '%s': file '%s' already exists" % (dataName, fileName))
with open(fullPath, "wt") as f:
isDataFrame = False
try:
s = data.columns # Assume that it's a pandas dataframe if this exists
isDataFrame = True
except:
pass
if isDataFrame:
self._writeDataFrame(f, data, dataName)
else: # Not a dataframe
self._writeMatrix(f, data, dataName)
def _toFileName(self, s):
ret = ""
for c in s:
if c.isalnum():
ret += c
else:
ret += "_"
return ret
def run(self, config, destDir, agedist = None, parallel = False, opt = True, release = True, seed = -1,
interventionConfig = None, dryRun = False, identifierFormat = "%T-%y-%m-%d-%H-%M-%S_%p_%r%r%r%r%r%r%r%r-",
dataFiles = { }, quiet = False):
if not destDir:
raise Exception("A destination directory must be specified")
# Make sure config is a dict
if not config:
config = { }
originalConfig = copy.deepcopy(config)
idStr = self._getID(identifierFormat)
if not idStr:
raise Exception("The specified identifier format leads to an empty identifier")
distFile = None
if agedist is not None:
if isinstance(agedist, dict):
distAges = agedist["Age"]
if "Percent.Male" in agedist:
distMalePct = agedist["Percent.Male"]
elif "Percent Male" in agedist:
distMalePct = agedist["Percent Male"]
else:
raise Exception("Error in age distribution: Key for male percentage must be 'Percent.Male' or 'Percent Male'")
if "Percent.Female" in agedist:
distFemalePct = agedist["Percent.Female"]
elif "Percent Female" in agedist:
distFemalePct = agedist["Percent Female"]
else:
raise Exception("Error in age distribution: Key for female percentage must be 'Percent.Female' or 'Percent Female'")
if len(distAges) != len(distMalePct) or len(distAges) != len(distFemalePct):
raise Exception("Not all columns of the 'agedist' variable seem to have the same length")
distFile = "%sagedist.csv" % idStr
config["population.agedistfile"] = distFile
else: # Assume we're referring to a file
config["population.agedistfile"] = str(agedist)
# Intervention event stuff
intTimes = None
intBaseFile = None
ivIDs = []
if interventionConfig:
# Lets make sure we order the intervention times
intTimes = [ ]
if type(interventionConfig) == dict:
for iv in interventionConfig:
t = float(interventionConfig[iv]["time"])
del interventionConfig[iv]["time"]
intTimes.append( (t, interventionConfig[iv]) )
else: # assume it's a list
for iv in interventionConfig:
t = float(iv["time"])
del iv["time"]
intTimes.append( (t, iv) )
intTimes.sort() # Make sure it's sorted on time, interpreted as a real number
if intTimes:
config["intervention.enabled"] = "yes"
isFirstTime = True
ivTimeString = ""
ivIDString = ""
count = 1
for (t,iv) in intTimes:
if not isFirstTime:
ivTimeString += ","
ivIDString += ","
ivTimeString += str(t)
ivIDString += str(count)
ivIDs.append(str(count))
isFirstTime = False
count += 1
config["intervention.times"] = ivTimeString
config["intervention.fileids"] = ivIDString
intBaseFile = "%sinterventionconfig_%%.txt" % idStr
config["intervention.baseconfigname"] = intBaseFile
if os.path.exists(destDir):
# Check that we're actually dealing with a directory
if not os.path.isdir(destDir):
raise Exception("Specified destination directory '%s' exists but does not seem to be a directory" % destDir)
else:
# Create the directory
if not quiet:
print("Specified destination directory '%s' does not exist, creating it" % destDir)
os.makedirs(destDir)
# Replace things that start with "data:" by "${SIMPACT_INDATA_PREFIX}"
dataPrefix = "data:"
for c in config:
v = config[c]
if str(v).startswith(dataPrefix):
v = "${SIMPACT_INDATA_PREFIX}" + self._toFileName(v[len(dataPrefix):]) + ".csv"
config[c] = v
# Here, the actual configuration file lines are created
finalConfig, lines, notNeeded = self._createConfigLines(config, True)
# Check some paths
configFile = os.path.abspath(os.path.join(destDir, "%sconfig.txt" % idStr))
if os.path.exists(configFile):
raise Exception("Want to write to configuration file '%s', but this already exists" % configFile)
outputFile = os.path.abspath(os.path.join(destDir, "%soutput.txt" % idStr))
if os.path.exists(outputFile):
raise Exception("Want to write to output file '%s', but this already exists" % outputFile)
if distFile:
fullDistFile = os.path.abspath(os.path.join(destDir, distFile))
if os.path.exists(fullDistFile):
raise Exception("Want to write to age distribution file '%s', but this already exists" % fullDistFile)
# Write the config file
with open(configFile, "wt") as f:
f.write("# Some variables. Note that if set, environment variables will have\n")
f.write("# Precedence.\n")
f.write("$SIMPACT_OUTPUT_PREFIX = %s\n" % idStr)
if dataFiles:
f.write("$SIMPACT_INDATA_PREFIX = %s\n" % (idStr + "data-"))
if self._dataDirectory:
f.write("$SIMPACT_DATA_DIR = %s\n" % self._dataDirectory)
f.write("\n")
for l in lines:
f.write(l + "\n")
f.close()
if distFile:
# Write the age distribution file
with open(fullDistFile, "wt") as f:
f.write("Age,Percent Male,Percent Female\n")
for i in range(len(distAges)):
f.write("%g,%g,%g\n" % (distAges[i], distMalePct[i], distFemalePct[i]))
f.close()
# write intervention config files
if intTimes:
for tIdx in range(len(intTimes)):
t = intTimes[tIdx][0]
iv = intTimes[tIdx][1]
# With the current approach, the best we can do is to check for keys that
# are never used in a config file
self._checkKnownKeys([ name for name in iv ])
fileName = intBaseFile.replace("%", ivIDs[tIdx])
fileName = os.path.join(destDir, fileName)
with open(fileName, "w") as f:
for k in iv:
f.write("%s = %s\n" % (k,iv[k]))
f.close()
# write data files
#pprint.pprint(dataFiles)
if dataFiles:
for d in dataFiles:
self._writeDataFile(destDir, idStr + "data-" + self._toFileName(d) + ".csv", d, dataFiles[d])
# Set environment variables (if necessary) and start executable
if not dryRun:
if not quiet:
print("Using identifier '%s'" % idStr)
self.runDirect(configFile, parallel, opt, release, outputFile, seed, destDir, quiet)
# Create the return structure
results = { }
replaceVars = { }
# These are things that should be replaced
if self._dataDirectory:
replaceVars["SIMPACT_DATA_DIR"] = self._dataDirectory
replaceVars["SIMPACT_OUTPUT_PREFIX"] = idStr
replaceVars["SIMPACT_INDATA_PREFIX"] = os.path.join(destDir, idStr + "data-")
# These are the output log files in a generic way
outFileSpec = ".outfile."
for n in finalConfig:
if outFileSpec in n:
value = _replaceVariables(finalConfig[n], replaceVars)
if value:
pos = n.find(outFileSpec) + len(outFileSpec)
logName = n[pos:]
fileName = os.path.join(destDir, value)
if os.path.exists(fileName):
results[logName] = fileName
# Also show the 'data:' entries in the returned dictionary
for n in originalConfig:
if str(originalConfig[n]).startswith(dataPrefix):
results[originalConfig[n]] = _replaceVariables(finalConfig[n], replaceVars)
results["configfile"] = os.path.join(destDir, configFile)
results["outputfile"] = os.path.join(destDir, outputFile)
results["id"] = idStr
if distFile:
results["agedistfile"] = os.path.join(destDir, distFile)
else:
results["agedistfile"] = _replaceVariables(finalConfig["population.agedistfile"], replaceVars)
# Get simulation time and number of events from output file
try:
with open(results["outputfile"], "rt") as f:
lines = f.readlines()
simTimePrefix = "# Current simulation time is "
numEvtsPrefix = "# Number of events executed is "
for l in lines:
if l.startswith(simTimePrefix):
results["simulationtime"] = float(l[len(simTimePrefix):])
if l.startswith(numEvtsPrefix):
results["eventsexecuted"] = int(l[len(numEvtsPrefix):])
except Exception as e:
print("WARNING: can't get simulation time or number of events from output file {}: {}".format(results["outputfile"], e))
return results
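# Hedged end-to-end sketch (an addition, not part of the original module) of how
# PySimpactCyan is typically driven.  The configuration keys below are assumptions
# about Simpact Cyan option names; run() writes the config file into destDir,
# launches the executable located by the class and returns the output file paths.
def _example_run_simulation(destDir="/tmp/simpacttest"):
    cfg = {"population.nummen": 100,
           "population.numwomen": 100,
           "population.simtime": 40}
    simpact = PySimpactCyan()
    results = simpact.run(cfg, destDir, seed=42)
    return results    # dict with e.g. "configfile", "outputfile" and per-log paths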
def main():
try:
executable = [ sys.argv[1], "--showconfigoptions"]
if len(sys.argv[1:]) != 1:
raise Exception("Invalid number of arguments")
except Exception as e:
print("Error: %s" % str(e), file=sys.stderr)
print(file=sys.stderr)
print("Usage: pysimpactcyan.py simpactexecutable", file=sys.stderr)
sys.exit(-1)
# Read the input
userConfig = { }
line = sys.stdin.readline()
while line:
line = line.strip()
if line:
parts = [ p.strip() for p in line.split('=') ]
key, value = parts[0], parts[1]
userConfig[key] = value
line = sys.stdin.readline()
# In principle, the 'resultingConfigNotNeeded' should contain the same things
# as finalConfig, but some ordering was introduced
(finalConfig, lines, resultingConfigNotNeeded) = createConfigLines(executable, userConfig, False)
lines.append('')
sys.stdout.write('\n'.join(lines))
if __name__ == "__main__":
main()
| gpl-3.0 |
mfjb/scikit-learn | sklearn/metrics/regression.py | 175 | 16953 | """Metrics to assess performance on regression task
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# Manoj Kumar <[email protected]>
# Michael Eickenberg <[email protected]>
# Konstantin Shmelkov <[email protected]>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils.validation import check_array, check_consistent_length
from ..utils.validation import column_or_1d
import warnings
__ALL__ = [
"mean_absolute_error",
"mean_squared_error",
"median_absolute_error",
"r2_score",
"explained_variance_score"
]
def _check_reg_targets(y_true, y_pred, multioutput):
"""Check that y_true and y_pred belong to the same regression task
Parameters
----------
y_true : array-like,
y_pred : array-like,
multioutput : array-like or string in ['raw_values', uniform_average',
'variance_weighted'] or None
None is accepted due to backward compatibility of r2_score().
Returns
-------
type_true : one of {'continuous', continuous-multioutput'}
The type of the true target data, as output by
'utils.multiclass.type_of_target'
y_true : array-like of shape = (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples, n_outputs)
Estimated target values.
multioutput : array-like of shape = (n_outputs) or string in ['raw_values',
        'uniform_average', 'variance_weighted'] or None
Custom output weights if ``multioutput`` is array-like or
just the corresponding argument if ``multioutput`` is a
correct keyword.
"""
check_consistent_length(y_true, y_pred)
y_true = check_array(y_true, ensure_2d=False)
y_pred = check_array(y_pred, ensure_2d=False)
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
raise ValueError("y_true and y_pred have different number of output "
"({0}!={1})".format(y_true.shape[1], y_pred.shape[1]))
n_outputs = y_true.shape[1]
multioutput_options = (None, 'raw_values', 'uniform_average',
'variance_weighted')
if multioutput not in multioutput_options:
multioutput = check_array(multioutput, ensure_2d=False)
if n_outputs == 1:
raise ValueError("Custom weights are useful only in "
"multi-output cases.")
elif n_outputs != len(multioutput):
raise ValueError(("There must be equally many custom weights "
"(%d) as outputs (%d).") %
(len(multioutput), n_outputs))
y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput'
return y_type, y_true, y_pred, multioutput
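# Illustrative sketch (an addition, not part of scikit-learn): _check_reg_targets
# promotes 1D targets to column vectors and reports the regression task type.
def _example_check_reg_targets():
    y_type, y_true, y_pred, multioutput = _check_reg_targets(
        [3, -0.5, 2, 7], [2.5, 0.0, 2, 8], 'uniform_average')
    # y_type == 'continuous'; y_true.shape == y_pred.shape == (4, 1)
    return y_type, y_true.shape, y_pred.shape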
def mean_absolute_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean absolute error regression loss
Read more in the :ref:`User Guide <mean_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
If multioutput is 'raw_values', then mean absolute error is returned
for each output separately.
If multioutput is 'uniform_average' or an ndarray of weights, then the
weighted average of all output errors is returned.
MAE output is non-negative floating point. The best value is 0.0.
Examples
--------
>>> from sklearn.metrics import mean_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_absolute_error(y_true, y_pred)
0.5
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> mean_absolute_error(y_true, y_pred)
0.75
>>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
array([ 0.5, 1. ])
>>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.849...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average(np.abs(y_pred - y_true),
weights=sample_weight, axis=0)
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def mean_squared_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean squared error regression loss
Read more in the :ref:`User Guide <mean_squared_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
Examples
--------
>>> from sklearn.metrics import mean_squared_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_squared_error(y_true, y_pred)
0.375
>>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
>>> y_pred = [[0, 2],[-1, 2],[8, -5]]
>>> mean_squared_error(y_true, y_pred) # doctest: +ELLIPSIS
0.708...
>>> mean_squared_error(y_true, y_pred, multioutput='raw_values')
... # doctest: +ELLIPSIS
array([ 0.416..., 1. ])
>>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.824...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average((y_true - y_pred) ** 2, axis=0,
weights=sample_weight)
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def median_absolute_error(y_true, y_pred):
"""Median absolute error regression loss
Read more in the :ref:`User Guide <median_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples)
Estimated target values.
Returns
-------
loss : float
A positive floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import median_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> median_absolute_error(y_true, y_pred)
0.5
"""
y_type, y_true, y_pred, _ = _check_reg_targets(y_true, y_pred,
'uniform_average')
if y_type == 'continuous-multioutput':
raise ValueError("Multioutput not supported in median_absolute_error")
return np.median(np.abs(y_pred - y_true))
def explained_variance_score(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Explained variance regression score function
Best possible score is 1.0, lower values are worse.
Read more in the :ref:`User Guide <explained_variance_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average', \
'variance_weighted'] or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
score : float or ndarray of floats
The explained variance or ndarray if 'multioutput' is 'raw_values'.
Notes
-----
This is not a symmetric function.
Examples
--------
>>> from sklearn.metrics import explained_variance_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> explained_variance_score(y_true, y_pred) # doctest: +ELLIPSIS
0.957...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> explained_variance_score(y_true, y_pred, multioutput='uniform_average')
... # doctest: +ELLIPSIS
0.983...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
y_diff_avg = np.average(y_true - y_pred, weights=sample_weight, axis=0)
numerator = np.average((y_true - y_pred - y_diff_avg) ** 2,
weights=sample_weight, axis=0)
y_true_avg = np.average(y_true, weights=sample_weight, axis=0)
denominator = np.average((y_true - y_true_avg) ** 2,
weights=sample_weight, axis=0)
nonzero_numerator = numerator != 0
nonzero_denominator = denominator != 0
valid_score = nonzero_numerator & nonzero_denominator
output_scores = np.ones(y_true.shape[1])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
# passing to np.average() None as weights results is uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
def r2_score(y_true, y_pred,
sample_weight=None,
multioutput=None):
"""R^2 (coefficient of determination) regression score function.
Best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
would get a R^2 score of 0.0.
Read more in the :ref:`User Guide <r2_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average',
'variance_weighted'] or None or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
        Default value corresponds to 'variance_weighted', but
will be changed to 'uniform_average' in next versions.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
z : float or ndarray of floats
The R^2 score or ndarray of scores if 'multioutput' is
'raw_values'.
Notes
-----
This is not a symmetric function.
Unlike most other scores, R^2 score may be negative (it need not actually
be the square of a quantity R).
References
----------
.. [1] `Wikipedia entry on the Coefficient of determination
<http://en.wikipedia.org/wiki/Coefficient_of_determination>`_
Examples
--------
>>> from sklearn.metrics import r2_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> r2_score(y_true, y_pred) # doctest: +ELLIPSIS
0.948...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> r2_score(y_true, y_pred, multioutput='variance_weighted') # doctest: +ELLIPSIS
0.938...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
weight = sample_weight[:, np.newaxis]
else:
weight = 1.
numerator = (weight * (y_true - y_pred) ** 2).sum(axis=0,
dtype=np.float64)
denominator = (weight * (y_true - np.average(
y_true, axis=0, weights=sample_weight)) ** 2).sum(axis=0,
dtype=np.float64)
nonzero_denominator = denominator != 0
nonzero_numerator = numerator != 0
valid_score = nonzero_denominator & nonzero_numerator
output_scores = np.ones([y_true.shape[1]])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
# arbitrary set to zero to avoid -inf scores, having a constant
# y_true is not interesting for scoring a regression anyway
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if multioutput is None and y_true.shape[1] != 1:
# @FIXME change in 0.18
warnings.warn("Default 'multioutput' behavior now corresponds to "
"'variance_weighted' value, it will be changed "
"to 'uniform_average' in 0.18.",
DeprecationWarning)
multioutput = 'variance_weighted'
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
# passing None as weights results is uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
# avoid fail on constant y or one-element arrays
if not np.any(nonzero_denominator):
if not np.any(nonzero_numerator):
return 1.0
else:
return 0.0
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
| bsd-3-clause |
ehogan/iris | docs/iris/example_code/Meteorology/TEC.py | 6 | 1207 | """
Ionosphere space weather
========================
This space weather example plots a filled contour of rotated pole point
data with a shaded relief image underlay. The plot shows aggregated
vertical electron content in the ionosphere.
The plot exhibits an interesting outline effect due to excluding data
values below a certain threshold.
"""
import matplotlib.pyplot as plt
import numpy.ma as ma
import iris
import iris.plot as iplt
import iris.quickplot as qplt
def main():
# Enable a future option, to ensure that the netcdf load works the same way
# as in future Iris versions.
iris.FUTURE.netcdf_promote = True
# Load the "total electron content" cube.
filename = iris.sample_data_path('space_weather.nc')
cube = iris.load_cube(filename, 'total electron content')
# Explicitly mask negative electron content.
cube.data = ma.masked_less(cube.data, 0)
# Plot the cube using one hundred colour levels.
qplt.contourf(cube, 100)
plt.title('Total Electron Content')
plt.xlabel('longitude / degrees')
plt.ylabel('latitude / degrees')
plt.gca().stock_img()
plt.gca().coastlines()
iplt.show()
if __name__ == '__main__':
main()
| lgpl-3.0 |
DucQuang1/BuildingMachineLearningSystemsWithPython | ch02/figure4_5_no_sklearn.py | 23 | 2249 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
COLOUR_FIGURE = False
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
from load import load_dataset
import numpy as np
from knn import fit_model, predict
feature_names = [
'area',
'perimeter',
'compactness',
'length of kernel',
'width of kernel',
    'asymmetry coefficient',
'length of kernel groove',
]
def plot_decision(features, labels):
'''Plots decision boundary for KNN
Parameters
----------
features : ndarray
labels : sequence
Returns
-------
fig : Matplotlib Figure
ax : Matplotlib Axes
'''
y0, y1 = features[:, 2].min() * .9, features[:, 2].max() * 1.1
x0, x1 = features[:, 0].min() * .9, features[:, 0].max() * 1.1
X = np.linspace(x0, x1, 100)
Y = np.linspace(y0, y1, 100)
X, Y = np.meshgrid(X, Y)
model = fit_model(1, features[:, (0, 2)], np.array(labels))
C = predict(
np.vstack([X.ravel(), Y.ravel()]).T, model).reshape(X.shape)
if COLOUR_FIGURE:
cmap = ListedColormap([(1., .6, .6), (.6, 1., .6), (.6, .6, 1.)])
else:
cmap = ListedColormap([(1., 1., 1.), (.2, .2, .2), (.6, .6, .6)])
fig,ax = plt.subplots()
ax.set_xlim(x0, x1)
ax.set_ylim(y0, y1)
ax.set_xlabel(feature_names[0])
ax.set_ylabel(feature_names[2])
ax.pcolormesh(X, Y, C, cmap=cmap)
if COLOUR_FIGURE:
cmap = ListedColormap([(1., .0, .0), (.0, 1., .0), (.0, .0, 1.)])
ax.scatter(features[:, 0], features[:, 2], c=labels, cmap=cmap)
else:
for lab, ma in zip(range(3), "Do^"):
ax.plot(features[labels == lab, 0], features[
labels == lab, 2], ma, c=(1., 1., 1.))
return fig,ax
features, labels = load_dataset('seeds')
names = sorted(set(labels))
labels = np.array([names.index(ell) for ell in labels])
fig,ax = plot_decision(features, labels)
fig.savefig('figure4.png')
features -= features.mean(0)
features /= features.std(0)
fig,ax = plot_decision(features, labels)
fig.savefig('figure5.png')
| mit |
giorgiop/scikit-learn | examples/cluster/plot_mean_shift.py | 351 | 1793 | """
=============================================
A demo of the mean-shift clustering algorithm
=============================================
Reference:
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets.samples_generator import make_blobs
###############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6)
###############################################################################
# Compute clustering with MeanShift
# The following bandwidth can be automatically detected using estimate_bandwidth
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
###############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
my_members = labels == k
cluster_center = cluster_centers[k]
plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
ianatpn/nupictest | external/linux32/lib/python2.6/site-packages/matplotlib/widgets.py | 69 | 40833 | """
GUI Neutral widgets
All of these widgets require you to predefine an Axes instance and
pass that as the first arg. matplotlib doesn't try to be too smart in
layout -- you have to figure out how wide and tall you want your Axes
to be to accommodate your widget.
"""
import numpy as np
from mlab import dist
from patches import Circle, Rectangle
from lines import Line2D
from transforms import blended_transform_factory
class LockDraw:
"""
some widgets, like the cursor, draw onto the canvas, and this is not
desirable under all circumstances, like when the toolbar is in
zoom-to-rect mode and drawing a rectangle. The module level "lock"
allows someone to grab the lock and prevent other widgets from
drawing. Use matplotlib.widgets.lock(someobj) to prevent other widgets
from drawing while someobj holds the lock.
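Illustrative sketch (assuming a FigureCanvas instance ``canvas``, whose
``widgetlock`` attribute is a LockDraw instance, and an arbitrary widget
object ``mywidget``):
    if canvas.widgetlock.available(mywidget):
        canvas.widgetlock(mywidget)         # grab the lock for mywidget
        # ... do drawing that should not be interrupted ...
        canvas.widgetlock.release(mywidget)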
"""
def __init__(self):
self._owner = None
def __call__(self, o):
'reserve the lock for o'
if not self.available(o):
raise ValueError('already locked')
self._owner = o
def release(self, o):
'release the lock'
if not self.available(o):
raise ValueError('you do not own this lock')
self._owner = None
def available(self, o):
'drawing is available to o'
return not self.locked() or self.isowner(o)
def isowner(self, o):
'o owns the lock'
return self._owner is o
def locked(self):
'the lock is held'
return self._owner is not None
class Widget:
"""
OK, I couldn't resist; abstract base class for mpl GUI neutral
widgets
"""
drawon = True
eventson = True
class Button(Widget):
"""
A GUI neutral button
The following attributes are accessible
ax - the Axes the button renders into
label - a text.Text instance
color - the color of the button when not hovering
hovercolor - the color of the button when hovering
Call "on_clicked" to connect to the button
"""
def __init__(self, ax, label, image=None,
color='0.85', hovercolor='0.95'):
"""
ax is the Axes instance the button will be placed into
label is a string which is the button text
image if not None, is an image to place in the button -- can
be any legal arg to imshow (numpy array, matplotlib Image
instance, or PIL image)
color is the color of the button when not activated
hovercolor is the color of the button when the mouse is over
it
"""
if image is not None:
ax.imshow(image)
self.label = ax.text(0.5, 0.5, label,
verticalalignment='center',
horizontalalignment='center',
transform=ax.transAxes)
self.cnt = 0
self.observers = {}
self.ax = ax
ax.figure.canvas.mpl_connect('button_press_event', self._click)
ax.figure.canvas.mpl_connect('motion_notify_event', self._motion)
ax.set_navigate(False)
ax.set_axis_bgcolor(color)
ax.set_xticks([])
ax.set_yticks([])
self.color = color
self.hovercolor = hovercolor
self._lastcolor = color
def _click(self, event):
if event.inaxes != self.ax: return
if not self.eventson: return
for cid, func in self.observers.items():
func(event)
def _motion(self, event):
if event.inaxes==self.ax:
c = self.hovercolor
else:
c = self.color
if c != self._lastcolor:
self.ax.set_axis_bgcolor(c)
self._lastcolor = c
if self.drawon: self.ax.figure.canvas.draw()
def on_clicked(self, func):
"""
When the button is clicked, call this func with event
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
'remove the observer with connection id cid'
try: del self.observers[cid]
except KeyError: pass
class Slider(Widget):
"""
A slider representing a floating point range
The following attributes are defined
ax : the slider axes.Axes instance
val : the current slider value
vline : a Line2D instance representing the initial value
poly : A patch.Polygon instance which is the slider
valfmt : the format string for formatting the slider text
label : a text.Text instance, the slider label
closedmin : whether the slider is closed on the minimum
closedmax : whether the slider is closed on the maximum
slidermin : another slider - if not None, this slider must be > slidermin
slidermax : another slider - if not None, this slider must be < slidermax
dragging : allow for mouse dragging on slider
Call on_changed to connect to the slider event
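Illustrative sketch (assuming an interactive matplotlib backend; the value
range and axes rectangle are arbitrary choices):
    import matplotlib.pyplot as plt
    from matplotlib.widgets import Slider

    fig = plt.figure()
    ax_freq = fig.add_axes([0.2, 0.1, 0.6, 0.05])
    freq_slider = Slider(ax_freq, 'freq', 0.1, 10.0, valinit=1.0)

    def update(val):
        print('new slider value: %s' % val)   # react to the new position

    cid = freq_slider.on_changed(update)
    plt.show()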
"""
def __init__(self, ax, label, valmin, valmax, valinit=0.5, valfmt='%1.2f',
closedmin=True, closedmax=True, slidermin=None, slidermax=None,
dragging=True, **kwargs):
"""
Create a slider from valmin to valmax in axes ax;
valinit - the slider initial position
label - the slider label
valfmt - used to format the slider value
closedmin and closedmax - indicate whether the slider interval is closed
slidermin and slidermax - can be used to constrain the value of
this slider to the values of other sliders.
additional kwargs are passed on to self.poly which is the
matplotlib.patches.Rectangle which draws the slider. See the
matplotlib.patches.Rectangle documentation for legal property
names (eg facecolor, edgecolor, alpha, ...)
"""
self.ax = ax
self.valmin = valmin
self.valmax = valmax
self.val = valinit
self.valinit = valinit
self.poly = ax.axvspan(valmin,valinit,0,1, **kwargs)
self.vline = ax.axvline(valinit,0,1, color='r', lw=1)
self.valfmt=valfmt
ax.set_yticks([])
ax.set_xlim((valmin, valmax))
ax.set_xticks([])
ax.set_navigate(False)
ax.figure.canvas.mpl_connect('button_press_event', self._update)
if dragging:
ax.figure.canvas.mpl_connect('motion_notify_event', self._update)
self.label = ax.text(-0.02, 0.5, label, transform=ax.transAxes,
verticalalignment='center',
horizontalalignment='right')
self.valtext = ax.text(1.02, 0.5, valfmt%valinit,
transform=ax.transAxes,
verticalalignment='center',
horizontalalignment='left')
self.cnt = 0
self.observers = {}
self.closedmin = closedmin
self.closedmax = closedmax
self.slidermin = slidermin
self.slidermax = slidermax
def _update(self, event):
'update the slider position'
if event.button !=1: return
if event.inaxes != self.ax: return
val = event.xdata
if not self.closedmin and val<=self.valmin: return
if not self.closedmax and val>=self.valmax: return
if self.slidermin is not None:
if val<=self.slidermin.val: return
if self.slidermax is not None:
if val>=self.slidermax.val: return
self.set_val(val)
def set_val(self, val):
xy = self.poly.xy
xy[-1] = val, 0
xy[-2] = val, 1
self.poly.xy = xy
self.valtext.set_text(self.valfmt%val)
if self.drawon: self.ax.figure.canvas.draw()
self.val = val
if not self.eventson: return
for cid, func in self.observers.items():
func(val)
def on_changed(self, func):
"""
When the slider value is changed, call this func with the new
slider position
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
'remove the observer with connection id cid'
try: del self.observers[cid]
except KeyError: pass
def reset(self):
"reset the slider to the initial value if needed"
if (self.val != self.valinit):
self.set_val(self.valinit)
class CheckButtons(Widget):
"""
A GUI neutral set of check buttons
The following attributes are exposed
ax - the Axes instance the buttons are in
labels - a list of text.Text instances
lines - a list of (line1, line2) tuples for the x's in the check boxes.
These lines exist for each box, but have set_visible(False) when
box is not checked
rectangles - a list of patch.Rectangle instances
Connect to the CheckButtons with the on_clicked method
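Illustrative sketch (the labels and initial states are arbitrary choices):
    import matplotlib.pyplot as plt
    from matplotlib.widgets import CheckButtons

    fig = plt.figure()
    rax = fig.add_axes([0.05, 0.4, 0.2, 0.2])
    check = CheckButtons(rax, ('red', 'blue'), (True, False))

    def toggled(label):
        print('%s toggled' % label)          # label of the clicked box

    check.on_clicked(toggled)
    plt.show()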
"""
def __init__(self, ax, labels, actives):
"""
Add check buttons to axes.Axes instance ax
labels is a len(buttons) list of labels as strings
actives is a len(buttons) list of booleans indicating whether
the button is active
"""
ax.set_xticks([])
ax.set_yticks([])
ax.set_navigate(False)
if len(labels)>1:
dy = 1./(len(labels)+1)
ys = np.linspace(1-dy, dy, len(labels))
else:
dy = 0.25
ys = [0.5]
cnt = 0
axcolor = ax.get_axis_bgcolor()
self.labels = []
self.lines = []
self.rectangles = []
lineparams = {'color':'k', 'linewidth':1.25, 'transform':ax.transAxes,
'solid_capstyle':'butt'}
for y, label in zip(ys, labels):
t = ax.text(0.25, y, label, transform=ax.transAxes,
horizontalalignment='left',
verticalalignment='center')
w, h = dy/2., dy/2.
x, y = 0.05, y-h/2.
p = Rectangle(xy=(x,y), width=w, height=h,
facecolor=axcolor,
transform=ax.transAxes)
l1 = Line2D([x, x+w], [y+h, y], **lineparams)
l2 = Line2D([x, x+w], [y, y+h], **lineparams)
l1.set_visible(actives[cnt])
l2.set_visible(actives[cnt])
self.labels.append(t)
self.rectangles.append(p)
self.lines.append((l1,l2))
ax.add_patch(p)
ax.add_line(l1)
ax.add_line(l2)
cnt += 1
ax.figure.canvas.mpl_connect('button_press_event', self._clicked)
self.ax = ax
self.cnt = 0
self.observers = {}
def _clicked(self, event):
if event.button !=1 : return
if event.inaxes != self.ax: return
for p,t,lines in zip(self.rectangles, self.labels, self.lines):
if (t.get_window_extent().contains(event.x, event.y) or
p.get_window_extent().contains(event.x, event.y) ):
l1, l2 = lines
l1.set_visible(not l1.get_visible())
l2.set_visible(not l2.get_visible())
thist = t
break
else:
return
if self.drawon: self.ax.figure.canvas.draw()
if not self.eventson: return
for cid, func in self.observers.items():
func(thist.get_text())
def on_clicked(self, func):
"""
When the button is clicked, call this func with button label
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
'remove the observer with connection id cid'
try: del self.observers[cid]
except KeyError: pass
class RadioButtons(Widget):
"""
A GUI neutral radio button
The following attributes are exposed
ax - the Axes instance the buttons are in
activecolor - the color of the button when clicked
labels - a list of text.Text instances
circles - a list of patch.Circle instances
Connect to the RadioButtons with the on_clicked method
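Illustrative sketch (the labels are arbitrary choices):
    import matplotlib.pyplot as plt
    from matplotlib.widgets import RadioButtons

    fig = plt.figure()
    rax = fig.add_axes([0.05, 0.4, 0.2, 0.2])
    radio = RadioButtons(rax, ('2 Hz', '4 Hz', '8 Hz'), active=0)

    def on_select(label):
        print('selected %s' % label)

    radio.on_clicked(on_select)
    plt.show()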
"""
def __init__(self, ax, labels, active=0, activecolor='blue'):
"""
Add radio buttons to axes.Axes instance ax
labels is a len(buttons) list of labels as strings
active is the index into labels for the button that is active
activecolor is the color of the button when clicked
"""
self.activecolor = activecolor
ax.set_xticks([])
ax.set_yticks([])
ax.set_navigate(False)
dy = 1./(len(labels)+1)
ys = np.linspace(1-dy, dy, len(labels))
cnt = 0
axcolor = ax.get_axis_bgcolor()
self.labels = []
self.circles = []
for y, label in zip(ys, labels):
t = ax.text(0.25, y, label, transform=ax.transAxes,
horizontalalignment='left',
verticalalignment='center')
if cnt==active:
facecolor = activecolor
else:
facecolor = axcolor
p = Circle(xy=(0.15, y), radius=0.05, facecolor=facecolor,
transform=ax.transAxes)
self.labels.append(t)
self.circles.append(p)
ax.add_patch(p)
cnt += 1
ax.figure.canvas.mpl_connect('button_press_event', self._clicked)
self.ax = ax
self.cnt = 0
self.observers = {}
def _clicked(self, event):
if event.button !=1 : return
if event.inaxes != self.ax: return
xy = self.ax.transAxes.inverted().transform_point((event.x, event.y))
pclicked = np.array([xy[0], xy[1]])
def inside(p):
pcirc = np.array([p.center[0], p.center[1]])
return dist(pclicked, pcirc) < p.radius
for p,t in zip(self.circles, self.labels):
if t.get_window_extent().contains(event.x, event.y) or inside(p):
inp = p
thist = t
break
else: return
for p in self.circles:
if p==inp: color = self.activecolor
else: color = self.ax.get_axis_bgcolor()
p.set_facecolor(color)
if self.drawon: self.ax.figure.canvas.draw()
if not self.eventson: return
for cid, func in self.observers.items():
func(thist.get_text())
def on_clicked(self, func):
"""
When the button is clicked, call this func with button label
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
'remove the observer with connection id cid'
try: del self.observers[cid]
except KeyError: pass
class SubplotTool(Widget):
"""
A tool to adjust the subplot params of fig
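Illustrative sketch (the target figure content and the tool-figure size
are arbitrary choices):
    import matplotlib.pyplot as plt
    from matplotlib.widgets import SubplotTool

    fig = plt.figure()
    fig.add_subplot(111).plot([1, 2, 3])
    toolfig = plt.figure(figsize=(6, 3))     # figure hosting the sliders
    tool = SubplotTool(fig, toolfig)
    plt.show()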
"""
def __init__(self, targetfig, toolfig):
"""
targetfig is the figure to adjust
toolfig is the figure to embed the subplot tool into. If
None, a default pylab figure will be created. If you are
using this from the GUI
"""
self.targetfig = targetfig
toolfig.subplots_adjust(left=0.2, right=0.9)
class toolbarfmt:
def __init__(self, slider):
self.slider = slider
def __call__(self, x, y):
fmt = '%s=%s'%(self.slider.label.get_text(), self.slider.valfmt)
return fmt%x
self.axleft = toolfig.add_subplot(711)
self.axleft.set_title('Click on slider to adjust subplot param')
self.axleft.set_navigate(False)
self.sliderleft = Slider(self.axleft, 'left', 0, 1, targetfig.subplotpars.left, closedmax=False)
self.sliderleft.on_changed(self.funcleft)
self.axbottom = toolfig.add_subplot(712)
self.axbottom.set_navigate(False)
self.sliderbottom = Slider(self.axbottom, 'bottom', 0, 1, targetfig.subplotpars.bottom, closedmax=False)
self.sliderbottom.on_changed(self.funcbottom)
self.axright = toolfig.add_subplot(713)
self.axright.set_navigate(False)
self.sliderright = Slider(self.axright, 'right', 0, 1, targetfig.subplotpars.right, closedmin=False)
self.sliderright.on_changed(self.funcright)
self.axtop = toolfig.add_subplot(714)
self.axtop.set_navigate(False)
self.slidertop = Slider(self.axtop, 'top', 0, 1, targetfig.subplotpars.top, closedmin=False)
self.slidertop.on_changed(self.functop)
self.axwspace = toolfig.add_subplot(715)
self.axwspace.set_navigate(False)
self.sliderwspace = Slider(self.axwspace, 'wspace', 0, 1, targetfig.subplotpars.wspace, closedmax=False)
self.sliderwspace.on_changed(self.funcwspace)
self.axhspace = toolfig.add_subplot(716)
self.axhspace.set_navigate(False)
self.sliderhspace = Slider(self.axhspace, 'hspace', 0, 1, targetfig.subplotpars.hspace, closedmax=False)
self.sliderhspace.on_changed(self.funchspace)
# constraints
self.sliderleft.slidermax = self.sliderright
self.sliderright.slidermin = self.sliderleft
self.sliderbottom.slidermax = self.slidertop
self.slidertop.slidermin = self.sliderbottom
bax = toolfig.add_axes([0.8, 0.05, 0.15, 0.075])
self.buttonreset = Button(bax, 'Reset')
sliders = (self.sliderleft, self.sliderbottom, self.sliderright,
self.slidertop, self.sliderwspace, self.sliderhspace, )
def func(event):
thisdrawon = self.drawon
self.drawon = False
# store the drawon state of each slider
bs = []
for slider in sliders:
bs.append(slider.drawon)
slider.drawon = False
# reset the slider to the initial position
for slider in sliders:
slider.reset()
# reset drawon
for slider, b in zip(sliders, bs):
slider.drawon = b
# draw the canvas
self.drawon = thisdrawon
if self.drawon:
toolfig.canvas.draw()
self.targetfig.canvas.draw()
# during reset there can be a temporary invalid state
# depending on the order of the reset so we turn off
# validation for the resetting
validate = toolfig.subplotpars.validate
toolfig.subplotpars.validate = False
self.buttonreset.on_clicked(func)
toolfig.subplotpars.validate = validate
def funcleft(self, val):
self.targetfig.subplots_adjust(left=val)
if self.drawon: self.targetfig.canvas.draw()
def funcright(self, val):
self.targetfig.subplots_adjust(right=val)
if self.drawon: self.targetfig.canvas.draw()
def funcbottom(self, val):
self.targetfig.subplots_adjust(bottom=val)
if self.drawon: self.targetfig.canvas.draw()
def functop(self, val):
self.targetfig.subplots_adjust(top=val)
if self.drawon: self.targetfig.canvas.draw()
def funcwspace(self, val):
self.targetfig.subplots_adjust(wspace=val)
if self.drawon: self.targetfig.canvas.draw()
def funchspace(self, val):
self.targetfig.subplots_adjust(hspace=val)
if self.drawon: self.targetfig.canvas.draw()
class Cursor:
"""
A horizontal and vertical line that span the axes and move with
the pointer. You can turn off the hline or vline respectively with
the attributes
horizOn =True|False: controls visibility of the horizontal line
vertOn =True|False: controls visibility of the vertical line
And the visibility of the cursor itself with the visible attribute
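Illustrative sketch (the plotted data and line properties are arbitrary
choices):
    import matplotlib.pyplot as plt
    from matplotlib.widgets import Cursor

    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot([0, 1, 2], [0, 1, 0])
    cursor = Cursor(ax, useblit=False, color='red', linewidth=1)
    plt.show()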
"""
def __init__(self, ax, useblit=False, **lineprops):
"""
Add a cursor to ax. If useblit=True, use the backend
dependent blitting features for faster updates (GTKAgg only
now). lineprops is a dictionary of line properties. See
examples/widgets/cursor.py.
"""
self.ax = ax
self.canvas = ax.figure.canvas
self.canvas.mpl_connect('motion_notify_event', self.onmove)
self.canvas.mpl_connect('draw_event', self.clear)
self.visible = True
self.horizOn = True
self.vertOn = True
self.useblit = useblit
self.lineh = ax.axhline(ax.get_ybound()[0], visible=False, **lineprops)
self.linev = ax.axvline(ax.get_xbound()[0], visible=False, **lineprops)
self.background = None
self.needclear = False
def clear(self, event):
'clear the cursor'
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
self.linev.set_visible(False)
self.lineh.set_visible(False)
def onmove(self, event):
'on mouse motion draw the cursor if visible'
if event.inaxes != self.ax:
self.linev.set_visible(False)
self.lineh.set_visible(False)
if self.needclear:
self.canvas.draw()
self.needclear = False
return
self.needclear = True
if not self.visible: return
self.linev.set_xdata((event.xdata, event.xdata))
self.lineh.set_ydata((event.ydata, event.ydata))
self.linev.set_visible(self.visible and self.vertOn)
self.lineh.set_visible(self.visible and self.horizOn)
self._update()
def _update(self):
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.linev)
self.ax.draw_artist(self.lineh)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
return False
class MultiCursor:
"""
Provide a vertical line cursor shared between multiple axes
from matplotlib.widgets import MultiCursor
from pylab import figure, show, nx
t = nx.arange(0.0, 2.0, 0.01)
s1 = nx.sin(2*nx.pi*t)
s2 = nx.sin(4*nx.pi*t)
fig = figure()
ax1 = fig.add_subplot(211)
ax1.plot(t, s1)
ax2 = fig.add_subplot(212, sharex=ax1)
ax2.plot(t, s2)
multi = MultiCursor(fig.canvas, (ax1, ax2), color='r', lw=1)
show()
"""
def __init__(self, canvas, axes, useblit=True, **lineprops):
self.canvas = canvas
self.axes = axes
xmin, xmax = axes[-1].get_xlim()
xmid = 0.5*(xmin+xmax)
self.lines = [ax.axvline(xmid, visible=False, **lineprops) for ax in axes]
self.visible = True
self.useblit = useblit
self.background = None
self.needclear = False
self.canvas.mpl_connect('motion_notify_event', self.onmove)
self.canvas.mpl_connect('draw_event', self.clear)
def clear(self, event):
'clear the cursor'
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.canvas.figure.bbox)
for line in self.lines: line.set_visible(False)
def onmove(self, event):
if event.inaxes is None: return
if not self.canvas.widgetlock.available(self): return
self.needclear = True
if not self.visible: return
for line in self.lines:
line.set_xdata((event.xdata, event.xdata))
line.set_visible(self.visible)
self._update()
def _update(self):
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
for ax, line in zip(self.axes, self.lines):
ax.draw_artist(line)
self.canvas.blit(self.canvas.figure.bbox)
else:
self.canvas.draw_idle()
class SpanSelector:
"""
Select a min/max range of the x or y axes for a matplotlib Axes
Example usage:
ax = subplot(111)
ax.plot(x,y)
def onselect(vmin, vmax):
print vmin, vmax
span = SpanSelector(ax, onselect, 'horizontal')
onmove_callback is an optional callback that will be called on mouse move
with the span range
"""
def __init__(self, ax, onselect, direction, minspan=None, useblit=False, rectprops=None, onmove_callback=None):
"""
Create a span selector in ax. When a selection is made, clear
the span and call onselect with
onselect(vmin, vmax)
and clear the span.
direction must be 'horizontal' or 'vertical'
If minspan is not None, ignore events smaller than minspan
The span rect is drawn with rectprops; default
rectprops = dict(facecolor='red', alpha=0.5)
set the visible attribute to False if you want to turn off
the functionality of the span selector
"""
if rectprops is None:
rectprops = dict(facecolor='red', alpha=0.5)
assert direction in ['horizontal', 'vertical'], 'Must choose horizontal or vertical for direction'
self.direction = direction
self.ax = None
self.canvas = None
self.visible = True
self.cids=[]
self.rect = None
self.background = None
self.pressv = None
self.rectprops = rectprops
self.onselect = onselect
self.onmove_callback = onmove_callback
self.useblit = useblit
self.minspan = minspan
# Needed when dragging out of axes
self.buttonDown = False
self.prev = (0, 0)
self.new_axes(ax)
def new_axes(self,ax):
self.ax = ax
if self.canvas is not ax.figure.canvas:
for cid in self.cids:
self.canvas.mpl_disconnect(cid)
self.canvas = ax.figure.canvas
self.cids.append(self.canvas.mpl_connect('motion_notify_event', self.onmove))
self.cids.append(self.canvas.mpl_connect('button_press_event', self.press))
self.cids.append(self.canvas.mpl_connect('button_release_event', self.release))
self.cids.append(self.canvas.mpl_connect('draw_event', self.update_background))
if self.direction == 'horizontal':
trans = blended_transform_factory(self.ax.transData, self.ax.transAxes)
w,h = 0,1
else:
trans = blended_transform_factory(self.ax.transAxes, self.ax.transData)
w,h = 1,0
self.rect = Rectangle( (0,0), w, h,
transform=trans,
visible=False,
**self.rectprops
)
if not self.useblit: self.ax.add_patch(self.rect)
def update_background(self, event):
'force an update of the background'
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
def ignore(self, event):
'return True if event should be ignored'
return event.inaxes!=self.ax or not self.visible or event.button !=1
def press(self, event):
'on button press event'
if self.ignore(event): return
self.buttonDown = True
self.rect.set_visible(self.visible)
if self.direction == 'horizontal':
self.pressv = event.xdata
else:
self.pressv = event.ydata
return False
def release(self, event):
'on button release event'
if self.pressv is None or (self.ignore(event) and not self.buttonDown): return
self.buttonDown = False
self.rect.set_visible(False)
self.canvas.draw()
vmin = self.pressv
if self.direction == 'horizontal':
vmax = event.xdata or self.prev[0]
else:
vmax = event.ydata or self.prev[1]
if vmin>vmax: vmin, vmax = vmax, vmin
span = vmax - vmin
if self.minspan is not None and span<self.minspan: return
self.onselect(vmin, vmax)
self.pressv = None
return False
def update(self):
'draw using newfangled blit or oldfangled draw depending on useblit'
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.rect)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
return False
def onmove(self, event):
'on motion notify event'
if self.pressv is None or self.ignore(event): return
x, y = event.xdata, event.ydata
self.prev = x, y
if self.direction == 'horizontal':
v = x
else:
v = y
minv, maxv = v, self.pressv
if minv>maxv: minv, maxv = maxv, minv
if self.direction == 'horizontal':
self.rect.set_x(minv)
self.rect.set_width(maxv-minv)
else:
self.rect.set_y(minv)
self.rect.set_height(maxv-minv)
if self.onmove_callback is not None:
vmin = self.pressv
if self.direction == 'horizontal':
vmax = event.xdata or self.prev[0]
else:
vmax = event.ydata or self.prev[1]
if vmin>vmax: vmin, vmax = vmax, vmin
self.onmove_callback(vmin, vmax)
self.update()
return False
# For backwards compatibility only!
class HorizontalSpanSelector(SpanSelector):
def __init__(self, ax, onselect, **kwargs):
import warnings
warnings.warn('Use SpanSelector instead!', DeprecationWarning)
SpanSelector.__init__(self, ax, onselect, 'horizontal', **kwargs)
class RectangleSelector:
"""
Select a min/max range of the x axes for a matplotlib Axes
Example usage::
from matplotlib.widgets import RectangleSelector
from pylab import *
def onselect(eclick, erelease):
'eclick and erelease are matplotlib events at press and release'
print ' startposition : (%f, %f)' % (eclick.xdata, eclick.ydata)
print ' endposition : (%f, %f)' % (erelease.xdata, erelease.ydata)
print ' used button : ', eclick.button
def toggle_selector(event):
print ' Key pressed.'
if event.key in ['Q', 'q'] and toggle_selector.RS.active:
print ' RectangleSelector deactivated.'
toggle_selector.RS.set_active(False)
if event.key in ['A', 'a'] and not toggle_selector.RS.active:
print ' RectangleSelector activated.'
toggle_selector.RS.set_active(True)
x = arange(100)/(99.0)
y = sin(x)
fig = figure()
ax = subplot(111)
ax.plot(x,y)
toggle_selector.RS = RectangleSelector(ax, onselect, drawtype='line')
connect('key_press_event', toggle_selector)
show()
"""
def __init__(self, ax, onselect, drawtype='box',
minspanx=None, minspany=None, useblit=False,
lineprops=None, rectprops=None, spancoords='data'):
"""
Create a selector in ax. When a selection is made, clear
the span and call onselect with
onselect(pos_1, pos_2)
and clear the drawn box/line. Here pos_1 and pos_2 are arrays of length 2
containing the x- and y-coordinate.
If minspanx is not None then events smaller than minspanx
in x direction are ignored (it's the same for y).
The rect is drawn with rectprops; default
rectprops = dict(facecolor='red', edgecolor = 'black',
alpha=0.5, fill=False)
The line is drawn with lineprops; default
lineprops = dict(color='black', linestyle='-',
linewidth = 2, alpha=0.5)
Use drawtype if you want the mouse to draw a line, a box or nothing
between click and actual position, by setting
drawtype = 'line', drawtype='box' or drawtype = 'none'.
spancoords is one of 'data' or 'pixels'. If 'data', minspanx
and minspanx will be interpreted in the same coordinates as
the x and ya axis, if 'pixels', they are in pixels
"""
self.ax = ax
self.visible = True
self.canvas = ax.figure.canvas
self.canvas.mpl_connect('motion_notify_event', self.onmove)
self.canvas.mpl_connect('button_press_event', self.press)
self.canvas.mpl_connect('button_release_event', self.release)
self.canvas.mpl_connect('draw_event', self.update_background)
self.active = True # for activation / deactivation
self.to_draw = None
self.background = None
if drawtype == 'none':
drawtype = 'line' # draw a line but make it
self.visible = False # invisible
if drawtype == 'box':
if rectprops is None:
rectprops = dict(facecolor='white', edgecolor = 'black',
alpha=0.5, fill=False)
self.rectprops = rectprops
self.to_draw = Rectangle((0,0), 0, 1,visible=False,**self.rectprops)
self.ax.add_patch(self.to_draw)
if drawtype == 'line':
if lineprops is None:
lineprops = dict(color='black', linestyle='-',
linewidth = 2, alpha=0.5)
self.lineprops = lineprops
self.to_draw = Line2D([0,0],[0,0],visible=False,**self.lineprops)
self.ax.add_line(self.to_draw)
self.onselect = onselect
self.useblit = useblit
self.minspanx = minspanx
self.minspany = minspany
assert(spancoords in ('data', 'pixels'))
self.spancoords = spancoords
self.drawtype = drawtype
# will save the data (position at mouseclick)
self.eventpress = None
# will save the data (pos. at mouserelease)
self.eventrelease = None
def update_background(self, event):
'force an update of the background'
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
def ignore(self, event):
'return True if event should be ignored'
# If RectangleSelector is not active :
if not self.active:
return True
# If canvas was locked
if not self.canvas.widgetlock.available(self):
return True
# If no button was pressed yet ignore the event if it was out
# of the axes
if self.eventpress is None:
return event.inaxes!= self.ax
# If a button was pressed, check if the release-button is the
# same.
return (event.inaxes!=self.ax or
event.button != self.eventpress.button)
def press(self, event):
'on button press event'
# Is the correct button pressed within the correct axes?
if self.ignore(event): return
# make the drawn box/line visible; get the click-coordinates,
# button, ...
self.to_draw.set_visible(self.visible)
self.eventpress = event
return False
def release(self, event):
'on button release event'
if self.eventpress is None or self.ignore(event): return
# make the box/line invisible again
self.to_draw.set_visible(False)
self.canvas.draw()
# release coordinates, button, ...
self.eventrelease = event
if self.spancoords=='data':
xmin, ymin = self.eventpress.xdata, self.eventpress.ydata
xmax, ymax = self.eventrelease.xdata, self.eventrelease.ydata
# calculate dimensions of box or line get values in the right
# order
elif self.spancoords=='pixels':
xmin, ymin = self.eventpress.x, self.eventpress.y
xmax, ymax = self.eventrelease.x, self.eventrelease.y
else:
raise ValueError('spancoords must be "data" or "pixels"')
if xmin>xmax: xmin, xmax = xmax, xmin
if ymin>ymax: ymin, ymax = ymax, ymin
spanx = xmax - xmin
spany = ymax - ymin
xproblems = self.minspanx is not None and spanx<self.minspanx
yproblems = self.minspany is not None and spany<self.minspany
if (self.drawtype=='box') and (xproblems or yproblems):
"""Box too small""" # check that the drawn distance (if it exists) is
return # not too small in either the x or y direction
if (self.drawtype=='line') and (xproblems and yproblems):
"""Line too small""" # check that the drawn distance (if it exists) is
return # not too small in either the x or y direction
self.onselect(self.eventpress, self.eventrelease)
# call desired function
self.eventpress = None # reset the variables to their
self.eventrelease = None # initial values
return False
def update(self):
'draw using newfangled blit or oldfangled draw depending on useblit'
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.to_draw)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
return False
def onmove(self, event):
'on motion notify event if box/line is wanted'
if self.eventpress is None or self.ignore(event): return
x,y = event.xdata, event.ydata # actual position (with
# button still pressed)
if self.drawtype == 'box':
minx, maxx = self.eventpress.xdata, x # click-x and actual mouse-x
miny, maxy = self.eventpress.ydata, y # click-y and actual mouse-y
if minx>maxx: minx, maxx = maxx, minx # get them in the right order
if miny>maxy: miny, maxy = maxy, miny
self.to_draw.set_x(minx) # set lower left of box
self.to_draw.set_y(miny)
self.to_draw.set_width(maxx-minx) # set width and height of box
self.to_draw.set_height(maxy-miny)
self.update()
return False
if self.drawtype == 'line':
self.to_draw.set_data([self.eventpress.xdata, x],
[self.eventpress.ydata, y])
self.update()
return False
def set_active(self, active):
""" Use this to activate / deactivate the RectangleSelector
from your program with a boolean variable 'active'.
"""
self.active = active
def get_active(self):
""" to get status of active mode (boolean variable)"""
return self.active
class Lasso(Widget):
def __init__(self, ax, xy, callback=None, useblit=True):
self.axes = ax
self.figure = ax.figure
self.canvas = self.figure.canvas
self.useblit = useblit
if useblit:
self.background = self.canvas.copy_from_bbox(self.axes.bbox)
x, y = xy
self.verts = [(x,y)]
self.line = Line2D([x], [y], linestyle='-', color='black', lw=2)
self.axes.add_line(self.line)
self.callback = callback
self.cids = []
self.cids.append(self.canvas.mpl_connect('button_release_event', self.onrelease))
self.cids.append(self.canvas.mpl_connect('motion_notify_event', self.onmove))
def onrelease(self, event):
if self.verts is not None:
self.verts.append((event.xdata, event.ydata))
if len(self.verts)>2:
self.callback(self.verts)
self.axes.lines.remove(self.line)
self.verts = None
for cid in self.cids:
self.canvas.mpl_disconnect(cid)
def onmove(self, event):
if self.verts is None: return
if event.inaxes != self.axes: return
if event.button!=1: return
self.verts.append((event.xdata, event.ydata))
self.line.set_data(zip(*self.verts))
if self.useblit:
self.canvas.restore_region(self.background)
self.axes.draw_artist(self.line)
self.canvas.blit(self.axes.bbox)
else:
self.canvas.draw_idle()
| gpl-3.0 |
mikebenfield/scikit-learn | sklearn/tests/test_discriminant_analysis.py | 37 | 11979 | import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.datasets import make_blobs
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.discriminant_analysis import _cov
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]], dtype='f')
y = np.array([1, 1, 1, 2, 2, 2])
y3 = np.array([1, 1, 2, 2, 3, 3])
# Degenerate data with only one feature (still should be separable)
X1 = np.array([[-2, ], [-1, ], [-1, ], [1, ], [1, ], [2, ]], dtype='f')
# Data is just 9 separable points in the plane
X6 = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
[1, 3], [1, 2], [2, 1], [2, 2]])
y6 = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
y7 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])
# Degenerate data with 1 feature (still should be separable)
X7 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
[2, ], [3, ]])
# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
[2, 0], [3, 0]])
# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8, 3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
solver_shrinkage = [('svd', None), ('lsqr', None), ('eigen', None),
('lsqr', 'auto'), ('lsqr', 0), ('lsqr', 0.43),
('eigen', 'auto'), ('eigen', 0), ('eigen', 0.43)]
def test_lda_predict():
# Test LDA classification.
# This checks that LDA implements fit and predict and returns correct
# values for simple toy data.
for test_case in solver_shrinkage:
solver, shrinkage = test_case
clf = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage)
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y, 'solver %s' % solver)
# Assert that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y, 'solver %s' % solver)
# Test probability estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y,
'solver %s' % solver)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1,
8, 'solver %s' % solver)
# Primarily test for commit 2f34950 -- "reuse" of priors
y_pred3 = clf.fit(X, y3).predict(X)
# LDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3), 'solver %s' % solver)
# Test invalid shrinkages
clf = LinearDiscriminantAnalysis(solver="lsqr", shrinkage=-0.2231)
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="eigen", shrinkage="dummy")
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="svd", shrinkage="auto")
assert_raises(NotImplementedError, clf.fit, X, y)
# Test unknown solver
clf = LinearDiscriminantAnalysis(solver="dummy")
assert_raises(ValueError, clf.fit, X, y)
def test_lda_priors():
# Test priors (negative priors)
priors = np.array([0.5, -0.5])
clf = LinearDiscriminantAnalysis(priors=priors)
msg = "priors must be non-negative"
assert_raise_message(ValueError, msg, clf.fit, X, y)
# Test that priors passed as a list are correctly handled (run to see if
# failure)
clf = LinearDiscriminantAnalysis(priors=[0.5, 0.5])
clf.fit(X, y)
# Test that priors always sum to 1
priors = np.array([0.5, 0.6])
prior_norm = np.array([0.45, 0.55])
clf = LinearDiscriminantAnalysis(priors=priors)
assert_warns(UserWarning, clf.fit, X, y)
assert_array_almost_equal(clf.priors_, prior_norm, 2)
def test_lda_coefs():
# Test if the coefficients of the solvers are approximately the same.
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_svd = LinearDiscriminantAnalysis(solver="svd")
clf_lda_lsqr = LinearDiscriminantAnalysis(solver="lsqr")
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_svd.fit(X, y)
clf_lda_lsqr.fit(X, y)
clf_lda_eigen.fit(X, y)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_lsqr.coef_, 1)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_eigen.coef_, 1)
assert_array_almost_equal(clf_lda_eigen.coef_, clf_lda_lsqr.coef_, 1)
def test_lda_transform():
# Test LDA transform.
clf = LinearDiscriminantAnalysis(solver="svd", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = LinearDiscriminantAnalysis(solver="eigen", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = LinearDiscriminantAnalysis(solver="lsqr", n_components=1)
clf.fit(X, y)
msg = "transform not implemented for 'lsqr'"
assert_raise_message(NotImplementedError, msg, clf.transform, X)
def test_lda_explained_variance_ratio():
# Test if the sum of the normalized eigen vectors values equals 1,
# Also tests whether the explained_variance_ratio_ formed by the
# eigen solver is the same as the explained_variance_ratio_ formed
# by the svd solver
state = np.random.RandomState(0)
X = state.normal(loc=0, scale=100, size=(40, 20))
y = state.randint(0, 3, size=(40,))
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_eigen.fit(X, y)
assert_almost_equal(clf_lda_eigen.explained_variance_ratio_.sum(), 1.0, 3)
assert_equal(clf_lda_eigen.explained_variance_ratio_.shape, (2,),
"Unexpected length for explained_variance_ratio_")
clf_lda_svd = LinearDiscriminantAnalysis(solver="svd")
clf_lda_svd.fit(X, y)
assert_almost_equal(clf_lda_svd.explained_variance_ratio_.sum(), 1.0, 3)
assert_equal(clf_lda_svd.explained_variance_ratio_.shape, (2,),
"Unexpected length for explained_variance_ratio_")
assert_array_almost_equal(clf_lda_svd.explained_variance_ratio_,
clf_lda_eigen.explained_variance_ratio_)
def test_lda_orthogonality():
# arrange four classes with their means in a kite-shaped pattern
# the longer distance should be transformed to the first component, and
# the shorter distance to the second component.
means = np.array([[0, 0, -1], [0, 2, 0], [0, -2, 0], [0, 0, 5]])
# We construct perfectly symmetric distributions, so the LDA can estimate
# precise means.
scatter = np.array([[0.1, 0, 0], [-0.1, 0, 0], [0, 0.1, 0], [0, -0.1, 0],
[0, 0, 0.1], [0, 0, -0.1]])
X = (means[:, np.newaxis, :] + scatter[np.newaxis, :, :]).reshape((-1, 3))
y = np.repeat(np.arange(means.shape[0]), scatter.shape[0])
# Fit LDA and transform the means
clf = LinearDiscriminantAnalysis(solver="svd").fit(X, y)
means_transformed = clf.transform(means)
d1 = means_transformed[3] - means_transformed[0]
d2 = means_transformed[2] - means_transformed[1]
d1 /= np.sqrt(np.sum(d1 ** 2))
d2 /= np.sqrt(np.sum(d2 ** 2))
# the transformed within-class covariance should be the identity matrix
assert_almost_equal(np.cov(clf.transform(scatter).T), np.eye(2))
# the means of classes 0 and 3 should lie on the first component
assert_almost_equal(np.abs(np.dot(d1[:2], [1, 0])), 1.0)
# the means of classes 1 and 2 should lie on the second component
assert_almost_equal(np.abs(np.dot(d2[:2], [0, 1])), 1.0)
def test_lda_scaling():
# Test if classification works correctly with differently scaled features.
n = 100
rng = np.random.RandomState(1234)
# use uniform distribution of features to make sure there is absolutely no
# overlap between classes.
x1 = rng.uniform(-1, 1, (n, 3)) + [-10, 0, 0]
x2 = rng.uniform(-1, 1, (n, 3)) + [10, 0, 0]
x = np.vstack((x1, x2)) * [1, 100, 10000]
y = [-1] * n + [1] * n
for solver in ('svd', 'lsqr', 'eigen'):
clf = LinearDiscriminantAnalysis(solver=solver)
# should be able to separate the data perfectly
assert_equal(clf.fit(x, y).score(x, y), 1.0,
'using covariance: %s' % solver)
def test_qda():
# QDA classification.
# This checks that QDA implements fit and predict and returns
# correct values for a simple toy dataset.
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
assert_array_equal(y_pred, y6)
# Assure that it works with 1D data
y_pred1 = clf.fit(X7, y6).predict(X7)
assert_array_equal(y_pred1, y6)
# Test probas estimates
y_proba_pred1 = clf.predict_proba(X7)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y6)
y_log_proba_pred1 = clf.predict_log_proba(X7)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)
y_pred3 = clf.fit(X6, y7).predict(X6)
# QDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y7))
# Classes should have at least 2 elements
assert_raises(ValueError, clf.fit, X6, y4)
def test_qda_priors():
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
n_pos = np.sum(y_pred == 2)
neg = 1e-10
clf = QuadraticDiscriminantAnalysis(priors=np.array([neg, 1 - neg]))
y_pred = clf.fit(X6, y6).predict(X6)
n_pos2 = np.sum(y_pred == 2)
assert_greater(n_pos2, n_pos)
def test_qda_store_covariances():
# The default is to not set the covariances_ attribute
clf = QuadraticDiscriminantAnalysis().fit(X6, y6)
assert_true(not hasattr(clf, 'covariances_'))
# Test the actual attribute:
clf = QuadraticDiscriminantAnalysis(store_covariances=True).fit(X6, y6)
assert_true(hasattr(clf, 'covariances_'))
assert_array_almost_equal(
clf.covariances_[0],
np.array([[0.7, 0.45], [0.45, 0.7]])
)
assert_array_almost_equal(
clf.covariances_[1],
np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
)
def test_qda_regularization():
# the default is reg_param=0. and will cause issues
# when there is a constant variable
clf = QuadraticDiscriminantAnalysis()
with ignore_warnings():
y_pred = clf.fit(X2, y6).predict(X2)
assert_true(np.any(y_pred != y6))
# adding a little regularization fixes the problem
clf = QuadraticDiscriminantAnalysis(reg_param=0.01)
with ignore_warnings():
clf.fit(X2, y6)
y_pred = clf.predict(X2)
assert_array_equal(y_pred, y6)
# Case n_samples_in_a_class < n_features
clf = QuadraticDiscriminantAnalysis(reg_param=0.1)
with ignore_warnings():
clf.fit(X5, y5)
y_pred5 = clf.predict(X5)
assert_array_equal(y_pred5, y5)
def test_covariance():
x, y = make_blobs(n_samples=100, n_features=5,
centers=1, random_state=42)
# make features correlated
x = np.dot(x, np.arange(x.shape[1] ** 2).reshape(x.shape[1], x.shape[1]))
c_e = _cov(x, 'empirical')
assert_almost_equal(c_e, c_e.T)
c_s = _cov(x, 'auto')
assert_almost_equal(c_s, c_s.T)
| bsd-3-clause |
hugobowne/scikit-learn | examples/neighbors/plot_species_kde.py | 282 | 4059 | """
================================================
Kernel Density Estimate of Species Distributions
================================================
This shows an example of a neighbors-based query (in particular a kernel
density estimate) on geospatial data, using a Ball Tree built upon the
Haversine distance metric -- i.e. distances over points in latitude/longitude.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
This example does not perform any learning over the data
(see :ref:`example_applications_plot_species_distribution_modeling.py` for
an example of classification based on the attributes in this dataset). It
simply shows the kernel density estimate of observed data points in
geospatial coordinates.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn.neighbors import KernelDensity
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
# Get matrices/arrays of species IDs and locations
data = fetch_species_distributions()
species_names = ['Bradypus Variegatus', 'Microryzomys Minutus']
Xtrain = np.vstack([data['train']['dd lat'],
data['train']['dd long']]).T
ytrain = np.array([d.decode('ascii').startswith('micro')
for d in data['train']['species']], dtype='int')
Xtrain *= np.pi / 180. # Convert lat/long to radians
# Set up the data grid for the contour plot
xgrid, ygrid = construct_grids(data)
X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1])
land_reference = data.coverages[6][::5, ::5]
land_mask = (land_reference > -9999).ravel()
xy = np.vstack([Y.ravel(), X.ravel()]).T
xy = xy[land_mask]
xy *= np.pi / 180.
# Plot map of South America with distributions of each species
fig = plt.figure()
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05)
for i in range(2):
plt.subplot(1, 2, i + 1)
# construct a kernel density estimate of the distribution
print(" - computing KDE in spherical coordinates")
kde = KernelDensity(bandwidth=0.04, metric='haversine',
kernel='gaussian', algorithm='ball_tree')
kde.fit(Xtrain[ytrain == i])
# evaluate only on the land: -9999 indicates ocean
Z = -9999 + np.zeros(land_mask.shape[0])
Z[land_mask] = np.exp(kde.score_samples(xy))
Z = Z.reshape(X.shape)
# plot contours of the density
levels = np.linspace(0, Z.max(), 25)
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
plt.title(species_names[i])
plt.show()
| bsd-3-clause |
mvpossum/machine-learning | tp4/ejc/plot_table.py | 4 | 1251 | #! /usr/bin/env python
import sys
import os
from sys import argv
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
FILE = argv[1]
PLOT_FILE = os.path.splitext(FILE)[0]+'.png'
ERROR = 'error' in FILE.lower()
legend = argv[2:]
cols = len(legend)
if cols>=4:
linestyles = ['--', '-', '--', '-', '--', '-']
colors = ['r', 'r', 'b', 'b', 'g', 'g']
else:
linestyles = ['-','-']
colors = ['r', 'b']
x = []
y = [[] for _ in range(cols)]
for line in open(FILE):
if line.strip():
line = [float(s) for s in line.split(' ') if s.strip()]
x.append(line[0])
for j in range(cols):
y[j].append(line[j+1])
fig, ax = plt.subplots()
FONT_SIZE = 18
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontsize(FONT_SIZE)
for yv in range(cols):
ax.plot(x, y[yv], label=legend[yv], linestyle=linestyles[yv], color=colors[yv])
if ERROR:
ax.set_ylim(0,50)
else:
ax.set_ylim(0,30)
ax.set_xlim(2,32)
ax.legend(prop={'size':FONT_SIZE})
plt.xlabel('d', size=FONT_SIZE)
ylabel = 'Error (%)' if ERROR else 'Number of tree nodes'
plt.ylabel(ylabel, size=FONT_SIZE)
plt.savefig(PLOT_FILE)
#~ plt.show()
| mit |
wdurhamh/statsmodels | statsmodels/tsa/seasonal.py | 27 | 5392 | """
Seasonal Decomposition by Moving Averages
"""
from statsmodels.compat.python import lmap, range, iteritems
import numpy as np
from pandas.core.nanops import nanmean as pd_nanmean
from .filters._utils import _maybe_get_pandas_wrapper_freq
from .filters.filtertools import convolution_filter
from statsmodels.tsa.tsatools import freq_to_period
def seasonal_mean(x, freq):
"""
Return means for each period in x. freq is an int that gives the
number of periods per cycle. E.g., 12 for monthly. NaNs are ignored
in the mean.
"""
return np.array([pd_nanmean(x[i::freq]) for i in range(freq)])
def seasonal_decompose(x, model="additive", filt=None, freq=None):
"""
Parameters
----------
x : array-like
Time series
model : str {"additive", "multiplicative"}
Type of seasonal component. Abbreviations are accepted.
filt : array-like
The filter coefficients for filtering out the seasonal component.
The default is a symmetric moving average.
freq : int, optional
Frequency of the series. Must be used if x is not a pandas
object with a timeseries index.
Returns
-------
results : obj
A object with seasonal, trend, and resid attributes.
Notes
-----
This is a naive decomposition. More sophisticated methods should
be preferred.
The additive model is Y[t] = T[t] + S[t] + e[t]
The multiplicative model is Y[t] = T[t] * S[t] * e[t]
The seasonal component is first removed by applying a convolution
filter to the data. The average of this smoothed series for each
period is the returned seasonal component.
See Also
--------
statsmodels.tsa.filters.convolution_filter
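Examples
--------
Illustrative sketch; the quarterly toy series and ``freq=4`` are
arbitrary choices.
>>> import numpy as np
>>> x = np.array([10., 12., 14., 11., 11., 13., 15., 12.])
>>> result = seasonal_decompose(x, model='additive', freq=4)
>>> trend, seasonal, resid = result.trend, result.seasonal, result.resid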
"""
_pandas_wrapper, pfreq = _maybe_get_pandas_wrapper_freq(x)
x = np.asanyarray(x).squeeze()
nobs = len(x)
if not np.all(np.isfinite(x)):
raise ValueError("This function does not handle missing values")
if model.startswith('m'):
if np.any(x <= 0):
raise ValueError("Multiplicative seasonality is not appropriate "
"for zero and negative values")
if pfreq is not None:
pfreq = freq_to_period(pfreq)
if freq and pfreq != freq:
raise ValueError("Inferred frequency of index and frequency "
"don't match. This function does not re-sample")
else:
freq = pfreq
elif freq is None:
raise ValueError("You must specify a freq or x must be a "
"pandas object with a timeseries index")
if filt is None:
if freq % 2 == 0: # split weights at ends
filt = np.array([.5] + [1] * (freq - 1) + [.5]) / freq
else:
filt = np.repeat(1./freq, freq)
trend = convolution_filter(x, filt)
# nan pad for conformability - convolve doesn't do it
if model.startswith('m'):
detrended = x / trend
else:
detrended = x - trend
period_averages = seasonal_mean(detrended, freq)
if model.startswith('m'):
period_averages /= np.mean(period_averages)
else:
period_averages -= np.mean(period_averages)
seasonal = np.tile(period_averages, nobs // freq + 1)[:nobs]
if model.startswith('m'):
resid = x / seasonal / trend
else:
resid = detrended - seasonal
results = lmap(_pandas_wrapper, [seasonal, trend, resid, x])
return DecomposeResult(seasonal=results[0], trend=results[1],
resid=results[2], observed=results[3])
class DecomposeResult(object):
def __init__(self, **kwargs):
for key, value in iteritems(kwargs):
setattr(self, key, value)
self.nobs = len(self.observed)
def plot(self):
from statsmodels.graphics.utils import _import_mpl
plt = _import_mpl()
fig, axes = plt.subplots(4, 1, sharex=True)
if hasattr(self.observed, 'plot'): # got pandas use it
self.observed.plot(ax=axes[0], legend=False)
axes[0].set_ylabel('Observed')
self.trend.plot(ax=axes[1], legend=False)
axes[1].set_ylabel('Trend')
self.seasonal.plot(ax=axes[2], legend=False)
axes[2].set_ylabel('Seasonal')
self.resid.plot(ax=axes[3], legend=False)
axes[3].set_ylabel('Residual')
else:
axes[0].plot(self.observed)
axes[0].set_ylabel('Observed')
axes[1].plot(self.trend)
axes[1].set_ylabel('Trend')
axes[2].plot(self.seasonal)
axes[2].set_ylabel('Seasonal')
axes[3].plot(self.resid)
axes[3].set_ylabel('Residual')
axes[3].set_xlabel('Time')
axes[3].set_xlim(0, self.nobs)
fig.tight_layout()
return fig
if __name__ == "__main__":
x = np.array([-50, 175, 149, 214, 247, 237, 225, 329, 729, 809,
530, 489, 540, 457, 195, 176, 337, 239, 128, 102,
232, 429, 3, 98, 43, -141, -77, -13, 125, 361, -45, 184])
results = seasonal_decompose(x, freq=4)
from pandas import DataFrame, DatetimeIndex
data = DataFrame(x, DatetimeIndex(start='1/1/1951',
periods=len(x),
freq='Q'))
res = seasonal_decompose(data)
| bsd-3-clause |
seckcoder/lang-learn | python/sklearn/sklearn/metrics/pairwise.py | 1 | 27532 | """
The :mod:`sklearn.metrics.pairwise` submodule implements utilities to evaluate
pairwise distances or affinity of sets of samples.
This module contains both distance metrics and kernels. A brief summary is
given on the two here.
Distance metrics are a function d(a, b) such that d(a, b) < d(a, c) if objects
a and b are considered "more similar" to objects a and c. Two objects exactly
alike would have a distance of zero.
One of the most popular examples is Euclidean distance.
To be a 'true' metric, it must obey the following four conditions::
1. d(a, b) >= 0, for all a and b
2. d(a, b) == 0, if and only if a = b, positive definiteness
3. d(a, b) == d(b, a), symmetry
4. d(a, c) <= d(a, b) + d(b, c), the triangle inequality
Kernels are measures of similarity, i.e. ``s(a, b) > s(a, c)``
if objects ``a`` and ``b`` are considered "more similar" than objects
``a`` and ``c``. A kernel must also be positive semi-definite.
There are a number of ways to convert between a distance metric and a
similarity measure, such as a kernel. Let D be the distance, and S be the
kernel:
1. ``S = np.exp(-D * gamma)``, where one heuristic for choosing
``gamma`` is ``1 / num_features``
2. ``S = 1. / (D / np.max(D))``
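For instance (illustrative sketch only; the toy data are arbitrary and
``gamma`` follows the ``1 / num_features`` heuristic above):
    import numpy as np
    from sklearn.metrics.pairwise import euclidean_distances

    X = np.array([[0., 1.], [1., 1.], [2., 0.]])
    D = euclidean_distances(X)        # pairwise distance matrix
    gamma = 1. / X.shape[1]           # heuristic: 1 / num_features
    S = np.exp(-D * gamma)            # similarities in (0, 1]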
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# Andreas Mueller <[email protected]>
# License: BSD Style.
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import safe_asarray
from ..utils import atleast2d_or_csr
from ..utils import gen_even_slices
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import array2d
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast
# Utility Functions
def check_pairwise_arrays(X, Y):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples_a, n_features]
Y : {array-like, sparse matrix}, shape = [n_samples_b, n_features]
Returns
-------
safe_X : {array-like, sparse matrix}, shape = [n_samples_a, n_features]
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape = [n_samples_b, n_features]
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
if Y is X or Y is None:
X = safe_asarray(X)
X = Y = atleast2d_or_csr(X, dtype=np.float)
else:
X = safe_asarray(X)
Y = safe_asarray(Y)
X = atleast2d_or_csr(X, dtype=np.float)
Y = atleast2d_or_csr(Y, dtype=np.float)
if len(X.shape) < 2:
raise ValueError("X is required to be at least two dimensional.")
if len(Y.shape) < 2:
raise ValueError("Y is required to be at least two dimensional.")
if X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
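# A quick illustrative sketch (assumed example, not part of the module): when Y
# is omitted, the second return value is the very same object as the first, not
# a copy, which is what the ``X is Y`` shortcuts further down rely on.
#
#     >>> import numpy as np
#     >>> A = np.arange(6, dtype=np.float64).reshape(3, 2)
#     >>> X, Y = check_pairwise_arrays(A, None)
#     >>> X is Y
#     True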
# Distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two main advantages. First, it is computationally
efficient when dealing with sparse data. Second, if x varies but y
remains unchanged, then the right-most dot-product `dot(y, y)` can be
pre-computed.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples_1, n_features]
Y : {array-like, sparse matrix}, shape = [n_samples_2, n_features]
Y_norm_squared : array-like, shape = [n_samples_2], optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
Returns
-------
distances : {array, sparse matrix}, shape = [n_samples_1, n_samples_2]
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
"""
# should not need X_norm_squared because if you could precompute that as
# well as Y, then you should just pre-compute the output and not even
# call this function.
X, Y = check_pairwise_arrays(X, Y)
if issparse(X):
XX = X.multiply(X).sum(axis=1)
else:
XX = np.sum(X * X, axis=1)[:, np.newaxis]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
YY = XX.T
elif Y_norm_squared is None:
if issparse(Y):
# scipy.sparse matrices don't have element-wise scalar
# exponentiation, and tocsr has a copy kwarg only on CSR matrices.
YY = Y.copy() if isinstance(Y, csr_matrix) else Y.tocsr()
YY.data **= 2
YY = np.asarray(YY.sum(axis=1)).T
else:
YY = np.sum(Y ** 2, axis=1)[np.newaxis, :]
else:
YY = atleast2d_or_csr(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
# TODO: a faster Cython implementation would do the clipping of negative
# values in a single pass over the output matrix.
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances)
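# A minimal sketch (assumed example) of the pre-computation trick described in
# the docstring above: when Y stays fixed across calls, its squared norms can be
# computed once and passed in via ``Y_norm_squared``.
#
#     >>> import numpy as np
#     >>> X = np.array([[0., 0.], [1., 1.]])
#     >>> Y = np.array([[1., 0.], [2., 2.]])
#     >>> YY = (Y ** 2).sum(axis=1)            # pre-computed once, reused below
#     >>> D = euclidean_distances(X, Y, Y_norm_squared=YY)
#     >>> np.allclose(D, [[1., np.sqrt(8.)], [1., np.sqrt(2.)]])
#     True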
def manhattan_distances(X, Y=None, sum_over_features=True):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
        componentwise L1 pairwise-distances (i.e. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise l1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances(3, 3)#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances(3, 2)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances(2, 3)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
n_samples_X, n_features_X = X.shape
n_samples_Y, n_features_Y = Y.shape
if n_features_X != n_features_Y:
raise Exception("X and Y should have the same number of features!")
D = np.abs(X[:, np.newaxis, :] - Y[np.newaxis, :, :])
if sum_over_features:
D = np.sum(D, axis=2)
else:
D = D.reshape((n_samples_X * n_samples_Y, n_features_X))
return D
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
degree : int
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = linear_kernel(X, Y)
K *= gamma
K += coef0
K **= degree
return K
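# Small illustrative check (assumed example): the kernel above should agree with
# evaluating (gamma * <X, X> + coef0) ** degree directly.
#
#     >>> import numpy as np
#     >>> X = np.array([[1., 2.], [3., 4.]])
#     >>> K = polynomial_kernel(X, degree=2, gamma=1.0, coef0=1.0)
#     >>> np.allclose(K, (np.dot(X, X.T) + 1.0) ** 2)
#     True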
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
degree : int
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = linear_kernel(X, Y)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
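# Illustrative sketch (assumed example): the rbf kernel is just the exponential
# of the negative scaled squared Euclidean distance, so it can be cross-checked
# against euclidean_distances defined above.
#
#     >>> import numpy as np
#     >>> X = np.array([[0., 0.], [1., 1.]])
#     >>> K = rbf_kernel(X, gamma=0.5)
#     >>> np.allclose(K, np.exp(-0.5 * euclidean_distances(X, squared=True)))
#     True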
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -\sum_i (x[i] - y[i]) ** 2 / (x[i] + y[i])
It can be interpreted as a weighted difference per entry.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
        preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
### once we support sparse matrices, we can use check_pairwise
if Y is None:
# optimize this case!
X = array2d(X)
if X.dtype != np.float32:
            X = X.astype(np.float)
Y = X
if (X < 0).any():
raise ValueError("X contains negative values.")
else:
X = array2d(X)
Y = array2d(Y)
if X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
if X.dtype != np.float32 or Y.dtype != np.float32:
# if not both are 32bit float, convert to 64bit float
X = X.astype(np.float)
Y = Y.astype(np.float)
if (X < 0).any():
raise ValueError("X contains negative values.")
if (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
    """Computes the exponential chi-squared kernel between X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma * \sum_i (x[i] - y[i]) ** 2 / (x[i] + y[i]))
It can be interpreted as a weighted difference per entry.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
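# Sketch of the relation stated in the two docstrings (assumed example):
# chi2_kernel is the exponentiated additive_chi2_kernel.
#
#     >>> import numpy as np
#     >>> X = np.random.RandomState(0).rand(3, 4)   # non-negative histograms
#     >>> np.allclose(chi2_kernel(X, gamma=0.5),
#     ...             np.exp(0.5 * additive_chi2_kernel(X)))
#     True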
# Helper functions - distance
pairwise_distance_functions = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances,
'cityblock': manhattan_distances,
}
def distance_metrics():
""" Valid metrics for pairwise_distances
This function simply returns the valid pairwise distance metrics.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
    =========== ====================================
    metric      Function
    =========== ====================================
    'cityblock' sklearn.pairwise.manhattan_distances
    'euclidean' sklearn.pairwise.euclidean_distances
    'l1'        sklearn.pairwise.manhattan_distances
    'l2'        sklearn.pairwise.euclidean_distances
    'manhattan' sklearn.pairwise.manhattan_distances
    =========== ====================================
"""
return pairwise_distance_functions
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
ret = Parallel(n_jobs=n_jobs, verbose=0)(
delayed(func)(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
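# Sketch (assumed example) of the slicing used above: gen_even_slices splits the
# columns of the pairwise matrix into n_jobs contiguous blocks, which are
# computed independently and reassembled with np.hstack.
#
#     >>> list(gen_even_slices(10, 3))
#     [slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]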
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
    preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Please note that support for sparse matrices is currently limited to those
metrics listed in pairwise.pairwise_distance_functions.
Valid values for metric are:
- from scikit-learn: ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
Note in the case of 'euclidean' and 'cityblock' (which are valid
scipy.spatial.distance metrics), the values will use the scikit-learn
implementation, which is faster and has support for sparse matrices.
For a verbose description of the metrics from scikit-learn, see the
__doc__ of the sklearn.pairwise.distance_metrics function.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.pairwise_distance_functions.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 - n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if metric == "precomputed":
return X
elif metric in pairwise_distance_functions:
func = pairwise_distance_functions[metric]
if n_jobs == 1:
return func(X, Y, **kwds)
else:
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
elif callable(metric):
# Check matrices first (this is usually done by the metric).
X, Y = check_pairwise_arrays(X, Y)
n_x, n_y = X.shape[0], Y.shape[0]
# Calculate distance for each element in X and Y.
# FIXME: can use n_jobs here too
D = np.zeros((n_x, n_y), dtype='float')
for i in range(n_x):
start = 0
if X is Y:
start = i
for j in range(start, n_y):
# distance assumed to be symmetric.
D[i][j] = metric(X[i], Y[j], **kwds)
if X is Y:
D[j][i] = D[i][j]
return D
else:
# Note: the distance module doesn't support sparse matrices!
if type(X) is csr_matrix:
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
if Y is None:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
else:
if type(Y) is csr_matrix:
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
return distance.cdist(X, Y, metric=metric, **kwds)
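# Minimal usage sketch (assumed example) of the callable-metric branch above: a
# user-defined function is evaluated pairwise, exploiting symmetry when Y is X.
#
#     >>> import numpy as np
#     >>> X = np.array([[0.], [3.], [7.]])
#     >>> D = pairwise_distances(X, metric=lambda a, b: np.abs(a - b).sum())
#     >>> np.allclose(D, [[0., 3., 7.], [3., 0., 4.], [7., 4., 0.]])
#     True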
# Helper functions - kernels
pairwise_kernel_functions = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'sigmoid': sigmoid_kernel,
}
def kernel_metrics():
""" Valid metrics for pairwise_kernels
    This function simply returns the valid pairwise kernel functions.
    It exists, however, to allow for a verbose description of the mapping for
    each of the valid strings.
    The valid kernel functions, and the function they map to, are:
    =============== ========================================
    metric          Function
    =============== ========================================
    'additive_chi2' sklearn.pairwise.additive_chi2_kernel
    'chi2'          sklearn.pairwise.chi2_kernel
    'linear'        sklearn.pairwise.linear_kernel
    'poly'          sklearn.pairwise.polynomial_kernel
    'polynomial'    sklearn.pairwise.polynomial_kernel
    'rbf'           sklearn.pairwise.rbf_kernel
    'sigmoid'       sklearn.pairwise.sigmoid_kernel
    =============== ========================================
"""
return pairwise_kernel_functions
kernel_params = {
"chi2": (),
"exp_chi2": set(("gamma", )),
"linear": (),
"rbf": set(("gamma",)),
"sigmoid": set(("gamma", "coef0")),
"polynomial": set(("gamma", "degree", "coef0")),
"poly": set(("gamma", "degree", "coef0")),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
""" Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
    preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear']
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.pairwise_kernel_functions.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 - n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
filter_params: boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
if metric == "precomputed":
return X
elif metric in pairwise_kernel_functions:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in kernel_params[metric])
func = pairwise_kernel_functions[metric]
if n_jobs == 1:
return func(X, Y, **kwds)
else:
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
elif callable(metric):
# Check matrices first (this is usually done by the metric).
X, Y = check_pairwise_arrays(X, Y)
n_x, n_y = X.shape[0], Y.shape[0]
# Calculate kernel for each element in X and Y.
K = np.zeros((n_x, n_y), dtype='float')
for i in range(n_x):
start = 0
if X is Y:
start = i
for j in range(start, n_y):
# Kernel assumed to be symmetric.
K[i][j] = metric(X[i], Y[j], **kwds)
if X is Y:
K[j][i] = K[i][j]
return K
else:
raise AttributeError("Unknown metric %s" % metric)
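# Usage sketch (assumed example) for ``filter_params``: keyword arguments not
# listed in kernel_params for the chosen metric are silently dropped instead of
# being forwarded to the kernel function.
#
#     >>> import numpy as np
#     >>> X = np.array([[1., 0.], [0., 1.]])
#     >>> K = pairwise_kernels(X, metric='rbf', gamma=1.0, degree=3,
#     ...                      filter_params=True)   # 'degree' is filtered out
#     >>> np.allclose(K, rbf_kernel(X, gamma=1.0))
#     True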
| unlicense |
crpurcell/RM-tools | RMutils/normalize.py | 2 | 4842 | # The APLpyNormalize class is largely based on code provided by Sarah Graves.
import numpy as np
import numpy.ma as ma
import matplotlib.cbook as cbook
from matplotlib.colors import Normalize
class APLpyNormalize(Normalize):
'''
A Normalize class for imshow that allows different stretching functions
for astronomical images.
'''
def __init__(self, stretch='linear', exponent=5, vmid=None, vmin=None,
vmax=None, clip=False):
'''
        Initialize an APLpyNormalize instance.
Optional Keyword Arguments:
*vmin*: [ None | float ]
Minimum pixel value to use for the scaling.
*vmax*: [ None | float ]
Maximum pixel value to use for the scaling.
*stretch*: [ 'linear' | 'log' | 'sqrt' | 'arcsinh' | 'power' ]
The stretch function to use (default is 'linear').
*vmid*: [ None | float ]
Mid-pixel value used for the log and arcsinh stretches. If
set to None, a default value is picked.
*exponent*: [ float ]
if self.stretch is set to 'power', this is the exponent to use.
*clip*: [ True | False ]
If clip is True and the given value falls outside the range,
the returned value will be 0 or 1, whichever is closer.
'''
        # Call original initialization routine
Normalize.__init__(self, vmin=vmin, vmax=vmax, clip=clip)
# Save parameters
self.stretch = stretch
self.exponent = exponent
if stretch == 'power' and np.equal(self.exponent, None):
raise Exception("For stretch=='power', an exponent should be specified")
if np.equal(vmid, None):
if stretch == 'log':
self.midpoint = 0.05
elif stretch == 'arcsinh':
self.midpoint = -0.033
else:
self.midpoint = None
else:
self.midpoint = (vmid - vmin) / (vmax - vmin)
def __call__(self, value, clip=None):
#read in parameters
method = self.stretch
exponent = self.exponent
midpoint = self.midpoint
# ORIGINAL MATPLOTLIB CODE
if clip is None:
clip = self.clip
if cbook.iterable(value):
vtype = 'array'
val = ma.asarray(value).astype(np.float)
else:
vtype = 'scalar'
val = ma.array([value]).astype(np.float)
self.autoscale_None(val)
vmin, vmax = self.vmin, self.vmax
if vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
elif vmin==vmax:
return 0.0 * val
else:
if clip:
mask = ma.getmask(val)
val = ma.array(np.clip(val.filled(vmax), vmin, vmax),
mask=mask)
result = (val-vmin) * (1.0/(vmax-vmin))
# CUSTOM APLPY CODE
if self.stretch == 'linear':
pass
elif self.stretch == 'log':
result = ma.log10((result/self.midpoint) + 1.) \
/ ma.log10((1./self.midpoint) + 1.)
elif self.stretch == 'sqrt':
result = ma.sqrt(result)
elif self.stretch == 'arcsinh':
result = ma.arcsinh(result/self.midpoint) \
/ ma.arcsinh(1./self.midpoint)
elif self.stretch == 'power':
result = ma.power(result, exponent)
else:
raise Exception("Unknown stretch in APLpyNormalize: %s" %
self.stretch)
if vtype == 'scalar':
result = result[0]
return result
def inverse(self, value):
# ORIGINAL MATPLOTLIB CODE
if not self.scaled():
raise ValueError("Not invertible until scaled")
vmin, vmax = self.vmin, self.vmax
# CUSTOM APLPY CODE
if cbook.iterable(value):
val = ma.asarray(value)
else:
val = value
if self.stretch == 'linear':
pass
elif self.stretch == 'log':
val = self.midpoint * \
(ma.power(10., (val*ma.log10(1./self.midpoint+1.))) - 1.)
elif self.stretch == 'sqrt':
val = val * val
elif self.stretch == 'arcsinh':
val = self.midpoint * \
ma.sinh(val*ma.arcsinh(1./self.midpoint))
elif self.stretch == 'power':
val = ma.power(val, (1./self.exponent))
else:
raise Exception("Unknown stretch in APLpyNormalize: %s" %
self.stretch)
return vmin + val * (vmax - vmin)
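# Minimal usage sketch (assumed example): an APLpyNormalize instance can be
# passed as the ``norm`` keyword of matplotlib's imshow to apply, e.g., a
# square-root stretch between chosen display limits.
#
#     >>> import numpy as np
#     >>> import matplotlib.pyplot as plt
#     >>> img = np.random.RandomState(1).rand(64, 64)
#     >>> norm = APLpyNormalize(stretch='sqrt', vmin=0.0, vmax=1.0)
#     >>> im = plt.imshow(img, norm=norm, cmap='gray')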
| mit |
wkfwkf/statsmodels | statsmodels/sandbox/regression/tests/test_gmm_poisson.py | 31 | 13338 | '''
TestGMMMultTwostepDefault() has lower precision
'''
from statsmodels.compat.python import lmap
import numpy as np
from numpy.testing.decorators import skipif
import pandas
import scipy
from scipy import stats
from statsmodels.regression.linear_model import OLS
from statsmodels.sandbox.regression import gmm
from numpy.testing import assert_allclose, assert_equal
from statsmodels.compat.scipy import NumpyVersion
def get_data():
import os
curdir = os.path.split(__file__)[0]
dt = pandas.read_csv(os.path.join(curdir, 'racd10data_with_transformed.csv'))
# Transformations compared to original data
##dt3['income'] /= 10.
##dt3['aget'] = (dt3['age'] - dt3['age'].min()) / 5.
##dt3['aget2'] = dt3['aget']**2
# How do we do this with pandas
mask = ~((np.asarray(dt['private']) == 1) & (dt['medicaid'] == 1))
mask = mask & (dt['docvis'] <= 70)
dt3 = dt[mask]
dt3['const'] = 1 # add constant
return dt3
DATA = get_data()
#------------- moment conditions for example
def moment_exponential_add(params, exog, exp=True):
if not np.isfinite(params).all():
print("invalid params", params)
# moment condition without instrument
if exp:
predicted = np.exp(np.dot(exog, params))
#if not np.isfinite(predicted).all():
#print "invalid predicted", predicted
#raise RuntimeError('invalid predicted')
predicted = np.clip(predicted, 0, 1e100) # try to avoid inf
else:
predicted = np.dot(exog, params)
return predicted
def moment_exponential_mult(params, data, exp=True):
# multiplicative error model
endog = data[:,0]
exog = data[:,1:]
if not np.isfinite(params).all():
print("invalid params", params)
# moment condition without instrument
if exp:
predicted = np.exp(np.dot(exog, params))
predicted = np.clip(predicted, 0, 1e100) # avoid inf
resid = endog / predicted - 1
if not np.isfinite(resid).all():
print("invalid resid", resid)
else:
resid = endog - np.dot(exog, params)
return resid
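# Rough sketch (illustrative only, not part of the test module): the additive
# version above returns the conditional mean exp(x'beta), while the
# multiplicative version returns the residual y / exp(x'beta) - 1; the GMM
# estimators below combine such residuals with instruments z through moment
# conditions of the form E[z * resid] = 0. With noise-free data the
# multiplicative residual vanishes at the true parameters:
#
#     >>> import numpy as np
#     >>> rng = np.random.RandomState(0)
#     >>> x = np.column_stack((np.ones(5), rng.uniform(size=5)))
#     >>> beta = np.array([0.1, 0.5])
#     >>> y = np.exp(np.dot(x, beta))
#     >>> np.allclose(moment_exponential_mult(beta, np.column_stack((y, x))), 0)
#     True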
#------------------- test classes
# copied from test_gmm.py, with changes
class CheckGMM(object):
# default tolerance, overwritten by subclasses
params_tol = [5e-6, 5e-6]
bse_tol = [5e-7, 5e-7]
q_tol = [5e-6, 1e-9]
j_tol = [5e-5, 1e-9]
def test_basic(self):
res1, res2 = self.res1, self.res2
# test both absolute and relative difference
rtol, atol = self.params_tol
assert_allclose(res1.params, res2.params, rtol=rtol, atol=0)
assert_allclose(res1.params, res2.params, rtol=0, atol=atol)
rtol, atol = self.bse_tol
assert_allclose(res1.bse, res2.bse, rtol=rtol, atol=0)
assert_allclose(res1.bse, res2.bse, rtol=0, atol=atol)
def test_other(self):
res1, res2 = self.res1, self.res2
rtol, atol = self.q_tol
assert_allclose(res1.q, res2.Q, rtol=atol, atol=rtol)
rtol, atol = self.j_tol
assert_allclose(res1.jval, res2.J, rtol=atol, atol=rtol)
j, jpval, jdf = res1.jtest()
# j and jval should be the same
assert_allclose(res1.jval, res2.J, rtol=13, atol=13)
#pvalue is not saved in Stata results
pval = stats.chi2.sf(res2.J, res2.J_df)
#assert_allclose(jpval, pval, rtol=1e-4, atol=1e-6)
assert_allclose(jpval, pval, rtol=rtol, atol=atol)
assert_equal(jdf, res2.J_df)
def test_smoke(self):
res1 = self.res1
res1.summary()
class TestGMMAddOnestep(CheckGMM):
@classmethod
def setup_class(self):
XLISTEXOG2 = 'aget aget2 educyr actlim totchr'.split()
endog_name = 'docvis'
exog_names = 'private medicaid'.split() + XLISTEXOG2 + ['const']
instrument_names = 'income ssiratio'.split() + XLISTEXOG2 + ['const']
endog = DATA[endog_name]
exog = DATA[exog_names]
instrument = DATA[instrument_names]
asarray = lambda x: np.asarray(x, float)
endog, exog, instrument = lmap(asarray, [endog, exog, instrument])
self.bse_tol = [5e-6, 5e-7]
q_tol = [0.04, 0]
# compare to Stata default options, iterative GMM
# with const at end
start = OLS(np.log(endog+1), exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
mod = gmm.NonlinearIVGMM(endog, exog, instrument, moment_exponential_add)
res0 = mod.fit(start, maxiter=0, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-8, 'disp': 0},
wargs={'centered':False})
self.res1 = res0
from .results_gmm_poisson import results_addonestep as results
self.res2 = results
class TestGMMAddTwostep(CheckGMM):
@classmethod
def setup_class(self):
XLISTEXOG2 = 'aget aget2 educyr actlim totchr'.split()
endog_name = 'docvis'
exog_names = 'private medicaid'.split() + XLISTEXOG2 + ['const']
instrument_names = 'income ssiratio'.split() + XLISTEXOG2 + ['const']
endog = DATA[endog_name]
exog = DATA[exog_names]
instrument = DATA[instrument_names]
asarray = lambda x: np.asarray(x, float)
endog, exog, instrument = lmap(asarray, [endog, exog, instrument])
self.bse_tol = [5e-6, 5e-7]
# compare to Stata default options, iterative GMM
# with const at end
start = OLS(np.log(endog+1), exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
mod = gmm.NonlinearIVGMM(endog, exog, instrument, moment_exponential_add)
res0 = mod.fit(start, maxiter=2, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-8, 'disp': 0},
wargs={'centered':False}, has_optimal_weights=False)
self.res1 = res0
from .results_gmm_poisson import results_addtwostep as results
self.res2 = results
class TestGMMMultOnestep(CheckGMM):
#compares has_optimal_weights=True with Stata's has_optimal_weights=False
@classmethod
def setup_class(self):
# compare to Stata default options, twostep GMM
XLISTEXOG2 = 'aget aget2 educyr actlim totchr'.split()
endog_name = 'docvis'
exog_names = 'private medicaid'.split() + XLISTEXOG2 + ['const']
instrument_names = 'income medicaid ssiratio'.split() + XLISTEXOG2 + ['const']
endog = DATA[endog_name]
exog = DATA[exog_names]
instrument = DATA[instrument_names]
asarray = lambda x: np.asarray(x, float)
endog, exog, instrument = lmap(asarray, [endog, exog, instrument])
# Need to add all data into exog
endog_ = np.zeros(len(endog))
exog_ = np.column_stack((endog, exog))
self.bse_tol = [5e-6, 5e-7]
self.q_tol = [0.04, 0]
self.j_tol = [0.04, 0]
# compare to Stata default options, iterative GMM
# with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
mod = gmm.NonlinearIVGMM(endog_, exog_, instrument, moment_exponential_mult)
res0 = mod.fit(start, maxiter=0, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-8, 'disp': 0},
wargs={'centered':False}, has_optimal_weights=False)
self.res1 = res0
from .results_gmm_poisson import results_multonestep as results
self.res2 = results
class TestGMMMultTwostep(CheckGMM):
#compares has_optimal_weights=True with Stata's has_optimal_weights=False
@classmethod
def setup_class(self):
# compare to Stata default options, twostep GMM
XLISTEXOG2 = 'aget aget2 educyr actlim totchr'.split()
endog_name = 'docvis'
exog_names = 'private medicaid'.split() + XLISTEXOG2 + ['const']
instrument_names = 'income medicaid ssiratio'.split() + XLISTEXOG2 + ['const']
endog = DATA[endog_name]
exog = DATA[exog_names]
instrument = DATA[instrument_names]
asarray = lambda x: np.asarray(x, float)
endog, exog, instrument = lmap(asarray, [endog, exog, instrument])
# Need to add all data into exog
endog_ = np.zeros(len(endog))
exog_ = np.column_stack((endog, exog))
self.bse_tol = [5e-6, 5e-7]
# compare to Stata default options, iterative GMM
# with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
mod = gmm.NonlinearIVGMM(endog_, exog_, instrument, moment_exponential_mult)
res0 = mod.fit(start, maxiter=2, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-8, 'disp': 0},
wargs={'centered':False}, has_optimal_weights=False)
self.res1 = res0
from .results_gmm_poisson import results_multtwostep as results
self.res2 = results
class TestGMMMultTwostepDefault(CheckGMM):
# compares my defaults with the same options in Stata
# agreement is not very high, maybe vce(unadjusted) is different after all
@classmethod
def setup_class(self):
# compare to Stata default options, twostep GMM
XLISTEXOG2 = 'aget aget2 educyr actlim totchr'.split()
endog_name = 'docvis'
exog_names = 'private medicaid'.split() + XLISTEXOG2 + ['const']
instrument_names = 'income medicaid ssiratio'.split() + XLISTEXOG2 + ['const']
endog = DATA[endog_name]
exog = DATA[exog_names]
instrument = DATA[instrument_names]
asarray = lambda x: np.asarray(x, float)
endog, exog, instrument = lmap(asarray, [endog, exog, instrument])
# Need to add all data into exog
endog_ = np.zeros(len(endog))
exog_ = np.column_stack((endog, exog))
self.bse_tol = [0.004, 5e-4]
self.params_tol = [5e-5, 5e-5]
# compare to Stata default options, iterative GMM
# with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
mod = gmm.NonlinearIVGMM(endog_, exog_, instrument, moment_exponential_mult)
res0 = mod.fit(start, maxiter=2, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-8, 'disp': 0},
#wargs={'centered':True}, has_optimal_weights=True
)
self.res1 = res0
from .results_gmm_poisson import results_multtwostepdefault as results
self.res2 = results
class TestGMMMultTwostepCenter(CheckGMM):
#compares my defaults with the same options in Stata
@classmethod
def setup_class(self):
# compare to Stata default options, twostep GMM
XLISTEXOG2 = 'aget aget2 educyr actlim totchr'.split()
endog_name = 'docvis'
exog_names = 'private medicaid'.split() + XLISTEXOG2 + ['const']
instrument_names = 'income medicaid ssiratio'.split() + XLISTEXOG2 + ['const']
endog = DATA[endog_name]
exog = DATA[exog_names]
instrument = DATA[instrument_names]
asarray = lambda x: np.asarray(x, float)
endog, exog, instrument = lmap(asarray, [endog, exog, instrument])
# Need to add all data into exog
endog_ = np.zeros(len(endog))
exog_ = np.column_stack((endog, exog))
self.bse_tol = [5e-4, 5e-5]
self.params_tol = [5e-5, 5e-5]
q_tol = [5e-5, 1e-8]
# compare to Stata default options, iterative GMM
# with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
mod = gmm.NonlinearIVGMM(endog_, exog_, instrument, moment_exponential_mult)
res0 = mod.fit(start, maxiter=2, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-8, 'disp': 0},
wargs={'centered':True}, has_optimal_weights=False
)
self.res1 = res0
from .results_gmm_poisson import results_multtwostepcenter as results
self.res2 = results
def test_more(self):
# from Stata `overid`
J_df = 1
J_p = 0.332254330027383
J = 0.940091427212973
j, jpval, jdf = self.res1.jtest()
assert_allclose(jpval, J_p, rtol=5e-5, atol=0)
if __name__ == '__main__':
tt = TestGMMAddOnestep()
tt.setup_class()
tt.test_basic()
tt.test_other()
tt = TestGMMAddTwostep()
tt.setup_class()
tt.test_basic()
tt.test_other()
tt = TestGMMMultOnestep()
tt.setup_class()
tt.test_basic()
#tt.test_other()
tt = TestGMMMultTwostep()
tt.setup_class()
tt.test_basic()
tt.test_other()
tt = TestGMMMultTwostepDefault()
tt.setup_class()
tt.test_basic()
tt.test_other()
tt = TestGMMMultTwostepCenter()
tt.setup_class()
tt.test_basic()
tt.test_other()
| bsd-3-clause |
rgommers/scipy | scipy/interpolate/fitpack2.py | 7 | 75025 | """
fitpack --- curve and surface fitting with splines
fitpack is based on a collection of Fortran routines DIERCKX
by P. Dierckx (see http://www.netlib.org/dierckx/) transformed
to double routines by Pearu Peterson.
"""
# Created by Pearu Peterson, June,August 2003
__all__ = [
'UnivariateSpline',
'InterpolatedUnivariateSpline',
'LSQUnivariateSpline',
'BivariateSpline',
'LSQBivariateSpline',
'SmoothBivariateSpline',
'LSQSphereBivariateSpline',
'SmoothSphereBivariateSpline',
'RectBivariateSpline',
'RectSphereBivariateSpline']
import warnings
from numpy import zeros, concatenate, ravel, diff, array, ones
import numpy as np
from . import fitpack
from . import dfitpack
dfitpack_int = dfitpack.types.intvar.dtype
# ############### Univariate spline ####################
_curfit_messages = {1: """
The required storage space exceeds the available storage space, as
specified by the parameter nest: nest too small. If nest is already
large (say nest > m/2), it may also indicate that s is too small.
The approximation returned is the weighted least-squares spline
according to the knots t[0],t[1],...,t[n-1]. (n=nest) the parameter fp
gives the corresponding weighted sum of squared residuals (fp>s).
""",
2: """
A theoretically impossible result was found during the iteration
process for finding a smoothing spline with fp = s: s too small.
There is an approximation returned but the corresponding weighted sum
of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""",
3: """
The maximal number of iterations maxit (set to 20 by the program)
allowed for finding a smoothing spline with fp=s has been reached: s
too small.
There is an approximation returned but the corresponding weighted sum
of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""",
10: """
Error on entry, no approximation returned. The following conditions
must hold:
xb<=x[0]<x[1]<...<x[m-1]<=xe, w[i]>0, i=0..m-1
if iopt=-1:
xb<t[k+1]<t[k+2]<...<t[n-k-2]<xe"""
}
# UnivariateSpline, ext parameter can be an int or a string
_extrap_modes = {0: 0, 'extrapolate': 0,
1: 1, 'zeros': 1,
2: 2, 'raise': 2,
3: 3, 'const': 3}
class UnivariateSpline:
"""
1-D smoothing spline fit to a given set of data points.
Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data. `s`
specifies the number of knots by specifying a smoothing condition.
Parameters
----------
x : (N,) array_like
1-D array of independent input data. Must be increasing;
must be strictly increasing if `s` is 0.
y : (N,) array_like
1-D array of dependent input data, of the same length as `x`.
w : (N,) array_like, optional
Weights for spline fitting. Must be positive. If `w` is None,
weights are all equal. Default is None.
bbox : (2,) array_like, optional
2-sequence specifying the boundary of the approximation interval. If
`bbox` is None, ``bbox=[x[0], x[-1]]``. Default is None.
k : int, optional
Degree of the smoothing spline. Must be 1 <= `k` <= 5.
``k = 3`` is a cubic spline. Default is 3.
s : float or None, optional
Positive smoothing factor used to choose the number of knots. Number
of knots will be increased until the smoothing condition is satisfied::
sum((w[i] * (y[i]-spl(x[i])))**2, axis=0) <= s
If `s` is None, ``s = len(w)`` which should be a good value if
``1/w[i]`` is an estimate of the standard deviation of ``y[i]``.
If 0, spline will interpolate through all data points. Default is None.
ext : int or str, optional
Controls the extrapolation mode for elements
not in the interval defined by the knot sequence.
* if ext=0 or 'extrapolate', return the extrapolated value.
* if ext=1 or 'zeros', return 0
* if ext=2 or 'raise', raise a ValueError
        * if ext=3 or 'const', return the boundary value.
Default is 0.
check_finite : bool, optional
Whether to check that the input arrays contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination or non-sensical results) if the inputs
do contain infinities or NaNs.
Default is False.
See Also
--------
BivariateSpline :
a base class for bivariate splines.
SmoothBivariateSpline :
a smoothing bivariate spline through the given points
LSQBivariateSpline :
a bivariate spline using weighted least-squares fitting
RectSphereBivariateSpline :
a bivariate spline over a rectangular mesh on a sphere
SmoothSphereBivariateSpline :
a smoothing bivariate spline in spherical coordinates
LSQSphereBivariateSpline :
a bivariate spline in spherical coordinates using weighted
least-squares fitting
RectBivariateSpline :
a bivariate spline over a rectangular mesh
InterpolatedUnivariateSpline :
        an interpolating univariate spline for a given set of data points.
bisplrep :
a function to find a bivariate B-spline representation of a surface
bisplev :
a function to evaluate a bivariate B-spline and its derivatives
splrep :
a function to find the B-spline representation of a 1-D curve
splev :
a function to evaluate a B-spline or its derivatives
sproot :
a function to find the roots of a cubic B-spline
splint :
a function to evaluate the definite integral of a B-spline between two
given points
spalde :
a function to evaluate all derivatives of a B-spline
Notes
-----
The number of data points must be larger than the spline degree `k`.
**NaN handling**: If the input arrays contain ``nan`` values, the result
is not useful, since the underlying spline fitting routines cannot deal
with ``nan``. A workaround is to use zero weights for not-a-number
data points:
>>> from scipy.interpolate import UnivariateSpline
>>> x, y = np.array([1, 2, 3, 4]), np.array([1, np.nan, 3, 4])
>>> w = np.isnan(y)
>>> y[w] = 0.
>>> spl = UnivariateSpline(x, y, w=~w)
Notice the need to replace a ``nan`` by a numerical value (precise value
does not matter as long as the corresponding weight is zero.)
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import UnivariateSpline
>>> rng = np.random.default_rng()
>>> x = np.linspace(-3, 3, 50)
>>> y = np.exp(-x**2) + 0.1 * rng.standard_normal(50)
>>> plt.plot(x, y, 'ro', ms=5)
Use the default value for the smoothing parameter:
>>> spl = UnivariateSpline(x, y)
>>> xs = np.linspace(-3, 3, 1000)
>>> plt.plot(xs, spl(xs), 'g', lw=3)
Manually change the amount of smoothing:
>>> spl.set_smoothing_factor(0.5)
>>> plt.plot(xs, spl(xs), 'b', lw=3)
>>> plt.show()
"""
def __init__(self, x, y, w=None, bbox=[None]*2, k=3, s=None,
ext=0, check_finite=False):
x, y, w, bbox, self.ext = self.validate_input(x, y, w, bbox, k, s, ext,
check_finite)
# _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
data = dfitpack.fpcurf0(x, y, k, w=w, xb=bbox[0],
xe=bbox[1], s=s)
if data[-1] == 1:
# nest too small, setting to maximum bound
data = self._reset_nest(data)
self._data = data
self._reset_class()
@staticmethod
def validate_input(x, y, w, bbox, k, s, ext, check_finite):
x, y, bbox = np.asarray(x), np.asarray(y), np.asarray(bbox)
if w is not None:
w = np.asarray(w)
if check_finite:
w_finite = np.isfinite(w).all() if w is not None else True
if (not np.isfinite(x).all() or not np.isfinite(y).all() or
not w_finite):
raise ValueError("x and y array must not contain "
"NaNs or infs.")
if s is None or s > 0:
if not np.all(diff(x) >= 0.0):
raise ValueError("x must be increasing if s > 0")
else:
if not np.all(diff(x) > 0.0):
raise ValueError("x must be strictly increasing if s = 0")
if x.size != y.size:
raise ValueError("x and y should have a same length")
elif w is not None and not x.size == y.size == w.size:
raise ValueError("x, y, and w should have a same length")
elif bbox.shape != (2,):
raise ValueError("bbox shape should be (2,)")
elif not (1 <= k <= 5):
raise ValueError("k should be 1 <= k <= 5")
elif s is not None and not s >= 0.0:
raise ValueError("s should be s >= 0.0")
try:
ext = _extrap_modes[ext]
except KeyError as e:
raise ValueError("Unknown extrapolation mode %s." % ext) from e
return x, y, w, bbox, ext
@classmethod
def _from_tck(cls, tck, ext=0):
"""Construct a spline object from given tck"""
self = cls.__new__(cls)
t, c, k = tck
self._eval_args = tck
# _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
self._data = (None, None, None, None, None, k, None, len(t), t,
c, None, None, None, None)
self.ext = ext
return self
def _reset_class(self):
data = self._data
n, t, c, k, ier = data[7], data[8], data[9], data[5], data[-1]
self._eval_args = t[:n], c[:n], k
if ier == 0:
# the spline returned has a residual sum of squares fp
# such that abs(fp-s)/s <= tol with tol a relative
# tolerance set to 0.001 by the program
pass
elif ier == -1:
# the spline returned is an interpolating spline
self._set_class(InterpolatedUnivariateSpline)
elif ier == -2:
# the spline returned is the weighted least-squares
# polynomial of degree k. In this extreme case fp gives
# the upper bound fp0 for the smoothing factor s.
self._set_class(LSQUnivariateSpline)
else:
# error
if ier == 1:
self._set_class(LSQUnivariateSpline)
message = _curfit_messages.get(ier, 'ier=%s' % (ier))
warnings.warn(message)
def _set_class(self, cls):
self._spline_class = cls
if self.__class__ in (UnivariateSpline, InterpolatedUnivariateSpline,
LSQUnivariateSpline):
self.__class__ = cls
else:
# It's an unknown subclass -- don't change class. cf. #731
pass
def _reset_nest(self, data, nest=None):
n = data[10]
if nest is None:
k, m = data[5], len(data[0])
nest = m+k+1 # this is the maximum bound for nest
else:
if not n <= nest:
raise ValueError("`nest` can only be increased")
t, c, fpint, nrdata = [np.resize(data[j], nest) for j in
[8, 9, 11, 12]]
args = data[:8] + (t, c, n, fpint, nrdata, data[13])
data = dfitpack.fpcurf1(*args)
return data
def set_smoothing_factor(self, s):
""" Continue spline computation with the given smoothing
factor s and with the knots found at the last call.
This routine modifies the spline in place.
"""
data = self._data
if data[6] == -1:
warnings.warn('smoothing factor unchanged for'
'LSQ spline with fixed knots')
return
args = data[:6] + (s,) + data[7:]
data = dfitpack.fpcurf1(*args)
if data[-1] == 1:
# nest too small, setting to maximum bound
data = self._reset_nest(data)
self._data = data
self._reset_class()
def __call__(self, x, nu=0, ext=None):
"""
Evaluate spline (or its nu-th derivative) at positions x.
Parameters
----------
x : array_like
A 1-D array of points at which to return the value of the smoothed
spline or its derivatives. Note: `x` can be unordered but the
evaluation is more efficient if `x` is (partially) ordered.
nu : int
The order of derivative of the spline to compute.
ext : int
Controls the value returned for elements of `x` not in the
interval defined by the knot sequence.
* if ext=0 or 'extrapolate', return the extrapolated value.
* if ext=1 or 'zeros', return 0
* if ext=2 or 'raise', raise a ValueError
* if ext=3 or 'const', return the boundary value.
The default value is 0, passed from the initialization of
UnivariateSpline.
"""
x = np.asarray(x)
# empty input yields empty output
if x.size == 0:
return array([])
if ext is None:
ext = self.ext
else:
try:
ext = _extrap_modes[ext]
except KeyError as e:
raise ValueError("Unknown extrapolation mode %s." % ext) from e
return fitpack.splev(x, self._eval_args, der=nu, ext=ext)
def get_knots(self):
""" Return positions of interior knots of the spline.
Internally, the knot vector contains ``2*k`` additional boundary knots.
"""
data = self._data
k, n = data[5], data[7]
return data[8][k:n-k]
def get_coeffs(self):
"""Return spline coefficients."""
data = self._data
k, n = data[5], data[7]
return data[9][:n-k-1]
def get_residual(self):
"""Return weighted sum of squared residuals of the spline approximation.
This is equivalent to::
sum((w[i] * (y[i]-spl(x[i])))**2, axis=0)
"""
return self._data[10]
def integral(self, a, b):
""" Return definite integral of the spline between two given points.
Parameters
----------
a : float
Lower limit of integration.
b : float
Upper limit of integration.
Returns
-------
integral : float
The value of the definite integral of the spline between limits.
Examples
--------
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(0, 3, 11)
>>> y = x**2
>>> spl = UnivariateSpline(x, y)
>>> spl.integral(0, 3)
9.0
which agrees with :math:`\\int x^2 dx = x^3 / 3` between the limits
of 0 and 3.
A caveat is that this routine assumes the spline to be zero outside of
the data limits:
>>> spl.integral(-1, 4)
9.0
>>> spl.integral(-1, 0)
0.0
"""
return dfitpack.splint(*(self._eval_args+(a, b)))
def derivatives(self, x):
""" Return all derivatives of the spline at the point x.
Parameters
----------
x : float
The point to evaluate the derivatives at.
Returns
-------
der : ndarray, shape(k+1,)
Derivatives of the orders 0 to k.
Examples
--------
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(0, 3, 11)
>>> y = x**2
>>> spl = UnivariateSpline(x, y)
>>> spl.derivatives(1.5)
array([2.25, 3.0, 2.0, 0])
"""
d, ier = dfitpack.spalde(*(self._eval_args+(x,)))
if not ier == 0:
raise ValueError("Error code returned by spalde: %s" % ier)
return d
def roots(self):
""" Return the zeros of the spline.
Restriction: only cubic splines are supported by fitpack.
"""
k = self._data[5]
if k == 3:
z, m, ier = dfitpack.sproot(*self._eval_args[:2])
if not ier == 0:
raise ValueError("Error code returned by spalde: %s" % ier)
return z[:m]
raise NotImplementedError('finding roots unsupported for '
'non-cubic splines')
def derivative(self, n=1):
"""
Construct a new spline representing the derivative of this spline.
Parameters
----------
n : int, optional
Order of derivative to evaluate. Default: 1
Returns
-------
spline : UnivariateSpline
Spline of order k2=k-n representing the derivative of this
spline.
See Also
--------
splder, antiderivative
Notes
-----
.. versionadded:: 0.13.0
Examples
--------
This can be used for finding maxima of a curve:
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(0, 10, 70)
>>> y = np.sin(x)
>>> spl = UnivariateSpline(x, y, k=4, s=0)
Now, differentiate the spline and find the zeros of the
derivative. (NB: `sproot` only works for order 3 splines, so we
fit an order 4 spline):
>>> spl.derivative().roots() / np.pi
array([ 0.50000001, 1.5 , 2.49999998])
This agrees well with roots :math:`\\pi/2 + n\\pi` of
:math:`\\cos(x) = \\sin'(x)`.
"""
tck = fitpack.splder(self._eval_args, n)
# if self.ext is 'const', derivative.ext will be 'zeros'
ext = 1 if self.ext == 3 else self.ext
return UnivariateSpline._from_tck(tck, ext=ext)
def antiderivative(self, n=1):
"""
Construct a new spline representing the antiderivative of this spline.
Parameters
----------
n : int, optional
Order of antiderivative to evaluate. Default: 1
Returns
-------
spline : UnivariateSpline
Spline of order k2=k+n representing the antiderivative of this
spline.
Notes
-----
.. versionadded:: 0.13.0
See Also
--------
splantider, derivative
Examples
--------
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(0, np.pi/2, 70)
>>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
>>> spl = UnivariateSpline(x, y, s=0)
The derivative is the inverse operation of the antiderivative,
although some floating point error accumulates:
>>> spl(1.7), spl.antiderivative().derivative()(1.7)
(array(2.1565429877197317), array(2.1565429877201865))
Antiderivative can be used to evaluate definite integrals:
>>> ispl = spl.antiderivative()
>>> ispl(np.pi/2) - ispl(0)
2.2572053588768486
This is indeed an approximation to the complete elliptic integral
:math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`:
>>> from scipy.special import ellipk
>>> ellipk(0.8)
2.2572053268208538
"""
tck = fitpack.splantider(self._eval_args, n)
return UnivariateSpline._from_tck(tck, self.ext)
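# Small illustrative sketch (assumed example) of the ``ext`` evaluation modes
# documented above, evaluated outside the data interval [0, 3]:
#
#     >>> import numpy as np
#     >>> x = np.linspace(0, 3, 11)
#     >>> spl = UnivariateSpline(x, x**2, s=0)
#     >>> np.allclose(spl([4.0], ext='extrapolate'), [16.0])
#     True
#     >>> np.allclose(spl([4.0], ext='const'), [9.0])    # clamped to spl(3)
#     True
#     >>> np.allclose(spl([4.0], ext='zeros'), [0.0])
#     True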
class InterpolatedUnivariateSpline(UnivariateSpline):
"""
1-D interpolating spline for a given set of data points.
Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data.
Spline function passes through all provided points. Equivalent to
`UnivariateSpline` with s=0.
Parameters
----------
x : (N,) array_like
Input dimension of data points -- must be strictly increasing
y : (N,) array_like
input dimension of data points
w : (N,) array_like, optional
Weights for spline fitting. Must be positive. If None (default),
weights are all equal.
bbox : (2,) array_like, optional
2-sequence specifying the boundary of the approximation interval. If
None (default), ``bbox=[x[0], x[-1]]``.
k : int, optional
Degree of the smoothing spline. Must be 1 <= `k` <= 5.
ext : int or str, optional
Controls the extrapolation mode for elements
not in the interval defined by the knot sequence.
* if ext=0 or 'extrapolate', return the extrapolated value.
* if ext=1 or 'zeros', return 0
* if ext=2 or 'raise', raise a ValueError
        * if ext=3 or 'const', return the boundary value.
The default value is 0.
check_finite : bool, optional
Whether to check that the input arrays contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination or non-sensical results) if the inputs
do contain infinities or NaNs.
Default is False.
See Also
--------
UnivariateSpline :
a smooth univariate spline to fit a given set of data points.
LSQUnivariateSpline :
a spline for which knots are user-selected
SmoothBivariateSpline :
a smoothing bivariate spline through the given points
LSQBivariateSpline :
a bivariate spline using weighted least-squares fitting
splrep :
a function to find the B-spline representation of a 1-D curve
splev :
a function to evaluate a B-spline or its derivatives
sproot :
a function to find the roots of a cubic B-spline
splint :
a function to evaluate the definite integral of a B-spline between two
given points
spalde :
a function to evaluate all derivatives of a B-spline
Notes
-----
The number of data points must be larger than the spline degree `k`.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import InterpolatedUnivariateSpline
>>> rng = np.random.default_rng()
>>> x = np.linspace(-3, 3, 50)
>>> y = np.exp(-x**2) + 0.1 * rng.standard_normal(50)
>>> spl = InterpolatedUnivariateSpline(x, y)
>>> plt.plot(x, y, 'ro', ms=5)
>>> xs = np.linspace(-3, 3, 1000)
>>> plt.plot(xs, spl(xs), 'g', lw=3, alpha=0.7)
>>> plt.show()
Notice that the ``spl(x)`` interpolates `y`:
>>> spl.get_residual()
0.0
"""
def __init__(self, x, y, w=None, bbox=[None]*2, k=3,
ext=0, check_finite=False):
x, y, w, bbox, self.ext = self.validate_input(x, y, w, bbox, k, None,
ext, check_finite)
if not np.all(diff(x) > 0.0):
raise ValueError('x must be strictly increasing')
# _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
self._data = dfitpack.fpcurf0(x, y, k, w=w, xb=bbox[0],
xe=bbox[1], s=0)
self._reset_class()
_fpchec_error_string = """The input parameters have been rejected by fpchec. \
This means that at least one of the following conditions is violated:
1) k+1 <= n-k-1 <= m
2) t(1) <= t(2) <= ... <= t(k+1)
t(n-k) <= t(n-k+1) <= ... <= t(n)
3) t(k+1) < t(k+2) < ... < t(n-k)
4) t(k+1) <= x(i) <= t(n-k)
5) The conditions specified by Schoenberg and Whitney must hold
for at least one subset of data points, i.e., there must be a
subset of data points y(j) such that
t(j) < y(j) < t(j+k+1), j=1,2,...,n-k-1
"""
class LSQUnivariateSpline(UnivariateSpline):
"""
1-D spline with explicit internal knots.
Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data. `t`
specifies the internal knots of the spline
Parameters
----------
x : (N,) array_like
Input dimension of data points -- must be increasing
y : (N,) array_like
Input dimension of data points
t : (M,) array_like
interior knots of the spline. Must be in ascending order and::
bbox[0] < t[0] < ... < t[-1] < bbox[-1]
w : (N,) array_like, optional
weights for spline fitting. Must be positive. If None (default),
weights are all equal.
bbox : (2,) array_like, optional
2-sequence specifying the boundary of the approximation interval. If
None (default), ``bbox = [x[0], x[-1]]``.
k : int, optional
Degree of the smoothing spline. Must be 1 <= `k` <= 5.
Default is `k` = 3, a cubic spline.
ext : int or str, optional
Controls the extrapolation mode for elements
not in the interval defined by the knot sequence.
* if ext=0 or 'extrapolate', return the extrapolated value.
* if ext=1 or 'zeros', return 0
* if ext=2 or 'raise', raise a ValueError
        * if ext=3 or 'const', return the boundary value.
The default value is 0.
check_finite : bool, optional
Whether to check that the input arrays contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination or non-sensical results) if the inputs
do contain infinities or NaNs.
Default is False.
Raises
------
ValueError
If the interior knots do not satisfy the Schoenberg-Whitney conditions
See Also
--------
UnivariateSpline :
a smooth univariate spline to fit a given set of data points.
InterpolatedUnivariateSpline :
        an interpolating univariate spline for a given set of data points.
splrep :
a function to find the B-spline representation of a 1-D curve
splev :
a function to evaluate a B-spline or its derivatives
sproot :
a function to find the roots of a cubic B-spline
splint :
a function to evaluate the definite integral of a B-spline between two
given points
spalde :
a function to evaluate all derivatives of a B-spline
Notes
-----
The number of data points must be larger than the spline degree `k`.
Knots `t` must satisfy the Schoenberg-Whitney conditions,
i.e., there must be a subset of data points ``x[j]`` such that
``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.
Examples
--------
>>> from scipy.interpolate import LSQUnivariateSpline, UnivariateSpline
>>> import matplotlib.pyplot as plt
>>> rng = np.random.default_rng()
>>> x = np.linspace(-3, 3, 50)
>>> y = np.exp(-x**2) + 0.1 * rng.standard_normal(50)
    Fit a smoothing spline with pre-defined internal knots:
>>> t = [-1, 0, 1]
>>> spl = LSQUnivariateSpline(x, y, t)
>>> xs = np.linspace(-3, 3, 1000)
>>> plt.plot(x, y, 'ro', ms=5)
>>> plt.plot(xs, spl(xs), 'g-', lw=3)
>>> plt.show()
Check the knot vector:
>>> spl.get_knots()
array([-3., -1., 0., 1., 3.])
Constructing an LSQ spline using the knots from another spline:
>>> x = np.arange(10)
>>> s = UnivariateSpline(x, x, s=0)
>>> s.get_knots()
array([ 0., 2., 3., 4., 5., 6., 7., 9.])
>>> knt = s.get_knots()
>>> s1 = LSQUnivariateSpline(x, x, knt[1:-1]) # Chop 1st and last knot
>>> s1.get_knots()
array([ 0., 2., 3., 4., 5., 6., 7., 9.])
"""
def __init__(self, x, y, t, w=None, bbox=[None]*2, k=3,
ext=0, check_finite=False):
x, y, w, bbox, self.ext = self.validate_input(x, y, w, bbox, k, None,
ext, check_finite)
if not np.all(diff(x) >= 0.0):
raise ValueError('x must be increasing')
# _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
xb = bbox[0]
xe = bbox[1]
if xb is None:
xb = x[0]
if xe is None:
xe = x[-1]
t = concatenate(([xb]*(k+1), t, [xe]*(k+1)))
n = len(t)
if not np.all(t[k+1:n-k]-t[k:n-k-1] > 0, axis=0):
raise ValueError('Interior knots t must satisfy '
'Schoenberg-Whitney conditions')
if not dfitpack.fpchec(x, t, k) == 0:
raise ValueError(_fpchec_error_string)
data = dfitpack.fpcurfm1(x, y, k, t, w=w, xb=xb, xe=xe)
self._data = data[:-3] + (None, None, data[-1])
self._reset_class()
# ############### Bivariate spline ####################
class _BivariateSplineBase:
""" Base class for Bivariate spline s(x,y) interpolation on the rectangle
[xb,xe] x [yb, ye] calculated from a given set of data points
(x,y,z).
See Also
--------
bisplrep :
a function to find a bivariate B-spline representation of a surface
bisplev :
a function to evaluate a bivariate B-spline and its derivatives
BivariateSpline :
a base class for bivariate splines.
SphereBivariateSpline :
a bivariate spline on a spherical grid
"""
def get_residual(self):
""" Return weighted sum of squared residuals of the spline
approximation: sum ((w[i]*(z[i]-s(x[i],y[i])))**2,axis=0)
"""
return self.fp
def get_knots(self):
""" Return a tuple (tx,ty) where tx,ty contain knots positions
of the spline with respect to x-, y-variable, respectively.
The position of interior and additional knots are given as
t[k+1:-k-1] and t[:k+1]=b, t[-k-1:]=e, respectively.
"""
return self.tck[:2]
def get_coeffs(self):
""" Return spline coefficients."""
return self.tck[2]
def __call__(self, x, y, dx=0, dy=0, grid=True):
"""
Evaluate the spline or its derivatives at given positions.
Parameters
----------
x, y : array_like
Input coordinates.
If `grid` is False, evaluate the spline at points ``(x[i],
y[i]), i=0, ..., len(x)-1``. Standard Numpy broadcasting
is obeyed.
If `grid` is True: evaluate spline at the grid points
defined by the coordinate arrays x, y. The arrays must be
sorted to increasing order.
Note that the axis ordering is inverted relative to
the output of meshgrid.
dx : int
Order of x-derivative
.. versionadded:: 0.14.0
dy : int
Order of y-derivative
.. versionadded:: 0.14.0
grid : bool
Whether to evaluate the results on a grid spanned by the
input arrays, or at points specified by the input arrays.
.. versionadded:: 0.14.0
"""
x = np.asarray(x)
y = np.asarray(y)
tx, ty, c = self.tck[:3]
kx, ky = self.degrees
if grid:
if x.size == 0 or y.size == 0:
return np.zeros((x.size, y.size), dtype=self.tck[2].dtype)
if (x.size >= 2) and (not np.all(np.diff(x) >= 0.0)):
raise ValueError("x must be strictly increasing when `grid` is True")
if (y.size >= 2) and (not np.all(np.diff(y) >= 0.0)):
raise ValueError("y must be strictly increasing when `grid` is True")
if dx or dy:
z, ier = dfitpack.parder(tx, ty, c, kx, ky, dx, dy, x, y)
if not ier == 0:
raise ValueError("Error code returned by parder: %s" % ier)
else:
z, ier = dfitpack.bispev(tx, ty, c, kx, ky, x, y)
if not ier == 0:
raise ValueError("Error code returned by bispev: %s" % ier)
else:
# standard Numpy broadcasting
if x.shape != y.shape:
x, y = np.broadcast_arrays(x, y)
shape = x.shape
x = x.ravel()
y = y.ravel()
if x.size == 0 or y.size == 0:
return np.zeros(shape, dtype=self.tck[2].dtype)
if dx or dy:
z, ier = dfitpack.pardeu(tx, ty, c, kx, ky, dx, dy, x, y)
if not ier == 0:
raise ValueError("Error code returned by pardeu: %s" % ier)
else:
z, ier = dfitpack.bispeu(tx, ty, c, kx, ky, x, y)
if not ier == 0:
raise ValueError("Error code returned by bispeu: %s" % ier)
z = z.reshape(shape)
return z
_surfit_messages = {1: """
The required storage space exceeds the available storage space: nxest
or nyest too small, or s too small.
The weighted least-squares spline corresponds to the current set of
knots.""",
2: """
A theoretically impossible result was found during the iteration
process for finding a smoothing spline with fp = s: s too small or
badly chosen eps.
Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""",
3: """
the maximal number of iterations maxit (set to 20 by the program)
allowed for finding a smoothing spline with fp=s has been reached:
s too small.
Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""",
4: """
No more knots can be added because the number of b-spline coefficients
(nx-kx-1)*(ny-ky-1) already exceeds the number of data points m:
either s or m too small.
The weighted least-squares spline corresponds to the current set of
knots.""",
5: """
No more knots can be added because the additional knot would (quasi)
coincide with an old one: s too small or too large a weight to an
inaccurate data point.
The weighted least-squares spline corresponds to the current set of
knots.""",
10: """
Error on entry, no approximation returned. The following conditions
must hold:
xb<=x[i]<=xe, yb<=y[i]<=ye, w[i]>0, i=0..m-1
If iopt==-1, then
xb<tx[kx+1]<tx[kx+2]<...<tx[nx-kx-2]<xe
yb<ty[ky+1]<ty[ky+2]<...<ty[ny-ky-2]<ye""",
-3: """
The coefficients of the spline returned have been computed as the
minimal norm least-squares solution of a (numerically) rank deficient
system (deficiency=%i). If deficiency is large, the results may be
inaccurate. Deficiency may strongly depend on the value of eps."""
}
class BivariateSpline(_BivariateSplineBase):
"""
Base class for bivariate splines.
This describes a spline ``s(x, y)`` of degrees ``kx`` and ``ky`` on
the rectangle ``[xb, xe] * [yb, ye]`` calculated from a given set
of data points ``(x, y, z)``.
This class is meant to be subclassed, not instantiated directly.
To construct these splines, call either `SmoothBivariateSpline` or
`LSQBivariateSpline` or `RectBivariateSpline`.
See Also
--------
UnivariateSpline :
a smooth univariate spline to fit a given set of data points.
SmoothBivariateSpline :
a smoothing bivariate spline through the given points
LSQBivariateSpline :
a bivariate spline using weighted least-squares fitting
RectSphereBivariateSpline :
a bivariate spline over a rectangular mesh on a sphere
SmoothSphereBivariateSpline :
a smoothing bivariate spline in spherical coordinates
LSQSphereBivariateSpline :
a bivariate spline in spherical coordinates using weighted
least-squares fitting
RectBivariateSpline :
a bivariate spline over a rectangular mesh.
bisplrep :
a function to find a bivariate B-spline representation of a surface
bisplev :
a function to evaluate a bivariate B-spline and its derivatives
"""
@classmethod
def _from_tck(cls, tck):
"""Construct a spline object from given tck and degree"""
self = cls.__new__(cls)
if len(tck) != 5:
raise ValueError("tck should be a 5 element tuple of tx,"
" ty, c, kx, ky")
self.tck = tck[:3]
self.degrees = tck[3:]
return self
def ev(self, xi, yi, dx=0, dy=0):
"""
Evaluate the spline at points
Returns the interpolated value at ``(xi[i], yi[i]),
i=0,...,len(xi)-1``.
Parameters
----------
xi, yi : array_like
Input coordinates. Standard Numpy broadcasting is obeyed.
dx : int, optional
Order of x-derivative
.. versionadded:: 0.14.0
dy : int, optional
Order of y-derivative
.. versionadded:: 0.14.0
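Examples
--------
A minimal sketch using a spline built from gridded samples of
``z = x + y`` (the evaluated values are approximate, not exact output):
>>> from scipy.interpolate import RectBivariateSpline
>>> x = y = np.linspace(0., 1., 11)
>>> z = x[:, None] + y[None, :]
>>> spl = RectBivariateSpline(x, y, z)
>>> vals = spl.ev([0.1, 0.5], [0.2, 0.4])  # approximately [0.3, 0.9]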
"""
return self.__call__(xi, yi, dx=dx, dy=dy, grid=False)
def integral(self, xa, xb, ya, yb):
"""
Evaluate the integral of the spline over area [xa,xb] x [ya,yb].
Parameters
----------
xa, xb : float
The end-points of the x integration interval.
ya, yb : float
The end-points of the y integration interval.
Returns
-------
integ : float
The value of the resulting integral.
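Examples
--------
A minimal sketch using `RectBivariateSpline` on a constant surface; the
integral over the unit square should be close to 1.0:
>>> from scipy.interpolate import RectBivariateSpline
>>> x = y = np.linspace(0., 1., 11)
>>> z = np.ones((11, 11))
>>> spl = RectBivariateSpline(x, y, z)
>>> val = spl.integral(0., 1., 0., 1.)  # approximately 1.0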
"""
tx, ty, c = self.tck[:3]
kx, ky = self.degrees
return dfitpack.dblint(tx, ty, c, kx, ky, xa, xb, ya, yb)
@staticmethod
def _validate_input(x, y, z, w, kx, ky, eps):
x, y, z = np.asarray(x), np.asarray(y), np.asarray(z)
if not x.size == y.size == z.size:
raise ValueError('x, y, and z should have the same length')
if w is not None:
w = np.asarray(w)
if x.size != w.size:
raise ValueError('x, y, z, and w should have the same length')
elif not np.all(w >= 0.0):
raise ValueError('w should be positive')
if (eps is not None) and (not 0.0 < eps < 1.0):
raise ValueError('eps should be between (0, 1)')
if not x.size >= (kx + 1) * (ky + 1):
raise ValueError('The length of x, y and z should be at least'
' (kx+1) * (ky+1)')
return x, y, z, w
class SmoothBivariateSpline(BivariateSpline):
"""
Smooth bivariate spline approximation.
Parameters
----------
x, y, z : array_like
1-D sequences of data points (order is not important).
w : array_like, optional
Positive 1-D sequence of weights, of same length as `x`, `y` and `z`.
bbox : array_like, optional
Sequence of length 4 specifying the boundary of the rectangular
approximation domain. By default,
``bbox=[min(x), max(x), min(y), max(y)]``.
kx, ky : ints, optional
Degrees of the bivariate spline. Default is 3.
s : float, optional
Positive smoothing factor defined for estimation condition:
``sum((w[i]*(z[i]-s(x[i], y[i])))**2, axis=0) <= s``
Default ``s=len(w)`` which should be a good value if ``1/w[i]`` is an
estimate of the standard deviation of ``z[i]``.
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value within the open
interval ``(0, 1)``, the default is 1e-16.
See Also
--------
BivariateSpline :
a base class for bivariate splines.
UnivariateSpline :
a smooth univariate spline to fit a given set of data points.
LSQBivariateSpline :
a bivariate spline using weighted least-squares fitting
RectSphereBivariateSpline :
a bivariate spline over a rectangular mesh on a sphere
SmoothSphereBivariateSpline :
a smoothing bivariate spline in spherical coordinates
LSQSphereBivariateSpline :
a bivariate spline in spherical coordinates using weighted
least-squares fitting
RectBivariateSpline :
a bivariate spline over a rectangular mesh
bisplrep :
a function to find a bivariate B-spline representation of a surface
bisplev :
a function to evaluate a bivariate B-spline and its derivatives
Notes
-----
The length of `x`, `y` and `z` should be at least ``(kx+1) * (ky+1)``.
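Examples
--------
A minimal sketch, fitting scattered samples of the plane ``z = x + y``;
the evaluated value should be close to 1.0 but is not exact:
>>> from scipy.interpolate import SmoothBivariateSpline
>>> rng = np.random.default_rng(0)
>>> x, y = rng.uniform(size=50), rng.uniform(size=50)
>>> z = x + y
>>> spl = SmoothBivariateSpline(x, y, z)
>>> val = spl(0.5, 0.5)  # approximately [[1.0]]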
"""
def __init__(self, x, y, z, w=None, bbox=[None] * 4, kx=3, ky=3, s=None,
eps=1e-16):
x, y, z, w = self._validate_input(x, y, z, w, kx, ky, eps)
bbox = ravel(bbox)
if not bbox.shape == (4,):
raise ValueError('bbox shape should be (4,)')
if s is not None and not s >= 0.0:
raise ValueError("s should be s >= 0.0")
xb, xe, yb, ye = bbox
nx, tx, ny, ty, c, fp, wrk1, ier = dfitpack.surfit_smth(x, y, z, w,
xb, xe, yb,
ye, kx, ky,
s=s, eps=eps,
lwrk2=1)
if ier > 10: # lwrk2 was too small, re-run
nx, tx, ny, ty, c, fp, wrk1, ier = dfitpack.surfit_smth(x, y, z, w,
xb, xe, yb,
ye, kx, ky,
s=s,
eps=eps,
lwrk2=ier)
if ier in [0, -1, -2]: # normal return
pass
else:
message = _surfit_messages.get(ier, 'ier=%s' % (ier))
warnings.warn(message)
self.fp = fp
self.tck = tx[:nx], ty[:ny], c[:(nx-kx-1)*(ny-ky-1)]
self.degrees = kx, ky
class LSQBivariateSpline(BivariateSpline):
"""
Weighted least-squares bivariate spline approximation.
Parameters
----------
x, y, z : array_like
1-D sequences of data points (order is not important).
tx, ty : array_like
Strictly ordered 1-D sequences of knots coordinates.
w : array_like, optional
Positive 1-D array of weights, of the same length as `x`, `y` and `z`.
bbox : (4,) array_like, optional
Sequence of length 4 specifying the boundary of the rectangular
approximation domain. By default,
``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
kx, ky : ints, optional
Degrees of the bivariate spline. Default is 3.
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value within the open
interval ``(0, 1)``, the default is 1e-16.
See Also
--------
BivariateSpline :
a base class for bivariate splines.
UnivariateSpline :
a smooth univariate spline to fit a given set of data points.
SmoothBivariateSpline :
a smoothing bivariate spline through the given points
RectSphereBivariateSpline :
a bivariate spline over a rectangular mesh on a sphere
SmoothSphereBivariateSpline :
a smoothing bivariate spline in spherical coordinates
LSQSphereBivariateSpline :
a bivariate spline in spherical coordinates using weighted
least-squares fitting
RectBivariateSpline :
a bivariate spline over a rectangular mesh.
bisplrep :
a function to find a bivariate B-spline representation of a surface
bisplev :
a function to evaluate a bivariate B-spline and its derivatives
Notes
-----
The length of `x`, `y` and `z` should be at least ``(kx+1) * (ky+1)``.
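Examples
--------
A minimal sketch with a single interior knot in each direction; the
evaluated value should be close to 0.25 but is not exact:
>>> from scipy.interpolate import LSQBivariateSpline
>>> rng = np.random.default_rng(0)
>>> x, y = rng.uniform(size=50), rng.uniform(size=50)
>>> z = x * y
>>> tx = ty = [0.5]
>>> spl = LSQBivariateSpline(x, y, z, tx, ty)
>>> val = spl(0.5, 0.5)  # approximately [[0.25]]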
"""
def __init__(self, x, y, z, tx, ty, w=None, bbox=[None]*4, kx=3, ky=3,
eps=None):
x, y, z, w = self._validate_input(x, y, z, w, kx, ky, eps)
bbox = ravel(bbox)
if not bbox.shape == (4,):
raise ValueError('bbox shape should be (4,)')
nx = 2*kx+2+len(tx)
ny = 2*ky+2+len(ty)
# The Fortran subroutine "surfit" (called as dfitpack.surfit_lsq)
# requires that the knot arrays passed as input should be "real
# array(s) of dimension nmax" where "nmax" refers to the greater of nx
# and ny. We pad the tx1/ty1 arrays here so that this is satisfied, and
# slice them to the desired sizes upon return.
nmax = max(nx, ny)
tx1 = zeros((nmax,), float)
ty1 = zeros((nmax,), float)
tx1[kx+1:nx-kx-1] = tx
ty1[ky+1:ny-ky-1] = ty
xb, xe, yb, ye = bbox
tx1, ty1, c, fp, ier = dfitpack.surfit_lsq(x, y, z, nx, tx1, ny, ty1,
w, xb, xe, yb, ye,
kx, ky, eps, lwrk2=1)
if ier > 10:
tx1, ty1, c, fp, ier = dfitpack.surfit_lsq(x, y, z,
nx, tx1, ny, ty1, w,
xb, xe, yb, ye,
kx, ky, eps, lwrk2=ier)
if ier in [0, -1, -2]: # normal return
pass
else:
if ier < -2:
deficiency = (nx-kx-1)*(ny-ky-1)+ier
message = _surfit_messages.get(-3) % (deficiency)
else:
message = _surfit_messages.get(ier, 'ier=%s' % (ier))
warnings.warn(message)
self.fp = fp
self.tck = tx1[:nx], ty1[:ny], c
self.degrees = kx, ky
class RectBivariateSpline(BivariateSpline):
"""
Bivariate spline approximation over a rectangular mesh.
Can be used for both smoothing and interpolating data.
Parameters
----------
x,y : array_like
1-D arrays of coordinates in strictly ascending order.
z : array_like
2-D array of data with shape (x.size,y.size).
bbox : array_like, optional
Sequence of length 4 specifying the boundary of the rectangular
approximation domain. By default,
``bbox=[min(x), max(x), min(y), max(y)]``.
kx, ky : ints, optional
Degrees of the bivariate spline. Default is 3.
s : float, optional
Positive smoothing factor defined for estimation condition:
``sum((z[i]-f(x[i], y[i]))**2, axis=0) <= s`` where f is a spline
function. Default is ``s=0``, which is for interpolation.
See Also
--------
BivariateSpline :
a base class for bivariate splines.
UnivariateSpline :
a smooth univariate spline to fit a given set of data points.
SmoothBivariateSpline :
a smoothing bivariate spline through the given points
LSQBivariateSpline :
a bivariate spline using weighted least-squares fitting
RectSphereBivariateSpline :
a bivariate spline over a rectangular mesh on a sphere
SmoothSphereBivariateSpline :
a smoothing bivariate spline in spherical coordinates
LSQSphereBivariateSpline :
a bivariate spline in spherical coordinates using weighted
least-squares fitting
bisplrep :
a function to find a bivariate B-spline representation of a surface
bisplev :
a function to evaluate a bivariate B-spline and its derivatives
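Examples
--------
A minimal interpolation sketch on a small grid (``s=0``, the default,
interpolates; the evaluated value should be close to 0.9):
>>> from scipy.interpolate import RectBivariateSpline
>>> x = y = np.linspace(0., 1., 11)
>>> z = x[:, None] + y[None, :]
>>> spl = RectBivariateSpline(x, y, z)
>>> val = spl(0.55, 0.35)  # approximately [[0.9]]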
"""
def __init__(self, x, y, z, bbox=[None] * 4, kx=3, ky=3, s=0):
x, y, bbox = ravel(x), ravel(y), ravel(bbox)
z = np.asarray(z)
if not np.all(diff(x) > 0.0):
raise ValueError('x must be strictly increasing')
if not np.all(diff(y) > 0.0):
raise ValueError('y must be strictly increasing')
if not x.size == z.shape[0]:
raise ValueError('x dimension of z must have same number of '
'elements as x')
if not y.size == z.shape[1]:
raise ValueError('y dimension of z must have same number of '
'elements as y')
if not bbox.shape == (4,):
raise ValueError('bbox shape should be (4,)')
if s is not None and not s >= 0.0:
raise ValueError("s should be s >= 0.0")
z = ravel(z)
xb, xe, yb, ye = bbox
nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(x, y, z, xb, xe, yb,
ye, kx, ky, s)
if ier not in [0, -1, -2]:
msg = _surfit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(msg)
self.fp = fp
self.tck = tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)]
self.degrees = kx, ky
_spherefit_messages = _surfit_messages.copy()
_spherefit_messages[10] = """
ERROR. On entry, the input data are controlled on validity. The following
restrictions must be satisfied:
-1<=iopt<=1, m>=2, ntest>=8 ,npest >=8, 0<eps<1,
0<=teta(i)<=pi, 0<=phi(i)<=2*pi, w(i)>0, i=1,...,m
lwrk1 >= 185+52*v+10*u+14*u*v+8*(u-1)*v**2+8*m
kwrk >= m+(ntest-7)*(npest-7)
if iopt=-1: 8<=nt<=ntest , 9<=np<=npest
0<tt(5)<tt(6)<...<tt(nt-4)<pi
0<tp(5)<tp(6)<...<tp(np-4)<2*pi
if iopt>=0: s>=0
if one of these conditions is found to be violated,control
is immediately repassed to the calling program. in that
case there is no approximation returned."""
_spherefit_messages[-3] = """
WARNING. The coefficients of the spline returned have been computed as the
minimal norm least-squares solution of a (numerically) rank
deficient system (deficiency=%i, rank=%i). Especially if the rank
deficiency, which is computed by 6+(nt-8)*(np-7)+ier, is large,
the results may be inaccurate. They could also seriously depend on
the value of eps."""
class SphereBivariateSpline(_BivariateSplineBase):
"""
Bivariate spline s(x,y) of degrees 3 on a sphere, calculated from a
given set of data points (theta,phi,r).
.. versionadded:: 0.11.0
See Also
--------
bisplrep :
a function to find a bivariate B-spline representation of a surface
bisplev :
a function to evaluate a bivariate B-spline and its derivatives
UnivariateSpline :
a smooth univariate spline to fit a given set of data points.
SmoothBivariateSpline :
a smoothing bivariate spline through the given points
LSQUnivariateSpline :
a univariate spline using weighted least-squares fitting
"""
def __call__(self, theta, phi, dtheta=0, dphi=0, grid=True):
"""
Evaluate the spline or its derivatives at given positions.
Parameters
----------
theta, phi : array_like
Input coordinates.
If `grid` is False, evaluate the spline at points
``(theta[i], phi[i]), i=0, ..., len(x)-1``. Standard
Numpy broadcasting is obeyed.
If `grid` is True: evaluate spline at the grid points
defined by the coordinate arrays theta, phi. The arrays
must be sorted to increasing order.
dtheta : int, optional
Order of theta-derivative
.. versionadded:: 0.14.0
dphi : int
Order of phi-derivative
.. versionadded:: 0.14.0
grid : bool
Whether to evaluate the results on a grid spanned by the
input arrays, or at points specified by the input arrays.
.. versionadded:: 0.14.0
"""
theta = np.asarray(theta)
phi = np.asarray(phi)
if theta.size > 0 and (theta.min() < 0. or theta.max() > np.pi):
raise ValueError("requested theta out of bounds.")
return _BivariateSplineBase.__call__(self, theta, phi,
dx=dtheta, dy=dphi, grid=grid)
def ev(self, theta, phi, dtheta=0, dphi=0):
"""
Evaluate the spline at points
Returns the interpolated value at ``(theta[i], phi[i]),
i=0,...,len(theta)-1``.
Parameters
----------
theta, phi : array_like
Input coordinates. Standard Numpy broadcasting is obeyed.
dtheta : int, optional
Order of theta-derivative
.. versionadded:: 0.14.0
dphi : int, optional
Order of phi-derivative
.. versionadded:: 0.14.0
"""
return self.__call__(theta, phi, dtheta=dtheta, dphi=dphi, grid=False)
class SmoothSphereBivariateSpline(SphereBivariateSpline):
"""
Smooth bivariate spline approximation in spherical coordinates.
.. versionadded:: 0.11.0
Parameters
----------
theta, phi, r : array_like
1-D sequences of data points (order is not important). Coordinates
must be given in radians. Theta must lie within the interval
``[0, pi]``, and phi must lie within the interval ``[0, 2pi]``.
w : array_like, optional
Positive 1-D sequence of weights.
s : float, optional
Positive smoothing factor defined for estimation condition:
``sum((w(i)*(r(i) - s(theta(i), phi(i))))**2, axis=0) <= s``
Default ``s=len(w)`` which should be a good value if ``1/w[i]`` is an
estimate of the standard deviation of ``r[i]``.
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value within the open
interval ``(0, 1)``, the default is 1e-16.
See Also
--------
BivariateSpline :
a base class for bivariate splines.
UnivariateSpline :
a smooth univariate spline to fit a given set of data points.
SmoothBivariateSpline :
a smoothing bivariate spline through the given points
LSQBivariateSpline :
a bivariate spline using weighted least-squares fitting
RectSphereBivariateSpline :
a bivariate spline over a rectangular mesh on a sphere
LSQSphereBivariateSpline :
a bivariate spline in spherical coordinates using weighted
least-squares fitting
RectBivariateSpline :
a bivariate spline over a rectangular mesh.
bisplrep :
a function to find a bivariate B-spline representation of a surface
bisplev :
a function to evaluate a bivariate B-spline and its derivatives
Notes
-----
For more information, see the FITPACK_ site about this function.
.. _FITPACK: http://www.netlib.org/dierckx/sphere.f
Examples
--------
Suppose we have global data on a coarse grid (the input data does not
have to be on a grid):
>>> theta = np.linspace(0., np.pi, 7)
>>> phi = np.linspace(0., 2*np.pi, 9)
>>> data = np.empty((theta.shape[0], phi.shape[0]))
>>> data[:,0], data[0,:], data[-1,:] = 0., 0., 0.
>>> data[1:-1,1], data[1:-1,-1] = 1., 1.
>>> data[1,1:-1], data[-2,1:-1] = 1., 1.
>>> data[2:-2,2], data[2:-2,-2] = 2., 2.
>>> data[2,2:-2], data[-3,2:-2] = 2., 2.
>>> data[3,3:-2] = 3.
>>> data = np.roll(data, 4, 1)
We need to set up the interpolator object
>>> lats, lons = np.meshgrid(theta, phi)
>>> from scipy.interpolate import SmoothSphereBivariateSpline
>>> lut = SmoothSphereBivariateSpline(lats.ravel(), lons.ravel(),
... data.T.ravel(), s=3.5)
As a first test, we'll see what the algorithm returns when run on the
input coordinates
>>> data_orig = lut(theta, phi)
Finally we interpolate the data to a finer grid
>>> fine_lats = np.linspace(0., np.pi, 70)
>>> fine_lons = np.linspace(0., 2 * np.pi, 90)
>>> data_smth = lut(fine_lats, fine_lons)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(131)
>>> ax1.imshow(data, interpolation='nearest')
>>> ax2 = fig.add_subplot(132)
>>> ax2.imshow(data_orig, interpolation='nearest')
>>> ax3 = fig.add_subplot(133)
>>> ax3.imshow(data_smth, interpolation='nearest')
>>> plt.show()
"""
def __init__(self, theta, phi, r, w=None, s=0., eps=1E-16):
theta, phi, r = np.asarray(theta), np.asarray(phi), np.asarray(r)
# input validation
if not ((0.0 <= theta).all() and (theta <= np.pi).all()):
raise ValueError('theta should be between [0, pi]')
if not ((0.0 <= phi).all() and (phi <= 2.0 * np.pi).all()):
raise ValueError('phi should be between [0, 2pi]')
if w is not None:
w = np.asarray(w)
if not (w >= 0.0).all():
raise ValueError('w should be positive')
if not s >= 0.0:
raise ValueError('s should be positive')
if not 0.0 < eps < 1.0:
raise ValueError('eps should be between (0, 1)')
if np.issubclass_(w, float):
w = ones(len(theta)) * w
nt_, tt_, np_, tp_, c, fp, ier = dfitpack.spherfit_smth(theta, phi,
r, w=w, s=s,
eps=eps)
if ier not in [0, -1, -2]:
message = _spherefit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(message)
self.fp = fp
self.tck = tt_[:nt_], tp_[:np_], c[:(nt_ - 4) * (np_ - 4)]
self.degrees = (3, 3)
def __call__(self, theta, phi, dtheta=0, dphi=0, grid=True):
theta = np.asarray(theta)
phi = np.asarray(phi)
if phi.size > 0 and (phi.min() < 0. or phi.max() > 2. * np.pi):
raise ValueError("requested phi out of bounds.")
return SphereBivariateSpline.__call__(self, theta, phi, dtheta=dtheta,
dphi=dphi, grid=grid)
class LSQSphereBivariateSpline(SphereBivariateSpline):
"""
Weighted least-squares bivariate spline approximation in spherical
coordinates.
Determines a smoothing bicubic spline according to a given
set of knots in the `theta` and `phi` directions.
.. versionadded:: 0.11.0
Parameters
----------
theta, phi, r : array_like
1-D sequences of data points (order is not important). Coordinates
must be given in radians. Theta must lie within the interval
``[0, pi]``, and phi must lie within the interval ``[0, 2pi]``.
tt, tp : array_like
Strictly ordered 1-D sequences of knots coordinates.
Coordinates must satisfy ``0 < tt[i] < pi``, ``0 < tp[i] < 2*pi``.
w : array_like, optional
Positive 1-D sequence of weights, of the same length as `theta`, `phi`
and `r`.
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value within the
open interval ``(0, 1)``, the default is 1e-16.
See Also
--------
BivariateSpline :
a base class for bivariate splines.
UnivariateSpline :
a smooth univariate spline to fit a given set of data points.
SmoothBivariateSpline :
a smoothing bivariate spline through the given points
LSQBivariateSpline :
a bivariate spline using weighted least-squares fitting
RectSphereBivariateSpline :
a bivariate spline over a rectangular mesh on a sphere
SmoothSphereBivariateSpline :
a smoothing bivariate spline in spherical coordinates
RectBivariateSpline :
a bivariate spline over a rectangular mesh.
bisplrep :
a function to find a bivariate B-spline representation of a surface
bisplev :
a function to evaluate a bivariate B-spline and its derivatives
Notes
-----
For more information, see the FITPACK_ site about this function.
.. _FITPACK: http://www.netlib.org/dierckx/sphere.f
Examples
--------
Suppose we have global data on a coarse grid (the input data does not
have to be on a grid):
>>> from scipy.interpolate import LSQSphereBivariateSpline
>>> import matplotlib.pyplot as plt
>>> theta = np.linspace(0, np.pi, num=7)
>>> phi = np.linspace(0, 2*np.pi, num=9)
>>> data = np.empty((theta.shape[0], phi.shape[0]))
>>> data[:,0], data[0,:], data[-1,:] = 0., 0., 0.
>>> data[1:-1,1], data[1:-1,-1] = 1., 1.
>>> data[1,1:-1], data[-2,1:-1] = 1., 1.
>>> data[2:-2,2], data[2:-2,-2] = 2., 2.
>>> data[2,2:-2], data[-3,2:-2] = 2., 2.
>>> data[3,3:-2] = 3.
>>> data = np.roll(data, 4, 1)
We need to set up the interpolator object. Here, we must also specify the
coordinates of the knots to use.
>>> lats, lons = np.meshgrid(theta, phi)
>>> knotst, knotsp = theta.copy(), phi.copy()
>>> knotst[0] += .0001
>>> knotst[-1] -= .0001
>>> knotsp[0] += .0001
>>> knotsp[-1] -= .0001
>>> lut = LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
... data.T.ravel(), knotst, knotsp)
As a first test, we'll see what the algorithm returns when run on the
input coordinates
>>> data_orig = lut(theta, phi)
Finally we interpolate the data to a finer grid
>>> fine_lats = np.linspace(0., np.pi, 70)
>>> fine_lons = np.linspace(0., 2*np.pi, 90)
>>> data_lsq = lut(fine_lats, fine_lons)
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(131)
>>> ax1.imshow(data, interpolation='nearest')
>>> ax2 = fig.add_subplot(132)
>>> ax2.imshow(data_orig, interpolation='nearest')
>>> ax3 = fig.add_subplot(133)
>>> ax3.imshow(data_lsq, interpolation='nearest')
>>> plt.show()
"""
def __init__(self, theta, phi, r, tt, tp, w=None, eps=1E-16):
theta, phi, r = np.asarray(theta), np.asarray(phi), np.asarray(r)
tt, tp = np.asarray(tt), np.asarray(tp)
if not ((0.0 <= theta).all() and (theta <= np.pi).all()):
raise ValueError('theta should be between [0, pi]')
if not ((0.0 <= phi).all() and (phi <= 2*np.pi).all()):
raise ValueError('phi should be between [0, 2pi]')
if not ((0.0 < tt).all() and (tt < np.pi).all()):
raise ValueError('tt should be between (0, pi)')
if not ((0.0 < tp).all() and (tp < 2*np.pi).all()):
raise ValueError('tp should be between (0, 2pi)')
if w is not None:
w = np.asarray(w)
if not (w >= 0.0).all():
raise ValueError('w should be positive')
if not 0.0 < eps < 1.0:
raise ValueError('eps should be between (0, 1)')
if np.issubclass_(w, float):
w = ones(len(theta)) * w
nt_, np_ = 8 + len(tt), 8 + len(tp)
tt_, tp_ = zeros((nt_,), float), zeros((np_,), float)
tt_[4:-4], tp_[4:-4] = tt, tp
tt_[-4:], tp_[-4:] = np.pi, 2. * np.pi
tt_, tp_, c, fp, ier = dfitpack.spherfit_lsq(theta, phi, r, tt_, tp_,
w=w, eps=eps)
if ier > 0:
message = _spherefit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(message)
self.fp = fp
self.tck = tt_, tp_, c
self.degrees = (3, 3)
def __call__(self, theta, phi, dtheta=0, dphi=0, grid=True):
theta = np.asarray(theta)
phi = np.asarray(phi)
if phi.size > 0 and (phi.min() < 0. or phi.max() > 2. * np.pi):
raise ValueError("requested phi out of bounds.")
return SphereBivariateSpline.__call__(self, theta, phi, dtheta=dtheta,
dphi=dphi, grid=grid)
_spfit_messages = _surfit_messages.copy()
_spfit_messages[10] = """
ERROR: on entry, the input data are controlled on validity
the following restrictions must be satisfied.
-1<=iopt(1)<=1, 0<=iopt(2)<=1, 0<=iopt(3)<=1,
-1<=ider(1)<=1, 0<=ider(2)<=1, ider(2)=0 if iopt(2)=0.
-1<=ider(3)<=1, 0<=ider(4)<=1, ider(4)=0 if iopt(3)=0.
mu >= mumin (see above), mv >= 4, nuest >=8, nvest >= 8,
kwrk>=5+mu+mv+nuest+nvest,
lwrk >= 12+nuest*(mv+nvest+3)+nvest*24+4*mu+8*mv+max(nuest,mv+nvest)
0< u(i-1)<u(i)< pi,i=2,..,mu,
-pi<=v(1)< pi, v(1)<v(i-1)<v(i)<v(1)+2*pi, i=3,...,mv
if iopt(1)=-1: 8<=nu<=min(nuest,mu+6+iopt(2)+iopt(3))
0<tu(5)<tu(6)<...<tu(nu-4)< pi
8<=nv<=min(nvest,mv+7)
v(1)<tv(5)<tv(6)<...<tv(nv-4)<v(1)+2*pi
the schoenberg-whitney conditions, i.e. there must be
subset of grid co-ordinates uu(p) and vv(q) such that
tu(p) < uu(p) < tu(p+4) ,p=1,...,nu-4
(iopt(2)=1 and iopt(3)=1 also count for a uu-value
tv(q) < vv(q) < tv(q+4) ,q=1,...,nv-4
(vv(q) is either a value v(j) or v(j)+2*pi)
if iopt(1)>=0: s>=0
if s=0: nuest>=mu+6+iopt(2)+iopt(3), nvest>=mv+7
if one of these conditions is found to be violated,control is
immediately repassed to the calling program. in that case there is no
approximation returned."""
class RectSphereBivariateSpline(SphereBivariateSpline):
"""
Bivariate spline approximation over a rectangular mesh on a sphere.
Can be used for smoothing data.
.. versionadded:: 0.11.0
Parameters
----------
u : array_like
1-D array of colatitude coordinates in strictly ascending order.
Coordinates must be given in radians and lie within the open interval
``(0, pi)``.
v : array_like
1-D array of longitude coordinates in strictly ascending order.
Coordinates must be given in radians. First element (``v[0]``) must lie
within the interval ``[-pi, pi)``. Last element (``v[-1]``) must satisfy
``v[-1] <= v[0] + 2*pi``.
r : array_like
2-D array of data with shape ``(u.size, v.size)``.
s : float, optional
Positive smoothing factor defined for estimation condition
(``s=0`` is for interpolation).
pole_continuity : bool or (bool, bool), optional
Order of continuity at the poles ``u=0`` (``pole_continuity[0]``) and
``u=pi`` (``pole_continuity[1]``). The order of continuity at the pole
will be 1 or 0 when this is True or False, respectively.
Defaults to False.
pole_values : float or (float, float), optional
Data values at the poles ``u=0`` and ``u=pi``. Either the whole
parameter or each individual element can be None. Defaults to None.
pole_exact : bool or (bool, bool), optional
Data value exactness at the poles ``u=0`` and ``u=pi``. If True, the
value is considered to be the right function value, and it will be
fitted exactly. If False, the value will be considered to be a data
value just like the other data values. Defaults to False.
pole_flat : bool or (bool, bool), optional
For the poles at ``u=0`` and ``u=pi``, specify whether or not the
approximation has vanishing derivatives. Defaults to False.
See Also
--------
BivariateSpline :
a base class for bivariate splines.
UnivariateSpline :
a smooth univariate spline to fit a given set of data points.
SmoothBivariateSpline :
a smoothing bivariate spline through the given points
LSQBivariateSpline :
a bivariate spline using weighted least-squares fitting
SmoothSphereBivariateSpline :
a smoothing bivariate spline in spherical coordinates
LSQSphereBivariateSpline :
a bivariate spline in spherical coordinates using weighted
least-squares fitting
RectBivariateSpline :
a bivariate spline over a rectangular mesh.
bisplrep :
a function to find a bivariate B-spline representation of a surface
bisplev :
a function to evaluate a bivariate B-spline and its derivatives
Notes
-----
Currently, only the smoothing spline approximation (``iopt[0] = 0`` and
``iopt[0] = 1`` in the FITPACK routine) is supported. The exact
least-squares spline approximation is not implemented yet.
When actually performing the interpolation, the requested `v` values must
lie within the same length 2pi interval that the original `v` values were
chosen from.
For more information, see the FITPACK_ site about this function.
.. _FITPACK: http://www.netlib.org/dierckx/spgrid.f
Examples
--------
Suppose we have global data on a coarse grid
>>> lats = np.linspace(10, 170, 9) * np.pi / 180.
>>> lons = np.linspace(0, 350, 18) * np.pi / 180.
>>> data = np.dot(np.atleast_2d(90. - np.linspace(-80., 80., 18)).T,
... np.atleast_2d(180. - np.abs(np.linspace(0., 350., 9)))).T
We want to interpolate it to a global one-degree grid
>>> new_lats = np.linspace(1, 180, 180) * np.pi / 180
>>> new_lons = np.linspace(1, 360, 360) * np.pi / 180
>>> new_lats, new_lons = np.meshgrid(new_lats, new_lons)
We need to set up the interpolator object
>>> from scipy.interpolate import RectSphereBivariateSpline
>>> lut = RectSphereBivariateSpline(lats, lons, data)
Finally we interpolate the data. The `RectSphereBivariateSpline` object
only takes 1-D arrays as input, therefore we need to do some reshaping.
>>> data_interp = lut.ev(new_lats.ravel(),
... new_lons.ravel()).reshape((360, 180)).T
Looking at the original and the interpolated data, one can see that the
interpolant reproduces the original data very well:
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(211)
>>> ax1.imshow(data, interpolation='nearest')
>>> ax2 = fig.add_subplot(212)
>>> ax2.imshow(data_interp, interpolation='nearest')
>>> plt.show()
Choosing the optimal value of ``s`` can be a delicate task. Recommended
values for ``s`` depend on the accuracy of the data values. If the user
has an idea of the statistical errors on the data, she can also find a
proper estimate for ``s``. By assuming that, if she specifies the
right ``s``, the interpolator will use a spline ``f(u,v)`` which exactly
reproduces the function underlying the data, she can evaluate
``sum((r(i,j)-s(u(i),v(j)))**2)`` to find a good estimate for this ``s``.
For example, if she knows that the statistical errors on her
``r(i,j)``-values are not greater than 0.1, she may expect that a good
``s`` should have a value not larger than ``u.size * v.size * (0.1)**2``.
If nothing is known about the statistical error in ``r(i,j)``, ``s`` must
be determined by trial and error. The best is then to start with a very
large value of ``s`` (to determine the least-squares polynomial and the
corresponding upper bound ``fp0`` for ``s``) and then to progressively
decrease the value of ``s`` (say by a factor 10 in the beginning, i.e.
``s = fp0 / 10, fp0 / 100, ...`` and more carefully as the approximation
shows more detail) to obtain closer fits.
The interpolation results for different values of ``s`` give some insight
into this process:
>>> fig2 = plt.figure()
>>> s = [3e9, 2e9, 1e9, 1e8]
>>> for ii in range(len(s)):
... lut = RectSphereBivariateSpline(lats, lons, data, s=s[ii])
... data_interp = lut.ev(new_lats.ravel(),
... new_lons.ravel()).reshape((360, 180)).T
... ax = fig2.add_subplot(2, 2, ii+1)
... ax.imshow(data_interp, interpolation='nearest')
... ax.set_title("s = %g" % s[ii])
>>> plt.show()
"""
def __init__(self, u, v, r, s=0., pole_continuity=False, pole_values=None,
pole_exact=False, pole_flat=False):
iopt = np.array([0, 0, 0], dtype=dfitpack_int)
ider = np.array([-1, 0, -1, 0], dtype=dfitpack_int)
if pole_values is None:
pole_values = (None, None)
elif isinstance(pole_values, (float, np.float32, np.float64)):
pole_values = (pole_values, pole_values)
if isinstance(pole_continuity, bool):
pole_continuity = (pole_continuity, pole_continuity)
if isinstance(pole_exact, bool):
pole_exact = (pole_exact, pole_exact)
if isinstance(pole_flat, bool):
pole_flat = (pole_flat, pole_flat)
r0, r1 = pole_values
iopt[1:] = pole_continuity
if r0 is None:
ider[0] = -1
else:
ider[0] = pole_exact[0]
if r1 is None:
ider[2] = -1
else:
ider[2] = pole_exact[1]
ider[1], ider[3] = pole_flat
u, v = np.ravel(u), np.ravel(v)
r = np.asarray(r)
if not (0.0 < u[0] and u[-1] < np.pi):
raise ValueError('u should be between (0, pi)')
if not -np.pi <= v[0] < np.pi:
raise ValueError('v[0] should be between [-pi, pi)')
if not v[-1] <= v[0] + 2*np.pi:
raise ValueError('v[-1] should be v[0] + 2pi or less ')
if not np.all(np.diff(u) > 0.0):
raise ValueError('u must be strictly increasing')
if not np.all(np.diff(v) > 0.0):
raise ValueError('v must be strictly increasing')
if not u.size == r.shape[0]:
raise ValueError('u dimension of r must have same number of '
'elements as u')
if not v.size == r.shape[1]:
raise ValueError('v dimension of r must have same number of '
'elements as v')
if pole_continuity[1] is False and pole_flat[1] is True:
raise ValueError('if pole_continuity is False, so must be '
'pole_flat')
if pole_continuity[0] is False and pole_flat[0] is True:
raise ValueError('if pole_continuity is False, so must be '
'pole_flat')
if not s >= 0.0:
raise ValueError('s should be positive')
r = np.ravel(r)
nu, tu, nv, tv, c, fp, ier = dfitpack.regrid_smth_spher(iopt, ider,
u.copy(),
v.copy(),
r.copy(),
r0, r1, s)
if ier not in [0, -1, -2]:
msg = _spfit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(msg)
self.fp = fp
self.tck = tu[:nu], tv[:nv], c[:(nu - 4) * (nv-4)]
self.degrees = (3, 3)
self.v0 = v[0]
def __call__(self, theta, phi, dtheta=0, dphi=0, grid=True):
theta = np.asarray(theta)
phi = np.asarray(phi)
return SphereBivariateSpline.__call__(self, theta, phi, dtheta=dtheta,
dphi=dphi, grid=grid)
| bsd-3-clause |
jni/gala | gala/morpho.py | 1 | 28947 | #!/usr/bin/env python
import numpy as np
from numpy import reshape, \
array, zeros, zeros_like, ones, arange, \
double, \
int8, int16, int32, int64, uint8, uint16, uint32, uint64, \
iinfo, isscalar, \
unique, \
newaxis, \
minimum, bincount, dot, nonzero, concatenate, \
setdiff1d, flatnonzero
import itertools as it
from collections import defaultdict, deque as queue
from scipy.ndimage import grey_dilation, generate_binary_structure, \
maximum_filter, minimum_filter
from scipy import ndimage as ndi
from scipy.ndimage import distance_transform_cdt
from scipy.ndimage.measurements import label, find_objects
from scipy.ndimage.morphology import binary_opening, binary_dilation
from . import iterprogress as ip
from .evaluate import relabel_from_one
from skimage import measure, util, feature
import skimage.morphology
from sklearn.externals import joblib
zero3d = array([0,0,0])
def complement(a):
return a.max()-a
def remove_merged_boundaries(labels, connectivity=1):
"""Remove boundaries in a label field when they separate the same region.
By convention, the boundary label is 0, and labels are positive.
Parameters
----------
labels : array of int
The label field to be processed.
connectivity : int in {1, ..., labels.ndim}, optional
The morphological connectivity for considering neighboring voxels.
Returns
-------
labels_out : array of int
The same label field, with unnecessary boundaries removed.
Examples
--------
>>> labels = np.array([[1, 0, 1], [0, 1, 0], [2, 0, 3]], np.int)
>>> remove_merged_boundaries(labels)
array([[1, 1, 1],
[0, 1, 0],
[2, 0, 3]])
"""
boundary = 0
labels_out = labels.copy()
is_boundary = (labels == boundary)
labels_complement = labels.copy()
labels_complement[is_boundary] = labels.max() + 1
se = ndi.generate_binary_structure(labels.ndim, connectivity)
smaller_labels = ndi.grey_erosion(labels_complement, footprint=se)
bigger_labels = ndi.grey_dilation(labels, footprint=se)
merged = is_boundary & (smaller_labels == bigger_labels)
labels_out[merged] = smaller_labels[merged]
return labels_out
def morphological_reconstruction(marker, mask, connectivity=1):
"""Perform morphological reconstruction of the marker into the mask.
See the Matlab image processing toolbox documentation for details:
http://www.mathworks.com/help/toolbox/images/f18-16264.html
"""
sel = generate_binary_structure(marker.ndim, connectivity)
diff = True
while diff:
markernew = grey_dilation(marker, footprint=sel)
markernew = minimum(markernew, mask)
diff = (markernew-marker).max() > 0
marker = markernew
return marker
def hminima(a, thresh):
"""Suppress all minima that are shallower than thresh.
Parameters
----------
a : array
The input array on which to perform hminima.
thresh : float
Any local minima shallower than this will be flattened.
Returns
-------
out : array
A copy of the input array with shallow minima suppressed.
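Examples
--------
A minimal sketch: the dip of depth 1 is flattened away, while the dip of
depth 3 survives with its depth reduced by `thresh`:
>>> hminima(np.array([5, 4, 5, 5, 2, 5]), 2)
array([5, 5, 5, 5, 4, 5])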
"""
maxval = a.max()
ainv = maxval-a
return maxval - morphological_reconstruction(ainv-thresh, ainv)
imhmin = hminima
remove_small_connected_components = skimage.morphology.remove_small_objects
def regional_minima(a, connectivity=1):
"""Find the regional minima in an ndarray."""
values = unique(a)
delta = (values - minimum_filter(values, footprint=ones(3)))[1:].min()
marker = complement(a)
mask = marker+delta
return marker == morphological_reconstruction(marker, mask, connectivity)
def impose_minima(a, minima, connectivity=1):
"""Transform 'a' so that its only regional minima are those in 'minima'.
Parameters:
'a': an ndarray
'minima': a boolean array of same shape as 'a'
'connectivity': the connectivity of the structuring element used in
morphological reconstruction.
Value:
an ndarray of same shape as a with unmarked local minima paved over.
"""
m = a.max()
mask = m - a
marker = zeros_like(mask)
minima = minima.astype(bool)
marker[minima] = mask[minima]
return m - morphological_reconstruction(marker, mask, connectivity)
def minimum_seeds(current_seeds, min_seed_coordinates, connectivity=1):
"""Ensure that each point in given coordinates has its own seed."""
seeds = current_seeds.copy()
sel = generate_binary_structure(seeds.ndim, connectivity)
if seeds.dtype == bool:
seeds = label(seeds, sel)[0]
new_seeds = grey_dilation(seeds, footprint=sel)
overlap = new_seeds[min_seed_coordinates]
seed_overlap_counts = bincount(concatenate((overlap, unique(seeds)))) - 1
seeds_to_delete = (seed_overlap_counts > 1)[seeds]
seeds[seeds_to_delete] = 0
seeds_to_add = [m[overlap==0] for m in min_seed_coordinates]
start = seeds.max() + 1
num_seeds = len(seeds_to_add[0])
seeds[seeds_to_add] = arange(start, start + num_seeds)
return seeds
def split_exclusions(image, labels, exclusions, dilation=0, connectivity=1,
standard_seeds=False):
"""Ensure that no segment in 'labels' overlaps more than one exclusion."""
labels = labels.copy()
cur_label = labels.max()
dilated_exclusions = exclusions.copy()
foot = generate_binary_structure(exclusions.ndim, connectivity)
for i in range(dilation):
dilated_exclusions = grey_dilation(exclusions, footprint=foot)
hashed = labels * (exclusions.max() + 1) + exclusions
hashed[exclusions == 0] = 0
violations = bincount(hashed.ravel()) > 1
violations[0] = False
if sum(violations) != 0:
offending_labels = labels[violations[hashed]]
mask = zeros(labels.shape, dtype=bool)
for offlabel in offending_labels:
mask += labels == offlabel
if standard_seeds:
seeds = label(mask * (image == 0))[0]
else:
seeds = label(mask * dilated_exclusions)[0]
seeds[seeds > 0] += cur_label
labels[mask] = watershed(image, seeds, connectivity, mask)[mask]
return labels
def watershed(a, seeds=None, connectivity=1, mask=None, smooth_thresh=0.0,
smooth_seeds=False, minimum_seed_size=0, dams=False,
override_skimage=False, show_progress=False):
"""Perform the watershed algorithm of Vincent & Soille (1991).
Parameters
----------
a : np.ndarray, arbitrary shape and type
The input image on which to perform the watershed transform.
seeds : np.ndarray, int or bool type, same shape as `a` (optional)
The seeds for the watershed. If provided, these are the only basins
allowed, and the algorithm proceeds by flooding from the seeds.
Otherwise, every local minimum is used as a seed.
connectivity : int, {1, ..., a.ndim} (optional, default 1)
The neighborhood of each pixel, defined as in `scipy.ndimage`.
mask : np.ndarray, type bool, same shape as `a`. (optional)
If provided, perform watershed only in the parts of `a` that are set
to `True` in `mask`.
smooth_thresh : float (optional, default 0.0)
Local minima that are less deep than this threshold are suppressed,
using `hminima`.
smooth_seeds : bool (optional, default False)
Perform binary opening on the seeds, using the same connectivity as
the watershed.
minimum_seed_size : int (optional, default 0)
Remove seed regions smaller than this size.
dams : bool (optional, default False)
Place a dam where two basins meet. Set this to True if you require
0-labeled boundaries between different regions.
override_skimage : bool (optional, default False)
skimage.morphology.watershed is used to implement the main part of the
algorithm when `dams=False`. Use this flag to use the separate pure
Python implementation instead.
show_progress : bool (optional, default False)
Show a cute little ASCII progress bar (using the progressbar package)
Returns
-------
ws : np.ndarray, same shape as `a`, int type.
The watershed transform of the input image.
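Examples
--------
A minimal sketch of the default (unseeded) behaviour: the two zero-valued
minima become two basins (the exact label values may differ):
>>> a = np.array([[1., 0., 1., 2., 1., 0., 1.]])
>>> ws = watershed(a)
>>> ws.shape
(1, 7)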
"""
seeded = seeds is not None
sel = generate_binary_structure(a.ndim, connectivity)
# various keyword arguments operate by modifying the input image `a`.
# However, we operate on a copy of it called `b`, so that `a` can be used
# to break ties.
b = a
if not seeded:
seeds = regional_minima(a, connectivity)
if minimum_seed_size > 0:
seeds = remove_small_connected_components(seeds, minimum_seed_size)
seeds = relabel_from_one(seeds)[0]
if smooth_seeds:
seeds = binary_opening(seeds, sel)
if smooth_thresh > 0.0:
b = hminima(a, smooth_thresh)
if seeds.dtype == bool:
seeds = label(seeds, sel)[0]
if not override_skimage and not dams:
return skimage.morphology.watershed(b, seeds, sel, None, mask)
elif seeded:
b = impose_minima(a, seeds.astype(bool), connectivity)
levels = unique(b)
a = pad(a, a.max()+1)
b = pad(b, b.max()+1)
ar = a.ravel()
br = b.ravel()
ws = pad(seeds, 0)
wsr = ws.ravel()
neighbors = build_neighbors_array(a, connectivity)
level_pixels = build_levels_dict(b)
if show_progress: wspbar = ip.StandardProgressBar('Watershed...')
else: wspbar = ip.NoProgressBar()
for i, level in ip.with_progress(enumerate(levels),
pbar=wspbar, length=len(levels)):
idxs_adjacent_to_labels = queue([idx for idx in level_pixels[level] if
any(wsr[neighbors[idx]])])
while len(idxs_adjacent_to_labels) > 0:
idx = idxs_adjacent_to_labels.popleft()
if wsr[idx] > 0: continue # in case we already processed it
nidxs = neighbors[idx] # neighbors
lnidxs = nidxs[(wsr[nidxs] != 0).astype(bool)] # labeled neighbors
adj_labels = unique(wsr[lnidxs])
if len(adj_labels) == 1 or len(adj_labels) > 1 and not dams:
# assign a label
wsr[idx] = wsr[lnidxs][ar[lnidxs].argmin()]
idxs_adjacent_to_labels.extend(nidxs[((wsr[nidxs] == 0) *
(br[nidxs] == level)).astype(bool) ])
return juicy_center(ws)
def multiscale_regular_seeds(off_limits, num_seeds):
"""Return evenly-spaced seeds, but thinned in areas with no boundaries.
Parameters
----------
off_limits : array of bool, shape (M, N)
A binary array where `True` indicates the position of a boundary,
and thus where we don't want to place seeds.
num_seeds : int
The desired number of seeds.
Returns
-------
seeds : array of int, shape (M, N)
An array of seed points. Each seed gets its own integer ID,
starting from 1.
"""
seeds_binary = np.zeros(off_limits.shape, dtype=bool)
grid = util.regular_grid(off_limits.shape, num_seeds)
seeds_binary[grid] = True
seeds_binary &= ~off_limits
seeds_img = seeds_binary[grid]
thinned_equal = False
step = 2
while not thinned_equal:
thinned = _thin_seeds(seeds_img, step)
thinned_equal = np.all(seeds_img == thinned)
seeds_img = thinned
step *= 2
seeds_binary[grid] = seeds_img
return ndi.label(seeds_binary)[0]
def _thin_seeds(seeds_img, step):
out = np.copy(seeds_img)
m, n = seeds_img.shape
for r in range(0, m, step):
for c in range(0, n, step):
window = (slice(r, min(r + 5 * step // 2, m), step // 2),
slice(c, min(c + 5 * step // 2, n), step // 2))
if np.all(seeds_img[window]):
out[window][1::2, :] = False
out[window][:, 1::2] = False
return out
def multiscale_seed_sequence(prob, l1_threshold=0, grid_density=10):
npoints = ((prob.shape[1] // grid_density) *
(prob.shape[2] // grid_density))
seeds = np.zeros(prob.shape, dtype=int)
for seed, p in zip(seeds, prob):
hm = feature.hessian_matrix(p, sigma=3)
l1, l2 = feature.hessian_matrix_eigvals(*hm)
curvy = (l1 > l1_threshold)
seed[:] = multiscale_regular_seeds(curvy, npoints)
return seeds
def pipeline_compact_watershed(prob, *,
invert_prob=True,
l1_threshold=0,
grid_density=10,
compactness=0.01,
n_jobs=1):
if invert_prob:
prob = np.max(prob) - prob
seeds = joblib.Parallel(n_jobs=n_jobs)(
joblib.delayed(multiscale_seed_sequence)(p[np.newaxis, :],
l1_threshold=l1_threshold,
grid_density=grid_density)
for p in prob)
seeds = np.reshape(seeds, prob.shape)
fragments = joblib.Parallel(n_jobs=n_jobs)(
joblib.delayed(compact_watershed)(p, s, compactness=compactness)
for p, s in zip(prob, seeds)
)
fragments = np.array(fragments)
max_ids = fragments.max(axis=-1).max(axis=-1)
to_add = np.concatenate(([0], np.cumsum(max_ids)[:-1]))
fragments += to_add[:, np.newaxis, np.newaxis]
return fragments
def _euclid_dist(a, b):
return np.sqrt(np.sum((a - b) ** 2))
def compact_watershed(a, seeds, *, compactness=0.01, connectivity=1):
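"""Compact watershed of `a` grown from integer `seeds`.
Delegates to ``skimage.morphology.watershed`` with the given
``compactness``; for older scikit-image releases that lack the
``compactness`` argument, it falls back to the slower pure-Python
implementation below.
"""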
try:
a = np.copy(a)
a[np.nonzero(seeds)] = np.min(a)
result = skimage.morphology.watershed(a, seeds,
connectivity=connectivity,
compactness=compactness)
return result
except TypeError: # old version of skimage
import warnings
warnings.warn('skimage prior to 0.13; compact watershed will be slow.')
from .mergequeue import MergeQueue
visiting_queue = MergeQueue()
seeds = pad(seeds, 0).ravel()
seed_coords = np.flatnonzero(seeds)
visited = np.zeros(a.shape, dtype=bool)
visited = pad(visited, True).ravel()
ap = pad(a.astype(float), np.inf)
apr = ap.ravel()
neigh_sum = raveled_steps_to_neighbors(ap.shape, connectivity)
result = np.zeros_like(seeds)
for c in seed_coords:
visiting_queue.push([0, True, c, seeds[c],
np.unravel_index(c, ap.shape)])
while len(visiting_queue) > 0:
_, _, next_coord, next_label, next_origin = visiting_queue.pop()
if not visited[next_coord]:
visited[next_coord] = True
result[next_coord] = next_label
neighbor_coords = next_coord + neigh_sum
for coord in neighbor_coords:
if not visited[coord]:
full_coord = np.array(np.unravel_index(coord, ap.shape))
cost = (apr[coord] +
compactness*_euclid_dist(full_coord, next_origin))
visiting_queue.push([cost, True, coord, next_label,
next_origin])
return juicy_center(result.reshape(ap.shape))
def watershed_sequence(a, seeds=None, mask=None, axis=0, n_jobs=1, **kwargs):
"""Perform a watershed on a plane-by-plane basis.
See documentation for `watershed` for available kwargs.
The watershed algorithm views image intensity as "height" and finds flood
basins within it. These basins are then viewed as the different labeled
regions of an image.
This function performs watershed on an ndarray on each plane separately,
then concatenates the results.
Parameters
----------
a : numpy ndarray, arbitrary type or shape.
The input image on which to perform the watershed transform.
seeds : bool/int numpy.ndarray, same shape as a (optional, default None)
The seeds for the watershed.
mask : bool numpy.ndarray, same shape as a (optional, default None)
If provided, perform watershed only over voxels that are True in the
mask.
axis : int, {1, ..., a.ndim} (optional, default: 0)
Which axis defines the plane sequence. For example, if the input image
is 3D and axis=1, then the output will be the watershed on a[:, 0, :],
a[:, 1, :], a[:, 2, :], ... and so on.
n_jobs : int, optional
Use joblib to distribute each plane over given number of processing
cores. If -1, `multiprocessing.cpu_count` is used.
Returns
-------
ws : numpy ndarray, int type
The labeled watershed basins.
Other parameters
----------------
**kwargs : keyword arguments passed through to the `watershed` function.
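Examples
--------
A minimal sketch: two planes, each with two basins separated by a central
ridge (exact label values depend on the underlying watershed):
>>> a = np.tile([0., 1., 0.], (2, 3, 1))
>>> ws = watershed_sequence(a)
>>> ws.shape
(2, 3, 3)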
"""
if axis != 0:
a = a.swapaxes(0, axis).copy()
if seeds is not None:
seeds = seeds.swapaxes(0, axis)
if mask is not None:
mask = mask.swapaxes(0, axis)
if seeds is None:
seeds = it.repeat(None)
if mask is None:
mask = it.repeat(None)
ws = joblib.Parallel(n_jobs=n_jobs)(
joblib.delayed(watershed)(i, seeds=s, mask=m, **kwargs)
for i, s, m in zip(a, seeds, mask))
counts = list(map(np.max, ws[:-1]))
counts = np.concatenate((np.array([0]), counts))
counts = np.cumsum(counts)
for c, w in zip(counts, ws):
w += c
ws = np.concatenate([w[np.newaxis, ...] for w in ws], axis=0)
if axis != 0:
ws = ws.swapaxes(0, axis).copy()
return ws
def manual_split(probs, seg, body, seeds, connectivity=1, boundary_seeds=None):
"""Manually split a body from a segmentation using seeded watershed.
Input:
- probs: the probability of boundary in the volume given.
- seg: the current segmentation.
- body: the label to be split.
- seeds: the seeds for the splitting (should be just two labels).
[-connectivity: the connectivity to use for watershed.]
[-boundary_seeds: if not None, these locations become inf in probs.]
Value:
- the segmentation with the selected body split.
"""
struct = generate_binary_structure(seg.ndim, connectivity)
body_pixels = seg == body
bbox = find_objects(body_pixels)[0]
body_pixels = body_pixels[bbox]
body_boundary = binary_dilation(body_pixels, struct) ^ body_pixels
non_body_pixels = ~(body_pixels | body_boundary)
probs = probs.copy()[bbox]
probs[non_body_pixels] = probs.min()-1
if boundary_seeds is not None:
probs[boundary_seeds[bbox]] = probs.max()+1
probs[body_boundary] = probs.max()+1
seeds = label(seeds.astype(bool)[bbox], struct)[0]
outer_seed = seeds.max()+1 # should be 3
seeds[non_body_pixels] = outer_seed
seg_new = watershed(probs, seeds,
dams=(seg==0).any(), connectivity=connectivity, show_progress=True)
seg = seg.copy()
new_seeds = unique(seeds)[:-1]
for new_seed, new_label in zip(new_seeds, [0, body, seg.max()+1]):
seg[bbox][seg_new == new_seed] = new_label
return seg
def relabel_connected(im, connectivity=1):
"""Ensure all labels in `im` are connected.
Parameters
----------
im : array of int
The input label image.
connectivity : int in {1, ..., `im.ndim`}, optional
The connectivity used to determine if two voxels are neighbors.
Returns
-------
im_out : array of int
The relabeled image.
Examples
--------
>>> image = np.array([[1, 1, 2],
... [2, 1, 1]])
>>> im_out = relabel_connected(image)
>>> im_out
array([[1, 1, 2],
[3, 1, 1]])
"""
im_out = np.zeros_like(im)
contiguous_segments = np.empty_like(im)
structure = generate_binary_structure(im.ndim, connectivity)
curr_label = 0
labels = np.unique(im)
if labels[0] == 0:
labels = labels[1:]
for label in labels:
segment = (im == label)
n_segments = ndi.label(segment, structure,
output=contiguous_segments)
seg = segment.nonzero()
contiguous_segments[seg] += curr_label
im_out[seg] += contiguous_segments[seg]
curr_label += n_segments
return im_out
def smallest_int_dtype(number, signed=False, mindtype=None):
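"""Return the smallest integer dtype able to represent `number`.
Unsigned dtypes are preferred unless `signed` is True or `number` is
negative. (The `mindtype` argument is accepted but not used here.)
"""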
if number < 0: signed = True
if not signed:
if number <= iinfo(uint8).max:
return uint8
if number <= iinfo(uint16).max:
return uint16
if number <= iinfo(uint32).max:
return uint32
if number <= iinfo(uint64).max:
return uint64
else:
if iinfo(int8).min <= number <= iinfo(int8).max:
return int8
if iinfo(int16).min <= number <= iinfo(int16).max:
return int16
if iinfo(int32).min <= number <= iinfo(int32).max:
return int32
if iinfo(int64).min <= number <= iinfo(int64).max:
return int64
def _is_container(a):
try:
n = len(a)
return True
except TypeError:
return False
def pad(ar, vals, axes=None):
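"""Pad `ar` with the value(s) in `vals` along the given `axes`.
Each entry of `vals` adds one more one-voxel-thick layer on both ends of
every axis in `axes` (applied recursively), and the output dtype is
widened when needed so that it can hold the padding values.
"""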
if ar.size == 0:
return ar
if axes is None:
axes = list(range(ar.ndim))
if not _is_container(vals):
vals = [vals]
if not _is_container(axes):
axes = [axes]
padding_thickness = len(vals)
newshape = array(ar.shape)
for ax in axes:
newshape[ax] += 2
vals = array(vals)
if ar.dtype == double or ar.dtype == float:
new_dtype = double
elif ar.dtype == bool:
new_dtype = bool
else:
maxval = max([vals.max(), ar.max()])
minval = min([vals.min(), ar.min()])
if abs(minval) > maxval:
signed = True
extremeval = minval
else:
if minval < 0:
signed = True
else:
signed = False
extremeval = maxval
new_dtype = max([smallest_int_dtype(extremeval, signed), ar.dtype])
ar2 = zeros(newshape, dtype=new_dtype)
center = ones(newshape, dtype=bool)
for ax in axes:
ar2.swapaxes(0,ax)[0,...] = vals[0]
ar2.swapaxes(0,ax)[-1,...] = vals[0]
center.swapaxes(0,ax)[0,...] = False
center.swapaxes(0,ax)[-1,...] = False
ar2[center] = ar.ravel()
if padding_thickness == 1:
return ar2
else:
return pad(ar2, vals[1:], axes)
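# For example, pad(np.array([[1, 2], [3, 4]]), 0) wraps the array in a one-voxel
# border of zeros, returning a 4x4 array with the original 2x2 block at its centre.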
def juicy_center(ar, skinsize=1):
for i in range(ar.ndim):
ar = ar.swapaxes(0,i)
ar = ar[skinsize:-skinsize]
ar = ar.swapaxes(0,i)
return ar.copy()
def surfaces(ar, skinsize=1):
s = []
for i in range(ar.ndim):
ar = ar.swapaxes(0, i)
s.append(ar[0:skinsize].copy())
s.append(ar[-skinsize:].copy())
ar = ar.swapaxes(0, i)
return s
def hollowed(ar, skinsize=1):
"""Return a copy of ar with the center zeroed out.
'skinsize' determines how thick of a crust to leave untouched.
"""
slices = (slice(skinsize, -skinsize),)*ar.ndim
ar_out = ar.copy()
ar_out[slices] = 0
return ar_out
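# For example, hollowed(np.ones((4, 4), int)) keeps the outer ring of ones and
# zeroes the central 2x2 block.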
def build_levels_dict(a):
d = defaultdict(list)
for loc,val in enumerate(a.ravel()):
d[val].append(loc)
return d
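# For example, build_levels_dict(np.array([[0, 1], [1, 0]])) returns a
# defaultdict mapping each value to its raveled indices: {0: [0, 3], 1: [1, 2]}.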
def build_neighbors_array(ar, connectivity=1):
idxs = arange(ar.size, dtype=uint32)
return get_neighbor_idxs(ar, idxs, connectivity)
def raveled_steps_to_neighbors(shape, connectivity=1):
"""Compute the stepsize along all axes for given connectivity and shape.
Parameters
----------
shape : tuple of int
The shape of the array along which we are stepping.
connectivity : int in {1, 2, ..., ``len(shape)``}
The number of orthogonal steps we can take to reach a "neighbor".
Returns
-------
steps : array of int64
The steps needed to get to neighbors from a particular raveled
index.
Examples
--------
>>> shape = (5, 4, 9)
>>> steps = raveled_steps_to_neighbors(shape)
>>> sorted(steps)
[-36, -9, -1, 1, 9, 36]
>>> steps2 = raveled_steps_to_neighbors(shape, 2)
>>> sorted(steps2)
[-45, -37, -36, -35, -27, -10, -9, -8, -1, 1, 8, 9, 10, 27, 35, 36, 37, 45]
"""
stepsizes = np.cumprod((1,) + shape[-1:0:-1])[::-1]
steps = []
steps.extend((stepsizes, -stepsizes))
for nhops in range(2, connectivity + 1):
prod = np.array(list(it.product(*([[1, -1]] * nhops))))
multisteps = np.array(list(it.combinations(stepsizes, nhops))).T
steps.append(np.dot(prod, multisteps).ravel())
return np.concatenate(steps).astype(np.int64)
def get_neighbor_idxs(ar, idxs, connectivity=1):
"""Return indices of neighboring voxels given array, indices, connectivity.
Parameters
----------
ar : ndarray
The array in which neighbors are to be found.
idxs : int or container of int
The indices for which to find neighbors.
connectivity : int in {1, 2, ..., ``ar.ndim``}
The number of orthogonal steps allowed to be considered a
neighbor.
Returns
-------
neighbor_idxs : 2D array, shape (nidxs, nneighbors)
The neighbor indices for each index passed.
Examples
--------
>>> ar = np.arange(16).reshape((4, 4))
>>> ar
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> get_neighbor_idxs(ar, [5, 10], connectivity=1)
array([[ 9, 6, 1, 4],
[14, 11, 6, 9]])
>>> get_neighbor_idxs(ar, 9, connectivity=2)
array([[13, 10, 5, 8, 14, 12, 6, 4]])
"""
if isscalar(idxs): # in case only a single idx is given
idxs = [idxs]
idxs = array(idxs) # in case a list or other array-like is given
steps = raveled_steps_to_neighbors(ar.shape, connectivity)
return idxs[:, np.newaxis] + steps
def orphans(a):
"""Find all the segments that do not touch the volume boundary.
This function differs from agglo.Rag.orphans() in that it does not use the
graph, but rather computes orphans directly from a volume.
Parameters
----------
a : array of int
A segmented volume.
Returns
-------
orph : 1D array of int
The IDs of any segments not touching the volume boundary.
Examples
--------
>>> segs = np.array([[1, 1, 1, 2],
... [1, 3, 4, 2],
... [1, 2, 2, 2]], int)
>>> orphans(segs)
array([3, 4])
>>> orphans(segs[:2])
array([], dtype=int64)
"""
return setdiff1d(
unique(a), unique(concatenate([s.ravel() for s in surfaces(a)]))
)
def non_traversing_segments(a):
"""Find segments that enter the volume but do not leave it elsewhere.
Parameters
----------
a : array of int
A segmented volume.
Returns
-------
nt : 1D array of int
The IDs of any segments not traversing the volume.
Examples
--------
>>> segs = np.array([[1, 2, 3, 3, 4],
... [1, 2, 2, 3, 4],
... [1, 5, 5, 3, 4],
... [1, 1, 5, 3, 4]], int)
>>> non_traversing_segments(segs)
array([1, 2, 4, 5])
"""
surface = hollowed(a)
surface_ccs = measure.label(surface) + 1
surface_ccs[surface == 0] = 0
idxs = flatnonzero(surface)
pairs = np.array(list(zip(surface.ravel()[idxs],
surface_ccs.ravel()[idxs])))
unique_pairs = util.unique_rows(pairs)
surface_singles = np.bincount(unique_pairs[:, 0]) == 1
nt = np.flatnonzero(surface_singles)
return nt
def damify(a, in_place=False):
"""Add dams to a borderless segmentation."""
if not in_place:
b = a.copy()
b[seg_to_bdry(a)] = 0
return b
def seg_to_bdry(seg, connectivity=1):
"""Given a borderless segmentation, return the boundary map."""
strel = generate_binary_structure(seg.ndim, connectivity)
return maximum_filter(seg, footprint=strel) != \
minimum_filter(seg, footprint=strel)
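# For example, seg_to_bdry(np.array([[1, 1, 2], [1, 1, 2]])) is True exactly at
# the voxels that touch a different label, i.e. the two columns on either side
# of the 1|2 interface.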
def undam(seg):
""" Assign zero-dams to nearest non-zero region. """
bdrymap = seg==0
k = distance_transform_cdt(bdrymap, return_indices=True)
ind = nonzero(bdrymap.ravel())[0]
closest_sub = concatenate([i.ravel()[:,newaxis] for i in k[1]],axis=1)
closest_sub = closest_sub[ind,:]
    closest_ind = [
        dot(bdrymap.strides, i) // bdrymap.itemsize for i in closest_sub]
sp = seg.shape
seg = seg.ravel()
seg[ind] = seg[closest_ind]
seg = reshape(seg, sp)
return seg
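# For example, undam(np.array([[1, 0, 2]])) reassigns the zero (dam) voxel to its
# nearest non-zero neighbour, giving [[1, 1, 2]] or [[1, 2, 2]] depending on how
# the distance transform breaks the tie.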
if __name__ == '__main__':
pass
| bsd-3-clause |
StuartLittlefair/astropy | examples/coordinates/plot_sgr-coordinate-frame.py | 3 | 10583 | # -*- coding: utf-8 -*-
r"""
==========================================================
Create a new coordinate class (for the Sagittarius stream)
==========================================================
This document describes in detail how to subclass and define a custom spherical
coordinate frame, as discussed in :ref:`astropy-coordinates-design` and the
docstring for `~astropy.coordinates.BaseCoordinateFrame`. In this example, we
will define a coordinate system defined by the plane of orbit of the Sagittarius
Dwarf Galaxy (hereafter Sgr; as defined in Majewski et al. 2003). The Sgr
coordinate system is often referred to in terms of two angular coordinates,
:math:`\Lambda,B`.
To do this, we need to define a subclass of
`~astropy.coordinates.BaseCoordinateFrame` that knows the names and units of the
coordinate system angles in each of the supported representations. In this case
we support `~astropy.coordinates.SphericalRepresentation` with "Lambda" and
"Beta". Then we have to define the transformation from this coordinate system to
some other built-in system. Here we will use Galactic coordinates, represented
by the `~astropy.coordinates.Galactic` class.
See Also
--------
* The `gala package <http://gala.adrian.pw/>`_, which defines a number of
Astropy coordinate frames for stellar stream coordinate systems.
* Majewski et al. 2003, "A Two Micron All Sky Survey View of the Sagittarius
Dwarf Galaxy. I. Morphology of the Sagittarius Core and Tidal Arms",
https://arxiv.org/abs/astro-ph/0304198
* Law & Majewski 2010, "The Sagittarius Dwarf Galaxy: A Model for Evolution in a
Triaxial Milky Way Halo", https://arxiv.org/abs/1003.1132
* David Law's Sgr info page https://www.stsci.edu/~dlaw/Sgr/
*By: Adrian Price-Whelan, Erik Tollerud*
*License: BSD*
"""
##############################################################################
# Set up numpy, matplotlib, and use a nicer set of plot parameters:
import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
##############################################################################
# Import the packages necessary for coordinates
from astropy.coordinates import frame_transform_graph
from astropy.coordinates.matrix_utilities import rotation_matrix, matrix_product, matrix_transpose
import astropy.coordinates as coord
import astropy.units as u
##############################################################################
# The first step is to create a new class, which we'll call
# ``Sagittarius`` and make it a subclass of
# `~astropy.coordinates.BaseCoordinateFrame`:
class Sagittarius(coord.BaseCoordinateFrame):
"""
A Heliocentric spherical coordinate system defined by the orbit
of the Sagittarius dwarf galaxy, as described in
https://ui.adsabs.harvard.edu/abs/2003ApJ...599.1082M
and further explained in
https://www.stsci.edu/~dlaw/Sgr/.
Parameters
----------
representation : `~astropy.coordinates.BaseRepresentation` or None
A representation object or None to have no data (or use the other keywords)
Lambda : `~astropy.coordinates.Angle`, optional, must be keyword
The longitude-like angle corresponding to Sagittarius' orbit.
Beta : `~astropy.coordinates.Angle`, optional, must be keyword
The latitude-like angle corresponding to Sagittarius' orbit.
distance : `Quantity`, optional, must be keyword
The Distance for this object along the line-of-sight.
pm_Lambda_cosBeta : :class:`~astropy.units.Quantity`, optional, must be keyword
The proper motion along the stream in ``Lambda`` (including the
``cos(Beta)`` factor) for this object (``pm_Beta`` must also be given).
pm_Beta : :class:`~astropy.units.Quantity`, optional, must be keyword
        The proper motion in ``Beta`` for this object (``pm_Lambda_cosBeta``
        must also be given).
radial_velocity : :class:`~astropy.units.Quantity`, optional, must be keyword
The radial velocity of this object.
"""
default_representation = coord.SphericalRepresentation
default_differential = coord.SphericalCosLatDifferential
frame_specific_representation_info = {
coord.SphericalRepresentation: [
coord.RepresentationMapping('lon', 'Lambda'),
coord.RepresentationMapping('lat', 'Beta'),
coord.RepresentationMapping('distance', 'distance')]
}
##############################################################################
# Breaking this down line-by-line, we define the class as a subclass of
# `~astropy.coordinates.BaseCoordinateFrame`. Then we include a descriptive
# docstring. The final lines are class-level attributes that specify the
# default representation for the data, default differential for the velocity
# information, and mappings from the attribute names used by representation
# objects to the names that are to be used by the ``Sagittarius`` frame. In this
# case we override the names in the spherical representations but don't do
# anything with other representations like cartesian or cylindrical.
#
# Next we have to define the transformation from this coordinate system to some
# other built-in coordinate system; we will use Galactic coordinates. We can do
# this by defining functions that return transformation matrices, or by simply
# defining a function that accepts a coordinate and returns a new coordinate in
# the new system. Because the transformation to the Sagittarius coordinate
# system is just a spherical rotation from Galactic coordinates, we'll just
# define a function that returns this matrix. We'll start by constructing the
# transformation matrix using pre-determined Euler angles and the
# ``rotation_matrix`` helper function:
SGR_PHI = (180 + 3.75) * u.degree # Euler angles (from Law & Majewski 2010)
SGR_THETA = (90 - 13.46) * u.degree
SGR_PSI = (180 + 14.111534) * u.degree
# Generate the rotation matrix using the x-convention (see Goldstein)
D = rotation_matrix(SGR_PHI, "z")
C = rotation_matrix(SGR_THETA, "x")
B = rotation_matrix(SGR_PSI, "z")
A = np.diag([1.,1.,-1.])
SGR_MATRIX = matrix_product(A, B, C, D)
##############################################################################
# Since we already constructed the transformation (rotation) matrix above, and
# the inverse of a rotation matrix is just its transpose, the required
# transformation functions are very simple:
@frame_transform_graph.transform(coord.StaticMatrixTransform, coord.Galactic, Sagittarius)
def galactic_to_sgr():
""" Compute the transformation matrix from Galactic spherical to
heliocentric Sgr coordinates.
"""
return SGR_MATRIX
##############################################################################
# The decorator ``@frame_transform_graph.transform(coord.StaticMatrixTransform,
# coord.Galactic, Sagittarius)`` registers this function on the
# ``frame_transform_graph`` as a coordinate transformation. Inside the function,
# we simply return the previously defined rotation matrix.
#
# We then register the inverse transformation by using the transpose of the
# rotation matrix (which is faster to compute than the inverse):
@frame_transform_graph.transform(coord.StaticMatrixTransform, Sagittarius, coord.Galactic)
def sgr_to_galactic():
""" Compute the transformation matrix from heliocentric Sgr coordinates to
spherical Galactic.
"""
return matrix_transpose(SGR_MATRIX)
##############################################################################
# Now that we've registered these transformations between ``Sagittarius`` and
# `~astropy.coordinates.Galactic`, we can transform between *any* coordinate
# system and ``Sagittarius`` (as long as the other system has a path to
# transform to `~astropy.coordinates.Galactic`). For example, to transform from
# ICRS coordinates to ``Sagittarius``, we would do:
icrs = coord.SkyCoord(280.161732*u.degree, 11.91934*u.degree, frame='icrs')
sgr = icrs.transform_to(Sagittarius)
print(sgr)
##############################################################################
# Or, to transform from the ``Sagittarius`` frame to ICRS coordinates (in this
# case, a line along the ``Sagittarius`` x-y plane):
sgr = coord.SkyCoord(Lambda=np.linspace(0, 2*np.pi, 128)*u.radian,
Beta=np.zeros(128)*u.radian, frame='sagittarius')
icrs = sgr.transform_to(coord.ICRS)
print(icrs)
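##############################################################################
# As a quick sanity check (an extra step beyond what the transformation
# machinery requires), transforming these points back to ``Sagittarius`` should
# recover latitudes ``Beta`` that are numerically zero:
sgr_check = icrs.transform_to(Sagittarius)
print(np.allclose(sgr_check.Beta.radian, 0, atol=1e-10))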
##############################################################################
# As an example, we'll now plot the points in both coordinate systems:
fig, axes = plt.subplots(2, 1, figsize=(8, 10),
subplot_kw={'projection': 'aitoff'})
axes[0].set_title("Sagittarius")
axes[0].plot(sgr.Lambda.wrap_at(180*u.deg).radian, sgr.Beta.radian,
linestyle='none', marker='.')
axes[1].set_title("ICRS")
axes[1].plot(icrs.ra.wrap_at(180*u.deg).radian, icrs.dec.radian,
linestyle='none', marker='.')
plt.show()
##############################################################################
# This particular transformation is just a spherical rotation, which is a
# special case of an Affine transformation with no vector offset. The
# transformation of velocity components is therefore natively supported as
# well:
sgr = coord.SkyCoord(Lambda=np.linspace(0, 2*np.pi, 128)*u.radian,
Beta=np.zeros(128)*u.radian,
pm_Lambda_cosBeta=np.random.uniform(-5, 5, 128)*u.mas/u.yr,
pm_Beta=np.zeros(128)*u.mas/u.yr,
frame='sagittarius')
icrs = sgr.transform_to(coord.ICRS)
print(icrs)
fig, axes = plt.subplots(3, 1, figsize=(8, 10), sharex=True)
axes[0].set_title("Sagittarius")
axes[0].plot(sgr.Lambda.degree,
sgr.pm_Lambda_cosBeta.value,
linestyle='none', marker='.')
axes[0].set_xlabel(r"$\Lambda$ [deg]")
axes[0].set_ylabel(
fr"$\mu_\Lambda \, \cos B$ [{sgr.pm_Lambda_cosBeta.unit.to_string('latex_inline')}]")
axes[1].set_title("ICRS")
axes[1].plot(icrs.ra.degree, icrs.pm_ra_cosdec.value,
linestyle='none', marker='.')
axes[1].set_ylabel(
fr"$\mu_\alpha \, \cos\delta$ [{icrs.pm_ra_cosdec.unit.to_string('latex_inline')}]")
axes[2].set_title("ICRS")
axes[2].plot(icrs.ra.degree, icrs.pm_dec.value,
linestyle='none', marker='.')
axes[2].set_xlabel("RA [deg]")
axes[2].set_ylabel(
fr"$\mu_\delta$ [{icrs.pm_dec.unit.to_string('latex_inline')}]")
plt.show()
| bsd-3-clause |
jarn0ld/gnuradio | gr-fec/python/fec/polar/channel_construction.py | 17 | 4537 | #!/usr/bin/env python
#
# Copyright 2015 Free Software Foundation, Inc.
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
'''
[0] Erdal Arikan: 'Channel Polarization: A Method for Constructing Capacity-Achieving Codes for Symmetric Binary-Input Memoryless Channels', 2009
foundational paper for polar codes.
'''
from channel_construction_bec import calculate_bec_channel_capacities
from channel_construction_bec import design_snr_to_bec_eta
from channel_construction_bec import bhattacharyya_bounds
from channel_construction_awgn import tal_vardy_tpm_algorithm
from helper_functions import *
Z_PARAM_FIRST_HEADER_LINE = "Bhattacharyya parameters (Z-parameters) for a polar code"
def get_frozen_bit_indices_from_capacities(chan_caps, nfrozen):
indexes = np.array([], dtype=int)
while indexes.size < nfrozen:
index = np.argmin(chan_caps)
indexes = np.append(indexes, index)
chan_caps[index] = 2.0 # make absolutely sure value is out of range!
return np.sort(indexes)
def get_frozen_bit_indices_from_z_parameters(z_params, nfrozen):
indexes = np.array([], dtype=int)
while indexes.size < nfrozen:
index = np.argmax(z_params)
indexes = np.append(indexes, index)
z_params[index] = -1.0
return np.sort(indexes)
def get_bec_frozen_indices(nblock, kfrozen, eta):
bec_caps = calculate_bec_channel_capacities(eta, nblock)
positions = get_frozen_bit_indices_from_capacities(bec_caps, kfrozen)
return positions
def get_frozen_bit_mask(frozen_indices, block_size):
frozen_mask = np.zeros(block_size, dtype=int)
frozen_mask[frozen_indices] = 1
return frozen_mask
def frozen_bit_positions(block_size, info_size, design_snr=0.0):
if not design_snr > -1.5917:
        print('bad value for design_snr, must be > -1.5917! default=0.0')
design_snr = 0.0
eta = design_snr_to_bec_eta(design_snr)
return get_bec_frozen_indices(block_size, block_size - info_size, eta)
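# Illustrative call (hypothetical parameters): frozen-bit positions for an
# (N=8, K=4) polar code designed at 0 dB:
#     frozen_bit_positions(block_size=8, info_size=4, design_snr=0.0)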
def generate_filename(block_size, design_snr, mu):
filename = "polar_code_z_parameters_N" + str(int(block_size))
filename += "_SNR" + str(float(design_snr)) + "_MU" + str(int(mu)) + ".polar"
return filename
def default_dir():
dir_def = "~/.gnuradio/polar/"
import os
path = os.path.expanduser(dir_def)
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
return path
def save_z_parameters(z_params, block_size, design_snr, mu, alt_construction_method='Tal-Vardy algorithm'):
path = default_dir()
filename = generate_filename(block_size, design_snr, mu)
header = Z_PARAM_FIRST_HEADER_LINE + "\n"
header += "Channel construction method: " + alt_construction_method + "\n"
header += "Parameters:\n"
header += "block_size=" + str(block_size) + "\n"
header += "design_snr=" + str(design_snr) + "\n"
header += "mu=" + str(mu)
np.savetxt(path + filename, z_params, header=header)
def load_z_parameters(block_size, design_snr, mu):
path = default_dir()
filename = generate_filename(block_size, design_snr, mu)
full_file = path + filename
import os
if not os.path.isfile(full_file):
z_params = tal_vardy_tpm_algorithm(block_size, design_snr, mu)
save_z_parameters(z_params, block_size, design_snr, mu)
z_params = np.loadtxt(full_file)
return z_params
def main():
np.set_printoptions(precision=3, linewidth=150)
    print('channel construction Bhattacharyya bounds by Arikan')
n = 10
m = 2 ** n
k = m // 2
design_snr = 0.0
mu = 32
z_params = load_z_parameters(m, design_snr, mu)
z_bounds = bhattacharyya_bounds(design_snr, m)
print(z_params[-10:])
if 0:
import matplotlib.pyplot as plt
plt.plot(z_params)
plt.plot(z_bounds)
plt.show()
if __name__ == '__main__':
main()
| gpl-3.0 |
jreback/pandas | pandas/compat/_optional.py | 1 | 3909 | import distutils.version
import importlib
import types
import warnings
# Update install.rst when updating versions!
VERSIONS = {
"bs4": "4.6.0",
"bottleneck": "1.2.1",
"fsspec": "0.7.4",
"fastparquet": "0.3.2",
"gcsfs": "0.6.0",
"lxml.etree": "4.3.0",
"matplotlib": "2.2.3",
"numexpr": "2.6.8",
"odfpy": "1.3.0",
"openpyxl": "2.5.7",
"pandas_gbq": "0.12.0",
"pyarrow": "0.15.0",
"pytest": "5.0.1",
"pyxlsb": "1.0.6",
"s3fs": "0.4.0",
"scipy": "1.2.0",
"sqlalchemy": "1.2.8",
"tables": "3.5.1",
"tabulate": "0.8.7",
"xarray": "0.12.3",
"xlrd": "1.2.0",
"xlwt": "1.3.0",
"xlsxwriter": "1.0.2",
"numba": "0.46.0",
}
# A mapping from import name to package name (on PyPI) for packages where
# these two names are different.
INSTALL_MAPPING = {
"bs4": "beautifulsoup4",
"bottleneck": "Bottleneck",
"lxml.etree": "lxml",
"odf": "odfpy",
"pandas_gbq": "pandas-gbq",
"sqlalchemy": "SQLAlchemy",
"jinja2": "Jinja2",
}
def _get_version(module: types.ModuleType) -> str:
version = getattr(module, "__version__", None)
if version is None:
# xlrd uses a capitalized attribute name
version = getattr(module, "__VERSION__", None)
if version is None:
raise ImportError(f"Can't determine version for {module.__name__}")
return version
def import_optional_dependency(
name: str, extra: str = "", raise_on_missing: bool = True, on_version: str = "raise"
):
"""
Import an optional dependency.
By default, if a dependency is missing an ImportError with a nice
message will be raised. If a dependency is present, but too old,
we raise.
Parameters
----------
name : str
The module name. This should be top-level only, so that the
version may be checked.
extra : str
Additional text to include in the ImportError message.
raise_on_missing : bool, default True
Whether to raise if the optional dependency is not found.
When False and the module is not present, None is returned.
on_version : str {'raise', 'warn'}
What to do when a dependency's version is too old.
* raise : Raise an ImportError
* warn : Warn that the version is too old. Returns None
* ignore: Return the module, even if the version is too old.
It's expected that users validate the version locally when
using ``on_version="ignore"`` (see. ``io/html.py``)
Returns
-------
maybe_module : Optional[ModuleType]
The imported module, when found and the version is correct.
None is returned when the package is not found and `raise_on_missing`
is False, or when the package's version is too old and `on_version`
is ``'warn'``.
"""
package_name = INSTALL_MAPPING.get(name)
install_name = package_name if package_name is not None else name
msg = (
f"Missing optional dependency '{install_name}'. {extra} "
f"Use pip or conda to install {install_name}."
)
try:
module = importlib.import_module(name)
except ImportError:
if raise_on_missing:
raise ImportError(msg) from None
else:
return None
minimum_version = VERSIONS.get(name)
if minimum_version:
version = _get_version(module)
if distutils.version.LooseVersion(version) < minimum_version:
assert on_version in {"warn", "raise", "ignore"}
msg = (
f"Pandas requires version '{minimum_version}' or newer of '{name}' "
f"(version '{version}' currently installed)."
)
if on_version == "warn":
warnings.warn(msg, UserWarning)
return None
elif on_version == "raise":
raise ImportError(msg)
return module
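# Illustrative usage (these particular calls are examples, not calls made in
# this module):
#     bs4 = import_optional_dependency("bs4", extra="Needed for read_html().")
#     etree = import_optional_dependency("lxml.etree", raise_on_missing=False)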
| bsd-3-clause |
evidation-health/bokeh | bokeh/compat/mplexporter/renderers/base.py | 44 | 14355 | import warnings
import itertools
from contextlib import contextmanager
import numpy as np
from matplotlib import transforms
from .. import utils
from .. import _py3k_compat as py3k
class Renderer(object):
@staticmethod
def ax_zoomable(ax):
return bool(ax and ax.get_navigate())
@staticmethod
def ax_has_xgrid(ax):
        return bool(ax and ax.xaxis._gridOnMajor and ax.xaxis.get_gridlines())
@staticmethod
def ax_has_ygrid(ax):
return bool(ax and ax.yaxis._gridOnMajor and ax.yaxis.get_gridlines())
@property
def current_ax_zoomable(self):
return self.ax_zoomable(self._current_ax)
@property
def current_ax_has_xgrid(self):
return self.ax_has_xgrid(self._current_ax)
@property
def current_ax_has_ygrid(self):
return self.ax_has_ygrid(self._current_ax)
@contextmanager
def draw_figure(self, fig, props):
if hasattr(self, "_current_fig") and self._current_fig is not None:
warnings.warn("figure embedded in figure: something is wrong")
self._current_fig = fig
self._fig_props = props
self.open_figure(fig=fig, props=props)
yield
self.close_figure(fig=fig)
self._current_fig = None
self._fig_props = {}
@contextmanager
def draw_axes(self, ax, props):
if hasattr(self, "_current_ax") and self._current_ax is not None:
warnings.warn("axes embedded in axes: something is wrong")
self._current_ax = ax
self._ax_props = props
self.open_axes(ax=ax, props=props)
yield
self.close_axes(ax=ax)
self._current_ax = None
self._ax_props = {}
@contextmanager
def draw_legend(self, legend, props):
self._current_legend = legend
self._legend_props = props
self.open_legend(legend=legend, props=props)
yield
self.close_legend(legend=legend)
self._current_legend = None
self._legend_props = {}
# Following are the functions which should be overloaded in subclasses
def open_figure(self, fig, props):
"""
Begin commands for a particular figure.
Parameters
----------
fig : matplotlib.Figure
The Figure which will contain the ensuing axes and elements
props : dictionary
The dictionary of figure properties
"""
pass
def close_figure(self, fig):
"""
Finish commands for a particular figure.
Parameters
----------
fig : matplotlib.Figure
The figure which is finished being drawn.
"""
pass
def open_axes(self, ax, props):
"""
Begin commands for a particular axes.
Parameters
----------
ax : matplotlib.Axes
The Axes which will contain the ensuing axes and elements
props : dictionary
The dictionary of axes properties
"""
pass
def close_axes(self, ax):
"""
Finish commands for a particular axes.
Parameters
----------
ax : matplotlib.Axes
The Axes which is finished being drawn.
"""
pass
def open_legend(self, legend, props):
"""
        Begin commands for a particular legend.
Parameters
----------
legend : matplotlib.legend.Legend
The Legend that will contain the ensuing elements
props : dictionary
The dictionary of legend properties
"""
pass
def close_legend(self, legend):
"""
Finish commands for a particular legend.
Parameters
----------
legend : matplotlib.legend.Legend
The Legend which is finished being drawn
"""
pass
def draw_marked_line(self, data, coordinates, linestyle, markerstyle,
label, mplobj=None):
"""Draw a line that also has markers.
If this isn't reimplemented by a renderer object, by default, it will
make a call to BOTH draw_line and draw_markers when both markerstyle
and linestyle are not None in the same Line2D object.
"""
if linestyle is not None:
self.draw_line(data, coordinates, linestyle, label, mplobj)
if markerstyle is not None:
self.draw_markers(data, coordinates, markerstyle, label, mplobj)
def draw_line(self, data, coordinates, style, label, mplobj=None):
"""
Draw a line. By default, draw the line via the draw_path() command.
Some renderers might wish to override this and provide more
fine-grained behavior.
In matplotlib, lines are generally created via the plt.plot() command,
though this command also can create marker collections.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the line.
mplobj : matplotlib object
the matplotlib plot element which generated this line
"""
pathcodes = ['M'] + (data.shape[0] - 1) * ['L']
pathstyle = dict(facecolor='none', **style)
pathstyle['edgecolor'] = pathstyle.pop('color')
pathstyle['edgewidth'] = pathstyle.pop('linewidth')
self.draw_path(data=data, coordinates=coordinates,
pathcodes=pathcodes, style=pathstyle, mplobj=mplobj)
@staticmethod
def _iter_path_collection(paths, path_transforms, offsets, styles):
"""Build an iterator over the elements of the path collection"""
N = max(len(paths), len(offsets))
if not path_transforms:
path_transforms = [np.eye(3)]
edgecolor = styles['edgecolor']
if np.size(edgecolor) == 0:
edgecolor = ['none']
facecolor = styles['facecolor']
if np.size(facecolor) == 0:
facecolor = ['none']
elements = [paths, path_transforms, offsets,
edgecolor, styles['linewidth'], facecolor]
it = itertools
return it.islice(py3k.zip(*py3k.map(it.cycle, elements)), N)
def draw_path_collection(self, paths, path_coordinates, path_transforms,
offsets, offset_coordinates, offset_order,
styles, mplobj=None):
"""
Draw a collection of paths. The paths, offsets, and styles are all
iterables, and the number of paths is max(len(paths), len(offsets)).
By default, this is implemented via multiple calls to the draw_path()
function. For efficiency, Renderers may choose to customize this
implementation.
Examples of path collections created by matplotlib are scatter plots,
histograms, contour plots, and many others.
Parameters
----------
paths : list
list of tuples, where each tuple has two elements:
(data, pathcodes). See draw_path() for a description of these.
path_coordinates: string
the coordinates code for the paths, which should be either
'data' for data coordinates, or 'figure' for figure (pixel)
coordinates.
path_transforms: array_like
an array of shape (*, 3, 3), giving a series of 2D Affine
transforms for the paths. These encode translations, rotations,
and scalings in the standard way.
offsets: array_like
An array of offsets of shape (N, 2)
offset_coordinates : string
the coordinates code for the offsets, which should be either
'data' for data coordinates, or 'figure' for figure (pixel)
coordinates.
offset_order : string
either "before" or "after". This specifies whether the offset
is applied before the path transform, or after. The matplotlib
backend equivalent is "before"->"data", "after"->"screen".
styles: dictionary
A dictionary in which each value is a list of length N, containing
the style(s) for the paths.
mplobj : matplotlib object
the matplotlib plot element which generated this collection
"""
if offset_order == "before":
raise NotImplementedError("offset before transform")
for tup in self._iter_path_collection(paths, path_transforms,
offsets, styles):
(path, path_transform, offset, ec, lw, fc) = tup
vertices, pathcodes = path
path_transform = transforms.Affine2D(path_transform)
vertices = path_transform.transform(vertices)
# This is a hack:
if path_coordinates == "figure":
path_coordinates = "points"
style = {"edgecolor": utils.color_to_hex(ec),
"facecolor": utils.color_to_hex(fc),
"edgewidth": lw,
"dasharray": "10,0",
"alpha": styles['alpha'],
"zorder": styles['zorder']}
self.draw_path(data=vertices, coordinates=path_coordinates,
pathcodes=pathcodes, style=style, offset=offset,
offset_coordinates=offset_coordinates,
mplobj=mplobj)
def draw_markers(self, data, coordinates, style, label, mplobj=None):
"""
Draw a set of markers. By default, this is done by repeatedly
calling draw_path(), but renderers should generally overload
this method to provide a more efficient implementation.
In matplotlib, markers are created using the plt.plot() command.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the markers.
mplobj : matplotlib object
the matplotlib plot element which generated this marker collection
"""
vertices, pathcodes = style['markerpath']
pathstyle = dict((key, style[key]) for key in ['alpha', 'edgecolor',
'facecolor', 'zorder',
'edgewidth'])
pathstyle['dasharray'] = "10,0"
for vertex in data:
self.draw_path(data=vertices, coordinates="points",
pathcodes=pathcodes, style=pathstyle,
offset=vertex, offset_coordinates=coordinates,
mplobj=mplobj)
def draw_text(self, text, position, coordinates, style,
text_type=None, mplobj=None):
"""
Draw text on the image.
Parameters
----------
text : string
The text to draw
position : tuple
The (x, y) position of the text
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the text.
text_type : string or None
if specified, a type of text such as "xlabel", "ylabel", "title"
mplobj : matplotlib object
the matplotlib plot element which generated this text
"""
raise NotImplementedError()
def draw_path(self, data, coordinates, pathcodes, style,
offset=None, offset_coordinates="data", mplobj=None):
"""
Draw a path.
In matplotlib, paths are created by filled regions, histograms,
contour plots, patches, etc.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
'figure' for figure (pixel) coordinates, or "points" for raw
point coordinates (useful in conjunction with offsets, below).
pathcodes : list
A list of single-character SVG pathcodes associated with the data.
Path codes are one of ['M', 'm', 'L', 'l', 'Q', 'q', 'T', 't',
'S', 's', 'C', 'c', 'Z', 'z']
See the SVG specification for details. Note that some path codes
consume more than one datapoint (while 'Z' consumes none), so
in general, the length of the pathcodes list will not be the same
as that of the data array.
style : dictionary
a dictionary specifying the appearance of the line.
offset : list (optional)
the (x, y) offset of the path. If not given, no offset will
be used.
offset_coordinates : string (optional)
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
mplobj : matplotlib object
the matplotlib plot element which generated this path
"""
raise NotImplementedError()
def draw_image(self, imdata, extent, coordinates, style, mplobj=None):
"""
Draw an image.
Parameters
----------
imdata : string
base64 encoded png representation of the image
extent : list
the axes extent of the image: [xmin, xmax, ymin, ymax]
coordinates: string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the image
mplobj : matplotlib object
the matplotlib plot object which generated this image
"""
raise NotImplementedError()
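# A minimal subclass sketch (illustrative only; the class name and behaviour are
# not part of mplexporter): a renderer that simply records which draw calls
# were made.
#
#     class RecordingRenderer(Renderer):
#         def __init__(self):
#             self.calls = []
#
#         def draw_path(self, data, coordinates, pathcodes, style,
#                       offset=None, offset_coordinates="data", mplobj=None):
#             self.calls.append(("path", len(data)))
#
#         def draw_text(self, text, position, coordinates, style,
#                       text_type=None, mplobj=None):
#             self.calls.append(("text", text))
#
#         def draw_image(self, imdata, extent, coordinates, style, mplobj=None):
#             self.calls.append(("image", extent))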
| bsd-3-clause |
rsivapr/scikit-learn | sklearn/linear_model/tests/test_logistic.py | 16 | 5067 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import raises
from sklearn.linear_model import logistic
from sklearn import datasets
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = datasets.load_iris()
def check_predictions(clf, X, y):
"""Check that the model is able to fit the classification data"""
n_samples = len(y)
classes = np.unique(y)
n_classes = classes.shape[0]
predicted = clf.fit(X, y).predict(X)
assert_array_equal(clf.classes_, classes)
assert_equal(predicted.shape, (n_samples,))
assert_array_equal(predicted, y)
probabilities = clf.predict_proba(X)
assert_equal(probabilities.shape, (n_samples, n_classes))
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
"""Simple sanity check on a 2 classes dataset
Make sure it predicts the correct result on simple datasets.
"""
check_predictions(logistic.LogisticRegression(random_state=0), X, Y1)
check_predictions(logistic.LogisticRegression(random_state=0), X_sp, Y1)
check_predictions(logistic.LogisticRegression(C=100, random_state=0),
X, Y1)
check_predictions(logistic.LogisticRegression(C=100, random_state=0),
X_sp, Y1)
check_predictions(logistic.LogisticRegression(fit_intercept=False,
random_state=0), X, Y1)
check_predictions(logistic.LogisticRegression(fit_intercept=False,
random_state=0), X_sp, Y1)
def test_error():
"""Test for appropriate exception on errors"""
assert_raises(ValueError, logistic.LogisticRegression(C=-1).fit, X, Y1)
def test_predict_3_classes():
check_predictions(logistic.LogisticRegression(C=10), X, Y2)
check_predictions(logistic.LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
"""Test logistic regression with the iris dataset"""
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = logistic.LogisticRegression(C=len(iris.data)).fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert_greater(np.mean(pred == target), .95)
probabilities = clf.predict_proba(iris.data)
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert_greater(np.mean(pred == target), .95)
def test_sparsify():
"""Test sparsify and densify members."""
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = logistic.LogisticRegression(random_state=0).fit(iris.data, target)
pred_d_d = clf.decision_function(iris.data)
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred_s_d = clf.decision_function(iris.data)
sp_data = sp.coo_matrix(iris.data)
pred_s_s = clf.decision_function(sp_data)
clf.densify()
pred_d_s = clf.decision_function(sp_data)
assert_array_almost_equal(pred_d_d, pred_s_d)
assert_array_almost_equal(pred_d_d, pred_s_s)
assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
"""Test that an exception is raised on inconsistent input"""
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
y_[0] = 0
clf = logistic.LogisticRegression(random_state=0)
# Wrong dimensions for training data
y_wrong = y_[:-1]
assert_raises(ValueError, clf.fit, X, y_wrong)
# Wrong dimensions for test data
assert_raises(ValueError, clf.fit(X_, y_).predict,
rng.random_sample((3, 12)))
def test_write_parameters():
"""Test that we can write to coef_ and intercept_"""
#rng = np.random.RandomState(0)
#X = rng.random_sample((5, 10))
#y = np.ones(X.shape[0])
clf = logistic.LogisticRegression(random_state=0)
clf.fit(X, Y1)
clf.coef_[:] = 0
clf.intercept_[:] = 0
assert_array_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
"""Test proper NaN handling.
Regression test for Issue #252: fit used to go into an infinite loop.
"""
Xnan = np.array(X, dtype=np.float64)
Xnan[0, 1] = np.nan
logistic.LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_liblinear_random_state():
X, y = datasets.make_classification(n_samples=20)
lr1 = logistic.LogisticRegression(random_state=0)
lr1.fit(X, y)
lr2 = logistic.LogisticRegression(random_state=0)
lr2.fit(X, y)
assert_array_almost_equal(lr1.coef_, lr2.coef_)
| bsd-3-clause |
AlexRobson/scikit-learn | sklearn/gaussian_process/gaussian_process.py | 83 | 34544 | # -*- coding: utf-8 -*-
# Author: Vincent Dubourg <[email protected]>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import check_random_state, check_array, check_X_y
from ..utils.validation import check_is_fitted
from . import regression_models as regression
from . import correlation_models as correlation
MACHINE_EPSILON = np.finfo(np.double).eps
def l1_cross_distances(X):
"""
Computes the nonzero componentwise L1 cross-distances between the vectors
in X.
Parameters
----------
X: array_like
An array with shape (n_samples, n_features)
Returns
-------
D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise L1 cross-distances.
ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
The indices i and j of the vectors in X associated to the cross-
distances in D: D[k] = np.abs(X[ij[k, 0]] - Y[ij[k, 1]]).
"""
X = check_array(X)
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int)
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 0] = k
ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
return D, ij
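# For example, for X = [[0.], [1.], [3.]] this returns D = [[1.], [3.], [2.]]
# and ij = [[0, 1], [0, 2], [1, 2]].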
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The Gaussian Process model class.
Read more in the :ref:`User Guide <gaussian_process>`.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
MSE and only plan to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
    'Welch' optimizer is due to Welch et al., see reference [WBSWM1992]_.
It consists in iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state: integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
The presentation implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.
Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)
http://www2.imm.dtu.dk/~hbn/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/pss/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
storage_mode='full', verbose=False, theta0=1e-1,
thetaL=None, thetaU=None, optimizer='fmin_cobyla',
random_start=1, normalize=True,
nugget=10. * MACHINE_EPSILON, random_state=None):
self.regr = regr
self.corr = corr
self.beta0 = beta0
self.storage_mode = storage_mode
self.verbose = verbose
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self.normalize = normalize
self.nugget = nugget
self.optimizer = optimizer
self.random_start = random_start
self.random_state = random_state
def fit(self, X, y):
"""
The Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) or shape (n_samples, n_targets)
with the observations of the output to be predicted.
Returns
-------
gp : self
A fitted Gaussian Process model object awaiting data to perform
predictions.
"""
# Run input checks
self._check_params()
self.random_state = check_random_state(self.random_state)
# Force data to 2D numpy.array
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
self.y_ndim_ = y.ndim
if y.ndim == 1:
y = y[:, np.newaxis]
# Check shapes of DOE & observations
n_samples, n_features = X.shape
_, n_targets = y.shape
# Run input checks
self._check_params(n_samples)
# Normalize data or don't
if self.normalize:
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
X_std[X_std == 0.] = 1.
y_std[y_std == 0.] = 1.
# center and scale X if necessary
X = (X - X_mean) / X_std
y = (y - y_mean) / y_std
else:
X_mean = np.zeros(1)
X_std = np.ones(1)
y_mean = np.zeros(1)
y_std = np.ones(1)
# Calculate matrix of distances D between samples
D, ij = l1_cross_distances(X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple input features cannot have the same"
" target value.")
# Regression matrix and parameters
F = self.regr(X)
n_samples_F = F.shape[0]
if F.ndim > 1:
p = F.shape[1]
else:
p = 1
if n_samples_F != n_samples:
raise Exception("Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model.")
if p > n_samples_F:
raise Exception(("Ordinary least squares problem is undetermined "
"n_samples=%d must be greater than the "
"regression model size p=%d.") % (n_samples, p))
if self.beta0 is not None:
if self.beta0.shape[0] != p:
raise Exception("Shapes of beta0 and F do not match.")
# Set attributes
self.X = X
self.y = y
self.D = D
self.ij = ij
self.F = F
self.X_mean, self.X_std = X_mean, X_std
self.y_mean, self.y_std = y_mean, y_std
# Determine Gaussian Process model parameters
if self.thetaL is not None and self.thetaU is not None:
# Maximum Likelihood Estimation of the parameters
if self.verbose:
print("Performing Maximum Likelihood Estimation of the "
"autocorrelation parameters...")
self.theta_, self.reduced_likelihood_function_value_, par = \
self._arg_max_reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad parameter region. "
"Try increasing upper bound")
else:
# Given parameters
if self.verbose:
print("Given autocorrelation parameters. "
"Computing Gaussian Process model parameters...")
self.theta_ = self.theta0
self.reduced_likelihood_function_value_, par = \
self.reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad point. Try increasing theta0.")
self.beta = par['beta']
self.gamma = par['gamma']
self.sigma2 = par['sigma2']
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
if self.storage_mode == 'light':
# Delete heavy data (it will be computed again if required)
# (it is required only when MSE is wanted in self.predict)
if self.verbose:
print("Light storage mode specified. "
"Flushing autocorrelation matrix...")
self.D = None
self.ij = None
self.F = None
self.C = None
self.Ft = None
self.G = None
return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
Default assumes evalMSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
y : array_like, shape (n_samples, ) or (n_samples, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
"""
check_is_fitted(self, "X")
# Check input shapes
X = check_array(X)
n_eval, _ = X.shape
n_samples, n_features = self.X.shape
n_samples_y, n_targets = self.y.shape
# Run input checks
self._check_params(n_samples)
if X.shape[1] != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the number of features used "
"for fit() "
"which is %d.") % (X.shape[1], n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)
if self.y_ndim_ == 1:
y = y.ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
"at instantiation. Need to recompute "
"autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = linalg.solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = linalg.solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T,
lower=True)
else:
# Ordinary Kriging
u = np.zeros((n_targets, n_eval))
MSE = np.dot(self.sigma2.reshape(n_targets, 1),
(1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))[np.newaxis, :])
MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
if self.y_ndim_ == 1:
MSE = MSE.ravel()
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
                for k in range(max(1, n_eval // batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
                for k in range(max(1, n_eval // batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
check_is_fitted(self, "X")
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = linalg.solve_triangular(C, F, lower=True)
try:
Q, G = linalg.qr(Ft, econ=True)
except:
#/usr/lib/python2.6/dist-packages/scipy/linalg/decomp.py:1177:
# DeprecationWarning: qr econ argument will be removed after scipy
# 0.7. The economy transform will then be available through the
# mode='economic' argument.
Q, G = linalg.qr(Ft, mode='economic')
pass
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
"of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = linalg.solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
# The determinant of R is equal to the squared product of the diagonal
# elements of its Cholesky decomposition C
detR = (np.diag(C) ** (2. / n_samples)).prod()
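        # Note: detR as computed above is actually det(R) ** (1 / n_samples), i.e. the
        # n-th root of the determinant, which is the quantity used in the concentrated
        # (reduced) likelihood expression below.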
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = linalg.solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
def _arg_max_reduced_likelihood_function(self):
"""
This function estimates the autocorrelation parameters theta as the
maximizer of the reduced likelihood function.
(Minimization of the opposite reduced likelihood function is used for
convenience)
Parameters
----------
self : All parameters are stored in the Gaussian Process model object.
Returns
-------
optimal_theta : array_like
The best set of autocorrelation parameters (the sought maximizer of
the reduced likelihood function).
optimal_reduced_likelihood_function_value : double
The optimal reduced likelihood function value.
optimal_par : dict
The BLUP parameters associated to thetaOpt.
"""
# Initialize output
best_optimal_theta = []
best_optimal_rlf_value = []
best_optimal_par = []
if self.verbose:
print("The chosen optimizer is: " + str(self.optimizer))
if self.random_start > 1:
print(str(self.random_start) + " random starts are required.")
percent_completed = 0.
# Force optimizer to fmin_cobyla if the model is meant to be isotropic
if self.optimizer == 'Welch' and self.theta0.size == 1:
self.optimizer = 'fmin_cobyla'
if self.optimizer == 'fmin_cobyla':
def minus_reduced_likelihood_function(log10t):
return - self.reduced_likelihood_function(
theta=10. ** log10t)[0]
constraints = []
for i in range(self.theta0.size):
constraints.append(lambda log10t, i=i:
log10t[i] - np.log10(self.thetaL[0, i]))
constraints.append(lambda log10t, i=i:
np.log10(self.thetaU[0, i]) - log10t[i])
for k in range(self.random_start):
if k == 0:
# Use specified starting point as first guess
theta0 = self.theta0
else:
# Generate a random starting point log10-uniformly
# distributed between bounds
log10theta0 = (np.log10(self.thetaL)
+ self.random_state.rand(*self.theta0.shape)
* np.log10(self.thetaU / self.thetaL))
theta0 = 10. ** log10theta0
# Run Cobyla
try:
log10_optimal_theta = \
optimize.fmin_cobyla(minus_reduced_likelihood_function,
np.log10(theta0).ravel(), constraints,
iprint=0)
except ValueError as ve:
print("Optimization failed. Try increasing the ``nugget``")
raise ve
optimal_theta = 10. ** log10_optimal_theta
optimal_rlf_value, optimal_par = \
self.reduced_likelihood_function(theta=optimal_theta)
# Compare the new optimizer to the best previous one
if k > 0:
if optimal_rlf_value > best_optimal_rlf_value:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
else:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
if self.verbose and self.random_start > 1:
if (20 * k) / self.random_start > percent_completed:
percent_completed = (20 * k) / self.random_start
print("%s completed" % (5 * percent_completed))
optimal_rlf_value = best_optimal_rlf_value
optimal_par = best_optimal_par
optimal_theta = best_optimal_theta
elif self.optimizer == 'Welch':
            # Backup of the given attributes
theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
corr = self.corr
verbose = self.verbose
# This will iterate over fmin_cobyla optimizer
self.optimizer = 'fmin_cobyla'
self.verbose = False
# Initialize under isotropy assumption
if verbose:
print("Initialize under isotropy assumption...")
self.theta0 = check_array(self.theta0.min())
self.thetaL = check_array(self.thetaL.min())
self.thetaU = check_array(self.thetaU.max())
theta_iso, optimal_rlf_value_iso, par_iso = \
self._arg_max_reduced_likelihood_function()
optimal_theta = theta_iso + np.zeros(theta0.shape)
# Iterate over all dimensions of theta allowing for anisotropy
if verbose:
print("Now improving allowing for anisotropy...")
for i in self.random_state.permutation(theta0.size):
if verbose:
print("Proceeding along dimension %d..." % (i + 1))
self.theta0 = check_array(theta_iso)
self.thetaL = check_array(thetaL[0, i])
self.thetaU = check_array(thetaU[0, i])
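                # corr_cut evaluates the full anisotropic correlation with every
                # component of theta held at its current optimum except dimension i,
                # which is the single parameter being optimised in this pass.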
def corr_cut(t, d):
                    return corr(check_array(np.hstack([optimal_theta[0][0:i],
                                                       t[0],
                                                       optimal_theta[0][(i + 1):]])),
                                d)
self.corr = corr_cut
optimal_theta[0, i], optimal_rlf_value, optimal_par = \
self._arg_max_reduced_likelihood_function()
            # Restore the given attributes
self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
self.corr = corr
self.optimizer = 'Welch'
self.verbose = verbose
else:
raise NotImplementedError("This optimizer ('%s') is not "
"implemented yet. Please contribute!"
% self.optimizer)
return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError("regr should be one of %s or callable, "
"%s was given."
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = check_array(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError("corr should be one of %s or callable, "
"%s was given."
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
"'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = check_array(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = check_array(self.thetaL)
self.thetaU = check_array(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
"same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
raise ValueError("The bounds must satisfy O < thetaL <= "
"thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
"neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
        if np.any(self.nugget < 0.):
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if self.optimizer not in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
| bsd-3-clause |
chintak/scikit-image | doc/examples/plot_peak_local_max.py | 2 | 1456 | """
====================
Finding local maxima
====================
The ``peak_local_max`` function returns the coordinates of local peaks (maxima)
in an image. A maximum filter is used for finding local maxima. This operation
dilates the original image and merges neighboring local maxima closer than the
size of the dilation. Locations where the original image is equal to the
dilated image are returned as local maxima.
"""
from scipy import ndimage
import matplotlib.pyplot as plt
from skimage.feature import peak_local_max
from skimage import data, img_as_float
im = img_as_float(data.coins())
# image_max is the dilation of im with a 20*20 structuring element
# It is used within peak_local_max function
image_max = ndimage.maximum_filter(im, size=20, mode='constant')
# Comparison between image_max and im to find the coordinates of local maxima
coordinates = peak_local_max(im, min_distance=20)
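# Note (illustration only): the dilation-based definition above amounts to keeping
# pixels where the image equals its own maximum filter, e.g. ``im == image_max``;
# ``peak_local_max`` additionally enforces a minimum peak distance (and optional
# thresholds) before returning the coordinates.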
# display results
plt.figure(figsize=(8, 3))
plt.subplot(131)
plt.imshow(im, cmap=plt.cm.gray)
plt.axis('off')
plt.title('Original')
plt.subplot(132)
plt.imshow(image_max, cmap=plt.cm.gray)
plt.axis('off')
plt.title('Maximum filter')
plt.subplot(133)
plt.imshow(im, cmap=plt.cm.gray)
plt.autoscale(False)
plt.plot([p[1] for p in coordinates], [p[0] for p in coordinates], 'r.')
plt.axis('off')
plt.title('Peak local max')
plt.subplots_adjust(wspace=0.02, hspace=0.02, top=0.9,
bottom=0.02, left=0.02, right=0.98)
plt.show()
| bsd-3-clause |
kebarr/Geist | geist/pyplot.py | 1 | 2739 | from itertools import cycle
from time import sleep
from matplotlib.pyplot import cm, figure, gcf, imshow
from geist.colour import rgb_to_hsv, hsv
from geist.finders import Location
import numpy
class Viewer(object):
def __init__(self, gui, repo):
self._gui = gui
self._repo = repo
def save(self, name, force=False):
self._save(name, self.visible(), force=force)
def _save(self, name, image_to_save, force=False):
if name in self._repo.entries and force==False:
raise KeyError(
name + ' already exists, to overwrite, pass force=True')
self._repo[name] = image_to_save
def visible(self):
a = gcf().get_axes()[0]
x1, x2 = a.get_xbound()
y1, y2 = a.get_ybound()
x1 = int(x1 + 0.5)
x2 = int(numpy.ceil(x2 + 0.5))
y1 = int(y1 + 0.5)
y2 = int(numpy.ceil(y2 + 0.5))
return numpy.array(a.get_images()[-1].get_array()[y1:y2, x1:x2])
def location(self):
        a = gcf().get_axes()[0]
        x1, x2 = a.get_xbound()
        y1, y2 = a.get_ybound()
        x1 = int(x1 + 0.5)
x2 = int(numpy.ceil(x2 + 0.5))
y1 = int(y1 + 0.5)
y2 = int(numpy.ceil(y2 + 0.5))
return Location(x1, y1, x2 - x1, y2 - y1, image=numpy.array(a.get_images()[-1].get_array()))
def show_capture(self, wait_time=0):
sleep(wait_time)
for location in self._gui.capture_locations():
figure()
imshow(location.image, interpolation='none')
def show_repo(self, name, newfig=False):
if newfig:
figure()
imshow(self._repo[name].image, cm.Greys_r, interpolation='none')
def show_image(self, image):
imshow(image, interpolation='none')
def show_found(self, finder):
for location in self._gui.capture_locations():
figure()
image = numpy.copy(location.image)
channel = cycle([0, 1, 2])
for l, c in zip(finder.find(location), channel):
image[l.y:l.y + l.h, l.x:l.x + l.w, :] *= 0.75
image[l.y:l.y + l.h, l.x:l.x + l.w, c] = 255
imshow(image, interpolation='none')
def _get_colour(self, numpy_array):
hue, sat, val = rgb_to_hsv(numpy_array)
hmin = hue.min()
hmax = hue.max()
smin = sat.min()
smax = sat.max()
vmin = val.min()
vmax = val.max()
        print(hmin, hmax, smin, smax, vmin, vmax)
return hsv(lambda h, s, v: (
(h >= hmin) & (h <= hmax)) &
((s >= smin) & (s <= smax)) &
((v >= vmin) & (v <= vmax)))
def get_colour(self):
return self._get_colour(self.visible())
| mit |
dataewan/deep-learning | face_generation/dlnd_face_generation.py | 1 | 17916 |
# coding: utf-8
# # Face Generation
# In this project, you'll use generative adversarial networks to generate new images of faces.
# ### Get the Data
# You'll be using two datasets in this project:
# - MNIST
# - CelebA
#
# Since the celebA dataset is complex and you're doing GANs in a project for the first time, we want you to test your neural network on MNIST before CelebA. Running the GANs on MNIST will allow you to see how well your model trains sooner.
#
# If you're using [FloydHub](https://www.floydhub.com/), set `data_dir` to "/input" and use the [FloydHub data ID](http://docs.floydhub.com/home/using_datasets/) "R5KrjnANiKVhLWAkpXhNBe".
data_dir = './data'
# FloydHub - Use with data ID "R5KrjnANiKVhLWAkpXhNBe"
data_dir = '/input'
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
helper.download_extract('mnist', data_dir)
helper.download_extract('celeba', data_dir)
# ## Explore the Data
# ### MNIST
# As you're aware, the [MNIST](http://yann.lecun.com/exdb/mnist/) dataset contains images of handwritten digits. You can view the first number of examples by changing `show_n_images`.
show_n_images = 25
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
get_ipython().magic('matplotlib inline')
import os
from glob import glob
from matplotlib import pyplot
mnist_images = helper.get_batch(glob(os.path.join(data_dir, 'mnist/*.jpg'))[:show_n_images], 28, 28, 'L')
pyplot.imshow(helper.images_square_grid(mnist_images, 'L'), cmap='gray')
# ### CelebA
# The [CelebFaces Attributes Dataset (CelebA)](http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html) dataset contains over 200,000 celebrity images with annotations. Since you're going to be generating faces, you won't need the annotations. You can view the first number of examples by changing `show_n_images`.
show_n_images = 25
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
mnist_images = helper.get_batch(glob(os.path.join(data_dir, 'img_align_celeba/*.jpg'))[:show_n_images], 28, 28, 'RGB')
pyplot.imshow(helper.images_square_grid(mnist_images, 'RGB'))
# ## Preprocess the Data
# Since the project's main focus is on building the GANs, we'll preprocess the data for you. The values of the MNIST and CelebA dataset will be in the range of -0.5 to 0.5 of 28x28 dimensional images. The CelebA images will be cropped to remove parts of the image that don't include a face, then resized down to 28x28.
#
# The MNIST images are black and white images with a single [color channel](https://en.wikipedia.org/wiki/Channel_(digital_image%29) while the CelebA images have [3 color channels (RGB color channel)](https://en.wikipedia.org/wiki/Channel_(digital_image%29#RGB_Images).
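# As an aside, a minimal sketch (illustrative only, not part of `helper.py`) of the kind
# of value scaling described above, assuming 8-bit pixel arrays; the actual preprocessing,
# including the CelebA face crop and the 28x28 resize, is handled by `helper.Dataset`:
def scale_to_half_range(images):
    """Map uint8 pixel values in [0, 255] to floats in the range [-0.5, 0.5]."""
    return images / 255.0 - 0.5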
# ## Build the Neural Network
# You'll build the components necessary to build a GANs by implementing the following functions below:
# - `model_inputs`
# - `discriminator`
# - `generator`
# - `model_loss`
# - `model_opt`
# - `train`
#
# ### Check the Version of TensorFlow and Access to GPU
# This will check to make sure you have the correct version of TensorFlow and access to a GPU
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer. You are using {}'.format(tf.__version__)
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
# ### Input
# Implement the `model_inputs` function to create TF Placeholders for the Neural Network. It should create the following placeholders:
# - Real input images placeholder with rank 4 using `image_width`, `image_height`, and `image_channels`.
# - Z input placeholder with rank 2 using `z_dim`.
# - Learning rate placeholder with rank 0.
#
# Return the placeholders in the following tuple: (tensor of real input images, tensor of z data, learning rate)
import problem_unittests as tests
def model_inputs(image_width, image_height, image_channels, z_dim):
"""
Create the model inputs
:param image_width: The input image width
:param image_height: The input image height
:param image_channels: The number of image channels
:param z_dim: The dimension of Z
:return: Tuple of (tensor of real input images, tensor of z data, learning rate)
"""
# TODO: Implement Function
inputs_real = tf.placeholder(tf.float32, shape=(None, image_width, image_height, image_channels), name='input_real')
inputs_z = tf.placeholder(tf.float32, (None, z_dim), name='input_z')
learning_rate = tf.placeholder(tf.float32, name='learning_rate')
return inputs_real, inputs_z, learning_rate
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_model_inputs(model_inputs)
# ### Discriminator
# Implement `discriminator` to create a discriminator neural network that discriminates on `images`. This function should be able to reuse the variables in the neural network. Use [`tf.variable_scope`](https://www.tensorflow.org/api_docs/python/tf/variable_scope) with a scope name of "discriminator" to allow the variables to be reused. The function should return a tuple of (tensor output of the discriminator, tensor logits of the discriminator).
def discriminator(images, reuse=False):
"""
Create the discriminator network
:param image: Tensor of input image(s)
:param reuse: Boolean if the weights should be reused
:return: Tuple of (tensor output of the discriminator, tensor logits of the discriminator)
"""
alpha = 0.2
keep_prob=0.8
with tf.variable_scope('discriminator', reuse=reuse):
# using 4 layer network as in DCGAN Paper
# Conv 1
conv1 = tf.layers.conv2d(images, 64, 5, 2, 'SAME', kernel_initializer=tf.contrib.layers.xavier_initializer())
lrelu1 = tf.maximum(alpha * conv1, conv1)
# Conv 2
conv2 = tf.layers.conv2d(lrelu1, 128, 5, 2, 'SAME', kernel_initializer=tf.contrib.layers.xavier_initializer())
batch_norm2 = tf.layers.batch_normalization(conv2, training=True)
lrelu2 = tf.maximum(alpha * batch_norm2, batch_norm2)
drop2 = tf.nn.dropout(lrelu2, keep_prob=keep_prob)
# Conv 3
conv3 = tf.layers.conv2d(drop2, 256, 5, 1, 'SAME', kernel_initializer=tf.contrib.layers.xavier_initializer())
batch_norm3 = tf.layers.batch_normalization(conv3, training=True)
lrelu3 = tf.maximum(alpha * batch_norm3, batch_norm3)
drop3 = tf.nn.dropout(lrelu3, keep_prob=keep_prob)
# Conv 4
conv4 = tf.layers.conv2d(drop3, 512, 5, 1, 'SAME', kernel_initializer=tf.contrib.layers.xavier_initializer())
batch_norm4 = tf.layers.batch_normalization(conv4, training=True)
lrelu4 = tf.maximum(alpha * batch_norm4, batch_norm4)
drop4 = tf.nn.dropout(lrelu4, keep_prob=keep_prob)
# Flatten
flat = tf.reshape(drop4, (-1, 7*7*512))
# Logits
logits = tf.layers.dense(flat, 1)
# Output
out = tf.sigmoid(logits)
return out, logits
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_discriminator(discriminator, tf)
# ### Generator
# Implement `generator` to generate an image using `z`. This function should be able to reuse the variables in the neural network. Use [`tf.variable_scope`](https://www.tensorflow.org/api_docs/python/tf/variable_scope) with a scope name of "generator" to allow the variables to be reused. The function should return the generated 28 x 28 x `out_channel_dim` images.
def generator(z, out_channel_dim, is_train=True):
"""
Create the generator network
:param z: Input z
:param out_channel_dim: The number of channels in the output image
:param is_train: Boolean if generator is being used for training
:return: The tensor output of the generator
"""
alpha = 0.2
keep_prob=0.8
with tf.variable_scope('generator', reuse=False if is_train==True else True):
# Fully connected
fc1 = tf.layers.dense(z, 7*7*512)
fc1 = tf.reshape(fc1, (-1, 7, 7, 512))
fc1 = tf.maximum(alpha*fc1, fc1)
# Starting Conv Transpose Stack
deconv2 = tf.layers.conv2d_transpose(fc1, 256, 3, 1, 'SAME', kernel_initializer=tf.contrib.layers.xavier_initializer())
batch_norm2 = tf.layers.batch_normalization(deconv2, training=is_train)
lrelu2 = tf.maximum(alpha * batch_norm2, batch_norm2)
drop2 = tf.nn.dropout(lrelu2, keep_prob=keep_prob)
deconv3 = tf.layers.conv2d_transpose(drop2, 128, 3, 1, 'SAME', kernel_initializer=tf.contrib.layers.xavier_initializer())
batch_norm3 = tf.layers.batch_normalization(deconv3, training=is_train)
lrelu3 = tf.maximum(alpha * batch_norm3, batch_norm3)
drop3 = tf.nn.dropout(lrelu3, keep_prob=keep_prob)
deconv4 = tf.layers.conv2d_transpose(drop3, 64, 3, 2, 'SAME', kernel_initializer=tf.contrib.layers.xavier_initializer())
batch_norm4 = tf.layers.batch_normalization(deconv4, training=is_train)
lrelu4 = tf.maximum(alpha * batch_norm4, batch_norm4)
drop4 = tf.nn.dropout(lrelu4, keep_prob=keep_prob)
# Logits
logits = tf.layers.conv2d_transpose(drop4, out_channel_dim, 3, 2, 'SAME', kernel_initializer=tf.contrib.layers.xavier_initializer())
# Output
out = tf.tanh(logits)
return out
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_generator(generator, tf)
# ### Loss
# Implement `model_loss` to build the GANs for training and calculate the loss. The function should return a tuple of (discriminator loss, generator loss). Use the following functions you implemented:
# - `discriminator(images, reuse=False)`
# - `generator(z, out_channel_dim, is_train=True)`
def model_loss(input_real, input_z, out_channel_dim):
"""
Get the loss for the discriminator and generator
:param input_real: Images from the real dataset
:param input_z: Z input
:param out_channel_dim: The number of channels in the output image
:return: A tuple of (discriminator loss, generator loss)
"""
g_model = generator(input_z, out_channel_dim)
d_model_real, d_logits_real = discriminator(input_real)
d_model_fake, d_logits_fake = discriminator(g_model, reuse=True)
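    # one-sided label smoothing: real labels are set to 0.9 (rather than 1.0) below,
    # which helps keep the discriminator from becoming over-confident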
d_loss_real = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real, labels=tf.ones_like(d_model_real) * 0.9)
)
d_loss_fake = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.zeros_like(d_model_fake))
)
d_loss = d_loss_real + d_loss_fake
g_loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.ones_like(d_model_fake))
)
return d_loss, g_loss
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_model_loss(model_loss)
# ### Optimization
# Implement `model_opt` to create the optimization operations for the GANs. Use [`tf.trainable_variables`](https://www.tensorflow.org/api_docs/python/tf/trainable_variables) to get all the trainable variables. Filter the variables with names that are in the discriminator and generator scope names. The function should return a tuple of (discriminator training operation, generator training operation).
def model_opt(d_loss, g_loss, learning_rate, beta1):
"""
Get optimization operations
:param d_loss: Discriminator loss Tensor
:param g_loss: Generator loss Tensor
:param learning_rate: Learning Rate Placeholder
:param beta1: The exponential decay rate for the 1st moment in the optimizer
:return: A tuple of (discriminator training operation, generator training operation)
"""
t_vars = tf.trainable_variables()
d_vars = [var for var in t_vars if var.name.startswith('discriminator')]
g_vars = [var for var in t_vars if var.name.startswith('generator')]
# Optimize
d_train_opt = tf.train.AdamOptimizer(learning_rate, beta1=beta1).minimize(d_loss, var_list=d_vars)
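    # the control dependency below ensures the generator's batch-norm moving statistics
    # (collected in UPDATE_OPS) are refreshed whenever the generator train op runs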
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope='generator')):
g_train_opt = tf.train.AdamOptimizer(learning_rate = learning_rate,beta1 = beta1).minimize(g_loss, var_list = g_vars)
return d_train_opt, g_train_opt
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_model_opt(model_opt, tf)
# ## Neural Network Training
# ### Show Output
# Use this function to show the current output of the generator during training. It will help you determine how well the GANs is training.
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
def show_generator_output(sess, n_images, input_z, out_channel_dim, image_mode):
"""
Show example output for the generator
:param sess: TensorFlow session
:param n_images: Number of Images to display
:param input_z: Input Z Tensor
:param out_channel_dim: The number of channels in the output image
:param image_mode: The mode to use for images ("RGB" or "L")
"""
cmap = None if image_mode == 'RGB' else 'gray'
z_dim = input_z.get_shape().as_list()[-1]
example_z = np.random.uniform(-1, 1, size=[n_images, z_dim])
samples = sess.run(
generator(input_z, out_channel_dim, False),
feed_dict={input_z: example_z})
images_grid = helper.images_square_grid(samples, image_mode)
pyplot.imshow(images_grid, cmap=cmap)
pyplot.show()
# ### Train
# Implement `train` to build and train the GANs. Use the following functions you implemented:
# - `model_inputs(image_width, image_height, image_channels, z_dim)`
# - `model_loss(input_real, input_z, out_channel_dim)`
# - `model_opt(d_loss, g_loss, learning_rate, beta1)`
#
# Use the `show_generator_output` to show `generator` output while you train. Running `show_generator_output` for every batch will drastically increase training time and increase the size of the notebook. It's recommended to print the `generator` output every 100 batches.
def train(epoch_count, batch_size, z_dim, learning_rate, beta1, get_batches, data_shape, data_image_mode):
"""
Train the GAN
:param epoch_count: Number of epochs
:param batch_size: Batch Size
:param z_dim: Z dimension
:param learning_rate: Learning Rate
:param beta1: The exponential decay rate for the 1st moment in the optimizer
:param get_batches: Function to get batches
:param data_shape: Shape of the data
:param data_image_mode: The image mode to use for images ("RGB" or "L")
"""
tf.reset_default_graph()
input_real, input_z, _ = model_inputs(data_shape[1], data_shape[2], data_shape[3], z_dim)
d_loss, g_loss = model_loss(input_real, input_z, data_shape[3])
d_opt, g_opt = model_opt(d_loss, g_loss, learning_rate, beta1)
steps = 0
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(epoch_count):
for batch_images in get_batches(batch_size):
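                # helper.Dataset yields pixel values in [-0.5, 0.5]; doubling maps them
                # to [-1, 1] so they match the tanh output range of the generator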
batch_images = batch_images * 2
steps += 1
batch_z = np.random.uniform(-1, 1, size=(batch_size, z_dim))
_ = sess.run(d_opt, feed_dict={input_real: batch_images, input_z: batch_z})
_ = sess.run(g_opt, feed_dict={input_z: batch_z})
if steps % 100 == 0:
train_loss_d = d_loss.eval({input_z: batch_z, input_real: batch_images})
train_loss_g = g_loss.eval({input_z: batch_z})
print("Epoch {}/{}...".format(epoch_i+1, epochs),
"Discriminator Loss: {:.4f}...".format(train_loss_d),
"Generator Loss: {:.4f}".format(train_loss_g))
_ = show_generator_output(sess, 1, input_z, data_shape[3], data_image_mode)
# ### MNIST
# Test your GANs architecture on MNIST. After 2 epochs, the GANs should be able to generate images that look like handwritten digits. Make sure the loss of the generator is lower than the loss of the discriminator or close to 0.
batch_size = 32
z_dim = 100
learning_rate = 0.0002
beta1 = 0.5
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
epochs = 2
mnist_dataset = helper.Dataset('mnist', glob(os.path.join(data_dir, 'mnist/*.jpg')))
with tf.Graph().as_default():
train(epochs, batch_size, z_dim, learning_rate, beta1, mnist_dataset.get_batches,
mnist_dataset.shape, mnist_dataset.image_mode)
# ### CelebA
# Run your GANs on CelebA. It will take around 20 minutes on the average GPU to run one epoch. You can run the whole epoch or stop when it starts to generate realistic faces.
batch_size = 64
z_dim = 100
learning_rate = 0.0002
beta1 = 0.5
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
epochs = 1
celeba_dataset = helper.Dataset('celeba', glob(os.path.join(data_dir, 'img_align_celeba/*.jpg')))
with tf.Graph().as_default():
train(epochs, batch_size, z_dim, learning_rate, beta1, celeba_dataset.get_batches,
celeba_dataset.shape, celeba_dataset.image_mode)
# ### Submitting This Project
# When submitting this project, make sure to run all the cells before saving the notebook. Save the notebook file as "dlnd_face_generation.ipynb" and save it as a HTML file under "File" -> "Download as". Include the "helper.py" and "problem_unittests.py" files in your submission.
print("Done")
| mit |
quantopian/empyrical | empyrical/tests/test_perf_attrib.py | 1 | 8950 | import numpy as np
import pandas as pd
import unittest
from empyrical.perf_attrib import perf_attrib
class PerfAttribTestCase(unittest.TestCase):
def test_perf_attrib_simple(self):
start_date = '2017-01-01'
periods = 2
dts = pd.date_range(start_date, periods=periods)
dts.name = 'dt'
tickers = ['stock1', 'stock2']
styles = ['risk_factor1', 'risk_factor2']
returns = pd.Series(data=[0.1, 0.1], index=dts)
factor_returns = pd.DataFrame(
columns=styles,
index=dts,
data={'risk_factor1': [.1, .1],
'risk_factor2': [.1, .1]}
)
index = pd.MultiIndex.from_product(
[dts, tickers], names=['dt', 'ticker'])
positions = pd.Series([0.2857142857142857, 0.7142857142857143,
0.2857142857142857, 0.7142857142857143],
index=index)
factor_loadings = pd.DataFrame(
columns=styles,
index=index,
data={'risk_factor1': [0.25, 0.25, 0.25, 0.25],
'risk_factor2': [0.25, 0.25, 0.25, 0.25]}
)
expected_perf_attrib_output = pd.DataFrame(
index=dts,
columns=['risk_factor1', 'risk_factor2', 'total_returns',
'common_returns', 'specific_returns',
'tilt_returns', 'timing_returns'],
data={'risk_factor1': [0.025, 0.025],
'risk_factor2': [0.025, 0.025],
'common_returns': [0.05, 0.05],
'specific_returns': [0.05, 0.05],
'tilt_returns': [0.05, 0.05],
'timing_returns': [0.0, 0.0],
'total_returns': returns}
)
expected_exposures_portfolio = pd.DataFrame(
index=dts,
columns=['risk_factor1', 'risk_factor2'],
data={'risk_factor1': [0.25, 0.25],
'risk_factor2': [0.25, 0.25]}
)
exposures_portfolio, perf_attrib_output = perf_attrib(returns,
positions,
factor_returns,
factor_loadings)
pd.util.testing.assert_frame_equal(expected_perf_attrib_output,
perf_attrib_output)
pd.util.testing.assert_frame_equal(expected_exposures_portfolio,
exposures_portfolio)
# test long and short positions
positions = pd.Series([0.5, -0.5, 0.5, -0.5], index=index)
exposures_portfolio, perf_attrib_output = perf_attrib(returns,
positions,
factor_returns,
factor_loadings)
expected_perf_attrib_output = pd.DataFrame(
index=dts,
columns=['risk_factor1', 'risk_factor2', 'total_returns',
'common_returns', 'specific_returns',
'tilt_returns', 'timing_returns'],
data={'risk_factor1': [0.0, 0.0],
'risk_factor2': [0.0, 0.0],
'common_returns': [0.0, 0.0],
'specific_returns': [0.1, 0.1],
'tilt_returns': [0.0, 0.0],
'timing_returns': [0.0, 0.0],
'total_returns': returns}
)
expected_exposures_portfolio = pd.DataFrame(
index=dts,
columns=['risk_factor1', 'risk_factor2'],
data={'risk_factor1': [0.0, 0.0],
'risk_factor2': [0.0, 0.0]}
)
pd.util.testing.assert_frame_equal(expected_perf_attrib_output,
perf_attrib_output)
pd.util.testing.assert_frame_equal(expected_exposures_portfolio,
exposures_portfolio)
# test long and short positions with tilt exposure
positions = pd.Series([1.0, -0.5, 1.0, -0.5], index=index)
exposures_portfolio, perf_attrib_output = perf_attrib(returns,
positions,
factor_returns,
factor_loadings)
expected_perf_attrib_output = pd.DataFrame(
index=dts,
columns=['risk_factor1', 'risk_factor2', 'total_returns',
'common_returns', 'specific_returns',
'tilt_returns', 'timing_returns'],
data={'risk_factor1': [0.0125, 0.0125],
'risk_factor2': [0.0125, 0.0125],
'common_returns': [0.025, 0.025],
'specific_returns': [0.075, 0.075],
'tilt_returns': [0.025, 0.025],
'timing_returns': [0.0, 0.0],
'total_returns': returns}
)
expected_exposures_portfolio = pd.DataFrame(
index=dts,
columns=['risk_factor1', 'risk_factor2'],
data={'risk_factor1': [0.125, 0.125],
'risk_factor2': [0.125, 0.125]}
)
pd.util.testing.assert_frame_equal(expected_perf_attrib_output,
perf_attrib_output)
pd.util.testing.assert_frame_equal(expected_exposures_portfolio,
exposures_portfolio)
def test_perf_attrib_regression(self):
positions = pd.read_csv('empyrical/tests/test_data/positions.csv',
index_col=0, parse_dates=True)
positions.columns = [int(col) if col != 'cash' else col
for col in positions.columns]
positions = positions.divide(positions.sum(axis='columns'),
axis='rows')
positions = positions.drop('cash', axis='columns').stack()
returns = pd.read_csv('empyrical/tests/test_data/returns.csv',
index_col=0, parse_dates=True,
header=None, squeeze=True)
factor_loadings = pd.read_csv(
'empyrical/tests/test_data/factor_loadings.csv',
index_col=[0, 1], parse_dates=True
)
factor_returns = pd.read_csv(
'empyrical/tests/test_data/factor_returns.csv',
index_col=0, parse_dates=True
)
residuals = pd.read_csv('empyrical/tests/test_data/residuals.csv',
index_col=0, parse_dates=True)
residuals.columns = [int(col) for col in residuals.columns]
intercepts = pd.read_csv('empyrical/tests/test_data/intercepts.csv',
index_col=0, header=None, squeeze=True)
risk_exposures_portfolio, perf_attrib_output = perf_attrib(
returns,
positions,
factor_returns,
factor_loadings,
)
specific_returns = perf_attrib_output['specific_returns']
common_returns = perf_attrib_output['common_returns']
combined_returns = specific_returns + common_returns
# since all returns are factor returns, common returns should be
# equivalent to total returns, and specific returns should be 0
pd.util.testing.assert_series_equal(returns,
common_returns,
check_names=False)
self.assertTrue(np.isclose(specific_returns, 0).all())
# specific and common returns combined should equal total returns
pd.util.testing.assert_series_equal(returns,
combined_returns,
check_names=False)
        # check that residuals + intercepts (i.e. the specific returns) are close to 0
self.assertTrue(np.isclose((residuals + intercepts), 0).all())
# check that exposure * factor returns = common returns
expected_common_returns = risk_exposures_portfolio.multiply(
factor_returns, axis='rows'
).sum(axis='columns')
pd.util.testing.assert_series_equal(expected_common_returns,
common_returns,
check_names=False)
# since factor loadings are ones, portfolio risk exposures
# should be ones
pd.util.testing.assert_frame_equal(
risk_exposures_portfolio,
pd.DataFrame(np.ones_like(risk_exposures_portfolio),
index=risk_exposures_portfolio.index,
columns=risk_exposures_portfolio.columns)
)
| apache-2.0 |
Quantipy/quantipy | quantipy/core/builds/powerpoint/visual_editor.py | 1 | 33091 | # encoding: utf-8
'''
@author: Majeed.sahebzadha
'''
from __future__ import print_function, unicode_literals
import numpy as np
import pandas as pd
import re
import time
import os
from collections import OrderedDict
import json
import pickle
from pptx import Presentation
from pptx.chart.data import ChartData
from add_shapes import *
from transformations import *
from os.path import (
basename,
dirname,
split
)
from pptx.enum.chart import (
XL_CHART_TYPE,
XL_LABEL_POSITION,
XL_LEGEND_POSITION,
XL_TICK_MARK,
XL_TICK_LABEL_POSITION
)
from pptx.util import (
Emu,
Pt,
Cm,
Inches
)
from pptx.enum.dml import (
MSO_THEME_COLOR,
MSO_COLOR_TYPE,
MSO_FILL
)
from pptx.enum.text import (
PP_ALIGN,
MSO_AUTO_SIZE,
MSO_ANCHOR
)
pd.set_option('display.expand_frame_repr', False)
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def file_exists(file_name):
''' check if file exists '''
return os.path.isfile(file_name)
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def pickle_file(obj_to_pickle, file_name):
if ".pickle" not in file_name:
file_name = "%s.pickle" % file_name
with open(file_name, 'wb') as handle:
pickle.dump(obj_to_pickle, handle)
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def load_pickled_file(file_name):
if ".pickle" not in file_name:
file_name = "%s.pickle" % file_name
with open(file_name, 'rb') as handle:
picked_obj = pickle.load(handle)
return picked_obj
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def save_json(obj, json_path):
''' Saves obj as a json text file at json_path
'''
from decoder_ring import as_ascii
print("Saving json: {f}".format(f=json_path))
obj = as_ascii(obj, control=False, extended=True, encoding='UTF-8')
with open(json_path, 'w') as f:
json.dump(obj, f)
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def rename_duplicate_shape_names(pres_path, overwrite=True):
'''
Ensures all shapes have a unique name.
Only renames duplicates.
Compares shape names one slide at a time.
'''
file_name = basename(pres_path).split('.')[0]
file_path = dirname(pres_path)
prs = Presentation(pres_path)
for slide in prs.slides:
shape_names = []
for shape in slide.shapes:
shape_names.append(shape.name)
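        # only names that appear more than once on this slide get a suffix; the suffix
        # is the shape's positional index, which makes the duplicates unique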
renamed_shapes = [x + "_" + str(i) if shape_names.count(x)>1 else x for i, x in enumerate(shape_names)]
for s_idx, shape in enumerate(slide.shapes):
shape.name = renamed_shapes[s_idx]
if overwrite:
prs.save('{pres_path}\\{pres_name}.pptx'.format(
pres_path=file_path,
pres_name=file_name))
else:
prs.save('{pres_path}\\{pres_name}_edited.pptx'.format(
pres_path=file_path,
pres_name=file_name))
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def read_pptx(pres_path, slide_num=[], save_as_json=True):
'''
Iterates through an existing PPTX and prints info re slide and shapes.
param: pres_path - full path of target file
param: slide_num - list
parem: save_as_json - boolean
    example usage:
*read_pptx(pres)
*read_pptx(pres, [20,25,15], False)
'''
if not isinstance(slide_num, list):
slide_num = [slide_num]
prs = Presentation(pres_path)
file_name = os.path.basename(pres_path).split('.')[0]
pptx_tree = OrderedDict()
pptx_tree[file_name] = OrderedDict()
pptx_tree[file_name]['slides'] = OrderedDict()
print('Analysing PPTX content...\n')
for i, sld in enumerate(prs.slides, start=1):
if slide_num:
if i in slide_num:
slide_number = str(i)
pptx_tree[file_name]['slides'][slide_number] = OrderedDict()
print('{indent:>5}slide layout name : {sld_layout_name}\n'.
format(
indent='',
sld_layout_name=sld.slide_layout.name))
pptx_tree[file_name]['slides'][slide_number]['slide layout'] = OrderedDict()
slide_layout_name = str(sld.slide_layout.name)
pptx_tree[file_name]['slides'][slide_number]['slide layout']['name'] = slide_layout_name
pptx_tree[file_name]['slides'][slide_number]['shapes'] = OrderedDict()
for x, shp in enumerate(sld.shapes):
print('{indent:>10}shape index - {x}'.
format(
indent='',
x=x))
shape_number = str(x)
pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number] = OrderedDict()
print('{indent:>15}shape name - {shape_name}'.
format(
indent='',
shape_name=shp.name))
shape_name = str(shp.name)
pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number]['shape name'] = shape_name
print('{indent:>15}shape type - {shape_type}'.
format(
indent='',
shape_type=shp.shape_type))
shape_type = str(shp.shape_type)
pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number]['shape type'] = shape_type
if str(shp.shape_type) == 'PLACEHOLDER (14)':
print('{indent1:>15}placeholder idx - {placeholder_idx},\n'
'{indent2:>15}placeholder type - {placeholder_type}'.
format(
indent1='',
indent2='',
placeholder_idx=shp.placeholder_format.idx,
placeholder_type=shp.placeholder_format.type))
pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number]['placeholder idx'] = str(shp.placeholder_format.idx)
pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number]['placeholder type'] = str(shp.placeholder_format.type)
print('{indent:>15}shape dimensions - '
'left: {shape_left}, top: {shape_top}, '
'height: {shape_height}, width: {shape_width}\n'.
format(
indent='',
shape_left=shp.left,
shape_height=shp.height,
shape_top=shp.top,
shape_width=shp.width))
pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number]['shape dimensions'] = OrderedDict()
pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number]['shape dimensions']['left'] = str(shp.left)
pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number]['shape dimensions']['top'] = str(shp.top)
pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number]['shape dimensions']['width'] = str(shp.width)
pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number]['shape dimensions']['height'] = str(shp.height)
else:
print('='*110)
print('{indent:>0}Slide {i} details:\n'.
format(
indent='',
i=i))
slide_number = str(i)
pptx_tree[file_name]['slides'][slide_number] = OrderedDict()
print('{indent:>5}slide layout name : {sld_layout_name}\n'.
format(
indent='',
sld_layout_name=sld.slide_layout.name))
pptx_tree[file_name]['slides'][slide_number]['slide layout'] = OrderedDict()
slide_layout_name = str(sld.slide_layout.name)
pptx_tree[file_name]['slides'][slide_number]['slide layout']['name'] = slide_layout_name
pptx_tree[file_name]['slides'][slide_number]['shapes'] = OrderedDict()
for x, shp in enumerate(sld.shapes):
print('{indent:>10}shape index - {x}'.
format(
indent='',
x=x))
shape_number = str(x)
pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number] = OrderedDict()
print('{indent:>15}shape name - {shape_name}'.
format(
indent='',
shape_name=shp.name))
shape_name = str(shp.name)
pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number]['shape name'] = shape_name
print('{indent:>15}shape id - {shape_id}'.
format(
indent='',
shape_id=shp.id))
shape_id = str(shp.id)
pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number]['shape id'] = shape_id
print('{indent:>15}shape type - {shape_type}'.
format(
indent='',
shape_type=shp.shape_type))
shape_type = str(shp.shape_type)
pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number]['shape type'] = shape_type
if str(shp.shape_type) == 'PLACEHOLDER (14)':
print('{indent1:>15}placeholder idx - {placeholder_idx},\n'
'{indent2:>15}placeholder type - {placeholder_type}'.
format(
indent1='',
indent2='',
placeholder_idx=shp.placeholder_format.idx,
placeholder_type=shp.placeholder_format.type))
pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number]['placeholder idx'] = str(shp.placeholder_format.idx)
pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number]['placeholder type'] = str(shp.placeholder_format.type)
print('{indent:>15}shape dimensions - '
'left: {shape_left}, top: {shape_top}, '
'height: {shape_height}, width: {shape_width}\n'.
format(
indent='',
shape_left=shp.left,
shape_height=shp.height,
shape_top=shp.top,
shape_width=shp.width))
pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number]['shape dimensions'] = OrderedDict()
pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number]['shape dimensions']['left'] = str(shp.left)
pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number]['shape dimensions']['top'] = str(shp.top)
pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number]['shape dimensions']['width'] = str(shp.width)
pptx_tree[file_name]['slides'][slide_number]['shapes'][shape_number]['shape dimensions']['height'] = str(shp.height)
if save_as_json:
save_json(pptx_tree, file_name+'.json')
print('Finished')
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def read_slide(sld):
'''
Takes a slide object and print info regarding the shapes on the given slide.
'''
for x, shp in enumerate(sld.shapes):
print('{indent:>5}shape index - {x}'.format(indent='', x=x))
print('{indent:>10}shape name - {shape_name}'.format(indent='', shape_name=shp.name))
print('{indent:>15}shape type - {shape_type}'.format(indent='', shape_type=shp.shape_type))
if str(shp.shape_type) == 'PLACEHOLDER (14)':
print('{indent:>15}placeholder idx - {placeholder_idx}, placeholder type - {placeholder_type}'.
format(indent='', placeholder_idx=shp.placeholder_format.idx, placeholder_type=shp.placeholder_format.type))
print('{indent:>15}shape dimensions - left ({shape_left}), top ({shape_top}), height ({shape_height}), width ({shape_width})\n'.
format(indent='', shape_left=shp.left, shape_top=shp.top, shape_height=shp.height, shape_width=shp.width))
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def read_chart_properties(pres_path, slide_num, chart_name):
'''
This function prints a given chart's property settings.
param: pres_path - full path of target file
param: slide_num - single slide number
param: chart_name - object name as it appears within powerpoint's Object Selection Pane
'''
prs = Presentation(pres_path)
for i, sld in enumerate(prs.slides, start=1):
if i == slide_num:
for x, shp in enumerate(sld.shapes):
if shp.name == chart_name:
print('chart >\n')
print(' chart_style: {chart_style}'.format(chart_style=shp.chart.chart_style))
print(' has_legend: {has_legend}'.format(has_legend=shp.chart.has_legend))
print(' legend: {legend}\n'.format(legend=shp.chart.legend))
print('-'*110)
caxis = shp.chart.category_axis
print('chart > category axis properties\n')
print(' has_major_gridlines: {has_major_gridlines}'.format(has_major_gridlines=caxis.has_major_gridlines))
print(' has_minor_gridlines: {has_minor_gridlines}'.format(has_minor_gridlines=caxis.has_minor_gridlines))
print(' major_tick_mark: {major_tick_mark}'.format(major_tick_mark=caxis.major_tick_mark))
print(' maximum_scale: {maximum_scale}'.format(maximum_scale=caxis.maximum_scale))
print(' minimum_scale: {minimum_scale}'.format(minimum_scale=caxis.minimum_scale))
print(' minor_tick_mark: {minor_tick_mark}'.format(minor_tick_mark=caxis.minor_tick_mark))
print(' tick_labels: {tick_labels}'.format(tick_labels=str(caxis.tick_labels)))
print(' tick_label_position: {tick_label_position}'.format(tick_label_position=caxis.tick_label_position))
print(' tick_labels_font_name: {tick_labels_font}'.format(tick_labels_font=caxis.tick_labels.font.name))
print(' tick_labels_font_size: {tick_labels_font}'.format(tick_labels_font=caxis.tick_labels.font.size))
print(' tick_labels_font_bold: {tick_labels_font}'.format(tick_labels_font=caxis.tick_labels.font.bold))
print(' tick_labels_font_color: {tick_labels_font}'.format(tick_labels_font=caxis.tick_labels.font.color))
print(' tick_labels_font_italic: {tick_labels_font}'.format(tick_labels_font=caxis.tick_labels.font.italic))
print(' tick_labels_font_underline: {tick_labels_font}'.format(tick_labels_font=caxis.tick_labels.font.underline))
print(' tick_labels_number_format: {tick_labels_number_format}'.format(tick_labels_number_format=caxis.tick_labels.number_format))
print(' tick_labels_number_format_is_linked: {tick_labels_number_format_is_linked}'.format(tick_labels_number_format_is_linked=caxis.tick_labels.number_format_is_linked))
print(' tick_labels_offset: {tick_labels_offset}'.format(tick_labels_offset=caxis.tick_labels.offset))
print(' visible: {visible}\n'.format(visible=caxis.visible))
print('-'*110)
vaxis = shp.chart.value_axis
print('chart > value axis properties\n')
print(' has_major_gridlines: {has_major_gridlines}'.format(has_major_gridlines=vaxis.has_major_gridlines))
print(' has_minor_gridlines: {has_minor_gridlines}'.format(has_minor_gridlines=vaxis.has_minor_gridlines))
print(' major_tick_mark: {major_tick_mark}'.format(major_tick_mark=vaxis.major_tick_mark))
print(' maximum_scale: {maximum_scale}'.format(maximum_scale=vaxis.maximum_scale))
print(' minimum_scale: {minimum_scale}'.format(minimum_scale=vaxis.minimum_scale))
print(' major_unit: {major_unit}'.format(major_unit=vaxis.major_unit))
print(' minor_unit: {minor_unit}'.format(minor_unit=vaxis.minor_unit))
print(' minor_tick_mark: {minor_tick_mark}'.format(minor_tick_mark=vaxis.minor_tick_mark))
print(' tick_labels: {tick_labels}'.format(tick_labels=vaxis.tick_labels))
print(' tick_label_position: {tick_label_position}'.format(tick_label_position=vaxis.tick_label_position))
print(' tick_labels_font_name: {tick_labels_font}'.format(tick_labels_font=vaxis.tick_labels.font.name))
print(' tick_labels_font_size: {tick_labels_font}'.format(tick_labels_font=vaxis.tick_labels.font.size))
print(' tick_labels_font_bold: {tick_labels_font}'.format(tick_labels_font=vaxis.tick_labels.font.bold))
print(' tick_labels_font_color: {tick_labels_font}'.format(tick_labels_font=vaxis.tick_labels.font.color))
print(' tick_labels_font_italic: {tick_labels_font}'.format(tick_labels_font=vaxis.tick_labels.font.italic))
print(' tick_labels_font_underline: {tick_labels_font}'.format(tick_labels_font=vaxis.tick_labels.font.underline))
print(' tick_labels_font: {tick_labels_font}'.format(tick_labels_font=vaxis.tick_labels.font))
print(' tick_labels_number_format: {tick_labels_number_format}'.format(tick_labels_number_format=vaxis.tick_labels.number_format))
print(' tick_labels_number_format_is_linked: {tick_labels_number_format_is_linked}'.format(tick_labels_number_format_is_linked=vaxis.tick_labels.number_format_is_linked))
print(' visible: {visible}\n'.format(visible=vaxis.visible))
print('-'*110)
for item in shp.chart.plots:
print('chart > plot\n')
print(' plot_categories: {plot_cats}'.format(plot_cats=item.categories))
print(' plot_gap_width: {gap_width}'.format(gap_width=item.gap_width))
print(' has_data_labels: {has_data_labels}'.format(has_data_labels=item.has_data_labels))
print(' overlap: {overlap}'.format(overlap=item.overlap))
print(' vary_by_categories: {vary_by_cat}\n'.format(vary_by_cat=item.vary_by_categories))
print('-'*110)
font = item.data_labels.font
print('chart > plot > data labels > font \n')
print(' data_label_font_name: {font_name}'.format(font_name=font.name))
print(' data_label_font_size: {font_size}'.format(font_size=font.size))
print(' data_label_font_bold: {data_label_font}'.format(data_label_font=font.bold))
print(' data_label_font_color {font_color}'.format(font_color=font.color))
print(' data_label_font_fill {font_fill}'.format(font_fill=font.fill))
print(' data_label_font_italic: {font_italic}'.format(font_italic=font.italic))
print(' data_label_font_underline: {font_underline}\n'.format(font_underline=font.underline))
print('-'*110)
for ser in item.series:
print('chart > plot > series\n')
print(' series_fill_type: {fill_type}'.format(fill_type=ser.fill.type))
print(' series_invert_if_neg: {invert_if_neg}'.format(invert_if_neg=ser.invert_if_negative))
print(' series_line: {line}'.format(line=ser.line))
print(' series_name: {name}'.format(name=ser.name))
print(' series_values: {values}'.format(values=ser.values))
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def get_chart_data_from_prs(pres_path, slide_num, chart_name):
'''
This function 1) pulls a given chart's data and 2) returns it as a pandas dataframe object in a list
param: pres_path - full path of target file
param: slide_num - takes a list of slides
param: chart_name - object name as it appears within powerpoint's Object Selection Pane
'''
prs = Presentation(pres_path)
collection_of_dfs = []
for i, sld in enumerate(prs.slides, start=1):
if i in slide_num:
for x, shp in enumerate(sld.shapes):
if shp.name == chart_name:
plot = shp.chart.plots[0]
columns = []
data = []
for series in plot.series:
columns.append(str(series.name))
data.append(series.values)
data = np.array(data)
rows = np.array(plot.categories)
df = pd.DataFrame(data.T, index=rows, columns=columns)
collection_of_dfs.append(df)
return(collection_of_dfs)
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def replace_chart_data_in_prs(pres_path, slide_num, chart_name, df):
'''
This function 1) enters an existing powerpoint, 2) finds given slides, 3) finds given chart by name and
4) replaces the given chart's underlying data with new data in the form of a dataframe.
param: pres_path - takes the full path of target file
param: slide_num - takes a list of slides
param: chart_name - object name as it appears within powerpoint's Object Selection Pane
param: df - takes a list of pandas dataframe objects
'''
PRES_FOLDER_FOLDER = dirname(pres_path)
PRES_NAME = basename(pres_path).replace('.pptx','')
prs = Presentation(pres_path)
loop_counter=0
for i, sld in enumerate(prs.slides, start=1):
if i in slide_num:
for x, shp in enumerate(sld.shapes):
if shp.name == chart_name:
single_df = df[loop_counter]
chart_data = ChartData()
chart_data.categories = single_df.index
for col_idx, col in enumerate(single_df.columns):
chart_data.add_series(col, (single_df.ix[:, col_idx].values))
shp.chart.replace_data(chart_data)
loop_counter+=1
prs.save('{pres_path}\\{pres_name}_edited.pptx'.format(
pres_path=PRES_FOLDER_FOLDER,
pres_name=PRES_NAME))
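# Illustrative usage of the two chart-data helpers above (the file path, slide number and
# chart name below are placeholders, not real files):
#
#   dfs = get_chart_data_from_prs('C:\\decks\\report.pptx', [3], 'Chart 1')
#   updated = [df * 1.05 for df in dfs]
#   replace_chart_data_in_prs('C:\\decks\\report.pptx', [3], 'Chart 1', updated)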
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def get_slide_layout_names(pptx):
'''
Print slide layout names
'''
for i, slide_layout in enumerate(pptx.slide_layouts):
print(slide_layout.name)
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def return_slide_layout_by_name(pptx, slide_layout_name):
'''
Loop over the slide layout object and find slide layout by name, return slide layout
object.
    example: myslide = return_slide_layout_by_name(prs, 'Inhaltsverzeichnis')
slide = prs.slides.add_slide(myslide)
'''
for slide_layout in pptx.slide_layouts:
if slide_layout.name == slide_layout_name:
return slide_layout
    raise Exception(
        ('Slide layout: {sld_layout} not found\n').format(
            sld_layout=slide_layout_name))
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def get_chart_data(shape):
plot = shape.chart.plots[0]
columns = []
data = []
for series in plot.series:
columns.append(series.name)
data.append(series.values)
data = np.array(data)
rows = np.array(plot.categories)
df = pd.DataFrame(data.T, index=rows, columns=columns)
return df
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def get_chart_data_temp(shape):
plot = shape.chart.plots[0]
series_names =[]
data = []
for series in plot.series:
series_names.append(series.name)
data.append(series.values)
cols = plot.categories
df = pd.DataFrame(data, index=series_names, columns=cols)
return df
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def replace_chart_data(shape, df):
chart_data = ChartData()
chart_data.categories = df.index
for col_idx, col in enumerate(df.columns):
chart_data.add_series(col, (df.ix[:, col_idx].values))
shape.chart.replace_data(chart_data)
return shape
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def get_slide(pptx, slide_num):
''''
active slides are slides which exist in the VIEW mode,
not in slide master.
'''
return pptx.slides[slide_num]
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def get_shape(slide_num, shape_name):
for shp in slide_num.shapes:
if shp.name == shape_name:
if shp.is_placeholder:
p_idx = shp.placeholder_format.idx
shp = slide_num.placeholders[p_idx]
return shp
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def copy_txtbox_properties(shp_copy_from, shp_copy_to):
'''
Copies over one textbox's properties to another.
'''
# get original slide's shapes dimensions
left = shp_copy_from.left
top = shp_copy_from.top
width = shp_copy_from.width
height = shp_copy_from.height
# access textframe property for both original and replica shapes
txtframe_ori = shp_copy_from.text_frame
txtframe_rep = shp_copy_to.text_frame
# transfer textbox setters from original to replica at textbox level
txtframe_rep.margin_bottom = txtframe_ori.margin_bottom
txtframe_rep.margin_left = txtframe_ori.margin_left
txtframe_rep.margin_right = txtframe_ori.margin_right
txtframe_rep.margin_top = txtframe_ori.margin_top
txtframe_rep.vertical_anchor = txtframe_ori.vertical_anchor
txtframe_rep.word_wrap = txtframe_ori.word_wrap
txtframe_rep.paragraphs[0].text = txtframe_ori.paragraphs[0].text
txtframe_rep.paragraphs[0].alignment = txtframe_ori.paragraphs[0].alignment
# color textboxes accordingly
try:
color_code = str(shp_copy_from.fill.fore_color.rgb)
txfill = shp_copy_to.fill
txfill.solid()
txfill.fore_color.rgb = RGBColor.from_string(color_code)
except:
pass
# get font size and transfer it to replica shapes
for paragraph in txtframe_ori.paragraphs:
for run in paragraph.runs:
font = run.font
try:
font_size = font.size.pt
t = txtframe_rep.paragraphs[0]
t.font.size = Pt(font_size)
except:
pass
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def copy_chart_properties(shp_copy_from, sld_rep):
original_shapes_chart_type = str(shp_copy_from.chart.chart_type).split(" ")[0]
df = get_chart_data(shp_copy_from)
#--------------------------------------------------------------------
add_bar_chart(
sld=sld_rep, dataframe=df,
left=shp_copy_from.left, top=shp_copy_from.top, width=shp_copy_from.width, height=shp_copy_from.height,
chart_style=shp_copy_from.chart.chart_style,
has_legend=shp_copy_from.chart.has_legend,
legend_position='right',
legend_in_layout=False,
legend_horz_offset = 0.1583,
legend_font_name="Calibri",
legend_font_size=10,
legend_font_bold=False,
legend_font_italic=False,
legend_font_color=(89,89,89),
legend_font_brightness=0,
caxis_visible=True,
caxis_tick_label_position='none',
caxis_tick_labels_offset=730,
caxis_has_major_gridlines=shp_copy_from.chart.category_axis.has_major_gridlines,
caxis_has_minor_gridlines=False,
caxis_major_tick_mark='outside',
caxis_minor_tick_mark='none',
caxis_tick_labels_font_name="Calibri",
caxis_tick_labels_font_size=10,
caxis_tick_labels_font_bold=False,
caxis_tick_labels_font_italic=False,
caxis_tick_labels_font_color=(89,89,89),
vaxis_visible=shp_copy_from.chart.value_axis.visible,
vaxis_tick_label_position='low',
vaxis_has_major_gridlines=True,
vaxis_has_minor_gridlines=False,
vaxis_major_tick_mark='outside',
vaxis_minor_tick_mark='none',
vaxis_max_scale=100.0,
vaxis_min_scale=0,
vaxis_major_unit=10,
vaxis_minor_unit=None,
vaxis_tick_labels_num_format='0"%"',
vaxis_tick_labels_font_name="Calibri",
vaxis_tick_labels_font_bold=True,
vaxis_tick_labels_font_size=10,
vaxis_tick_labels_font_italic=False,
vaxis_tick_labels_font_color=(89,89,89),
plot_has_data_labels=True,
data_labels_position='outside_end',
data_labels_num_format='0"%"',
data_labels_num_format_is_linked=False,
data_labels_font_name="Calibri",
data_labels_font_size=9,
data_labels_font_bold=False,
data_labels_font_italic=False,
data_labels_font_color=(0,0,0),
plot_vary_by_cat=False,
series_color_order='reverse',
invert_series_color_if_negative=False,
plot_gap_width=150,
plot_overlap=-10
) | mit |
olologin/scikit-learn | sklearn/utils/tests/test_estimator_checks.py | 69 | 3894 | import scipy.sparse as sp
import numpy as np
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.testing import assert_raises_regex, assert_true
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.estimator_checks import check_estimators_unfitted
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import MultiTaskElasticNet
from sklearn.utils.validation import check_X_y, check_array
class CorrectNotFittedError(ValueError):
"""Exception class to raise if estimator is used before fitting.
Like NotFittedError, it inherits from ValueError, but not from
AttributeError. Used for testing only.
"""
class BaseBadClassifier(BaseEstimator, ClassifierMixin):
def fit(self, X, y):
return self
def predict(self, X):
return np.ones(X.shape[0])
class NoCheckinPredict(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
return self
class NoSparseClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc'])
if sp.issparse(X):
raise ValueError("Nonsensical Error")
return self
def predict(self, X):
X = check_array(X)
return np.ones(X.shape[0])
class CorrectNotFittedErrorClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
self.coef_ = np.ones(X.shape[1])
return self
def predict(self, X):
if not hasattr(self, 'coef_'):
raise CorrectNotFittedError("estimator is not fitted yet")
X = check_array(X)
return np.ones(X.shape[0])
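# Illustrative sketch (not part of the original test module): a classifier
# written with the fit/predict validation pattern these checks exercise --
# check_X_y in fit, trailing-underscore attributes, check_array in predict.
# It is a minimal example only and is not claimed to pass every check.
class MinimalOkClassifier(BaseEstimator, ClassifierMixin):
    def fit(self, X, y):
        X, y = check_X_y(X, y)
        self.classes_ = np.unique(y)
        self.coef_ = np.ones(X.shape[1])
        return self
    def predict(self, X):
        X = check_array(X)
        # constant prediction: always the first class seen during fit
        return np.repeat(self.classes_[0], X.shape[0])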
def test_check_estimator():
# tests that the estimator actually fails on "bad" estimators.
# not a complete test of all checks, which are very extensive.
# check that we have a set_params and can clone
msg = "it does not implement a 'get_params' methods"
assert_raises_regex(TypeError, msg, check_estimator, object)
# check that we have a fit method
msg = "object has no attribute 'fit'"
assert_raises_regex(AttributeError, msg, check_estimator, BaseEstimator)
# check that fit does input validation
msg = "TypeError not raised by fit"
assert_raises_regex(AssertionError, msg, check_estimator, BaseBadClassifier)
# check that predict does input validation (doesn't accept dicts in input)
msg = "Estimator doesn't check for NaN and inf in predict"
assert_raises_regex(AssertionError, msg, check_estimator, NoCheckinPredict)
# check for sparse matrix input handling
name = NoSparseClassifier.__name__
msg = "Estimator " + name + " doesn't seem to fail gracefully on sparse data"
# the check for sparse input handling prints to the stdout,
# instead of raising an error, so as not to remove the original traceback.
# that means we need to jump through some hoops to catch it.
old_stdout = sys.stdout
string_buffer = StringIO()
sys.stdout = string_buffer
try:
check_estimator(NoSparseClassifier)
except:
pass
finally:
sys.stdout = old_stdout
assert_true(msg in string_buffer.getvalue())
# doesn't error on actual estimator
check_estimator(AdaBoostClassifier)
check_estimator(MultiTaskElasticNet)
def test_check_estimators_unfitted():
# check that a ValueError/AttributeError is raised when calling predict
# on an unfitted estimator
msg = "AttributeError or ValueError not raised by predict"
assert_raises_regex(AssertionError, msg, check_estimators_unfitted,
"estimator", NoSparseClassifier)
# check that CorrectNotFittedError inherit from either ValueError
# or AttributeError
check_estimators_unfitted("estimator", CorrectNotFittedErrorClassifier)
| bsd-3-clause |
gnu-sandhi/sandhi | modules/gr36/gnuradio-core/src/examples/pfb/resampler.py | 17 | 4193 | #!/usr/bin/env python
#
# Copyright 2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, blks2
import sys
try:
import scipy
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
class mytb(gr.top_block):
def __init__(self, fs_in, fs_out, fc, N=10000):
gr.top_block.__init__(self)
rerate = float(fs_out) / float(fs_in)
print "Resampling from %f to %f by %f " %(fs_in, fs_out, rerate)
# Creating our own taps
taps = gr.firdes.low_pass_2(32, 32, 0.25, 0.1, 80)
self.src = gr.sig_source_c(fs_in, gr.GR_SIN_WAVE, fc, 1)
#self.src = gr.noise_source_c(gr.GR_GAUSSIAN, 1)
self.head = gr.head(gr.sizeof_gr_complex, N)
# A resampler with our taps
self.resamp_0 = blks2.pfb_arb_resampler_ccf(rerate, taps,
flt_size=32)
# A resampler that just needs a resampling rate.
# Filter is created for us and designed to cover
# entire bandwidth of the input signal.
# An optional atten=XX rate can be used here to
# specify the out-of-band rejection (default=80).
self.resamp_1 = blks2.pfb_arb_resampler_ccf(rerate)
self.snk_in = gr.vector_sink_c()
self.snk_0 = gr.vector_sink_c()
self.snk_1 = gr.vector_sink_c()
self.connect(self.src, self.head, self.snk_in)
self.connect(self.head, self.resamp_0, self.snk_0)
self.connect(self.head, self.resamp_1, self.snk_1)
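# Illustrative helper (added for clarity, not in the original example): the
# arbitrary resampler changes the rate by rerate = fs_out/fs_in, so with the
# demo defaults (8 kHz -> 20 kHz, rerate = 2.5) N input samples give roughly
# 2.5*N output samples; filter transients shift the exact count slightly.
def expected_output_samples(n_in, fs_in, fs_out):
    return int(round(n_in * float(fs_out) / float(fs_in)))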
def main():
fs_in = 8000
fs_out = 20000
fc = 1000
N = 10000
tb = mytb(fs_in, fs_out, fc, N)
tb.run()
# Plot PSD of signals
nfftsize = 2048
fig1 = pylab.figure(1, figsize=(10,10), facecolor="w")
sp1 = fig1.add_subplot(2,1,1)
sp1.psd(tb.snk_in.data(), NFFT=nfftsize,
noverlap=nfftsize/4, Fs = fs_in)
sp1.set_title(("Input Signal at f_s=%.2f kHz" % (fs_in/1000.0)))
sp1.set_xlim([-fs_in/2, fs_in/2])
sp2 = fig1.add_subplot(2,1,2)
sp2.psd(tb.snk_0.data(), NFFT=nfftsize,
noverlap=nfftsize/4, Fs = fs_out,
label="With our filter")
sp2.psd(tb.snk_1.data(), NFFT=nfftsize,
noverlap=nfftsize/4, Fs = fs_out,
label="With auto-generated filter")
sp2.set_title(("Output Signals at f_s=%.2f kHz" % (fs_out/1000.0)))
sp2.set_xlim([-fs_out/2, fs_out/2])
sp2.legend()
# Plot signals in time
Ts_in = 1.0/fs_in
Ts_out = 1.0/fs_out
t_in = scipy.arange(0, len(tb.snk_in.data())*Ts_in, Ts_in)
t_out = scipy.arange(0, len(tb.snk_0.data())*Ts_out, Ts_out)
fig2 = pylab.figure(2, figsize=(10,10), facecolor="w")
sp21 = fig2.add_subplot(2,1,1)
sp21.plot(t_in, tb.snk_in.data())
sp21.set_title(("Input Signal at f_s=%.2f kHz" % (fs_in/1000.0)))
sp21.set_xlim([t_in[100], t_in[200]])
sp22 = fig2.add_subplot(2,1,2)
sp22.plot(t_out, tb.snk_0.data(),
label="With our filter")
sp22.plot(t_out, tb.snk_1.data(),
label="With auto-generated filter")
sp22.set_title(("Output Signals at f_s=%.2f kHz" % (fs_out/1000.0)))
r = float(fs_out)/float(fs_in)
    sp22.set_xlim([t_out[int(r * 100)], t_out[int(r * 200)]])
sp22.legend()
pylab.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
xuewei4d/scikit-learn | examples/decomposition/plot_faces_decomposition.py | 20 | 7250 | """
============================
Faces dataset decompositions
============================
This example applies to :ref:`olivetti_faces_dataset` different unsupervised
matrix decomposition (dimension reduction) methods from the module
:py:mod:`sklearn.decomposition` (see the documentation chapter
:ref:`decompositions`) .
"""
print(__doc__)
# Authors: Vlad Niculae, Alexandre Gramfort
# License: BSD 3 clause
import logging
from time import time
from numpy.random import RandomState
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.cluster import MiniBatchKMeans
from sklearn import decomposition
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
n_row, n_col = 2, 3
n_components = n_row * n_col
image_shape = (64, 64)
rng = RandomState(0)
# #############################################################################
# Load faces data
faces, _ = fetch_olivetti_faces(return_X_y=True, shuffle=True,
random_state=rng)
n_samples, n_features = faces.shape
# global centering
faces_centered = faces - faces.mean(axis=0)
# local centering
faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)
print("Dataset consists of %d faces" % n_samples)
def plot_gallery(title, images, n_col=n_col, n_row=n_row, cmap=plt.cm.gray):
plt.figure(figsize=(2. * n_col, 2.26 * n_row))
plt.suptitle(title, size=16)
for i, comp in enumerate(images):
plt.subplot(n_row, n_col, i + 1)
vmax = max(comp.max(), -comp.min())
plt.imshow(comp.reshape(image_shape), cmap=cmap,
interpolation='nearest',
vmin=-vmax, vmax=vmax)
plt.xticks(())
plt.yticks(())
plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)
# #############################################################################
# List of the different estimators, whether to center and transpose the
# problem, and whether the transformer uses the clustering API.
estimators = [
('Eigenfaces - PCA using randomized SVD',
decomposition.PCA(n_components=n_components, svd_solver='randomized',
whiten=True),
True),
('Non-negative components - NMF',
decomposition.NMF(n_components=n_components, init='nndsvda', tol=5e-3),
False),
('Independent components - FastICA',
decomposition.FastICA(n_components=n_components, whiten=True),
True),
('Sparse comp. - MiniBatchSparsePCA',
decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8,
n_iter=100, batch_size=3,
random_state=rng),
True),
('MiniBatchDictionaryLearning',
decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
n_iter=50, batch_size=3,
random_state=rng),
True),
('Cluster centers - MiniBatchKMeans',
MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20,
max_iter=50, random_state=rng),
True),
('Factor Analysis components - FA',
decomposition.FactorAnalysis(n_components=n_components, max_iter=20),
True),
]
# #############################################################################
# Plot a sample of the input data
plot_gallery("First centered Olivetti faces", faces_centered[:n_components])
# #############################################################################
# Do the estimation and plot it
for name, estimator, center in estimators:
print("Extracting the top %d %s..." % (n_components, name))
t0 = time()
data = faces
if center:
data = faces_centered
estimator.fit(data)
train_time = (time() - t0)
print("done in %0.3fs" % train_time)
if hasattr(estimator, 'cluster_centers_'):
components_ = estimator.cluster_centers_
else:
components_ = estimator.components_
# Plot an image representing the pixelwise variance provided by the
# estimator e.g its noise_variance_ attribute. The Eigenfaces estimator,
# via the PCA decomposition, also provides a scalar noise_variance_
# (the mean of pixelwise variance) that cannot be displayed as an image
# so we skip it.
if (hasattr(estimator, 'noise_variance_') and
estimator.noise_variance_.ndim > 0): # Skip the Eigenfaces case
plot_gallery("Pixelwise variance",
estimator.noise_variance_.reshape(1, -1), n_col=1,
n_row=1)
plot_gallery('%s - Train time %.1fs' % (name, train_time),
components_[:n_components])
plt.show()
# #############################################################################
# Various positivity constraints applied to dictionary learning.
estimators = [
('Dictionary learning',
decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
n_iter=50, batch_size=3,
random_state=rng),
True),
('Dictionary learning - positive dictionary',
decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
n_iter=50, batch_size=3,
random_state=rng,
positive_dict=True),
True),
('Dictionary learning - positive code',
decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
n_iter=50, batch_size=3,
fit_algorithm='cd',
random_state=rng,
positive_code=True),
True),
('Dictionary learning - positive dictionary & code',
decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
n_iter=50, batch_size=3,
fit_algorithm='cd',
random_state=rng,
positive_dict=True,
positive_code=True),
True),
]
# #############################################################################
# Plot a sample of the input data
plot_gallery("First centered Olivetti faces", faces_centered[:n_components],
cmap=plt.cm.RdBu)
# #############################################################################
# Do the estimation and plot it
for name, estimator, center in estimators:
print("Extracting the top %d %s..." % (n_components, name))
t0 = time()
data = faces
if center:
data = faces_centered
estimator.fit(data)
train_time = (time() - t0)
print("done in %0.3fs" % train_time)
components_ = estimator.components_
plot_gallery(name, components_[:n_components], cmap=plt.cm.RdBu)
plt.show()
| bsd-3-clause |
wukan1986/kquant_data | kquant_data/xio/h5.py | 1 | 3009 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Saving data (HDF5 helpers)
"""
import h5py
import numpy as np
import pandas as pd
def write_dataframe_set_struct_keep_head(path, data, dtype, dateset_name):
"""
    Save DataFrame data as a structured (record) array.
    The column headers are kept.
    Useful for storing K-line (OHLC) bars, dividend/split adjustments and similar data.
:param path:
:param data:
:param dtype:
:param dateset_name:
:return:
"""
f = h5py.File(path, 'w')
r = data.to_records(index=False)
d = np.array(r, dtype=dtype)
f.create_dataset(dateset_name, data=d, compression="gzip", compression_opts=6)
f.close()
return
def write_dataframe_set_dtype_remove_head(path, data, dtype, dataset_name):
"""
    Every cell shares the same data type.
    Forcing the dtype keeps the file size smaller.
    The column headers are not saved.
:param path:
:param data:
:param dtype:
:param dateset_name:
:return:
"""
f = h5py.File(path, 'w')
if dtype is None:
f.create_dataset(dataset_name, data=data.as_matrix(), compression="gzip", compression_opts=6)
else:
f.create_dataset(dataset_name, data=data, compression="gzip", compression_opts=6, dtype=dtype)
f.close()
return
def read_h5(path, dateset_name):
"""
    Read simple data back from the file.
    If the stored data has a header it comes back as a DataFrame, otherwise as an array.
:param path:
:param dateset_name:
:return:
"""
f = h5py.File(path, 'r')
d = f[dateset_name][:]
f.close()
return d
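# Illustrative round trip (added for clarity, not part of the original module):
# store a small float matrix without its header and read the raw array back.
# The file name 'demo_matrix.h5' and dataset name 'data' are arbitrary examples.
def _example_headerless_roundtrip():
    df = pd.DataFrame(np.random.rand(4, 3), columns=list('abc'))
    write_dataframe_set_dtype_remove_head('demo_matrix.h5', df, np.float32, 'data')
    arr = read_h5('demo_matrix.h5', 'data')  # plain ndarray, header dropped
    return arr.shape  # (4, 3)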
def write_dataframe_with_index_columns(path, df, values_dtype, datetime_func):
"""
    Save a DataFrame in three parts: time index, column headers and values.
    Because the time axis may differ from the default one, this method stores one table per file.
:param path:
:param df:
:param values_dtype:
:param columns_dtype:
:return:
"""
f = h5py.File(path, 'w')
f.create_dataset('values', data=df.as_matrix(), compression="gzip", compression_opts=6, dtype=values_dtype)
index = df.index.map(datetime_func)
f.create_dataset('index', data=index, compression="gzip", compression_opts=6, dtype=np.int64)
    # Only fixed-length byte strings can be stored; variable-length ones make the file unreadable in external tools.
columns = list(df.columns.map(lambda x: np.string_(x)))
# f.create_dataset('columns', data=columns, dtype=columns_dtype)
tid = h5py.h5t.C_S1.copy()
tid.set_size(32)
H5T_C_S1_32 = h5py.Datatype(tid)
f.create_dataset('columns', data=columns, compression="gzip", compression_opts=6, dtype=H5T_C_S1_32)
f.close()
return
def read_dataframe_with_index_columns(path, datetime_func):
"""
    Read the values back together with the column headers and the index.
:param path:
:return:
"""
f = h5py.File(path, 'r')
values = f['values'][:]
index = f['index'][:]
columns = f['columns'][:]
columns = columns.astype(str)
df = pd.DataFrame(values, index=index, columns=columns)
df.index = df.index.map(datetime_func)
f.close()
return df
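# Illustrative round trip (added for clarity, not part of the original module):
# save a DataFrame with a datetime index and load it back. The datetime_func
# callables convert between pandas Timestamps and the int64 values stored in
# the file; file name and column labels are arbitrary examples, and the legacy
# pandas API (as_matrix) that this module targets is assumed.
def _example_indexed_roundtrip():
    idx = pd.date_range('2017-01-01', periods=5, freq='D')
    df = pd.DataFrame(np.random.rand(5, 2), index=idx, columns=['open', 'close'])
    write_dataframe_with_index_columns('demo_table.h5', df, np.float64,
                                       datetime_func=lambda ts: ts.value)
    out = read_dataframe_with_index_columns('demo_table.h5',
                                            datetime_func=pd.Timestamp)
    return out.shape  # (5, 2), columns restored from fixed-width byte strings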
| bsd-2-clause |
ebilionis/py-orthpol | demos/demo3.py | 2 | 2221 | """
Same as demo1.py, but generates the Chebyshev polynomials.
This demo demonstrates how to:
+ Construct a set of orthogonal univariate polynomials given a weight
function.
+ Examine certain properties of a univariate polynomial.
+ Evaluate the polynomials at one or more points.
+ Evaluate the derivatives of the polynomials at one or more points.
Author:
Ilias Bilionis
Date:
3/18/2014
"""
import orthpol
import math
import numpy as np
import matplotlib.pyplot as plt
# The desired degree
degree = 4
# The first way of doing it is by directly supplying the weight function.
# Chebyshev polynomials of the first kind are orthogonal w.r.t. w(x) = 1 / sqrt(1 - x^2) on [-1, 1].
wf = lambda x: 1. / np.sqrt(1. - x ** 2)
# Construct it:
p = orthpol.OrthogonalPolynomial(degree,
left=-1., right=1., # Domain
wf=wf)
# An orthogonal polynomial is though of as a function.
# Here is how to get the number of inputs and outputs of that function
print 'Number of inputs:', p.num_input
print 'Number of outputs:', p.num_output
# Test if the polynomials are normalized (i.e., their norm is 1.):
print 'Is normalized:', p.is_normalized
# Get the degree of the polynomial:
print 'Polynomial degree:', p.degree
# Get the alpha-beta recursion coefficients:
print 'Alpha:', p.alpha
print 'Beta:', p.beta
# The following should print a description of the polynomial
print str(p)
# Now you can evaluate the polynomial at any points you want:
X = np.linspace(-1., 1., 100)
# Here is the actual evaluation
phi = p(X)
# Phi should be a 100x5 matrix (degree + 1 columns): phi[i, j] = p_j(X[i])
# Let's plot them
plt.plot(X, phi)
plt.title('Chebyshev Polynomials', fontsize=16)
plt.xlabel('$x$', fontsize=16)
plt.ylabel('$p_i(x)$', fontsize=16)
plt.legend(['$p_{%d}(x)$' % i for i in range(p.num_output)], loc='best')
print 'Close the window to continue...'
plt.show()
# You may also compute the derivatives of the polynomials:
dphi = p.d(X)
# Let's plot them also
plt.plot(X, dphi)
plt.title('Derivatives of Chebyshev Polynomials', fontsize=16)
plt.xlabel('$x$', fontsize=16)
plt.ylabel(r'$\frac{dp_i(x)}{dx}$', fontsize=16)
plt.legend([r'$\frac{p_{%d}(x)}{dx}$' % i for i in range(p.num_output)], loc='best')
print 'Close the window to end demo...'
plt.show()
| lgpl-2.1 |
dtsukiyama/suits | named-entity-recognizer/ner.py | 1 | 4906 | # spacy 1.9
from __future__ import unicode_literals, print_function
from pathlib import Path
import random
import os
import timeit
import lxml.html
import spacy
from spacy.gold import GoldParse
from spacy.tagger import Tagger
from spacy.attrs import IS_PUNCT, LOWER
import pandas as pd
import requests
import json
import numpy as np
from elasticsearch import Elasticsearch
from elasticsearch import helpers
from helper import removeNonAscii
nlp = spacy.load('en')
es = Elasticsearch([{'host': 'localhost', 'port': 9200}])
res = requests.get('http://localhost:9200')
class entityTraining(object):
def __init__(self, index_name, text_column):
self.index_name = index_name
self.text_column = text_column
def termSearch(self, query, verify=True):
res = es.search(index=self.index_name,
size=100,
body={'query': {'match_phrase': {self.text_column: query,
}
}}
)
hits = res['hits']['total']
if not verify:
return hits
else:
print("Query: {}. Entries this terms shows up {} times".format(query, hits))
def termVerify(self, terms):
counts = []
for term in terms:
counts.append(self.termSearch(removeNonAscii(term), verify=False))
verified = list(zip(terms, counts))
return [b[0] for b in verified if b[1] >= 5]
def textHighlights(self, query, samples = 500):
"""
Args: search query and number of results
Returns: clean lower cased sentences
"""
res = es.search(index=self.index_name,
size=samples,
body={'query': {'match_phrase': {self.text_column: query,
}
}}
)
res = [b['_source'][self.text_column] for b in res['hits']['hits']]
res = [b.lstrip().lower() for b in res]
return res
def annotateTraining(self, sentences, term, name):
"""
Args: clean lower cased sentences, skills, entity category
Returns: list of annotated sentences
"""
train = []
for sentence in sentences:
train.append([(int(sentence.index(term)), int(sentence.index(term) + len(term)), name)])
return zip(sentences, train)
def createTraining(self, sentences, terms, entity_type):
"""
Args: sentences from create_all_corpus
Returns: annotated sentences for use in ner training
"""
train = []
for b in range(len(terms)):
print("Index: {}. {}: {}".format(b, entity_type, terms[b]))
train.extend(self.annotateTraining(sentences[b], terms[b], entity_type))
return train
def createAllCorpus(self, terms, samples = 10):
"""
Args: skills or skill_counts tuples, curated boolean
Returns: list of sentences from search query
"""
sentences = []
for term in terms:
sentences.append(self.textHighlights(term, samples=samples))
return sentences
def trainNer(self, nlp, train_data, model_name, iterations):
# Add new words to vocab
for raw_text, _ in train_data:
doc = nlp.make_doc(raw_text)
for word in doc:
_ = nlp.vocab[word.orth]
random.seed(0)
# You may need to change the learning rate.
nlp.entity.model.learn_rate = 0.001
for itn in range(iterations):
start = timeit.default_timer()
random.shuffle(train_data)
loss = 0.
for raw_text, entity_offsets in train_data:
doc = nlp.make_doc(raw_text)
gold = GoldParse(doc, entities=entity_offsets)
nlp.tagger(doc)
loss += nlp.entity.update(doc, gold, drop=0.5)
if loss == 0:
break
end = timeit.default_timer()-start
print("Iteration {} complete in {} minutes".format(itn, np.round(end/60, 4)))
nlp.end_training()
if not os.path.exists('models/'+model_name):
os.makedirs('models/'+model_name)
nlp.save_to_directory('models/'+model_name)
def buildNer(self, terms, samples, iterations, entity_name):
#terms = self.termVerify(terms)
sentences = self.createAllCorpus(terms, samples)
train = self.createTraining(sentences, terms, entity_name)
start = timeit.default_timer()
nlp.entity.add_label(entity_name)
self.trainNer(nlp, train, self.index_name, iterations)
end = timeit.default_timer()-start
print("Ner training complete in {} minutes".format(end/60))
| mit |
rohit21122012/DCASE2013 | runs/2016/dnn2016verylarge/dnn6.py | 9 | 32027 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# DCASE 2016::Acoustic Scene Classification / Baseline System
import sys  # used below for sys.exit / sys.argv in __main__
#sys.path.insert(0, '../')
from src.ui import *
from src.general import *
from src.files import *
from src.features import *
from src.dataset import *
from src.evaluation import *
import numpy
import csv
import argparse
import textwrap
from sklearn.metrics import confusion_matrix
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import timeit
from sklearn.externals import joblib
from sklearn import preprocessing as pp
from sklearn import mixture
from sklearn.svm import SVC
import skflow
__version_info__ = ('1', '0', '0')
__version__ = '.'.join(__version_info__)
final_result = {}
def main(argv):
matplotlib.use('Agg')
start = timeit.default_timer()
numpy.random.seed(123456) # let's make randomization predictable
parser = argparse.ArgumentParser(
prefix_chars='-+',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''\
DCASE 2016
Task 1: Acoustic Scene Classification
Baseline system
---------------------------------------------
Tampere University of Technology / Audio Research Group
Author: Toni Heittola ( [email protected] )
System description
                This is a baseline implementation for the D-CASE 2016 challenge acoustic scene classification task.
                Features: MFCC (static+delta+acceleration)
                Classifier: DNN (skflow / TensorFlow)
'''))
# Setup argument handling
parser.add_argument("-development", help="Use the system in the development mode", action='store_true',
default=False, dest='development')
parser.add_argument("-challenge", help="Use the system in the challenge mode", action='store_true',
default=False, dest='challenge')
parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + __version__)
args = parser.parse_args()
# Load parameters from config file
params = load_parameters('dnn6.yaml')
params = process_parameters(params)
title("DCASE 2016::Acoustic Scene Classification / Baseline System")
# Check if mode is defined
if not (args.development or args.challenge):
args.development = True
args.challenge = False
dataset_evaluation_mode = 'folds'
if args.development and not args.challenge:
print "Running system in development mode"
dataset_evaluation_mode = 'folds'
elif not args.development and args.challenge:
print "Running system in challenge mode"
dataset_evaluation_mode = 'full'
# Get dataset container class
dataset = eval(params['general']['development_dataset'])(data_path=params['path']['data'])
# Fetch data over internet and setup the data
# ==================================================
if params['flow']['initialize']:
dataset.fetch()
# Extract features for all audio files in the dataset
# ==================================================
if params['flow']['extract_features']:
section_header('Feature extraction')
# Collect files in train sets
files = []
for fold in dataset.folds(mode=dataset_evaluation_mode):
for item_id, item in enumerate(dataset.train(fold)):
if item['file'] not in files:
files.append(item['file'])
for item_id, item in enumerate(dataset.test(fold)):
if item['file'] not in files:
files.append(item['file'])
files = sorted(files)
# Go through files and make sure all features are extracted
do_feature_extraction(files=files,
dataset=dataset,
feature_path=params['path']['features'],
params=params['features'],
overwrite=params['general']['overwrite'])
foot()
# Prepare feature normalizers
# ==================================================
if params['flow']['feature_normalizer']:
section_header('Feature normalizer')
do_feature_normalization(dataset=dataset,
feature_normalizer_path=params['path']['feature_normalizers'],
feature_path=params['path']['features'],
dataset_evaluation_mode=dataset_evaluation_mode,
overwrite=params['general']['overwrite'])
foot()
# System training
# ==================================================
if params['flow']['train_system']:
section_header('System training')
do_system_training(dataset=dataset,
model_path=params['path']['models'],
feature_normalizer_path=params['path']['feature_normalizers'],
feature_path=params['path']['features'],
classifier_params=params['classifier']['parameters'],
classifier_method=params['classifier']['method'],
dataset_evaluation_mode=dataset_evaluation_mode,
overwrite=params['general']['overwrite']
)
foot()
# System evaluation in development mode
if args.development and not args.challenge:
# System testing
# ==================================================
if params['flow']['test_system']:
section_header('System testing')
do_system_testing(dataset=dataset,
feature_path=params['path']['features'],
result_path=params['path']['results'],
model_path=params['path']['models'],
feature_params=params['features'],
dataset_evaluation_mode=dataset_evaluation_mode,
classifier_method=params['classifier']['method'],
overwrite=params['general']['overwrite']
)
foot()
plot_name = params['classifier']['method']
# System evaluation
# ==================================================
if params['flow']['evaluate_system']:
section_header('System evaluation')
#plot_name = params['classifier']['method'] + str(params['classifier']['parameters']['n_components'])
do_system_evaluation(dataset=dataset,
dataset_evaluation_mode=dataset_evaluation_mode,
result_path=params['path']['results'],
plot_name=plot_name)
foot()
# System evaluation with challenge data
elif not args.development and args.challenge:
# Fetch data over internet and setup the data
challenge_dataset = eval(params['general']['challenge_dataset'])()
if params['flow']['initialize']:
challenge_dataset.fetch()
# System testing
if params['flow']['test_system']:
section_header('System testing with challenge data')
do_system_testing(dataset=challenge_dataset,
feature_path=params['path']['features'],
result_path=params['path']['challenge_results'],
model_path=params['path']['models'],
feature_params=params['features'],
dataset_evaluation_mode=dataset_evaluation_mode,
classifier_method=params['classifier']['method'],
overwrite=True
)
foot()
print " "
print "Your results for the challenge data are stored at ["+params['path']['challenge_results']+"]"
print " "
end = timeit.default_timer()
print " "
print "Total Time : " + str(end-start)
print " "
final_result['time'] = end-start
joblib.dump(final_result, 'result' + plot_name + '.pkl')
return 0
def process_parameters(params):
"""Parameter post-processing.
Parameters
----------
params : dict
parameters in dict
Returns
-------
params : dict
processed parameters
"""
# Convert feature extraction window and hop sizes seconds to samples
params['features']['mfcc']['win_length'] = int(params['features']['win_length_seconds'] * params['features']['fs'])
params['features']['mfcc']['hop_length'] = int(params['features']['hop_length_seconds'] * params['features']['fs'])
# Copy parameters for current classifier method
params['classifier']['parameters'] = params['classifier_parameters'][params['classifier']['method']]
# Hash
params['features']['hash'] = get_parameter_hash(params['features'])
params['classifier']['hash'] = get_parameter_hash(params['classifier'])
# Paths
params['path']['features'] = os.path.join(params['path']['base'], params['path']['features'],
params['features']['hash'])
params['path']['feature_normalizers'] = os.path.join(params['path']['base'], params['path']['feature_normalizers'],
params['features']['hash'])
params['path']['models'] = os.path.join(params['path']['base'], params['path']['models'],
params['features']['hash'], params['classifier']['hash'])
params['path']['results'] = os.path.join(params['path']['base'], params['path']['results'],
params['features']['hash'], params['classifier']['hash'])
return params
def get_feature_filename(audio_file, path, extension='cpickle'):
"""Get feature filename
Parameters
----------
audio_file : str
audio file name from which the features are extracted
path : str
feature path
extension : str
file extension
(Default value='cpickle')
Returns
-------
feature_filename : str
full feature filename
"""
audio_filename = os.path.split(audio_file)[1]
return os.path.join(path, os.path.splitext(audio_filename)[0] + '.' + extension)
def get_feature_normalizer_filename(fold, path, extension='cpickle'):
"""Get normalizer filename
Parameters
----------
fold : int >= 0
evaluation fold number
path : str
normalizer path
extension : str
file extension
(Default value='cpickle')
Returns
-------
normalizer_filename : str
full normalizer filename
"""
return os.path.join(path, 'scale_fold' + str(fold) + '.' + extension)
def get_model_filename(fold, path, extension='cpickle'):
"""Get model filename
Parameters
----------
fold : int >= 0
evaluation fold number
path : str
model path
extension : str
file extension
(Default value='cpickle')
Returns
-------
model_filename : str
full model filename
"""
return os.path.join(path, 'model_fold' + str(fold) + '.' + extension)
def get_result_filename(fold, path, extension='txt'):
"""Get result filename
Parameters
----------
fold : int >= 0
evaluation fold number
path : str
result path
extension : str
file extension
        (Default value='txt')
Returns
-------
result_filename : str
full result filename
"""
if fold == 0:
return os.path.join(path, 'results.' + extension)
else:
return os.path.join(path, 'results_fold' + str(fold) + '.' + extension)
def do_feature_extraction(files, dataset, feature_path, params, overwrite=False):
"""Feature extraction
Parameters
----------
files : list
file list
dataset : class
dataset class
feature_path : str
path where the features are saved
params : dict
parameter dict
overwrite : bool
overwrite existing feature files
(Default value=False)
Returns
-------
nothing
Raises
-------
IOError
Audio file not found.
"""
# Check that target path exists, create if not
check_path(feature_path)
for file_id, audio_filename in enumerate(files):
# Get feature filename
current_feature_file = get_feature_filename(audio_file=os.path.split(audio_filename)[1], path=feature_path)
progress(title_text='Extracting',
percentage=(float(file_id) / len(files)),
note=os.path.split(audio_filename)[1])
if not os.path.isfile(current_feature_file) or overwrite:
# Load audio data
if os.path.isfile(dataset.relative_to_absolute_path(audio_filename)):
y, fs = load_audio(filename=dataset.relative_to_absolute_path(audio_filename), mono=True, fs=params['fs'])
else:
raise IOError("Audio file not found [%s]" % audio_filename)
# Extract features
feature_data = feature_extraction(y=y,
fs=fs,
include_mfcc0=params['include_mfcc0'],
include_delta=params['include_delta'],
include_acceleration=params['include_acceleration'],
mfcc_params=params['mfcc'],
delta_params=params['mfcc_delta'],
acceleration_params=params['mfcc_acceleration'])
# Save
save_data(current_feature_file, feature_data)
def do_feature_normalization(dataset, feature_normalizer_path, feature_path, dataset_evaluation_mode='folds', overwrite=False):
"""Feature normalization
Calculated normalization factors for each evaluation fold based on the training material available.
Parameters
----------
dataset : class
dataset class
feature_normalizer_path : str
path where the feature normalizers are saved.
feature_path : str
path where the features are saved.
dataset_evaluation_mode : str ['folds', 'full']
evaluation mode, 'full' all material available is considered to belong to one fold.
(Default value='folds')
overwrite : bool
overwrite existing normalizers
(Default value=False)
Returns
-------
nothing
Raises
-------
IOError
Feature file not found.
"""
# Check that target path exists, create if not
check_path(feature_normalizer_path)
for fold in dataset.folds(mode=dataset_evaluation_mode):
current_normalizer_file = get_feature_normalizer_filename(fold=fold, path=feature_normalizer_path)
if not os.path.isfile(current_normalizer_file) or overwrite:
# Initialize statistics
file_count = len(dataset.train(fold))
normalizer = FeatureNormalizer()
for item_id, item in enumerate(dataset.train(fold)):
progress(title_text='Collecting data',
fold=fold,
percentage=(float(item_id) / file_count),
note=os.path.split(item['file'])[1])
# Load features
if os.path.isfile(get_feature_filename(audio_file=item['file'], path=feature_path)):
feature_data = load_data(get_feature_filename(audio_file=item['file'], path=feature_path))['stat']
else:
raise IOError("Feature file not found [%s]" % (item['file']))
# Accumulate statistics
normalizer.accumulate(feature_data)
# Calculate normalization factors
normalizer.finalize()
# Save
save_data(current_normalizer_file, normalizer)
def do_system_training(dataset, model_path, feature_normalizer_path, feature_path, classifier_params,
dataset_evaluation_mode='folds', classifier_method='dnn6', overwrite=False):
"""System training
    model container format (the trained skflow DNN itself is saved separately
    under 'dnn6/dnn6model1'; the 'models' dict is kept only for interface
    compatibility with the GMM baseline):
    {
        'normalizer': normalizer class
        'models' : {}
    }
Parameters
----------
dataset : class
dataset class
model_path : str
path where the models are saved.
feature_normalizer_path : str
path where the feature normalizers are saved.
feature_path : str
path where the features are saved.
classifier_params : dict
parameter dict
dataset_evaluation_mode : str ['folds', 'full']
evaluation mode, 'full' all material available is considered to belong to one fold.
(Default value='folds')
classifier_method : str ['dnn6']
        classifier method, currently only 'dnn6' is supported
(Default value='dnn6')
overwrite : bool
overwrite existing models
(Default value=False)
Returns
-------
nothing
Raises
-------
ValueError
classifier_method is unknown.
IOError
Feature normalizer not found.
Feature file not found.
"""
if classifier_method != 'dnn6':
raise ValueError("Unknown classifier method ["+classifier_method+"]")
# Check that target path exists, create if not
check_path(model_path)
for fold in dataset.folds(mode=dataset_evaluation_mode):
current_model_file = get_model_filename(fold=fold, path=model_path)
if not os.path.isfile(current_model_file) or overwrite:
# Load normalizer
feature_normalizer_filename = get_feature_normalizer_filename(fold=fold, path=feature_normalizer_path)
if os.path.isfile(feature_normalizer_filename):
normalizer = load_data(feature_normalizer_filename)
else:
raise IOError("Feature normalizer not found [%s]" % feature_normalizer_filename)
# Initialize model container
model_container = {'normalizer': normalizer, 'models': {}}
# Collect training examples
file_count = len(dataset.train(fold))
data = {}
for item_id, item in enumerate(dataset.train(fold)):
progress(title_text='Collecting data',
fold=fold,
percentage=(float(item_id) / file_count),
note=os.path.split(item['file'])[1])
# Load features
feature_filename = get_feature_filename(audio_file=item['file'], path=feature_path)
if os.path.isfile(feature_filename):
feature_data = load_data(feature_filename)['feat']
else:
raise IOError("Features not found [%s]" % (item['file']))
# Scale features
feature_data = model_container['normalizer'].normalize(feature_data)
# Store features per class label
if item['scene_label'] not in data:
data[item['scene_label']] = feature_data
else:
data[item['scene_label']] = numpy.vstack((data[item['scene_label']], feature_data))
le = pp.LabelEncoder()
tot_data = {}
# Train models for each class
for label in data:
progress(title_text='Train models',
fold=fold,
note=label)
if classifier_method == 'dnn6':
# model_container['models'][label] = mixture.GMM(**classifier_params).fit(data[label])
if 'x' not in tot_data:
tot_data['x'] = data[label]
tot_data['y'] = numpy.repeat(label,len(data[label]), axis=0)
else:
tot_data['x'] = numpy.vstack((tot_data['x'], data[label]))
#print tot_data['y'].shape, numpy.repeat(label,len(data[label]), axis=0).shape
tot_data['y'] = numpy.hstack((tot_data['y'], numpy.repeat(label, len(data[label]), axis=0)))
else:
raise ValueError("Unknown classifier method ["+classifier_method+"]")
clf = skflow.TensorFlowDNNClassifier(**classifier_params)
if classifier_method == 'dnn6':
tot_data['y'] = le.fit_transform(tot_data['y'])
clf.fit(tot_data['x'], tot_data['y'])
clf.save('dnn6/dnn6model1')
print model_container['models']
# Save models
save_data(current_model_file, model_container)
#clf.save(current_model_file);
def do_system_testing(dataset, result_path, feature_path, model_path, feature_params,
dataset_evaluation_mode='folds', classifier_method='dnn6', overwrite=False):
"""System testing.
If extracted features are not found from disk, they are extracted but not saved.
Parameters
----------
dataset : class
dataset class
result_path : str
path where the results are saved.
feature_path : str
path where the features are saved.
model_path : str
path where the models are saved.
feature_params : dict
parameter dict
dataset_evaluation_mode : str ['folds', 'full']
evaluation mode, 'full' all material available is considered to belong to one fold.
(Default value='folds')
classifier_method : str ['dnn6']
        classifier method, currently only 'dnn6' is supported
(Default value='dnn6')
overwrite : bool
overwrite existing models
(Default value=False)
Returns
-------
nothing
Raises
-------
ValueError
classifier_method is unknown.
IOError
Model file not found.
Audio file not found.
"""
if classifier_method != 'dnn6':
raise ValueError("Unknown classifier method ["+classifier_method+"]")
# Check that target path exists, create if not
check_path(result_path)
for fold in dataset.folds(mode=dataset_evaluation_mode):
current_result_file = get_result_filename(fold=fold, path=result_path)
if not os.path.isfile(current_result_file) or overwrite:
results = []
# Load class model container
model_filename = get_model_filename(fold=fold, path=model_path)
if os.path.isfile(model_filename):
model_container = load_data(model_filename)
else:
raise IOError("Model file not found [%s]" % model_filename)
file_count = len(dataset.test(fold))
for file_id, item in enumerate(dataset.test(fold)):
progress(title_text='Testing',
fold=fold,
percentage=(float(file_id) / file_count),
note=os.path.split(item['file'])[1])
# Load features
feature_filename = get_feature_filename(audio_file=item['file'], path=feature_path)
if os.path.isfile(feature_filename):
feature_data = load_data(feature_filename)['feat']
else:
# Load audio
if os.path.isfile(dataset.relative_to_absolute_path(item['file'])):
y, fs = load_audio(filename=dataset.relative_to_absolute_path(item['file']), mono=True, fs=feature_params['fs'])
else:
raise IOError("Audio file not found [%s]" % (item['file']))
feature_data = feature_extraction(y=y,
fs=fs,
include_mfcc0=feature_params['include_mfcc0'],
include_delta=feature_params['include_delta'],
include_acceleration=feature_params['include_acceleration'],
mfcc_params=feature_params['mfcc'],
delta_params=feature_params['mfcc_delta'],
acceleration_params=feature_params['mfcc_acceleration'],
statistics=False)['feat']
# Normalize features
feature_data = model_container['normalizer'].normalize(feature_data)
# Do classification for the block
if classifier_method == 'dnn6':
current_result = dataset.scene_labels[do_classification_dnn6(feature_data, model_container)]
else:
raise ValueError("Unknown classifier method ["+classifier_method+"]")
# Store the result
results.append((dataset.absolute_to_relative(item['file']), current_result))
# Save testing results
with open(current_result_file, 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for result_item in results:
writer.writerow(result_item)
def do_classification_dnn6(feature_data, model_container):
"""GMM classification for give feature matrix
model container format:
{
'normalizer': normalizer class
'models' :
{
'office' : mixture.GMM class
'home' : mixture.GMM class
...
}
}
Parameters
----------
feature_data : numpy.ndarray [shape=(t, feature vector length)]
feature matrix
model_container : dict
model container
Returns
-------
result : str
classification result as scene label
"""
# Initialize log-likelihood matrix to -inf
logls = numpy.empty(15)
logls.fill(-numpy.inf)
model_clf = skflow.TensorFlowEstimator.restore('dnn6/dnn6model1');
#for label_id, label in enumerate(model_container['models']):
# logls[label_id] = numpy.sum(model_container['models'][label].score(feature_data))
logls = numpy.sum(numpy.log(model_clf.predict_proba(feature_data)),0)
#print logls
classification_result_id = numpy.argmax(logls)
return classification_result_id
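def _toy_frame_aggregation():
    # Illustrative toy example (added for clarity, not part of the original
    # system): the decision rule above sums per-frame log-probabilities and
    # picks the class with the largest total. With three frames and two
    # classes, class 0 wins because its summed log-probability is larger.
    frame_probs = numpy.array([[0.7, 0.3],
                               [0.6, 0.4],
                               [0.8, 0.2]])
    return numpy.argmax(numpy.sum(numpy.log(frame_probs), 0))  # -> 0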
def plot_cm(cm, targets, title='Confusion Matrix', cmap=plt.cm.Blues, norm=True, name='Plot'):
if(norm):
cm = cm.astype(float)/cm.sum(axis=1)[:, numpy.newaxis]
fig = plt.figure()
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title + ' ' + name)
plt.colorbar()
tick_marks = numpy.arange(len(targets))
plt.xticks(tick_marks, targets,rotation=45)
plt.yticks(tick_marks, targets)
plt.ylabel('True Label')
plt.xlabel('Predicted Label')
# plt.show()
fig.savefig(name + '.png')
#plt.close()
def do_system_evaluation(dataset, result_path, plot_name, dataset_evaluation_mode='folds'):
"""System evaluation. Testing outputs are collected and evaluated. Evaluation results are printed.
Parameters
----------
dataset : class
dataset class
result_path : str
path where the results are saved.
dataset_evaluation_mode : str ['folds', 'full']
evaluation mode, 'full' all material available is considered to belong to one fold.
(Default value='folds')
Returns
-------
nothing
Raises
-------
IOError
Result file not found
"""
dcase2016_scene_metric = DCASE2016_SceneClassification_Metrics(class_list=dataset.scene_labels)
results_fold = []
print str(dataset.scene_label_count)
tot_cm = numpy.zeros((dataset.scene_label_count, dataset.scene_label_count))
for fold in dataset.folds(mode=dataset_evaluation_mode):
dcase2016_scene_metric_fold = DCASE2016_SceneClassification_Metrics(class_list=dataset.scene_labels)
results = []
result_filename = get_result_filename(fold=fold, path=result_path)
if os.path.isfile(result_filename):
with open(result_filename, 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
results.append(row)
else:
raise IOError("Result file not found [%s]" % result_filename)
y_true = []
y_pred = []
for result in results:
y_true.append(dataset.file_meta(result[0])[0]['scene_label'])
y_pred.append(result[1])
#print dataset.file_meta(result[0])[0]['scene_label'] + ' ' + result[1]
dcase2016_scene_metric.evaluate(system_output=y_pred, annotated_ground_truth=y_true)
dcase2016_scene_metric_fold.evaluate(system_output=y_pred, annotated_ground_truth=y_true)
results_fold.append(dcase2016_scene_metric_fold.results())
tot_cm += confusion_matrix(y_true, y_pred)
#print ' '
print tot_cm
#plot_cm(tot_cm, dataset.scene_labels,name=plot_name)
#joblib.dump(tot_cm, plot_name + '.pkl')
final_result['tot_cm'] = tot_cm
final_result['tot_cm_acc'] = numpy.sum(numpy.diag(tot_cm))/numpy.sum(tot_cm)
results = dcase2016_scene_metric.results()
print " File-wise evaluation, over %d folds" % dataset.fold_count
fold_labels = ''
separator = ' =====================+======+======+==========+ +'
if dataset.fold_count > 1:
for fold in dataset.folds(mode=dataset_evaluation_mode):
fold_labels += " {:8s} |".format('Fold'+str(fold))
separator += "==========+"
print " {:20s} | {:4s} : {:4s} | {:8s} | |".format('Scene label', 'Nref', 'Nsys', 'Accuracy')+fold_labels
print separator
for label_id, label in enumerate(sorted(results['class_wise_accuracy'])):
fold_values = ''
if dataset.fold_count > 1:
for fold in dataset.folds(mode=dataset_evaluation_mode):
fold_values += " {:5.1f} % |".format(results_fold[fold-1]['class_wise_accuracy'][label] * 100)
print " {:20s} | {:4d} : {:4d} | {:5.1f} % | |".format(label,
results['class_wise_data'][label]['Nref'],
results['class_wise_data'][label]['Nsys'],
results['class_wise_accuracy'][label] * 100)+fold_values
print separator
fold_values = ''
if dataset.fold_count > 1:
for fold in dataset.folds(mode=dataset_evaluation_mode):
fold_values += " {:5.1f} % |".format(results_fold[fold-1]['overall_accuracy'] * 100)
print " {:20s} | {:4d} : {:4d} | {:5.1f} % | |".format('Overall accuracy',
results['Nref'],
results['Nsys'],
results['overall_accuracy'] * 100)+fold_values
final_result['result'] = results
if __name__ == "__main__":
try:
sys.exit(main(sys.argv))
except (ValueError, IOError) as e:
sys.exit(e)
| mit |
shangwuhencc/scikit-learn | sklearn/utils/graph.py | 289 | 6239 | """
Graph utilities and algorithms
Graphs are represented with their adjacency matrices, preferably using
sparse matrices.
"""
# Authors: Aric Hagberg <[email protected]>
# Gael Varoquaux <[email protected]>
# Jake Vanderplas <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from .validation import check_array
from .graph_shortest_path import graph_shortest_path
###############################################################################
# Path and connected component analysis.
# Code adapted from networkx
def single_source_shortest_path_length(graph, source, cutoff=None):
"""Return the shortest path length from source to all reachable nodes.
Returns a dictionary of shortest path lengths keyed by target.
Parameters
----------
graph: sparse matrix or 2D array (preferably LIL matrix)
Adjacency matrix of the graph
source : node label
Starting node for path
cutoff : integer, optional
Depth to stop the search - only
paths of length <= cutoff are returned.
Examples
--------
>>> from sklearn.utils.graph import single_source_shortest_path_length
>>> import numpy as np
>>> graph = np.array([[ 0, 1, 0, 0],
... [ 1, 0, 1, 0],
... [ 0, 1, 0, 1],
... [ 0, 0, 1, 0]])
>>> single_source_shortest_path_length(graph, 0)
{0: 0, 1: 1, 2: 2, 3: 3}
>>> single_source_shortest_path_length(np.ones((6, 6)), 2)
{0: 1, 1: 1, 2: 0, 3: 1, 4: 1, 5: 1}
"""
if sparse.isspmatrix(graph):
graph = graph.tolil()
else:
graph = sparse.lil_matrix(graph)
seen = {} # level (number of hops) when seen in BFS
level = 0 # the current level
next_level = [source] # dict of nodes to check at next level
while next_level:
this_level = next_level # advance to next level
next_level = set() # and start a new list (fringe)
for v in this_level:
if v not in seen:
seen[v] = level # set the level of vertex v
next_level.update(graph.rows[v])
if cutoff is not None and cutoff <= level:
break
level += 1
return seen # return all path lengths as dictionary
if hasattr(sparse, 'connected_components'):
connected_components = sparse.connected_components
else:
from .sparsetools import connected_components
###############################################################################
# Graph laplacian
def graph_laplacian(csgraph, normed=False, return_diag=False):
""" Return the Laplacian matrix of a directed graph.
For non-symmetric graphs the out-degree is used in the computation.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute normalized Laplacian.
return_diag : bool, optional
If True, then return diagonal as well as laplacian.
Returns
-------
lap : ndarray
The N x N laplacian matrix of graph.
diag : ndarray
The length-N diagonal of the laplacian matrix.
diag is returned only if return_diag is True.
Notes
-----
The Laplacian matrix of a graph is sometimes referred to as the
"Kirchoff matrix" or the "admittance matrix", and is useful in many
parts of spectral graph theory. In particular, the eigen-decomposition
of the laplacian matrix can give insight into many properties of the graph.
For non-symmetric directed graphs, the laplacian is computed using the
out-degree of each node.
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError('csgraph must be a square matrix or array')
if normed and (np.issubdtype(csgraph.dtype, np.int)
or np.issubdtype(csgraph.dtype, np.uint)):
csgraph = check_array(csgraph, dtype=np.float64, accept_sparse=True)
if sparse.isspmatrix(csgraph):
return _laplacian_sparse(csgraph, normed=normed,
return_diag=return_diag)
else:
return _laplacian_dense(csgraph, normed=normed,
return_diag=return_diag)
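def _example_path_graph_laplacian():
    # Illustrative example (added for clarity, not part of the original
    # module): Laplacian of the 3-node path graph 0-1-2. Node degrees sit on
    # the diagonal, adjacent pairs get -1, and every row sums to zero.
    A = np.array([[0, 1, 0],
                  [1, 0, 1],
                  [0, 1, 0]])
    return graph_laplacian(A)  # [[ 1, -1,  0], [-1,  2, -1], [ 0, -1,  1]]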
def _laplacian_sparse(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
if not graph.format == 'coo':
lap = (-graph).tocoo()
else:
lap = -graph.copy()
diag_mask = (lap.row == lap.col)
if not diag_mask.sum() == n_nodes:
# The sparsity pattern of the matrix has holes on the diagonal,
# we need to fix that
diag_idx = lap.row[diag_mask]
diagonal_holes = list(set(range(n_nodes)).difference(diag_idx))
new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
new_row = np.concatenate([lap.row, diagonal_holes])
new_col = np.concatenate([lap.col, diagonal_holes])
lap = sparse.coo_matrix((new_data, (new_row, new_col)),
shape=lap.shape)
diag_mask = (lap.row == lap.col)
lap.data[diag_mask] = 0
w = -np.asarray(lap.sum(axis=1)).squeeze()
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap.data /= w[lap.row]
lap.data /= w[lap.col]
lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(
lap.data.dtype)
else:
lap.data[diag_mask] = w[lap.row[diag_mask]]
if return_diag:
return lap, w
return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = (1 - w_zeros).astype(lap.dtype)
else:
lap.flat[::n_nodes + 1] = w.astype(lap.dtype)
if return_diag:
return lap, w
return lap
| bsd-3-clause |
Vvucinic/Wander | venv_2_7/lib/python2.7/site-packages/pandas/tools/tests/test_merge.py | 9 | 111928 | # pylint: disable=E1103
import nose
from datetime import datetime
from numpy.random import randn
from numpy import nan
import numpy as np
import random
import pandas as pd
from pandas.compat import range, lrange, lzip, zip, StringIO
from pandas import compat
from pandas.tseries.index import DatetimeIndex
from pandas.tools.merge import merge, concat, ordered_merge, MergeError
from pandas import Categorical, Timestamp
from pandas.util.testing import (assert_frame_equal, assert_series_equal,
assert_almost_equal,
makeCustomDataframe as mkdf,
assertRaisesRegexp)
from pandas import isnull, DataFrame, Index, MultiIndex, Panel, Series, date_range, read_table, read_csv
import pandas.algos as algos
import pandas.util.testing as tm
from numpy.testing.decorators import slow
a_ = np.array
N = 50
NGROUPS = 8
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
def get_test_data(ngroups=NGROUPS, n=N):
unique_groups = lrange(ngroups)
arr = np.asarray(np.tile(unique_groups, n // ngroups))
if len(arr) < n:
arr = np.asarray(list(arr) + unique_groups[:n - len(arr)])
random.shuffle(arr)
return arr
class TestMerge(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
# aggregate multiple columns
self.df = DataFrame({'key1': get_test_data(),
'key2': get_test_data(),
'data1': np.random.randn(N),
'data2': np.random.randn(N)})
# exclude a couple keys for fun
self.df = self.df[self.df['key2'] > 1]
self.df2 = DataFrame({'key1': get_test_data(n=N // 5),
'key2': get_test_data(ngroups=NGROUPS // 2,
n=N // 5),
'value': np.random.randn(N // 5)})
index, data = tm.getMixedTypeDict()
self.target = DataFrame(data, index=index)
# Join on string value
self.source = DataFrame({'MergedA': data['A'], 'MergedD': data['D']},
index=data['C'])
self.left = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'e', 'a'],
'v1': np.random.randn(7)})
self.right = DataFrame({'v2': np.random.randn(4)},
index=['d', 'b', 'c', 'a'])
def test_cython_left_outer_join(self):
left = a_([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64)
right = a_([1, 1, 0, 4, 2, 2, 1], dtype=np.int64)
max_group = 5
ls, rs = algos.left_outer_join(left, right, max_group)
exp_ls = left.argsort(kind='mergesort')
exp_rs = right.argsort(kind='mergesort')
exp_li = a_([0, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
6, 6, 7, 7, 8, 8, 9, 10])
exp_ri = a_([0, 0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3,
4, 5, 4, 5, 4, 5, -1, -1])
exp_ls = exp_ls.take(exp_li)
exp_ls[exp_li == -1] = -1
exp_rs = exp_rs.take(exp_ri)
exp_rs[exp_ri == -1] = -1
self.assert_numpy_array_equal(ls, exp_ls)
self.assert_numpy_array_equal(rs, exp_rs)
def test_cython_right_outer_join(self):
left = a_([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64)
right = a_([1, 1, 0, 4, 2, 2, 1], dtype=np.int64)
max_group = 5
rs, ls = algos.left_outer_join(right, left, max_group)
exp_ls = left.argsort(kind='mergesort')
exp_rs = right.argsort(kind='mergesort')
# 0 1 1 1
exp_li = a_([0, 1, 2, 3, 4, 5, 3, 4, 5, 3, 4, 5,
# 2 2 4
6, 7, 8, 6, 7, 8, -1])
exp_ri = a_([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3,
4, 4, 4, 5, 5, 5, 6])
exp_ls = exp_ls.take(exp_li)
exp_ls[exp_li == -1] = -1
exp_rs = exp_rs.take(exp_ri)
exp_rs[exp_ri == -1] = -1
self.assert_numpy_array_equal(ls, exp_ls)
self.assert_numpy_array_equal(rs, exp_rs)
def test_cython_inner_join(self):
left = a_([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64)
right = a_([1, 1, 0, 4, 2, 2, 1, 4], dtype=np.int64)
max_group = 5
ls, rs = algos.inner_join(left, right, max_group)
exp_ls = left.argsort(kind='mergesort')
exp_rs = right.argsort(kind='mergesort')
exp_li = a_([0, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
6, 6, 7, 7, 8, 8])
exp_ri = a_([0, 0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3,
4, 5, 4, 5, 4, 5])
exp_ls = exp_ls.take(exp_li)
exp_ls[exp_li == -1] = -1
exp_rs = exp_rs.take(exp_ri)
exp_rs[exp_ri == -1] = -1
self.assert_numpy_array_equal(ls, exp_ls)
self.assert_numpy_array_equal(rs, exp_rs)
def test_left_outer_join(self):
joined_key2 = merge(self.df, self.df2, on='key2')
_check_join(self.df, self.df2, joined_key2, ['key2'], how='left')
joined_both = merge(self.df, self.df2)
_check_join(self.df, self.df2, joined_both, ['key1', 'key2'],
how='left')
def test_right_outer_join(self):
joined_key2 = merge(self.df, self.df2, on='key2', how='right')
_check_join(self.df, self.df2, joined_key2, ['key2'], how='right')
joined_both = merge(self.df, self.df2, how='right')
_check_join(self.df, self.df2, joined_both, ['key1', 'key2'],
how='right')
def test_full_outer_join(self):
joined_key2 = merge(self.df, self.df2, on='key2', how='outer')
_check_join(self.df, self.df2, joined_key2, ['key2'], how='outer')
joined_both = merge(self.df, self.df2, how='outer')
_check_join(self.df, self.df2, joined_both, ['key1', 'key2'],
how='outer')
def test_inner_join(self):
joined_key2 = merge(self.df, self.df2, on='key2', how='inner')
_check_join(self.df, self.df2, joined_key2, ['key2'], how='inner')
joined_both = merge(self.df, self.df2, how='inner')
_check_join(self.df, self.df2, joined_both, ['key1', 'key2'],
how='inner')
def test_handle_overlap(self):
joined = merge(self.df, self.df2, on='key2',
suffixes=['.foo', '.bar'])
self.assertIn('key1.foo', joined)
self.assertIn('key1.bar', joined)
def test_handle_overlap_arbitrary_key(self):
joined = merge(self.df, self.df2,
left_on='key2', right_on='key1',
suffixes=['.foo', '.bar'])
self.assertIn('key1.foo', joined)
self.assertIn('key2.bar', joined)
def test_merge_common(self):
joined = merge(self.df, self.df2)
exp = merge(self.df, self.df2, on=['key1', 'key2'])
tm.assert_frame_equal(joined, exp)
def test_join_on(self):
target = self.target
source = self.source
merged = target.join(source, on='C')
self.assert_numpy_array_equal(merged['MergedA'], target['A'])
self.assert_numpy_array_equal(merged['MergedD'], target['D'])
# join with duplicates (fix regression from DataFrame/Matrix merge)
df = DataFrame({'key': ['a', 'a', 'b', 'b', 'c']})
df2 = DataFrame({'value': [0, 1, 2]}, index=['a', 'b', 'c'])
joined = df.join(df2, on='key')
expected = DataFrame({'key': ['a', 'a', 'b', 'b', 'c'],
'value': [0, 0, 1, 1, 2]})
assert_frame_equal(joined, expected)
# Test when some are missing
df_a = DataFrame([[1], [2], [3]], index=['a', 'b', 'c'],
columns=['one'])
df_b = DataFrame([['foo'], ['bar']], index=[1, 2],
columns=['two'])
df_c = DataFrame([[1], [2]], index=[1, 2],
columns=['three'])
joined = df_a.join(df_b, on='one')
joined = joined.join(df_c, on='one')
self.assertTrue(np.isnan(joined['two']['c']))
self.assertTrue(np.isnan(joined['three']['c']))
        # merge column not present
self.assertRaises(KeyError, target.join, source, on='E')
# overlap
source_copy = source.copy()
source_copy['A'] = 0
self.assertRaises(ValueError, target.join, source_copy, on='A')
def test_join_on_fails_with_different_right_index(self):
with tm.assertRaises(ValueError):
df = DataFrame({'a': tm.choice(['m', 'f'], size=3),
'b': np.random.randn(3)})
df2 = DataFrame({'a': tm.choice(['m', 'f'], size=10),
'b': np.random.randn(10)},
index=tm.makeCustomIndex(10, 2))
merge(df, df2, left_on='a', right_index=True)
def test_join_on_fails_with_different_left_index(self):
with tm.assertRaises(ValueError):
df = DataFrame({'a': tm.choice(['m', 'f'], size=3),
'b': np.random.randn(3)},
index=tm.makeCustomIndex(10, 2))
df2 = DataFrame({'a': tm.choice(['m', 'f'], size=10),
'b': np.random.randn(10)})
merge(df, df2, right_on='b', left_index=True)
def test_join_on_fails_with_different_column_counts(self):
with tm.assertRaises(ValueError):
df = DataFrame({'a': tm.choice(['m', 'f'], size=3),
'b': np.random.randn(3)})
df2 = DataFrame({'a': tm.choice(['m', 'f'], size=10),
'b': np.random.randn(10)},
index=tm.makeCustomIndex(10, 2))
merge(df, df2, right_on='a', left_on=['a', 'b'])
def test_join_on_pass_vector(self):
expected = self.target.join(self.source, on='C')
del expected['C']
join_col = self.target.pop('C')
result = self.target.join(self.source, on=join_col)
assert_frame_equal(result, expected)
def test_join_with_len0(self):
# nothing to merge
merged = self.target.join(self.source.reindex([]), on='C')
for col in self.source:
self.assertIn(col, merged)
self.assertTrue(merged[col].isnull().all())
merged2 = self.target.join(self.source.reindex([]), on='C',
how='inner')
self.assertTrue(merged2.columns.equals(merged.columns))
self.assertEqual(len(merged2), 0)
def test_join_on_inner(self):
df = DataFrame({'key': ['a', 'a', 'd', 'b', 'b', 'c']})
df2 = DataFrame({'value': [0, 1]}, index=['a', 'b'])
joined = df.join(df2, on='key', how='inner')
expected = df.join(df2, on='key')
expected = expected[expected['value'].notnull()]
self.assert_numpy_array_equal(joined['key'], expected['key'])
self.assert_numpy_array_equal(joined['value'], expected['value'])
self.assertTrue(joined.index.equals(expected.index))
def test_join_on_singlekey_list(self):
df = DataFrame({'key': ['a', 'a', 'b', 'b', 'c']})
df2 = DataFrame({'value': [0, 1, 2]}, index=['a', 'b', 'c'])
# corner cases
joined = df.join(df2, on=['key'])
expected = df.join(df2, on='key')
assert_frame_equal(joined, expected)
def test_join_on_series(self):
result = self.target.join(self.source['MergedA'], on='C')
expected = self.target.join(self.source[['MergedA']], on='C')
assert_frame_equal(result, expected)
def test_join_on_series_buglet(self):
# GH #638
df = DataFrame({'a': [1, 1]})
ds = Series([2], index=[1], name='b')
result = df.join(ds, on='a')
expected = DataFrame({'a': [1, 1],
'b': [2, 2]}, index=df.index)
tm.assert_frame_equal(result, expected)
def test_join_index_mixed(self):
df1 = DataFrame({'A': 1., 'B': 2, 'C': 'foo', 'D': True},
index=np.arange(10),
columns=['A', 'B', 'C', 'D'])
self.assertEqual(df1['B'].dtype, np.int64)
self.assertEqual(df1['D'].dtype, np.bool_)
df2 = DataFrame({'A': 1., 'B': 2, 'C': 'foo', 'D': True},
index=np.arange(0, 10, 2),
columns=['A', 'B', 'C', 'D'])
# overlap
joined = df1.join(df2, lsuffix='_one', rsuffix='_two')
expected_columns = ['A_one', 'B_one', 'C_one', 'D_one',
'A_two', 'B_two', 'C_two', 'D_two']
df1.columns = expected_columns[:4]
df2.columns = expected_columns[4:]
expected = _join_by_hand(df1, df2)
assert_frame_equal(joined, expected)
# no overlapping blocks
df1 = DataFrame(index=np.arange(10))
df1['bool'] = True
df1['string'] = 'foo'
df2 = DataFrame(index=np.arange(5, 15))
df2['int'] = 1
df2['float'] = 1.
for kind in JOIN_TYPES:
joined = df1.join(df2, how=kind)
expected = _join_by_hand(df1, df2, how=kind)
assert_frame_equal(joined, expected)
joined = df2.join(df1, how=kind)
expected = _join_by_hand(df2, df1, how=kind)
assert_frame_equal(joined, expected)
def test_join_empty_bug(self):
# generated an exception in 0.4.3
x = DataFrame()
x.join(DataFrame([3], index=[0], columns=['A']), how='outer')
def test_join_unconsolidated(self):
# GH #331
a = DataFrame(randn(30, 2), columns=['a', 'b'])
c = Series(randn(30))
a['c'] = c
d = DataFrame(randn(30, 1), columns=['q'])
# it works!
a.join(d)
d.join(a)
def test_join_multiindex(self):
index1 = MultiIndex.from_arrays([['a', 'a', 'a', 'b', 'b', 'b'],
[1, 2, 3, 1, 2, 3]],
names=['first', 'second'])
index2 = MultiIndex.from_arrays([['b', 'b', 'b', 'c', 'c', 'c'],
[1, 2, 3, 1, 2, 3]],
names=['first', 'second'])
df1 = DataFrame(data=np.random.randn(6), index=index1,
columns=['var X'])
df2 = DataFrame(data=np.random.randn(6), index=index2,
columns=['var Y'])
df1 = df1.sortlevel(0)
df2 = df2.sortlevel(0)
joined = df1.join(df2, how='outer')
ex_index = index1._tuple_index.union(index2._tuple_index)
expected = df1.reindex(ex_index).join(df2.reindex(ex_index))
expected.index.names = index1.names
assert_frame_equal(joined, expected)
self.assertEqual(joined.index.names, index1.names)
df1 = df1.sortlevel(1)
df2 = df2.sortlevel(1)
joined = df1.join(df2, how='outer').sortlevel(0)
ex_index = index1._tuple_index.union(index2._tuple_index)
expected = df1.reindex(ex_index).join(df2.reindex(ex_index))
expected.index.names = index1.names
assert_frame_equal(joined, expected)
self.assertEqual(joined.index.names, index1.names)
def test_join_inner_multiindex(self):
key1 = ['bar', 'bar', 'bar', 'foo', 'foo', 'baz', 'baz', 'qux',
'qux', 'snap']
key2 = ['two', 'one', 'three', 'one', 'two', 'one', 'two', 'two',
'three', 'one']
data = np.random.randn(len(key1))
data = DataFrame({'key1': key1, 'key2': key2,
'data': data})
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
to_join = DataFrame(np.random.randn(10, 3), index=index,
columns=['j_one', 'j_two', 'j_three'])
joined = data.join(to_join, on=['key1', 'key2'], how='inner')
expected = merge(data, to_join.reset_index(),
left_on=['key1', 'key2'],
right_on=['first', 'second'], how='inner',
sort=False)
expected2 = merge(to_join, data,
right_on=['key1', 'key2'], left_index=True,
how='inner', sort=False)
assert_frame_equal(joined, expected2.reindex_like(joined))
expected2 = merge(to_join, data, right_on=['key1', 'key2'],
left_index=True, how='inner', sort=False)
expected = expected.drop(['first', 'second'], axis=1)
expected.index = joined.index
self.assertTrue(joined.index.is_monotonic)
assert_frame_equal(joined, expected)
# _assert_same_contents(expected, expected2.ix[:, expected.columns])
def test_join_hierarchical_mixed(self):
df = DataFrame([(1, 2, 3), (4, 5, 6)], columns=['a', 'b', 'c'])
new_df = df.groupby(['a']).agg({'b': [np.mean, np.sum]})
other_df = DataFrame(
[(1, 2, 3), (7, 10, 6)], columns=['a', 'b', 'd'])
other_df.set_index('a', inplace=True)
result = merge(new_df, other_df, left_index=True, right_index=True)
self.assertTrue(('b', 'mean') in result)
self.assertTrue('b' in result)
def test_join_float64_float32(self):
a = DataFrame(randn(10, 2), columns=['a', 'b'], dtype = np.float64)
b = DataFrame(randn(10, 1), columns=['c'], dtype = np.float32)
joined = a.join(b)
self.assertEqual(joined.dtypes['a'], 'float64')
self.assertEqual(joined.dtypes['b'], 'float64')
self.assertEqual(joined.dtypes['c'], 'float32')
a = np.random.randint(0, 5, 100).astype('int64')
b = np.random.random(100).astype('float64')
c = np.random.random(100).astype('float32')
df = DataFrame({'a': a, 'b': b, 'c': c})
xpdf = DataFrame({'a': a, 'b': b, 'c': c })
s = DataFrame(np.random.random(5).astype('float32'), columns=['md'])
rs = df.merge(s, left_on='a', right_index=True)
self.assertEqual(rs.dtypes['a'], 'int64')
self.assertEqual(rs.dtypes['b'], 'float64')
self.assertEqual(rs.dtypes['c'], 'float32')
self.assertEqual(rs.dtypes['md'], 'float32')
xp = xpdf.merge(s, left_on='a', right_index=True)
assert_frame_equal(rs, xp)
def test_join_many_non_unique_index(self):
df1 = DataFrame({"a": [1, 1], "b": [1, 1], "c": [10, 20]})
df2 = DataFrame({"a": [1, 1], "b": [1, 2], "d": [100, 200]})
df3 = DataFrame({"a": [1, 1], "b": [1, 2], "e": [1000, 2000]})
idf1 = df1.set_index(["a", "b"])
idf2 = df2.set_index(["a", "b"])
idf3 = df3.set_index(["a", "b"])
result = idf1.join([idf2, idf3], how='outer')
df_partially_merged = merge(df1, df2, on=['a', 'b'], how='outer')
expected = merge(df_partially_merged, df3, on=['a', 'b'], how='outer')
result = result.reset_index()
result['a'] = result['a'].astype(np.float64)
result['b'] = result['b'].astype(np.float64)
assert_frame_equal(result, expected.ix[:, result.columns])
df1 = DataFrame({"a": [1, 1, 1], "b": [1, 1, 1], "c": [10, 20, 30]})
df2 = DataFrame({"a": [1, 1, 1], "b": [1, 1, 2], "d": [100, 200, 300]})
df3 = DataFrame(
{"a": [1, 1, 1], "b": [1, 1, 2], "e": [1000, 2000, 3000]})
idf1 = df1.set_index(["a", "b"])
idf2 = df2.set_index(["a", "b"])
idf3 = df3.set_index(["a", "b"])
result = idf1.join([idf2, idf3], how='inner')
df_partially_merged = merge(df1, df2, on=['a', 'b'], how='inner')
expected = merge(df_partially_merged, df3, on=['a', 'b'], how='inner')
result = result.reset_index()
assert_frame_equal(result, expected.ix[:, result.columns])
# GH 11519
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three',
'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
s = Series(np.repeat(np.arange(8), 2),
index=np.repeat(np.arange(8), 2), name='TEST')
inner = df.join(s, how='inner')
outer = df.join(s, how='outer')
left = df.join(s, how='left')
right = df.join(s, how='right')
assert_frame_equal(inner, outer)
assert_frame_equal(inner, left)
assert_frame_equal(inner, right)
def test_merge_index_singlekey_right_vs_left(self):
left = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'e', 'a'],
'v1': np.random.randn(7)})
right = DataFrame({'v2': np.random.randn(4)},
index=['d', 'b', 'c', 'a'])
merged1 = merge(left, right, left_on='key',
right_index=True, how='left', sort=False)
merged2 = merge(right, left, right_on='key',
left_index=True, how='right', sort=False)
assert_frame_equal(merged1, merged2.ix[:, merged1.columns])
merged1 = merge(left, right, left_on='key',
right_index=True, how='left', sort=True)
merged2 = merge(right, left, right_on='key',
left_index=True, how='right', sort=True)
assert_frame_equal(merged1, merged2.ix[:, merged1.columns])
def test_merge_index_singlekey_inner(self):
left = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'e', 'a'],
'v1': np.random.randn(7)})
right = DataFrame({'v2': np.random.randn(4)},
index=['d', 'b', 'c', 'a'])
# inner join
result = merge(left, right, left_on='key', right_index=True,
how='inner')
expected = left.join(right, on='key').ix[result.index]
assert_frame_equal(result, expected)
result = merge(right, left, right_on='key', left_index=True,
how='inner')
expected = left.join(right, on='key').ix[result.index]
assert_frame_equal(result, expected.ix[:, result.columns])
def test_merge_misspecified(self):
self.assertRaises(ValueError, merge, self.left, self.right,
left_index=True)
self.assertRaises(ValueError, merge, self.left, self.right,
right_index=True)
self.assertRaises(ValueError, merge, self.left, self.left,
left_on='key', on='key')
self.assertRaises(ValueError, merge, self.df, self.df2,
left_on=['key1'], right_on=['key1', 'key2'])
def test_merge_overlap(self):
merged = merge(self.left, self.left, on='key')
exp_len = (self.left['key'].value_counts() ** 2).sum()
self.assertEqual(len(merged), exp_len)
self.assertIn('v1_x', merged)
self.assertIn('v1_y', merged)
def test_merge_different_column_key_names(self):
left = DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
'value': [1, 2, 3, 4]})
right = DataFrame({'rkey': ['foo', 'bar', 'qux', 'foo'],
'value': [5, 6, 7, 8]})
merged = left.merge(right, left_on='lkey', right_on='rkey',
how='outer', sort=True)
assert_almost_equal(merged['lkey'],
['bar', 'baz', 'foo', 'foo', 'foo', 'foo', np.nan])
assert_almost_equal(merged['rkey'],
['bar', np.nan, 'foo', 'foo', 'foo', 'foo', 'qux'])
assert_almost_equal(merged['value_x'], [2, 3, 1, 1, 4, 4, np.nan])
assert_almost_equal(merged['value_y'], [6, np.nan, 5, 8, 5, 8, 7])
def test_merge_copy(self):
left = DataFrame({'a': 0, 'b': 1}, index=lrange(10))
right = DataFrame({'c': 'foo', 'd': 'bar'}, index=lrange(10))
merged = merge(left, right, left_index=True,
right_index=True, copy=True)
merged['a'] = 6
self.assertTrue((left['a'] == 0).all())
merged['d'] = 'peekaboo'
self.assertTrue((right['d'] == 'bar').all())
def test_merge_nocopy(self):
left = DataFrame({'a': 0, 'b': 1}, index=lrange(10))
right = DataFrame({'c': 'foo', 'd': 'bar'}, index=lrange(10))
merged = merge(left, right, left_index=True,
right_index=True, copy=False)
merged['a'] = 6
self.assertTrue((left['a'] == 6).all())
merged['d'] = 'peekaboo'
self.assertTrue((right['d'] == 'peekaboo').all())
def test_join_sort(self):
left = DataFrame({'key': ['foo', 'bar', 'baz', 'foo'],
'value': [1, 2, 3, 4]})
right = DataFrame({'value2': ['a', 'b', 'c']},
index=['bar', 'baz', 'foo'])
joined = left.join(right, on='key', sort=True)
expected = DataFrame({'key': ['bar', 'baz', 'foo', 'foo'],
'value': [2, 3, 1, 4],
'value2': ['a', 'b', 'c', 'c']},
index=[1, 2, 0, 3])
assert_frame_equal(joined, expected)
# smoke test
joined = left.join(right, on='key', sort=False)
self.assert_numpy_array_equal(joined.index, lrange(4))
def test_intelligently_handle_join_key(self):
# #733, be a bit more 1337 about not returning unconsolidated DataFrame
left = DataFrame({'key': [1, 1, 2, 2, 3],
'value': lrange(5)}, columns=['value', 'key'])
right = DataFrame({'key': [1, 1, 2, 3, 4, 5],
'rvalue': lrange(6)})
joined = merge(left, right, on='key', how='outer')
expected = DataFrame({'key': [1, 1, 1, 1, 2, 2, 3, 4, 5.],
'value': np.array([0, 0, 1, 1, 2, 3, 4,
np.nan, np.nan]),
'rvalue': np.array([0, 1, 0, 1, 2, 2, 3, 4, 5])},
columns=['value', 'key', 'rvalue'])
assert_frame_equal(joined, expected, check_dtype=False)
self.assertTrue(joined._data.is_consolidated())
def test_handle_join_key_pass_array(self):
left = DataFrame({'key': [1, 1, 2, 2, 3],
'value': lrange(5)}, columns=['value', 'key'])
right = DataFrame({'rvalue': lrange(6)})
key = np.array([1, 1, 2, 3, 4, 5])
merged = merge(left, right, left_on='key', right_on=key, how='outer')
merged2 = merge(right, left, left_on=key, right_on='key', how='outer')
assert_series_equal(merged['key'], merged2['key'])
self.assertTrue(merged['key'].notnull().all())
self.assertTrue(merged2['key'].notnull().all())
left = DataFrame({'value': lrange(5)}, columns=['value'])
right = DataFrame({'rvalue': lrange(6)})
lkey = np.array([1, 1, 2, 2, 3])
rkey = np.array([1, 1, 2, 3, 4, 5])
merged = merge(left, right, left_on=lkey, right_on=rkey, how='outer')
self.assert_numpy_array_equal(merged['key_0'],
np.array([1, 1, 1, 1, 2, 2, 3, 4, 5]))
left = DataFrame({'value': lrange(3)})
right = DataFrame({'rvalue': lrange(6)})
key = np.array([0, 1, 1, 2, 2, 3])
merged = merge(left, right, left_index=True, right_on=key, how='outer')
self.assert_numpy_array_equal(merged['key_0'], key)
def test_mixed_type_join_with_suffix(self):
# GH #916
df = DataFrame(np.random.randn(20, 6),
columns=['a', 'b', 'c', 'd', 'e', 'f'])
df.insert(0, 'id', 0)
df.insert(5, 'dt', 'foo')
grouped = df.groupby('id')
mn = grouped.mean()
cn = grouped.count()
# it works!
mn.join(cn, rsuffix='_right')
def test_no_overlap_more_informative_error(self):
dt = datetime.now()
df1 = DataFrame({'x': ['a']}, index=[dt])
df2 = DataFrame({'y': ['b', 'c']}, index=[dt, dt])
self.assertRaises(MergeError, merge, df1, df2)
def test_merge_non_unique_indexes(self):
dt = datetime(2012, 5, 1)
dt2 = datetime(2012, 5, 2)
dt3 = datetime(2012, 5, 3)
dt4 = datetime(2012, 5, 4)
df1 = DataFrame({'x': ['a']}, index=[dt])
df2 = DataFrame({'y': ['b', 'c']}, index=[dt, dt])
_check_merge(df1, df2)
# Not monotonic
df1 = DataFrame({'x': ['a', 'b', 'q']}, index=[dt2, dt, dt4])
df2 = DataFrame({'y': ['c', 'd', 'e', 'f', 'g', 'h']},
index=[dt3, dt3, dt2, dt2, dt, dt])
_check_merge(df1, df2)
df1 = DataFrame({'x': ['a', 'b']}, index=[dt, dt])
df2 = DataFrame({'y': ['c', 'd']}, index=[dt, dt])
_check_merge(df1, df2)
def test_merge_non_unique_index_many_to_many(self):
dt = datetime(2012, 5, 1)
dt2 = datetime(2012, 5, 2)
dt3 = datetime(2012, 5, 3)
df1 = DataFrame({'x': ['a', 'b', 'c', 'd']},
index=[dt2, dt2, dt, dt])
df2 = DataFrame({'y': ['e', 'f', 'g', ' h', 'i']},
index=[dt2, dt2, dt3, dt, dt])
_check_merge(df1, df2)
def test_left_merge_empty_dataframe(self):
left = DataFrame({'key': [1], 'value': [2]})
right = DataFrame({'key': []})
result = merge(left, right, on='key', how='left')
assert_frame_equal(result, left)
result = merge(right, left, on='key', how='right')
assert_frame_equal(result, left)
def test_merge_left_empty_right_empty(self):
# GH 10824
left = pd.DataFrame([], columns=['a', 'b', 'c'])
right = pd.DataFrame([], columns=['x', 'y', 'z'])
exp_in = pd.DataFrame([], columns=['a', 'b', 'c', 'x', 'y', 'z'],
index=pd.Index([], dtype=object),
dtype=object)
for kwarg in [dict(left_index=True, right_index=True),
dict(left_index=True, right_on='x'),
dict(left_on='a', right_index=True),
dict(left_on='a', right_on='x')]:
result = pd.merge(left, right, how='inner', **kwarg)
tm.assert_frame_equal(result, exp_in)
result = pd.merge(left, right, how='left', **kwarg)
tm.assert_frame_equal(result, exp_in)
result = pd.merge(left, right, how='right', **kwarg)
tm.assert_frame_equal(result, exp_in)
result = pd.merge(left, right, how='outer', **kwarg)
tm.assert_frame_equal(result, exp_in)
def test_merge_left_empty_right_notempty(self):
# GH 10824
left = pd.DataFrame([], columns=['a', 'b', 'c'])
right = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
columns=['x', 'y', 'z'])
exp_out = pd.DataFrame({'a': np.array([np.nan]*3, dtype=object),
'b': np.array([np.nan]*3, dtype=object),
'c': np.array([np.nan]*3, dtype=object),
'x': [1, 4, 7],
'y': [2, 5, 8],
'z': [3, 6, 9]},
columns=['a', 'b', 'c', 'x', 'y', 'z'])
exp_in = exp_out[0:0] # make empty DataFrame keeping dtype
# result will have object dtype
exp_in.index = exp_in.index.astype(object)
for kwarg in [dict(left_index=True, right_index=True),
dict(left_index=True, right_on='x'),
dict(left_on='a', right_index=True),
dict(left_on='a', right_on='x')]:
result = pd.merge(left, right, how='inner', **kwarg)
tm.assert_frame_equal(result, exp_in)
result = pd.merge(left, right, how='left', **kwarg)
tm.assert_frame_equal(result, exp_in)
result = pd.merge(left, right, how='right', **kwarg)
tm.assert_frame_equal(result, exp_out)
result = pd.merge(left, right, how='outer', **kwarg)
tm.assert_frame_equal(result, exp_out)
def test_merge_left_notempty_right_empty(self):
# GH 10824
left = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
columns=['a', 'b', 'c'])
right = pd.DataFrame([], columns=['x', 'y', 'z'])
exp_out = pd.DataFrame({'a': [1, 4, 7],
'b': [2, 5, 8],
'c': [3, 6, 9],
'x': np.array([np.nan]*3, dtype=object),
'y': np.array([np.nan]*3, dtype=object),
'z': np.array([np.nan]*3, dtype=object)},
columns=['a', 'b', 'c', 'x', 'y', 'z'])
exp_in = exp_out[0:0] # make empty DataFrame keeping dtype
# result will have object dtype
exp_in.index = exp_in.index.astype(object)
for kwarg in [dict(left_index=True, right_index=True),
dict(left_index=True, right_on='x'),
dict(left_on='a', right_index=True),
dict(left_on='a', right_on='x')]:
result = pd.merge(left, right, how='inner', **kwarg)
tm.assert_frame_equal(result, exp_in)
result = pd.merge(left, right, how='right', **kwarg)
tm.assert_frame_equal(result, exp_in)
result = pd.merge(left, right, how='left', **kwarg)
tm.assert_frame_equal(result, exp_out)
result = pd.merge(left, right, how='outer', **kwarg)
tm.assert_frame_equal(result, exp_out)
def test_merge_nosort(self):
# #2098, anything to do?
from datetime import datetime
d = {"var1": np.random.randint(0, 10, size=10),
"var2": np.random.randint(0, 10, size=10),
"var3": [datetime(2012, 1, 12), datetime(2011, 2, 4),
datetime(
2010, 2, 3), datetime(2012, 1, 12),
datetime(
2011, 2, 4), datetime(2012, 4, 3),
datetime(
2012, 3, 4), datetime(2008, 5, 1),
datetime(2010, 2, 3), datetime(2012, 2, 3)]}
df = DataFrame.from_dict(d)
var3 = df.var3.unique()
var3.sort()
new = DataFrame.from_dict({"var3": var3,
"var8": np.random.random(7)})
result = df.merge(new, on="var3", sort=False)
exp = merge(df, new, on='var3', sort=False)
assert_frame_equal(result, exp)
self.assertTrue((df.var3.unique() == result.var3.unique()).all())
def test_merge_nan_right(self):
df1 = DataFrame({"i1" : [0, 1], "i2" : [0, 1]})
df2 = DataFrame({"i1" : [0], "i3" : [0]})
result = df1.join(df2, on="i1", rsuffix="_")
expected = DataFrame({'i1': {0: 0.0, 1: 1}, 'i2': {0: 0, 1: 1},
'i1_': {0: 0, 1: np.nan}, 'i3': {0: 0.0, 1: np.nan},
None: {0: 0, 1: 0}}).set_index(None).reset_index()[['i1', 'i2', 'i1_', 'i3']]
assert_frame_equal(result, expected, check_dtype=False)
df1 = DataFrame({"i1" : [0, 1], "i2" : [0.5, 1.5]})
df2 = DataFrame({"i1" : [0], "i3" : [0.7]})
result = df1.join(df2, rsuffix="_", on='i1')
expected = DataFrame({'i1': {0: 0, 1: 1}, 'i1_': {0: 0.0, 1: nan},
'i2': {0: 0.5, 1: 1.5}, 'i3': {0: 0.69999999999999996,
1: nan}})[['i1', 'i2', 'i1_', 'i3']]
assert_frame_equal(result, expected)
def test_merge_type(self):
class NotADataFrame(DataFrame):
@property
def _constructor(self):
return NotADataFrame
nad = NotADataFrame(self.df)
result = nad.merge(self.df2, on='key1')
tm.assertIsInstance(result, NotADataFrame)
def test_append_dtype_coerce(self):
# GH 4993
# appending with datetime will incorrectly convert datetime64
import datetime as dt
from pandas import NaT
df1 = DataFrame(index=[1,2], data=[dt.datetime(2013,1,1,0,0),
dt.datetime(2013,1,2,0,0)],
columns=['start_time'])
df2 = DataFrame(index=[4,5], data=[[dt.datetime(2013,1,3,0,0),
dt.datetime(2013,1,3,6,10)],
[dt.datetime(2013,1,4,0,0),
dt.datetime(2013,1,4,7,10)]],
columns=['start_time','end_time'])
expected = concat([
Series([NaT,NaT,dt.datetime(2013,1,3,6,10),dt.datetime(2013,1,4,7,10)],name='end_time'),
Series([dt.datetime(2013,1,1,0,0),dt.datetime(2013,1,2,0,0),dt.datetime(2013,1,3,0,0),dt.datetime(2013,1,4,0,0)],name='start_time'),
],axis=1)
result = df1.append(df2,ignore_index=True)
assert_frame_equal(result, expected)
def test_join_append_timedeltas(self):
import datetime as dt
from pandas import NaT
# timedelta64 issues with join/merge
# GH 5695
d = {'d': dt.datetime(2013, 11, 5, 5, 56), 't': dt.timedelta(0, 22500)}
df = DataFrame(columns=list('dt'))
df = df.append(d, ignore_index=True)
result = df.append(d, ignore_index=True)
expected = DataFrame({'d': [dt.datetime(2013, 11, 5, 5, 56),
dt.datetime(2013, 11, 5, 5, 56) ],
't': [ dt.timedelta(0, 22500),
dt.timedelta(0, 22500) ]})
assert_frame_equal(result, expected)
td = np.timedelta64(300000000)
lhs = DataFrame(Series([td,td],index=["A","B"]))
rhs = DataFrame(Series([td],index=["A"]))
from pandas import NaT
result = lhs.join(rhs,rsuffix='r', how="left")
expected = DataFrame({ '0' : Series([td,td],index=list('AB')), '0r' : Series([td,NaT],index=list('AB')) })
assert_frame_equal(result, expected)
def test_overlapping_columns_error_message(self):
df = DataFrame({'key': [1, 2, 3],
'v1': [4, 5, 6],
'v2': [7, 8, 9]})
df2 = DataFrame({'key': [1, 2, 3],
'v1': [4, 5, 6],
'v2': [7, 8, 9]})
df.columns = ['key', 'foo', 'foo']
df2.columns = ['key', 'bar', 'bar']
expected = DataFrame({'key': [1, 2, 3],
'v1': [4, 5, 6],
'v2': [7, 8, 9],
'v3': [4, 5, 6],
'v4': [7, 8, 9]})
expected.columns = ['key', 'foo', 'foo', 'bar', 'bar']
assert_frame_equal(merge(df, df2), expected)
# #2649, #10639
df2.columns = ['key1', 'foo', 'foo']
self.assertRaises(ValueError, merge, df, df2)
def test_merge_on_datetime64tz(self):
# GH11405
left = pd.DataFrame({'key' : pd.date_range('20151010',periods=2,tz='US/Eastern'),
'value' : [1,2]})
right = pd.DataFrame({'key' : pd.date_range('20151011',periods=3,tz='US/Eastern'),
'value' : [1,2,3]})
expected = DataFrame({'key' : pd.date_range('20151010',periods=4,tz='US/Eastern'),
'value_x' : [1,2,np.nan,np.nan],
'value_y' : [np.nan,1,2,3]})
result = pd.merge(left, right, on='key', how='outer')
assert_frame_equal(result, expected)
left = pd.DataFrame({'value' : pd.date_range('20151010',periods=2,tz='US/Eastern'),
'key' : [1,2]})
right = pd.DataFrame({'value' : pd.date_range('20151011',periods=2,tz='US/Eastern'),
'key' : [2,3]})
expected = DataFrame({'value_x' : list(pd.date_range('20151010',periods=2,tz='US/Eastern')) + [pd.NaT],
'value_y' : [pd.NaT] + list(pd.date_range('20151011',periods=2,tz='US/Eastern')),
'key' : [1.,2,3]})
result = pd.merge(left, right, on='key', how='outer')
assert_frame_equal(result, expected)
def test_indicator(self):
# PR #10054. xref #7412 and closes #8790.
df1 = DataFrame({'col1':[0,1], 'col_left':['a','b'], 'col_conflict':[1,2]})
df1_copy = df1.copy()
df2 = DataFrame({'col1':[1,2,3,4,5],'col_right':[2,2,2,2,2],
'col_conflict':[1,2,3,4,5]})
df2_copy = df2.copy()
df_result = DataFrame({'col1':[0,1,2,3,4,5],
'col_conflict_x':[1,2,np.nan,np.nan,np.nan,np.nan],
'col_left':['a','b', np.nan,np.nan,np.nan,np.nan],
'col_conflict_y':[np.nan,1,2,3,4,5],
'col_right':[np.nan, 2,2,2,2,2]},
dtype='float64')
df_result['_merge'] = Categorical(['left_only','both','right_only',
'right_only','right_only','right_only']
, categories=['left_only', 'right_only', 'both'])
df_result = df_result[['col1', 'col_conflict_x', 'col_left',
'col_conflict_y', 'col_right', '_merge' ]]
test = merge(df1, df2, on='col1', how='outer', indicator=True)
assert_frame_equal(test, df_result)
test = df1.merge(df2, on='col1', how='outer', indicator=True)
assert_frame_equal(test, df_result)
# No side effects
assert_frame_equal(df1, df1_copy)
assert_frame_equal(df2, df2_copy)
# Check with custom name
df_result_custom_name = df_result
df_result_custom_name = df_result_custom_name.rename(columns={'_merge':'custom_name'})
test_custom_name = merge(df1, df2, on='col1', how='outer', indicator='custom_name')
assert_frame_equal(test_custom_name, df_result_custom_name)
test_custom_name = df1.merge(df2, on='col1', how='outer', indicator='custom_name')
assert_frame_equal(test_custom_name, df_result_custom_name)
# Check only accepts strings and booleans
with tm.assertRaises(ValueError):
merge(df1, df2, on='col1', how='outer', indicator=5)
with tm.assertRaises(ValueError):
df1.merge(df2, on='col1', how='outer', indicator=5)
# Check result integrity
test2 = merge(df1, df2, on='col1', how='left', indicator=True)
self.assertTrue((test2._merge != 'right_only').all())
test2 = df1.merge(df2, on='col1', how='left', indicator=True)
self.assertTrue((test2._merge != 'right_only').all())
test3 = merge(df1, df2, on='col1', how='right', indicator=True)
self.assertTrue((test3._merge != 'left_only').all())
test3 = df1.merge(df2, on='col1', how='right', indicator=True)
self.assertTrue((test3._merge != 'left_only').all())
test4 = merge(df1, df2, on='col1', how='inner', indicator=True)
self.assertTrue((test4._merge == 'both').all())
test4 = df1.merge(df2, on='col1', how='inner', indicator=True)
self.assertTrue((test4._merge == 'both').all())
# Check if working name in df
for i in ['_right_indicator', '_left_indicator', '_merge']:
df_badcolumn = DataFrame({'col1':[1,2], i:[2,2]})
with tm.assertRaises(ValueError):
merge(df1, df_badcolumn, on='col1', how='outer', indicator=True)
with tm.assertRaises(ValueError):
df1.merge(df_badcolumn, on='col1', how='outer', indicator=True)
# Check for name conflict with custom name
df_badcolumn = DataFrame({'col1':[1,2], 'custom_column_name':[2,2]})
with tm.assertRaises(ValueError):
merge(df1, df_badcolumn, on='col1', how='outer', indicator='custom_column_name')
with tm.assertRaises(ValueError):
df1.merge(df_badcolumn, on='col1', how='outer', indicator='custom_column_name')
# Merge on multiple columns
df3 = DataFrame({'col1':[0,1], 'col2':['a','b']})
df4 = DataFrame({'col1':[1,1,3], 'col2':['b','x','y']})
hand_coded_result = DataFrame({'col1':[0,1,1,3.0],
'col2':['a','b','x','y']})
hand_coded_result['_merge'] = Categorical(
['left_only','both','right_only','right_only']
, categories=['left_only', 'right_only', 'both'])
test5 = merge(df3, df4, on=['col1', 'col2'], how='outer', indicator=True)
assert_frame_equal(test5, hand_coded_result)
test5 = df3.merge(df4, on=['col1', 'col2'], how='outer', indicator=True)
assert_frame_equal(test5, hand_coded_result)
def _check_merge(x, y):
for how in ['inner', 'left', 'outer']:
result = x.join(y, how=how)
expected = merge(x.reset_index(), y.reset_index(), how=how,
sort=True)
expected = expected.set_index('index')
assert_frame_equal(result, expected, check_names=False) # TODO check_names on merge?
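# Illustrative sketch only (hypothetical helper): the equivalence that
# _check_merge exercises -- joining on the index agrees with merging the
# reset-index frames and restoring the 'index' column -- assuming plain,
# unnamed indexes as in the tests above.
def _demo_check_merge_equivalence():
    x = DataFrame({'x': [1, 2]}, index=['a', 'b'])
    y = DataFrame({'y': [3, 4]}, index=['b', 'c'])
    joined = x.join(y, how='outer')
    merged = merge(x.reset_index(), y.reset_index(),
                   how='outer', sort=True).set_index('index')
    assert_frame_equal(joined.sort_index(), merged.sort_index(),
                       check_names=False)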
class TestMergeMulti(tm.TestCase):
def setUp(self):
self.index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.to_join = DataFrame(np.random.randn(10, 3), index=self.index,
columns=['j_one', 'j_two', 'j_three'])
# a little relevant example with NAs
key1 = ['bar', 'bar', 'bar', 'foo', 'foo', 'baz', 'baz', 'qux',
'qux', 'snap']
key2 = ['two', 'one', 'three', 'one', 'two', 'one', 'two', 'two',
'three', 'one']
data = np.random.randn(len(key1))
self.data = DataFrame({'key1': key1, 'key2': key2,
'data': data})
def test_merge_on_multikey(self):
joined = self.data.join(self.to_join, on=['key1', 'key2'])
join_key = Index(lzip(self.data['key1'], self.data['key2']))
indexer = self.to_join.index.get_indexer(join_key)
ex_values = self.to_join.values.take(indexer, axis=0)
ex_values[indexer == -1] = np.nan
expected = self.data.join(DataFrame(ex_values,
columns=self.to_join.columns))
# TODO: columns aren't in the same order yet
assert_frame_equal(joined, expected.ix[:, joined.columns])
left = self.data.join(self.to_join, on=['key1', 'key2'], sort=True)
right = expected.ix[:, joined.columns].sort_values(['key1', 'key2'],
kind='mergesort')
assert_frame_equal(left, right)
def test_left_join_multi_index(self):
icols = ['1st', '2nd', '3rd']
def bind_cols(df):
iord = lambda a: 0 if a != a else ord(a)
f = lambda ts: ts.map(iord) - ord('a')
return f(df['1st']) + f(df['3rd'])* 1e2 + df['2nd'].fillna(0) * 1e4
def run_asserts(left, right):
for sort in [False, True]:
res = left.join(right, on=icols, how='left', sort=sort)
self.assertTrue(len(left) < len(res) + 1)
self.assertFalse(res['4th'].isnull().any())
self.assertFalse(res['5th'].isnull().any())
tm.assert_series_equal(res['4th'], - res['5th'], check_names=False)
result = bind_cols(res.iloc[:, :-2])
tm.assert_series_equal(res['4th'], result, check_names=False)
self.assertTrue(result.name is None)
if sort:
tm.assert_frame_equal(res,
res.sort_values(icols, kind='mergesort'))
out = merge(left, right.reset_index(), on=icols,
sort=sort, how='left')
res.index = np.arange(len(res))
tm.assert_frame_equal(out, res)
lc = list(map(chr, np.arange(ord('a'), ord('z') + 1)))
left = DataFrame(np.random.choice(lc, (5000, 2)),
columns=['1st', '3rd'])
left.insert(1, '2nd', np.random.randint(0, 1000, len(left)))
i = np.random.permutation(len(left))
right = left.iloc[i].copy()
left['4th'] = bind_cols(left)
right['5th'] = - bind_cols(right)
right.set_index(icols, inplace=True)
run_asserts(left, right)
# inject some nulls
left.loc[1::23, '1st'] = np.nan
left.loc[2::37, '2nd'] = np.nan
left.loc[3::43, '3rd'] = np.nan
left['4th'] = bind_cols(left)
i = np.random.permutation(len(left))
right = left.iloc[i, :-1]
right['5th'] = - bind_cols(right)
right.set_index(icols, inplace=True)
run_asserts(left, right)
def test_merge_right_vs_left(self):
# compare left vs right merge with multikey
for sort in [False, True]:
merged1 = self.data.merge(self.to_join, left_on=['key1', 'key2'],
right_index=True, how='left', sort=sort)
merged2 = self.to_join.merge(self.data, right_on=['key1', 'key2'],
left_index=True, how='right', sort=sort)
merged2 = merged2.ix[:, merged1.columns]
assert_frame_equal(merged1, merged2)
def test_compress_group_combinations(self):
# ~ 40000000 possible unique groups
key1 = tm.rands_array(10, 10000)
key1 = np.tile(key1, 2)
key2 = key1[::-1]
df = DataFrame({'key1': key1, 'key2': key2,
'value1': np.random.randn(20000)})
df2 = DataFrame({'key1': key1[::2], 'key2': key2[::2],
'value2': np.random.randn(10000)})
# just to hit the label compression code path
merged = merge(df, df2, how='outer')
def test_left_join_index_preserve_order(self):
left = DataFrame({'k1': [0, 1, 2] * 8,
'k2': ['foo', 'bar'] * 12,
'v': np.array(np.arange(24),dtype=np.int64) })
index = MultiIndex.from_tuples([(2, 'bar'), (1, 'foo')])
right = DataFrame({'v2': [5, 7]}, index=index)
result = left.join(right, on=['k1', 'k2'])
expected = left.copy()
expected['v2'] = np.nan
expected.loc[(expected.k1 == 2) & (expected.k2 == 'bar'),'v2'] = 5
expected.loc[(expected.k1 == 1) & (expected.k2 == 'foo'),'v2'] = 7
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result.sort_values(['k1', 'k2'], kind='mergesort'),
left.join(right, on=['k1', 'k2'], sort=True))
# test join with multi dtypes blocks
left = DataFrame({'k1': [0, 1, 2] * 8,
'k2': ['foo', 'bar'] * 12,
'k3' : np.array([0, 1, 2]*8, dtype=np.float32),
'v': np.array(np.arange(24),dtype=np.int32) })
index = MultiIndex.from_tuples([(2, 'bar'), (1, 'foo')])
right = DataFrame({'v2': [5, 7]}, index=index)
result = left.join(right, on=['k1', 'k2'])
expected = left.copy()
expected['v2'] = np.nan
expected.loc[(expected.k1 == 2) & (expected.k2 == 'bar'),'v2'] = 5
expected.loc[(expected.k1 == 1) & (expected.k2 == 'foo'),'v2'] = 7
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result.sort_values(['k1', 'k2'], kind='mergesort'),
left.join(right, on=['k1', 'k2'], sort=True))
# do a right join for an extra test
joined = merge(right, left, left_index=True,
right_on=['k1', 'k2'], how='right')
tm.assert_frame_equal(joined.ix[:, expected.columns], expected)
def test_left_join_index_multi_match_multiindex(self):
left = DataFrame([
['X', 'Y', 'C', 'a'],
['W', 'Y', 'C', 'e'],
['V', 'Q', 'A', 'h'],
['V', 'R', 'D', 'i'],
['X', 'Y', 'D', 'b'],
['X', 'Y', 'A', 'c'],
['W', 'Q', 'B', 'f'],
['W', 'R', 'C', 'g'],
['V', 'Y', 'C', 'j'],
['X', 'Y', 'B', 'd']],
columns=['cola', 'colb', 'colc', 'tag'],
index=[3, 2, 0, 1, 7, 6, 4, 5, 9, 8])
right = DataFrame([
['W', 'R', 'C', 0],
['W', 'Q', 'B', 3],
['W', 'Q', 'B', 8],
['X', 'Y', 'A', 1],
['X', 'Y', 'A', 4],
['X', 'Y', 'B', 5],
['X', 'Y', 'C', 6],
['X', 'Y', 'C', 9],
['X', 'Q', 'C', -6],
['X', 'R', 'C', -9],
['V', 'Y', 'C', 7],
['V', 'R', 'D', 2],
['V', 'R', 'D', -1],
['V', 'Q', 'A', -3]],
columns=['col1', 'col2', 'col3', 'val'])
right.set_index(['col1', 'col2', 'col3'], inplace=True)
result = left.join(right, on=['cola', 'colb', 'colc'], how='left')
expected = DataFrame([
['X', 'Y', 'C', 'a', 6],
['X', 'Y', 'C', 'a', 9],
['W', 'Y', 'C', 'e', nan],
['V', 'Q', 'A', 'h', -3],
['V', 'R', 'D', 'i', 2],
['V', 'R', 'D', 'i', -1],
['X', 'Y', 'D', 'b', nan],
['X', 'Y', 'A', 'c', 1],
['X', 'Y', 'A', 'c', 4],
['W', 'Q', 'B', 'f', 3],
['W', 'Q', 'B', 'f', 8],
['W', 'R', 'C', 'g', 0],
['V', 'Y', 'C', 'j', 7],
['X', 'Y', 'B', 'd', 5]],
columns=['cola', 'colb', 'colc', 'tag', 'val'],
index=[3, 3, 2, 0, 1, 1, 7, 6, 6, 4, 4, 5, 9, 8])
tm.assert_frame_equal(result, expected)
result = left.join(right, on=['cola', 'colb', 'colc'],
how='left', sort=True)
tm.assert_frame_equal(result,
expected.sort_values(['cola', 'colb', 'colc'], kind='mergesort'))
# GH7331 - maintain left frame order in left merge
right.reset_index(inplace=True)
right.columns = left.columns[:3].tolist() + right.columns[-1:].tolist()
result = merge(left, right, how='left', on=left.columns[:-1].tolist())
expected.index = np.arange(len(expected))
tm.assert_frame_equal(result, expected)
def test_left_join_index_multi_match(self):
left = DataFrame([
['c', 0],
['b', 1],
['a', 2],
['b', 3]],
columns=['tag', 'val'],
index=[2, 0, 1, 3])
right = DataFrame([
['a', 'v'],
['c', 'w'],
['c', 'x'],
['d', 'y'],
['a', 'z'],
['c', 'r'],
['e', 'q'],
['c', 's']],
columns=['tag', 'char'])
right.set_index('tag', inplace=True)
result = left.join(right, on='tag', how='left')
expected = DataFrame([
['c', 0, 'w'],
['c', 0, 'x'],
['c', 0, 'r'],
['c', 0, 's'],
['b', 1, nan],
['a', 2, 'v'],
['a', 2, 'z'],
['b', 3, nan]],
columns=['tag', 'val', 'char'],
index=[2, 2, 2, 2, 0, 1, 1, 3])
tm.assert_frame_equal(result, expected)
result = left.join(right, on='tag', how='left', sort=True)
tm.assert_frame_equal(result, expected.sort_values('tag', kind='mergesort'))
# GH7331 - maintain left frame order in left merge
result = merge(left, right.reset_index(), how='left', on='tag')
expected.index = np.arange(len(expected))
tm.assert_frame_equal(result, expected)
def test_join_multi_dtypes(self):
# test with multi dtypes in the join index
def _test(dtype1,dtype2):
left = DataFrame({'k1': np.array([0, 1, 2] * 8, dtype=dtype1),
'k2': ['foo', 'bar'] * 12,
'v': np.array(np.arange(24),dtype=np.int64) })
index = MultiIndex.from_tuples([(2, 'bar'), (1, 'foo')])
right = DataFrame({'v2': np.array([5, 7], dtype=dtype2)}, index=index)
result = left.join(right, on=['k1', 'k2'])
expected = left.copy()
if dtype2.kind == 'i':
dtype2 = np.dtype('float64')
expected['v2'] = np.array(np.nan,dtype=dtype2)
expected.loc[(expected.k1 == 2) & (expected.k2 == 'bar'),'v2'] = 5
expected.loc[(expected.k1 == 1) & (expected.k2 == 'foo'),'v2'] = 7
tm.assert_frame_equal(result, expected)
result = left.join(right, on=['k1', 'k2'], sort=True)
expected.sort_values(['k1', 'k2'], kind='mergesort', inplace=True)
tm.assert_frame_equal(result, expected)
for d1 in [np.int64,np.int32,np.int16,np.int8,np.uint8]:
for d2 in [np.int64,np.float64,np.float32,np.float16]:
_test(np.dtype(d1),np.dtype(d2))
def test_left_merge_na_buglet(self):
left = DataFrame({'id': list('abcde'), 'v1': randn(5),
'v2': randn(5), 'dummy': list('abcde'),
'v3': randn(5)},
columns=['id', 'v1', 'v2', 'dummy', 'v3'])
right = DataFrame({'id': ['a', 'b', np.nan, np.nan, np.nan],
'sv3': [1.234, 5.678, np.nan, np.nan, np.nan]})
merged = merge(left, right, on='id', how='left')
rdf = right.drop(['id'], axis=1)
expected = left.join(rdf)
tm.assert_frame_equal(merged, expected)
def test_merge_na_keys(self):
data = [[1950, "A", 1.5],
[1950, "B", 1.5],
[1955, "B", 1.5],
[1960, "B", np.nan],
[1970, "B", 4.],
[1950, "C", 4.],
[1960, "C", np.nan],
[1965, "C", 3.],
[1970, "C", 4.]]
frame = DataFrame(data, columns=["year", "panel", "data"])
other_data = [[1960, 'A', np.nan],
[1970, 'A', np.nan],
[1955, 'A', np.nan],
[1965, 'A', np.nan],
[1965, 'B', np.nan],
[1955, 'C', np.nan]]
other = DataFrame(other_data, columns=['year', 'panel', 'data'])
result = frame.merge(other, how='outer')
expected = frame.fillna(-999).merge(other.fillna(-999), how='outer')
expected = expected.replace(-999, np.nan)
tm.assert_frame_equal(result, expected)
@slow
def test_int64_overflow_issues(self):
from itertools import product
from collections import defaultdict
from pandas.core.groupby import _int64_overflow_possible
# #2690, combinatorial explosion
df1 = DataFrame(np.random.randn(1000, 7),
columns=list('ABCDEF') + ['G1'])
df2 = DataFrame(np.random.randn(1000, 7),
columns=list('ABCDEF') + ['G2'])
# it works!
result = merge(df1, df2, how='outer')
self.assertTrue(len(result) == 2000)
low, high, n = -1 << 10, 1 << 10, 1 << 20
left = DataFrame(np.random.randint(low, high, (n, 7)),
columns=list('ABCDEFG'))
left['left'] = left.sum(axis=1)
# one-2-one match
i = np.random.permutation(len(left))
right = left.iloc[i].copy()
right.columns = right.columns[:-1].tolist() + ['right']
right.index = np.arange(len(right))
right['right'] *= -1
out = merge(left, right, how='outer')
self.assertEqual(len(out), len(left))
assert_series_equal(out['left'], - out['right'], check_names=False)
result = out.iloc[:, :-2].sum(axis=1)
assert_series_equal(out['left'], result, check_names=False)
self.assertTrue(result.name is None)
out.sort_values(out.columns.tolist(), inplace=True)
out.index = np.arange(len(out))
for how in ['left', 'right', 'outer', 'inner']:
assert_frame_equal(out, merge(left, right, how=how, sort=True))
# check that left merge w/ sort=False maintains left frame order
out = merge(left, right, how='left', sort=False)
assert_frame_equal(left, out[left.columns.tolist()])
out = merge(right, left, how='left', sort=False)
assert_frame_equal(right, out[right.columns.tolist()])
# one-2-many/none match
n = 1 << 11
left = DataFrame(np.random.randint(low, high, (n, 7)).astype('int64'),
columns=list('ABCDEFG'))
# confirm that this is checking what it is supposed to check
shape = left.apply(Series.nunique).values
self.assertTrue(_int64_overflow_possible(shape))
# add duplicates to left frame
left = concat([left, left], ignore_index=True)
right = DataFrame(np.random.randint(low, high, (n // 2, 7)).astype('int64'),
columns=list('ABCDEFG'))
# add duplicates & overlap with left to the right frame
i = np.random.choice(len(left), n)
right = concat([right, right, left.iloc[i]], ignore_index=True)
left['left'] = np.random.randn(len(left))
right['right'] = np.random.randn(len(right))
# shuffle left & right frames
i = np.random.permutation(len(left))
left = left.iloc[i].copy()
left.index = np.arange(len(left))
i = np.random.permutation(len(right))
right = right.iloc[i].copy()
right.index = np.arange(len(right))
# manually compute outer merge
ldict, rdict = defaultdict(list), defaultdict(list)
for idx, row in left.set_index(list('ABCDEFG')).iterrows():
ldict[idx].append(row['left'])
for idx, row in right.set_index(list('ABCDEFG')).iterrows():
rdict[idx].append(row['right'])
vals = []
for k, lval in ldict.items():
rval = rdict.get(k, [np.nan])
for lv, rv in product(lval, rval):
vals.append(k + tuple([lv, rv]))
for k, rval in rdict.items():
if k not in ldict:
for rv in rval:
vals.append(k + tuple([np.nan, rv]))
def align(df):
df = df.sort_values(df.columns.tolist())
df.index = np.arange(len(df))
return df
def verify_order(df):
kcols = list('ABCDEFG')
assert_frame_equal(df[kcols].copy(),
df[kcols].sort_values(kcols, kind='mergesort'))
out = DataFrame(vals, columns=list('ABCDEFG') + ['left', 'right'])
out = align(out)
jmask = {'left': out['left'].notnull(),
'right': out['right'].notnull(),
'inner': out['left'].notnull() & out['right'].notnull(),
'outer': np.ones(len(out), dtype='bool')}
for how in 'left', 'right', 'outer', 'inner':
mask = jmask[how]
frame = align(out[mask].copy())
self.assertTrue(mask.all() ^ mask.any() or how == 'outer')
for sort in [False, True]:
res = merge(left, right, how=how, sort=sort)
if sort:
verify_order(res)
# as in GH9092 dtypes break with outer/right join
assert_frame_equal(frame, align(res),
check_dtype=how not in ('right', 'outer'))
def test_join_multi_levels(self):
# GH 3662
# merge multi-levels
household = DataFrame(dict(household_id = [1,2,3],
male = [0,1,0],
wealth = [196087.3,316478.7,294750]),
columns = ['household_id','male','wealth']).set_index('household_id')
portfolio = DataFrame(dict(household_id = [1,2,2,3,3,3,4],
asset_id = ["nl0000301109","nl0000289783","gb00b03mlx29","gb00b03mlx29","lu0197800237","nl0000289965",np.nan],
name = ["ABN Amro","Robeco","Royal Dutch Shell","Royal Dutch Shell","AAB Eastern Europe Equity Fund","Postbank BioTech Fonds",np.nan],
share = [1.0,0.4,0.6,0.15,0.6,0.25,1.0]),
columns = ['household_id','asset_id','name','share']).set_index(['household_id','asset_id'])
result = household.join(portfolio, how='inner')
expected = DataFrame(dict(male = [0,1,1,0,0,0],
wealth = [ 196087.3, 316478.7, 316478.7, 294750.0, 294750.0, 294750.0 ],
name = ['ABN Amro','Robeco','Royal Dutch Shell','Royal Dutch Shell','AAB Eastern Europe Equity Fund','Postbank BioTech Fonds'],
share = [1.00,0.40,0.60,0.15,0.60,0.25],
household_id = [1,2,2,3,3,3],
asset_id = ['nl0000301109','nl0000289783','gb00b03mlx29','gb00b03mlx29','lu0197800237','nl0000289965']),
).set_index(['household_id','asset_id']).reindex(columns=['male','wealth','name','share'])
assert_frame_equal(result,expected)
assert_frame_equal(result,expected)
# equivalency
result2 = merge(household.reset_index(),portfolio.reset_index(),on=['household_id'],how='inner').set_index(['household_id','asset_id'])
assert_frame_equal(result2,expected)
result = household.join(portfolio, how='outer')
expected = concat([expected,DataFrame(dict(share = [1.00]),
index=MultiIndex.from_tuples([(4,np.nan)],
names=['household_id','asset_id']))],
axis=0).reindex(columns=expected.columns)
assert_frame_equal(result,expected)
# invalid cases
household.index.name = 'foo'
def f():
household.join(portfolio, how='inner')
self.assertRaises(ValueError, f)
portfolio2 = portfolio.copy()
portfolio2.index.set_names(['household_id','foo'])
def f():
portfolio2.join(portfolio, how='inner')
self.assertRaises(ValueError, f)
def test_join_multi_levels2(self):
# some more advanced merges
# GH6360
household = DataFrame(dict(household_id = [1,2,2,3,3,3,4],
asset_id = ["nl0000301109","nl0000301109","gb00b03mlx29","gb00b03mlx29","lu0197800237","nl0000289965",np.nan],
share = [1.0,0.4,0.6,0.15,0.6,0.25,1.0]),
columns = ['household_id','asset_id','share']).set_index(['household_id','asset_id'])
log_return = DataFrame(dict(
asset_id = ["gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "lu0197800237", "lu0197800237"],
t = [233, 234, 235, 180, 181],
log_return = [.09604978, -.06524096, .03532373, .03025441, .036997]
)).set_index(["asset_id","t"])
expected = DataFrame(dict(
household_id = [2, 2, 2, 3, 3, 3, 3, 3],
asset_id = ["gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "lu0197800237", "lu0197800237"],
t = [233, 234, 235, 233, 234, 235, 180, 181],
share = [0.6, 0.6, 0.6, 0.15, 0.15, 0.15, 0.6, 0.6],
log_return = [.09604978, -.06524096, .03532373, .09604978, -.06524096, .03532373, .03025441, .036997]
)).set_index(["household_id", "asset_id", "t"]).reindex(columns=['share','log_return'])
def f():
household.join(log_return, how='inner')
self.assertRaises(NotImplementedError, f)
# this is the equivalency
result = merge(household.reset_index(),log_return.reset_index(),on=['asset_id'],how='inner').set_index(['household_id','asset_id','t'])
assert_frame_equal(result,expected)
expected = DataFrame(dict(
household_id = [1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4],
asset_id = ["nl0000301109", "nl0000289783", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "lu0197800237", "lu0197800237", "nl0000289965", None],
t = [None, None, 233, 234, 235, 233, 234, 235, 180, 181, None, None],
share = [1.0, 0.4, 0.6, 0.6, 0.6, 0.15, 0.15, 0.15, 0.6, 0.6, 0.25, 1.0],
log_return = [None, None, .09604978, -.06524096, .03532373, .09604978, -.06524096, .03532373, .03025441, .036997, None, None]
)).set_index(["household_id", "asset_id", "t"])
def f():
household.join(log_return, how='outer')
self.assertRaises(NotImplementedError, f)
def _check_join(left, right, result, join_col, how='left',
lsuffix='_x', rsuffix='_y'):
# some smoke tests
for c in join_col:
assert(result[c].notnull().all())
left_grouped = left.groupby(join_col)
right_grouped = right.groupby(join_col)
for group_key, group in result.groupby(join_col):
l_joined = _restrict_to_columns(group, left.columns, lsuffix)
r_joined = _restrict_to_columns(group, right.columns, rsuffix)
try:
lgroup = left_grouped.get_group(group_key)
except KeyError:
if how in ('left', 'inner'):
raise AssertionError('key %s should not have been in the join'
% str(group_key))
_assert_all_na(l_joined, left.columns, join_col)
else:
_assert_same_contents(l_joined, lgroup)
try:
rgroup = right_grouped.get_group(group_key)
except KeyError:
if how in ('right', 'inner'):
raise AssertionError('key %s should not have been in the join'
% str(group_key))
_assert_all_na(r_joined, right.columns, join_col)
else:
_assert_same_contents(r_joined, rgroup)
def _restrict_to_columns(group, columns, suffix):
found = [c for c in group.columns
if c in columns or c.replace(suffix, '') in columns]
# filter
group = group.ix[:, found]
# get rid of suffixes, if any
group = group.rename(columns=lambda x: x.replace(suffix, ''))
# put in the right order...
group = group.ix[:, columns]
return group
def _assert_same_contents(join_chunk, source):
NA_SENTINEL = -1234567 # drop_duplicates not so NA-friendly...
jvalues = join_chunk.fillna(NA_SENTINEL).drop_duplicates().values
svalues = source.fillna(NA_SENTINEL).drop_duplicates().values
rows = set(tuple(row) for row in jvalues)
assert(len(rows) == len(source))
assert(all(tuple(row) in rows for row in svalues))
def _assert_all_na(join_chunk, source_columns, join_col):
for c in source_columns:
if c in join_col:
continue
assert(join_chunk[c].isnull().all())
def _join_by_hand(a, b, how='left'):
join_index = a.index.join(b.index, how=how)
a_re = a.reindex(join_index)
b_re = b.reindex(join_index)
result_columns = a.columns.append(b.columns)
for col, s in compat.iteritems(b_re):
a_re[col] = s
return a_re.reindex(columns=result_columns)
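# Illustrative sketch only (hypothetical helper): for frames with unique,
# overlapping indexes, _join_by_hand above should reproduce DataFrame.join.
def _demo_join_by_hand():
    a = DataFrame({'x': [1, 2, 3]}, index=['a', 'b', 'c'])
    b = DataFrame({'y': [10, 20]}, index=['b', 'c'])
    assert_frame_equal(_join_by_hand(a, b, how='left'),
                       a.join(b, how='left'))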
class TestConcatenate(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.frame = DataFrame(tm.getSeriesData())
self.mixed_frame = self.frame.copy()
self.mixed_frame['foo'] = 'bar'
def test_append(self):
begin_index = self.frame.index[:5]
end_index = self.frame.index[5:]
begin_frame = self.frame.reindex(begin_index)
end_frame = self.frame.reindex(end_index)
appended = begin_frame.append(end_frame)
assert_almost_equal(appended['A'], self.frame['A'])
del end_frame['A']
partial_appended = begin_frame.append(end_frame)
self.assertIn('A', partial_appended)
partial_appended = end_frame.append(begin_frame)
self.assertIn('A', partial_appended)
# mixed type handling
appended = self.mixed_frame[:5].append(self.mixed_frame[5:])
assert_frame_equal(appended, self.mixed_frame)
# what to test here
mixed_appended = self.mixed_frame[:5].append(self.frame[5:])
mixed_appended2 = self.frame[:5].append(self.mixed_frame[5:])
# all equal except 'foo' column
assert_frame_equal(
mixed_appended.reindex(columns=['A', 'B', 'C', 'D']),
mixed_appended2.reindex(columns=['A', 'B', 'C', 'D']))
# append empty
empty = DataFrame({})
appended = self.frame.append(empty)
assert_frame_equal(self.frame, appended)
self.assertIsNot(appended, self.frame)
appended = empty.append(self.frame)
assert_frame_equal(self.frame, appended)
self.assertIsNot(appended, self.frame)
# overlap
self.assertRaises(ValueError, self.frame.append, self.frame,
verify_integrity=True)
# new columns
# GH 6129
df = DataFrame({'a': {'x': 1, 'y': 2}, 'b': {'x': 3, 'y': 4}})
row = Series([5, 6, 7], index=['a', 'b', 'c'], name='z')
expected = DataFrame({'a': {'x': 1, 'y': 2, 'z': 5}, 'b': {'x': 3, 'y': 4, 'z': 6}, 'c' : {'z' : 7}})
result = df.append(row)
assert_frame_equal(result, expected)
def test_append_length0_frame(self):
df = DataFrame(columns=['A', 'B', 'C'])
df3 = DataFrame(index=[0, 1], columns=['A', 'B'])
df5 = df.append(df3)
expected = DataFrame(index=[0, 1], columns=['A', 'B', 'C'])
assert_frame_equal(df5, expected)
def test_append_records(self):
arr1 = np.zeros((2,), dtype=('i4,f4,a10'))
arr1[:] = [(1, 2., 'Hello'), (2, 3., "World")]
arr2 = np.zeros((3,), dtype=('i4,f4,a10'))
arr2[:] = [(3, 4., 'foo'),
(5, 6., "bar"),
(7., 8., 'baz')]
df1 = DataFrame(arr1)
df2 = DataFrame(arr2)
result = df1.append(df2, ignore_index=True)
expected = DataFrame(np.concatenate((arr1, arr2)))
assert_frame_equal(result, expected)
def test_append_different_columns(self):
df = DataFrame({'bools': np.random.randn(10) > 0,
'ints': np.random.randint(0, 10, 10),
'floats': np.random.randn(10),
'strings': ['foo', 'bar'] * 5})
a = df[:5].ix[:, ['bools', 'ints', 'floats']]
b = df[5:].ix[:, ['strings', 'ints', 'floats']]
appended = a.append(b)
self.assertTrue(isnull(appended['strings'][0:4]).all())
self.assertTrue(isnull(appended['bools'][5:]).all())
def test_append_many(self):
chunks = [self.frame[:5], self.frame[5:10],
self.frame[10:15], self.frame[15:]]
result = chunks[0].append(chunks[1:])
tm.assert_frame_equal(result, self.frame)
chunks[-1] = chunks[-1].copy()
chunks[-1]['foo'] = 'bar'
result = chunks[0].append(chunks[1:])
tm.assert_frame_equal(result.ix[:, self.frame.columns], self.frame)
self.assertTrue((result['foo'][15:] == 'bar').all())
self.assertTrue(result['foo'][:15].isnull().all())
def test_append_preserve_index_name(self):
# #980
df1 = DataFrame(data=None, columns=['A', 'B', 'C'])
df1 = df1.set_index(['A'])
df2 = DataFrame(data=[[1, 4, 7], [2, 5, 8], [3, 6, 9]],
columns=['A', 'B', 'C'])
df2 = df2.set_index(['A'])
result = df1.append(df2)
self.assertEqual(result.index.name, 'A')
def test_join_many(self):
df = DataFrame(np.random.randn(10, 6), columns=list('abcdef'))
df_list = [df[['a', 'b']], df[['c', 'd']], df[['e', 'f']]]
joined = df_list[0].join(df_list[1:])
tm.assert_frame_equal(joined, df)
df_list = [df[['a', 'b']][:-2],
df[['c', 'd']][2:], df[['e', 'f']][1:9]]
def _check_diff_index(df_list, result, exp_index):
reindexed = [x.reindex(exp_index) for x in df_list]
expected = reindexed[0].join(reindexed[1:])
tm.assert_frame_equal(result, expected)
# different join types
joined = df_list[0].join(df_list[1:], how='outer')
_check_diff_index(df_list, joined, df.index)
joined = df_list[0].join(df_list[1:])
_check_diff_index(df_list, joined, df_list[0].index)
joined = df_list[0].join(df_list[1:], how='inner')
_check_diff_index(df_list, joined, df.index[2:8])
self.assertRaises(ValueError, df_list[0].join, df_list[1:], on='a')
def test_join_many_mixed(self):
df = DataFrame(np.random.randn(8, 4), columns=['A', 'B', 'C', 'D'])
df['key'] = ['foo', 'bar'] * 4
df1 = df.ix[:, ['A', 'B']]
df2 = df.ix[:, ['C', 'D']]
df3 = df.ix[:, ['key']]
result = df1.join([df2, df3])
assert_frame_equal(result, df)
def test_append_missing_column_proper_upcast(self):
df1 = DataFrame({'A': np.array([1, 2, 3, 4], dtype='i8')})
df2 = DataFrame({'B': np.array([True, False, True, False],
dtype=bool)})
appended = df1.append(df2, ignore_index=True)
self.assertEqual(appended['A'].dtype, 'f8')
self.assertEqual(appended['B'].dtype, 'O')
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0,10,size=4).reshape(4,1))
df3 = DataFrame({5 : 'foo'},index=range(4))
# these are actual copies
result = concat([df,df2,df3],axis=1,copy=True)
for b in result._data.blocks:
self.assertIsNone(b.values.base)
# these are the same
result = concat([df,df2,df3],axis=1,copy=False)
for b in result._data.blocks:
if b.is_float:
self.assertTrue(b.values.base is df._data.blocks[0].values.base)
elif b.is_integer:
self.assertTrue(b.values.base is df2._data.blocks[0].values.base)
elif b.is_object:
self.assertIsNotNone(b.values.base)
# float block was consolidated
df4 = DataFrame(np.random.randn(4,1))
result = concat([df,df2,df3,df4],axis=1,copy=False)
for b in result._data.blocks:
if b.is_float:
self.assertIsNone(b.values.base)
elif b.is_integer:
self.assertTrue(b.values.base is df2._data.blocks[0].values.base)
elif b.is_object:
self.assertIsNotNone(b.values.base)
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1, 1],
[0, 1, 2, 0, 1, 2, 3]])
expected = DataFrame(np.r_[df.values, df2.values],
index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values],
index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values],
columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values],
columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.ix[:, [0, 1]], df.ix[:, [2]], df.ix[:, [3]]]
level = ['three', 'two', 'one', 'zero']
result = concat(pieces, axis=1, keys=['one', 'two', 'three'],
levels=[level],
names=['group_key'])
self.assert_numpy_array_equal(result.columns.levels[0], level)
self.assertEqual(result.columns.names[0], 'group_key')
def test_concat_dataframe_keys_bug(self):
t1 = DataFrame({'value': Series([1, 2, 3],
index=Index(['a', 'b', 'c'], name='id'))})
t2 = DataFrame({'value': Series([7, 8],
index=Index(['a', 'b'], name='id'))})
# it works
result = concat([t1, t2], axis=1, keys=['t1', 't2'])
self.assertEqual(list(result.columns), [('t1', 'value'),
('t2', 'value')])
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1,2], name='foo')
bar = Series([1,2])
baz = Series([4,5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame({'foo' : [1,2], 0 : [1,2], 1 : [4,5]}, columns=['foo',0,1])
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=['red','blue','yellow'])
expected = DataFrame({'red' : [1,2], 'blue' : [1,2], 'yellow' : [4,5]}, columns=['red','blue','yellow'])
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0 : [1,2], 1 : [1,2], 2 : [4,5]})
tm.assert_frame_equal(result, expected)
def test_concat_dict(self):
frames = {'foo': DataFrame(np.random.randn(4, 3)),
'bar': DataFrame(np.random.randn(4, 3)),
'baz': DataFrame(np.random.randn(4, 3)),
'qux': DataFrame(np.random.randn(4, 3))}
sorted_keys = sorted(frames)
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys,
axis=1)
tm.assert_frame_equal(result, expected)
keys = ['baz', 'foo', 'bar']
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self):
frame1 = DataFrame({"test1": ["a", "b", "c"],
"test2": [1, 2, 3],
"test3": [4.5, 3.2, 1.2]})
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True)
nan = np.nan
expected = DataFrame([[nan, nan, nan, 4.3],
['a', 1, 4.5, 5.2],
['b', 2, 3.2, 2.2],
['c', 3, 1.2, nan]],
index=Index(["q", "x", "y", "z"]))
tm.assert_frame_equal(v1, expected)
def test_concat_multiindex_with_keys(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
frame = DataFrame(np.random.randn(10, 3), index=index,
columns=Index(['A', 'B', 'C'], name='exp'))
result = concat([frame, frame], keys=[0, 1], names=['iteration'])
self.assertEqual(result.index.names, ('iteration',) + index.names)
tm.assert_frame_equal(result.ix[0], frame)
tm.assert_frame_equal(result.ix[1], frame)
self.assertEqual(result.index.nlevels, 3)
def test_concat_multiindex_with_tz(self):
# GH 6606
df = DataFrame({'dt': [datetime(2014, 1, 1),
datetime(2014, 1, 2),
datetime(2014, 1, 3)],
'b': ['A', 'B', 'C'],
'c': [1, 2, 3], 'd': [4, 5, 6]})
df['dt'] = df['dt'].apply(lambda d: Timestamp(d, tz='US/Pacific'))
df = df.set_index(['dt', 'b'])
exp_idx1 = DatetimeIndex(['2014-01-01', '2014-01-02', '2014-01-03'] * 2,
tz='US/Pacific', name='dt')
exp_idx2 = Index(['A', 'B', 'C'] * 2, name='b')
exp_idx = MultiIndex.from_arrays([exp_idx1, exp_idx2])
expected = DataFrame({'c': [1, 2, 3] * 2, 'd': [4, 5, 6] * 2},
index=exp_idx, columns=['c', 'd'])
result = concat([df, df])
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [['foo', 'baz'], ['one', 'two']]
names = ['first', 'second']
result = concat([df, df2, df, df2],
keys=[('foo', 'one'), ('foo', 'two'),
('baz', 'one'), ('baz', 'two')],
levels=levels,
names=names)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(levels=levels + [[0]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1],
[0, 0, 0, 0]],
names=names + [None])
expected.index = exp_index
assert_frame_equal(result, expected)
# no names
result = concat([df, df2, df, df2],
keys=[('foo', 'one'), ('foo', 'two'),
('baz', 'one'), ('baz', 'two')],
levels=levels)
self.assertEqual(result.index.names, (None,) * 3)
# no levels
result = concat([df, df2, df, df2],
keys=[('foo', 'one'), ('foo', 'two'),
('baz', 'one'), ('baz', 'two')],
names=['first', 'second'])
self.assertEqual(result.index.names, ('first', 'second') + (None,))
self.assert_numpy_array_equal(result.index.levels[0], ['baz', 'foo'])
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=['a'])
df2 = DataFrame(np.random.randn(1, 4), index=['b'])
self.assertRaises(ValueError, concat, [df, df],
keys=['one', 'two'], levels=[['foo', 'bar', 'baz']])
self.assertRaises(ValueError, concat, [df, df2],
keys=['one', 'two'], levels=[['foo', 'bar', 'baz']])
def test_concat_rename_index(self):
a = DataFrame(np.random.rand(3, 3),
columns=list('ABC'),
index=Index(list('abc'), name='index_a'))
b = DataFrame(np.random.rand(3, 3),
columns=list('ABC'),
index=Index(list('abc'), name='index_b'))
result = concat([a, b], keys=['key0', 'key1'],
names=['lvl0', 'lvl1'])
exp = concat([a, b], keys=['key0', 'key1'], names=['lvl0'])
names = list(exp.index.names)
names[1] = 'lvl1'
exp.index.set_names(names, inplace=True)
tm.assert_frame_equal(result, exp)
self.assertEqual(result.index.names, exp.index.names)
def test_crossed_dtypes_weird_corner(self):
columns = ['A', 'B', 'C', 'D']
df1 = DataFrame({'A': np.array([1, 2, 3, 4], dtype='f8'),
'B': np.array([1, 2, 3, 4], dtype='i8'),
'C': np.array([1, 2, 3, 4], dtype='f8'),
'D': np.array([1, 2, 3, 4], dtype='i8')},
columns=columns)
df2 = DataFrame({'A': np.array([1, 2, 3, 4], dtype='i8'),
'B': np.array([1, 2, 3, 4], dtype='f8'),
'C': np.array([1, 2, 3, 4], dtype='i8'),
'D': np.array([1, 2, 3, 4], dtype='f8')},
columns=columns)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(np.concatenate([df1.values, df2.values], axis=0),
columns=columns)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=['a'])
df2 = DataFrame(np.random.randn(1, 4), index=['b'])
result = concat(
[df, df2], keys=['one', 'two'], names=['first', 'second'])
self.assertEqual(result.index.names, ('first', 'second'))
def test_dups_index(self):
# GH 4771
# single dtypes
df = DataFrame(np.random.randint(0,10,size=40).reshape(10,4),columns=['A','A','C','C'])
result = concat([df,df],axis=1)
assert_frame_equal(result.iloc[:,:4],df)
assert_frame_equal(result.iloc[:,4:],df)
result = concat([df,df],axis=0)
assert_frame_equal(result.iloc[:10],df)
assert_frame_equal(result.iloc[10:],df)
# multi dtypes
df = concat([DataFrame(np.random.randn(10,4),columns=['A','A','B','B']),
DataFrame(np.random.randint(0,10,size=20).reshape(10,2),columns=['A','C'])],
axis=1)
result = concat([df,df],axis=1)
assert_frame_equal(result.iloc[:,:6],df)
assert_frame_equal(result.iloc[:,6:],df)
result = concat([df,df],axis=0)
assert_frame_equal(result.iloc[:10],df)
assert_frame_equal(result.iloc[10:],df)
# append
result = df.iloc[0:8,:].append(df.iloc[8:])
assert_frame_equal(result, df)
result = df.iloc[0:8,:].append(df.iloc[8:9]).append(df.iloc[9:10])
assert_frame_equal(result, df)
expected = concat([df,df],axis=0)
result = df.append(df)
assert_frame_equal(result, expected)
def test_with_mixed_tuples(self):
# 10697
# columns have mixed tuples, so handle properly
df1 = DataFrame({ u'A' : 'foo', (u'B',1) : 'bar' },index=range(2))
df2 = DataFrame({ u'B' : 'foo', (u'B',1) : 'bar' },index=range(2))
result = concat([df1,df2])
def test_join_dups(self):
# joining dups
df = concat([DataFrame(np.random.randn(10,4),columns=['A','A','B','B']),
DataFrame(np.random.randint(0,10,size=20).reshape(10,2),columns=['A','C'])],
axis=1)
expected = concat([df,df],axis=1)
result = df.join(df,rsuffix='_2')
result.columns = expected.columns
assert_frame_equal(result, expected)
# GH 4975, invalid join on dups
w = DataFrame(np.random.randn(4,2), columns=["x", "y"])
x = DataFrame(np.random.randn(4,2), columns=["x", "y"])
y = DataFrame(np.random.randn(4,2), columns=["x", "y"])
z = DataFrame(np.random.randn(4,2), columns=["x", "y"])
dta = x.merge(y, left_index=True, right_index=True).merge(z, left_index=True, right_index=True, how="outer")
dta = dta.merge(w, left_index=True, right_index=True)
expected = concat([x,y,z,w],axis=1)
expected.columns=['x_x','y_x','x_y','y_y','x_x','y_x','x_y','y_y']
assert_frame_equal(dta,expected)
def test_handle_empty_objects(self):
df = DataFrame(np.random.randn(10, 4), columns=list('abcd'))
baz = df[:5].copy()
baz['foo'] = 'bar'
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0)
expected = df.ix[:, ['a', 'b', 'c', 'd', 'foo']]
expected['foo'] = expected['foo'].astype('O')
expected.loc[0:4,'foo'] = 'bar'
tm.assert_frame_equal(concatted, expected)
# empty as first element with time series
# GH3259
df = DataFrame(dict(A = range(10000)),index=date_range('20130101',periods=10000,freq='s'))
empty = DataFrame()
result = concat([df,empty],axis=1)
assert_frame_equal(result, df)
result = concat([empty,df],axis=1)
assert_frame_equal(result, df)
result = concat([df,empty])
assert_frame_equal(result, df)
result = concat([empty,df])
assert_frame_equal(result, df)
def test_concat_mixed_objs(self):
# concat mixed series/frames
# GH 2385
# axis 1
index=date_range('01-Jan-2013', periods=10, freq='H')
arr = np.arange(10, dtype='int64')
s1 = Series(arr, index=index)
s2 = Series(arr, index=index)
df = DataFrame(arr.reshape(-1,1), index=index)
expected = DataFrame(np.repeat(arr,2).reshape(-1,2), index=index, columns = [0, 0])
result = concat([df,df], axis=1)
assert_frame_equal(result, expected)
expected = DataFrame(np.repeat(arr,2).reshape(-1,2), index=index, columns = [0, 1])
result = concat([s1,s2], axis=1)
assert_frame_equal(result, expected)
expected = DataFrame(np.repeat(arr,3).reshape(-1,3), index=index, columns = [0, 1, 2])
result = concat([s1,s2,s1], axis=1)
assert_frame_equal(result, expected)
expected = DataFrame(np.repeat(arr,5).reshape(-1,5), index=index, columns = [0, 0, 1, 2, 3])
result = concat([s1,df,s2,s2,s1], axis=1)
assert_frame_equal(result, expected)
# with names
s1.name = 'foo'
expected = DataFrame(np.repeat(arr,3).reshape(-1,3), index=index, columns = ['foo', 0, 0])
result = concat([s1,df,s2], axis=1)
assert_frame_equal(result, expected)
s2.name = 'bar'
expected = DataFrame(np.repeat(arr,3).reshape(-1,3), index=index, columns = ['foo', 0, 'bar'])
result = concat([s1,df,s2], axis=1)
assert_frame_equal(result, expected)
# ignore index
expected = DataFrame(np.repeat(arr,3).reshape(-1,3), index=index, columns = [0, 1, 2])
result = concat([s1,df,s2], axis=1, ignore_index=True)
assert_frame_equal(result, expected)
# axis 0
expected = DataFrame(np.tile(arr,3).reshape(-1,1), index=index.tolist() * 3, columns = [0])
result = concat([s1,df,s2])
assert_frame_equal(result, expected)
expected = DataFrame(np.tile(arr,3).reshape(-1,1), columns = [0])
result = concat([s1,df,s2], ignore_index=True)
assert_frame_equal(result, expected)
# invalid concatenation of mixed dims
panel = tm.makePanel()
self.assertRaises(ValueError, lambda : concat([panel,s1],axis=1))
def test_panel_join(self):
panel = tm.makePanel()
tm.add_nans(panel)
p1 = panel.ix[:2, :10, :3]
p2 = panel.ix[2:, 5:, 2:]
# left join
result = p1.join(p2)
expected = p1.copy()
expected['ItemC'] = p2['ItemC']
tm.assert_panel_equal(result, expected)
# right join
result = p1.join(p2, how='right')
expected = p2.copy()
expected['ItemA'] = p1['ItemA']
expected['ItemB'] = p1['ItemB']
expected = expected.reindex(items=['ItemA', 'ItemB', 'ItemC'])
tm.assert_panel_equal(result, expected)
# inner join
result = p1.join(p2, how='inner')
expected = panel.ix[:, 5:10, 2:3]
tm.assert_panel_equal(result, expected)
# outer join
result = p1.join(p2, how='outer')
expected = p1.reindex(major=panel.major_axis,
minor=panel.minor_axis)
expected = expected.join(p2.reindex(major=panel.major_axis,
minor=panel.minor_axis))
tm.assert_panel_equal(result, expected)
def test_panel_join_overlap(self):
panel = tm.makePanel()
tm.add_nans(panel)
p1 = panel.ix[['ItemA', 'ItemB', 'ItemC']]
p2 = panel.ix[['ItemB', 'ItemC']]
# Expected items are
#
# ItemA, ItemB_p1, ItemC_p1, ItemB_p2, ItemC_p2
joined = p1.join(p2, lsuffix='_p1', rsuffix='_p2')
p1_suf = p1.ix[['ItemB', 'ItemC']].add_suffix('_p1')
p2_suf = p2.ix[['ItemB', 'ItemC']].add_suffix('_p2')
no_overlap = panel.ix[['ItemA']]
expected = no_overlap.join(p1_suf.join(p2_suf))
tm.assert_panel_equal(joined, expected)
def test_panel_join_many(self):
tm.K = 10
panel = tm.makePanel()
tm.K = 4
panels = [panel.ix[:2], panel.ix[2:6], panel.ix[6:]]
joined = panels[0].join(panels[1:])
tm.assert_panel_equal(joined, panel)
panels = [panel.ix[:2, :-5], panel.ix[2:6, 2:], panel.ix[6:, 5:-7]]
data_dict = {}
for p in panels:
data_dict.update(compat.iteritems(p))
joined = panels[0].join(panels[1:], how='inner')
expected = Panel.from_dict(data_dict, intersect=True)
tm.assert_panel_equal(joined, expected)
joined = panels[0].join(panels[1:], how='outer')
expected = Panel.from_dict(data_dict, intersect=False)
tm.assert_panel_equal(joined, expected)
# edge cases
self.assertRaises(ValueError, panels[0].join, panels[1:],
how='outer', lsuffix='foo', rsuffix='bar')
self.assertRaises(ValueError, panels[0].join, panels[1:],
how='right')
def test_panel_concat_other_axes(self):
panel = tm.makePanel()
p1 = panel.ix[:, :5, :]
p2 = panel.ix[:, 5:, :]
result = concat([p1, p2], axis=1)
tm.assert_panel_equal(result, panel)
p1 = panel.ix[:, :, :2]
p2 = panel.ix[:, :, 2:]
result = concat([p1, p2], axis=2)
tm.assert_panel_equal(result, panel)
# if things are a bit misbehaved
p1 = panel.ix[:2, :, :2]
p2 = panel.ix[:, :, 2:]
p1['ItemC'] = 'baz'
result = concat([p1, p2], axis=2)
expected = panel.copy()
expected['ItemC'] = expected['ItemC'].astype('O')
expected.ix['ItemC', :, :2] = 'baz'
tm.assert_panel_equal(result, expected)
def test_panel_concat_buglet(self):
# #2257
def make_panel():
index = 5
cols = 3
def df():
return DataFrame(np.random.randn(index, cols),
index=["I%s" % i for i in range(index)],
columns=["C%s" % i for i in range(cols)])
return Panel(dict([("Item%s" % x, df()) for x in ['A', 'B', 'C']]))
panel1 = make_panel()
panel2 = make_panel()
panel2 = panel2.rename_axis(dict([(x, "%s_1" % x)
for x in panel2.major_axis]),
axis=1)
panel3 = panel2.rename_axis(lambda x: '%s_1' % x, axis=1)
panel3 = panel3.rename_axis(lambda x: '%s_1' % x, axis=2)
# it works!
concat([panel1, panel3], axis=1, verify_integrity=True)
def test_panel4d_concat(self):
p4d = tm.makePanel4D()
p1 = p4d.ix[:, :, :5, :]
p2 = p4d.ix[:, :, 5:, :]
result = concat([p1, p2], axis=2)
tm.assert_panel4d_equal(result, p4d)
p1 = p4d.ix[:, :, :, :2]
p2 = p4d.ix[:, :, :, 2:]
result = concat([p1, p2], axis=3)
tm.assert_panel4d_equal(result, p4d)
def test_panel4d_concat_mixed_type(self):
p4d = tm.makePanel4D()
# if things are a bit misbehaved
p1 = p4d.ix[:, :2, :, :2]
p2 = p4d.ix[:, :, :, 2:]
p1['L5'] = 'baz'
result = concat([p1, p2], axis=3)
p2['L5'] = np.nan
expected = concat([p1, p2], axis=3)
expected = expected.ix[result.labels]
tm.assert_panel4d_equal(result, expected)
def test_concat_series(self):
ts = tm.makeTimeSeries()
ts.name = 'foo'
pieces = [ts[:5], ts[5:15], ts[15:]]
result = concat(pieces)
tm.assert_series_equal(result, ts)
self.assertEqual(result.name, ts.name)
result = concat(pieces, keys=[0, 1, 2])
expected = ts.copy()
ts.index = DatetimeIndex(np.array(ts.index.values, dtype='M8[ns]'))
exp_labels = [np.repeat([0, 1, 2], [len(x) for x in pieces]),
np.arange(len(ts))]
exp_index = MultiIndex(levels=[[0, 1, 2], ts.index],
labels=exp_labels)
expected.index = exp_index
tm.assert_series_equal(result, expected)
def test_concat_series_axis1(self):
ts = tm.makeTimeSeries()
pieces = [ts[:-2], ts[2:], ts[2:-2]]
result = concat(pieces, axis=1)
expected = DataFrame(pieces).T
assert_frame_equal(result, expected)
result = concat(pieces, keys=['A', 'B', 'C'], axis=1)
expected = DataFrame(pieces, index=['A', 'B', 'C']).T
assert_frame_equal(result, expected)
# preserve series names, #2489
s = Series(randn(5), name='A')
s2 = Series(randn(5), name='B')
result = concat([s, s2], axis=1)
expected = DataFrame({'A': s, 'B': s2})
assert_frame_equal(result, expected)
s2.name = None
result = concat([s, s2], axis=1)
self.assertTrue(np.array_equal(result.columns, Index(['A', 0], dtype='object')))
# must reindex, #2603
s = Series(randn(3), index=['c', 'a', 'b'], name='A')
s2 = Series(randn(4), index=['d', 'a', 'b', 'c'], name='B')
result = concat([s, s2], axis=1)
expected = DataFrame({'A': s, 'B': s2})
assert_frame_equal(result, expected)
def test_concat_single_with_key(self):
df = DataFrame(np.random.randn(10, 4))
result = concat([df], keys=['foo'])
expected = concat([df, df], keys=['foo', 'bar'])
tm.assert_frame_equal(result, expected[:10])
def test_concat_exclude_none(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df[:5], None, None, df[5:]]
result = concat(pieces)
tm.assert_frame_equal(result, df)
self.assertRaises(ValueError, concat, [None, None])
def test_concat_datetime64_block(self):
from pandas.tseries.index import date_range
rng = date_range('1/1/2000', periods=10)
df = DataFrame({'time': rng})
result = concat([df, df])
self.assertTrue((result.iloc[:10]['time'] == rng).all())
self.assertTrue((result.iloc[10:]['time'] == rng).all())
def test_concat_timedelta64_block(self):
from pandas import to_timedelta
rng = to_timedelta(np.arange(10),unit='s')
df = DataFrame({'time': rng})
result = concat([df, df])
self.assertTrue((result.iloc[:10]['time'] == rng).all())
self.assertTrue((result.iloc[10:]['time'] == rng).all())
def test_concat_keys_with_none(self):
# #1649
df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]])
result = concat(dict(a=None, b=df0, c=df0[:2], d=df0[:1], e=df0))
expected = concat(dict(b=df0, c=df0[:2], d=df0[:1], e=df0))
tm.assert_frame_equal(result, expected)
result = concat([None, df0, df0[:2], df0[:1], df0],
keys=['a', 'b', 'c', 'd', 'e'])
expected = concat([df0, df0[:2], df0[:1], df0],
keys=['b', 'c', 'd', 'e'])
tm.assert_frame_equal(result, expected)
def test_concat_bug_1719(self):
ts1 = tm.makeTimeSeries()
ts2 = tm.makeTimeSeries()[::2]
## to join with union
## these two are of different length!
left = concat([ts1, ts2], join='outer', axis=1)
right = concat([ts2, ts1], join='outer', axis=1)
self.assertEqual(len(left), len(right))
def test_concat_bug_2972(self):
ts0 = Series(np.zeros(5))
ts1 = Series(np.ones(5))
ts0.name = ts1.name = 'same name'
result = concat([ts0, ts1], axis=1)
expected = DataFrame({0: ts0, 1: ts1})
expected.columns=['same name', 'same name']
assert_frame_equal(result, expected)
def test_concat_bug_3602(self):
# GH 3602, duplicate columns
df1 = DataFrame({'firmNo' : [0,0,0,0], 'stringvar' : ['rrr', 'rrr', 'rrr', 'rrr'], 'prc' : [6,6,6,6] })
df2 = DataFrame({'misc' : [1,2,3,4], 'prc' : [6,6,6,6], 'C' : [9,10,11,12]})
expected = DataFrame([[0,6,'rrr',9,1,6],
[0,6,'rrr',10,2,6],
[0,6,'rrr',11,3,6],
[0,6,'rrr',12,4,6]])
expected.columns = ['firmNo','prc','stringvar','C','misc','prc']
result = concat([df1,df2],axis=1)
assert_frame_equal(result,expected)
def test_concat_series_axis1_same_names_ignore_index(self):
dates = date_range('01-Jan-2013', '01-Jan-2014', freq='MS')[0:-1]
s1 = Series(randn(len(dates)), index=dates, name='value')
s2 = Series(randn(len(dates)), index=dates, name='value')
result = concat([s1, s2], axis=1, ignore_index=True)
self.assertTrue(np.array_equal(result.columns, [0, 1]))
def test_concat_iterables(self):
from collections import deque, Iterable
# GH8645 check concat works with tuples, list, generators, and weird
# stuff like deque and custom iterables
df1 = DataFrame([1, 2, 3])
df2 = DataFrame([4, 5, 6])
expected = DataFrame([1, 2, 3, 4, 5, 6])
assert_frame_equal(concat((df1, df2), ignore_index=True), expected)
assert_frame_equal(concat([df1, df2], ignore_index=True), expected)
assert_frame_equal(concat((df for df in (df1, df2)), ignore_index=True), expected)
assert_frame_equal(concat(deque((df1, df2)), ignore_index=True), expected)
class CustomIterator1(object):
def __len__(self):
return 2
def __getitem__(self, index):
try:
return {0: df1, 1: df2}[index]
except KeyError:
raise IndexError
assert_frame_equal(pd.concat(CustomIterator1(), ignore_index=True), expected)
class CustomIterator2(Iterable):
def __iter__(self):
yield df1
yield df2
assert_frame_equal(pd.concat(CustomIterator2(), ignore_index=True), expected)
def test_concat_invalid(self):
# trying to concat a ndframe with a non-ndframe
df1 = mkdf(10, 2)
for obj in [1, dict(), [1, 2], (1, 2) ]:
self.assertRaises(TypeError, lambda: concat([df1, obj]))
def test_concat_invalid_first_argument(self):
df1 = mkdf(10, 2)
df2 = mkdf(10, 2)
self.assertRaises(TypeError, concat, df1, df2)
# generator ok though
concat(DataFrame(np.random.rand(5,5)) for _ in range(3))
# text reader ok
# GH6583
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
reader = read_csv(StringIO(data), chunksize=1)
result = concat(reader, ignore_index=True)
expected = read_csv(StringIO(data))
assert_frame_equal(result,expected)
class TestOrderedMerge(tm.TestCase):
def setUp(self):
self.left = DataFrame({'key': ['a', 'c', 'e'],
'lvalue': [1, 2., 3]})
self.right = DataFrame({'key': ['b', 'c', 'd', 'f'],
'rvalue': [1, 2, 3., 4]})
# GH #813
def test_basic(self):
result = ordered_merge(self.left, self.right, on='key')
expected = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'f'],
'lvalue': [1, nan, 2, nan, 3, nan],
'rvalue': [nan, 1, 2, 3, nan, 4]})
assert_frame_equal(result, expected)
def test_ffill(self):
result = ordered_merge(
self.left, self.right, on='key', fill_method='ffill')
expected = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'f'],
'lvalue': [1., 1, 2, 2, 3, 3.],
'rvalue': [nan, 1, 2, 3, 3, 4]})
assert_frame_equal(result, expected)
def test_multigroup(self):
left = concat([self.left, self.left], ignore_index=True)
# right = concat([self.right, self.right], ignore_index=True)
left['group'] = ['a'] * 3 + ['b'] * 3
# right['group'] = ['a'] * 4 + ['b'] * 4
result = ordered_merge(left, self.right, on='key', left_by='group',
fill_method='ffill')
expected = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'f'] * 2,
'lvalue': [1., 1, 2, 2, 3, 3.] * 2,
'rvalue': [nan, 1, 2, 3, 3, 4] * 2})
expected['group'] = ['a'] * 6 + ['b'] * 6
assert_frame_equal(result, expected.ix[:, result.columns])
result2 = ordered_merge(self.right, left, on='key', right_by='group',
fill_method='ffill')
assert_frame_equal(result, result2.ix[:, result.columns])
result = ordered_merge(left, self.right, on='key', left_by='group')
self.assertTrue(result['group'].notnull().all())
def test_merge_type(self):
class NotADataFrame(DataFrame):
@property
def _constructor(self):
return NotADataFrame
nad = NotADataFrame(self.left)
result = nad.merge(self.right, on='key')
tm.assertIsInstance(result, NotADataFrame)
def test_empty_sequence_concat(self):
# GH 9157
empty_pat = "[Nn]o objects"
none_pat = "objects.*None"
test_cases = [
((), empty_pat),
([], empty_pat),
({}, empty_pat),
([None], none_pat),
([None, None], none_pat)
]
for df_seq, pattern in test_cases:
assertRaisesRegexp(ValueError, pattern, pd.concat, df_seq)
pd.concat([pd.DataFrame()])
pd.concat([None, pd.DataFrame()])
pd.concat([pd.DataFrame(), None])
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| artistic-2.0 |
wanderknight/tushare | tushare/datayes/HKequity.py | 17 | 1493 | # -*- coding:utf-8 -*-
"""
DataYes (Tonglian Data) Hong Kong equity interface
Created on 2015/08/24
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
from pandas.compat import StringIO
import pandas as pd
from tushare.util import vars as vs
from tushare.util.common import Client
from tushare.util import upass as up
class HKequity():
def __init__(self, client=None):
if client is None:
self.client = Client(up.get_token())
else:
self.client = client
def HKEqu(self, listStatusCD='', secID='', ticker='', field=''):
"""
Retrieve basic information on stocks listed on the Hong Kong Stock Exchange, including ticker code and short name, stock type, listing status, listing board and listing date; the listing status reflects the latest state.
"""
code, result = self.client.getData(vs.HKEQU%(listStatusCD, secID, ticker, field))
return _ret_data(code, result)
def HKEquCA(self, secID='', ticker='', eventTypeCD='', field=''):
"""
Retrieve corporate actions of companies listed on the Hong Kong Stock Exchange, including IPOs, cash capital increases, dividends, stock splits, etc.
"""
code, result = self.client.getData(vs.HKEQUCA%(secID, ticker, eventTypeCD, field))
return _ret_data(code, result)
def _ret_data(code, result):
if code==200:
result = result.decode('utf-8') if vs.PY3 else result
df = pd.read_csv(StringIO(result))
return df
else:
print(result)
return None
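# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal, hedged example of how this wrapper might be called. It assumes a
# valid DataYes token has already been stored via tushare.util.upass.set_token();
# the parameter values below ('L' for "listed", the sample ticker) are
# hypothetical and should be checked against the DataYes API documentation.
#
# if __name__ == '__main__':
#     hk = HKequity()
#     basics = hk.HKEqu(listStatusCD='L', field='ticker,secShortName,listDate')
#     actions = hk.HKEquCA(ticker='00001', field='ticker,eventTypeCD')
#     if basics is not None:
#         print(basics.head())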
| bsd-3-clause |
pgroth/independence-indicators | Temporal-Coauthor-Networks/vincent/tests/test_vega.py | 2 | 32371 | # -*- coding: utf-8 -*-
'''
Test Vincent.vega
-----------------
'''
from datetime import datetime, timedelta
from itertools import product
import time
import json
from vincent.core import (grammar, GrammarClass, GrammarDict, KeyedList,
LoadError, ValidationError)
from vincent.visualization import Visualization
from vincent.data import Data
from vincent.transforms import Transform
from vincent.properties import PropertySet
from vincent.scales import DataRef, Scale
from vincent.marks import ValueRef, MarkProperties, MarkRef, Mark
from vincent.axes import AxisProperties, Axis
from vincent.legends import LegendProperties, Legend
import nose.tools as nt
import pandas as pd
import numpy as np
sequences = {
'int': range,
'float': lambda l: list(map(float, list(range(l)))),
'char': lambda l: list(map(chr, list(range(97, 97 + l)))),
'datetime': lambda l: [datetime.now() + timedelta(days=i)
for i in range(l)],
'Timestamp': lambda l: pd.date_range('1/2/2000', periods=l),
'numpy float': lambda l: list(map(np.float32, list(range(l)))),
'numpy int': lambda l: list(map(np.int32, list(range(l))))}
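# Note (added comment): `sequences` maps a type name to a factory that builds a
# length-l sequence of that type; the pandas/numpy loading tests below iterate
# over these factories to exercise serialization of each index and value type.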
def test_keyed_list():
"""Test keyed list implementation"""
class TestKey(object):
"""Test object for Keyed List"""
def __init__(self, name=None):
self.name = name
key_list = KeyedList(attr_name='name')
#Basic usage
test_key = TestKey(name='test')
key_list.append(test_key)
nt.assert_equal(test_key, key_list['test'])
#Bad key
with nt.assert_raises(KeyError) as err:
key_list['test_1']
nt.assert_equal(err.exception.args[0], ' "test_1" is an invalid key')
#Repeated keys
test_key_1 = TestKey(name='test')
key_list.append(test_key_1)
with nt.assert_raises(ValidationError) as err:
key_list['test']
nt.assert_equal(err.expected, ValidationError)
nt.assert_equal(err.exception.args[0], 'duplicate keys found')
#Setting keys
key_list.pop(-1)
test_key_2 = TestKey(name='test_2')
key_list['test_2'] = test_key_2
nt.assert_equal(key_list['test_2'], test_key_2)
mirror_key_2 = TestKey(name='test_2')
key_list['test_2'] = mirror_key_2
nt.assert_equal(key_list['test_2'], mirror_key_2)
key_list[0] = mirror_key_2
nt.assert_equal(key_list[0], mirror_key_2)
#Keysetting errors
test_key_3 = TestKey(name='test_3')
with nt.assert_raises(ValidationError) as err:
key_list['test_4'] = test_key_3
nt.assert_equal(err.expected, ValidationError)
nt.assert_equal(err.exception.args[0],
"key must be equal to 'name' attribute")
key_list = KeyedList(attr_name='type')
test_key_4 = TestKey(name='test_key_4')
with nt.assert_raises(ValidationError) as err:
key_list['test_key_4'] = test_key_4
nt.assert_equal(err.expected, ValidationError)
nt.assert_equal(err.exception.args[0], 'object must have type attribute')
def test_grammar():
"""Grammar decorator behaves correctly."""
validator_fail = False
class DummyType(object):
pass
class TestGrammarClass(object):
def __init__(self):
self.grammar = GrammarDict()
@grammar
def test_grammar(value):
if validator_fail:
raise ValueError('validator failed')
@grammar(grammar_type=DummyType)
def test_grammar_with_type(value):
if validator_fail:
raise ValueError('validator failed')
@grammar(grammar_name='a name')
def test_grammar_with_name(value):
if validator_fail:
raise ValueError('validator failed')
test = TestGrammarClass()
nt.assert_is_none(test.test_grammar)
nt.assert_dict_equal(test.grammar, {})
test.test_grammar = 'testing'
nt.assert_equal(test.test_grammar, 'testing')
nt.assert_dict_equal(test.grammar, {'test_grammar': 'testing'})
del test.test_grammar
nt.assert_is_none(test.test_grammar)
nt.assert_dict_equal(test.grammar, {})
validator_fail = True
nt.assert_raises_regexp(ValueError, 'validator failed', setattr, test,
'test_grammar', 'testing')
# grammar with type checking
test = TestGrammarClass()
validator_fail = False
dummy = DummyType()
test.test_grammar_with_type = dummy
nt.assert_equal(test.test_grammar_with_type, dummy)
nt.assert_dict_equal(test.grammar, {'test_grammar_with_type': dummy})
nt.assert_raises_regexp(ValueError, 'must be DummyType', setattr, test,
'test_grammar_with_type', 'testing')
validator_fail = True
nt.assert_raises_regexp(ValueError, 'validator failed', setattr, test,
'test_grammar_with_type', dummy)
# grammar with field name
test = TestGrammarClass()
validator_fail = False
test.test_grammar_with_name = 'testing'
nt.assert_equal(test.test_grammar_with_name, 'testing')
nt.assert_dict_equal(test.grammar, {'a name': 'testing'})
validator_fail = True
nt.assert_raises_regexp(ValueError, 'validator failed', setattr, test,
'test_grammar_with_name', 'testing')
def test_grammar_dict():
"""Test Vincent Grammar Dict"""
g_dict = GrammarDict()
test = Visualization()
test_dict = {'axes': [], 'data': [], 'marks': [],
'scales': [], 'legends': []}
test_str = ('{"axes": [], "data": [], "legends": [], '
'"marks": [], "scales": []}')
nt.assert_equal(test.grammar(), test_dict)
print(json.dumps(test.grammar, sort_keys=True))
nt.assert_equal(json.dumps(test.grammar, sort_keys=True),
test_str)
nt.assert_equal(g_dict.encoder(test), test.grammar)
def assert_grammar_typechecking(grammar_types, test_obj):
"""Assert that the grammar fields of a test object are correctly
type-checked.
`grammar_types` should be a list of (name, type) pairs, and `test_obj`
should be an instance of the object to test.
"""
class BadType(object):
pass
for name, objects in grammar_types:
for obj in objects:
tmp_obj = obj()
setattr(test_obj, name, tmp_obj)
nt.assert_equal(getattr(test_obj, name), tmp_obj)
bad_obj = BadType()
nt.assert_raises_regexp(ValueError, name + '.*' + obj.__name__,
setattr, test_obj, name, bad_obj)
nt.assert_equal(getattr(test_obj, name), tmp_obj)
def assert_manual_typechecking(bad_grammar, test_obj):
"""Some attrs use the _assert_is_type func for typechecking"""
for attr, value in bad_grammar:
with nt.assert_raises(ValueError) as err:
setattr(test_obj, attr, value)
nt.assert_equal(err.expected, ValueError)
def assert_grammar_validation(grammar_errors, test_obj):
"""Check grammar methods for validation errors"""
for attr, value, error, message in grammar_errors:
with nt.assert_raises(error) as err:
setattr(test_obj, attr, value)
nt.assert_equal(err.exception.args[0], message)
class TestGrammarClass(object):
"""Test GrammarClass's built-in methods that aren't tested elsewhere"""
def test_bad_init(self):
"""Test bad initialization"""
nt.assert_raises(ValueError, GrammarClass, width=50)
def test_validation(self):
"""Test validation of grammar"""
test = Visualization()
test.axes.append({'bad axes': 'ShouldRaiseError'})
with nt.assert_raises(ValidationError) as err:
test.validate()
nt.assert_equal(err.exception.args[0],
'invalid contents: axes[0] must be Axis')
class TestVisualization(object):
"""Test the Visualization Class"""
def test_grammar_typechecking(self):
"""Visualization fields are correctly type checked"""
grammar_types = [('name', [str]),
('width', [int]),
('height', [int]),
('data', [list, KeyedList]),
('scales', [list, KeyedList]),
('axes', [list, KeyedList]),
('marks', [list, KeyedList])]
assert_grammar_typechecking(grammar_types, Visualization())
def test_validation_checking(self):
"""Visualization fields are grammar-checked"""
grammar_errors = [('width', -1, ValueError,
'width cannot be negative'),
('height', -1, ValueError,
'height cannot be negative'),
('viewport', [1], ValueError,
'viewport must have 2 dimensions'),
('viewport', [-1, -1], ValueError,
'viewport dimensions cannot be negative'),
('padding', {'top': 2}, ValueError,
('Padding must have keys "top", "left", "right",'
' "bottom".')),
('padding',
{'top': 1, 'left': 1, 'right': 1, 'bottom': -1},
ValueError, 'Padding cannot be negative.'),
('padding', -1, ValueError,
'Padding cannot be negative.')]
assert_grammar_validation(grammar_errors, Visualization())
def test_manual_typecheck(self):
"""Test manual typechecking for elements like marks"""
test_attr = [('data', [1]), ('scales', [1]),
('axes', [1]), ('marks', [1]),
('legends', [1])]
assert_manual_typechecking(test_attr, Visualization())
def test_validation(self):
"""Test Visualization validation"""
test_obj = Visualization()
with nt.assert_raises(ValidationError) as err:
test_obj.validate()
nt.assert_equal(err.exception.args[0],
'data must be defined for valid visualization')
test_obj.data = [Data(name='test'), Data(name='test')]
with nt.assert_raises(ValidationError) as err:
test_obj.validate()
nt.assert_equal(err.exception.args[0],
'data has duplicate names')
def test_axis_labeling(self):
"""Test convenience method for axis label setting"""
#With Axes already in place
test_obj = Visualization()
test_obj.axes.extend([Axis(type='x'), Axis(type='y')])
test_obj.axis_titles(x="test1", y="test2")
nt.assert_equals(test_obj.axes['x'].title, 'test1')
nt.assert_equals(test_obj.axes['y'].title, 'test2')
#With no Axes already defined
del test_obj.axes[0]
del test_obj.axes[0]
test_obj.axis_titles(x="test1", y="test2")
nt.assert_equals(test_obj.axes['x'].title, 'test1')
nt.assert_equals(test_obj.axes['y'].title, 'test2')
def test_to_json(self):
"""Test JSON to string"""
pretty = '''{
"marks": [],
"axes": [],
"data": [],
"scales": [],
"legends": []
}'''
test = Visualization()
actual, tested = json.loads(pretty), json.loads(test.to_json())
nt.assert_dict_equal(actual, tested)
class TestData(object):
"""Test the Data class"""
def test_grammar_typechecking(self):
"""Data fields are correctly type-checked"""
grammar_types = [
('name', [str]),
('url', [str]),
('values', [list]),
('source', [str]),
('transform', [list])]
assert_grammar_typechecking(grammar_types, Data('name'))
def test_validate(self):
"""Test Data name validation"""
test_obj = Data()
del test_obj.name
nt.assert_raises(ValidationError, test_obj.validate)
def test_serialize(self):
"""Objects are serialized to JSON-compatible objects"""
def epoch(obj):
"""Convert to JS Epoch time"""
return int(time.mktime(obj.timetuple())) * 1000
types = [('test', str, 'test'),
(pd.Timestamp('2013-06-08'), int,
epoch(pd.Timestamp('2013-06-08'))),
(datetime.utcnow(), int, epoch(datetime.utcnow())),
(1, int, 1),
(1.0, float, 1.0),
(np.float32(1), float, 1.0),
(np.int32(1), int, 1),
(np.float64(1), float, 1.0),
(np.int64(1), int, 1)]
for puts, pytype, gets in types:
nt.assert_equal(Data.serialize(puts), gets)
class BadType(object):
"""Bad object for type warning"""
test_obj = BadType()
with nt.assert_raises(LoadError) as err:
Data.serialize(test_obj)
nt.assert_equals(err.exception.args[0],
'cannot serialize index of type BadType')
def test_pandas_series_loading(self):
"""Pandas Series objects are correctly loaded"""
# Test valid series types
name = ['_x', ' name']
length = [0, 1, 2]
index_key = [None, 'ix', 1]
index_types = ['int', 'char', 'datetime', 'Timestamp']
value_key = [None, 'x', 1]
value_types = ['int', 'char', 'datetime', 'Timestamp', 'float',
'numpy float', 'numpy int']
series_info = product(name, length, index_key, index_types,
value_key, value_types)
for n, l, ikey, itype, vkey, vtype in series_info:
index = sequences[itype](l)
series = pd.Series(sequences[vtype](l), index=index, name=n,)
vkey = series.name or vkey
expected = [{'idx': Data.serialize(i), 'col': vkey,
'val': Data.serialize(v)}
for i, v in zip(index, series)]
data = Data.from_pandas(series, name=n, series_key=vkey)
nt.assert_list_equal(expected, data.values)
nt.assert_equal(n, data.name)
data.to_json()
# Missing a name
series = pd.Series(np.random.randn(10))
data = Data.from_pandas(series)
nt.assert_equal(data.name, 'table')
def test_pandas_dataframe_loading(self):
"""Pandas DataFrame objects are correctly loaded"""
# name = ['_x']
# length = [0, 1, 2]
# index_key = [None, 'ix', 1]
# index_types = ['int', 'char', 'datetime', 'Timestamp']
# column_types = ['int', 'char', 'datetime', 'Timestamp']
# # Leaving out some basic types here because we're not worried about
# # serialization.
# value_types = [
# 'char', 'datetime', 'Timestamp', 'numpy float', 'numpy int']
# dataframe_info = product(
# name, length, length, index_key, index_types, column_types,
# value_types)
# for n, rows, cols, ikey, itype, ctype, vtype in dataframe_info:
# index = sequences[itype](rows)
# columns = sequences[ctype](cols)
# series = {
# c: pd.Series(sequences[vtype](rows), index=index, name=n)
# for c in columns}
# dataframe = pd.DataFrame(series)
# ikey = ikey or Data._default_index_key
# if cols == 0:
# expected = []
# else:
# expected = [
# dict([(ikey, Data.serialize(index[i]))] +
# [(str(c), Data.serialize(series[c][i]))
# for c in columns])
# for i in xrange(rows)]
# data = Data.from_pandas(dataframe, name=n, index_key=ikey)
# nt.assert_list_equal(expected, data.values)
# nt.assert_equal(n, data.name)
# data.to_json()
#Simple columns/key_on tests
df = pd.DataFrame({'one': [1, 2, 3], 'two': [6, 7, 8],
'three': [11, 12, 13], 'four': [17, 18, 19]})
get_all = [{'col': 'four', 'idx': 0, 'val': 17},
{'col': 'one', 'idx': 0, 'val': 1},
{'col': 'three', 'idx': 0, 'val': 11},
{'col': 'two', 'idx': 0, 'val': 6},
{'col': 'four', 'idx': 1, 'val': 18},
{'col': 'one', 'idx': 1, 'val': 2},
{'col': 'three', 'idx': 1, 'val': 12},
{'col': 'two', 'idx': 1, 'val': 7},
{'col': 'four', 'idx': 2, 'val': 19},
{'col': 'one', 'idx': 2, 'val': 3},
{'col': 'three', 'idx': 2, 'val': 13},
{'col': 'two', 'idx': 2, 'val': 8}]
get1 = [{'col': 'one', 'idx': 0, 'val': 1},
{'col': 'one', 'idx': 1, 'val': 2},
{'col': 'one', 'idx': 2, 'val': 3}]
get2 = [{'col': 'one', 'idx': 0, 'val': 1},
{'col': 'two', 'idx': 0, 'val': 6},
{'col': 'one', 'idx': 1, 'val': 2},
{'col': 'two', 'idx': 1, 'val': 7},
{'col': 'one', 'idx': 2, 'val': 3},
{'col': 'two', 'idx': 2, 'val': 8}]
getkey2 = [{'col': 'one', 'idx': 6, 'val': 1},
{'col': 'one', 'idx': 7, 'val': 2},
{'col': 'one', 'idx': 8, 'val': 3}]
getkey3 = [{'col': 'one', 'idx': 11, 'val': 1},
{'col': 'two', 'idx': 11, 'val': 6},
{'col': 'one', 'idx': 12, 'val': 2},
{'col': 'two', 'idx': 12, 'val': 7},
{'col': 'one', 'idx': 13, 'val': 3},
{'col': 'two', 'idx': 13, 'val': 8}]
val_all = Data.from_pandas(df)
val1 = Data.from_pandas(df, columns=['one'])
val2 = Data.from_pandas(df, columns=['one', 'two'])
key2 = Data.from_pandas(df, columns=['one'], key_on='two')
key3 = Data.from_pandas(df, columns=['one', 'two'], key_on='three')
nt.assert_list_equal(val_all.values, get_all)
nt.assert_list_equal(val1.values, get1)
nt.assert_list_equal(val2.values, get2)
nt.assert_list_equal(key2.values, getkey2)
nt.assert_list_equal(key3.values, getkey3)
# Missing a name
dataframe = pd.DataFrame(np.random.randn(10, 3))
data = Data.from_pandas(dataframe)
nt.assert_equal(data.name, 'table')
#Bad obj
nt.assert_raises(ValueError, Data.from_pandas, {})
def test_numpy_loading(self):
"""Numpy ndarray objects are correctly loaded"""
test_data = np.random.randn(6, 3)
index = range(test_data.shape[0])
columns = ['a', 'b', 'c']
data = Data.from_numpy(test_data, name='name', columns=columns)
ikey = Data._default_index_key
expected_values = [
{ikey: i, 'a': row[0], 'b': row[1], 'c': row[2]}
for i, row in zip(index, test_data.tolist())]
nt.assert_list_equal(expected_values, data.values)
nt.assert_equal('name', data.name)
index_key = 'akey'
data = Data.from_numpy(test_data, name='name', columns=columns,
index_key=index_key)
expected_values = [
{index_key: i, 'a': row[0], 'b': row[1], 'c': row[2]}
for i, row in zip(index, test_data.tolist())]
nt.assert_list_equal(expected_values, data.values)
index = ['a', 'b', 'c', 'd', 'e', 'f']
data = Data.from_numpy(test_data, name='name', index=index,
columns=columns)
expected_values = [
{ikey: i, 'a': row[0], 'b': row[1], 'c': row[2]}
for i, row in zip(index, test_data.tolist())]
nt.assert_list_equal(expected_values, data.values)
#Bad loads
with nt.assert_raises(LoadError) as err:
Data.from_numpy(test_data, 'test', columns, index=range(4))
nt.assert_equal(err.expected, LoadError)
columns = ['a', 'b']
with nt.assert_raises(LoadError) as err:
Data.from_numpy(test_data, 'test', columns, index)
nt.assert_equal(err.expected, LoadError)
def test_from_mult_iters(self):
"""Test set of iterables"""
test1 = Data.from_mult_iters(x=[0, 1, 2], y=[3, 4, 5], z=[7, 8, 9],
idx='x')
test2 = Data.from_mult_iters(fruit=['apples', 'oranges', 'grapes'],
count=[12, 16, 54], idx='fruit')
values1 = [{'col': 'y', 'idx': 0, 'val': 3},
{'col': 'y', 'idx': 1, 'val': 4},
{'col': 'y', 'idx': 2, 'val': 5},
{'col': 'z', 'idx': 0, 'val': 7},
{'col': 'z', 'idx': 1, 'val': 8},
{'col': 'z', 'idx': 2, 'val': 9}]
values2 = [{'col': 'count', 'idx': 'apples', 'val': 12},
{'col': 'count', 'idx': 'oranges', 'val': 16},
{'col': 'count', 'idx': 'grapes', 'val': 54}]
nt.assert_list_equal(test1.values, values1)
nt.assert_list_equal(test2.values, values2)
#Iter errors
nt.assert_raises(ValueError, Data.from_mult_iters, x=[0], y=[1, 2])
def test_from_iter(self):
"""Test data from single iterable"""
test_list = Data.from_iter([10, 20, 30])
test_dict = Data.from_iter({
'apples': 10, 'bananas': 20, 'oranges': 30})
get1 = [{'col': 'data', 'idx': 0, 'val': 10},
{'col': 'data', 'idx': 1, 'val': 20},
{'col': 'data', 'idx': 2, 'val': 30}]
get2 = [{'col': 'data', 'idx': 'apples', 'val': 10},
{'col': 'data', 'idx': 'bananas', 'val': 20},
{'col': 'data', 'idx': 'oranges', 'val': 30}]
nt.assert_list_equal(test_list.values, get1)
nt.assert_list_equal(test_dict.values, get2)
def test_serialize_error(self):
"""Test serialization error"""
class badType(object):
"""I am a bad actor"""
broken = badType()
nt.assert_raises(LoadError, Data.serialize, broken)
def test_keypairs(self):
Data.keypairs([0, 10, 20, 30, 40])
Data.keypairs(((0, 1), (0, 2), (0, 3)))
Data.keypairs({'A': 10, 'B': 20, 'C': 30, 'D': 40, 'E': 50})
class TestTransform(object):
"""Test the Transform class"""
def test_grammar_typechecking(self):
"""Transform field typechecking"""
grammar_types = [
('fields', [list]), ('from_', [str]),
('as_', [list]), ('keys', [list]), ('sort', [str]),
('test', [str]), ('field', [str]), ('expr', [str]),
('by', [str, list]), ('value', [str]), ('median', [bool]),
('with_', [str]), ('key', [str]), ('with_key', [str]),
('links', [str]), ('size', [list]), ('iterations', [int]),
('charge', [int, str]), ('link_distance', [int, str]),
('link_strength', [int, str]), ('friction', [int, float]),
('theta', [int, float]), ('gravity', [int, float]),
('alpha', [int, float]), ('point', [str]),
('height', [str])]
assert_grammar_typechecking(grammar_types, Transform())
class TestValueRef(object):
"""Test the ValueRef class"""
def test_grammar_typechecking(self):
"""ValueRef fields are correctly type-checked"""
grammar_types = [
('value', [str]),
('value', [int]),
('value', [float]),
('field', [str]),
('scale', [str]),
('mult', [int]),
('mult', [float]),
('offset', [int]),
('offset', [float]),
('band', [bool])]
assert_grammar_typechecking(grammar_types, ValueRef())
def test_json_serialization(self):
"""ValueRef JSON is correctly serialized"""
vref = ValueRef()
nt.assert_equal(json.dumps({}), vref.to_json(pretty_print=False))
props = {
'value': 'test-value',
'band': True}
vref = ValueRef(**props)
nt.assert_equal(json.dumps(props, sort_keys=True),
vref.to_json(pretty_print=False))
props = {
'value': 'test-value',
'field': 'test-field',
'scale': 'test-scale',
'mult': 1.2,
'offset': 4,
'band': True}
vref = ValueRef(**props)
nt.assert_equal(json.dumps(props, sort_keys=True),
vref.to_json(pretty_print=False))
class TestPropertySet(object):
"""Test the PropertySet Class"""
def test_grammar_typechecking(self):
"""PropertySet fields are correctly type-checked"""
# All fields must be ValueRef for Mark properties
fields = [
'x', 'x2', 'width', 'y', 'y2', 'height', 'opacity', 'fill',
'fill_opacity', 'stroke', 'stroke_width', 'stroke_opacity',
'size', 'shape', 'path', 'inner_radius', 'outer_radius',
'start_angle', 'end_angle', 'interpolate', 'tension', 'url',
'align', 'baseline', 'text', 'dx', 'dy', 'angle', 'font',
'font_size', 'font_weight', 'font_style']
grammar_types = [(f, [ValueRef]) for f in fields]
assert_grammar_typechecking(grammar_types, PropertySet())
def test_validation_checking(self):
"""ValueRef fields are grammar-checked"""
grammar_errors = [('fill_opacity', ValueRef(value=-1), ValueError,
'fill_opacity must be between 0 and 1'),
('fill_opacity', ValueRef(value=2), ValueError,
'fill_opacity must be between 0 and 1'),
('stroke_width', ValueRef(value=-1), ValueError,
'stroke width cannot be negative'),
('stroke_opacity', ValueRef(value=-1), ValueError,
'stroke_opacity must be between 0 and 1'),
('stroke_opacity', ValueRef(value=2), ValueError,
'stroke_opacity must be between 0 and 1'),
('size', ValueRef(value=-1), ValueError,
'size cannot be negative')]
assert_grammar_validation(grammar_errors, PropertySet())
bad_shape = ValueRef(value="BadShape")
nt.assert_raises(ValueError, PropertySet, shape=bad_shape)
def test_manual_typecheck(self):
"""Test manual typechecking for elements like marks"""
test_attr = [('fill', ValueRef(value=1)),
('fill_opacity', ValueRef(value='str')),
('stroke', ValueRef(value=1)),
('stroke_width', ValueRef(value='str')),
('stroke_opacity', ValueRef(value='str')),
('size', ValueRef(value='str')),
('shape', ValueRef(value=1)),
('path', ValueRef(value=1))]
assert_manual_typechecking(test_attr, PropertySet())
class TestMarkProperties(object):
"""Test the MarkProperty Class"""
def test_grammar_typechecking(self):
"""Test grammar of MarkProperty"""
fields = ['enter', 'exit', 'update', 'hover']
grammar_types = [(f, [PropertySet]) for f in fields]
assert_grammar_typechecking(grammar_types, MarkProperties())
class TestMarkRef(object):
"""Test the MarkRef Class"""
def test_grammar_typechecking(self):
"""Test grammar of MarkRef"""
grammar_types = [('data', [str]), ('transform', [list])]
assert_grammar_typechecking(grammar_types, MarkRef())
class TestMark(object):
"""Test Mark Class"""
def test_grammar_typechecking(self):
"""Test grammar of Mark"""
grammar_types = [('name', [str]), ('description', [str]),
('from_', [MarkRef]),
('properties', [MarkProperties]), ('key', [str]),
('key', [str]), ('delay', [ValueRef]),
('ease', [str]), ('marks', [list]),
('scales', [list, KeyedList])]
assert_grammar_typechecking(grammar_types, Mark())
def test_validation_checking(self):
"""Mark fields are grammar checked"""
nt.assert_raises(ValueError, Mark, type='panda')
class TestDataRef(object):
"""Test DataRef class"""
def test_grammar_typechecking(self):
"""Test grammar of DataRef"""
grammar_types = [('data', [str]), ('field', [str])]
assert_grammar_typechecking(grammar_types, DataRef())
class TestScale(object):
"""Test Scale class"""
def test_grammar_typechecking(self):
"""Test grammar of Scale"""
grammar_types = [('name', [str]), ('type', [str]),
('domain', [list, DataRef]),
('domain_min', [float, int, DataRef]),
('domain_max', [float, int, DataRef]),
('range', [list, str]),
('range_min', [float, int, DataRef]),
('range_max', [float, int, DataRef]),
('reverse', [bool]), ('round', [bool]),
('points', [bool]), ('clamp', [bool]),
('nice', [bool, str]),
('exponent', [float, int]),
('zero', [bool])]
assert_grammar_typechecking(grammar_types, Scale())
class TestAxisProperties(object):
"""Test AxisProperties Class"""
def test_grammar_typechecking(self):
"""Test grammar of AxisProperties"""
grammar_types = [('major_ticks', [PropertySet]),
('minor_ticks', [PropertySet]),
('labels', [PropertySet]),
('axis', [PropertySet])]
assert_grammar_typechecking(grammar_types, AxisProperties())
class TestAxis(object):
"""Test Axis Class"""
def test_grammar_typechecking(self):
"""Test grammar of Axis"""
grammar_types = [('title', [str]),
('title_offset', [int]),
('grid', [bool]),
('scale', [str]),
('orient', [str]), ('format', [str]),
('ticks', [int]), ('values', [list]),
('subdivide', [int, float]),
('tick_padding', [int]), ('tick_size', [int]),
('tick_size_major', [int]),
('tick_size_minor', [int]),
('tick_size_end', [int]),
('offset', [int]),
('properties', [AxisProperties])]
assert_grammar_typechecking(grammar_types, Axis())
def test_validation_checking(self):
"""Axis fields are grammar checked"""
nt.assert_raises(ValueError, Axis, type='panda')
class TestLegendProperties(object):
"""Test LegendProperties class"""
def test_grammar_typechecking(self):
"""Test grammar of LegendProperties"""
grammar_types = [('title', [ValueRef]),
('labels', [ValueRef]),
('symbols', [ValueRef]),
('gradient', [ValueRef]),
('legend', [ValueRef])]
assert_grammar_typechecking(grammar_types, LegendProperties())
class TestLegend(object):
"""Test Legend Class"""
def test_grammar_typechecking(self):
"""Test grammar of Legend"""
grammar_types = [('size', [str]),
('shape', [str]),
('fill', [str]),
('stroke', [str]),
('title', [str]),
('format', [str]),
('values', [list]),
('properties', [LegendProperties])]
assert_grammar_typechecking(grammar_types, Legend())
def test_validation_checking(self):
"""Legend fields are grammar checked"""
nt.assert_raises(ValueError, Legend, orient='center')
| gpl-2.0 |
chrisburr/scikit-learn | examples/applications/topics_extraction_with_nmf_lda.py | 18 | 3891 | """
=======================================================================================
Topic extraction with Non-negative Matrix Factorization and Latent Dirichlet Allocation
=======================================================================================
This is an example of applying Non-negative Matrix Factorization
and Latent Dirichlet Allocation on a corpus of documents and
extract additive models of the topic structure of the corpus.
The output is a list of topics, each represented as a list of terms
(weights are not shown).
The default parameters (n_samples / n_features / n_topics) should make
the example runnable in a few tens of seconds. You can try to
increase the dimensions of the problem, but be aware that the time
complexity is polynomial in NMF. In LDA, the time complexity is
proportional to (n_samples * iterations).
"""
# Author: Olivier Grisel <[email protected]>
# Lars Buitinck <[email protected]>
# Chyi-Kwei Yau <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation
from sklearn.datasets import fetch_20newsgroups
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
def print_top_words(model, feature_names, n_top_words):
for topic_idx, topic in enumerate(model.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
# Load the 20 newsgroups dataset and vectorize it. We use a few heuristics
# to filter out useless terms early on: the posts are stripped of headers,
# footers and quoted replies, and common English words, words occurring in
# only one document or in at least 95% of the documents are removed.
print("Loading dataset...")
t0 = time()
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
data_samples = dataset.data[:n_samples]
print("done in %0.3fs." % (time() - t0))
# Use tf-idf features for NMF.
print("Extracting tf-idf features for NMF...")
tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2,
max_features=n_features,
stop_words='english')
t0 = time()
tfidf = tfidf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
# Use tf (raw term count) features for LDA.
print("Extracting tf features for LDA...")
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2,
max_features=n_features,
stop_words='english')
t0 = time()
tf = tf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
# Fit the NMF model
print("Fitting the NMF model with tf-idf features, "
"n_samples=%d and n_features=%d..."
% (n_samples, n_features))
t0 = time()
nmf = NMF(n_components=n_topics, random_state=1,
alpha=.1, l1_ratio=.5).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in NMF model:")
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
print_top_words(nmf, tfidf_feature_names, n_top_words)
print("Fitting LDA models with tf features, "
"n_samples=%d and n_features=%d..."
% (n_samples, n_features))
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=5,
learning_method='online',
learning_offset=50.,
random_state=0)
t0 = time()
lda.fit(tf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in LDA model:")
tf_feature_names = tf_vectorizer.get_feature_names()
print_top_words(lda, tf_feature_names, n_top_words)
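# Possible follow-up (not in the original example): the fitted models can also
# project documents into topic space with their transform() methods, e.g.
#
#   doc_topics_nmf = nmf.transform(tfidf)   # array of shape (n_samples, n_topics)
#   doc_topics_lda = lda.transform(tf)      # per-document topic distributions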
| bsd-3-clause |
PatrickChrist/scikit-learn | sklearn/manifold/tests/test_t_sne.py | 162 | 9771 | import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils import check_random_state
from sklearn.manifold.t_sne import _joint_probabilities
from sklearn.manifold.t_sne import _kl_divergence
from sklearn.manifold.t_sne import _gradient_descent
from sklearn.manifold.t_sne import trustworthiness
from sklearn.manifold.t_sne import TSNE
from sklearn.manifold._utils import _binary_search_perplexity
from scipy.optimize import check_grad
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
def test_gradient_descent_stops():
# Test stopping conditions of gradient descent.
class ObjectiveSmallGradient:
def __init__(self):
self.it = -1
def __call__(self, _):
self.it += 1
return (10 - self.it) / 10.0, np.array([1e-5])
def flat_function(_):
return 0.0, np.ones(1)
# Gradient norm
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=1e-5, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 1.0)
assert_equal(it, 0)
assert("gradient norm" in out)
# Error difference
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.2, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.9)
assert_equal(it, 1)
assert("error difference" in out)
# Maximum number of iterations without improvement
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
flat_function, np.zeros(1), 0, n_iter=100,
n_iter_without_progress=10, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=-1.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 11)
assert("did not make any progress" in out)
# Maximum number of iterations
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=11,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 10)
assert("Iteration 10" in out)
def test_binary_search():
# Test if the binary search finds Gaussians with desired perplexity.
random_state = check_random_state(0)
distances = random_state.randn(50, 2)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
desired_perplexity = 25.0
P = _binary_search_perplexity(distances, desired_perplexity, verbose=0)
P = np.maximum(P, np.finfo(np.double).eps)
mean_perplexity = np.mean([np.exp(-np.sum(P[i] * np.log(P[i])))
for i in range(P.shape[0])])
assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3)
def test_gradient():
# Test gradient of Kullback-Leibler divergence.
random_state = check_random_state(0)
n_samples = 50
n_features = 2
n_components = 2
alpha = 1.0
distances = random_state.randn(n_samples, n_features)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
X_embedded = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, desired_perplexity=25.0,
verbose=0)
fun = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[0]
grad = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[1]
assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
decimal=5)
def test_trustworthiness():
# Test trustworthiness score.
random_state = check_random_state(0)
# Affine transformation
X = random_state.randn(100, 2)
assert_equal(trustworthiness(X, 5.0 + X / 10.0), 1.0)
# Randomly shuffled
X = np.arange(100).reshape(-1, 1)
X_embedded = X.copy()
random_state.shuffle(X_embedded)
assert_less(trustworthiness(X, X_embedded), 0.6)
# Completely different
X = np.arange(5).reshape(-1, 1)
X_embedded = np.array([[0], [2], [4], [1], [3]])
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)
def test_preserve_trustworthiness_approximately():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
for init in ('random', 'pca'):
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
init=init, random_state=0)
X_embedded = tsne.fit_transform(X)
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_fit_csr_matrix():
# X can be a sparse matrix.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
random_state=0)
X_embedded = tsne.fit_transform(X_csr)
assert_almost_equal(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
D = squareform(pdist(X), "sqeuclidean")
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
metric="precomputed", random_state=0)
X_embedded = tsne.fit_transform(D)
assert_almost_equal(trustworthiness(D, X_embedded, n_neighbors=1,
precomputed=True), 1.0, decimal=1)
def test_early_exaggeration_too_small():
# Early exaggeration factor must be >= 1.
tsne = TSNE(early_exaggeration=0.99)
assert_raises_regexp(ValueError, "early_exaggeration .*",
tsne.fit_transform, np.array([[0.0]]))
def test_too_few_iterations():
# Number of gradient descent iterations must be at least 200.
tsne = TSNE(n_iter=199)
assert_raises_regexp(ValueError, "n_iter .*", tsne.fit_transform,
np.array([[0.0]]))
def test_non_square_precomputed_distances():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed")
assert_raises_regexp(ValueError, ".* square distance matrix",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_init_not_available():
# 'init' must be 'pca' or 'random'.
assert_raises_regexp(ValueError, "'init' must be either 'pca' or 'random'",
TSNE, init="not available")
def test_distance_not_available():
# 'metric' must be valid.
tsne = TSNE(metric="not available")
assert_raises_regexp(ValueError, "Unknown metric not available.*",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_pca_initialization_not_compatible_with_precomputed_kernel():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed", init="pca")
assert_raises_regexp(ValueError, "The parameter init=\"pca\" cannot be "
"used with metric=\"precomputed\".",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_verbose():
random_state = check_random_state(0)
tsne = TSNE(verbose=2)
X = random_state.randn(5, 2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[t-SNE]" in out)
assert("Computing pairwise distances" in out)
assert("Computed conditional probabilities" in out)
assert("Mean sigma" in out)
assert("Finished" in out)
assert("early exaggeration" in out)
assert("Finished" in out)
def test_chebyshev_metric():
# t-SNE should allow metrics that cannot be squared (issue #3526).
random_state = check_random_state(0)
tsne = TSNE(metric="chebyshev")
X = random_state.randn(5, 2)
tsne.fit_transform(X)
def test_reduction_to_one_component():
# t-SNE should allow reduction to one component (issue #4154).
random_state = check_random_state(0)
tsne = TSNE(n_components=1)
X = random_state.randn(5, 2)
X_embedded = tsne.fit(X).embedding_
assert(np.all(np.isfinite(X_embedded)))
| bsd-3-clause |
IBM/differential-privacy-library | diffprivlib/models/k_means.py | 1 | 10686 | # MIT License
#
# Copyright (C) IBM Corporation 2019
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
K-means clustering algorithm satisfying differential privacy.
"""
import warnings
import numpy as np
import sklearn.cluster as sk_cluster
from sklearn.utils import check_array
from diffprivlib.accountant import BudgetAccountant
from diffprivlib.mechanisms import LaplaceBoundedDomain, GeometricFolded
from diffprivlib.utils import PrivacyLeakWarning, warn_unused_args
from diffprivlib.validation import check_bounds, clip_to_bounds
class KMeans(sk_cluster.KMeans):
r"""K-Means clustering with differential privacy.
Implements the DPLloyd approach presented in [SCL16]_, leveraging the :class:`sklearn.cluster.KMeans` class for full
integration with Scikit Learn.
Parameters
----------
epsilon : float, default: 1.0
Privacy parameter :math:`\epsilon`.
bounds: tuple, optional
Bounds of the data, provided as a tuple of the form (min, max). `min` and `max` can either be scalars, covering
the min/max of the entire data, or vectors with one entry per feature. If not provided, the bounds are computed
on the data when ``.fit()`` is first called, resulting in a :class:`.PrivacyLeakWarning`.
n_clusters : int, default: 8
The number of clusters to form as well as the number of centroids to generate.
accountant : BudgetAccountant, optional
Accountant to keep track of privacy budget.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers. If the algorithm stops before fully converging, these will not be consistent
with ``labels_``.
labels_ :
Labels of each point
inertia_ : float
Sum of squared distances of samples to their closest cluster center.
n_iter_ : int
Number of iterations run.
References
----------
.. [SCL16] Su, Dong, Jianneng Cao, Ninghui Li, Elisa Bertino, and Hongxia Jin. "Differentially private k-means
clustering." In Proceedings of the sixth ACM conference on data and application security and privacy, pp. 26-37.
ACM, 2016.
"""
def __init__(self, epsilon=1.0, bounds=None, n_clusters=8, accountant=None, **unused_args):
super().__init__(n_clusters=n_clusters)
self.epsilon = epsilon
self.bounds = bounds
self.accountant = BudgetAccountant.load_default(accountant)
warn_unused_args(unused_args)
self.cluster_centers_ = None
self.bounds_processed = None
self.labels_ = None
self.inertia_ = None
self.n_iter_ = None
self._n_threads = 1
def fit(self, X, y=None, sample_weight=None):
"""Computes k-means clustering with differential privacy.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Training instances to cluster.
y : Ignored
not used, present here for API consistency by convention.
sample_weight : ignored
Ignored by diffprivlib. Present for consistency with sklearn API.
Returns
-------
self : class
"""
self.accountant.check(self.epsilon, 0)
if sample_weight is not None:
warn_unused_args("sample_weight")
del y
X = check_array(X, accept_sparse=False, dtype=[np.float64, np.float32])
n_samples, n_dims = X.shape
if n_samples < self.n_clusters:
raise ValueError("n_samples=%d should be >= n_clusters=%d" % (n_samples, self.n_clusters))
iters = self._calc_iters(n_dims, n_samples)
if self.bounds is None:
warnings.warn("Bounds have not been specified and will be calculated on the data provided. This will "
"result in additional privacy leakage. To ensure differential privacy and no additional "
"privacy leakage, specify `bounds` for each dimension.", PrivacyLeakWarning)
self.bounds = (np.min(X, axis=0), np.max(X, axis=0))
self.bounds = check_bounds(self.bounds, n_dims, min_separation=1e-5)
X = clip_to_bounds(X, self.bounds)
centers = self._init_centers(n_dims)
labels = None
distances = None
        # Update centers before recomputing labels in each pass, so that `labels` and `centers` stay consistent at the end, since convergence is unlikely
for _ in range(-1, iters):
if labels is not None:
centers = self._update_centers(X, centers=centers, labels=labels, dims=n_dims, total_iters=iters)
distances, labels = self._distances_labels(X, centers)
self.cluster_centers_ = centers
self.labels_ = labels
self.inertia_ = distances[np.arange(len(labels)), labels].sum()
self.n_iter_ = iters
self.accountant.spend(self.epsilon, 0)
return self
def _init_centers(self, dims):
if self.bounds_processed is None:
bounds_processed = np.zeros(shape=(dims, 2))
for dim in range(dims):
lower = self.bounds[0][dim]
upper = self.bounds[1][dim]
bounds_processed[dim, :] = [upper - lower, lower]
self.bounds_processed = bounds_processed
cluster_proximity = np.min(self.bounds_processed[:, 0]) / 2.0
while cluster_proximity > 0:
centers = np.zeros(shape=(self.n_clusters, dims))
cluster, retry = 0, 0
while retry < 100:
if cluster >= self.n_clusters:
break
temp_center = np.random.random(dims) * (self.bounds_processed[:, 0] - 2 * cluster_proximity) + \
self.bounds_processed[:, 1] + cluster_proximity
if cluster == 0:
centers[0, :] = temp_center
cluster += 1
continue
min_distance = ((centers[:cluster, :] - temp_center) ** 2).sum(axis=1).min()
if np.sqrt(min_distance) >= 2 * cluster_proximity:
centers[cluster, :] = temp_center
cluster += 1
retry = 0
else:
retry += 1
if cluster >= self.n_clusters:
return centers
cluster_proximity /= 2.0
return None
def _distances_labels(self, X, centers):
distances = np.zeros((X.shape[0], self.n_clusters))
for cluster in range(self.n_clusters):
distances[:, cluster] = ((X - centers[cluster, :]) ** 2).sum(axis=1)
labels = np.argmin(distances, axis=1)
return distances, labels
def _update_centers(self, X, centers, labels, dims, total_iters):
"""Updates the centers of the KMeans algorithm for the current iteration, while satisfying differential
privacy.
Differential privacy is satisfied by adding (integer-valued, using :class:`.GeometricFolded`) random noise to
the count of nearest neighbours to the previous cluster centers, and adding (real-valued, using
:class:`.LaplaceBoundedDomain`) random noise to the sum of values per dimension.
"""
epsilon_0, epsilon_i = self._split_epsilon(dims, total_iters)
geometric_mech = GeometricFolded(epsilon=epsilon_0, sensitivity=1, lower=0.5, upper=float("inf"))
for cluster in range(self.n_clusters):
if cluster not in labels:
continue
cluster_count = sum(labels == cluster)
noisy_count = geometric_mech.randomise(cluster_count)
cluster_sum = np.sum(X[labels == cluster], axis=0)
noisy_sum = np.zeros_like(cluster_sum)
for i in range(dims):
laplace_mech = LaplaceBoundedDomain(epsilon=epsilon_i,
sensitivity=self.bounds[1][i] - self.bounds[0][i],
lower=noisy_count * self.bounds[0][i],
upper=noisy_count * self.bounds[1][i])
noisy_sum[i] = laplace_mech.randomise(cluster_sum[i])
centers[cluster, :] = noisy_sum / noisy_count
return centers
def _split_epsilon(self, dims, total_iters, rho=0.225):
"""Split epsilon between sum perturbation and count perturbation, as proposed by Su et al.
Parameters
----------
dims : int
Number of dimensions to split `epsilon` across.
total_iters : int
Total number of iterations to split `epsilon` across.
rho : float, default: 0.225
Coordinate normalisation factor.
Returns
-------
epsilon_0 : float
The epsilon value for satisfying differential privacy on the count of a cluster.
epsilon_i : float
The epsilon value for satisfying differential privacy on each dimension of the center of a cluster.
"""
epsilon_i = 1
epsilon_0 = np.cbrt(4 * dims * rho ** 2)
normaliser = self.epsilon / total_iters / (epsilon_i * dims + epsilon_0)
return epsilon_i * normaliser, epsilon_0 * normaliser
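    # Worked example (illustrative note added here, not in the original source):
    # with ``dims=2``, ``total_iters=5``, ``rho=0.225`` and ``self.epsilon=1.0``,
    #   epsilon_0 = cbrt(4 * 2 * 0.225**2) = cbrt(0.405) ~= 0.740
    #   normaliser = (1.0 / 5) / (1 * 2 + 0.740) ~= 0.0730
    # so the call returns roughly (0.0730, 0.0540), and each iteration spends
    # dims * 0.0730 + 0.0540 = 0.2 = epsilon / total_iters of the budget.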
def _calc_iters(self, n_dims, n_samples, rho=0.225):
"""Calculate the number of iterations to allow for the KMeans algorithm."""
epsilon_m = np.sqrt(500 * (self.n_clusters ** 3) / (n_samples ** 2) *
(n_dims + np.cbrt(4 * n_dims * (rho ** 2))) ** 3)
iters = max(min(self.epsilon / epsilon_m, 7), 2)
return int(iters)
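# ----------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module).
# It shows one plausible way to fit the differentially private KMeans defined
# above on synthetic data. The epsilon, bounds, cluster count and data below
# are assumed demonstration values, not recommendations.
if __name__ == "__main__":
    rng = np.random.RandomState(42)
    demo_data = rng.rand(200, 2)  # synthetic points in the unit square
    # Supplying explicit bounds avoids the PrivacyLeakWarning raised in fit()
    dp_model = KMeans(epsilon=1.0, bounds=(0.0, 1.0), n_clusters=3)
    dp_model.fit(demo_data)
    print(dp_model.cluster_centers_)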
| mit |
neurodata/ndmg | setup.py | 1 | 2313 | #!/usr/bin/env python
"""
setup.py
~~~~~~~~
on package install:
- generates metadata
- installs json files for use in m2g_cloud
- installs `m2g` script keywords to the command line
- ensures python version
- installs m2g dependencies
Use `pip install .` to install the package.
Use `pip install -e .` to install the package in developer mode.
See our README for more details on package installation : https://github.com/neurodata/m2g/blob/staging/README.md
"""
from setuptools import setup, find_packages
from m2g import __version__
# initial setup
kwargs = {}
# add metadata
kwargs.update(
dict(
name="m2g",
version=__version__,
description="Neuro Data MRI to Graphs Pipeline",
author="Derek Pisner, Alex Loftus, Greg Kiar, Eric Bridgeford, and Will Gray Roncal",
author_email="[email protected], [email protected], [email protected], [email protected], [email protected]",
url="https://github.com/neurodata/m2g",
download_url="https://github.com/neurodata/m2g/tarball/" + __version__,
keywords=["connectome", "mri", "pipeline"],
classifiers=["Programming Language :: Python :: 3.6"],
)
)
# add utility info
kwargs.update(
dict(
packages=find_packages(),
package_data={"templates": ["*.json"]},
include_package_data=False, # only include the m2g_cloud template jsons
entry_points={
"console_scripts": [
"m2g=m2g.scripts.m2g_bids:main",
"m2g_dwi_pipeline=m2g.scripts.m2g_dwi_pipeline:main",
"m2g_cloud=m2g.scripts.m2g_cloud:main",
"m2g_bids=m2g.scripts.m2g_bids:main", # for backwards compatibility
]
},
python_requires=">=3.6",
)
)
# add requirements
kwargs.update(
dict(
install_requires=[
"nibabel",
"numpy",
"dipy>=1.0.0",
"scipy",
"boto3",
"awscli",
"matplotlib",
"nilearn",
"vtk",
"pyvtk",
"fury==0.5.1",
"requests",
"plotly",
"pybids>=0.9.0",
"scikit-image",
"networkx>=2.4",
"configparser>=3.7.4",
"pytest",
]
)
)
# run setup
setup(**kwargs)
| apache-2.0 |
dsm054/pandas | pandas/tests/frame/test_convert_to.py | 4 | 12747 | # -*- coding: utf-8 -*-
from datetime import datetime
import pytest
import pytz
import collections
from collections import OrderedDict, defaultdict
import numpy as np
from pandas import compat
from pandas.compat import long
from pandas import (DataFrame, Series, MultiIndex, Timestamp,
date_range)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameConvertTo(TestData):
def test_to_dict_timestamp(self):
# GH11247
# split/records producing np.datetime64 rather than Timestamps
# on datetime64[ns] dtypes only
tsmp = Timestamp('20130101')
test_data = DataFrame({'A': [tsmp, tsmp], 'B': [tsmp, tsmp]})
test_data_mixed = DataFrame({'A': [tsmp, tsmp], 'B': [1, 2]})
expected_records = [{'A': tsmp, 'B': tsmp},
{'A': tsmp, 'B': tsmp}]
expected_records_mixed = [{'A': tsmp, 'B': 1},
{'A': tsmp, 'B': 2}]
assert (test_data.to_dict(orient='records') ==
expected_records)
assert (test_data_mixed.to_dict(orient='records') ==
expected_records_mixed)
expected_series = {
'A': Series([tsmp, tsmp], name='A'),
'B': Series([tsmp, tsmp], name='B'),
}
expected_series_mixed = {
'A': Series([tsmp, tsmp], name='A'),
'B': Series([1, 2], name='B'),
}
tm.assert_dict_equal(test_data.to_dict(orient='series'),
expected_series)
tm.assert_dict_equal(test_data_mixed.to_dict(orient='series'),
expected_series_mixed)
expected_split = {
'index': [0, 1],
'data': [[tsmp, tsmp],
[tsmp, tsmp]],
'columns': ['A', 'B']
}
expected_split_mixed = {
'index': [0, 1],
'data': [[tsmp, 1],
[tsmp, 2]],
'columns': ['A', 'B']
}
tm.assert_dict_equal(test_data.to_dict(orient='split'),
expected_split)
tm.assert_dict_equal(test_data_mixed.to_dict(orient='split'),
expected_split_mixed)
def test_to_dict_index_not_unique_with_index_orient(self):
# GH22801
# Data loss when indexes are not unique. Raise ValueError.
df = DataFrame({'a': [1, 2], 'b': [0.5, 0.75]}, index=['A', 'A'])
pytest.raises(ValueError, df.to_dict, orient='index')
def test_to_dict_invalid_orient(self):
df = DataFrame({'A': [0, 1]})
pytest.raises(ValueError, df.to_dict, orient='xinvalid')
def test_to_records_dt64(self):
df = DataFrame([["one", "two", "three"],
["four", "five", "six"]],
index=date_range("2012-01-01", "2012-01-02"))
# convert_datetime64 defaults to None
expected = df.index.values[0]
result = df.to_records()['index'][0]
assert expected == result
# check for FutureWarning if convert_datetime64=False is passed
with tm.assert_produces_warning(FutureWarning):
expected = df.index.values[0]
result = df.to_records(convert_datetime64=False)['index'][0]
assert expected == result
# check for FutureWarning if convert_datetime64=True is passed
with tm.assert_produces_warning(FutureWarning):
expected = df.index[0]
result = df.to_records(convert_datetime64=True)['index'][0]
assert expected == result
def test_to_records_with_multindex(self):
# GH3189
index = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
data = np.zeros((8, 4))
df = DataFrame(data, index=index)
r = df.to_records(index=True)['level_0']
assert 'bar' in r
assert 'one' not in r
def test_to_records_with_Mapping_type(self):
import email
from email.parser import Parser
compat.Mapping.register(email.message.Message)
headers = Parser().parsestr('From: <[email protected]>\n'
'To: <[email protected]>\n'
'Subject: Test message\n'
'\n'
'Body would go here\n')
frame = DataFrame.from_records([headers])
all(x in frame for x in ['Type', 'Subject', 'From'])
def test_to_records_floats(self):
df = DataFrame(np.random.rand(10, 10))
df.to_records()
def test_to_records_index_name(self):
df = DataFrame(np.random.randn(3, 3))
df.index.name = 'X'
rs = df.to_records()
assert 'X' in rs.dtype.fields
df = DataFrame(np.random.randn(3, 3))
rs = df.to_records()
assert 'index' in rs.dtype.fields
df.index = MultiIndex.from_tuples([('a', 'x'), ('a', 'y'), ('b', 'z')])
df.index.names = ['A', None]
rs = df.to_records()
assert 'level_0' in rs.dtype.fields
def test_to_records_with_unicode_index(self):
# GH13172
# unicode_literals conflict with to_records
result = DataFrame([{u'a': u'x', u'b': 'y'}]).set_index(u'a')\
.to_records()
expected = np.rec.array([('x', 'y')], dtype=[('a', 'O'), ('b', 'O')])
tm.assert_almost_equal(result, expected)
def test_to_records_with_unicode_column_names(self):
# xref issue: https://github.com/numpy/numpy/issues/2407
# Issue #11879. to_records used to raise an exception when used
# with column names containing non-ascii characters in Python 2
result = DataFrame(data={u"accented_name_é": [1.0]}).to_records()
# Note that numpy allows for unicode field names but dtypes need
# to be specified using dictionary instead of list of tuples.
expected = np.rec.array(
[(0, 1.0)],
dtype={"names": ["index", u"accented_name_é"],
"formats": ['=i8', '=f8']}
)
tm.assert_almost_equal(result, expected)
def test_to_records_with_categorical(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# to record array
# this coerces
result = df.to_records()
expected = np.rec.array([(0, 'a'), (1, 'b'), (2, 'c')],
dtype=[('index', '=i8'), ('0', 'O')])
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize('mapping', [
dict,
collections.defaultdict(list),
collections.OrderedDict])
def test_to_dict(self, mapping):
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
# GH16122
recons_data = DataFrame(test_data).to_dict(into=mapping)
for k, v in compat.iteritems(test_data):
for k2, v2 in compat.iteritems(v):
assert (v2 == recons_data[k][k2])
recons_data = DataFrame(test_data).to_dict("l", mapping)
for k, v in compat.iteritems(test_data):
for k2, v2 in compat.iteritems(v):
assert (v2 == recons_data[k][int(k2) - 1])
recons_data = DataFrame(test_data).to_dict("s", mapping)
for k, v in compat.iteritems(test_data):
for k2, v2 in compat.iteritems(v):
assert (v2 == recons_data[k][k2])
recons_data = DataFrame(test_data).to_dict("sp", mapping)
expected_split = {'columns': ['A', 'B'], 'index': ['1', '2', '3'],
'data': [[1.0, '1'], [2.0, '2'], [np.nan, '3']]}
tm.assert_dict_equal(recons_data, expected_split)
recons_data = DataFrame(test_data).to_dict("r", mapping)
expected_records = [{'A': 1.0, 'B': '1'},
{'A': 2.0, 'B': '2'},
{'A': np.nan, 'B': '3'}]
assert isinstance(recons_data, list)
assert (len(recons_data) == 3)
for l, r in zip(recons_data, expected_records):
tm.assert_dict_equal(l, r)
# GH10844
recons_data = DataFrame(test_data).to_dict("i")
for k, v in compat.iteritems(test_data):
for k2, v2 in compat.iteritems(v):
assert (v2 == recons_data[k2][k])
df = DataFrame(test_data)
df['duped'] = df[df.columns[0]]
recons_data = df.to_dict("i")
comp_data = test_data.copy()
comp_data['duped'] = comp_data[df.columns[0]]
for k, v in compat.iteritems(comp_data):
for k2, v2 in compat.iteritems(v):
assert (v2 == recons_data[k2][k])
@pytest.mark.parametrize('mapping', [
list,
collections.defaultdict,
[]])
def test_to_dict_errors(self, mapping):
# GH16122
df = DataFrame(np.random.randn(3, 3))
with pytest.raises(TypeError):
df.to_dict(into=mapping)
def test_to_dict_not_unique_warning(self):
# GH16927: When converting to a dict, if a column has a non-unique name
# it will be dropped, throwing a warning.
df = DataFrame([[1, 2, 3]], columns=['a', 'a', 'b'])
with tm.assert_produces_warning(UserWarning):
df.to_dict()
@pytest.mark.parametrize('tz', ['UTC', 'GMT', 'US/Eastern'])
def test_to_records_datetimeindex_with_tz(self, tz):
# GH13937
dr = date_range('2016-01-01', periods=10,
freq='S', tz=tz)
df = DataFrame({'datetime': dr}, index=dr)
expected = df.to_records()
result = df.tz_convert("UTC").to_records()
# both converted to UTC, so they are equal
tm.assert_numpy_array_equal(result, expected)
def test_to_dict_box_scalars(self):
# 14216
# make sure that we are boxing properly
d = {'a': [1], 'b': ['b']}
result = DataFrame(d).to_dict()
assert isinstance(list(result['a'])[0], (int, long))
assert isinstance(list(result['b'])[0], (int, long))
result = DataFrame(d).to_dict(orient='records')
assert isinstance(result[0]['a'], (int, long))
def test_frame_to_dict_tz(self):
# GH18372 When converting to dict with orient='records' columns of
# datetime that are tz-aware were not converted to required arrays
data = [(datetime(2017, 11, 18, 21, 53, 0, 219225, tzinfo=pytz.utc),),
(datetime(2017, 11, 18, 22, 6, 30, 61810, tzinfo=pytz.utc,),)]
df = DataFrame(list(data), columns=["d", ])
result = df.to_dict(orient='records')
expected = [
{'d': Timestamp('2017-11-18 21:53:00.219225+0000', tz=pytz.utc)},
{'d': Timestamp('2017-11-18 22:06:30.061810+0000', tz=pytz.utc)},
]
tm.assert_dict_equal(result[0], expected[0])
tm.assert_dict_equal(result[1], expected[1])
@pytest.mark.parametrize('into, expected', [
(dict, {0: {'int_col': 1, 'float_col': 1.0},
1: {'int_col': 2, 'float_col': 2.0},
2: {'int_col': 3, 'float_col': 3.0}}),
(OrderedDict, OrderedDict([(0, {'int_col': 1, 'float_col': 1.0}),
(1, {'int_col': 2, 'float_col': 2.0}),
(2, {'int_col': 3, 'float_col': 3.0})])),
(defaultdict(list), defaultdict(list,
{0: {'int_col': 1, 'float_col': 1.0},
1: {'int_col': 2, 'float_col': 2.0},
2: {'int_col': 3, 'float_col': 3.0}}))
])
def test_to_dict_index_dtypes(self, into, expected):
# GH 18580
# When using to_dict(orient='index') on a dataframe with int
# and float columns only the int columns were cast to float
df = DataFrame({'int_col': [1, 2, 3],
'float_col': [1.0, 2.0, 3.0]})
result = df.to_dict(orient='index', into=into)
cols = ['int_col', 'float_col']
result = DataFrame.from_dict(result, orient='index')[cols]
expected = DataFrame.from_dict(expected, orient='index')[cols]
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
elenita1221/BDA_py_demos | demos_ch2/demo2_2.py | 19 | 3023 | """Bayesian data analysis, 3rd ed
Chapter 2, demo 2
Illustrate the effect of a prior. Comparison of posterior distributions with
different parameter values for Beta prior distribution.
"""
import numpy as np
from scipy.stats import beta
import matplotlib.pyplot as plt
# Edit default plot settings (colours from colorbrewer2.org)
plt.rc('font', size=14)
plt.rc('lines', color='#377eb8', linewidth=2)
plt.rc('axes', color_cycle=('#377eb8','#e41a1c','#4daf4a',
'#984ea3','#ff7f00','#ffff33'))
# Grid
x = np.linspace(0.36, 0.54, 150)
# Posterior with data (437,543) and uniform prior Beta(1,1)
au = 438
bu = 544
# Calculate densities
pdu = beta.pdf(x, au, bu)
# Compare 3 cases
# Arrays of different priors: Beta(0.485*n,(1-0.485)*n), for n = 2, 20, 200
ap = np.array([0.485 * (2*10**i) for i in range(3)])
bp = np.array([(1-0.485) * (2*10**i) for i in range(3)])
# Corresponding posteriors with data (437,543)
ai = 437 + ap
bi = 543 + bp
# Calculate prior and posterior densities
pdp = beta.pdf(x, ap[:,np.newaxis], bp[:,np.newaxis])
pdi = beta.pdf(x, ai[:,np.newaxis], bi[:,np.newaxis])
"""
The above two expressions use numpy broadcasting inside the `beta.pdf`
function. Arrays `ap` and `bp` have shape (3,) i.e. they are 1d arrays of
length 3. Array `x` has shape (150,) and the output `pdp` is an array of shape
(3,150).
Instead of using the `beta.pdf` function, we could also have performed other
arithmetic. For example `out = x + (ap * bp)[:,np.newaxis]` returns an array
of shape (3,150), where each element `out[i,j] = x[j] + ap[i] * bp[i]`.
With broadcasting, unnecessary repetition is avoided, i.e. it is not necessary
to create an array of `ap` repeated 150 times in memory. More info can be
found in the numpy documentation. Compare to `bsxfun` in Matlab.
"""
# Plot 3 subplots
fig, axes = plt.subplots(nrows=3, ncols=1, sharex=True, sharey=True,
figsize=(8, 12))
# Leave space for the legend on bottom and remove some space from the top
fig.subplots_adjust(bottom=0.2, top=0.94)
for i in range(3):
# Add vertical line
known = axes[i].axvline(0.485, color='#4daf4a', linewidth=1.5, alpha=0.5)
# Plot three precalculated densities
post1, = axes[i].plot(x, pdu, color='#ff8f20', linewidth=2.5)
prior, = axes[i].plot(x, pdp[i], 'k:', linewidth=1.5)
post2, = axes[i].plot(x, pdi[i], 'k--', linewidth=1.5)
plt.yticks(())
# Set the title for this subplot
axes[i].set_title(r'$\alpha/(\alpha+\beta) = 0.485,\quad \alpha+\beta = {}$'
.format(2*10**i), fontsize=18)
# Limit xaxis
axes[0].autoscale(axis='x', tight=True)
axes[0].set_ylim((0,30))
# Add legend to the last subplot
axes[-1].legend(
(post1, prior, post2, known),
( 'posterior with uniform prior',
'informative prior',
'posterior with informative prior',
r'known $\theta=0.485$ in general population'),
loc='upper center',
bbox_to_anchor=(0.5, -0.15)
)
# Display the figure
plt.show()
| gpl-3.0 |
williamdlees/TRIgS | PlotGermline.py | 2 | 14518 | # Copyright (c) 2015 William Lees
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Read an IgBlastPlus file and plot germline usage
__author__ = 'William Lees'
__docformat__ = "restructuredtext en"
import os
import sys
import argparse
import csv
import re
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import itertools
def main(argv):
parser = argparse.ArgumentParser(description='Read an IgBlastPlus file and plot germline usage.')
parser.add_argument('infiles', help='input files, names separated by commas. (IgBLASTPlus format, nt or aa)')
    parser.add_argument('field', help='input field (e.g. "V-GENE and allele")')
parser.add_argument('detail', help='F (family), G (germline) or A (allele)')
parser.add_argument('-a', '--alpha_sort', help='sort columns alphabetically (default is by decreasing size)', action='store_true')
parser.add_argument('-b', '--barcolour', help='colour or list of colours for bars')
parser.add_argument('-c', '--cols', help='Number of columns for plot')
parser.add_argument('-co', '--cons', help='Consolidate plots into a single chart or table', action='store_true')
parser.add_argument('-d', '--dupheader', help='Prefix for duplicate count, eg "DUPCOUNT=" for Presto')
parser.add_argument('-f', '--frequency', help='Express chart in terms of frequency rather than number of reads', action='store_true')
parser.add_argument('-g', '--gradientfill', help='fill bars with a gradiented colour', action='store_true')
parser.add_argument('-gh', '--grid_horizontal', help='horizontal grid lines', action='store_true')
parser.add_argument('-gv', '--grid_vertical', help='vertical grid lines every n bars')
parser.add_argument('-l', '--limit', help='limit to at most this many most frequent categories')
parser.add_argument('-s', '--save', help='Save output to file (as opposed to interactive display)')
parser.add_argument('-sz', '--size', help='Figure size (x,y)')
parser.add_argument('-t', '--titles', help='titles for each plot, separated by commas')
parser.add_argument('-w', '--width', help='relative bar width (number between 0 and 1)')
parser.add_argument('-y', '--ymax', help='Max y-value to use on all charts')
args = parser.parse_args()
infiles = args.infiles.split(',')
if args.detail not in ['F', 'G', 'A']:
print 'Error: detail must be one of F, G, A'
exit()
detail = args.detail
field = args.field
consolidate = args.cons
limit = int(args.limit) if args.limit else None
ncols = int(args.cols) if args.cols else 1
frequency = args.frequency
ymax = float(args.ymax) if args.ymax else None
outfile = args.save if args.save else None
titles = args.titles.split(',') if args.titles else infiles
bar_width = float(args.width) if args.width else 1.0
mapcolour = args.barcolour if args.barcolour else 'blue'
mapcolour = mapcolour.split(',')
grid_vertical = int(args.grid_vertical) if args.grid_vertical else False
nrows = len(infiles) / ncols
if len(infiles) % ncols != 0:
nrows += 1
(sizex, sizey) = args.size.split(',') if args.size else (8*ncols,4*nrows)
alpha_sort = args.alpha_sort
dupheader = args.dupheader
stats = []
found_records = False
for infile in infiles:
(heights, legends) = determine_stats(alpha_sort, detail, dupheader, field, frequency, infile, limit)
if len(heights) > 0:
found_records = True
else:
print 'No records matching the criteria were found in %s.' % infile
stats.append([heights, legends])
if not found_records:
quit()
if consolidate:
fullstats = []
for infile in infiles:
fullstats.append(determine_stats(alpha_sort, detail, dupheader, field, frequency, infile, None))
all_germlines_required = []
for stat in stats:
(_, legends) = stat
for legend in legends:
if legend not in all_germlines_required:
all_germlines_required.append(legend)
if len(all_germlines_required) < 1:
quit() # nothing to plot
all_germlines_required.sort()
fullheightlist = []
for (fullstat, title) in zip(fullstats, itertools.cycle(titles)):
stat_lookup = {}
(heights, legends) = fullstat
for (height, legend) in zip(heights, legends):
stat_lookup[legend] = height
all_heights = []
for germline in all_germlines_required:
all_heights.append(stat_lookup[germline] if germline in stat_lookup else 0)
fullheightlist.append(all_heights)
if not outfile or len(outfile) < 5 or outfile[-4:] != '.csv':
plt.figure(figsize=(float(sizex),float(sizey)))
if not consolidate:
plot_number = 1
for (stat, title, colour) in zip(stats, itertools.cycle(titles), itertools.cycle(mapcolour)):
(heights, legends) = stat
if len(heights) > 1:
plot_file(heights, legends, frequency, ymax, nrows, ncols, plot_number, title, colour, bar_width, args.gradientfill, args.grid_horizontal, grid_vertical)
plot_number += 1
else:
plot_multi(fullheightlist, all_germlines_required, frequency, ymax, titles, mapcolour, bar_width, args.gradientfill, args.grid_horizontal, grid_vertical)
plt.tight_layout()
if outfile:
plt.savefig(outfile)
else:
plt.show()
else:
with open(outfile, 'wb') as fo:
writer = csv.writer(fo)
if not consolidate:
for (stat, title) in zip(stats, itertools.cycle(titles)):
(heights, legends) = stat
writer.writerow([''])
writer.writerow([title])
writer.writerow(['Germline'] + legends)
writer.writerow(['Occurrences'] + heights)
else:
writer.writerow(['Germline'] + all_germlines_required)
for (heights, title) in zip(fullheightlist, itertools.cycle(titles)):
writer.writerow([title] + heights)
def plot_file(heights, legends, frequency, ymax, nrows, ncols, plot_number, title, mapcolour, bar_width, gradientfill, grid_horizontal, grid_vertical):
x_pos = np.arange(len(heights))
ax = plt.subplot(nrows, ncols, plot_number)
plt.xticks(x_pos+0.5, legends, rotation=-70, ha='center')
ax.tick_params(direction='out', top=False, right=False)
plt.xlabel(title)
if ymax:
plt.ylim(0, ymax)
if frequency:
plt.ylabel('Frequency')
else:
plt.ylabel('Reads')
plt.xlim(0, len(heights))
bar_pos = x_pos
if bar_width < 1.:
bar_pos = bar_pos + (1-bar_width)/2.
if grid_horizontal:
plt.grid(which='major', axis='y', c='black', linestyle='-', alpha=0.6, zorder=1)
if grid_vertical:
pos = grid_vertical
while pos < len(x_pos):
plt.plot([x_pos[pos], x_pos[pos]], [0, ymax], c='black', linestyle='-', alpha=0.6, zorder=1)
pos += grid_vertical
if gradientfill:
gbar(bar_pos, heights, mapcolour, width=bar_width)
else:
plt.bar(bar_pos, heights, width=bar_width, color=mapcolour, zorder=10)
# Remove every other y label because we get far too many by default
locs, labels = plt.yticks()
newlocs = []
newlabels = []
for i in range(0, len(labels)):
if i % 2 != 0:
newlocs.append(locs[i])
if frequency:
newlabels.append(str(float(locs[i])))
else:
newlabels.append(str(int(locs[i])))
plt.yticks(newlocs, newlabels)
ax.set_aspect('auto')
plt.tight_layout()
def plot_multi(heightlist, legends, frequency, ymax, titles, mapcolour, bar_width, gradientfill, grid_horizontal, grid_vertical):
x_pos = np.arange(len(legends))
ax = plt.subplot(1, 1, 1)
plt.xticks(x_pos+0.5, legends, rotation=-70, ha='center')
ax.tick_params(direction='out', top=False, right=False)
if ymax:
plt.ylim(0, ymax)
if frequency:
plt.ylabel('Frequency')
else:
plt.ylabel('Reads')
plt.xlim(0, len(legends))
bar_pos = x_pos
if bar_width < 1.:
bar_pos = bar_pos + (1-bar_width)/2.
if grid_horizontal:
plt.grid(which='major', axis='y', c='black', linestyle='-', alpha=0.6, zorder=1)
if grid_vertical:
pos = grid_vertical
while pos < len(x_pos):
plt.plot([x_pos[pos], x_pos[pos]], [0, ymax], c='black', linestyle='-', alpha=0.6, zorder=1)
pos += grid_vertical
bar_width = bar_width/len(heightlist)
i = 0
for heights,colour in zip(heightlist, itertools.cycle(mapcolour)):
if gradientfill:
gbar(bar_pos + i*bar_width, heightlist[i], colour, width=bar_width)
else:
plt.bar(bar_pos + i*bar_width, heightlist[i], width=bar_width, color=colour, zorder=10)
i += 1
# Remove every other y label because we get far too many by default
locs, labels = plt.yticks()
newlocs = []
newlabels = []
for i in range(0, len(labels)):
if i % 2 != 0:
newlocs.append(locs[i])
if frequency:
newlabels.append(str(float(locs[i])))
else:
newlabels.append(str(int(locs[i])))
plt.yticks(newlocs, newlabels)
ax.set_aspect('auto')
plt.tight_layout()
def gbar(x, y, mapcolour, width=1, bottom=0):
X = [[.6, .6], [.7, .7]]
c = mcolors.ColorConverter().to_rgb
cm = make_colormap([c('white'), c(mapcolour)])
for left, top in zip(x, y):
right = left + width
plt.imshow(X, interpolation='bicubic', cmap=cm, extent=(left, right, bottom, top), alpha=1, zorder=10)
plt.plot([left, left], [bottom, top], color='black', linestyle='-', zorder=20)
plt.plot([right, right], [bottom, top], color='black', linestyle='-', zorder=20)
plt.plot([right, left], [top, top], color='black', linestyle='-', zorder=20)
# From http://stackoverflow.com/questions/16834861/create-own-colormap-using-matplotlib-and-plot-color-scale
def make_colormap(seq):
"""Return a LinearSegmentedColormap
seq: a sequence of floats and RGB-tuples. The floats should be increasing
and in the interval (0,1).
"""
seq = [(None,) * 3, 0.0] + list(seq) + [1.0, (None,) * 3]
cdict = {'red': [], 'green': [], 'blue': []}
for i, item in enumerate(seq):
if isinstance(item, float):
r1, g1, b1 = seq[i - 1]
r2, g2, b2 = seq[i + 1]
cdict['red'].append([item, r1, r2])
cdict['green'].append([item, g1, g2])
cdict['blue'].append([item, b1, b2])
return mcolors.LinearSegmentedColormap('CustomMap', cdict)
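# For example (illustrative): with c = mcolors.ColorConverter().to_rgb as in
# gbar() above, make_colormap([c('white'), c('blue')]) yields a colormap that
# fades from white to blue, which is how the gradient bar fill is built.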
def determine_stats(alpha_sort, detail, dupheader, field, frequency, infile, limit):
germ_usage = {}
with open(infile, 'r') as fi:
ln = fi.readline()
sep = ("\t" if "\t" in ln else ",")
fi.seek(0)
reader = csv.DictReader(fi, delimiter=sep)
for row in reader:
if 'unproductive' not in row['Functionality'] and field in row and row[field] != '':
germs = to_germ(row[field], detail)
for germ in germs:
germ_usage[germ] = germ_usage.get(germ, 0) + (
get_size(row['Sequence ID'], dupheader) if dupheader else 1)
vals = {}
total_reads = 0
for k, v in germ_usage.items():
total_reads += v
if v in vals:
vals[v].append(k)
else:
vals[v] = [k]
heights = []
legends = []
indeces = sorted(vals.keys(), reverse=True)
if limit:
indeces = indeces[:limit]
for i in indeces:
for val in vals[i]:
heights.append(i)
legends.append(val)
if alpha_sort:
# Juggle the order, now that we know which values we'll be plotting
germ_usage = {}
for z in zip(legends, heights):
germ_usage[z[0]] = z[1]
heights = []
legends = []
for k in sorted(germ_usage.keys()):
heights.append(germ_usage[k])
legends.append(k)
if frequency:
for i in range(len(heights)):
heights[i] = float(heights[i]) / total_reads
return heights, legends
# Convert germline field to a list with the requested amount of detail
def to_germ(germlines, detail):
result = []
for germline in germlines.split(','):
if detail == 'A':
g = germline
elif detail == 'G':
g = germline.split('*')[0]
else:
g = germline.split('-')[0]
if g not in result:
result.append(g)
return result
# Find duplicate size, or return 1
def get_size(s, dupheader):
count = None
if dupheader in s:
spl = s.split(dupheader)
        for i in range(1, len(spl[1]) + 1):
if spl[1][0:i].isdigit():
count = int(spl[1][0:i])
else:
break
return count if count else 1
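# Example (illustrative, assumed Presto-style ID): with dupheader='DUPCOUNT=',
# an ID such as 'SEQ123|DUPCOUNT=12|OTHER=x' gives a duplicate size of 12;
# IDs without the prefix fall back to a size of 1.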
if __name__=="__main__":
main(sys.argv)
| mit |
dhermes/bezier | docs/make_images.py | 1 | 59464 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper to make images that are intended for docs.
To actually execute these functions with the desired inputs, run:
.. code-block:: console
$ nox -s docs_images
"""
import os
try:
from matplotlib import patches
from matplotlib import path as _path_mod
import matplotlib.pyplot as plt
except ImportError:
patches = None
_path_mod = None
plt = None
import numpy as np
try:
import seaborn
except ImportError:
seaborn = None
import bezier
from bezier import _geometric_intersection
from bezier import _helpers
from bezier import _plot_helpers
from bezier.hazmat import clipping
from bezier.hazmat import geometric_intersection as _py_geometric_intersection
BLUE = "blue"
GREEN = "green"
RED = "red"
if seaborn is not None:
seaborn.set() # Required in ``seaborn >= 0.8``
# As of ``0.9.0``, this palette has
# (BLUE, ORANGE, GREEN, RED, PURPLE, BROWN).
_COLORS = seaborn.color_palette(palette="deep", n_colors=6)
BLUE = _COLORS[0]
GREEN = _COLORS[2]
RED = _COLORS[3]
del _COLORS
_DOCS_DIR = os.path.abspath(os.path.dirname(__file__))
IMAGES_DIR = os.path.join(_DOCS_DIR, "images")
NO_IMAGES = "GENERATE_IMAGES" not in os.environ
def save_image(figure, filename):
"""Save an image to the docs images directory.
    Args:
        figure (matplotlib.figure.Figure): The figure to be saved.
        filename (str): The name of the file (not containing
            directory info).
"""
path = os.path.join(IMAGES_DIR, filename)
figure.savefig(path, bbox_inches="tight")
plt.close(figure)
def stack1d(*points):
"""Fill out the columns of matrix with a series of points.
This is because ``np.hstack()`` will just make another 1D vector
out of them and ``np.vstack()`` will put them in the rows.
Args:
points (Tuple[numpy.ndarray, ...]): Tuple of 1D points (i.e.
arrays with shape ``(2,)``.
Returns:
numpy.ndarray: The array with each point in ``points`` as its
columns.
"""
result = np.empty((2, len(points)), order="F")
for index, point in enumerate(points):
result[:, index] = point
return result
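# For example (illustrative): stack1d(np.array([1.0, 2.0]), np.array([3.0, 4.0]))
# returns the Fortran-ordered 2x2 array [[1.0, 3.0], [2.0, 4.0]], i.e. each
# input point becomes a column of the result.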
def linearization_error(nodes):
"""Image for :func:`.linearization_error` docstring."""
if NO_IMAGES:
return
curve = bezier.Curve.from_nodes(nodes)
line = bezier.Curve.from_nodes(nodes[:, (0, -1)])
midpoints = np.hstack([curve.evaluate(0.5), line.evaluate(0.5)])
ax = curve.plot(256, color=BLUE)
line.plot(256, ax=ax, color=GREEN)
ax.plot(
midpoints[0, :], midpoints[1, :], color="black", linestyle="dashed"
)
ax.axis("scaled")
save_image(ax.figure, "linearization_error.png")
def newton_refine1(s, new_s, curve1, t, new_t, curve2):
"""Image for :func:`.newton_refine` docstring."""
if NO_IMAGES:
return
points = np.hstack([curve1.evaluate(s), curve2.evaluate(t)])
points_new = np.hstack([curve1.evaluate(new_s), curve2.evaluate(new_t)])
ax = curve1.plot(256, color=BLUE)
curve2.plot(256, ax=ax, color=GREEN)
ax.plot(
points[0, :],
points[1, :],
color="black",
linestyle="None",
marker="o",
markeredgewidth=1,
markerfacecolor="None",
)
ax.plot(
points_new[0, :],
points_new[1, :],
color="black",
linestyle="None",
marker="o",
)
ax.axis("scaled")
save_image(ax.figure, "newton_refine1.png")
def newton_refine2(s_vals, curve1, curve2):
"""Image for :func:`.newton_refine` docstring."""
if NO_IMAGES:
return
ax = curve1.plot(256, color=BLUE)
ax.lines[-1].zorder = 1
curve2.plot(256, ax=ax, color=GREEN)
ax.lines[-1].zorder = 1
points = curve1.evaluate_multi(np.asfortranarray(s_vals))
colors = seaborn.dark_palette("blue", 5)
ax.scatter(
points[0, :], points[1, :], c=colors, s=20, alpha=0.75, zorder=2
)
ax.axis("scaled")
ax.set_xlim(0.0, 1.0)
ax.set_ylim(0.0, 1.0)
save_image(ax.figure, "newton_refine2.png")
def newton_refine3(s_vals, curve1, curve2):
"""Image for :func:`.newton_refine` docstring."""
if NO_IMAGES:
return
ax = curve1.plot(256, color=BLUE)
ax.lines[-1].zorder = 1
curve2.plot(256, ax=ax, color=GREEN)
ax.lines[-1].zorder = 1
points = curve1.evaluate_multi(np.asfortranarray(s_vals))
colors = seaborn.dark_palette("blue", 6)
ax.scatter(
points[0, :], points[1, :], c=colors, s=20, alpha=0.75, zorder=2
)
ax.axis("scaled")
ax.set_xlim(0.0, 1.0)
ax.set_ylim(0.0, 0.5625)
save_image(ax.figure, "newton_refine3.png")
def segment_intersection1(start0, end0, start1, end1, s):
"""Image for :func:`.segment_intersection` docstring."""
if NO_IMAGES:
return
line0 = bezier.Curve.from_nodes(stack1d(start0, end0))
line1 = bezier.Curve.from_nodes(stack1d(start1, end1))
ax = line0.plot(2, color=BLUE)
line1.plot(256, ax=ax, color=GREEN)
(x_val,), (y_val,) = line0.evaluate(s)
ax.plot([x_val], [y_val], color="black", marker="o")
ax.axis("scaled")
save_image(ax.figure, "segment_intersection1.png")
def segment_intersection2(start0, end0, start1, end1):
"""Image for :func:`.segment_intersection` docstring."""
if NO_IMAGES:
return
line0 = bezier.Curve.from_nodes(stack1d(start0, end0))
line1 = bezier.Curve.from_nodes(stack1d(start1, end1))
ax = line0.plot(2, color=BLUE)
line1.plot(2, ax=ax, color=GREEN)
ax.axis("scaled")
save_image(ax.figure, "segment_intersection2.png")
def helper_parallel_lines(start0, end0, start1, end1, filename):
"""Image for :func:`.parallel_lines_parameters` docstring."""
if NO_IMAGES:
return
figure = plt.figure()
ax = figure.gca()
points = stack1d(start0, end0, start1, end1)
ax.plot(points[0, :2], points[1, :2], marker="o", color=BLUE)
ax.plot(points[0, 2:], points[1, 2:], marker="o", color=GREEN)
ax.axis("scaled")
_plot_helpers.add_plot_boundary(ax)
save_image(figure, filename)
def add_patch(
ax, nodes, color, with_nodes=True, alpha=0.625, node_color="black"
):
# ``nodes`` is stored Fortran-contiguous with ``x-y`` points in each
# column but ``Path()`` wants ``x-y`` points in each row.
path = _path_mod.Path(nodes.T)
patch = patches.PathPatch(
path, edgecolor=color, facecolor=color, alpha=alpha
)
ax.add_patch(patch)
if with_nodes:
ax.plot(
nodes[0, :],
nodes[1, :],
color=node_color,
linestyle="None",
marker="o",
)
def curve_constructor(curve):
"""Image for :class`.Curve` docstring."""
if NO_IMAGES:
return
ax = curve.plot(256, color=BLUE)
line = ax.lines[0]
nodes = curve._nodes
ax.plot(
nodes[0, :], nodes[1, :], color="black", linestyle="None", marker="o"
)
add_patch(ax, nodes, line.get_color())
ax.axis("scaled")
ax.set_xlim(-0.125, 1.125)
ax.set_ylim(-0.0625, 0.5625)
save_image(ax.figure, "curve_constructor.png")
def curve_evaluate(curve):
"""Image for :meth`.Curve.evaluate` docstring."""
if NO_IMAGES:
return
ax = curve.plot(256, color=BLUE)
points = curve.evaluate_multi(np.asfortranarray([0.75]))
ax.plot(
points[0, :], points[1, :], color="black", linestyle="None", marker="o"
)
ax.axis("scaled")
ax.set_xlim(-0.125, 1.125)
ax.set_ylim(-0.0625, 0.5625)
save_image(ax.figure, "curve_evaluate.png")
def curve_evaluate_hodograph(curve, s):
"""Image for :meth`.Curve.evaluate_hodograph` docstring."""
if NO_IMAGES:
return
ax = curve.plot(256, color=BLUE)
points = curve.evaluate_multi(np.asfortranarray([s]))
if points.shape != (2, 1):
raise ValueError("Unexpected shape", points)
point = points[:, 0]
tangents = curve.evaluate_hodograph(s)
if tangents.shape != (2, 1):
raise ValueError("Unexpected shape", tangents)
tangent = tangents[:, 0]
ax.plot(
[point[0] - 2 * tangent[0], point[0] + 2 * tangent[0]],
[point[1] - 2 * tangent[1], point[1] + 2 * tangent[1]],
color=BLUE,
alpha=0.5,
)
ax.plot(
[point[0], point[0] + tangent[0]],
[point[1], point[1] + tangent[1]],
color="black",
linestyle="dashed",
marker="o",
markersize=5,
)
ax.axis("scaled")
ax.set_xlim(-0.125, 1.75)
ax.set_ylim(-0.0625, 0.75)
save_image(ax.figure, "curve_evaluate_hodograph.png")
def curve_subdivide(curve, left, right):
"""Image for :meth`.Curve.subdivide` docstring."""
if NO_IMAGES:
return
figure = plt.figure()
ax = figure.gca()
add_patch(ax, curve._nodes, "gray")
ax = left.plot(256, ax=ax, color=BLUE)
line = ax.lines[-1]
add_patch(ax, left._nodes, line.get_color())
right.plot(256, ax=ax, color=GREEN)
line = ax.lines[-1]
add_patch(ax, right._nodes, line.get_color())
ax.axis("scaled")
ax.set_xlim(-0.125, 2.125)
ax.set_ylim(-0.125, 3.125)
save_image(ax.figure, "curve_subdivide.png")
def curve_intersect(curve1, curve2, s_vals):
"""Image for :meth`.Curve.intersect` docstring."""
if NO_IMAGES:
return
ax = curve1.plot(256, color=BLUE)
curve2.plot(256, ax=ax, color=GREEN)
intersections = curve1.evaluate_multi(s_vals)
ax.plot(
intersections[0, :],
intersections[1, :],
color="black",
linestyle="None",
marker="o",
)
ax.axis("scaled")
ax.set_xlim(0.0, 0.75)
ax.set_ylim(0.0, 0.75)
save_image(ax.figure, "curve_intersect.png")
def triangle_constructor(triangle):
"""Image for :class`.Triangle` docstring."""
if NO_IMAGES:
return
ax = triangle.plot(256, color=BLUE, with_nodes=True)
line = ax.lines[0]
nodes = triangle._nodes
add_patch(ax, nodes[:, (0, 1, 2, 5)], line.get_color())
delta = 1.0 / 32.0
ax.text(
nodes[0, 0],
nodes[1, 0],
r"$v_0$",
fontsize=20,
verticalalignment="top",
horizontalalignment="right",
)
ax.text(
nodes[0, 1],
nodes[1, 1],
r"$v_1$",
fontsize=20,
verticalalignment="top",
horizontalalignment="center",
)
ax.text(
nodes[0, 2],
nodes[1, 2],
r"$v_2$",
fontsize=20,
verticalalignment="top",
horizontalalignment="left",
)
ax.text(
nodes[0, 3] - delta,
nodes[1, 3],
r"$v_3$",
fontsize=20,
verticalalignment="center",
horizontalalignment="right",
)
ax.text(
nodes[0, 4] + delta,
nodes[1, 4],
r"$v_4$",
fontsize=20,
verticalalignment="center",
horizontalalignment="left",
)
ax.text(
nodes[0, 5],
nodes[1, 5] + delta,
r"$v_5$",
fontsize=20,
verticalalignment="bottom",
horizontalalignment="center",
)
ax.axis("scaled")
ax.set_xlim(-0.125, 1.125)
ax.set_ylim(-0.125, 1.125)
save_image(ax.figure, "triangle_constructor.png")
def triangle_evaluate_barycentric(triangle, point):
"""Image for :meth`.Triangle.evaluate_barycentric` docstring."""
if NO_IMAGES:
return
ax = triangle.plot(256, color=BLUE)
ax.plot(
point[0, :], point[1, :], color="black", linestyle="None", marker="o"
)
ax.axis("scaled")
ax.set_xlim(-0.125, 1.125)
ax.set_ylim(-0.125, 1.125)
save_image(ax.figure, "triangle_evaluate_barycentric.png")
def triangle_evaluate_cartesian_multi(triangle, points):
"""Image for :meth`.Triangle.evaluate_cartesian_multi` docstring."""
if NO_IMAGES:
return
ax = triangle.plot(256, color=BLUE)
ax.plot(
points[0, :], points[1, :], color="black", linestyle="None", marker="o"
)
delta = 1.0 / 32.0
font_size = 18
ax.text(
points[0, 0],
points[1, 0],
r"$w_0$",
fontsize=font_size,
verticalalignment="top",
horizontalalignment="right",
)
ax.text(
points[0, 1] + 2 * delta,
points[1, 1],
r"$w_1$",
fontsize=font_size,
verticalalignment="center",
horizontalalignment="left",
)
ax.text(
points[0, 2],
points[1, 2] + delta,
r"$w_2$",
fontsize=font_size,
verticalalignment="bottom",
horizontalalignment="left",
)
ax.axis("scaled")
ax.set_xlim(-3.125, 2.375)
ax.set_ylim(-0.25, 2.125)
save_image(ax.figure, "triangle_evaluate_cartesian_multi.png")
def triangle_evaluate_barycentric_multi(triangle, points):
"""Image for :meth`.Triangle.evaluate_barycentric_multi` docstring."""
if NO_IMAGES:
return
ax = triangle.plot(256, color=BLUE)
ax.plot(
points[0, :], points[1, :], color="black", linestyle="None", marker="o"
)
delta = 1.0 / 32.0
font_size = 18
ax.text(
points[0, 0],
points[1, 0] + delta,
r"$w_0$",
fontsize=font_size,
verticalalignment="bottom",
horizontalalignment="center",
)
ax.text(
points[0, 1],
points[1, 1] - delta,
r"$w_1$",
fontsize=font_size,
verticalalignment="top",
horizontalalignment="right",
)
ax.text(
points[0, 2],
points[1, 2],
r"$w_2$",
fontsize=font_size,
verticalalignment="bottom",
horizontalalignment="left",
)
ax.text(
points[0, 3],
points[1, 3],
r"$w_3$",
fontsize=font_size,
verticalalignment="top",
horizontalalignment="right",
)
ax.axis("scaled")
ax.set_xlim(-3.125, 2.125)
ax.set_ylim(-0.3125, 2.125)
save_image(ax.figure, "triangle_evaluate_barycentric_multi.png")
def triangle_is_valid1(triangle):
"""Image for :meth`.Triangle.is_valid` docstring."""
if NO_IMAGES:
return
ax = triangle.plot(256, color=BLUE)
ax.axis("scaled")
ax.set_xlim(-0.125, 2.125)
ax.set_ylim(-0.125, 2.125)
save_image(ax.figure, "triangle_is_valid1.png")
def triangle_is_valid2(triangle):
"""Image for :meth`.Triangle.is_valid` docstring."""
if NO_IMAGES:
return
ax = triangle.plot(256, color=BLUE)
ax.axis("scaled")
ax.set_xlim(-0.125, 1.0625)
ax.set_ylim(-0.0625, 1.0625)
save_image(ax.figure, "triangle_is_valid2.png")
def triangle_is_valid3(triangle):
"""Image for :meth`.Triangle.is_valid` docstring."""
if NO_IMAGES:
return
edge1, edge2, edge3 = triangle.edges
N = 128
# Compute points on each edge.
std_s = np.linspace(0.0, 1.0, N + 1)
points1 = edge1.evaluate_multi(std_s)
points2 = edge2.evaluate_multi(std_s)
points3 = edge3.evaluate_multi(std_s)
# Compute the actual boundary where the Jacobian is 0.
s_vals = np.linspace(0.0, 0.2, N)
t_discrim = np.sqrt((1.0 - s_vals) * (1.0 - 5.0 * s_vals))
t_top = 0.5 * (1.0 - s_vals + t_discrim)
t_bottom = 0.5 * (1.0 - s_vals - t_discrim)
jacobian_zero_params = np.zeros((2 * N - 1, 2), order="F")
jacobian_zero_params[:N, 0] = s_vals
jacobian_zero_params[:N, 1] = t_top
jacobian_zero_params[N:, 0] = s_vals[-2::-1]
jacobian_zero_params[N:, 1] = t_bottom[-2::-1]
jac_edge = triangle.evaluate_cartesian_multi(jacobian_zero_params)
# Add the triangle to the plot and add a dashed line
# for each "true" edge.
figure = plt.figure()
ax = figure.gca()
(line,) = ax.plot(jac_edge[0, :], jac_edge[1, :], color=BLUE)
color = line.get_color()
ax.plot(points1[0, :], points1[1, :], color="black", linestyle="dashed")
ax.plot(points2[0, :], points2[1, :], color="black", linestyle="dashed")
ax.plot(points3[0, :], points3[1, :], color="black", linestyle="dashed")
polygon = np.hstack([points1[:, 1:], points2[:, 1:], jac_edge[:, 1:]])
add_patch(ax, polygon, color, with_nodes=False)
ax.axis("scaled")
ax.set_xlim(-0.0625, 1.0625)
ax.set_ylim(-0.0625, 1.0625)
save_image(ax.figure, "triangle_is_valid3.png")
def triangle_subdivide1():
"""Image for :meth`.Triangle.subdivide` docstring."""
if NO_IMAGES:
return
triangle = bezier.Triangle.from_nodes(
np.asfortranarray([[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
)
triangle_a, triangle_b, triangle_c, triangle_d = triangle.subdivide()
figure, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)
for ax in (ax1, ax2, ax3, ax4):
triangle.plot(2, ax=ax, color=BLUE)
triangle_a.plot(2, ax=ax1, color=GREEN)
ax1.text(
1.0 / 6.0,
1.0 / 6.0,
r"$A$",
fontsize=20,
verticalalignment="center",
horizontalalignment="center",
)
triangle_b.plot(2, ax=ax2, color=GREEN)
ax2.text(
1.0 / 3.0,
1.0 / 3.0,
r"$B$",
fontsize=20,
verticalalignment="center",
horizontalalignment="center",
)
triangle_c.plot(2, ax=ax3, color=GREEN)
ax3.text(
2.0 / 3.0,
1.0 / 6.0,
r"$C$",
fontsize=20,
verticalalignment="center",
horizontalalignment="center",
)
triangle_d.plot(2, ax=ax4, color=GREEN)
ax4.text(
1.0 / 6.0,
2.0 / 3.0,
r"$D$",
fontsize=20,
verticalalignment="center",
horizontalalignment="center",
)
for ax in (ax1, ax2, ax3, ax4):
ax.axis("scaled")
save_image(figure, "triangle_subdivide1")
def add_edges(ax, triangle, s_vals, color):
edge1, edge2, edge3 = triangle.edges
# Compute points on each edge.
points1 = edge1.evaluate_multi(s_vals)
points2 = edge2.evaluate_multi(s_vals)
points3 = edge3.evaluate_multi(s_vals)
# Add the points to the plot.
ax.plot(points1[0, :], points1[1, :], color=color)
ax.plot(points2[0, :], points2[1, :], color=color)
ax.plot(points3[0, :], points3[1, :], color=color)
def triangle_subdivide2(triangle, sub_triangle_b):
"""Image for :meth`.Triangle.subdivide` docstring."""
if NO_IMAGES:
return
# Plot set-up.
figure = plt.figure()
ax = figure.gca()
colors = seaborn.husl_palette(6)
N = 128
s_vals = np.linspace(0.0, 1.0, N + 1)
# Add edges from triangle.
add_edges(ax, triangle, s_vals, colors[4])
# Now do the same for triangle B.
add_edges(ax, sub_triangle_b, s_vals, colors[0])
# Add the control points polygon for the original triangle.
nodes = triangle._nodes[:, (0, 2, 4, 5, 0)]
add_patch(ax, nodes, colors[2], with_nodes=False)
# Add the control points polygon for the sub-triangle.
nodes = sub_triangle_b._nodes[:, (0, 1, 2, 5, 3, 0)]
add_patch(ax, nodes, colors[1], with_nodes=False)
# Plot **all** the nodes.
sub_nodes = sub_triangle_b._nodes
ax.plot(
sub_nodes[0, :],
sub_nodes[1, :],
color="black",
linestyle="None",
marker="o",
)
# Take those same points and add the boundary.
ax.plot(nodes[0, :], nodes[1, :], color="black", linestyle="dashed")
ax.axis("scaled")
ax.set_xlim(-1.125, 2.125)
ax.set_ylim(-0.125, 4.125)
save_image(ax.figure, "triangle_subdivide2")
def curved_polygon_constructor1(curved_poly):
"""Image for :class`.CurvedPolygon` docstring."""
if NO_IMAGES:
return
ax = curved_poly.plot(256, color=BLUE)
ax.axis("scaled")
ax.set_xlim(-0.125, 2.125)
ax.set_ylim(-0.625, 1.625)
save_image(ax.figure, "curved_polygon_constructor1.png")
def curved_polygon_constructor2(curved_poly):
"""Image for :class`.CurvedPolygon` docstring."""
if NO_IMAGES:
return
ax = curved_poly.plot(256, color=BLUE)
ax.axis("scaled")
ax.set_xlim(-0.125, 2.125)
ax.set_ylim(-0.125, 1.125)
save_image(ax.figure, "curved_polygon_constructor2.png")
def triangle_locate(triangle, point):
"""Image for :meth`.Triangle.locate` docstring."""
if NO_IMAGES:
return
ax = triangle.plot(256, color=BLUE)
ax.plot(
point[0, :], point[1, :], color="black", linestyle="None", marker="o"
)
ax.axis("scaled")
ax.set_xlim(-0.0625, 1.0625)
ax.set_ylim(-0.1875, 1.0625)
save_image(ax.figure, "triangle_locate.png")
def curve_specialize(curve, new_curve):
"""Image for :meth`.Curve.specialize` docstring."""
if NO_IMAGES:
return
ax = curve.plot(256, color=BLUE)
interval = r"$\left[0, 1\right]$"
line = ax.lines[-1]
line.set_label(interval)
color1 = line.get_color()
new_curve.plot(256, ax=ax, color=GREEN)
interval = r"$\left[-\frac{1}{4}, \frac{3}{4}\right]$"
line = ax.lines[-1]
line.set_label(interval)
ax.plot(
curve._nodes[0, (0, -1)],
curve._nodes[1, (0, -1)],
color=color1,
linestyle="None",
marker="o",
)
ax.plot(
new_curve._nodes[0, (0, -1)],
new_curve._nodes[1, (0, -1)],
color=line.get_color(),
linestyle="None",
marker="o",
)
ax.legend(loc="lower right", fontsize=12)
ax.axis("scaled")
ax.set_xlim(-0.375, 1.125)
ax.set_ylim(-0.75, 0.625)
save_image(ax.figure, "curve_specialize.png")
def newton_refine_triangle(triangle, x_val, y_val, s, t, new_s, new_t):
"""Image for :func:`.hazmat.triangle_helpers.newton_refine` docstring."""
if NO_IMAGES:
return
figure, (ax1, ax2) = plt.subplots(1, 2)
# Plot features of the parameter space in ax1.
linear_triangle = bezier.Triangle.from_nodes(
np.asfortranarray([[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
)
linear_triangle.plot(2, ax=ax1, color=BLUE)
ax1.plot([0.25], [0.5], marker="H", color=GREEN)
ax1.plot([s], [t], color="black", linestyle="None", marker="o")
ax1.plot(
[new_s],
[new_t],
color="black",
linestyle="None",
marker="o",
markeredgewidth=1,
markerfacecolor="None",
)
# Plot the equivalent output in ax2.
triangle.plot(256, ax=ax2, color=BLUE)
points = triangle.evaluate_cartesian_multi(
np.asfortranarray([[s, t], [new_s, new_t]])
)
ax2.plot([x_val], [y_val], marker="H", color=GREEN)
ax2.plot(
points[0, [0]],
points[1, [0]],
color="black",
linestyle="None",
marker="o",
)
ax2.plot(
points[0, [1]],
points[1, [1]],
color="black",
linestyle="None",
marker="o",
markeredgewidth=1,
markerfacecolor="None",
)
# Set the axis bounds / scaling.
ax1.axis("scaled")
ax1.set_xlim(-0.0625, 1.0625)
ax1.set_ylim(-0.0625, 1.0625)
ax2.axis("scaled")
ax2.set_xlim(-0.125, 2.125)
ax2.set_ylim(-0.125, 2.125)
save_image(figure, "newton_refine_triangle.png")
def classify_help(s, curve1, triangle1, curve2, triangle2, interior, ax=None):
assert triangle1.is_valid
edge1, _, _ = triangle1.edges
assert np.all(edge1._nodes == curve1._nodes)
assert triangle2.is_valid
edge2, _, _ = triangle2.edges
assert np.all(edge2._nodes == curve2._nodes)
ax = triangle1.plot(256, ax=ax, color=BLUE)
# Manually reduce the alpha on the triangle patch(es).
ax.patches[-1].set_alpha(0.1875)
color1 = ax.lines[-1].get_color()
triangle2.plot(256, ax=ax, color=GREEN)
ax.patches[-1].set_alpha(0.1875)
color2 = ax.lines[-1].get_color()
# Remove the existing boundary (lines) and just add our edges.
while ax.lines:
ax.lines[-1].remove()
edge1.plot(256, ax=ax, color=color1)
edge2.plot(256, ax=ax, color=color2)
(int_x,), (int_y,) = curve1.evaluate(s)
if interior == 0:
color = color1
elif interior == 1:
color = color2
else:
color = RED
ax.plot([int_x], [int_y], color=color, linestyle="None", marker="o")
ax.axis("scaled")
return ax
def classify_intersection1(s, curve1, tangent1, curve2, tangent2):
"""Image for :func:`.hazmat.triangle_helpers.classify_intersection` doc."""
if NO_IMAGES:
return
triangle1 = bezier.Triangle.from_nodes(
np.asfortranarray(
[[1.0, 1.75, 2.0, 1.0, 1.5, 1.0], [0.0, 0.25, 1.0, 1.0, 1.5, 2.0]]
)
)
triangle2 = bezier.Triangle.from_nodes(
np.asfortranarray(
[
[0.0, 1.6875, 2.0, 0.25, 1.25, 0.5],
[0.0, 0.0625, 0.5, 1.0, 1.25, 2.0],
]
)
)
ax = classify_help(s, curve1, triangle1, curve2, triangle2, 0)
(int_x,), (int_y,) = curve1.evaluate(s)
# Remove the alpha from the color
color1 = ax.patches[0].get_facecolor()[:3]
color2 = ax.patches[1].get_facecolor()[:3]
ax.plot(
[int_x, int_x + tangent1[0, 0]],
[int_y, int_y + tangent1[1, 0]],
color=color1,
linestyle="dashed",
)
ax.plot(
[int_x, int_x + tangent2[0, 0]],
[int_y, int_y + tangent2[1, 0]],
color=color2,
linestyle="dashed",
)
ax.plot([int_x], [int_y], color=color1, linestyle="None", marker="o")
ax.axis("scaled")
ax.set_xlim(-0.125, 2.125)
ax.set_ylim(-0.125, 1.125)
save_image(ax.figure, "classify_intersection1.png")
def classify_intersection2(s, curve1, curve2):
"""Image for :func:`.hazmat.triangle_helpers.classify_intersection` doc."""
if NO_IMAGES:
return
triangle1 = bezier.Triangle.from_nodes(
np.asfortranarray(
[[1.0, 1.5, 2.0, 1.25, 1.75, 1.5], [0.0, 1.0, 0.0, 1.0, 1.0, 2.0]]
)
)
triangle2 = bezier.Triangle.from_nodes(
np.asfortranarray(
[[0.0, 1.5, 3.0, 0.75, 2.25, 1.5], [0.0, 1.0, 0.0, 2.0, 2.0, 4.0]]
)
)
ax = classify_help(s, curve1, triangle1, curve2, triangle2, 1)
ax.set_xlim(-0.0625, 3.0625)
ax.set_ylim(-0.0625, 0.5625)
save_image(ax.figure, "classify_intersection2.png")
def classify_intersection3(s, curve1, curve2):
"""Image for :func:`.hazmat.triangle_helpers.classify_intersection` doc."""
if NO_IMAGES:
return
triangle1 = bezier.Triangle.from_nodes(
np.asfortranarray(
[
[2.0, 1.5, 1.0, 1.75, 1.25, 1.5],
[0.0, 1.0, 0.0, -1.0, -1.0, -2.0],
]
)
)
triangle2 = bezier.Triangle.from_nodes(
np.asfortranarray(
[
[3.0, 1.5, 0.0, 2.25, 0.75, 1.5],
[0.0, 1.0, 0.0, -2.0, -2.0, -4.0],
]
)
)
ax = classify_help(s, curve1, triangle1, curve2, triangle2, 0)
ax.set_xlim(-0.0625, 3.0625)
ax.set_ylim(-0.0625, 0.5625)
save_image(ax.figure, "classify_intersection3.png")
def classify_intersection4(s, curve1, curve2):
"""Image for :func:`.hazmat.triangle_helpers.classify_intersection` doc."""
if NO_IMAGES:
return
triangle1 = bezier.Triangle.from_nodes(
np.asfortranarray(
[
[2.0, 1.5, 1.0, 1.75, 1.25, 1.5],
[0.0, 1.0, 0.0, -1.0, -1.0, -2.0],
]
)
)
triangle2 = bezier.Triangle.from_nodes(
np.asfortranarray(
[[0.0, 1.5, 3.0, 0.75, 2.25, 1.5], [0.0, 1.0, 0.0, 2.0, 2.0, 4.0]]
)
)
ax = classify_help(s, curve1, triangle1, curve2, triangle2, None)
ax.set_xlim(-0.0625, 3.0625)
ax.set_ylim(-0.0625, 0.5625)
save_image(ax.figure, "classify_intersection4.png")
def classify_intersection5(s, curve1, curve2):
"""Image for :func:`.hazmat.triangle_helpers.classify_intersection` doc."""
if NO_IMAGES:
return
triangle1 = bezier.Triangle.from_nodes(
np.asfortranarray(
[
[1.0, 1.5, 2.0, 1.25, 1.75, 1.5],
[0.0, 1.0, 0.0, 0.9375, 0.9375, 1.875],
]
)
)
triangle2 = bezier.Triangle.from_nodes(
np.asfortranarray(
[
[3.0, 1.5, 0.0, 2.25, 0.75, 1.5],
[0.0, 1.0, 0.0, -2.0, -2.0, -4.0],
]
)
)
figure, (ax1, ax2) = plt.subplots(2, 1)
classify_help(s, curve1, triangle1, curve2, triangle2, 0, ax=ax1)
classify_help(s, curve1, triangle1, curve2, triangle2, 1, ax=ax2)
# Remove the alpha from the color
color1 = ax1.patches[0].get_facecolor()[:3]
color2 = ax1.patches[1].get_facecolor()[:3]
# Now add the "degenerate" intersection polygons. The first
# comes from specializing to
# left1(0.5, 1.0)-left2(0.0, 0.25)-right1(0.375, 0.5)
triangle3 = bezier.Triangle.from_nodes(
np.asfortranarray(
[
[1.5, 1.75, 2.0, 1.6875, 1.9375, 1.875],
[0.5, 0.5, 0.0, 0.5, 0.234375, 0.46875],
]
)
)
# NOTE: We don't require the intersection polygon be valid.
triangle3.plot(256, ax=ax1, color=RED)
# The second comes from specializing to
# left1(0.0, 0.5)-right1(0.5, 0.625)-left3(0.75, 1.0)
triangle4 = bezier.Triangle.from_nodes(
np.asfortranarray(
[
[1.0, 1.25, 1.5, 1.0625, 1.3125, 1.125],
[0.0, 0.5, 0.5, 0.234375, 0.5, 0.46875],
]
)
)
# NOTE: We don't require the intersection polygon be valid.
triangle4.plot(256, ax=ax2, color=RED)
(int_x,), (int_y,) = curve1.evaluate(s)
ax1.plot([int_x], [int_y], color=color1, linestyle="None", marker="o")
ax2.plot([int_x], [int_y], color=color2, linestyle="None", marker="o")
for ax in (ax1, ax2):
ax.axis("scaled")
ax.set_xlim(-0.0625, 3.0625)
ax.set_ylim(-0.0625, 0.5625)
plt.setp(ax1.get_xticklabels(), visible=False)
figure.tight_layout(h_pad=-7.0)
save_image(figure, "classify_intersection5.png")
def classify_intersection6(s, curve1, curve2):
"""Image for :func:`.hazmat.triangle_helpers.classify_intersection` doc."""
if NO_IMAGES:
return
triangle1 = bezier.Triangle.from_nodes(
np.asfortranarray(
[
[-0.125, -0.125, 0.375, -0.0625, 0.1875, 0.0],
[0.0625, -0.0625, 0.0625, 0.15625, 0.15625, 0.25],
]
)
)
triangle2 = bezier.Triangle.from_nodes(
np.asfortranarray(
[
[-0.25, -0.25, 0.75, 0.125, 0.625, 0.5],
[0.25, -0.25, 0.25, 0.625, 0.625, 1.0],
]
)
)
ax = classify_help(s, curve1, triangle1, curve2, triangle2, None)
ax.set_xlim(-0.3125, 1.0625)
ax.set_ylim(-0.0625, 0.3125)
save_image(ax.figure, "classify_intersection6.png")
def classify_intersection7(s, curve1a, curve1b, curve2):
"""Image for :func:`.hazmat.triangle_helpers.classify_intersection` doc."""
if NO_IMAGES:
return
triangle1 = bezier.Triangle.from_nodes(
np.asfortranarray(
[
[0.0, 4.5, 9.0, 0.0, 4.5, 0.0],
[0.0, 0.0, 2.25, 1.25, 2.375, 2.5],
]
)
)
triangle2 = bezier.Triangle.from_nodes(
np.asfortranarray(
[
[11.25, 9.0, 2.75, 8.125, 3.875, 5.0],
[0.0, 4.5, 1.0, -0.75, -0.25, -1.5],
]
)
)
figure, (ax1, ax2) = plt.subplots(2, 1)
classify_help(s, curve1a, triangle1, curve2, triangle2, None, ax=ax1)
triangle1._nodes = np.asfortranarray(
triangle1._nodes[:, (2, 4, 5, 1, 3, 0)]
)
triangle1._edges = None
classify_help(0.0, curve1b, triangle1, curve2, triangle2, 0, ax=ax2)
for ax in (ax1, ax2):
ax.set_xlim(-0.125, 11.5)
ax.set_ylim(-0.125, 2.625)
plt.setp(ax1.get_xticklabels(), visible=False)
figure.tight_layout(h_pad=-5.0)
save_image(figure, "classify_intersection7.png")
def get_curvature(nodes, s, tangent_vec, curvature):
"""Image for :func:`get_curvature` docstring."""
if NO_IMAGES:
return
curve = bezier.Curve.from_nodes(nodes)
# Find the center of the circle along the direction
# perpendicular to the tangent vector (90 degree left turn).
radius_dir = np.asfortranarray([[-tangent_vec[1, 0]], [tangent_vec[0, 0]]])
radius_dir /= np.linalg.norm(radius_dir, ord=2)
point = curve.evaluate(s)
circle_center = point + radius_dir / curvature
# Add the curve.
ax = curve.plot(256, color=BLUE)
# Add the circle.
circle_center = circle_center.ravel(order="F")
circle = plt.Circle(circle_center, 1.0 / abs(curvature), alpha=0.25)
ax.add_artist(circle)
# Add the point.
ax.plot(
point[0, :], point[1, :], color="black", marker="o", linestyle="None"
)
ax.axis("scaled")
ax.set_xlim(-0.0625, 1.0625)
ax.set_ylim(-0.0625, 0.625)
save_image(ax.figure, "get_curvature.png")
def curve_locate(curve, point1, point2, point3):
"""Image for :meth`.Curve.locate` docstring."""
if NO_IMAGES:
return
ax = curve.plot(256, color=BLUE)
points = np.hstack([point1, point2, point3])
ax.plot(
points[0, :], points[1, :], color="black", linestyle="None", marker="o"
)
ax.axis("scaled")
ax.set_xlim(-0.8125, 0.0625)
ax.set_ylim(0.75, 2.0625)
save_image(ax.figure, "curve_locate.png")
def newton_refine_curve(curve, point, s, new_s):
"""Image for :func:`.hazmat.curve_helpers.newton_refine` docstring."""
if NO_IMAGES:
return
ax = curve.plot(256, color=BLUE)
ax.plot(point[0, :], point[1, :], marker="H", color=GREEN)
wrong_points = curve.evaluate_multi(np.asfortranarray([s, new_s]))
ax.plot(
wrong_points[0, [0]],
wrong_points[1, [0]],
color="black",
linestyle="None",
marker="o",
)
ax.plot(
wrong_points[0, [1]],
wrong_points[1, [1]],
color="black",
linestyle="None",
marker="o",
markeredgewidth=1,
markerfacecolor="None",
)
# Set the axis bounds / scaling.
ax.axis("scaled")
ax.set_xlim(-0.125, 3.125)
ax.set_ylim(-0.125, 1.375)
save_image(ax.figure, "newton_refine_curve.png")
def newton_refine_curve_cusp(curve, s_vals):
"""Image for :func:`.hazmat.curve_helpers.newton_refine` docstring."""
if NO_IMAGES:
return
ax = curve.plot(256, color=BLUE)
ax.lines[-1].zorder = 1
points = curve.evaluate_multi(np.asfortranarray(s_vals))
colors = seaborn.dark_palette("blue", 6)
ax.scatter(
points[0, :], points[1, :], c=colors, s=20, alpha=0.75, zorder=2
)
# Set the axis bounds / scaling.
ax.axis("scaled")
ax.set_xlim(-0.125, 6.125)
ax.set_ylim(-3.125, 3.125)
save_image(ax.figure, "newton_refine_curve_cusp.png")
def classify_intersection8(s, curve1, triangle1, curve2, triangle2):
"""Image for :func:`.hazmat.triangle_helpers.classify_intersection` doc."""
if NO_IMAGES:
return
ax = classify_help(s, curve1, triangle1, curve2, triangle2, None)
ax.set_xlim(-1.125, 1.125)
ax.set_ylim(-0.125, 1.125)
save_image(ax.figure, "classify_intersection8.png")
def _edges_classify_intersection9():
"""The edges for the curved polygon intersection used below.
Helper for :func:`classify_intersection9`.
"""
edges1 = (
bezier.Curve.from_nodes(
np.asfortranarray([[32.0, 30.0], [20.0, 25.0]])
),
bezier.Curve.from_nodes(
np.asfortranarray([[30.0, 25.0, 20.0], [25.0, 20.0, 20.0]])
),
bezier.Curve.from_nodes(
np.asfortranarray([[20.0, 25.0, 30.0], [20.0, 20.0, 15.0]])
),
bezier.Curve.from_nodes(
np.asfortranarray([[30.0, 32.0], [15.0, 20.0]])
),
)
edges2 = (
bezier.Curve.from_nodes(
np.asfortranarray([[8.0, 10.0], [20.0, 15.0]])
),
bezier.Curve.from_nodes(
np.asfortranarray([[10.0, 15.0, 20.0], [15.0, 20.0, 20.0]])
),
bezier.Curve.from_nodes(
np.asfortranarray([[20.0, 15.0, 10.0], [20.0, 20.0, 25.0]])
),
bezier.Curve.from_nodes(
np.asfortranarray([[10.0, 8.0], [25.0, 20.0]])
),
)
return edges1, edges2
def classify_intersection9(s, curve1, curve2):
"""Image for :func:`.hazmat.triangle_helpers.classify_intersection` doc."""
if NO_IMAGES:
return
triangle1 = bezier.Triangle.from_nodes(
np.asfortranarray(
[
[0.0, 20.0, 40.0, 10.0, 30.0, 20.0],
[0.0, 40.0, 0.0, 25.0, 25.0, 50.0],
]
)
)
triangle2 = bezier.Triangle.from_nodes(
np.asfortranarray(
[
[40.0, 20.0, 0.0, 30.0, 10.0, 20.0],
[40.0, 0.0, 40.0, 15.0, 15.0, -10.0],
]
)
)
figure, (ax1, ax2) = plt.subplots(1, 2)
classify_help(s, curve1, triangle1, curve2, triangle2, 0, ax=ax1)
classify_help(s, curve1, triangle1, curve2, triangle2, 1, ax=ax2)
# Remove the alpha from the color
color1 = ax1.patches[0].get_facecolor()[:3]
color2 = ax1.patches[1].get_facecolor()[:3]
# Now add the "degenerate" intersection polygons.
cp_edges1, cp_edges2 = _edges_classify_intersection9()
curved_polygon1 = bezier.CurvedPolygon(*cp_edges1)
curved_polygon1.plot(256, ax=ax1, color=RED)
curved_polygon2 = bezier.CurvedPolygon(*cp_edges2)
curved_polygon2.plot(256, ax=ax2, color=RED)
(int_x,), (int_y,) = curve1.evaluate(s)
ax1.plot([int_x], [int_y], color=color1, linestyle="None", marker="o")
ax2.plot([int_x], [int_y], color=color2, linestyle="None", marker="o")
for ax in (ax1, ax2):
ax.axis("scaled")
ax.set_xlim(-2.0, 42.0)
ax.set_ylim(-12.0, 52.0)
plt.setp(ax2.get_yticklabels(), visible=False)
figure.tight_layout(w_pad=1.0)
save_image(figure, "classify_intersection9.png")
def curve_elevate(curve, elevated):
"""Image for :meth:`.curve.Curve.elevate` docstring."""
if NO_IMAGES:
return
figure, (ax1, ax2) = plt.subplots(1, 2)
curve.plot(256, ax=ax1, color=BLUE)
color = ax1.lines[-1].get_color()
add_patch(ax1, curve._nodes, color)
elevated.plot(256, ax=ax2, color=BLUE)
color = ax2.lines[-1].get_color()
add_patch(ax2, elevated._nodes, color)
ax1.axis("scaled")
ax2.axis("scaled")
_plot_helpers.add_plot_boundary(ax1)
ax2.set_xlim(*ax1.get_xlim())
ax2.set_ylim(*ax1.get_ylim())
save_image(figure, "curve_elevate.png")
def triangle_elevate(triangle, elevated):
"""Image for :meth:`.triangle.Triangle.elevate` docstring."""
if NO_IMAGES:
return
figure, (ax1, ax2) = plt.subplots(1, 2)
triangle.plot(256, ax=ax1, color=BLUE)
color = ax1.lines[-1].get_color()
nodes = triangle._nodes[:, (0, 1, 2, 4, 5)]
add_patch(ax1, nodes, color)
elevated.plot(256, ax=ax2, color=BLUE)
color = ax2.lines[-1].get_color()
nodes = elevated._nodes[:, (0, 1, 2, 3, 6, 8, 9)]
add_patch(ax2, nodes, color)
ax1.axis("scaled")
ax2.axis("scaled")
_plot_helpers.add_plot_boundary(ax1)
ax2.set_xlim(*ax1.get_xlim())
ax2.set_ylim(*ax1.get_ylim())
save_image(figure, "triangle_elevate.png")
def unit_triangle():
"""Image for :class:`.triangle.Triangle` docstring."""
if NO_IMAGES:
return
nodes = np.asfortranarray([[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
triangle = bezier.Triangle(nodes, degree=1)
ax = triangle.plot(256, color=BLUE)
ax.axis("scaled")
_plot_helpers.add_plot_boundary(ax)
save_image(ax.figure, "unit_triangle.png")
def curve_reduce(curve, reduced):
"""Image for :meth:`.curve.Curve.reduce` docstring."""
if NO_IMAGES:
return
figure, (ax1, ax2) = plt.subplots(1, 2, sharex=True, sharey=True)
curve.plot(256, ax=ax1, color=BLUE)
color = ax1.lines[-1].get_color()
add_patch(ax1, curve._nodes, color)
reduced.plot(256, ax=ax2, color=BLUE)
color = ax2.lines[-1].get_color()
add_patch(ax2, reduced._nodes, color)
ax1.axis("scaled")
ax2.axis("scaled")
_plot_helpers.add_plot_boundary(ax2)
save_image(figure, "curve_reduce.png")
def curve_reduce_approx(curve, reduced):
"""Image for :meth:`.curve.Curve.reduce` docstring."""
if NO_IMAGES:
return
ax = curve.plot(256, color=BLUE)
color = ax.lines[-1].get_color()
add_patch(ax, curve._nodes, color, alpha=0.25, node_color=color)
reduced.plot(256, ax=ax, color=GREEN)
color = ax.lines[-1].get_color()
add_patch(ax, reduced._nodes, color, alpha=0.25, node_color=color)
ax.axis("scaled")
_plot_helpers.add_plot_boundary(ax)
save_image(ax.figure, "curve_reduce_approx.png")
def simple_axis(ax):
ax.axis("scaled")
ax.set_xticklabels([])
ax.set_yticklabels([])
def plot_with_bbox(curve, ax, color, with_nodes=False):
curve.plot(256, color=color, ax=ax)
left, right, bottom, top = _helpers.bbox(curve._nodes)
bbox_nodes = np.asfortranarray(
[[left, right, right, left], [bottom, bottom, top, top]]
)
add_patch(ax, bbox_nodes, color, with_nodes=False)
if with_nodes:
ax.plot(
curve._nodes[0, :],
curve._nodes[1, :],
color=color,
linestyle="None",
marker="o",
markersize=4,
)
def plot_with_convex_hull(curve, ax, color, with_nodes=False):
curve.plot(256, color=color, ax=ax)
convex_hull = _helpers.simple_convex_hull(curve._nodes)
add_patch(ax, convex_hull, color, with_nodes=False)
if with_nodes:
ax.plot(
curve._nodes[0, :],
curve._nodes[1, :],
color=color,
linestyle="None",
marker="o",
markersize=4,
)
def _curve_boundary_predicate(filename, curve_boundary_plot, headers):
header1, header2, header3 = headers
figure, (ax1, ax2, ax3) = plt.subplots(1, 3)
control_pts1a = np.asfortranarray([[0.0, 0.375, 1.0], [0.0, 0.5, 0.125]])
curve1a = bezier.Curve(control_pts1a, degree=2)
control_pts1b = np.asfortranarray(
[[0.25, -0.125, 0.5], [-0.125, 0.375, 1.0]]
)
curve1b = bezier.Curve(control_pts1b, degree=2)
curve_boundary_plot(curve1a, ax1, BLUE)
curve_boundary_plot(curve1b, ax1, GREEN)
control_pts2a = np.asfortranarray([[0.0, 0.75, 1.0], [1.0, 0.75, 0.0]])
curve2a = bezier.Curve(control_pts2a, degree=2)
control_pts2b = np.asfortranarray(
[[0.625, 0.875, 1.625], [1.625, 0.875, 0.625]]
)
curve2b = bezier.Curve(control_pts2b, degree=2)
curve_boundary_plot(curve2a, ax2, BLUE)
curve_boundary_plot(curve2b, ax2, GREEN)
control_pts3a = np.asfortranarray([[0.0, 0.25, 1.0], [-0.25, 0.25, -0.75]])
curve3a = bezier.Curve(control_pts3a, degree=2)
control_pts3b = np.asfortranarray([[1.0, 1.5, 2.0], [-1.0, -1.5, -1.0]])
curve3b = bezier.Curve(control_pts3b, degree=2)
curve_boundary_plot(curve3a, ax3, BLUE)
curve_boundary_plot(curve3b, ax3, GREEN)
for ax in (ax1, ax2, ax3):
simple_axis(ax)
text_size = 10
ax1.set_xlim(-0.2, 1.1)
ax1.set_ylim(-0.2, 1.1)
ax1.set_title(header1, fontsize=text_size)
ax2.set_xlim(-0.1, 1.75)
ax2.set_ylim(-0.1, 1.75)
ax2.set_title(header2, fontsize=text_size)
ax3.set_xlim(-0.1, 2.1)
ax3.set_ylim(-1.7, 0.5)
ax3.set_title(header3, fontsize=text_size)
figure.set_size_inches(6.0, 2.2)
figure.subplots_adjust(
left=0.01, bottom=0.01, right=0.99, top=0.9, wspace=0.04, hspace=0.2
)
save_image(figure, filename)
def bounding_box_predicate():
headers = ("MAYBE", "MAYBE", "NO")
_curve_boundary_predicate(
"bounding_box_predicate.png", plot_with_bbox, headers
)
def convex_hull_predicate():
headers = ("MAYBE", "NO", "NO")
_curve_boundary_predicate(
"convex_hull_predicate.png", plot_with_convex_hull, headers
)
def subdivide_curve():
figure, (ax1, ax2, ax3) = plt.subplots(1, 3, sharex=True, sharey=True)
nodes = np.asfortranarray([[0.0, 1.0, 2.0, 4.0], [0.0, 4.0, 0.0, 3.0]])
curve = bezier.Curve.from_nodes(nodes)
left, right = curve.subdivide()
curve.plot(256, ax=ax1, alpha=0.25, color="black")
left.plot(256, ax=ax1)
curve.plot(256, ax=ax2)
curve.plot(256, ax=ax3, alpha=0.25, color="black")
right.plot(256, ax=ax3)
text_size = 10
ax1.text(
2.5,
0.25,
r"$\left[0, \frac{1}{2}\right]$",
horizontalalignment="center",
verticalalignment="center",
fontsize=text_size,
)
ax2.text(
2.5,
0.25,
r"$\left[0, 1\right]$",
horizontalalignment="center",
verticalalignment="center",
fontsize=text_size,
)
ax3.text(
2.5,
0.25,
r"$\left[\frac{1}{2}, 1\right]$",
horizontalalignment="center",
verticalalignment="center",
fontsize=text_size,
)
for ax in (ax1, ax2, ax3):
simple_axis(ax)
figure.set_size_inches(6.0, 1.5)
figure.subplots_adjust(
left=0.01, bottom=0.01, right=0.99, top=0.99, wspace=0.04, hspace=0.2
)
save_image(figure, "subdivide_curve.png")
def bbox_intersect(curve1, curve2):
enum_val = _geometric_intersection.bbox_intersect(
curve1.nodes, curve2.nodes
)
return enum_val != _py_geometric_intersection.BoxIntersectionType.DISJOINT
def refine_candidates(left, right):
new_left = []
for curve in left:
new_left.extend(curve.subdivide())
new_right = []
for curve in right:
new_right.extend(curve.subdivide())
keep_left = []
keep_right = []
for curve1 in new_left:
for curve2 in new_right:
if bbox_intersect(curve1, curve2):
keep_left.append(curve1)
if curve2 not in keep_right:
keep_right.append(curve2)
return keep_left, keep_right
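# A minimal usage sketch (assuming two intersecting ``bezier.Curve`` objects
# named ``curve_a`` and ``curve_b``, which are not defined here): each call
# halves every candidate segment and keeps only the pairs whose bounding
# boxes still overlap, e.g.
#   left, right = [curve_a], [curve_b]
#   for _ in range(5):
#       left, right = refine_candidates(left, right)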
def unique_curves(pairs):
left_tuples = set()
right_tuples = set()
left_curves = []
right_curves = []
for left, right in pairs:
as_tuple = tuple(left._nodes.flatten(order="F"))
if as_tuple not in left_tuples:
left_tuples.add(as_tuple)
left_curves.append(left)
as_tuple = tuple(right._nodes.flatten(order="F"))
if as_tuple not in right_tuples:
right_tuples.add(as_tuple)
right_curves.append(right)
return left_curves, right_curves
def subdivision_process():
nodes15 = np.asfortranarray([[0.25, 0.625, 1.0], [0.625, 0.25, 1.0]])
curve15 = bezier.Curve(nodes15, degree=2)
nodes25 = np.asfortranarray([[0.0, 0.25, 0.75, 1.0], [0.5, 1.0, 1.5, 0.5]])
curve25 = bezier.Curve(nodes25, degree=3)
figure, all_axes = plt.subplots(2, 3, sharex=True, sharey=True)
ax1, ax2, ax3, ax4, ax5, ax6 = all_axes.flatten()
color1 = BLUE
color2 = GREEN
plot_with_bbox(curve15, ax1, color1)
plot_with_bbox(curve25, ax1, color2)
left, right = refine_candidates([curve15], [curve25])
for curve in left:
plot_with_bbox(curve, ax2, color1)
for curve in right:
plot_with_bbox(curve, ax2, color2)
for ax in (ax3, ax4, ax5, ax6):
left, right = refine_candidates(left, right)
curve15.plot(256, color=color1, alpha=0.5, ax=ax)
for curve in left:
plot_with_bbox(curve, ax, color=color1)
curve25.plot(256, color=color2, alpha=0.5, ax=ax)
for curve in right:
plot_with_bbox(curve, ax, color2)
for ax in (ax1, ax2, ax3, ax4, ax5, ax6):
simple_axis(ax)
ax.set_xlim(-0.05, 1.05)
ax.set_ylim(0.4, 1.15)
figure.set_size_inches(6.0, 2.8)
figure.subplots_adjust(
left=0.01, bottom=0.01, right=0.99, top=0.99, wspace=0.04, hspace=0.04
)
save_image(figure, "subdivision_process.png")
def _subdivision_pruning_zoom(all_axes, column):
half_width = 0.5 ** (column + 2)
min_x = 0.75 - half_width
max_x = 0.75 + half_width
min_y = 0.25 - half_width
max_y = 0.25 + half_width
for row in (0, 1, 2):
all_axes[row, column].plot(
[min_x, max_x, max_x, min_x, min_x],
[min_y, min_y, max_y, max_y, min_y],
color="black",
)
buffer = 0.5 ** (column + 6)
for row in (1, 2):
all_axes[row, column].set_xlim(min_x - buffer, max_x + buffer)
all_axes[row, column].set_ylim(min_y - buffer, max_y + buffer)
def subdivision_pruning():
figure, all_axes = plt.subplots(3, 4)
nodes69 = np.asfortranarray([[0.0, 1.0, 1.0], [0.0, 0.0, 1.0]])
curve69 = bezier.Curve(nodes69, degree=2)
delta = np.asfortranarray([[1.0, 1.0, -1.0], [-1.0, -1.0, 1.0]]) / 32.0
nodes_other = nodes69 + delta
curve_other = bezier.Curve(nodes_other, degree=2)
color1 = BLUE
color2 = GREEN
for ax in all_axes.flatten():
curve69.plot(256, color=color1, ax=ax)
curve_other.plot(256, color=color2, ax=ax)
candidates = {0: [(curve69, curve_other)]}
intersections = []
for i in range(5):
candidates[i + 1] = _py_geometric_intersection.intersect_one_round(
candidates[i], intersections
)
for column in (0, 1, 2, 3):
left_curves, right_curves = unique_curves(candidates[column + 2])
for curve in left_curves:
plot_with_bbox(curve, all_axes[0, column], color1)
plot_with_bbox(curve, all_axes[1, column], color1, with_nodes=True)
plot_with_convex_hull(
curve, all_axes[2, column], color1, with_nodes=True
)
for curve in right_curves:
plot_with_bbox(curve, all_axes[0, column], color2)
plot_with_bbox(curve, all_axes[1, column], color2, with_nodes=True)
plot_with_convex_hull(
curve, all_axes[2, column], color2, with_nodes=True
)
for ax in all_axes.flatten():
simple_axis(ax)
_subdivision_pruning_zoom(all_axes, 0)
_subdivision_pruning_zoom(all_axes, 1)
_subdivision_pruning_zoom(all_axes, 2)
_subdivision_pruning_zoom(all_axes, 3)
intersection_params = curve69.intersect(curve_other)
s_vals = intersection_params[0, :]
intersections = curve69.evaluate_multi(s_vals)
for column in (0, 1, 2, 3):
all_axes[0, column].plot(
intersections[0, :],
intersections[1, :],
color="black",
linestyle="None",
marker="o",
markersize=4,
)
save_image(figure, "subdivision_pruning.png")
def _plot_endpoints_line(ax, fat_line_coeffs, **plot_kwargs):
# NOTE: This assumes the x-limits have already been set for the axis.
coeff_a, coeff_b, coeff_c, _, _ = fat_line_coeffs
if coeff_b == 0.0:
raise NotImplementedError("Vertical lines not supported")
min_x, max_x = ax.get_xlim()
# ax + by + c = 0 ==> y = -(ax + c)/b
min_y = -(coeff_a * min_x + coeff_c) / coeff_b
max_y = -(coeff_a * max_x + coeff_c) / coeff_b
ax.plot(
[min_x, max_x],
[min_y, max_y],
**plot_kwargs,
)
ax.set_xlim(min_x, max_x)
def _normalize_implicit_line_tuple(info):
length = np.linalg.norm(info[:2], ord=2)
return tuple(np.array(info) / length)
def compute_implicit_line(nodes):
"""Image for :func:`.hazmat.clipping.compute_implicit_line` docstring."""
if NO_IMAGES:
return
curve = bezier.Curve.from_nodes(nodes)
ax = curve.plot(256, color=BLUE)
min_x, max_x = nodes[0, (0, -1)]
min_y, max_y = nodes[1, (0, -1)]
ax.plot(
[min_x, max_x, max_x],
[min_y, min_y, max_y],
color="black",
linestyle="dashed",
)
ax.axis("scaled")
# NOTE: This "cheats" and assumes knowledge of what's actually in ``nodes``.
ax.set_xticks([0.0, 1.0, 2.0, 3.0, 4.0])
ax.set_yticks([0.0, 1.0, 2.0, 3.0])
info = clipping.compute_fat_line(nodes)
info = _normalize_implicit_line_tuple(info)
_plot_endpoints_line(ax, info, color="black")
save_image(ax.figure, "compute_implicit_line.png")
def _plot_fat_lines(ax, fat_line_coeffs, **fill_between_kwargs):
# NOTE: This assumes the x-limits have already been set for the axis.
coeff_a, coeff_b, coeff_c, d_low, d_high = fat_line_coeffs
if coeff_b == 0.0:
raise NotImplementedError("Vertical lines not supported")
min_x, max_x = ax.get_xlim()
coeff_c_low = coeff_c - d_low
coeff_c_high = coeff_c - d_high
# ax + by + c = 0 ==> y = -(ax + c)/b
min_y_low = -(coeff_a * min_x + coeff_c_low) / coeff_b
min_y_high = -(coeff_a * min_x + coeff_c_high) / coeff_b
max_y_low = -(coeff_a * max_x + coeff_c_low) / coeff_b
max_y_high = -(coeff_a * max_x + coeff_c_high) / coeff_b
ax.fill_between(
[min_x, max_x],
[min_y_low, max_y_low],
[min_y_high, max_y_high],
**fill_between_kwargs,
)
ax.set_xlim(min_x, max_x)
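# Note on the band drawn by _plot_fat_lines above: assuming the coefficients
# have been normalized so that a^2 + b^2 == 1, d(x, y) = a*x + b*y + c is the
# signed distance to the base line, and the shaded region is bounded by the
# level sets d == d_low and d == d_high, obtained by shifting the constant
# term c as done above.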
def _add_perpendicular_segments(ax, nodes, fat_line_coeffs, color):
coeff_a, coeff_b, coeff_c, _, _ = fat_line_coeffs
_, num_nodes = nodes.shape
for index in range(num_nodes):
# ax + by + c = 0 is perpendicular to lines of the form
# bx - ay = c'
curr_x, curr_y = nodes[:, index]
c_prime = coeff_b * curr_x - coeff_a * curr_y
# bx - ay = c' intersects ax + by + c = 0 at
# [x0, y0] = [b c' - a c, -a c' - b c] (assuming a^2 + b^2 == 1)
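        # (Derivation sketch for the formula above: solve the 2x2 system
        #  a*x + b*y = -c and b*x - a*y = c' by elimination, which gives
        #  (a^2 + b^2)*x = b*c' - a*c and (a^2 + b^2)*y = -a*c' - b*c;
        #  with the normalization a^2 + b^2 == 1 this reduces to the
        #  expressions used for x_intersect and y_intersect below.)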
x_intersect = coeff_b * c_prime - coeff_a * coeff_c
y_intersect = -coeff_a * c_prime - coeff_b * coeff_c
ax.plot(
[curr_x, x_intersect],
[curr_y, y_intersect],
color=color,
linestyle="dashed",
)
def compute_fat_line(nodes, fat_line_coeffs):
"""Image for :func:`.hazmat.clipping.compute_fat_line` docstring."""
if NO_IMAGES:
return
fat_line_coeffs = _normalize_implicit_line_tuple(fat_line_coeffs)
curve = bezier.Curve.from_nodes(nodes)
ax = curve.plot(256, color=BLUE)
ax.plot(
nodes[0, :],
nodes[1, :],
marker="o",
color=BLUE,
linestyle="none",
)
_add_perpendicular_segments(ax, nodes, fat_line_coeffs, BLUE)
ax.axis("scaled")
_plot_endpoints_line(ax, fat_line_coeffs, color=BLUE, linestyle="dashed")
_plot_fat_lines(ax, fat_line_coeffs, color=BLUE, alpha=0.5)
save_image(ax.figure, "compute_fat_line.png")
def clip_range(nodes1, nodes2):
"""Image for :func:`.hazmat.clipping.clip_range` docstring."""
if NO_IMAGES:
return
curve1 = bezier.Curve.from_nodes(nodes1)
curve2 = bezier.Curve.from_nodes(nodes2)
# Plot both curves as well as the nodes.
ax = curve1.plot(256, color=BLUE)
curve2.plot(256, ax=ax, color=GREEN)
ax.plot(
nodes1[0, :],
nodes1[1, :],
marker="o",
color=BLUE,
linestyle="none",
)
ax.plot(
nodes2[0, :],
nodes2[1, :],
marker="o",
color=GREEN,
linestyle="none",
)
fat_line_coeffs = clipping.compute_fat_line(nodes1)
fat_line_coeffs = _normalize_implicit_line_tuple(fat_line_coeffs)
# Add perpendicular lines to the "implicit" line.
_add_perpendicular_segments(ax, nodes2, fat_line_coeffs, GREEN)
# Establish boundary **assuming** contents of ``nodes1`` and ``nodes2``.
ax.axis("scaled")
ax.set_xlim(-0.625, 7.375)
ax.set_ylim(-0.25, 4.625)
_plot_endpoints_line(ax, fat_line_coeffs, color=BLUE, linestyle="dashed")
_plot_fat_lines(ax, fat_line_coeffs, color=BLUE, alpha=0.5)
save_image(ax.figure, "clip_range.png")
def clip_range_distances(nodes1, nodes2):
"""Image for :func:`.hazmat.clipping.clip_range` docstring."""
if NO_IMAGES:
return
figure = plt.figure()
ax = figure.gca()
fat_line_coeffs = clipping.compute_fat_line(nodes1)
coeff_a, coeff_b, coeff_c, d_min, d_max = fat_line_coeffs
degree2, polynomial = clipping._clip_range_polynomial(
nodes2, coeff_a, coeff_b, coeff_c
)
ax.fill_between([0.0, degree2], d_min, d_max, color=BLUE, alpha=0.25)
s_min, s_max = clipping.clip_range(nodes1, nodes2)
convex_hull = _helpers.simple_convex_hull(polynomial)
add_patch(
ax,
convex_hull,
GREEN,
with_nodes=True,
alpha=0.625,
node_color=GREEN,
)
# Plot the true distance function ``d(t)``.
t_values = np.linspace(0.0, 1.0, 257)
curve2 = bezier.Curve.from_nodes(nodes2)
evaluated = curve2.evaluate_multi(t_values)
x_values = degree2 * t_values
d_values = coeff_a * evaluated[0, :] + coeff_b * evaluated[1, :] + coeff_c
ax.plot(x_values, d_values, color=GREEN)
# Add dashed lines to each control point in the convex hull.
for index in range(degree2 + 1):
x_val, y_val = polynomial[:, index]
ax.plot([x_val, x_val], [0.0, y_val], color=GREEN, linestyle="dashed")
# NOTE: This "cheats" and uses the fact that it knows that ``s_min``
# corresponds to ``d_max`` and ``s_max`` corresponds to ``d_min``.
ax.plot(
[degree2 * s_min, degree2 * s_max],
[d_max, d_min],
color="black",
marker="o",
linestyle="none",
)
# Use minor xticks **above** for showing s_min and s_max.
jitter = 0.5 ** 5
# NOTE: We introduce ``jitter`` to avoid using the same value for a minor
# xtick that is used for a major one. When ``matplotlib`` sees a
# minor xtick at the exact same value used by a major xtick, it
# ignores the tick.
ax.set_xticks(
[degree2 * s_min + jitter, degree2 * s_max - jitter], minor=True
)
ax.set_xticklabels([f"$t = {s_min}$", f"$t = {s_max}$"], minor=True)
ax.tick_params(
axis="x",
which="minor",
direction="in",
top=False,
bottom=False,
labelbottom=False,
labeltop=True,
)
# Add line up to minor xticks. Similar to the dots on ``s_min`` and
# ``s_max`` this "cheats" with the correspondence to ``d_min`` / ``d_max``.
min_y, max_y = ax.get_ylim()
ax.plot(
[degree2 * s_min, degree2 * s_min],
[d_max, max_y],
color="black",
alpha=0.125,
linestyle="dashed",
)
ax.plot(
[degree2 * s_max, degree2 * s_max],
[d_min, max_y],
color="black",
alpha=0.125,
linestyle="dashed",
)
ax.set_ylim(min_y, max_y)
ax.set_xlabel("$2t$")
ax.set_ylabel("$d(t)$", rotation=0)
save_image(figure, "clip_range_distances.png")
def main():
bounding_box_predicate()
convex_hull_predicate()
subdivide_curve()
subdivision_process()
subdivision_pruning()
if __name__ == "__main__":
main()
| apache-2.0 |
smorton2/think-stats | code/scatter.py | 69 | 4281 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import sys
import numpy as np
import math
import brfss
import thinkplot
import thinkstats2
def GetHeightWeight(df, hjitter=0.0, wjitter=0.0):
"""Get sequences of height and weight.
df: DataFrame with htm3 and wtkg2
hjitter: float magnitude of random noise added to heights
wjitter: float magnitude of random noise added to weights
returns: tuple of sequences (heights, weights)
"""
heights = df.htm3
if hjitter:
heights = thinkstats2.Jitter(heights, hjitter)
weights = df.wtkg2
if wjitter:
weights = thinkstats2.Jitter(weights, wjitter)
return heights, weights
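# Example call (values taken from MakeFigures below): adding a little random
# jitter spreads out heights/weights that were recorded on a coarse grid, e.g.
#   heights, weights = GetHeightWeight(sample, hjitter=1.3, wjitter=0.5)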
def ScatterPlot(heights, weights, alpha=1.0):
"""Make a scatter plot and save it.
heights: sequence of float
weights: sequence of float
alpha: float
"""
thinkplot.Scatter(heights, weights, alpha=alpha)
thinkplot.Config(xlabel='height (cm)',
ylabel='weight (kg)',
axis=[140, 210, 20, 200],
legend=False)
def HexBin(heights, weights, bins=None):
"""Make a hexbin plot and save it.
heights: sequence of float
weights: sequence of float
bins: 'log' or None for linear
"""
thinkplot.HexBin(heights, weights, bins=bins)
thinkplot.Config(xlabel='height (cm)',
ylabel='weight (kg)',
axis=[140, 210, 20, 200],
legend=False)
def MakeFigures(df):
"""Make scatterplots.
"""
sample = thinkstats2.SampleRows(df, 5000)
# simple scatter plot
thinkplot.PrePlot(cols=2)
heights, weights = GetHeightWeight(sample)
ScatterPlot(heights, weights)
# scatter plot with jitter
thinkplot.SubPlot(2)
heights, weights = GetHeightWeight(sample, hjitter=1.3, wjitter=0.5)
ScatterPlot(heights, weights)
thinkplot.Save(root='scatter1')
# with jitter and transparency
thinkplot.PrePlot(cols=2)
ScatterPlot(heights, weights, alpha=0.1)
# hexbin plot
thinkplot.SubPlot(2)
heights, weights = GetHeightWeight(df, hjitter=1.3, wjitter=0.5)
HexBin(heights, weights)
thinkplot.Save(root='scatter2')
def BinnedPercentiles(df):
"""Bin the data by height and plot percentiles of weight for eachbin.
df: DataFrame
"""
cdf = thinkstats2.Cdf(df.htm3)
print('Fraction between 140 and 200 cm', cdf[200] - cdf[140])
bins = np.arange(135, 210, 5)
indices = np.digitize(df.htm3, bins)
groups = df.groupby(indices)
heights = [group.htm3.mean() for i, group in groups][1:-1]
cdfs = [thinkstats2.Cdf(group.wtkg2) for i, group in groups][1:-1]
thinkplot.PrePlot(3)
for percent in [75, 50, 25]:
weights = [cdf.Percentile(percent) for cdf in cdfs]
label = '%dth' % percent
thinkplot.Plot(heights, weights, label=label)
thinkplot.Save(root='scatter3',
xlabel='height (cm)',
ylabel='weight (kg)')
def Correlations(df):
print('pandas cov', df.htm3.cov(df.wtkg2))
#print('NumPy cov', np.cov(df.htm3, df.wtkg2, ddof=0))
print('thinkstats2 Cov', thinkstats2.Cov(df.htm3, df.wtkg2))
print()
print('pandas corr', df.htm3.corr(df.wtkg2))
#print('NumPy corrcoef', np.corrcoef(df.htm3, df.wtkg2, ddof=0))
print('thinkstats2 Corr', thinkstats2.Corr(df.htm3, df.wtkg2))
print()
print('pandas corr spearman', df.htm3.corr(df.wtkg2, method='spearman'))
print('thinkstats2 SpearmanCorr',
thinkstats2.SpearmanCorr(df.htm3, df.wtkg2))
print('thinkstats2 SpearmanCorr log wtkg3',
thinkstats2.SpearmanCorr(df.htm3, np.log(df.wtkg2)))
print()
print('thinkstats2 Corr log wtkg3',
thinkstats2.Corr(df.htm3, np.log(df.wtkg2)))
print()
def main(script):
thinkstats2.RandomSeed(17)
df = brfss.ReadBrfss(nrows=None)
df = df.dropna(subset=['htm3', 'wtkg2'])
Correlations(df)
return
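    # NOTE: the early return above skips MakeFigures and BinnedPercentiles;
    # remove it to regenerate the scatter, hexbin, and binned-percentile figures.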
MakeFigures(df)
BinnedPercentiles(df)
if __name__ == '__main__':
main(*sys.argv)
| gpl-3.0 |
willkuhn/wingrid | wingrid/core.py | 1 | 76216 | # -*- coding: utf-8 -*-
"""
Wingrid Insect Wing Color Analysis Package
"""
# Author: William R. Kuhn
# License: GNU GPL License v3
from .helpers import downscaleIf,slope,polar2cart,cart2polar,RGB2chrom
import cv2 # MUST BE version 3.0 or higher
import numpy as np
import random
import matplotlib.pyplot as plt
from matplotlib.patches import Wedge
from matplotlib.collections import PatchCollection
from matplotlib import lines
from scipy.spatial.distance import euclidean
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
################################################################################
# GRID SAMPLER CLASS
class Grid():
"""Grid Sampler
Takes an image that contains a pair of wings, fits a stretchable grid to
the wings in the image, and samples color features from the grid.
For now, images are assumed to have had their backgrounds removed
    beforehand with photo-editing software, such as GIMP. See Image Preparation Guide
for details.
IMPORTANT: Images must be RGB, not BGR. OpenCV imports images as BGR, so if
if is used to import an image for grid-fitting, the channel order must be
reversed beforehand, like so:
>>> im = cv2.imread(im_filename)[:,:,::-1]
The steps for fitting a grid to an image are as follows:
(1) Detect wings in the image with simple thresholding.
(2) Fit a minimum-enclosing triangle ABC to the wings, so that AB is line
along the anterior margin of the set of wings, AC is a line along
the posterior margin and A is a vertex at the basal edges of the
wings.
(3) Define n_grid_rows-1 evenly-spaced lines between AB and AC that all
intersect vertex A. Take the slopes (in degrees) of these lines, and
those of AB and AC, as the variable `thetas`.
(4) Find the closest and farthest wing pixel values from vertex A and
take their distance from A as the beginning and end of a list of
`radii`. Calculate n_grid_cols-1 additional evenly-spaced radii
between these bounds and add them to `radii`.
(5) Grid cells can now be formed, where rows are lines radiating from
vertex A at the angles defined in `thetas` and columns are arcs around
vertex A at distances defined in 'radii'.
Parameters
----------
n_grid_rows,n_grid_cols : int, optional (default 10 and 10)
Desired number of rows and columns in the grid. Each must be >=1.
background : str, optional
Background color in the image (background must be converted to this
color in Photoshop, for example, prior to sampling), possible values:
- 'black' (default)
- 'white'
use_chrom : bool, optional (default True)
Whether to transform the RGB image pixel values to chromatic coordinates
prior to sampling the image.
blur : bool, optional (default True)
Passed to `RGB2chrom()`; whether blurring is applied before an image is
converted to chromatic coordinates.
max_dim : int, optional (default 1000)
Maximum dimension for an input image. If the image excedes this amount
in height or width, the image will be downsampled so that the maximum of
those equals `max_dim`
min_object_size : int, optional (default 20)
Used to remove small objects during masking. Minimum size of an object
(in terms of no. of edge pixels) that will be accepted by
`Grid._get_mask()`. Smaller objects will be filtered out. Increasing
this value means that larger objects will be removed during masking.
Attributes
----------
n_cells_ : int
Number of cells in the grid (n_grid_rows*n_grid_cols).
n_features_ : int
Number of features that will be produces (3*2*n_cells_).
thetas_ : array, shape (n_grid_rows+1,)
Slopes (in degrees) used to draw rows of the grid.
radii_ : array, shape (n_grid_cols+1,)
Radii used to draw columns of the grid.
mask_ : bool array, same shape as input image
Boolean mask of input image.
hull_pts_ : array, shape (arbitrary,2)
Coordinates of edge pixels in the convex image of `mask_`.
tri_ : array, shape (3,2)
Coordinates of minimum-enclosing triangle around the wings in the input
image.
features_ : array, shape (n_features,)
Features calculated by sampling input image. `np.nan` is returned for
features from 'edge cells' (see `f_mask_` description below).
f_mask_ : bool array, shape (n_features_,)
List describing condition of the cell where each feature is derived.
`False` means that the cell contains some of the input image's mask
and/or that the cell is out of the bounds of the image. Cells that
meet either/both of these conditions are called 'edge cells'. `True`
means that the cell is not an 'edge cell'. This mask is used later for
filtering out edge cells among a set features extracted from multiple
images.
f_labels_ : array, shape (n_features_,)
        List of string labels describing each feature in the form
[channel: r,g,b][method: m,s][cell number]. Ex, the label 'gs12'
describes a feature derived from the stdev of cell 12 in the green
channel.
cell_px_coords_ : list of arrays, length (n_grid_cols*n_grid_rows)
(x,y) coordinates of all pixels in each grid cell.
Examples
--------
    >>> from wingrid import Grid
    >>> import cv2
    >>> grid = Grid(n_grid_rows=5,n_grid_cols=8)
    >>> im = cv2.imread('image.jpg')
    >>> im = im[:,:,::-1] # convert from openCV's BGR to RGB
    >>> features,f_mask = grid.fit_sample(im)
"""
def __init__(self, n_grid_rows=5, n_grid_cols=8, background='black',
use_chrom=True, blur=True, max_dim=1000, min_object_size=20):
self.n_grid_rows = n_grid_rows
self.n_grid_cols = n_grid_cols
self.background = background
self.use_chrom = use_chrom
self.blur = blur
self.max_dim = max_dim
self.min_object_size = min_object_size
# Check `n_grid_rows` and `n_grid_cols`
if n_grid_rows<1:
raise ValueError("`n_grid_rows` must be >=1")
elif n_grid_cols<1:
raise ValueError("`n_grid_cols` must be >=1")
# Check that cv2 includes minEnclosingTriangle
self._check_opencv_version()
# Calculate the number of cells in the grid
self.n_cells_ = n_grid_rows*n_grid_cols
# Calculate the no. of features to be extracted
self.n_features_ = n_grid_rows*n_grid_cols*2*3
# Make feature labels
self._get_f_labels()
@staticmethod
def _check_opencv_version():
"""OpenCV versions before 3.1 are missing `cv2.minEnclosingTriangle`,
which is required for `Grid._calc_triangle()`. This returns an error if
that function is missing."""
if not 'minEnclosingTriangle' in dir(cv2):
msg =("OpenCV version >=3.1 (with `minEnclosingTriangle`) is"
" required. Found v{}".format(cv2.__version__))
raise RuntimeError(msg)
def _get_f_labels(self):
"""Creates feature labels.
Labels are strings in the form [channel][statistic][cell no.], where
channel is 'r', 'g', or 'b', statistic is 'm' for mean or 's' for
standard deviation and cells are numbered left to right, top to bottom
in the grid, starting with 0."""
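        # For example, with n_grid_rows=1 and n_grid_cols=2 (so n_cells_ == 2),
        # the comprehension below yields labels ordered by channel, then
        # statistic, then cell index:
        # ['rm0','rm1','rs0','rs1','gm0','gm1','gs0','gs1','bm0','bm1','bs0','bs1']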
n_cells = self.n_cells_
labels = np.array([ch+st+str(c) for ch in ('rgb') for st in ('ms') \
for c in range(n_cells)])
labels = np.array(labels)
self.f_labels_ = labels
def _get_mask(self,image):
"""Masks input image.
Masks by thresholding using the specified `background` then looking for
large objects (i.e. wings) in the thresholded image."""
min_object_size = self.min_object_size
background = self.background
# Check image first
if image is None:
raise ValueError('Input image is `None`.')
# Downsample large images
im = downscaleIf(image, max_dim=self.max_dim)
gray = cv2.cvtColor(im, cv2.COLOR_RGB2GRAY)
# Threshold image
if background=='white':
_,thresh = cv2.threshold(gray,254,255,type=cv2.THRESH_BINARY)
elif background=='black':
_,thresh = cv2.threshold(gray,1,255,type=cv2.THRESH_BINARY_INV)
else:
raise ValueError("Unrecognized value for `background`. Available options: ['black','white']")
# Find objects in the image
_,cnts,_ = cv2.findContours(thresh,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
# Add to a blank mask all objects that are at least 500 edge pixels in size
mask = np.ones(thresh.shape, dtype=np.uint8)*255
# Check that at least 1 object is exceeds `min_object_size`
if np.all([len(c)<min_object_size for c in cnts]):
raise RuntimeError('`_get_mask` failed to detect wings in image. Try lowering `min_object_size`.')
for c in cnts:
if len(c)>=min_object_size: # only include 'large' objects
cv2.drawContours(mask, [c], -1, 0, -1)
mask = 255 - mask # Invert mask (so white = wings, black = background)
mask = mask.astype(bool)
# Verify mask shape
if image.shape[:2] != mask.shape:
raise RuntimeError('Mask shape does not match image shape: %s,%s'
% ( str(mask.shape)),str(image.shape[:2]) )
# Check that mask doesn't include too much of the image's perimeter
perim = np.concatenate([mask[:8].ravel(),
mask[-8:].ravel(),
mask[:,:8].ravel(),
mask[:,-8:].ravel()])
if perim.tolist().count(True)/float(len(perim))>0.15:
raise RuntimeError("Image was incorrectly masked. Make sure that"
" the image's background has been removed and replaced with black"
" or white.")
self.mask_ = mask
def _get_hull_pts(self):
"""Get coordinates of edge pixels in mask"""
mask = self.mask_
thresh = mask.astype(np.uint8)*255
# Get contours
_,cnts,_ = cv2.findContours(thresh,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
if len(cnts)>1: # if more than one object in image
cnts = np.concatenate(cnts) # join cnts to get points for all objects
elif len(cnts)==1: # else take the 1st object in cnts, cast as array
cnts = np.array(cnts[0])
else:
raise RuntimeError('Failed to detect wings in image.')
# Get hull points
hull_pts = cv2.convexHull(cnts)
self.hull_pts_ = hull_pts
def _calc_triangle(self):
"""Calculates the minimum-enclosing triangle around the wings."""
hull_pts = self.hull_pts_
# CALCULATE TRIANGLE
area,tri = cv2.minEnclosingTriangle(hull_pts) # Min triangle
tri = tri.reshape((3,2))
# REDEFINE TRIANGLE VERTICES (ABC)
# A: left-most: lowest x-valued points
a = np.argmin(tri[:,0])
# A: upperleft-most: lowest y-valued of the 2 lowest x-valued points
#a = np.argmin(tri[np.argsort(tri[:,0])[:2],1])
# A: closest point to origin (0,0)
#a = np.argmin([np.sqrt([pt[0]**2 + pt[1]**2) for pt in tri])
# TODO: refine strategy for determining vertex A
# B: uppermost (lowest y-value [in img coord sys]) of the remaining 2
remaining_two = list({0,1,2}-{a})
b = remaining_two[np.argmin(tri[remaining_two,1])]
# C: remaining vertex
c = list({0,1,2} - {a,b})[0]
A,B,C = [tri[i] for i in [a,b,c]]
tri_new = np.array((A,B,C))
self.tri_ = tri_new
def _define_grid(self):
"""Calculate slopes for grid rows and radii for columns."""
A,B,C = self.tri_
hull_pts = self.hull_pts_
n_grid_rows = self.n_grid_rows
n_grid_cols = self.n_grid_cols
        # Randomly sample a subset of hull points if there are lots
if len(hull_pts)>200:
hull_pts = random.sample(hull_pts,200)
else:
hull_pts = hull_pts
# Calculate radii
dists = [euclidean(A,i) for i in hull_pts]
radii = np.linspace(min(dists), max(dists),n_grid_cols+1)
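        # e.g. if the nearest and farthest wing pixels are 10 and 30 px from
        # vertex A and n_grid_cols is 2, np.linspace gives three column
        # boundaries: radii = [10., 20., 30.]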
# Calculate slopes:
# Note: Since we're in image coordinates (y-axis is flipped),
# upper slope is typically neg & lower is typically pos.
slopeAB = slope(A,B) # slope of the upper edge
slopeAC = slope(A,C) # slope of the lower edge
thetas = np.linspace(slopeAB, slopeAC,n_grid_rows+1)
self.radii_ = radii
self.thetas_ = thetas
    @staticmethod
def _get_px_coords_for_cell(cell_row,cell_col,tri,radii,thetas,
step=2):
"""Calculates the (x,y) coordinates of pixels in a grid cell.
Used in self._get_coords().
Parameters
----------
cell_row,cell_col : ints
Index of cell, where 0<=cell_row<=n_grid_rows and
0<=cell_col<=n_grid_cols
tri,radii,thetas : params
Parameters of the grid
step : int (default 2)
Computation-saving parameter, will return every `skip` pixel
in the row and column direction. With `step=2`, coordinates
are returned for every other pixel, row-wise and column-wise.
With `step=1`, all pixel coordinates are returned.
Returns
-------
coords : array, shape (~n_pixels_in_cell/(step**2),2)
(x,y) coordinates for pixels in the specified cell
"""
A,B,C = tri
row = cell_row
col = cell_col
        # Get cell's corners (coords of the intersections of its 2 lines and 2 arcs)
rL,rR = radii[[col,col+1]] # radii of left and right bounding arcs
tU,tD = thetas[[row,row+1]] # slope of upper and lower bounding lines
corner_UL = polar2cart(rL,tU,originXY=A) # upper left corner (on image)
corner_LL = polar2cart(rL,tD,originXY=A) # lower left corner (on image)
corner_UR = polar2cart(rR,tU,originXY=A) # upper right corner (on image)
corner_LR = polar2cart(rR,tD,originXY=A) # lower right corner (on image)
corner_UL,corner_LL,corner_UR,corner_LR = \
np.array([corner_UL,corner_LL,corner_UR,corner_LR],dtype=int)
# Calculate cell's bounding box as (left,right,top,bottom)
if min(tU,tD)<0<max(tU,tD): # if cell crosses the x-axis
bbL = min(corner_UL[0], corner_LL[0])
# right bound may be on arc, which crosses x-axis at (rR+A[0],0)
bbR = max(corner_UR[0], corner_LR[0], int(rR+A[0]))
bbU = min(corner_UL[1], corner_UR[1])
bbD = max(corner_LL[1], corner_LR[1])
else:
bbL = min(corner_UL[0], corner_LL[0])
bbR = max(corner_UR[0], corner_LR[0])
bbU = min(corner_UL[1], corner_UR[1])
bbD = max(corner_LL[1], corner_LR[1])
# List the coordinates of every `step` pixel in the cell's bounding box
coords = np.array([(x,y) for y in range(bbU,bbD,step) \
for x in range(bbL,bbR,step)])
# Convert those (x,y) pixel coordinates to (r,theta) polar coords
coords_pol = np.array(list(map(lambda i: cart2polar(*i,originXY=A),
coords)))
# Find pixel coordinates within grid cell
rWise = np.bitwise_and(rL<=coords_pol[:,0],
coords_pol[:,0]<rR)
thetaWise = np.bitwise_and(tU<=coords_pol[:,1],
coords_pol[:,1]<tD)
coords = coords[np.bitwise_and(rWise,thetaWise)]
return coords
@staticmethod
def _safe_extract(x,y,image,mask,h,w):
"""Extracts the pixel value from an image at coordinate (x,y),
returning np.nan if (x,y) is out-of-bounds or off the mask.
Used in `self._sample_grid()`.
Parameters
----------
x,y : int
Pixel coordinates, where (x,y) corresponds to the pixel at
image[y,x]
image : array
Input image
mask : array
Image mask from fitted grid model
h,w : int
The height and width, respectively, `image`
"""
if x<0 or y<0: # nan if (x,y) is out of left or top bounds
return np.nan
elif x>w-1 or y>h-1: # nan if (x,y) is out of right or bottom bounds
return np.nan
elif not mask[y,x]: # nan if pixel is outside of object's mask
return np.nan
else: # otherwise return the pixel value at coord (x,y)
return image[y,x]
@staticmethod
def _sample_grid(self,image,mask,coords):
"""Samples image using pre-fitted grid and image mask.
Used in `self.fit_sample()`.
Parameters
----------
image : array
Input image
mask,coords : params
Parameters from fitted grid
Returns
-------
features : array, shape (n_features,)
Features calculated by sampling input image. `np.nan` is returned
for features from 'edge cells' (see `f_mask_` description below).
f_mask : bool array, shape (n_features_,)
List describing condition of the cell where each feature is derived.
`False` means that the cell contains some of the input image's mask
and/or that the cell is out of the bounds of the image. Cells that
meet either/both of these conditions are called 'edge cells'. `True`
means that the cell is not an 'edge cell'. This mask is used later
for filtering out edge cells among a set features extracted from
multiple images.
"""
f_mask = [] # image's feature mask
features = [] # image featured
h,w = image.shape[:2]
for channel in cv2.split(image): # for each color channel: (r,b,g)
# Wrapper for _safe_extract()
extract = lambda px: self._safe_extract(px[0],px[1],channel,
mask,h,w)
means = [] # grid's cell means
stDevs = [] # grid's cell standard deviations
f_m = [] # holds grid's feature mask
for cell in coords: # for each cell in the grid
# extract pixel values while also checking that each pixel is
# within the image and within the image mask
vals = np.array(list(map(extract,cell)))
# drop any nans
vals_no_nan = vals[~np.isnan(vals)]
# if sampling cell returned any nans, f_mask=False, else True
if len(vals_no_nan)<len(vals):
f_m.append(False)
else:
f_m.append(True)
# Calculate the means & std of non-nan values in the cell
if len(vals_no_nan)==0:
m = np.nan
sd = np.nan
else:
m = np.mean(vals_no_nan)
sd = np.std(vals_no_nan)
means.append(m)
stDevs.append(sd)
features.extend(means)
features.extend(stDevs)
f_mask.extend(f_m+f_m) # extend f_mask once for mean & once for std
return np.array(features),np.array(f_mask)
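    # Feature layout produced above: for each of the 3 color channels the
    # cell means are appended first, then the cell standard deviations, so
    # len(features) == n_grid_rows*n_grid_cols*2*3 (e.g. a 5x10 grid gives
    # 5*10*2*3 = 300 features), and `f_mask` has the same length and order.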
def _get_coords(self):
"""Calculates pixel coordinates for all cells in the grid."""
n_grid_rows = self.n_grid_rows
n_grid_cols = self.n_grid_cols
tri = self.tri_
radii = self.radii_
thetas = self.thetas_
# Get row,col indices for all cells in the grid
cells = [(row,col) for row in range(n_grid_rows) \
for col in range(n_grid_cols)]
# Find the px coordinates in each cell
coords = [self._get_px_coords_for_cell(r,c,tri,radii,thetas,
step=2) for (r,c) in cells]
self.cell_px_coords_ = coords
def fit(self,image):
"""Fit grid to an input image without sampling the image, returning
`self`.
Parameters
----------
image : array
Input image
"""
# Check image first
if image is None:
raise ValueError('Input image is `None`.')
# Downsample large images
im = downscaleIf(image, max_dim=self.max_dim)
self._get_mask(im) # mask the image
self._get_hull_pts() # convex hull of the image mask
        self._calc_triangle() # find the min-enclosing triangle
self._define_grid() # define radii & thetas
self._get_coords() # find the coordinates of pixels in each grid cell
return self # allows this method to be chained to others
def fit_sample(self,image):
"""Fit grid to an input image, and then sample
color features from the image.
Parameters
----------
image : array
Input image
Returns
-------
features : array, shape (n_features,)
Features calculated by sampling input image. `np.nan` is returned
for features from 'edge cells' (see `f_mask_` description below).
f_mask : bool array, shape (n_features_,)
List describing condition of the cell where each feature is derived.
`False` means that the cell contains some of the input image's mask
and/or that the cell is out of the bounds of the image. Cells that
meet either/both of these conditions are called 'edge cells'. `True`
means that the cell is not an 'edge cell'. This mask is used later
            for filtering out edge cells among a set of features extracted from
multiple images.
"""
# Check image first
if image is None:
raise ValueError('Input image is `None`.')
# Downsample large images
im = downscaleIf(image, max_dim=self.max_dim)
# Fit grid to image
self.fit(im)
# Convert image from RGB to chromatic coordinates
if self.use_chrom:
im = RGB2chrom(im,blur=self.blur)
        features,f_mask = self._sample_grid(im,self.mask_,
                                            self.cell_px_coords_)
self.features_ = features
self.f_mask_ = f_mask
return features,f_mask
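    # Usage sketch (the image path is illustrative; any RGB array works):
    #
    #   im = cv2.cvtColor(cv2.imread('wing.png'), cv2.COLOR_BGR2RGB)
    #   features, f_mask = Grid().fit_sample(im)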
def get_params(self):
"""Get a dictionary of the grid model's current parameters."""
keys = ['background',
'cell_px_coords_',
'f_labels_',
'f_mask_',
'features_',
'hull_pts_',
'mask_',
'max_dim',
'min_object_size',
'n_cells_',
'n_features_',
'n_grid_cols',
'n_grid_rows',
'radii_',
'thetas_',
'tri_',
'use_chrom']
d = {}
for k in keys:
if hasattr(self,k):
d[k] = getattr(self,k)
return d
def plot_grid(self,image=None,show_gridlines=True,show_cell_nums=True,
show_tri=True,show_edge_cells=True,use_chrom=False,
filepath=None):
"""Vizualize a fitted and sampled grid.
Raises RuntimeError if grid has not been fitted and sampled.
Parameters
----------
image : array, optional (default None)
Input image on which the grid has been fitted. If no image is
provided, grid will be plotted on a canvas matching `background`.
show_gridlines : bool, optional (default True)
Whether to show the gridlines of the fitted grid.
show_cell_nums : bool, optional (default True)
Whether to show the cell numbers of the fitted grid.
show_tri : bool, optional (default True)
Whether to show the minimum-enclosing triangle used to fit the grid.
show_edge_cells : bool, optional (default True)
Whether to show 'edge cells' (for which f_mask is False).
use_chrom : bool, optional (default False)
If an image is provided, whether to convert it to chromatic
coordinates before plotting it.
filepath : string (default None)
Optional filepath to save figure directly to file. A path ending in
`.png` is recommended, e.g. `path/to/file.png`
"""
if not hasattr(self,'f_mask_'):
raise RuntimeError('Grid has not yet been fit to an image. Try'
' `g=Grid().fit_sample(image);g.plot_grid()`.')
A,B,C = tri = self.tri_
thetas = self.thetas_
radii = self.radii_
coords = self.cell_px_coords_
background = self.background
# Set up plot colors
colors = [(103, 255, 30), # for triangle
(103, 255, 30), # for triangle vertices
( 0, 95, 254), # for grid lines
(240, 59, 0), # for grid cell numbers
(191, 191, 191), # for edge cells, gray
]
# Rescale values to between 0 and 1
colors = [(r/255.,g/255.,b/255.) for r,g,b in colors]
# Grid linewidth
glw = 1.
# Font dictionary
fontdict={'weight':'bold',
'fontsize':12}
if image is not None:
# Downsample large images
im = downscaleIf(image, max_dim=self.max_dim)
else:
im = self.mask_ # used only to get image shape for adjusting bounds
if use_chrom and image is not None:
# Convert image to chromatic coordinates
im = RGB2chrom(im)
if show_edge_cells:
f_mask = self.f_mask_
n_grid_rows = self.n_grid_rows
n_grid_cols = self.n_grid_cols
# Get channel,statistic,row,col indices for all cells in the grid
cells = [(ch,st,row,col) for ch in range(3) for st in range(2)\
for row in range(n_grid_rows) for col in range(n_grid_cols)]
# Get indices of non-contributing cells ('edge cells')
cells_noncontrib = np.array(cells)[~f_mask]
# PLOT
fig, ax = plt.subplots()
# Add image
if image is not None:
ax.imshow(im)
# Change color of figure background to match image background
if background=='white':
ax.set_facecolor('white')
elif background=='black':
ax.set_facecolor('black')
# Plot edge cells
if show_edge_cells:
patches = []
for (_,_,r,c) in cells_noncontrib:
w = Wedge(A,
radii[c+1], # outer radius
thetas[r], # theta1 (in deg)
thetas[r+1], # theta2 (in deg)
width=radii[c+1]-radii[c]) # outer - inner radius
patches.append(w)
p = PatchCollection(patches,
facecolor=colors[4],
edgecolor=None,
alpha=0.2)
ax.add_collection(p)
# Plot triangle
if show_tri:
#Draw points of triangle
plt.plot(*tri[[0,1,2,0]].T,
color=colors[0],
marker='o',
markersize=5,
lw=glw,
markeredgecolor='none')
#Label triangle points
for lbl,coord in zip(('ABC'),tri):
ax.annotate(lbl,
xy=(0,0),
xytext=coord+(10,10),
color=colors[1],
**fontdict)
# Show grid
if show_gridlines:
#Draw arcs as PatchCollection wedges
patches = []
for r in radii:
w = Wedge(A,r,thetas[0],thetas[-1], width=0.001)
patches.append(w)
p = PatchCollection(patches,
facecolor='none',
edgecolor=colors[2],
lw=glw)
ax.add_collection(p)
#Draw lines
for t in np.deg2rad(thetas):
x0 = (radii[0] * np.cos(t)) + A[0]
y0 = (radii[0] * np.sin(t)) + A[1]
x1 = (radii[-1] * np.cos(t)) + A[0]
y1 = (radii[-1] * np.sin(t)) + A[1]
line = lines.Line2D((x0,x1),(y0,y1),
color=colors[2],
lw=glw)
ax.add_line(line)
# Annotate cells with their cell number
if show_cell_nums:
# Get centroids of each cell
cell_centers = list(map(lambda c: np.mean(c,axis=0), coords))
for i,(x,y) in enumerate(cell_centers):
ax.text(x,y,str(i),
color=colors[3],
ha='center',
va='center',
**fontdict)
# Adjust the plot's boundaries
buf = (0.02*radii[-1]) # buffer
xmin = min(0, # left side of image
A[0]-buf) # A's x-value
xmax = max(A[0]+radii[-1]+buf, # right side of grid
B[0]+buf, # B's x-value
C[0]+buf, # C's x-value
im.shape[1]) # right side of image
ymax = min(0, # top of image
B[1]-buf, # B's y-value
# top corner of grid
radii[-1]*np.sin(np.deg2rad(thetas[0]))+A[1]-buf)
ymin = max(im.shape[0], # bottom of image
C[1]+buf, # C's y-value
# bottom corner of grid
radii[-1]*np.sin(np.deg2rad(thetas[-1]))+A[1]+buf)
ax.set_xlim(xmin,xmax)
ax.set_ylim(ymin,ymax)
# adjust plot padding
plt.subplots_adjust(bottom=0.06,
left= 0.06,
right= 0.96,
top= 0.96
)
if filepath is None:
plt.show()
elif type(filepath) is str:
plt.savefig(filepath,dpi=95,
bbox_inches='tight',transparent=True,pad_inches=0)
else:
raise ValueError('Filepath should be a str path to file, like `path/to/file.png`')
# TODO: Add option to write figure to file.
# TODO: Add option to manually provide alternative colors for items.
################################################################################
# GRID ANALYZER class
class Analyze():
"""Grid Features Analyzer
Analyzes features extracted from multiple wing images using
`Grid.fit_sample()`.
Parameters
----------
    features : array, shape (n_samples,n_features)
        Dataset of features extracted from images using `Grid.fit_sample()`.
    f_mask : bool array, shape (n_samples,n_features), optional
Feature mask determined using `Grid.fit_sample()`. It is strongly
recommended that this be provided.
scale_features : bool, optional (default True)
Whether or not to scale features with
`sklearn.preprocessing.StandardScaler` prior to transformation.
Attributes
----------
n_features_ : int
The number of features in the dataset (no. columns of `features`)
n_samples_ : int
The number of individuals in the dataset (no. rows of `features`)
f_mask_common_ : array of bools, shape (n_features,)
Common feature mask: for each feature, if all values for that feature
in `f_mask` are True, then the corresponding value in `f_mask_common_`
is True, otherwise it is False. Features with "True" in this list
were derived from cells that were consistently non-edge among all the
grids fitted to images in the dataset.
n_components_ : int
The number of components in the transformed dataset.
- For PCA, n_components = min(n_features,n_samples)
- For LDA, n_components = n_classes-1
n_features_masked_ : int
The number of features in the dataset after masking. This is equal to
the number of 'True's in `f_mask_common_`.
features_masked_ : array
Features after masking.
features_transformed_ : array, shape (n_samples,n_components)
Features transformed with PCA or LDA.
loadings_ : array
The matrix used in the PCA or LDA to transform data points, i.e. the
'loading' values. Rows of this represent the contribution that each
feature makes to a principal component or linear discriminant.
- For PCA, an array of shape (n_components,n_features)
- For LDA, an array of shape (n_classes,n_features)
explained_variance_ratio_ : array, shape (n_components,)
The proportion of the total variance in features that is captured in
each component
classes_ : array, shape (n_classes)
For LDA only, a list of all classes on which the LDA was fitted
method_ : str
String describing which fit method has been run. Options: ['pca','lda']
    Notes
    -----
    Feature rows are the stacked outputs of `Grid.fit_sample()`, one row
    per image; a minimal end-to-end sketch is given in the comment just
    below this docstring.
    Examples
    --------
    >>> from wingrid import Grid,Analyze
    >>> import cv2
"""
def __init__(self,features,f_mask=None,scale_features=True):
self.features = features
self.f_mask = f_mask
self.scale_features = scale_features
self.n_samples_ = features.shape[0]
self.n_features_ = features.shape[1]
if f_mask is not None:
# Check shapes of features and f_mask match
if features.shape != f_mask.shape:
raise ValueError("`features` and `f_mask` shapes must match."
"Got %s and %s." % (str(features.shape),
str(f_mask.shape)))
self._mask_features() # Mask features
def _mask_features(self):
"""Finds a common set of features from consistently non-edge cells."""
features = self.features
f_mask = self.f_mask
f_mask_common = np.all(f_mask,axis=0)
features_masked = features.T[f_mask_common].T
self.f_mask_common_ = f_mask_common
self.features_masked_ = features_masked
self.n_features_masked_ = features_masked.shape[1]
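    # For example: if the per-image mask rows are [True, True, False] and
    # [True, False, False], then f_mask_common_ == [True, False, False] and
    # only the first feature column survives masking.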
def fit_pca(self):
"""Perform a principal component analysis (PCA) on the
features."""
# Mask features
if self.f_mask is not None:
X = self.features_masked_
else:
X = self.features
# Rescale features
if self.scale_features:
X = StandardScaler().fit_transform(X)
# Do PCA
pca = PCA(n_components=None,whiten=True)
features_transformed = pca.fit_transform(X)
self.features_transformed_ = features_transformed
self.n_samples_ = features_transformed.shape[0]
self.n_components_ = features_transformed.shape[1]
self.loadings_ = pca.components_
self.explained_variance_ratio_ = pca.explained_variance_ratio_
self.method_ = 'pca'
return self
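    # Quick check after fitting (sketch): cumulative variance captured by
    # the first few principal components.
    #
    #   an = Analyze(features, f_mask=f_mask).fit_pca()
    #   print(np.cumsum(an.explained_variance_ratio_[:5]))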
def fit_lda(self,labels):
"""Perform a linear discriminant analysis (LDA) on the features.
Parameters
----------
labels : array, shape (n_samples,)
Target values, i.e. a variable that groups samples into some
arbitrary subsets.
"""
if self.f_mask is not None:
X = self.features_masked_
else:
X = self.features
# Rescale features
if self.scale_features:
X = StandardScaler().fit_transform(X)
# Do LDA
lda = LinearDiscriminantAnalysis(n_components=None)
features_transformed = lda.fit_transform(X,labels)
self.features_transformed_ = features_transformed
self.n_samples_ = features_transformed.shape[0]
self.n_components_ = features_transformed.shape[1]
self.loadings_ = lda.coef_
self.explained_variance_ratio_ = lda.explained_variance_ratio_
self.classes_ = np.unique(labels)
self.method_ = 'lda'
return self
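    # Sketch: `labels` holds one class label per sample (values here are
    # illustrative).
    #
    #   an = Analyze(features, f_mask=f_mask).fit_lda(['sp_A','sp_A','sp_B'])
    #   an.classes_   # -> array(['sp_A', 'sp_B'], dtype='<U4')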
def loadings_top_contrib(self,comps=[1],n_highest=-1,grid=None):
"""Get the top-contributing features for 1+ components.
Returns either the indices or labels of the features that contributed
most to the specified components.
Parameters
----------
comps : list or 'all', optional (default [1])
List of 1 or more components with which to calculate the highest-
contributing features. '[1]' means the first component/discriminant
will be used to score feature contributions. Values must be between
1 and n_components. If 'all' is passed, highest-contributing
features are determined for all components.
n_highest : int, optional (default -1)
Number of highest-contributing features to return. Must be between
1 and n_features. If -1, returns indices for all features, sorted
from highest to lowest contribution.
grid : `Grid` instance, optional (default None)
An initialized instance of `Grid`, thus having the attribute
`f_labels_`. The grid in `grid` must match the grid used to sample
`features`.
Returns
-------
"""
# Check that a model has been fitted already
if not hasattr(self,'method_'):
raise RuntimeError('A model has not yet been fit to the features. '
'Run `Analyze.fit_pca()` or `Analyze.fit_lda()`.')
# Check that comps is a list and values are within proper range
        if comps == 'all':
comps = range(1,self.n_components_+1)
elif not type(comps)==list:
raise ValueError("`comps` must be list or 'all'. Got type: {}"\
.format(type(comps)))
else:
for c in comps:
if not 1<=c<=self.n_components_:
raise ValueError('`comps` values must be between 1 &'
' n_components')
loadings = self.loadings_ # Loadings
        if self.f_mask is not None:
            n_f = self.n_features_masked_
        else:
            n_f = self.n_features_
        if n_highest == -1: # documented default: return all features
            n_highest = n_f
        # Check 'n_highest' value
        if not 1<=n_highest<=n_f:
            raise ValueError('`n_highest` must be between 1 and the number '
                             'of (masked) features, or -1 for all.')
# Calculate highest-contributing loading values by their distance from origin
loading_dists = np.zeros(n_f)
for i in range(n_f):
# calculate Euclidean distance from loadings to origin:
loading_dists[i] = np.sqrt(np.sum([loadings[j,i]**2 for j in [c-1 for c in comps]]))
# get indices for `loading_dists` sorted highest to lowest
hc = np.argsort(loading_dists)[::-1]
hc = hc[:n_highest] # indices of the highest contributors
# if something is provided for `labels`
if grid is not None:
# if it's a `grid` instance, get f_labels
if hasattr(grid,'f_labels_'):
f_labels = grid.f_labels_
else:
raise ValueError('`grid` has not been initialized.')
# check length of labels
if not len(f_labels)==self.n_features_:
raise ValueError('The grid size in `grid` and n_features do '
'not match.')
if self.f_mask is not None:
# Mask labels
f_labels = f_labels[self.f_mask_common_]
return f_labels[hc]
# otherwise return indices of the highest contributing features
return hc
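    # Sketch: indices vs. labels of the five features that contribute most
    # to the first component (`g` is the fitted Grid used for sampling):
    #
    #   an.loadings_top_contrib(comps=[1], n_highest=5)           # indices
    #   an.loadings_top_contrib(comps=[1], n_highest=5, grid=g)   # labels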
def plot_comps(self,labels,comps=[1,2],filter_by=None,color_by=None,
label_classes=True,indiv_labels=None,title=None,
center_at_origin=True):
"""Draw a 2D scatterplot of transformed data.
Parameters
----------
labels : list-like of shape (n_samples,)
Labels used to determine classes. Points for a class
are connected by lines extending from the class' centroid.
comps : list of 2 ints, optional (default [1,2])
Which two PCs or LDs to plot. [1,2] means the first two PCs or LDs.
Values must be between 1 and n_components.
filter_by : list-like of shape (n_samples,), optional
A Boolean list used to filter the plot to a subset of the samples
for which `filter_by`==True. Default is None (no filtering).
This is helpful for highlighting a subset of the data, while
holding the plot limits constant.
color_by : list-like of shape (n_samples,), optional
Alternative grouping variable for coloring individual points.
            If not provided, individuals are colored by class (as determined
using `labels`)
label_classes : bool, optional (default True)
If True, class labels are placed at each class' centroid.
indiv_labels : list-like of shape (n_samples,), optional
List of labels for individuals (e.g., filenames or sample codes).
Labels are placed alongside each individual's coordinate.
If provided, must be of length n_samples even if `filter_by` is
also provided. If None, individuals are not labeled.
title : str (default None), optional
A string to be added to the end of the plot's title.
center_at_origin : bool (default True), optional
If True, plot is centered at (0,0); otherwise, it's centered at
the centroid of the data. Centering at the origin looks nice, but
            is not always convenient, particularly when `filter_by` is used.
"""
# Check that a model has been fitted already
if not hasattr(self,'method_'):
raise RuntimeError('A model has not yet been fit to the features. '
'Run `Analyze.fit_pca()` or `Analyze.fit_lda()`.')
X = self.features_transformed_
method = self.method_
n_components = self.n_components_
explained_variance_ratio = self.explained_variance_ratio_
# Check `comps`
if len(comps)!=2:
raise ValueError('`comps` must be a list of length 2.')
        elif not (1<=comps[0]<=n_components and 1<=comps[1]<=n_components):
raise ValueError('`comps` values must be between 1 & n_components.')
# Check `labels`
if not len(labels)==len(X):
raise ValueError('`labels` must be of length n_samples.')
else:
labels = np.array(labels) # make sure labels is an array
# Check `indiv_labels`
if indiv_labels is not None: # if individual labels are provided
# Check length of indiv_labels
if not len(indiv_labels)==len(X):
raise ValueError("`indiv_labels` must be of length n_samples.")
# Check `color_by`
if color_by is not None: # if individual labels are provided
# Check length of color_by
if not len(color_by)==len(X):
raise ValueError("`color_by` must be of length n_samples.")
# Check & apply `filter_by`
if filter_by is not None:
filter_by = np.array(filter_by) # Make sure its an array
if not len(filter_by)==len(X):
raise ValueError('`filter_by` must be of length n_samples.')
            elif filter_by.dtype != np.dtype('bool'):
raise ValueError('`filter_by` must be Boolean')
else: # Apply `filter_by`
X = X[filter_by]
labels = labels[filter_by]
if indiv_labels is not None:
indiv_labels = indiv_labels[filter_by]
# PRE-PLOT
classes = np.unique(labels) # get classes for connecting indivs
if color_by is not None:
# Get iterator list specifying color for each indiv
groups = np.unique(color_by)
group_colors = plt.cm.hsv(np.linspace(0.,0.75,len(groups)))
gc_dict = dict(zip(groups,group_colors))
group_colors_list = np.array([gc_dict[g] for g in color_by])
else:
# Get list for coloring by class
class_colors = plt.cm.hsv(np.linspace(0.,0.75,len(classes)))
plot_colors = [( 0, 0, 0), # class labels color: solid black
( 89, 89, 89), # indiv labels color: 35% gray
]
# Rescale values to between 0 and 1
plot_colors = [(r/255.,g/255.,b/255.) for r,g,b in plot_colors]
# Get axes limits
if center_at_origin:
buf = 0.2*np.abs(X[:,[comps[0]-1,comps[1]-1]]).std()
mmax = np.abs(X[:,[comps[0]-1,comps[1]-1]]).max() + buf
xmax = ymax = mmax
xmin = ymin = -mmax
elif not center_at_origin: # TODO: 'off' doesn't seem to work here
center = X.mean(axis=0) # find centroid
resid = X - center # center data at centroid
buf = 0.2*np.abs(resid[:,[comps[0]-1,comps[1]-1]]).std()
mmax = np.abs(resid[:,[comps[0]-1,comps[1]-1]]).max() + buf
xmax = mmax + center[0]
ymax = mmax + center[1]
xmin = -mmax + center[0]
ymin = -mmax + center[1]
else:
raise ValueError('`center_at_origin` must be Boolean.')
# PLOT
fig,ax = plt.subplots(nrows=1,ncols=1,figsize=(12,9))
# draw lines at origin
ax.vlines(0.,ymin,ymax,
colors='k',
linestyle='solid',
alpha=0.4)
ax.hlines(0.,xmin,xmax,
colors='k',
linestyle='solid',
alpha=0.4)
# Main plotting loop:
if color_by is not None: # If color_by is provided:
for cl in classes: #For each class
points = np.atleast_2d(X[labels==cl])
points = points[:,[comps[0]-1,comps[1]-1]]
colors = np.atleast_2d(group_colors_list[labels==cl])
# Calculate centroid
mx,my = points.mean(axis=0)
# Plot centroid (gray)
ax.plot(mx,my,'+',
alpha=0.5,
ms=10,
color='k',
markeredgewidth=1)
# Draw lines from all points to centroid (gray)
for x,y in points: #For each indiv with `bn`
ax.plot((x,mx),(y,my),
linestyle='solid',
color='k',
alpha=0.3)
# Plot all points (lookup colors from group_colors_list)
plt.scatter(points.T[0],points.T[1],
marker='o',
c=colors,
linewidths=0,
s=100,
alpha=0.5)
else: # If color_by is not provided:
for cl,color in zip(classes,class_colors): #For each class
points = np.atleast_2d(X[labels==cl])
points = points[:,[comps[0]-1,comps[1]-1]]
# Calculate centroid
mx,my = points.mean(axis=0)
# Plot centroid
ax.plot(mx,my,'+',
alpha=0.5,
ms=10,
color=color,
markeredgewidth=1)
# Draw lines from all points to centroid
for x,y in points: #For each indiv with `bn`
ax.plot((x,mx),(y,my),
linestyle='solid',
color=color,
alpha=0.3)
# Plot all points
plt.scatter(points.T[0],points.T[1],
marker='o',
c=color,
linewidths=0,
s=100,
alpha=0.5)
# Label individuals
if indiv_labels is not None: # if individual labels are provided
for (x,y),il in zip(X[:,[comps[0]-1,comps[1]-1]],indiv_labels):
ax.text(x+0.15*buf,y-0.15*buf, # offset coordinates by 15%
il,
color=plot_colors[1],
fontsize=10,
ha='left',
va='top')
# Label classes at their centroids
# This is separate & below individual labels so that class labels are on top
if label_classes:
for cl in classes: #For each class
points = np.atleast_2d(X[labels==cl])
points = points[:,[comps[0]-1,comps[1]-1]]
# Calculate centroid
mx,my = points.mean(axis=0)
# Label centroid with class label
ax.text(mx+0.1*buf,my+0.1*buf, # offset coordinates by 10%
cl,
color=plot_colors[0],
fontsize=14,
ha='left',
va='bottom')
# Label axes
if method=='pca':
            xlabel = 'PC{} ({:.2f}% of total var)'.format(comps[0],explained_variance_ratio[comps[0]-1]*100)
            ylabel = 'PC{} ({:.2f}% of total var)'.format(comps[1],explained_variance_ratio[comps[1]-1]*100)
        elif method=='lda':
            xlabel = 'LD{} ({:.2f}% of total var)'.format(comps[0],explained_variance_ratio[comps[0]-1]*100)
            ylabel = 'LD{} ({:.2f}% of total var)'.format(comps[1],explained_variance_ratio[comps[1]-1]*100)
# Set axes limits to center origin
ax.set_xlim(xmin,xmax)
ax.set_ylim(ymin,ymax)
# Change color of the background outside plot to white
ax.set_facecolor('white')
# Label axes
plt.xlabel(xlabel,fontsize=12)
plt.ylabel(ylabel,fontsize=12)
# Title the plot
if method=='pca':
plot_title = 'PC{} & {}'.format(comps[0],comps[1])
elif method=='lda':
plot_title = 'LD{} & {}'.format(comps[0],comps[1])
if title is not None:
plot_title += (' - '+title)
plt.title(plot_title,fontsize=14)
# adjust plot padding
plt.subplots_adjust(bottom=0.06,
left= 0.06,
right= 0.96,
top= 0.96
)
plt.show()
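    # Sketch: scatter samples on the first two components, optionally
    # highlighting one (illustrative) group while keeping the axes fixed:
    #
    #   an.plot_comps(labels, comps=[1,2],
    #                 filter_by=np.array(labels)=='sp_A')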
def loadings_plot_bar(self,grid,comp=1):
"""Draw bar plot of loadings for a component of transformed features.
Parameters
----------
comp : int, optional (default 1)
Principal component or linear discriminant to display loadings for.
'1' means the first component/discriminant. Must be between
1 and n_components.
grid : `Grid` instance
An instance of `Grid` that has been initialized, so that it
            has the attribute `f_labels_`. The grid parameters must
            match those used to extract the features.
"""
# Check that a model has been fitted already
if not hasattr(self,'method_'):
raise RuntimeError('A model has not yet been fit to the features. '
'Run `Analyze.fit_pca()` or `Analyze.fit_lda()`.')
# Check grid
if not hasattr(grid,'f_labels_'):
raise ValueError("Entry for `grid` not recognized.")
# Check comp
if not 1<=comp<=self.n_components_:
raise ValueError('`comp` must be between 1 & n_components.')
f_labels = grid.f_labels_ # Feature labels
loadings = self.loadings_ # Loadings
f_mask_common = self.f_mask_common_ # Feature mask
explained_variance_ratio = self.explained_variance_ratio_
method = self.method_
# check that grid size and features length match up
if not len(f_mask_common)==len(f_labels):
raise ValueError("It looks like the number of features in "
"`f_mask` don't match the grid size in `grid`.")
if self.f_mask is not None:
# Mask feature labels
f_labels = f_labels[f_mask_common]
# Get variance for specified component
var = explained_variance_ratio[comp-1]
# Set up plot and subplots
fig,ax = plt.subplots(nrows=1,ncols=1,facecolor='w',figsize=(15,3))
# Add horizontal grid
ax.grid(color='k',
alpha=0.3,
which='major',
axis='y',
linewidth=0.5,
linestyle='solid')
# Plot loadings (Axes.vlines() looks nicer than Axes.bar())
ax.vlines(x=range(len(loadings[0])), # x-value for each line
ymin=0,
ymax=loadings[comp-1], # length of each line
linestyles='solid',
color='k',
linewidths=1.2)
# Set axis limits
ymin = loadings[comp-1].min()-0.1*loadings[comp-1].std()
ymax = loadings[comp-1].max()+0.1*loadings[comp-1].std()
ax.set_xlim(-2,loadings.shape[1]+1) # add a little space above 1st bar and below last bar
ax.set_ylim(ymin,ymax)
# Label y-axis
if method=='pca':
ax.set_ylabel('Loadings on PC{}\n({:.2f}% of total var)'.format(comp,var*100))
elif method=='lda':
ax.set_ylabel('Loadings on LD{}\n({:.2f}% of total var)'.format(comp,var*100))
# rotate the x-axis labels to vertical so they don't overlap
plt.setp(ax.xaxis.get_majorticklabels(),rotation='vertical')
# Label the x-ticks using the f_labels:
ax.set_xticks(np.arange(len(loadings[0])))
ax.set_xticklabels(f_labels)
# adjust x-axis
ax.tick_params(axis='x',
labelsize=10, # sets fontsize for x- and y-labels
length=0 # effectively removes x-tick marks from plot
)
# Change color of the background outside plot to white
ax.set_facecolor('white')
# adjust plot padding
plt.subplots_adjust(bottom=0.25,
left= 0.09,
right= 0.97,
top= 0.99
)
plt.show()
# TODO: This plot looks cramped when there are more than ~100 features
# Perhaps only label the top-contributing features when n_features>100
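    # Sketch: bar plot of the loadings on the second component, using the
    # same fitted grid `g` that produced the features:
    #
    #   an.loadings_plot_bar(g, comp=2)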
def loadings_plot_2d(self,grid,comps=[1,2],n_highest=10,title=None):
"""Scatterplot that shows, in 2 dimensions, the contribution that
each feature makes toward the selected components. Thus, points that
are relatively far away from the origin represent high contributing-
features. The top `n_highest` contributing features are highlighted
and labeled.
This plot is adapted from one in Valentine Svensson's 29 November 2012
blog post entitled, "Loadings with scikit-learn PCA"
(http://www.nxn.se/valent/loadings-with-scikit-learn-pca and
https://gist.github.com/vals/4172579).
Parameters
----------
comps : list of ints, optional (default [1,2])
Principal components or linear discriminants to display loadings
for. [1,2] means the first two PCs/LDs. Must be between 1 and
n_components.
grid : `Grid` instance
An instance of `Grid` that has been initialized, so that it
            has the attribute `f_labels_`. The grid parameters must
            match those used to extract the features.
n_highest : int, optional (default 10)
Number of highest-contributing features to highlight in the plot.
If `f_mask` was provided, must be between 1 and n_features_masked,
otherwise must be between 1 and n_features.
title : str or None (default), optional
A string that will be added to the end of the plot's title.
"""
# Check that a model has been fitted already
if not hasattr(self,'method_'):
raise RuntimeError('A model has not yet been fit to the features. '
'Run `Analyze.fit_pca()` or `Analyze.fit_lda()`.')
# Check grid
if not hasattr(grid,'f_labels_'):
raise ValueError("Entry for `grid` not recognized.")
# Check `comps` values
if len(comps)!=2:
raise ValueError('`comps` must be a list of length 2.')
        elif not (1<=comps[0]<=self.n_components_ and 1<=comps[1]<=self.n_components_):
raise ValueError('`comps` values must be <=n_components.')
f_labels = grid.f_labels_ # Feature labels
loadings = self.loadings_ # Loadings
explained_variance_ratio = self.explained_variance_ratio_
method = self.method_
# Set number of features and check `n_highest` value
if self.f_mask is not None:
n_f = self.n_features_masked_
if not 1<=n_highest<=n_f:
raise ValueError('`n_highest` must be <=n_features_masked.')
else:
n_f = self.n_features_
if not 1<=n_highest<=n_f:
raise ValueError('`n_highest` must be <=n_features.')
# Get variance for specified component
var = explained_variance_ratio[[c-1 for c in comps]]
# Mask feature labels
if self.f_mask is not None:
f_labels = f_labels[self.f_mask_common_]
# Calculate highest-contributing loading values by their distance from origin
loading_dists = np.zeros(n_f)
for i in range(n_f):
# calculate Euclidean distance from loadings to origin (0,0):
loading_dists[i] = np.sqrt(loadings[comps[0]-1,i]**2 + \
loadings[comps[1]-1,i]**2)
# get indices for `loading_dists` sorted highest to lowest
hc = np.argsort(loading_dists)[::-1]
hc = hc[:n_highest] # indices of the highest contributors
# Needed to set axis limits and draw lines at origin
mmax = np.abs(loadings[[comps[0]-1,comps[1]-1]]).max()+\
0.2*loadings[[comps[0]-1,comps[1]-1]].std()
# Set up plot and subplots
fig,ax = plt.subplots(nrows=1,ncols=1,facecolor='w',figsize=(9,9))
# draw lines at origin
ax.vlines(0.,-mmax,mmax,
colors='k',
linestyle='solid',
alpha=0.5)
ax.hlines(0.,-mmax,mmax,
colors='k',
linestyle='solid',
alpha=0.5)
# Plot loadings for each PC as a separate bar plot:
ax.plot(*loadings[[comps[0]-1,comps[1]-1]],
marker='o',
markersize=14,
markeredgecolor='None',
markerfacecolor='k',
linestyle='None',
alpha=0.3)
# circle highest-contributing features
# (plot them in reverse order so top contrib is on top)
color = iter(plt.cm.viridis_r(np.linspace(0,1,len(hc))))
# using viridis_r matches colors with loadings_image_overlay
# (top contrib is yellow)
        for i,c in list(zip(hc,color))[::-1]:
ax.plot(*loadings[[comps[0]-1,comps[1]-1],i],
marker='o',
markersize=18,
linestyle='None',
markeredgewidth=2.5,
markerfacecolor='None',
markeredgecolor=c,
alpha=0.6)
# annotate the highest-contributing features
for h in hc:
ax.annotate(f_labels[h],# Annotate with coefficient label
xy=(0,0),
xycoords='data', #dummy coords
xytext=loadings[[c-1 for c in comps],h],
textcoords='data',
alpha=0.8,
fontsize=14)
# Label x-axis
if method=='pca':
ax.set_xlabel('PC{} ({:.2f}% of total var)'.format(comps[0],var[0]*100))
ax.set_ylabel('PC{} ({:.2f}% of total var)'.format(comps[1],var[1]*100))
elif method=='lda':
ax.set_xlabel('LD{} ({:.2f}% of total var)'.format(comps[0],var[0]*100))
ax.set_ylabel('LD{} ({:.2f}% of total var)'.format(comps[1],var[1]*100))
# rotate the x-axis labels by 45 deg so they don't overlap
plt.setp(ax.xaxis.get_majorticklabels(),rotation=45)
# set axis limits
ax.set_xlim(-mmax,mmax)
ax.set_ylim(-mmax,mmax)
# Change color of the background outside plot to white
ax.set_facecolor('white')
# Title the plot
if method=='pca':
plot_title = 'Loadings on PC{} & {}'.format(comps[0],comps[1])
elif method=='lda':
plot_title = 'Loadings on LD{} & {}'.format(comps[0],comps[1])
if title is not None:
plot_title += (' - '+title)
plt.title(plot_title,fontsize=14)
# adjust plot padding
plt.subplots_adjust(bottom=0.08,
left= 0.08,
right= 0.96,
top= 0.96
)
plt.show()
# TODO: add colorbar (use code from `loadings_image_overlay`)
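    # Sketch: 2-D loadings plot for components 1 and 2, highlighting the
    # ten strongest contributors (requires the fitted grid `g`):
    #
    #   an.loadings_plot_2d(g, comps=[1,2], n_highest=10)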
def loadings_image_overlay(self,image,grid,comps=[1],show_cell_nums=False,
use_chrom=True,title=None):
"""Visualize the loadings of all features on a grid-fitted image.
This produces a 2x3 plot, where columns represent color channels (rgb),
rows represent statistic (mean or stdev), and each subplot shows the
fitted grid plotted on the appropriate channel of `image`.
Parameters
----------
image : array
Input image on which to overlay a representation of loading values
grid : `Grid` instance
An instance of `Grid` fitted to the input image. The grid in `grid`
must match the grid used to sample `features`.
comps : list or 'all', optional (default [1])
List of 1 or more components with which to calculate the highest-
contributing features. '[1]' means the first component/discriminant
will be used to score feature contributions. Values must be between
1 and n_components. If 'all' is passed, highest-contributing
features are determined for all components.
            Note: 'all' gives results which seem to vary every time a PCA is fit
to the same data. Use with caution.
title : str or None (default), optional
A string that will be added to the end of the plot's title.
"""
# Check that a model has been fitted already
if not hasattr(self,'method_'):
raise RuntimeError('A model has not yet been fit to the features. '
'Run `Analyze.fit_pca()` or `Analyze.fit_lda()`.')
# Check that comps is a list and values are within proper range
        if comps == 'all':
Comps = range(1,self.n_components_+1)
else:
Comps = comps
if not type(Comps)==list:
raise ValueError("`comps` must be a list or 'all'. Got type: {}"\
.format(type(Comps)))
else:
for c in Comps:
if not 1<=c<=self.n_components_:
raise ValueError('`comps` values must be between 1 &'
' n_components')
# check that `grid` has been fitted
if not hasattr(grid,'radii_'):
raise ValueError('`grid` must be a fitted instance of'
' `core.Grid`')
# Get necessary variables from `grid`
A,B,C = grid.tri_
thetas = grid.thetas_
radii = grid.radii_
coords = grid.cell_px_coords_
max_dim = grid.max_dim
background = grid.background # image background color
n_grid_rows = grid.n_grid_rows
n_grid_cols = grid.n_grid_cols
loadings = self.loadings_ # Loadings
# check that grid size and features length match up
if not self.n_features_==n_grid_rows*n_grid_cols*2*3:
raise ValueError('The grid size in `grid` and n_features do '
'not match.')
if self.f_mask is not None:
n_f = self.n_features_masked_
f_mask_common = self.f_mask_common_
else:
n_f = self.n_features_
# CONTRIBUTIONS
# Calculate highest-contributing loading values by their distance from origin
loading_dists = np.zeros(n_f)
for i in range(n_f):
# calculate Euclidean distance from loadings to origin (0,0):
loading_dists[i] = np.sqrt(np.sum([loadings[j,i]**2 for j in [c-1 for c in Comps]]))
# rescale contributions to between 0 and 1
contrib_resc_vals = (loading_dists-loading_dists.min())/ \
(loading_dists.max()-loading_dists.min()) # unsorted
# Get channel,statistic,row,col indices for all cells in the grid
cells = [(ch,st,row,col) for ch in range(3) for st in range(2)\
for row in range(n_grid_rows) for col in range(n_grid_cols)]
if self.f_mask is not None:
# Get indices of contributing cells
cells_contrib = np.array(cells)[f_mask_common]
# Get indices of non-contributing cells ('edge cells')
cells_noncontrib = np.array(cells)[~f_mask_common]
        else:
            cells_contrib = np.array(cells)
            # keep shape (0,4) so the column indexing below still works
            cells_noncontrib = np.empty((0,4),dtype=int)
# PRE-PLOT
# Set up plot colors
colors = [(103, 255, 30), # for triangle
(103, 255, 30), # for triangle vertices
( 0, 95, 254), # for grid lines
(240, 59, 0), # for grid cell numbers
(191, 191, 191), # for edge cells, gray
]
# Rescale values to between 0 and 1
colors = [(r/255.,g/255.,b/255.) for r,g,b in colors]
# Color function for coloring contributing cells
contrib_color_func = plt.cm.viridis
# Grid linewidth
glw = 0.5
# Prepare image
im = downscaleIf(image,max_dim=max_dim)
if use_chrom:
im = RGB2chrom(im)
# check that `grid` has been fitted to `image`, specifically
if not grid.mask_.shape == im.shape[:2]:
raise ValueError('`grid` does not appear to have been fitted on'
' `image`.')
# Make (then clear) dummy image for colorbar
dummy = np.atleast_2d(contrib_resc_vals)
cbar_dummy = plt.imshow(dummy,cmap=contrib_color_func)
plt.close()
# PLOT
# Set up plot: 2 rows (mean,std) x 3 cols (r,g,b)
fig,axes = plt.subplots(nrows=2,ncols=3,figsize=(12,9),sharex=True,sharey=True)
# Plot the r,g, & b image channels along columns
for ch,col in enumerate(axes.T):
for ax in col:
ax.imshow(im[:,:,ch],cmap=plt.cm.gray)
# Change color of figure background to match image background
if background=='white':
ax.set_facecolor('white')
elif background=='black':
ax.set_facecolor('black')
# Color cells by contribution
for st,row in enumerate(axes): # rows: [mean,std]
for ch,ax in enumerate(row): # cols: [r,g,b]
# For contributing cells in that axis
pertinent_cells = cells_contrib[\
np.bitwise_and(cells_contrib[:,0]==ch,cells_contrib[:,1]==st)]
pertinent_contribs = contrib_resc_vals[\
np.bitwise_and(cells_contrib[:,0]==ch,cells_contrib[:,1]==st)]
for (_,_,r,c),cont in zip(pertinent_cells,pertinent_contribs):
patch = [Wedge(A,
radii[c+1], # outer radius
thetas[r], # theta1 (in deg)
thetas[r+1], # theta2 (in deg)
width=radii[c+1]-radii[c], # outer - inner radius
)]
p = PatchCollection(patch,
facecolor=contrib_color_func(cont),
edgecolor=None,
alpha=0.7)
ax.add_collection(p)
# For non-contributing cells in that axis
pertinent_cells = cells_noncontrib[\
np.bitwise_and(cells_noncontrib[:,0]==ch,
cells_noncontrib[:,1]==st)]
patches = []
for (_,_,r,c) in pertinent_cells:
w = Wedge(A,
radii[c+1], # outer radius
thetas[r], # theta1 (in deg)
thetas[r+1], # theta2 (in deg)
width=radii[c+1]-radii[c]) # outer - inner radius
patches.append(w)
p = PatchCollection(patches,
facecolor=colors[4],
edgecolor=None,
alpha=0.5)
ax.add_collection(p)
# Draw grid and label cells
for ax in axes.flat:
# Draw grid
#Draw arcs as PatchCollection wedges
patches = []
for r in radii:
w = Wedge(A,r,thetas[0],thetas[-1], width=0.001)
patches.append(w)
p = PatchCollection(patches,
facecolor='none',
edgecolor=colors[2],
lw=glw)
ax.add_collection(p)
#Draw lines
for t in np.deg2rad(thetas):
x0 = (radii[0] * np.cos(t)) + A[0]
y0 = (radii[0] * np.sin(t)) + A[1]
x1 = (radii[-1] * np.cos(t)) + A[0]
y1 = (radii[-1] * np.sin(t)) + A[1]
line = lines.Line2D((x0,x1),(y0,y1),
color=colors[2],
lw=glw)
ax.add_line(line)
# Annotate cells with their cell number
if show_cell_nums:
# Get centroids of each cell
cell_centers = list(map(lambda c: np.mean(c,axis=0), coords))
# Contrast with image background
if background=='white':
color = 'black'
elif background=='black':
color = 'white'
for i,(x,y) in enumerate(cell_centers):
ax.text(x,y,str(i),
color=color,
fontsize=12,
ha='center',
va='center')
# Adjust axes
buf = (0.01*radii[-1])
xmin = min(0, # image left edge
# upper left corner of grid
radii[0]*np.cos(np.deg2rad(thetas[0]))+A[0]-buf,
# lower left corner of grid
radii[0]*np.cos(np.deg2rad(thetas[-1]))+A[0]-buf)
xmax = A[0]+radii[-1]+buf # right side of grid
ymax = min(0, # top of image
# top corner of grid
radii[-1]*np.sin(np.deg2rad(thetas[0]))+A[1]-buf)
ymin = max(im.shape[0], # bottom of image
# bottom corner of grid
radii[-1]*np.sin(np.deg2rad(thetas[-1]))+A[1]+buf)
ax.set_xlim(xmin,xmax)
ax.set_ylim(ymin,ymax)
# turn off axes ticks
ax.set_xticks([])
ax.set_yticks([])
#ax.set_axis_off()
# Title the plot
if self.method_=='pca':
if comps=='all':
plot_title = "Loadings on all PCs".format(comps)
elif len(comps)==1:
plot_title = "Loadings on PC {}".format(comps)
else:
plot_title = "Loadings on PCs {}".format(comps)
elif self.method_=='lda':
if comps=='all':
plot_title = "Loadings on all LDs".format(comps)
elif len(comps)==1:
plot_title = "Loadings on LD {}".format(comps)
else:
plot_title = "Loadings on LDs {}".format(comps)
if title is not None:
plot_title += (' - '+title)
fig.suptitle(plot_title,fontsize=14)
# Label rows and columns with channel and statistic
for st,row in enumerate(axes): # rows: [mean,std]
for channel,ax in zip(['red','green','blue'],row): # cols: [r,g,b]
if st==1: # bottom row
ax.set_xlabel(channel+' channel',color=channel,fontsize=12)
if channel=='red': # first column
if st==0:
ax.set_ylabel("mean px. value",fontsize=12)
elif st==1:
ax.set_ylabel("stdev of px. values",fontsize=12)
# adjust plot padding and spacing between plots
plt.subplots_adjust(hspace=0.001,
wspace=0.001,
# padding on sides
bottom=0.04,
left=0.04,
right=0.97,
top=0.95,
)
# Add colorbar
cbar = fig.colorbar(cbar_dummy,
ax=axes.ravel().tolist(),#cax=cax,
ticks=[0.,1.], # add ticks at min and max
pad=0.005, # padding b/w plot and cbar
fraction=0.06) # how much of the image to use
cbar.ax.set_yticklabels(['least', 'most']) # relabel ticks
plt.show()
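    # Sketch: overlay the first component's loadings on one analyzed image
    # (`g` must be the Grid instance fitted to `im` itself):
    #
    #   an.loadings_image_overlay(im, g, comps=[1])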
| gpl-3.0 |
guschmue/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/pandas_io_test.py | 111 | 7865 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pandas_io."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.learn_io import pandas_io
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
class PandasIoTest(test.TestCase):
def makeTestDataFrame(self):
index = np.arange(100, 104)
a = np.arange(4)
b = np.arange(32, 36)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -28), index=index)
return x, y
def callInputFnOnce(self, input_fn, session):
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
result_values = session.run(results)
coord.request_stop()
coord.join(threads)
return result_values
def testPandasInputFn_IndexMismatch(self):
if not HAS_PANDAS:
return
x, _ = self.makeTestDataFrame()
y_noindex = pd.Series(np.arange(-32, -28))
with self.assertRaises(ValueError):
pandas_io.pandas_input_fn(
x, y_noindex, batch_size=2, shuffle=False, num_epochs=1)
def testPandasInputFn_ProducesExpectedOutputs(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, target = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
def testPandasInputFn_ProducesOutputsForLargeBatchAndMultipleEpochs(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
index = np.arange(100, 102)
a = np.arange(2)
b = np.arange(32, 34)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -30), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=128, shuffle=False, num_epochs=2)
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
self.assertAllEqual(features['a'], [0, 1, 0, 1])
self.assertAllEqual(features['b'], [32, 33, 32, 33])
self.assertAllEqual(target, [-32, -31, -32, -31])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_ProducesOutputsWhenDataSizeNotDividedByBatchSize(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
index = np.arange(100, 105)
a = np.arange(5)
b = np.arange(32, 37)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -27), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
features, target = session.run(results)
self.assertAllEqual(features['a'], [2, 3])
self.assertAllEqual(features['b'], [34, 35])
self.assertAllEqual(target, [-30, -29])
features, target = session.run(results)
self.assertAllEqual(features['a'], [4])
self.assertAllEqual(features['b'], [36])
self.assertAllEqual(target, [-28])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_OnlyX(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, _ = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y=None, batch_size=2, shuffle=False, num_epochs=1)
features = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
def testPandasInputFn_ExcludesIndex(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, _ = self.callInputFnOnce(input_fn, session)
self.assertFalse('index' in features)
def assertInputsCallableNTimes(self, input_fn, session, n):
inputs = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
for _ in range(n):
session.run(inputs)
with self.assertRaises(errors.OutOfRangeError):
session.run(inputs)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_RespectsEpoch_NoShuffle(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=False, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffle(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=True, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffleAutosize(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, queue_capacity=None, num_epochs=2)
self.assertInputsCallableNTimes(input_fn, session, 4)
def testPandasInputFn_RespectsEpochUnevenBatches(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
with self.test_session() as session:
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=3, shuffle=False, num_epochs=1)
# Before the last batch, only one element of the epoch should remain.
self.assertInputsCallableNTimes(input_fn, session, 2)
def testPandasInputFn_Idempotent(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, num_epochs=1)()
if __name__ == '__main__':
test.main()
| apache-2.0 |
bluegod/OSCAAR | oscaar/oscaarGUI.py | 2 | 238923 | import os
import re
import sys
import wx
import IO
import shutil
import oscaar
import urllib2
import zipfile
import datetime
import subprocess
import webbrowser
import numpy as np
import systematics
import timeConversions
from glob import glob
from matplotlib import pyplot
from mathMethods import medianBin
from oscaar.extras.knownSystemParameters import returnSystemParams
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigCanvas
APP_EXIT = 1
class OscaarFrame(wx.Frame):
'''
This class is the main frame of the OSCAAR GUI.
'''
def __init__(self, parent, objectID):
'''
This method defines the initialization of this class.
'''
self.aboutOpen = False
self.loadOldPklOpen = False
self.loadFittingOpen = False
self.etdOpen = False
self.loadMasterFlat = False
self.overWrite = False
self.ds9Open = False
self.messageFrame = False
self.IP = wx.Frame
self.loadFitError = False
self.loadEphFrame = False
self.singularOccurance = 0
self.extraRegionsOpen = False
self.programmersEdit = False
self.loadObservatoryFrame = False
self.preprocessedImagesFrame = False
self.ccdGain = "1.0"
self.exposureTime = "JD"
self.switchTimes = 0
self.title = "OSCAAR"
wx.Frame.__init__(self, None, -1, self.title)
self.panel = wx.Panel(self)
if sys.platform == "win32":
self.fontType = wx.Font(9, wx.DEFAULT, wx.NORMAL, wx.BOLD)
else:
self.fontType = wx.Font(12, wx.DEFAULT, wx.NORMAL, wx.NORMAL)
self.static_bitmap = wx.StaticBitmap(self.panel)
self.logo = wx.Image(os.path.join(os.path.dirname(__file__), 'images',
'logo4.png'), wx.BITMAP_TYPE_ANY)
self.bitmap = wx.BitmapFromImage(self.logo)
self.static_bitmap.SetBitmap(self.bitmap)
self.paths = AddLCB(self.panel, -1, name="mainGUI", rowNum=5, vNum=15,
hNum=5, font=self.fontType)
self.topBox = wx.BoxSizer(wx.HORIZONTAL)
self.topBox.Add(self.paths, border=5, flag=wx.ALL)
tupleList = [('zoom', "Track Zoom: ",
'Enter a number for the zoom here.', '15'),
('radius', "Aperture Radius: ",
'Enter a decimal for the radius here.', '4.5'),
('smoothing', "Smoothing Constant: ",
'Enter an integer for smoothing here.', '3')]
self.leftBox = ParameterBox(self.panel, -1, tupleList, rows=5, cols=2,
vNum=10, hNum=10, font=self.fontType)
tupleList = [('ingress', "Ingress, UT (YYYY/MM/DD)",
"Enter a date in the correct format here.",
"YYYY/MM/DD"),
('egress', "Egress, UT (YYYY/MM/DD)",
"Enter a date in the correct format here.",
"YYYY/MM/DD"),
('rbTrackPlot', "Tracking Plots: ", "On", "Off"),
('rbPhotPlot', "Photometry Plots: ", "On", "Off"),
('rbFitAfterPhot', "Fit After Photometry ", "On", "Off")]
self.radioBox = ParameterBox(self.panel, -1, tupleList, rows=5, cols=3,
vNum=10, hNum=10, font=self.fontType)
self.sizer0 = wx.FlexGridSizer(rows=1, cols=4)
self.buttonBox = wx.BoxSizer(wx.HORIZONTAL)
self.buttonBox.Add(self.sizer0, 0, wx.ALIGN_CENTER | wx.ALL, 5)
self.ephButton = wx.Button(self.panel, label="Ephemeris")
self.masterFlatButton = wx.Button(self.panel,
label="Master Flat Maker")
self.ds9Button = wx.Button(self.panel, label="Open DS9")
self.runButton = wx.Button(self.panel, label="Run")
self.observatoryButton = wx.Button(self.panel, label="Extra " + \
"Observatory Parameters")
self.Bind(wx.EVT_BUTTON,
lambda evt: self.singularExistance(evt,
self.loadObservatoryFrame,
"observatory"),
self.observatoryButton)
self.Bind(wx.EVT_BUTTON,
lambda evt: self.singularExistance(evt, self.loadEphFrame,
"ephemeris"),
self.ephButton)
self.Bind(wx.EVT_BUTTON,
lambda evt: self.singularExistance(evt, self.loadMasterFlat,
"masterFlat"),
self.masterFlatButton)
self.Bind(wx.EVT_BUTTON,
lambda evt: self.singularExistance(evt, self.ds9Open,
"ds9"),
self.ds9Button)
self.Bind(wx.EVT_BUTTON, self.runOscaar, self.runButton)
self.sizer0.Add(self.ephButton, 0, wx.ALIGN_CENTER | wx.ALL, 5)
self.sizer0.Add(self.masterFlatButton, 0, wx.ALIGN_CENTER | wx.ALL, 5)
self.sizer0.Add(self.ds9Button, 0, wx.ALIGN_CENTER | wx.ALL, 5)
self.sizer0.Add(self.runButton, 0, wx.ALIGN_CENTER | wx.ALL, 5)
self.rightBox = wx.BoxSizer(wx.VERTICAL)
self.rightBox.Add(self.radioBox, 0, flag=wx.ALIGN_CENTER | wx.ALL,
border=5)
self.rightBox.Add(self.buttonBox, 0, flag=wx.ALIGN_CENTER | wx.ALL,
border=5)
self.leftBox2 = wx.BoxSizer(wx.VERTICAL)
self.leftBox2.Add(self.leftBox, 0, flag=wx.ALIGN_CENTER | wx.ALL,
border=5)
self.leftBox2.Add(self.observatoryButton, 0, flag=wx.ALIGN_CENTER |
wx.ALL, border=5)
self.bottomBox = wx.BoxSizer(wx.HORIZONTAL)
self.bottomBox.Add(self.leftBox2, 0, flag=wx.ALIGN_CENTER)
self.bottomBox.Add(self.rightBox, 0, flag=wx.ALIGN_CENTER | wx.ALL,
border=5)
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(self.static_bitmap, 0, flag=wx.ALIGN_LEFT)
self.vbox.Add(self.topBox, 0, flag=wx.ALIGN_CENTER)
self.vbox.Add(self.bottomBox, 0, flag=wx.CENTER | wx.ALL, border=5)
self.create_menu()
self.CreateStatusBar()
self.vbox.AddSpacer(10)
self.panel.SetSizer(self.vbox)
self.vbox.Fit(self)
self.setDefaults()
iconloc = os.path.join(os.path.dirname(__file__), 'images',
'logo4noText.ico')
icon1 = wx.Icon(iconloc, wx.BITMAP_TYPE_ICO)
self.SetIcon(icon1)
self.Center()
self.Show()
def create_menu(self):
'''
This method creates the menu bars that are at the top of the main GUI.
Notes
-----
This method has no input or return parameters. It will simply be used
as self.create_menu() when in the initialization method for an
OscaarFrame instance.
'''
menubar = wx.MenuBar()
menu_file = wx.Menu()
m_quit = menu_file.Append(wx.ID_EXIT, "Quit\tCtrl+Q",
"Quit this application.")
self.Bind(wx.EVT_MENU, self.on_exit, m_quit)
menu_help = wx.Menu()
m_help = menu_help.Append(wx.ID_HELP, "Help\tCtrl+H",
"More Information about how to use this" + \
" application.")
self.Bind(wx.EVT_MENU,
lambda evt: self.openLink(evt,
"https://github.com/OSCAAR/" + \
"OSCAAR/tree/master/docs/" + \
"documentationInProgress"),
m_help)
menu_oscaar = wx.Menu()
m_loadOld = menu_oscaar.Append(-1, "Load old output\tCtrl+L",
"Load an old output file for " + \
"further analysis.")
m_loadFitting = menu_oscaar.Append(-1, "Fitting Routines\tCtrl-F",
"Different fitting methods for " + \
"analysis of an old .pkl file.")
m_extraRegions = menu_oscaar.Append(-1, "Extra Regions File Sets",
"Add extra regions files to " + \
"specific referenced images.")
self.Bind(wx.EVT_MENU,
lambda evt: self.singularExistance(evt, self.loadOldPklOpen,
"loadOld"),
m_loadOld)
self.Bind(wx.EVT_MENU,
lambda evt: self.singularExistance(evt, self.loadFittingOpen,
"loadFitting"),
m_loadFitting)
self.Bind(wx.EVT_MENU,
lambda evt: self.singularExistance(evt,
self.extraRegionsOpen,
"extra"),
m_extraRegions)
menu_czech = wx.Menu()
m_etd = menu_czech.Append(-1, "Czech ETD Format", "Take a .pkl file " \
"and convert the data to a format that is " \
"accepted by the Czech Astronomical " \
"Society's exoplanet transit database.")
m_ttp = menu_czech.Append(-1, "Transit Time Predictions",
"Transit time predictions from the " + \
"Czech Astronomical Society.")
self.Bind(wx.EVT_MENU,
lambda evt: self.openLink(evt,
"http://var2.astro.cz/ETD/" + \
"predictions.php"),
m_ttp)
self.Bind(wx.EVT_MENU,
lambda evt: self.singularExistance(evt, self.etdOpen, "etd"),
m_etd)
menu_update = wx.Menu()
        m_update = menu_update.Append(-1, "Check For Updates", "Check to see " \
"if you have the latest commit for " \
"this version of oscaar.")
self.Bind(wx.EVT_MENU, self.checkSHA, m_update)
menu_about = wx.Menu()
m_about = menu_about.Append(-1, "About", "Contributors of OSCAAR.")
self.Bind(wx.EVT_MENU,
lambda evt: self.singularExistance(evt, self.aboutOpen,
"about"),
m_about)
menubar.Append(menu_file, "File")
menubar.Append(menu_oscaar, "Oscaar")
menubar.Append(menu_czech, "Czech ETD")
menubar.Append(menu_update, "Update")
menubar.Append(menu_help, "Help")
menubar.Append(menu_about, "About")
self.SetMenuBar(menubar)
def runOscaar(self, event):
'''
This method will activate when the run button on the main GUI is
pressed. It executes the differentialPhotometry.py script.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The *
represents a wild card value.
Notes
-----
There is nothing to return for this method. Upon completion a window
will open with the light curve that was produced from the data and
input parameters.
'''
self.values = {}
invalidDarkFrames = self.checkFileInputs(self.paths.boxList[1].
GetValue(), saveNum=1)
masterFlat = self.paths.boxList[2].GetValue().strip()
invalidDataImages = self.checkFileInputs(self.paths.boxList[3].
GetValue(), saveNum=3)
regionsFile = self.paths.boxList[4].GetValue().strip()
self.outputFile = self.paths.boxList[5].GetValue().strip()
self.values["radius"] = self.leftBox.userParams["radius"].GetValue()
self.radiusError = "radius"
if invalidDarkFrames != "":
self.IP = InvalidParameter(invalidDarkFrames, self, -1,
stringVal="fits",
secondValue="the path to Dark Frames")
elif masterFlat != "" and (os.path.isfile(masterFlat) != True or \
(masterFlat.lower().endswith(".fit") != True and \
masterFlat.lower().endswith(".fits") != True)):
tempString = masterFlat
if len(masterFlat.split(",")) > 1:
tempString = ""
for string in masterFlat.split(","):
if string == "" and len(masterFlat.split(",")) == 2:
tempString += ","
else:
tempString += "\n" + string.strip()
self.IP = InvalidParameter(tempString, self, -1,
stringVal="master",
secondValue="path to the Master Flat")
elif invalidDataImages != "":
self.IP = InvalidParameter(invalidDataImages, self, -1,
stringVal="fits",
secondValue="the path to Data Images")
elif self.checkRegionsBox(regionsFile) == False:
pass
elif not os.path.isdir(self.outputFile.rpartition(str(os.sep))[0]) or \
not len(self.outputFile) > \
(len(self.outputFile[:self.outputFile.rfind(os.sep)]) + 1):
self.IP = InvalidParameter(self.outputFile, self, -1,
stringVal="output",
secondValue="output file")
elif self.checkAperture(self.values["radius"]) != True:
self.IP = InvalidParameter(self.leftBox.userParams["radius"].
GetValue(), self, -1,
stringVal=self.radiusError)
elif self.timeAndDateCheck(self.radioBox.userParams['ingress1'].
GetValue(),
self.radioBox.userParams['egress1'].
GetValue(),
self.radioBox.userParams['ingress'].
GetValue(),
self.radioBox.userParams['egress'].
GetValue()) == True:
try:
tempList = ["smoothing", "zoom"]
for string in tempList:
self.values[string] = int(self.leftBox.userParams[string].GetValue())
self.leftBox.userParams[string].SetValue(str(self.values[string]))
self.paths.boxList[2].SetValue(masterFlat)
self.paths.boxList[5].SetValue(self.outputFile)
# This code here writes all the parameters to the init.par file.
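                # The resulting init.par looks roughly like this (values are illustrative):
                #   Path to Dark Frames: /data/darks/*.fits
                #   Path to Data Images: /data/images/*.fits
                #   Path to Master-Flat Frame: /data/masterFlat.fits
                #   Path to Regions File: /data/field.reg,/data/ref.fits;
                #   Output Path: /data/output.pkl
                #   Ingress: 2013-05-01 ; 22:10:00
                #   Egress: 2013-05-02 ; 03:45:00
                #   Plot Tracking: on
                #   Plot Photometry: off
                #   Smoothing Constant: 3
                #   Radius: 4.5
                #   Tracking Zoom: 15
                #   CCD Gain: 1.0
                #   Exposure Time Keyword: EXPTIME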
init = open(os.path.join(os.path.dirname(__file__),'init.par'), 'w')
init.write("Path to Dark Frames: " + self.paths.boxList[1].GetValue() + "\n")
init.write("Path to Data Images: " + self.paths.boxList[3].GetValue() + "\n")
init.write("Path to Master-Flat Frame: " + masterFlat + "\n")
init.write("Path to Regions File: " + self.paths.boxList[4].GetValue() + "\n")
if not self.paths.boxList[5].GetValue().lower().endswith(".pkl"):
init.write("Output Path: " + self.paths.boxList[5].GetValue() + ".pkl\n")
else:
init.write("Output Path: " + self.paths.boxList[5].GetValue() + "\n")
self.parseTime(self.radioBox.userParams["ingress"].GetValue(),
self.radioBox.userParams["ingress1"].GetValue(), 'Ingress: ', init, name="ingress")
self.parseTime(self.radioBox.userParams["egress"].GetValue(),
self.radioBox.userParams["egress1"].GetValue(), 'Egress: ', init, name="egress")
if self.radioBox.userParams['rbTrackPlot'].GetValue():
init.write("Plot Tracking: " + "on"+ "\n")
else:
init.write("Plot Tracking: " + "off"+ "\n")
if self.radioBox.userParams['rbPhotPlot'].GetValue():
init.write("Plot Photometry: " + "on"+ "\n")
else:
init.write("Plot Photometry: " + "off"+ "\n")
init.write("Smoothing Constant: " + str(self.values["smoothing"]) + '\n')
init.write("Radius: " + str(self.values["radius"]) + '\n')
init.write("Tracking Zoom: " + str(self.values["zoom"]) + '\n')
init.write("CCD Gain: " + self.ccdGain + "\n")
init.write("Exposure Time Keyword: " + self.exposureTime + "\n")
init.close()
if self.loadFittingOpen == False:
if self.preprocessedImagesFrame == False and \
self.overWrite == False and \
(self.paths.boxList[1].GetValue() == "" or \
self.paths.boxList[2].GetValue() == ""):
OverWrite(self, -1, "Preprocessed Images Check", "", "PreprocessedImages")
self.preprocessedImagesFrame = True
elif self.preprocessedImagesFrame == False and \
(os.path.isfile(self.outputFile) or \
os.path.isfile(self.outputFile + '.pkl')):
if self.overWrite == False:
OverWrite(self, -1, "Overwrite Output File", self.outputFile, "Output File")
self.overWrite = True
elif self.preprocessedImagesFrame == False and \
self.overWrite == False:
diffPhotCall = "from oscaar import differentialPhotometry"
subprocess.check_call(['python','-c',diffPhotCall])
if self.radioBox.userParams["rbFitAfterPhot"].GetValue() == True:
wx.CallAfter(self.createFrame)
else:
if self.loadFitError == False:
self.IP = InvalidParameter("", self, -1, stringVal="fitOpen")
self.loadFitError = True
except ValueError:
string2 = string
if string2 == "smoothing":
string2 = "smoothing constant"
self.IP = InvalidParameter(self.leftBox.userParams[string].GetValue(),self,-1, stringVal="leftbox", secondValue=string2)
def timeAndDateCheck(self, time1, time2, date1, date2):
'''
This method checks that the times and dates entered in the main GUI are in the correct format.
Parameters
----------
time1 : string
The ingress time of the transit that was observed.
time2 : string
The egress time of the transit that was observed.
date1 : string
The date for the ingress of the transit.
date2 : string
The date for the egress of the transit.
Returns
-------
literal : bool
Returns true if the parameters are all in the correct format, otherwise it returns false.
Notes
-----
        The correct format for the times is HH:MM:SS, while for the dates it is YYYY/MM/DD. This method
        also checks that real dates have been entered and that the ingress time is always before the
        egress time.
'''
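        # Illustrative calls (hypothetical values; a failed check also pops up an
        # InvalidParameter window, so a running wx.App is assumed):
        #   timeAndDateCheck("22:10:00", "03:45:00", "2013/05/01", "2013/05/02") -> True
        #   timeAndDateCheck("25:10:00", "03:45:00", "2013/05/01", "2013/05/02") -> False (hour > 23)
        #   timeAndDateCheck("22:10:00", "21:00:00", "2013/05/02", "2013/05/01") -> False (egress before ingress)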
years = []
months = []
days = []
hours = []
minutes = []
seconds = []
for timeArray, value in [(time1.split(":"), time1),
(time2.split(":"), time2)]:
if len(timeArray) != 3:
self.IP = InvalidParameter(value, self, -1, stringVal="dateTime", secondValue="time")
return False
else:
try:
hour = int(timeArray[0].strip())
hours.append(hour)
minute = int(timeArray[1].strip())
minutes.append(minute)
second = int(timeArray[2].strip())
seconds.append(second)
if len(timeArray[0].strip()) > 2 or len(timeArray[1].strip()) > 2 or len(timeArray[2].strip()) > 2:
self.IP = InvalidParameter(value, self, -1, stringVal="dateTime", secondValue="time")
return False
if hour > 23 or hour < 0 or minute > 59 or minute < 0 or second > 59 or second < 0:
self.IP = InvalidParameter(value, self, -1, stringVal="dateTime", secondValue="time")
return False
except ValueError:
self.IP = InvalidParameter(value, self, -1, stringVal="dateTime", secondValue="time")
return False
for dateArray,value in [(date1.split("/"),date1),
(date2.split("/"),date2)]:
if len(dateArray) != 3:
self.IP = InvalidParameter(value, self, -1, stringVal="dateTime", secondValue="date")
return False
else:
try:
year = int(dateArray[0].strip())
years.append(year)
month = int(dateArray[1].strip())
months.append(month)
day = int(dateArray[2].strip())
days.append(day)
if len(dateArray[0].strip()) != 4 or len(dateArray[1].strip()) > 2 or len(dateArray[2].strip()) > 2:
self.IP = InvalidParameter(value, self, -1, stringVal="dateTime", secondValue="date")
return False
minYear = datetime.date.today().year - 100
maxYear = datetime.date.today().year + 100
if year < minYear or year > maxYear or month > 12 or month < 0 or day > 31 or day < 0 or \
month == 0 or year == 0 or day == 0:
self.IP = InvalidParameter(value, self, -1, stringVal="dateTime", secondValue="date")
return False
except ValueError:
self.IP = InvalidParameter(value, self, -1, stringVal="dateTime", secondValue="date")
return False
if years[0] > years[1]:
self.IP = InvalidParameter(date1, self, -1, stringVal="logicalDate")
return False
elif years[0] == years[1]:
if months[0] > months[1]:
self.IP = InvalidParameter(date1, self, -1, stringVal="logicalDate")
return False
elif months[0] == months[1]:
if days[0] > days[1]:
self.IP = InvalidParameter(date1, self, -1, stringVal="logicalDate")
return False
elif days[0] == days[1]:
if hours[0] > hours[1]:
self.IP = InvalidParameter(time1, self, -1, stringVal="logicalTime")
return False
elif hours[0] == hours[1]:
if minutes[0] > minutes[1]:
self.IP = InvalidParameter(time1, self, -1, stringVal="logicalTime")
return False
elif minutes[0] == minutes[1]:
if seconds[0] >= seconds [1]:
self.IP = InvalidParameter(time1, self, -1, stringVal="logicalTime")
return False
return True
def checkAperture(self, stringVal):
'''
This method parses the string from the aperture radius text box to make sure that the values
are in the correct format and valid.
Parameters
----------
stringVal : string
The input of the aperture radius text box in the main GUI.
Returns
-------
literal : bool
True if the values are valid and false otherwise.
Notes
-----
        This method checks that the radius step interval is not larger than the difference between the
        max and min radii, and that the max radius is larger than the min radius. Only when exactly
        three values are entered in this text control box does the GUI interpret them as
        (min radius, max radius, step interval); otherwise it uses the specific values entered.
'''
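        # Illustrative examples (hypothetical values):
        #   "4.5"        -> True, box normalized to "4.5"
        #   "2, 6, 0.5"  -> True, interpreted as (min=2.0, max=6.0, step=0.5)
        #   "2, 6, 10"   -> False, self.radiusError set to "radiusStep"
        #   "2, 3, 4, 5" -> True, treated as the individual radii "2.0,3.0,4.0,5.0"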
splitString = stringVal.split(",")
if len(splitString) == 1:
try:
float(splitString[0])
self.leftBox.userParams["radius"].SetValue(str(float(splitString[0])))
return True
except ValueError:
self.radiusError = "radiusNum"
return False
        elif len(splitString) == 3:
minRadius = splitString[0].strip()
maxRadius = splitString[1].strip()
stepSize = splitString[2].strip()
try:
minRadius = float(minRadius)
maxRadius = float(maxRadius)
stepSize = float(stepSize)
if minRadius == maxRadius:
self.radiusError = "radiusEqual"
return False
elif minRadius > maxRadius:
self.radiusError = "radiusLogic"
return False
elif (maxRadius-minRadius) < stepSize:
self.radiusError = "radiusStep"
return False
if stepSize == 0:
self.radiusError = "radiusLogic"
return False
elif minRadius == 0 or maxRadius == 0:
self.radiusError = "radiusLogic"
return False
self.values["radius"] = str(minRadius) + "," + str(maxRadius) + "," + str(stepSize)
self.leftBox.userParams["radius"].SetValue(str(minRadius) + "," + str(maxRadius) + "," + str(stepSize))
return True
except ValueError:
self.radiusError = "radiusNum"
return False
else:
stringTemp = ""
for num in splitString:
numStrip = num.strip()
try:
                    # Compare the parsed numeric value; comparing the raw string
                    # to the integer 0 would never catch a zero radius.
                    if float(numStrip) == 0:
self.radiusError = "radiusLogic2"
return False
except ValueError:
self.radiusError = "radiusNum"
return False
stringTemp += str(float(numStrip)) + ","
self.values["radius"] = stringTemp.rpartition(",")[0]
self.leftBox.userParams["radius"].SetValue(stringTemp.rpartition(",")[0])
return True
def setDefaults(self):
'''
This method will set the default values for the text boxes in the main GUI with those
listed in the init.par file.
Notes
-----
        This is a line-by-line parser that searches each init.par entry for the provided keywords.
'''
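        # init.par lines are of the form "Keyword: value", e.g. (illustrative):
        #   Radius: 4.5
        #   Plot Tracking: on
        #   Ingress: 2013-05-01 ; 22:10:00
        # Each recognised keyword is routed to the matching text box, radio
        # button, or attribute below.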
if self.programmersEdit == True:
init = open("init.par","r").read().splitlines()
else:
oscaarpath = os.path.dirname(os.path.abspath(oscaar.__file__))
init = open(os.path.join(oscaarpath,'init.par'), 'r').read().splitlines()
for line in init:
if len(line.split()) > 1:
inline = line.split(':', 1)
name = inline[0].strip()
value = str(inline[1].strip())
tempList = [("Path to Master-Flat Frame", 2),
("Path to Regions File", 4),
("Ingress", "ingress"),("Egress", "egress"),
("Radius", "radius"),("Tracking Zoom", "zoom"),
("Plot Tracking", "rbTrackPlot"),
("Plot Photometry", "rbPhotPlot"),("Smoothing Constant", "smoothing"),
("Output Path",5),("Path to Dark Frames", 1),("Path to Data Images", 3),
("CCD Gain",""),("Exposure Time Keyword","")]
for string,save in tempList:
if string == name:
if name == "Smoothing Constant" or name == "Tracking Zoom":
self.leftBox.userParams[save].SetValue(value)
elif name == "Radius":
stripTemp = [x.strip() for x in value.split(",")]
stringTemp = ""
for eachTemp in stripTemp:
stringTemp += eachTemp + ","
self.leftBox.userParams[save].SetValue(stringTemp.rpartition(",")[0])
elif name == "Plot Photometry" or name == "Plot Tracking":
if value == "off":
save += "1"
self.radioBox.userParams[save].SetValue(True)
elif name == "Path to Dark Frames" or name == "Path to Data Images":
tempArray = value.split(",")
tempArray[:] = [x.strip() for x in tempArray]
finalString = ""
for eachString in tempArray:
finalString += eachString + ","
self.paths.boxList[save].SetValue(finalString.rpartition(",")[0])
elif name == "Path to Master-Flat Frame" or name == "Path to Regions File" or\
name == "Output Path":
self.paths.boxList[save].SetValue(value)
elif name == "CCD Gain":
self.ccdGain = value
elif name == "Exposure Time Keyword":
self.exposureTime = value
else:
date = value.split(";")[0].strip().replace("-","/")
time = value.split(";")[1].strip()
for eachOne, other in [(date,""),(time,"1")]:
if other == "1":
separator = ":"
else:
separator = "/"
stripTemp = [x.strip() for x in eachOne.split(separator)]
stringTemp = ""
for eachTemp in stripTemp:
stringTemp += eachTemp + separator
if other == "1":
self.radioBox.userParams[save+"1"].SetValue(stringTemp.rpartition(separator)[0])
else:
self.radioBox.userParams[save].SetValue(stringTemp.rpartition(separator)[0])
def checkFileInputs(self,array,saveNum):
'''
This checks that the files from a text control box are valid .fit/.fits files. Then it refreshes
the text control box with a string of the valid files.
Parameters
----------
array : string
The list of files from a text control box in the main GUI.
saveNum : int
When it refreshes the text control box, the method needs to know which box to do it for. The box numbers from
the main GUI are in order 1-5 (this is only for the input file text boxes).
Returns
-------
errorString : string
A string of all of the invalid files that were entered in the input file text box.
Notes
-----
If errorString returns '' (empty), this means that all of the entered files were valid.
'''
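        # Illustrative inputs (hypothetical paths):
        #   "/data/images/"       -> globbed as /data/images/*.fit and /data/images/*.fits
        #   "/data/images/*.fits" -> every match kept, provided it ends in .fit or .fits
        #   "/data/notes.txt"     -> added to errorString (not a FITS file)
        # Note: `smallArray` is never reset inside the loop, so once a trailing-separator
        # entry is seen, later entries are also globbed against the stale `tempElement`.
        # Valid, de-duplicated paths are written back into text box `saveNum`.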
errorString = ""
setValueString = ""
array2 = []
smallArray = ""
if array.strip() == "" and saveNum != 3:
return errorString
for element in array.split(","):
element = element.strip()
if element.lower().endswith(os.sep):
tempElement = element + "*.fit"
element += "*.fits"
smallArray = "-1"
if smallArray == "":
if len(glob(element)) < 1:
errorString += element
elif len(glob(element)) > 1:
for element2 in glob(element):
if element2.lower().endswith(".fit") or element2.lower().endswith(".fits"):
array2.append(element2)
else:
errorString += "\n" + element2
elif not element.lower().endswith(".fit") and not element.lower().endswith(".fits"):
errorString += "\n" + element
else:
array2.append(glob(element)[0])
else:
if len(glob(tempElement)) < 1 and len(glob(element)) < 1:
errorString += "\n" + tempElement + ",\n" + element
else:
if len(glob(tempElement)) >= 1:
for element2 in glob(tempElement):
array2.append(element2)
if len(glob(element)) >= 1:
for element2 in glob(element):
array2.append(element2)
if not array:
return "No Values Entered"
else:
if errorString == "":
setValueString = ""
uniqueArray = np.unique(array2).tolist()
for eachString in uniqueArray:
setValueString += eachString + ","
if saveNum == 3 and (len(uniqueArray) < 2):
errorString = self.paths.boxList[3].GetValue()
return errorString
self.paths.boxList[saveNum].SetValue(setValueString.rpartition(",")[0])
return errorString
def checkRegionsBox(self, boxValue):
'''
This method specifically checks that the regions file input box in the main GUI has files that are in
the correct format.
Parameters
----------
boxValue : string
The value of the regions file box.
Returns
-------
literal : bool
True if all of the files are valid, false otherwise.
Notes
-----
        The correct format for files in the regions file box is (somefile.reg,referencefile.fits;). The
        semicolon separates different sets of regions and reference files. Only when a single regions file
        is given may the reference file be omitted; otherwise every set must include a reference file.
'''
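        # Illustrative inputs (hypothetical paths; the files must exist and each
        # reference image must also be listed in the Data Images box):
        #   "field.reg,ref.fits;"                      -> one regions/reference set
        #   "field.reg,ref.fits;field2.reg,ref2.fits;" -> two sets
        #   "field.reg"                                -> accepted only as a lone entry;
        #                                                 the first Data Image becomes its reference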
setValueString = ""
tempString = ""
if boxValue == "":
self.IP = InvalidParameter(boxValue, self, -1, stringVal="emptyReg")
return False
splitSets = boxValue.split(";")
checkSet = self.paths.boxList[3].GetValue().strip().split(",")
try:
if len(splitSets[0].split(",")) == 1 and len(splitSets[1]) == 0 and len(splitSets) == 2 and \
splitSets[0].split(",")[0].strip().lower().endswith(".reg"):
setValueString = splitSets[0].strip() + "," + self.paths.boxList[3].GetValue().split(",")[0].strip() + ";"
elif splitSets[0].split(",")[1].strip() == "" and len(splitSets[1]) == 0 and len(splitSets) == 2:
if splitSets[0].split(",")[0].strip().lower().endswith(".reg") != True or \
len(glob(splitSets[0].split(",")[0])) != 1:
self.IP = InvalidParameter("\nRegions: "+ splitSets[0].split(",")[0]
+ "\nReference: " + splitSets[0].split(",")[1], self, -1, stringVal="invalidReg")
return False
setValueString = splitSets[0].split(",")[0].strip() + "," + \
self.paths.boxList[3].GetValue().split(",")[0].strip() + ";"
else:
try:
for eachSet in splitSets:
if eachSet != "":
tempString = "tempReg"
tempReg = eachSet.split(",")[0].strip()
tempString = "tempRef"
tempRef = eachSet.split(",")[1].strip()
if len(glob(tempReg)) != 1 or tempReg.lower().endswith(".reg") == False:
self.IP = InvalidParameter("\nRegions: "+tempReg + "\nReference: " + tempRef, self, -1, stringVal="invalidReg")
return False
elif len(glob(tempRef)) != 1 or (tempRef.lower().endswith(".fits") == False and
tempRef.lower().endswith(".fit") == False):
self.IP = InvalidParameter("\nRegions: "+tempReg + "\nReference: " + tempRef, self, -1, stringVal="invalidRef")
return False
elif all(tempRef != temp for temp in checkSet):
self.IP = InvalidParameter("\nRegions: "+tempReg + "\nReference: " + tempRef, self, -1, stringVal="invalidRefExist")
return False
setValueString += tempReg + "," + tempRef + ";"
except IndexError:
if tempString == "tempReg":
tempReg = ""
elif tempString == "tempRef":
tempRef = ""
if len(eachSet.split(",")) == 1:
self.IP = InvalidParameter("\nRegions: "+tempReg + "\nReference: " + tempRef, self, -1, stringVal="outofbounds")
return False
except IndexError:
if splitSets[0].split(",")[0].strip().lower().endswith(".reg") != True or \
len(glob(splitSets[0].split(",")[0])) != 1:
if len(splitSets[0].split(",")) == 1:
temp = ""
else:
temp = splitSets[0].split(",")[1]
self.IP = InvalidParameter("\nRegions: "+ splitSets[0].split(",")[0]
+ "\nReference: " + temp, self, -1, stringVal="invalidReg")
return False
setValueString = splitSets[0].split(",")[0].strip() + "," + \
self.paths.boxList[3].GetValue().split(",")[0].strip()
splitSets[0] = setValueString
setValueString = ""
try:
for eachSet in splitSets:
if eachSet != "":
tempString = "tempReg"
tempReg = eachSet.split(",")[0].strip()
tempString = "tempRef"
tempRef = eachSet.split(",")[1].strip()
if len(glob(tempReg)) != 1 or tempReg.lower().endswith(".reg") == False:
self.IP = InvalidParameter("\nRegions: "+tempReg + "\nReference: " + tempRef, self, -1, stringVal="invalidReg")
return False
elif len(glob(tempRef)) != 1 or (tempRef.lower().endswith(".fits") == False and
tempRef.lower().endswith(".fit") == False):
self.IP = InvalidParameter("\nRegions: "+tempReg + "\nReference: " + tempRef, self, -1, stringVal="invalidRef")
return False
elif all(tempRef != temp for temp in checkSet):
self.IP = InvalidParameter("\nRegions: "+tempReg + "\nReference: " + tempRef, self, -1, stringVal="invalidRefExist")
return False
setValueString += tempReg + "," + tempRef + ";"
except IndexError:
if tempString == "tempReg":
tempReg = ""
elif tempString == "tempRef":
tempRef = ""
if len(eachSet.split(",")) == 1:
self.IP = InvalidParameter("\nRegions: "+tempReg + "\nReference: " + tempRef, self, -1, stringVal="outofbounds")
return False
refArray = []
regArray = []
tempDict = {}
for eachSet in setValueString.split(";"):
if len(eachSet.split(",")) != 1:
reg = eachSet.split(",")[0]
ref = eachSet.split(",")[1]
regTemp = reg in regArray
refTemp = ref in refArray
if regTemp == False and refTemp == False:
regArray.append(reg)
refArray.append(ref)
tempDict[reg] = ref
elif regTemp == False and refTemp == True:
for key, val in tempDict.items():
if val == ref:
tempReg = key
tempString = "\nRegions: " + reg + "\nReference: " + ref + "\nBecause ---" + "\nRegions: " + \
tempReg + "\nIs already associated with the reference file."
self.IP = InvalidParameter(tempString, self, -1, stringVal="referenceImageDup")
return False
elif regTemp == True and refTemp == False:
tempRef = tempDict.get(reg)
tempString = "\nRegions: " + reg + "\nReference: " + ref + "\nBecause ---" + "\nRegions: " + \
reg + "\nIs already associated with:\nReference: " + tempRef
self.IP = InvalidParameter(tempString, self, -1, stringVal="regionsDup")
return False
setValueString = ""
for key, val in tempDict.items():
setValueString += key + "," + val + ";"
self.paths.boxList[4].SetValue(setValueString)
return True
def singularExistance(self, event, value, name):
'''
        This method makes sure that only one frame of each class is open at a time, so that, for
        example, two fitting frames cannot be opened simultaneously.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
value : bool
Indicates whether or not there is already an instance of the class open.
name : string
The keyword defining the name of the class for which a frame is about to be opened.
Notes
-----
There is nothing returned for this method. On a successful completion, a new frame will appear. If
`value` is True however, then the method does nothing because there is already an instance of the frame
open, so it will not duplicate it.
'''
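        # `name` selects the frame to open: "about", "loadOld", "loadFitting",
        # "masterFlat", "ephemeris", "ds9", "extra", "observatory", or "etd".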
if value == False:
if name == "about":
AboutFrame(self,-1)
self.aboutOpen = True
elif name == "loadOld":
LoadOldPklFrame(self, -1)
self.loadOldPklOpen = True
elif name == "loadFitting":
FittingFrame(self, -1)
self.loadFittingOpen = True
elif name == "masterFlat":
MasterFlatFrame(self, -1)
self.loadMasterFlat = True
elif name == "ephemeris":
EphemerisFrame(self, -1)
self.loadEphFrame = True
elif name == "ds9":
if sys.platform == "win32":
errorType = WindowsError
else:
errorType = OSError
try:
subprocess.Popen([os.path.join(os.path.dirname(os.path.abspath(oscaar.__file__)),
'extras','ds9',sys.platform,'ds9')])
except errorType:
self.IP = InvalidParameter("", self, -1, stringVal="ds9")
elif name == "extra":
invalidDataImages = self.checkFileInputs(self.paths.boxList[3].GetValue(), saveNum=3)
if invalidDataImages != "":
self.IP = InvalidParameter(invalidDataImages, self, -1, stringVal="fits", secondValue="the path to Data Images")
elif self.checkRegionsBox(self.paths.boxList[4].GetValue()) == True:
ExtraRegions(self,-1)
self.extraRegionsOpen = True
elif name == "observatory":
invalidDataImages = self.checkFileInputs(self.paths.boxList[3].GetValue(), saveNum=3)
if invalidDataImages != "":
self.IP = InvalidParameter(invalidDataImages, self, -1, stringVal="fits", secondValue="the path to Data Images")
else:
ObservatoryFrame(self, -1)
self.loadObservatoryFrame = True
elif name == "etd":
ETDFrame(self, -1)
self.etdOpen = True
def parseTime(self, date, time, text, filename, name=""):
'''
This method prints the dates and times of the transit into the init.par file in the correct format.
Parameters
----------
date : string
A string of the date in the format YYYY/MM/DD.
time : string
A string of the time in the format HH:MM:SS.
text : string
The name of what should be entered in the init.par file before the actual values (ingress or egress).
filename : file
The open file that the value will be appended to.
name : string, optional
The name of the text box that will be refreshed.
Notes
-----
        When it is done printing into init.par, the method refreshes the ingress and egress text control
        boxes so that any stray spaces are removed.
'''
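        # For example (illustrative), date "2013/05/01" and time "22:10:00" with
        # text "Ingress: " write the line "Ingress: 2013-05-01 ; 22:10:00" to init.par.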
dateArr = str(date).split('/')
result = dateArr[0].strip() + '-' + dateArr[1].strip() + '-' + dateArr[2].strip() + ' ; '
timeArr = str(time).split(":")
result += timeArr[0].strip() + ":" + timeArr[1].strip() + ':' + timeArr[2].strip()
filename.write(text + result + '\n')
self.radioBox.userParams[name].SetValue(dateArr[0].strip() + '/' + dateArr[1].strip() + '/' + dateArr[2].strip())
self.radioBox.userParams[name+"1"].SetValue(timeArr[0].strip() + ":" + timeArr[1].strip() + ':' +
timeArr[2].strip())
def createFrame(self):
'''
This method allows the fitting frame to be opened after the completion of the differentialPhotometry.py script
so that users may work on their light curves.
'''
if self.loadFittingOpen == False:
if not self.outputFile.lower().endswith(".pkl"):
FittingFrame(self, -1, self.outputFile + ".pkl")
self.loadFittingOpen = True
else:
FittingFrame(self, -1, self.outputFile)
self.loadFittingOpen = True
def checkSHA(self, event):
'''
        This method checks the commit SHA that is saved in __init__.py when
        oscaar is installed against the SHA of the latest commit online.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The *
represents a wild card value.
Notes
-----
        There is no return. If the two SHAs are equal, then the latest
        version of oscaar is installed, and a pop-up message says so. If
        they are not equal, a message pops up telling the user to download
        the latest commit.
'''
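        # The latest commit SHA is scraped from the GitHub commits page with a
        # regular expression and compared against oscaar.__sha__ recorded at install time.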
try:
url = urllib2.urlopen("https://github.com/OSCAAR/OSCAAR/commits/" \
"master").read()
mostRecentCommit = re.search('href="/OSCAAR/OSCAAR/commit/[a-z0-9]*',
str(url)).group(0).rpartition("/")[2]
try:
currentCommit = oscaar.__sha__
if mostRecentCommit == currentCommit:
self.IP = InvalidParameter("", self, -1, stringVal="upToDate")
else:
self.IP = InvalidParameter("", self, -1, stringVal="newCommit")
except AttributeError:
self.IP = InvalidParameter("", self, -1, stringVal="installAgain")
except urllib2.URLError:
self.IP = InvalidParameter("", self, -1, stringVal="noInternetConnection")
def openLink(self, event, string):
'''
This opens a new tab in the default web browser with the specified link.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
string : string
The web url that will be opened.
'''
webbrowser.open_new_tab(string)
def on_exit(self, event):
'''
This method defines the action quit from the menu. It closes the frame.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
self.Destroy()
class ObservatoryFrame(wx.Frame):
'''
This is a frame for updating extra parameters that would define an observatory's configuration.
'''
def __init__(self, parent, objectID):
'''
This method defines the initialization of this class.
'''
if sys.platform == "win32":
self.fontType = wx.Font(9, wx.DEFAULT, wx.NORMAL, wx.BOLD)
else:
self.fontType = wx.Font(12, wx.DEFAULT, wx.NORMAL, wx.NORMAL)
wx.Frame.__init__(self, parent, objectID, "Change Observatory Parameters")
self.panel = wx.Panel(self)
self.parent = parent
self.messageFrame = False
self.IP = wx.Frame
self.titlebox = wx.StaticText(self.panel, -1, "Observatory Parameters")
self.titleFont = wx.Font(15, wx.DEFAULT, wx.NORMAL, wx.BOLD)
self.titlebox.SetFont(self.titleFont)
paramsList = [('ccd',"CCD Gain: ",
'Enter a decimal for the gain here.', self.parent.ccdGain)]
# Quick check to see the available keywords from the header for a fits file.
# header = pyfits.getheader(self.parent.paths.boxList[3].GetValue().split(",")[0]).keys()
# print header
bestKeyword, self.allKeys, acceptedKeys, conversion = \
timeConversions.findKeyword(self.parent.paths.boxList[3].GetValue().split(",")[0])
if conversion: pass
self.unionKeys = []
for eachKey in self.allKeys:
if eachKey in acceptedKeys:
self.unionKeys.append(eachKey)
self.timeLabel = wx.StaticText(self.panel, -1, 'Select Exposure Time Keyword: ')
self.timeLabel.SetFont(self.fontType)
if self.parent.switchTimes == 0:
self.timeList = wx.ComboBox(self.panel, value = bestKeyword, choices = sorted(self.unionKeys),
size=(75,wx.DefaultSize.GetHeight()))
self.parent.switchTimes = 1
else:
self.timeList = wx.ComboBox(self.panel, value = self.parent.exposureTime, choices = sorted(self.unionKeys),
size=(75,wx.DefaultSize.GetHeight()))
self.timeList.Bind(wx.EVT_COMBOBOX, self.updateTime)
self.dropBox = wx.BoxSizer(wx.HORIZONTAL)
self.dropBox.Add(self.timeLabel, 0, flag = wx.ALIGN_CENTER | wx.LEFT, border = 10)
self.dropBox.Add(self.timeList, 0, flag = wx.ALIGN_CENTER)
self.params = ParameterBox(self.panel, -1, paramsList, rows=5, cols=2, vNum=10, hNum=10, font=self.fontType)
self.updateButton = wx.Button(self.panel, label = "Update")
self.Bind(wx.EVT_BUTTON, self.update, self.updateButton)
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(self.titlebox, 0, flag = wx.ALIGN_CENTER | wx.ALL, border = 5)
self.vbox.Add(self.params, 0, flag = wx.ALIGN_CENTER | wx.LEFT | wx.RIGHT, border = 5)
self.vbox.Add(self.dropBox, 0, flag=wx.ALIGN_CENTER | wx.ALL, border=5)
self.vbox.Add(self.updateButton, 0, flag=wx.ALIGN_CENTER | wx.ALL, border=5)
self.Bind(wx.EVT_WINDOW_DESTROY, self.onDestroy)
self.create_menu()
self.CreateStatusBar()
self.panel.SetSizer(self.vbox)
self.vbox.Fit(self)
self.Center()
self.Show()
def updateTime(self,event):
'''
This updates the exposure time keyword variable for parsing the .fit(s) files in the parent OscaarFrame.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
self.parent.exposureTime = self.timeList.GetValue()
def update(self, event):
'''
This updates the exposure time keyword for parsing .fit(s) files as well as the ccd gain in the init.par file.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
if self.checkParams() == True:
self.parent.ccdGain = self.params.userParams["ccd"].GetValue()
self.parent.exposureTime = self.timeList.GetValue()
string = open(os.path.join(os.path.dirname(__file__),'init.par'), 'r').read().splitlines()
stringCopy = np.copy(string)
for line in stringCopy:
if ("CCD Gain:" in line) or ("Exposure Time Keyword:" in line):
string.remove(line)
observ = open(os.path.join(os.path.dirname(__file__),'init.par'), 'w')
observ.write('\n'.join(string))
observ.write("\nCCD Gain: " + self.params.userParams["ccd"].GetValue() + "\n")
observ.write("Exposure Time Keyword: " + self.timeList.GetValue() + "\n")
def checkParams(self):
'''
        This checks that the CCD gain and exposure time keyword are valid
        before updating the init.par file.
Returns
-------
literal : bool
True if both ccd gain and exposure time keyword are valid, false otherwise.
'''
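        # Illustrative outcomes (hypothetical values):
        #   ccd="1.0", keyword="EXPTIME" (present in the header and accepted list) -> True
        #   ccd="fast"                                                             -> False ("leftbox")
        #   keyword=""                                                             -> False ("emptyKeyword")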
try:
tempCCD = float(self.params.userParams["ccd"].GetValue())
self.params.userParams["ccd"].SetValue(str(tempCCD))
timeKey = self.timeList.GetValue().strip()
if timeKey == "":
self.IP = InvalidParameter(timeKey, self, -1, stringVal="emptyKeyword")
return False
elif not timeKey in self.allKeys:
self.IP = InvalidParameter(timeKey, self, -1, stringVal="invalidKeyword")
return False
elif (not timeKey in self.unionKeys) and (timeKey in self.allKeys):
self.IP = InvalidParameter(timeKey, self, -1, stringVal="emailKeyword")
return False
self.timeList.SetValue(timeKey)
except ValueError:
self.IP = InvalidParameter(self.params.userParams["ccd"].GetValue(),self,-1, stringVal="leftbox", secondValue="ccd")
return False
return True
def create_menu(self):
'''
This method creates the menu bars that are at the top of the observatory frame.
Notes
-----
This method has no input or return parameters. It will simply be used as self.create_menu()
when in the initialization method for an instance of this frame.
'''
menubar = wx.MenuBar()
menu_file = wx.Menu()
m_quit = menu_file.Append(wx.ID_EXIT, "Quit\tCtrl+Q", "Quit this application.")
self.Bind(wx.EVT_MENU, self.on_exit, m_quit)
menubar.Append(menu_file, "File")
self.SetMenuBar(menubar)
def on_exit(self,event):
'''
This method defines the action quit from the menu. It closes the frame.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
self.Destroy()
def onDestroy(self,event):
'''
Whenever this frame is closed, this secondary method updates a variable in the parent
class to make sure that it knows there is no active instance of this frame.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
self.parent.loadObservatoryFrame = False
class ExtraRegions(wx.Frame):
'''
This frame allows a user to append multiple regions files and their respective reference files as sets to the
regions file text box in the parent OscaarFrame.
'''
def __init__(self, parent, objectID):
'''
This method defines the initialization of this class.
'''
if sys.platform == "win32":
self.fontType = wx.Font(9, wx.DEFAULT, wx.NORMAL, wx.BOLD)
else:
self.fontType = wx.Font(12, wx.DEFAULT, wx.NORMAL, wx.NORMAL)
wx.Frame.__init__(self, parent, objectID, "Extra Regions Files")
self.panel = wx.Panel(self)
self.parent = parent
self.messageFrame = False
self.IP = wx.Frame
self.titlebox = wx.StaticText(self.panel, -1, "Extra Regions Files")
self.titleFont = wx.Font(15, wx.DEFAULT, wx.NORMAL, wx.BOLD)
self.titlebox.SetFont(self.titleFont)
self.set1 = AddLCB(self.panel, -1, name="Path to Regions File: ,Path to Reference Image: ", rowNum=2, vNum=5,
hNum=5, boxName ="Set 1", font=self.fontType)
self.set2 = AddLCB(self.panel, -1, name="Path to Regions File: ,Path to Reference Image: ", rowNum=2, vNum=5,
hNum=5, boxName="Set 2", font=self.fontType)
self.set3 = AddLCB(self.panel, -1, name="Path to Regions File: ,Path to Reference Image: ", rowNum=2, vNum=5,
hNum=5, boxName="Set 3", font=self.fontType)
self.set4 = AddLCB(self.panel, -1, name="Path to Regions File: ,Path to Reference Image: ", rowNum=2, vNum=5,
hNum=5, boxName="Set 4", font=self.fontType)
self.set5 = AddLCB(self.panel, -1, name="Path to Regions File: ,Path to Reference Image: ", rowNum=2, vNum=5,
hNum=5, boxName="Set 5", font=self.fontType)
self.addSet1= wx.Button(self.panel, -1, label = "Add Set 1")
self.Bind(wx.EVT_BUTTON, lambda evt, lambdaStr=self.addSet1.Label: self.addSet(evt,lambdaStr), self.addSet1)
self.addSet2= wx.Button(self.panel, -1, label = "Add Set 2")
self.Bind(wx.EVT_BUTTON, lambda evt, lambdaStr=self.addSet2.Label: self.addSet(evt,lambdaStr), self.addSet2)
self.addSet3= wx.Button(self.panel, -1, label = "Add Set 3")
self.Bind(wx.EVT_BUTTON, lambda evt, lambdaStr=self.addSet3.Label: self.addSet(evt,lambdaStr), self.addSet3)
self.addSet4= wx.Button(self.panel, -1, label = "Add Set 4")
self.Bind(wx.EVT_BUTTON, lambda evt, lambdaStr=self.addSet4.Label: self.addSet(evt,lambdaStr), self.addSet4)
self.addSet5= wx.Button(self.panel, -1, label = "Add Set 5")
self.Bind(wx.EVT_BUTTON, lambda evt, lambdaStr=self.addSet5.Label: self.addSet(evt,lambdaStr), self.addSet5)
self.vbox2 = wx.BoxSizer(wx.VERTICAL)
self.vbox2.Add(self.addSet1, 0, flag = wx.ALIGN_CENTER | wx.ALL, border = 35)
self.vbox2.Add(self.addSet2, 0, flag = wx.ALIGN_CENTER | wx.ALL, border = 35)
self.vbox2.Add(self.addSet3, 0, flag = wx.ALIGN_CENTER | wx.ALL, border = 35)
self.vbox2.Add(self.addSet4, 0, flag = wx.ALIGN_CENTER | wx.ALL, border = 35)
self.vbox2.Add(self.addSet5, 0, flag = wx.ALIGN_CENTER | wx.ALL, border = 35)
self.hbox1 = wx.BoxSizer(wx.HORIZONTAL)
self.hbox1.Add(self.set1, 0, flag=wx.ALIGN_CENTER | wx.ALL, border = 5)
self.hbox1.Add(self.addSet1, 0, flag=wx.ALIGN_CENTER | wx.ALL, border = 5)
self.hbox2 = wx.BoxSizer(wx.HORIZONTAL)
self.hbox2.Add(self.set2, 0, flag=wx.ALIGN_CENTER | wx.ALL, border = 5)
self.hbox2.Add(self.addSet2, 0, flag=wx.ALIGN_CENTER | wx.ALL, border = 5)
self.hbox3 = wx.BoxSizer(wx.HORIZONTAL)
self.hbox3.Add(self.set3, 0, flag=wx.ALIGN_CENTER | wx.ALL, border = 5)
self.hbox3.Add(self.addSet3, 0, flag=wx.ALIGN_CENTER | wx.ALL, border = 5)
self.hbox4 = wx.BoxSizer(wx.HORIZONTAL)
self.hbox4.Add(self.set4, 0, flag=wx.ALIGN_CENTER | wx.ALL, border = 5)
self.hbox4.Add(self.addSet4, 0, flag=wx.ALIGN_CENTER | wx.ALL, border = 5)
self.hbox5 = wx.BoxSizer(wx.HORIZONTAL)
self.hbox5.Add(self.set5, 0, flag=wx.ALIGN_CENTER | wx.ALL, border = 5)
self.hbox5.Add(self.addSet5, 0, flag=wx.ALIGN_CENTER | wx.ALL, border = 5)
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(self.titlebox, 0, flag = wx.ALIGN_CENTER | wx.ALL, border = 5)
self.vbox.Add(self.hbox1, 0, flag = wx.ALIGN_CENTER | wx.LEFT | wx.RIGHT, border = 10)
self.vbox.Add(self.hbox2, 0, flag = wx.ALIGN_CENTER | wx.LEFT | wx.RIGHT, border = 10)
self.vbox.Add(self.hbox3, 0, flag = wx.ALIGN_CENTER | wx.LEFT | wx.RIGHT, border = 10)
self.vbox.Add(self.hbox4, 0, flag = wx.ALIGN_CENTER | wx.LEFT | wx.RIGHT, border = 10)
self.vbox.Add(self.hbox5, 0, flag = wx.ALIGN_CENTER | wx.LEFT | wx.RIGHT, border = 10)
self.Bind(wx.EVT_WINDOW_DESTROY, self.onDestroy)
self.create_menu()
self.CreateStatusBar()
self.panel.SetSizer(self.vbox)
self.vbox.Fit(self)
self.Center()
self.Show()
def addSet(self, event, stringName):
'''
        This is the method that adds a regions file and reference file set to the regions file
        box in the parent frame.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
stringName : string
A string to differentiate the different sets which a user could be trying to add.
Notes
-----
There is no return, but upon successful completion a set in the form (somefile.reg,referencefile.fits;)
will be added to the regions file box in the parent frame.
'''
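        # A validated set is appended to the parent regions box as "regions.reg,reference.fits;"
        # provided neither file already belongs to another set and the reference image is
        # listed among the Data Images.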
if stringName == "Add Set 1":
useSet = self.set1
elif stringName == "Add Set 2":
useSet = self.set2
elif stringName == "Add Set 3":
useSet = self.set3
elif stringName == "Add Set 4":
useSet = self.set4
elif stringName == "Add Set 5":
useSet = self.set5
regions = useSet.boxList[1].GetValue().strip()
reference = useSet.boxList[2].GetValue().strip()
if self.SetCheck(regions, reference) == True:
useSet.boxList[1].SetValue(regions)
useSet.boxList[2].SetValue(reference)
setString = regions + "," + reference
dataImages = self.parent.paths.boxList[3].GetValue().strip().split(",")
regionsBox = self.parent.paths.boxList[4].GetValue()
uniqueSet = True
uniqueReg = True
uniqueRef = True
for eachSet in regionsBox.split(";"):
if len(eachSet.split(",")) == 2:
tempReg = eachSet.split(",")[0].strip()
tempRef = eachSet.split(",")[1].strip()
if tempReg == regions and tempRef == reference:
uniqueSet = False
break
elif tempReg == regions:
uniqueReg = False
break
elif tempRef == reference:
uniqueRef = False
break
if uniqueSet == False:
self.IP = InvalidParameter("", self, -1, stringVal="setExists")
elif uniqueReg == False:
tempString = "\nRegions: " + regions + "\nReference: " + reference + "\nBecause ---" + "\nRegions: " + \
tempReg + "\nIs already associated with:\nReference: " + tempRef
self.IP = InvalidParameter(tempString, self, -1, stringVal="regionsDup")
elif uniqueRef == False:
tempString = "\nRegions: " + regions + "\nReference: " + reference + "\nBecause ---" + "\nRegions: " + \
tempReg + "\nIs already associated with this reference file."
self.IP = InvalidParameter(tempString, self, -1, stringVal="referenceImageDup")
elif all(reference != temp for temp in dataImages):
self.IP = InvalidParameter("\nRegions: "+ regions + "\nReference: " + reference, self, -1, stringVal="invalidRefExist")
else:
regionsBox += setString + ";"
self.parent.paths.boxList[4].SetValue(regionsBox)
self.IP = InvalidParameter("", self, -1, stringVal="regionsUpdate")
def SetCheck(self, reg, ref):
'''
This method checks whether or not the regions file and reference file given are valid files
for their respective roles.
Parameters
----------
reg : string
A value from a regions file text box that needs to be checked.
ref : string
A value from a reference file text box that needs to be checked.
Returns
-------
literal : bool
True if both files are valid, false otherwise.
'''
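        # Illustrative outcomes (hypothetical paths; the files must exist on disk):
        #   reg="field.reg", ref="ref.fits" -> True
        #   reg="field.txt", ref="ref.fits" -> False (regions file must end in .reg)
        #   reg="field.reg", ref=""         -> False (empty reference path)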
if reg == "":
self.IP = InvalidParameter(reg, self, -1, stringVal="regionsError1")
return False
elif ref == "":
self.IP = InvalidParameter(ref, self, -1, stringVal="regionsError1")
return False
if len(glob(reg)) != 1:
tempString = reg
if len(reg.split(",")) > 1:
tempString = ""
for string in reg.split(","):
if string == "":
tempString += ","
else:
tempString += "\n" + string.strip()
self.IP = InvalidParameter(tempString, self, -1, stringVal="regionsError2")
return False
elif len(glob(ref)) != 1:
tempString = ref
if len(ref.split(",")) > 1:
tempString = ""
for string in ref.split(","):
if string == "":
tempString += ","
else:
tempString += "\n" + string.strip()
self.IP = InvalidParameter(tempString, self, -1, stringVal="regionsError2")
return False
elif reg.lower().endswith(".reg") == False:
self.IP = InvalidParameter(reg, self, -1, stringVal="regionsError3")
return False
elif ref.lower().endswith(".fits") == False and ref.lower().endswith(".fit") == False:
self.IP = InvalidParameter(ref, self, -1, stringVal="regionsError4")
return False
return True
def create_menu(self):
'''
This method creates the menu bars that are at the top of the extra regions frame.
Notes
-----
This method has no input or return parameters. It will simply be used as self.create_menu()
when in the initialization method for an instance of this frame.
'''
menubar = wx.MenuBar()
menu_file = wx.Menu()
m_quit = menu_file.Append(wx.ID_EXIT, "Quit\tCtrl+Q", "Quit this application.")
self.Bind(wx.EVT_MENU, self.on_exit, m_quit)
menubar.Append(menu_file, "File")
self.SetMenuBar(menubar)
def on_exit(self,event):
'''
This method defines the action quit from the menu. It closes the frame.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
self.Destroy()
def onDestroy(self,event):
'''
Whenever this frame is closed, this secondary method updates a variable in the parent
class to make sure that it knows there is no active instance of this frame.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
self.parent.extraRegionsOpen = False
class MasterFlatFrame(wx.Frame):
'''
This frame allows the user to create a master flat using their own images.
'''
def __init__(self, parent, objectID):
'''
This method defines the initialization of this class.
'''
wx.Frame.__init__(self, parent, objectID, "Master Flat Maker")
self.panel = wx.Panel(self)
self.parent = parent
self.overWrite = False
self.messageFrame = False
self.IP = wx.Frame
self.titlebox = wx.StaticText(self.panel, -1, 'OSCAAR: Master Flat Maker')
self.titleFont = wx.Font(15, wx.DEFAULT, wx.NORMAL, wx.BOLD)
self.titlebox.SetFont(self.titleFont)
self.path1 = AddLCB(self.panel, -1, name="Path to Flat Images: ", multFiles=True, saveType=None)
self.path2 = AddLCB(self.panel, -1, name="Path to Dark Flat Images: ", multFiles=True, saveType=None)
self.path3 = AddLCB(self.panel, -1, name="Path to Save Master Flat: ", saveType=wx.FD_SAVE)
tupleList = [('rbTrackPlot',"","On","Off")]
self.plotBox = ParameterBox(self.panel,-1,tupleList, name = "Plots")
tupleList = [('rbFlatType',"","Standard","Twilight")]
self.flatBox = ParameterBox(self.panel,-1,tupleList, name = "Flat Type")
self.runButton = wx.Button(self.panel, -1, label = "Run")
self.Bind(wx.EVT_BUTTON, self.run, self.runButton)
self.hbox = wx.BoxSizer(wx.HORIZONTAL)
self.hbox.Add(self.plotBox, 0, flag = wx.ALIGN_CENTER | wx.ALL, border = 10)
self.hbox.Add(self.flatBox, 0, flag = wx.ALIGN_CENTER | wx.ALL, border = 10)
self.hbox.Add(self.runButton, 0, flag = wx.ALIGN_CENTER | wx.ALL, border = 10)
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(self.titlebox, 0, flag = wx.ALIGN_CENTER | wx.ALL, border = 10)
self.vbox.Add(self.path1, 0, flag = wx.ALIGN_CENTER | wx.ALL, border = 10)
self.vbox.Add(self.path2, 0, flag = wx.ALIGN_CENTER | wx.ALL, border = 10)
self.vbox.Add(self.path3, 0, flag = wx.ALIGN_CENTER | wx.ALL, border = 10)
self.vbox.Add(self.hbox, 0, flag = wx.ALIGN_CENTER | wx.ALL, border = 10)
self.Bind(wx.EVT_WINDOW_DESTROY, self.onDestroy)
self.create_menu()
self.CreateStatusBar()
self.panel.SetSizer(self.vbox)
self.vbox.Fit(self)
self.Center()
self.Show()
def run(self,event):
'''
        This runs either the standardFlatMaker or twilightFlatMaker method from systematics.py to create a master flat.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
Notes
-----
        There is no return; on successful completion a window opens showing the resulting master flat.
'''
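        # The flat and dark-flat file lists are validated first; the save path gets a
        # ".fits" suffix if needed, and an existing output file triggers the OverWrite
        # confirmation frame instead of writing immediately.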
path = self.path3.boxList[1].GetValue().strip()
self.flatImages = self.checkFileInputs(self.path1.boxList[1].GetValue(), self.path1.boxList[1])
self.darkFlatImages = self.checkFileInputs(self.path2.boxList[1].GetValue(), self.path2.boxList[1])
if self.flatImages != "":
self.IP = InvalidParameter(self.flatImages, self, -1, stringVal="flat1")
elif self.darkFlatImages != "":
self.IP = InvalidParameter(self.darkFlatImages, self, -1, stringVal="flat2")
elif not path:
self.IP = InvalidParameter(str(path), self, -1, stringVal="flat3")
        elif not os.path.isdir(path[:path.rfind(os.sep)]) or \
not len(path) > (len(path[:path.rfind(os.sep)]) + 1):
self.IP = InvalidParameter(path, self, -1, stringVal="flat3")
else:
self.flatImages = []
self.darkFlatImages = []
for pathname in self.path1.boxList[1].GetValue().split(','):
self.flatImages += glob(pathname)
for pathname in self.path2.boxList[1].GetValue().split(','):
self.darkFlatImages += glob(pathname)
if not path.lower().endswith('.fits') and not path.lower().endswith('.fit'):
path += '.fits'
pathCorrected = path.replace('/', os.sep)
outfolder = pathCorrected[:pathCorrected.rfind(os.sep)] + os.sep + '*'
self.plotCheck = self.plotBox.userParams['rbTrackPlot'].GetValue()
if pathCorrected in glob(outfolder):
if self.overWrite == False:
OverWrite(self, -1, "Overwrite Master Flat", pathCorrected, "MasterFlat")
self.overWrite = True
else:
if self.flatBox.userParams['rbFlatType'].GetValue() == True:
systematics.standardFlatMaker(self.flatImages, self.darkFlatImages, self.path3.boxList[1].GetValue(),
self.plotCheck)
else:
systematics.twilightFlatMaker(self.flatImages, self.darkFlatImages, self.path3.boxList[1].GetValue(),
self.plotCheck)
def checkFileInputs(self,array,box):
'''
This method checks to make sure that the files entered in a text box in the master flat frame are valid.
Parameters
----------
array : string
A list of all of the files that need to be checked.
box : wx.TextCtrl
The box that gets refreshed with a string of the valid files.
Returns
-------
errorString : string
A list of all the files that were invalid.
Notes
-----
If `errorString` returns '' (empty), that means that all the files were valid.
'''
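        # This mirrors OscaarFrame.checkFileInputs, but writes the cleaned file list
        # back into the wx.TextCtrl passed in as `box` rather than into self.paths.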
errorString = ""
setValueString = ""
array2 = []
smallArray = ""
for element in array.split(","):
element = element.strip()
if element.lower().endswith(os.sep):
tempElement = element + "*.fit"
element += "*.fits"
smallArray = "-1"
if smallArray == "":
if len(glob(element)) < 1:
errorString += element
elif len(glob(element)) > 1:
for element2 in glob(element):
if element2.lower().endswith(".fit") or element2.lower().endswith(".fits"):
array2.append(element2)
else:
errorString += "\n" + element2
elif not element.lower().endswith(".fit") and not element.lower().endswith(".fits"):
errorString += "\n" + element
else:
array2.append(glob(element)[0])
else:
if len(glob(tempElement)) < 1 and len(glob(element)) < 1:
errorString += "\n" + tempElement + ",\n" + element
else:
if len(glob(tempElement)) >= 1:
for element2 in glob(tempElement):
array2.append(element2)
if len(glob(element)) >= 1:
for element2 in glob(element):
array2.append(element2)
if not array:
return "No Values Entered"
else:
if errorString == "":
setValueString = ""
uniqueArray = np.unique(array2).tolist()
for eachString in uniqueArray:
setValueString += eachString + ","
box.SetValue(setValueString.rpartition(",")[0])
return errorString
def create_menu(self):
'''
This method creates the menu bars that are at the top of the master flat frame.
Notes
-----
This method has no input or return parameters. It will simply be used as self.create_menu()
when in the initialization method for an instance of this frame.
'''
menubar = wx.MenuBar()
menu_file = wx.Menu()
m_quit = menu_file.Append(wx.ID_EXIT, "Quit\tCtrl+Q", "Quit this application.")
self.Bind(wx.EVT_MENU, self.on_exit, m_quit)
menubar.Append(menu_file, "File")
self.SetMenuBar(menubar)
def on_exit(self,event):
'''
This method defines the action quit from the menu. It closes the frame.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
self.Destroy()
def onDestroy(self,event):
'''
Whenever this frame is closed, this secondary method updates a variable in the parent
class to make sure that it knows there is no active instance of this frame.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
self.parent.loadMasterFlat = False
class AboutFrame(wx.Frame):
'''
This is a frame about OSCAAR and its contributors.
'''
def __init__(self, parent, objectID):
'''
This method defines the initialization of this class.
'''
wx.Frame.__init__(self, parent, objectID, "About OSCAAR")
self.panel = wx.Panel(self)
self.parent = parent
self.static_bitmap = wx.StaticBitmap(self.panel, style=wx.ALIGN_CENTER)
self.logo = wx.Image(os.path.join(os.path.dirname(os.path.abspath(__file__)),'images/logo4noText.png'),
wx.BITMAP_TYPE_ANY)
self.bitmap = wx.BitmapFromImage(self.logo)
self.static_bitmap.SetBitmap(self.bitmap)
titleText = '\n'.join(['OSCAAR 2.0 beta',\
'Open Source differential photometry Code for Amateur Astronomical Research',\
'Created by Brett M. Morris (NASA GSFC/UMD)\n'])
contribText = '\n'.join(['Other Contributors:',\
'Daniel Galdi (UMD)',\
'Luuk Visser (LU/TUD)',\
'Nolan Matthews (UMD)',\
'Dharmatej Mikkilineni (UMD)',\
'Harley Katz (UMD)',\
'Sam Gross (UMD)',\
'Naveed Chowdhury (UMD)',\
'Jared King (UMD)',\
'Steven Knoll (UMD)'])
self.titleText = wx.StaticText(self.panel, -1, label = titleText, style = wx.ALIGN_CENTER)
self.contribText = wx.StaticText(self.panel, -1, label = contribText, style = wx.ALIGN_CENTER)
self.viewRepoButton = wx.Button(self.panel, -1, label = "Open Code Repository (GitHub)")
self.exitButton = wx.Button(self.panel, -1, label = "Close")
self.Bind(wx.EVT_BUTTON, lambda evt: self.parent.openLink(evt, "https://github.com/OSCAAR/OSCAAR"),
self.viewRepoButton)
self.exitButton.Bind(wx.EVT_BUTTON, self.exit)
self.buttonBox = wx.BoxSizer(wx.HORIZONTAL)
self.buttonBox.Add(self.viewRepoButton, 0, flag = wx.ALIGN_CENTER | wx.RIGHT, border = 20)
self.buttonBox.Add(self.exitButton, 0, flag = wx.ALIGN_CENTER)
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(self.static_bitmap, 0, flag = wx.ALIGN_CENTER | wx.ALL, border = 10)
self.vbox.Add(self.titleText, 0, flag = wx.ALIGN_CENTER | wx.ALL, border = 10)
self.vbox.Add(self.contribText, 0, flag = wx.ALIGN_CENTER | wx.ALL, border = 10)
self.vbox.Add(self.buttonBox, 0, flag = wx.ALIGN_CENTER | wx.ALL, border = 10)
self.Bind(wx.EVT_WINDOW_DESTROY, self.onDestroy)
self.vbox.AddSpacer(10)
self.panel.SetSizer(self.vbox)
self.vbox.Fit(self)
self.Center()
self.Show()
def exit(self, event):
'''
This method defines the action quit for the button `close`. It closes the frame.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
self.Destroy()
def onDestroy(self, event):
'''
Whenever this frame is closed, this secondary method updates a variable in the parent
class to make sure that it knows there is no active instance of this frame.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
self.parent.aboutOpen = False
class OverWrite(wx.Frame):
'''
    This class creates a frame that asks the user to confirm whether or not a file should be
    overwritten. Based on the user's response, different methods are activated.
'''
def __init__(self, parent, objectID, title, path, option):
'''
This method defines the initialization of this class.
'''
wx.Frame.__init__(self, parent, objectID, title)
self.panel = wx.Panel(self)
self.parent = parent
self.path = path
if path == "":
self.text = wx.StaticText(self.panel, -1,
"Are you using precorrected images?\n\nYou have left " +
"either the path to Dark Frames or the " +
"Path to the Master Flat empty.\nIf you are " +
"using pre-processed Data Images, press "+
"Yes and OSCAAR will run without \ndark " +
"and flat corrections. If you need to enter "+
"these exposures, press No to return.")
else:
self.text = wx.StaticText(self.panel, -1, "Are you sure you want to overwrite\n" + self.path + "?")
self.yesButton = wx.Button(self.panel, label = "Yes")
self.noButton = wx.Button(self.panel,label = "No")
self.SetFocus()
if option == "MasterFlat":
self.Bind(wx.EVT_BUTTON, self.onMasterFlat, self.yesButton)
elif option == "Output File":
self.Bind(wx.EVT_BUTTON, self.onOutputFile, self.yesButton)
elif option == "PreprocessedImages":
self.Bind(wx.EVT_BUTTON, self.onPreprocessedImages, self.yesButton)
self.Bind(wx.EVT_BUTTON, self.onNO, self.noButton)
self.sizer0 = wx.FlexGridSizer(rows=2, cols=1)
self.buttonBox = wx.BoxSizer(wx.HORIZONTAL)
self.buttonBox.Add(self.yesButton,0,wx.ALIGN_CENTER|wx.ALL,5)
self.buttonBox.Add(self.noButton,0,wx.ALIGN_CENTER|wx.ALL,5)
self.hbox = wx.BoxSizer(wx.HORIZONTAL)
self.hbox.Add(self.sizer0,0, wx.ALIGN_CENTER|wx.ALL,5)
self.sizer0.Add(self.text,0,wx.ALIGN_CENTER|wx.ALL,5)
self.sizer0.Add(self.buttonBox, 0,wx.ALIGN_CENTER|wx.ALL,5)
self.Bind(wx.EVT_WINDOW_DESTROY, self.doNothing)
self.panel.SetSizer(self.hbox)
self.hbox.Fit(self)
self.Center()
self.Show()
def onMasterFlat(self,event):
'''
        When the user selects `yes` and the parent frame is the master flat frame, a new master flat
        is created, overwriting the existing file.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
self.Destroy()
self.parent.overWrite = False
os.remove(self.path)
if self.parent.flatBox.userParams['rbFlatType'].GetValue() == True:
systematics.standardFlatMaker(self.parent.flatImages, self.parent.darkFlatImages,
self.parent.path3.boxList[1].GetValue(), self.parent.plotCheck)
else:
systematics.twilightFlatMaker(self.parent.flatImages, self.parent.darkFlatImages,
self.parent.path3.boxList[1].GetValue(), self.parent.plotCheck)
def onOutputFile(self,event):
'''
        This method handles overwriting the existing .pkl file that was specified in the output path
        text box in the parent OSCAAR frame.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
self.Destroy()
self.parent.overWrite = False
diffPhotCall = "from oscaar import differentialPhotometry"
subprocess.check_call(['python','-c',diffPhotCall])
if self.parent.radioBox.userParams["rbFitAfterPhot"].GetValue() == True:
wx.CallAfter(self.parent.createFrame)
def onPreprocessedImages(self, event):
'''
        This method reminds the user that they are about to run the
        differential photometry script without any dark frames or a master
        flat, and confirms that they want to continue.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
self.Destroy()
self.parent.preprocessedImagesFrame = False
if os.path.isfile(self.parent.outputFile) or os.path.isfile(self.parent.outputFile + '.pkl'):
if self.parent.overWrite == False:
OverWrite(self.parent, -1, "Overwrite Output File", self.parent.outputFile, "Output File")
self.parent.overWrite = True
else:
diffPhotCall = "from oscaar import differentialPhotometry"
subprocess.check_call(['python','-c',diffPhotCall])
if self.parent.radioBox.userParams["rbFitAfterPhot"].GetValue() == True:
wx.CallAfter(self.parent.createFrame)
def onNO(self, event):
'''
When a user presses the `no` button, this method updates a variable in the parent
class to make sure that it knows there is no active instance of this frame. It then
will close the frame.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
if self.path == "":
self.parent.preprocessedImagesFrame = False
else:
self.parent.overWrite = False
self.Destroy()
def doNothing(self,event):
'''
Whenever this frame is closed, this secondary method updates a variable in the parent
class to make sure that it knows there is no active instance of this frame.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
if self.path == "":
self.parent.preprocessedImagesFrame = False
else:
self.parent.overWrite = False
pass
class EphemerisFrame(wx.Frame):
'''
This frame will allow users to calculate the positions of different planets in the sky
for a given time frame at a specified observatory.
'''
def __init__(self, parent, objectID):
'''
This method defines the initialization of this class.
'''
wx.Frame.__init__(self, parent, objectID, "Ephemerides")
self.panel = wx.Panel(self)
self.parent = parent
self.messageFrame = False
self.IP = wx.Frame
self.titlebox = wx.StaticText(self.panel, -1, 'Ephemeris Calculator')
self.titleFont = wx.Font(15, wx.DEFAULT, wx.NORMAL, wx.BOLD)
self.titlebox.SetFont(self.titleFont)
self.titlebox2 = wx.StaticText(self.panel, -1, 'Advanced Options')
self.titlebox2.SetFont(self.titleFont)
if sys.platform == "win32":
self.fontType = wx.Font(9, wx.DEFAULT, wx.NORMAL, wx.BOLD)
else:
self.fontType = wx.Font(12, wx.DEFAULT, wx.NORMAL, wx.NORMAL)
self.calculateButton = wx.Button(self.panel, -1, label = "Calculate")
self.Bind(wx.EVT_BUTTON, self.calculate, self.calculateButton)
obsList = glob(os.path.join(os.path.dirname(os.path.abspath(oscaar.__file__)),'extras','eph','observatories','*.par'))
self.nameList = {}
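# Map each observatory's display name (the "name:" entry in its .par file) to that file's path
# so its saved parameters can be reloaded when the name is picked from the drop-down list.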
for currentFile in obsList:
for line in open(currentFile,'r').read().splitlines():
if line.split(":")[0] == "name":
self.nameList[line.split(":")[1].strip()] = currentFile
self.obsLabel = wx.StaticText(self.panel, -1, 'Select Observatory: ')
self.obsLabel.SetFont(self.fontType)
self.obsList = wx.ComboBox(self.panel, value = 'Observatories',
choices = sorted(self.nameList.keys()) + ["Enter New Observatory"])
self.obsList.Bind(wx.EVT_COMBOBOX, self.update)
self.dropBox = wx.BoxSizer(wx.HORIZONTAL)
self.dropBox.Add(self.obsLabel, 0, flag = wx.ALIGN_CENTER | wx.LEFT, border = 10)
self.dropBox.Add(self.obsList, 0, flag = wx.ALIGN_CENTER)
tupleList = [('observatoryName',"Name of Observatory: ","",""),
('fileName',"Enter File Name: ","",""),
('obsStart',"Start of Observation, UT (YYYY/MM/DD): ",
"Enter a date in the correct format here.",datetime.date.today().strftime("%Y/%m/%d")),
('obsEnd',"End of Observation, UT (YYYY/MM/DD): ",
"Enter a date in the correct format here.",(datetime.datetime.now()+datetime.timedelta(days=7)
).strftime("%Y/%m/%d")),
('upperLimit',"Apparent Mag. Upper Limit: ","","0.0"),
('lowerLimit',"Depth Lower Limit: ","","0.0")]
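# Each tuple above appears to be (parameter key, label text, hint text, default value) as
# consumed by ParameterBox; the role of the third field is an assumption based on the date
# entries here.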
self.leftBox = ParameterBox(self.panel,-1,tupleList, rows=6, cols=2, vNum = 5, hNum = 15, font = self.fontType)
tupleList = [("latitude","Latitude (deg:min:sec): ","","00:00:00"),
("longitude","Longitude (deg:min:sec): ","","00:00:00"),
("elevation","Observatory Elevation (m): ","","0.0"),
("temperature","Temperature (degrees C): ","","0.0"),
("lowerElevation","Lower Elevation Limit (deg:min:sec): ","","00:00:00")]
self.leftBox2 = ParameterBox(self.panel, -1, tupleList, rows=5, cols=2, vNum = 5, hNum = 15, font =self.fontType)
self.twilightChoices = {}
self.twilightChoices["Civil Twilight (-6 degrees)"] = "-6"
self.twilightChoices["Nautical Twilight (-12 degrees)"] = "-12"
self.twilightChoices["Astronomical Twilight (-18 degrees)"] = "-18"
self.twilightLabel = wx.StaticText(self.panel, -1, "Select Twilight Type: ")
self.twilightLabel.SetFont(self.fontType)
self.twilightList = wx.ComboBox(self.panel, value = "Civil Twilight (-6 degrees)",
choices = sorted(self.twilightChoices.keys()))
self.dropBox2 = wx.BoxSizer(wx.HORIZONTAL)
self.dropBox2.Add(self.twilightLabel, 0, flag = wx.ALIGN_CENTER | wx.LEFT, border = 10)
self.dropBox2.Add(self.twilightList, 0, flag = wx.ALIGN_CENTER)
tupleList = [('rbBand',"","V","K")]
self.band = ParameterBox(self.panel,-1,tupleList, name = "Band Type")
tupleList = [('rbShowLT',"","On","Off")]
self.showLT = ParameterBox(self.panel,-1,tupleList, name = "Show Local Times", secondButton = True)
self.botRadioBox = wx.BoxSizer(wx.HORIZONTAL)
self.botRadioBox.Add(self.showLT, 0, flag = wx.ALIGN_CENTER | wx.LEFT, border = 10)
self.botRadioBox.Add(self.band, 0, flag = wx.ALIGN_CENTER | wx.LEFT, border = 15)
tupleList = [('rbCalcEclipse',"","True","False")]
self.calcEclipseBox = ParameterBox(self.panel,-1,tupleList, name = "Calculate Eclipses", secondButton = True)
tupleList = [('rbHtmlOut',"","True", "False")]
self.htmlBox = ParameterBox(self.panel,-1,tupleList, name = "HTML Out")
tupleList = [('rbTextOut',"","True","False")]
self.textBox = ParameterBox(self.panel,-1,tupleList, name = "Text Out")
tupleList = [('rbCalcTransits',"","True","False")]
self.calcTransitsBox = ParameterBox(self.panel,-1,tupleList, name = "Calculate Transits")
self.radioBox = wx.BoxSizer(wx.VERTICAL)
self.radioBox.Add(self.calcTransitsBox, 0, flag = wx.ALIGN_CENTER | wx.ALL, border = 5)
self.radioBox.Add(self.calcEclipseBox, 0, flag = wx.ALIGN_CENTER | wx.ALL, border = 5)
self.radioBox.Add(self.htmlBox, 0, flag = wx.ALIGN_CENTER | wx.ALL, border = 5)
self.radioBox.Add(self.textBox, 0, flag = wx.ALIGN_CENTER | wx.ALL, border = 5)
self.topBox = wx.BoxSizer(wx.HORIZONTAL)
self.topBox.Add(self.leftBox, 0, flag = wx.ALIGN_CENTER | wx.LEFT, border = 5)
self.topBox.Add(self.calculateButton, 0, flag = wx.ALIGN_CENTER | wx.RIGHT | wx.LEFT, border = 5)
self.leftVertBox = wx.BoxSizer(wx.VERTICAL)
self.leftVertBox.Add(self.leftBox2, 0, flag = wx.ALIGN_CENTER | wx.ALL, border = 5)
self.leftVertBox.Add(self.dropBox2, 0, flag = wx.ALIGN_CENTER | wx.ALL, border = 5)
self.leftVertBox.Add(self.botRadioBox, 0, flag = wx.ALIGN_CENTER | wx.ALL, border = 5)
self.botBox = wx.BoxSizer(wx.HORIZONTAL)
self.botBox.Add(self.leftVertBox, 0, flag = wx.ALIGN_CENTER | wx.ALL, border = 5)
self.botBox.Add(self.radioBox, 0, flag = wx.ALIGN_CENTER | wx.ALL, border = 5)
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(self.titlebox, 0, flag = wx.ALIGN_CENTER | wx.TOP, border = 5)
self.vbox.Add(self.dropBox, 0, flag = wx.ALIGN_LEFT | wx.TOP, border = 10)
self.vbox.Add(self.topBox, 0, flag = wx.ALIGN_CENTER)
self.vbox.Add(self.titlebox2, 0, flag = wx.ALIGN_CENTER)
self.vbox.Add(self.botBox, 0, flag = wx.ALIGN_CENTER)
self.create_menu()
self.CreateStatusBar()
self.Bind(wx.EVT_WINDOW_DESTROY, self.onDestroy)
self.panel.SetSizer(self.vbox)
self.vbox.Fit(self)
self.Center()
self.Show()
def calculate(self, event):
'''
After checking that all of the entered parameters are valid, this method runs the
calculateEphemerides method from the eph.py file to compute the transit and eclipse times for
different planets.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
Notes
-----
On successful completion a new window will open in the default browser for your machine with the
ephemeris chart open.
'''
try:
import oscaar.extras.eph.calculateEphemerides as eph
import ephem
ephem.sun_radius
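# Accessing ephem.sun_radius is presumably just a sanity check that pyephem loaded correctly
# (and keeps the import from looking unused); a failed import is caught by the handler below.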
if self.parameterCheck() == True:
if self.parent.singularOccurance == 0 and self.showLT.userParams["rbShowLT"].GetValue():
self.parent.singularOccurance = 1
self.IP = InvalidParameter("", self, -1, stringVal="warnError")
else:
outputPath = str(os.path.join(os.path.dirname(os.path.abspath(oscaar.__file__)),
'extras','eph','ephOutputs','eventReport.html'))
path = os.path.join(os.path.dirname(os.path.abspath(oscaar.__file__)),
'extras','eph','observatories', self.leftBox.userParams["fileName"].GetValue() + '.par')
if self.name not in self.nameList:
self.nameList[self.name] = path
self.obsList.Append(self.name)
self.saveFile(path)
eph.calculateEphemerides(path)
if self.htmlBox.userParams["rbHtmlOut"].GetValue() == True:
webbrowser.open_new_tab("file:"+2*os.sep+outputPath)
except ImportError:
self.IP = InvalidParameter("", self, -1, stringVal="importError")
def parameterCheck(self):
'''
This is a local method for this class that checks to make sure all of the
parameters that can be manipulated by the user are valid.
Returns
-------
literal : bool
False if any of the parameters are invalid, true otherwise.
'''
self.name = self.leftBox.userParams["observatoryName"].GetValue().strip()
self.fileName = self.leftBox.userParams["fileName"].GetValue().strip()
self.latitude = self.leftBox2.userParams["latitude"].GetValue().strip()
self.longitude = self.leftBox2.userParams["longitude"].GetValue().strip()
self.elevation = self.leftBox2.userParams["elevation"].GetValue().strip()
self.temperature = self.leftBox2.userParams["temperature"].GetValue().strip()
self.lowerElevation = self.leftBox2.userParams["lowerElevation"].GetValue().strip()
self.startingDate = self.leftBox.userParams["obsStart"].GetValue().strip()
self.endingDate = self.leftBox.userParams["obsEnd"].GetValue().strip()
self.upperLimit = self.leftBox.userParams["upperLimit"].GetValue().strip()
self.lowerLimit = self.leftBox.userParams["lowerLimit"].GetValue().strip()
self.twilight = self.twilightList.GetValue().strip()
if self.name == "" or self.name == "Enter the name of the Observatory":
self.IP = InvalidParameter(self.name, self, -1, stringVal="obsName")
return False
elif self.fileName == "" or self.fileName == "Enter the name of the file":
self.IP = InvalidParameter(self.fileName, self, -1, stringVal="obsFile")
return False
years = []
months = []
days = []
for dateArray,value in [(self.startingDate.split("/"),self.startingDate),
(self.endingDate.split("/"),self.endingDate)]:
if len(dateArray) != 3:
self.IP = InvalidParameter(value, self, -1, stringVal="obsDate")
return False
else:
try:
year = int(dateArray[0].strip())
years.append(year)
month = int(dateArray[1].strip())
months.append(month)
day = int(dateArray[2].strip())
days.append(day)
if len(dateArray[0].strip()) != 4 or len(dateArray[1].strip()) > 2 or len(dateArray[2].strip()) > 2:
self.IP = InvalidParameter(value, self, -1, stringVal="obsDate")
return False
minYear = datetime.date.today().year - 100
maxYear = datetime.date.today().year + 100
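# Only dates within 100 years of today are accepted, and the month/day fields must be nonzero
# and no greater than 12/31 respectively.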
if year < minYear or year > maxYear or month > 12 or month < 0 or day > 31 or day < 0 or \
month == 0 or year == 0 or day == 0:
self.IP = InvalidParameter(value, self, -1, stringVal="dateRange")
return False
except ValueError:
self.IP = InvalidParameter(value, self, -1, stringVal="obsDate")
return False
if years[0] > years[1]:
self.IP = InvalidParameter(self.startingDate, self, -1, stringVal="logicalDate")
return False
elif years[0] == years[1]:
if months[0] > months[1]:
self.IP = InvalidParameter(self.startingDate, self, -1, stringVal="logicalDate")
return False
elif months[0] == months[1]:
if days[0] >= days[1]:
self.IP = InvalidParameter(self.startingDate, self, -1, stringVal="logicalDate")
return False
for coordArray, value, coordType in [(self.latitude.split(":"),self.latitude, "lat"),
(self.longitude.split(":"),self.longitude, "long")]:
if(len(coordArray) != 3):
self.IP = InvalidParameter(value, self, -1, stringVal="coordTime")
return False
else:
try:
deg = float(coordArray[0].strip())
minutes = float(coordArray[1].strip())
sec = float(coordArray[2].strip())
if coordType == "lat":
self.latitude = str(deg) + ":" + str(minutes) + ":" + str(sec)
if abs(deg) > 90.0 or minutes >= 60 or minutes < 0.0 or sec >= 60 or sec < 0.0:
self.IP = InvalidParameter(value, self, -1, stringVal="coordRange")
return False
elif coordType == "long":
self.longitude = str(deg) + ":" + str(minutes) + ":" + str(sec)
if abs(deg) > 180.0 or minutes >= 60 or minutes < 0.0 or sec >= 60 or sec < 0.0:
self.IP = InvalidParameter(value, self, -1, stringVal="coordRange")
return False
if abs(deg) == 90 and coordType == "lat":
if minutes != 0 or sec != 0:
self.IP = InvalidParameter(value, self, -1, stringVal="coordRange")
return False
elif abs(deg) == 180 and coordType == "long":
if minutes != 0 or sec != 0:
self.IP = InvalidParameter(value, self, -1, stringVal="coordRange")
return False
except ValueError:
self.IP = InvalidParameter(value, self, -1, stringVal="coordTime")
return False
try:
tempString = "elevation"
temp1 = float(self.elevation)
tempString = "temperature"
temp2 = float(self.temperature)
tempString = "apparent magnitude upper limit"
temp3 = float(self.upperLimit)
tempString = "depth lower limit"
temp4 = float(self.lowerLimit)
tempString = "lower elevation limit"
if temp3: pass
stripElevation = self.lowerElevation.split(":")
if len(stripElevation) != 3:
self.IP = InvalidParameter(self.lowerElevation, self, -1, stringVal="lowerElevation")
return False
temp6 = int(stripElevation[0])
temp7 = int(stripElevation[1])
temp8 = int(stripElevation[2])
if temp6 < 0.0 or temp6 > 90 or temp7 >= 60 or temp7 < 0.0 or temp8 >= 60 or temp8 < 0.0:
self.IP = InvalidParameter(self.lowerElevation, self, -1, stringVal="lowerElevation")
return False
elif temp6 == 90:
if temp7 != 0 or temp8 != 0:
self.IP = InvalidParameter(self.lowerElevation, self, -1, stringVal="lowerElevation")
return False
self.lowerElevation = stripElevation[0].strip() + ":" + stripElevation[1].strip() + ":" +\
stripElevation[2].strip()
if temp1 < 0:
self.IP = InvalidParameter(self.elevation, self, -1, stringVal="tempElevNum", secondValue="elevation")
return False
elif temp2 < 0:
self.IP = InvalidParameter(self.temperature, self, -1, stringVal="tempElevNum", secondValue="temperature")
return False
elif temp4 < 0:
self.IP = InvalidParameter(self.lowerLimit, self, -1, stringVal="tempElevNum", secondValue="depth lower limit")
return False
except ValueError:
if tempString == "temperature":
self.IP = InvalidParameter(self.temperature, self, -1, stringVal="tempElevNum", secondValue=tempString)
elif tempString == "apparent magnitude upper limit":
self.IP = InvalidParameter(self.upperLimit, self, -1, stringVal="tempElevNum", secondValue=tempString)
elif tempString == "depth lower limit":
self.IP = InvalidParameter(self.lowerLimit, self, -1, stringVal="tempElevNum", secondValue=tempString)
elif tempString == "lower elevation limit":
self.IP = InvalidParameter(self.lowerElevation, self, -1, stringVal="lowerElevation")
else:
self.IP = InvalidParameter(self.elevation, self, -1, stringVal="tempElevNum", secondValue=tempString)
return False
if all(self.twilight != temp for temp in ["Civil Twilight (-6 degrees)",
"Nautical Twilight (-12 degrees)",
"Astronomical Twilight (-18 degrees)"]):
self.IP = InvalidParameter(self.twilight, self, -1, stringVal="twilight")
return False
return True
def update(self, event):
'''
This method is bound to the drop down list of observatories that can be selected in the
frame. Once an observatory is chosen, this method updates all relevant text fields with the
appropriate parameters.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
if self.obsList.GetValue() == "Enter New Observatory":
self.leftBox.userParams["observatoryName"].SetValue("Enter the name of the Observatory")
self.leftBox.userParams["fileName"].SetValue("Enter the name of the file")
else:
radioBoxes = self.radioBox.GetChildren()
radioList = []
for eachBox in radioBoxes:
window = eachBox.GetWindow()
children = window.GetChildren()
for child in children:
if isinstance(child, wx.RadioButton):
radioList.append(child)
lines = open(self.nameList[self.obsList.GetValue()],"r").read().splitlines()
self.leftBox.userParams["fileName"].SetValue(os.path.split(self.nameList[self.obsList.GetValue()
])[1].split(".")[0])
for eachLine in lines:
if len(eachLine.split()) > 1:
inline = eachLine.split(':', 1)
name = inline[0].strip()
value = str(inline[1].strip())
tempList = [("name","observatoryName"),("min_horizon","lowerElevation"),("mag_limit","upperLimit"),
("depth_limit","lowerLimit"),("latitude",""),("longitude",""),("elevation",""),
("temperature",""),("twilight",""),("calc_transits",0),("calc_eclipses",2),
("html_out",4),("text_out",6), ("show_lt","rbShowLT"), ("band","rbBand")]
for string,saveName in tempList:
if string == name:
if any(temp == name for temp in ["name","mag_limit","depth_limit"]):
self.leftBox.userParams[saveName].SetValue(str(value))
elif any(temp == name for temp in ["latitude","longitude","elevation","temperature",
"twilight","min_horizon","time_zone", "band"]):
if saveName == "":
saveName = name
if name == "twilight":
tempStr = [temp for temp in self.twilightChoices.keys() \
if self.twilightChoices[temp] == value]
if len(tempStr) != 0:
self.twilightList.SetValue(tempStr[0])
elif name == "show_lt":
if value == "0":
saveName = saveName + "1"
self.showLT.userParams[saveName].SetValue(True)
elif name == "band":
if value == "K":
saveName = saveName + "1"
self.band.userParams[saveName].SetValue(True)
else:
self.leftBox2.userParams[saveName].SetValue(str(value))
elif any(temp == name for temp in ["calc_transits","calc_eclipses","html_out","text_out"]):
if(value == "False"):
saveName = saveName + 1
radioList[saveName].SetValue(True)
def saveFile(self, fileName):
'''
This method saves all the current parameters in the window for a selected
observatory to a text file. This allows the user to quickly select the observatory
with pre-loaded parameters after an initial setup.
Parameters
----------
fileName : string
The name of the file that will be saved with all of the user inputs.
'''
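# For reference, the file written below should look roughly like this (values are illustrative
# only, not from a real observatory):
#
#   name: Example Observatory
#   latitude: 38.0:53.0:0.0
#   longitude: -77.0:2.0:0.0
#   elevation: 100.0
#   temperature: 20.0
#   min_horizon: 30:00:00
#   start_date: (2014,5,15,0,0,0)
#   end_date: (2014,5,22,0,0,0)
#   mag_limit: 0.0
#   depth_limit: 0.0
#   calc_transits: True
#   calc_eclipses: False
#   html_out: True
#   text_out: False
#   twilight: -6
#   show_lt: 1
#   band: V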
startDate = [x.strip() for x in self.leftBox.userParams["obsStart"].GetValue().split("/")]
endDate = [x.strip() for x in self.leftBox.userParams["obsEnd"].GetValue().split("/")]
dates = {}
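# Leading zeros are stripped from "08" and "09", presumably so the date tuple written below,
# e.g. "(2014,8,25,0,0,0)", cannot be misread as an invalid octal literal when parsed later.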
for date, stringDate in [(startDate,"date1"), (endDate,"date2")]:
for stringNum in date:
if stringNum == "08":
date[date.index(stringNum)] = "8"
elif stringNum == "09":
date[date.index(stringNum)] = "9"
date += ["0","0","0"]
tempString = "("
for num in range(0,len(date)):
if num != len(date)-1:
tempString += date[num] + ","
else:
tempString += date[num]
tempString += ")"
dates[stringDate] = tempString
newObs = open(fileName, "w")
newObs.write("name: " + self.name + "\n")
newObs.write("latitude: " + self.latitude + "\n")
newObs.write("longitude: " + self.longitude + "\n")
newObs.write("elevation: " + self.elevation + "\n")
newObs.write("temperature: " + self.temperature + "\n")
newObs.write("min_horizon: " + self.lowerElevation + "\n")
newObs.write("start_date: " + dates["date1"] + "\n")
newObs.write("end_date: " + dates["date2"] + "\n")
newObs.write("mag_limit: " + self.upperLimit + "\n")
newObs.write("depth_limit: " + self.lowerLimit + "\n")
newObs.write("calc_transits: " + str(self.calcTransitsBox.userParams["rbCalcTransits"].GetValue()) + "\n")
newObs.write("calc_eclipses: " + str(self.calcEclipseBox.userParams["rbCalcEclipse"].GetValue()) + "\n")
newObs.write("html_out: " + str(self.htmlBox.userParams["rbHtmlOut"].GetValue()) + "\n")
newObs.write("text_out: " + str(self.textBox.userParams["rbTextOut"].GetValue()) + "\n")
newObs.write("twilight: " + self.twilightChoices[self.twilight] + "\n")
tempLT = str(self.showLT.userParams["rbShowLT"].GetValue())
if tempLT == "True":
tempLT = "1"
else:
tempLT = "0"
newObs.write("show_lt: " + tempLT + "\n")
tempString = str(self.band.userParams["rbBand"].GetValue())
if tempString == "True":
bandString = "V"
else:
bandString = "K"
newObs.write("band: "+ bandString)
newObs.close()
def create_menu(self):
'''
This method creates the menu bars that are at the top of the ephemeris frame.
Notes
-----
This method has no input or return parameters. It is simply called as self.create_menu()
from the initialization method of an instance of this frame.
'''
menubar = wx.MenuBar()
menu_file = wx.Menu()
m_save = menu_file.Append(wx.ID_SAVE, "Save\tCtrl+S", "Save data to a zip folder.")
m_quit = menu_file.Append(wx.ID_EXIT, "Quit\tCtrl+Q", "Quit this application.")
self.Bind(wx.EVT_MENU, self.on_exit, m_quit)
self.Bind(wx.EVT_MENU, self.saveOutput, m_save)
menubar.Append(menu_file, "File")
self.SetMenuBar(menubar)
def saveOutput(self, event):
'''
This method will save the output of the ephemeris calculations as a zip file.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
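# The ephOutputs directory is copied to the chosen location, its files are compressed into
# <outputPath>.zip, and the uncompressed copy is then removed.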
dlg = wx.FileDialog(self, message = "Save your output...", style = wx.SAVE)
if dlg.ShowModal() == wx.ID_OK:
outputPath = dlg.GetPath()
if self.parameterCheck():
self.calculate(None)
shutil.copytree(os.path.join(os.path.dirname(os.path.abspath(oscaar.__file__)),'extras','eph','ephOutputs'),
outputPath)
outputArchive = zipfile.ZipFile(outputPath+'.zip', 'w')
for name in glob(outputPath+os.sep+'*'):
outputArchive.write(name, os.path.basename(name), zipfile.ZIP_DEFLATED)
shutil.rmtree(outputPath)
outputArchive.close()
dlg.Destroy()
def on_exit(self,event):
'''
This method defines the action quit from the menu. It closes the frame.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
self.Destroy()
def onDestroy(self, event):
'''
Whenever this frame is closed, this secondary method updates a variable in the parent
class to make sure that it knows there is no active instance of this frame.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
self.parent.loadEphFrame = False
class FittingFrame(wx.Frame):
'''
After you have created your own light curve, there are different fitting methods that
you can apply to it. Currently the only fitting method in place is MCMC.
'''
def __init__(self, parent, objectID, path = ''):
'''
This method defines the initialization of this class.
'''
self.path = path
self.title = "Fitting Methods"
self.loadMCMC = False
wx.Frame.__init__(self, parent, objectID, self.title)
self.panel = wx.Panel(self)
self.parent = parent
self.messageFrame = False
self.IP = wx.Frame
self.box = AddLCB(self.panel,-1,name="Path to Output File: ")
self.box2 = AddLCB(self.panel, -1, name="Results Output Path (.txt): ", saveType=wx.FD_SAVE)
self.vbox2= wx.BoxSizer(wx.VERTICAL)
self.vbox2.Add(self.box, flag=wx.ALIGN_CENTER | wx.TOP)
self.vbox2.Add(self.box2, border=5, flag=wx.ALL)
self.box.boxList[1].SetValue(self.path)
self.plotMCMCButton = wx.Button(self.panel,label="MCMC Fit", size = (130,25))
self.Bind(wx.EVT_BUTTON, self.plotMCMC, self.plotMCMCButton)
self.sizer0 = wx.FlexGridSizer(rows=2, cols=4)
self.sizer0.Add(self.plotMCMCButton,0,wx.ALIGN_CENTER|wx.ALL,5)
self.pklPathTxt = self.box.boxList[1]
self.saveLocation = self.box2.boxList[1]
self.create_menu()
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(self.vbox2, 0, flag=wx.ALIGN_CENTER | wx.TOP)
self.vbox.Add(self.sizer0, 0, flag=wx.ALIGN_CENTER | wx.TOP)
self.Bind(wx.EVT_WINDOW_DESTROY, self.onDestroy)
self.vbox.AddSpacer(10)
self.panel.SetSizer(self.vbox)
self.CreateStatusBar()
self.vbox.Fit(self)
self.Center()
self.Show()
def create_menu(self):
'''
This method creates the menu bars that are at the top of the ephemeris frame.
Notes
-----
This method has no input or return parameters. It is simply called as self.create_menu()
from the initialization method of an instance of this frame.
'''
self.menubar = wx.MenuBar()
menu_file = wx.Menu()
m_browse = menu_file.Append(-1,"Browse","Browse for a .pkl file to use.")
self.Bind(wx.EVT_MENU, lambda event: self.browseButtonEvent(event,'Choose Path to Output File',self.pklPathTxt,
False,wx.FD_OPEN),m_browse)
m_browse2 = menu_file.Append(-1, "Browse2", "Browse a save location for the results.")
self.Bind(wx.EVT_MENU, lambda event: self.browseButtonEvent(event,'Choose Path to Output File',self.saveLocation,
False,wx.FD_SAVE),m_browse2)
menu_file.AppendSeparator()
m_exit = menu_file.Append(-1, "Exit\tCtrl-Q", "Exit")
self.Bind(wx.EVT_MENU, self.on_exit, m_exit)
self.menubar.Append(menu_file, "&File")
self.SetMenuBar(self.menubar)
def browseButtonEvent(self, event, message, textControl, fileDialog, saveDialog):
'''
This method defines the `browse` function for selecting a file on any OS.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
message : string
The message that tells the user what to choose.
textControl : wx.TextCtrl
The box in the frame that will be refreshed with the files that are chosen by the user.
fileDialog : bool
If true, the style is wx.FD_MULTIPLE, otherwise it is the same as the `saveDialog`.
saveDialog : wx.FD_*
The style of the box that will appear. The * represents a wild card value for different types.
'''
if not fileDialog:
dlg = wx.FileDialog(self, message = message, style = saveDialog)
else:
dlg = wx.FileDialog(self, message = message, style = wx.FD_MULTIPLE)
if dlg.ShowModal() == wx.ID_OK:
if saveDialog == wx.SAVE:
filenames = [dlg.GetPath()]
else:
filenames = dlg.GetPaths()
textControl.Clear()
for i in range(0,len(filenames)):
if i != len(filenames)-1:
textControl.WriteText(filenames[i] + ',')
else:
textControl.WriteText(filenames[i])
dlg.Destroy()
def plotLSFit(self,event):
'''
This method would launch a least-squares fitting frame; it is not currently in use.
'''
if self.validityCheck():
global pathText
global loadLSFit
pathText = self.pklPathTxt.GetValue()
if loadLSFit == False:
LeastSquaresFitFrame()
loadLSFit = True
def plotMCMC(self,event):
'''
This method checks that the file chosen to be loaded is valid, and that there is a valid save
file selected for the output of the MCMC calculations.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
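# Before opening the MCMC frame, require that the directory part of the results path exists and
# that a file name actually follows the final path separator.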
if self.validityCheck():
tempSaveLoc = self.saveLocation.GetValue()
if not os.path.isdir(tempSaveLoc.rpartition(str(os.sep))[0]) or \
not len(tempSaveLoc) > (len(tempSaveLoc[:tempSaveLoc.rfind(os.sep)]) + 1):
self.IP = InvalidParameter(tempSaveLoc, self, -1, stringVal="output", secondValue="results output file")
else:
try:
self.pathText = self.pklPathTxt.GetValue()
self.data = IO.load(self.pathText)
if self.loadMCMC == False:
MCMCFrame(self, -1)
self.loadMCMC = True
except AttributeError:
self.IP = InvalidParameter("", self, -1, stringVal="oldPKL")
def validityCheck(self):
'''
This is a fitting frame specific method that checks whether or not the given .pkl file
is valid.
'''
pathName = self.pklPathTxt.GetValue()
if pathName != "":
if pathName.lower().endswith(".pkl"):
if os.path.isfile(pathName) == False:
self.IP = InvalidParameter(pathName, self, -1, stringVal="path")
return False
else:
self.IP = InvalidParameter(pathName, self, -1, stringVal="path")
return False
else:
self.IP = InvalidParameter(pathName, self, -1, stringVal="path")
return False
return True
def on_exit(self, event):
'''
This method defines the action quit from the menu. It closes the frame.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
self.Destroy()
def onDestroy(self, event):
'''
Whenever this frame is closed, this secondary method updates a variable in the parent
class to make sure that it knows there is no active instance of this frame.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
self.parent.loadFittingOpen = False
class ETDFrame(wx.Frame):
'''
This frame converts the data from a .pkl into the correct format in a text
file that can be accepted by the Czech exoplanet transit database.
'''
def __init__(self, parent, objectID):
'''
This method defines the initialization of this class.
'''
self.title = "ETD Conversion"
wx.Frame.__init__(self, parent, objectID, self.title)
self.panel = wx.Panel(self)
self.parent = parent
self.messageFrame = False
self.data = ""
self.box = AddLCB(self.panel,-1, parent2 = self, name="Path to Output File: ", updateRadii = True)
self.box2 = AddLCB(self.panel, -1, name="Results Output Path (.txt): ", saveType=wx.FD_SAVE)
self.apertureRadii = []
self.apertureRadiusIndex = 0
self.radiusLabel = wx.StaticText(self.panel, -1, 'Select Aperture Radius: ')
self.radiusList = wx.ComboBox(self.panel, value = "", choices = [], size = (100, wx.DefaultSize.GetHeight()))
self.radiusList.Bind(wx.EVT_COMBOBOX, self.radiusIndexUpdate)
self.updateRadiiButton = wx.Button(self.panel, label = "Update Radii List")
self.Bind(wx.EVT_BUTTON, self.updateRadiiList, self.updateRadiiButton)
self.dropBox = wx.BoxSizer(wx.HORIZONTAL)
self.dropBox.Add(self.radiusLabel, 0, flag = wx.ALIGN_CENTER | wx.LEFT, border = 10)
self.dropBox.Add(self.radiusList, 0, flag = wx.ALIGN_CENTER)
self.convertToETDButton = wx.Button(self.panel,label = 'Convert to ETD Format')
self.Bind(wx.EVT_BUTTON, self.convertToETD, self.convertToETDButton)
self.sizer0 = wx.FlexGridSizer(rows=2, cols=3)
self.hbox = wx.BoxSizer(wx.HORIZONTAL)
self.hbox.Add(self.sizer0, 0, wx.ALIGN_CENTER | wx.ALL, border = 5)
self.hbox.Add(self.updateRadiiButton, 0, wx.ALIGN_CENTER |wx. ALL, border = 5)
self.hbox.Add(self.dropBox, 0, flag=wx.ALIGN_CENTER | wx.ALL, border=10)
self.sizer0.Add(self.convertToETDButton,0,wx.ALIGN_CENTER|wx.ALL,5)
self.pklPathTxt = self.box.boxList[1]
self.saveLocation = self.box2.boxList[1]
self.create_menu()
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(self.box, 0, flag=wx.ALIGN_CENTER | wx.TOP)
self.vbox.Add(self.box2, 0, flag=wx.ALIGN_CENTER | wx.ALL, border=5)
self.vbox.Add(self.hbox, 0, flag=wx.ALIGN_CENTER | wx.TOP)
self.Bind(wx.EVT_WINDOW_DESTROY, self.onDestroy)
self.vbox.AddSpacer(10)
self.panel.SetSizer(self.vbox)
self.CreateStatusBar()
self.vbox.Fit(self)
self.Center()
self.Show()
def create_menu(self):
'''
This method creates the menu bars that are at the top of the ETDFrame.
Notes
-----
This method has no input or return parameters. It is simply called as self.create_menu()
from the initialization method of an instance of this frame.
'''
self.menubar = wx.MenuBar()
menu_file = wx.Menu()
m_browse = menu_file.Append(-1,"Browse","Browse for a .pkl file to use.")
self.Bind(wx.EVT_MENU, lambda event: self.browseButtonEvent(event,'Choose Path to Output File',self.pklPathTxt,False,
wx.FD_OPEN),m_browse)
m_browse2 = menu_file.Append(-1, "Browse2", "Browse a save location for the results.")
self.Bind(wx.EVT_MENU, lambda event: self.browseButtonEvent(event,'Choose Path to Output File',self.saveLocation,
False,wx.FD_SAVE),m_browse2)
menu_file.AppendSeparator()
m_exit = menu_file.Append(-1, "Exit\tCtrl-Q", "Exit")
self.Bind(wx.EVT_MENU, self.on_exit, m_exit)
self.menubar.Append(menu_file, "&File")
self.SetMenuBar(self.menubar)
def browseButtonEvent(self, event, message, textControl, fileDialog, saveDialog):
'''
This method defines the `browse` function for selecting a file on any OS.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
message : string
The message that tells the user what to choose.
textControl : wx.TextCtrl
The box in the frame that will be refreshed with the files that are chosen by the user.
fileDialog : bool
If true, the style is wx.FD_MULTIPLE, otherwise it is the same as the `saveDialog`.
saveDialog : wx.FD_*
The style of the box that will appear. The * represents a wild card value for different types.
'''
if not fileDialog:
dlg = wx.FileDialog(self, message = message, style = saveDialog)
else:
dlg = wx.FileDialog(self, message = message, style = wx.FD_MULTIPLE)
if dlg.ShowModal() == wx.ID_OK:
if saveDialog == wx.SAVE:
filenames = [dlg.GetPath()]
else:
filenames = dlg.GetPaths()
textControl.Clear()
for i in range(0,len(filenames)):
if i != len(filenames)-1:
textControl.WriteText(filenames[i] + ',')
else:
textControl.WriteText(filenames[i])
if self.validityCheck(throwException = False):
try:
self.radiusList.Clear()
self.data = IO.load(self.box.boxList[1].GetValue())
self.apertureRadii = np.empty_like(self.data.apertureRadii)
self.apertureRadii[:] = self.data.apertureRadii
radiiString = [str(x) for x in self.data.apertureRadii]
for string in radiiString:
self.radiusList.Append(string)
self.radiusList.SetValue(radiiString[0])
except AttributeError:
self.IP = InvalidParameter("", self, -1, stringVal="oldPKL")
dlg.Destroy()
def convertToETD(self, event):
'''
This method uses the czechETDstring method defined in databank.py to convert the data
into the format expected by the Czech exoplanet transit database.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
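# A ".txt" extension is appended if missing, and the ETD-formatted string for the currently
# selected aperture radius is written to that file.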
if self.validityCheck() and self.radiusCheck():
tempSaveLoc = self.saveLocation.GetValue()
if not os.path.isdir(tempSaveLoc.rpartition(str(os.sep))[0]) or \
not len(tempSaveLoc) > (len(tempSaveLoc[:tempSaveLoc.rfind(os.sep)]) + 1):
self.IP = InvalidParameter(tempSaveLoc, self, -1, stringVal="output", secondValue="results output file")
else:
if not tempSaveLoc.lower().endswith(".txt"):
tempSaveLoc += ".txt"
openFile = open(tempSaveLoc, 'w')
openFile.write(self.data.czechETDstring(self.apertureRadiusIndex))
openFile.close()
self.IP = InvalidParameter("", self, -1, stringVal="successfulConversion")
def validityCheck(self, throwException=True):
'''
This method checks to make sure that the entered .pkl file is valid and can
be used.
Parameters
----------
throwException : bool, optional
If true there will be a pop up frame that will explain the reason for why
the selected file cannot be used if it is invalid. If false, no error message
will pop up when an invalid file is selected.
Returns
-------
literal : bool
False if the selected file is invalid, true otherwise.
'''
pathName = self.pklPathTxt.GetValue()
if pathName != "":
if pathName.lower().endswith(".pkl"):
if os.path.isfile(pathName) == False:
if throwException:
self.IP = InvalidParameter(pathName, self, -1, stringVal="path")
return False
else:
if throwException:
self.IP = InvalidParameter(pathName, self, -1, stringVal="path")
return False
else:
if throwException:
self.IP = InvalidParameter(pathName, self, -1, stringVal="path")
return False
return True
def radiusCheck(self):
'''
This method checks that the aperture radius the user would like to plot is a valid
number present in the list of saved aperture radii for the selected file.
Returns
-------
literal : bool
False if the aperture radius selected is not a number or not in the approved list,
true otherwise.
'''
if len(self.apertureRadii) == 0:
self.IP = InvalidParameter(str(self.apertureRadii), self, -1, stringVal="radiusListError", secondValue="etdError")
return False
elif self.radiusList.GetValue() == "":
self.IP = InvalidParameter(self.radiusList.GetValue(), self, -1, stringVal="radiusError")
return False
try:
self.tempNum = np.where(self.epsilonCheck(self.apertureRadii,float(self.radiusList.GetValue())))
if len(self.tempNum[0]) == 0:
tempString = self.radiusList.GetValue() + " was not found in " + str(self.apertureRadii)
self.IP = InvalidParameter(tempString, self, -1, stringVal="radiusListError2")
return False
except ValueError:
self.IP = InvalidParameter(self.radiusList.GetValue(), self, -1, stringVal="radiusError")
return False
return True
def updateRadiiList(self, event):
'''
This method will manually update the drop down menu for the available aperture radii that can
be chosen from the .pkl file.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
Notes
-----
On successful completion, a list of available radii should be shown in the drop down menu of the frame.
'''
if self.validityCheck():
try:
self.radiusList.Clear()
self.data = IO.load(self.box.boxList[1].GetValue())
self.apertureRadii = np.empty_like(self.data.apertureRadii)
self.apertureRadii[:] = self.data.apertureRadii
radiiString = [str(x) for x in self.data.apertureRadii]
for string in radiiString:
self.radiusList.Append(string)
self.radiusList.SetValue(radiiString[0])
except AttributeError:
self.IP = InvalidParameter("", self, -1, stringVal="oldPKL")
def epsilonCheck(self,a,b):
'''
This method checks that two numbers are within machine precision of each other
because otherwise we get machine precision difference errors when mixing
single and double precision NumPy floats and pure Python built-in float types.
Parameters
----------
a : array
An array of float type numbers to check through.
b : float
The number that is being checked for in the array.
Returns
-------
literal : array
This is an array of booleans.
Notes
-----
An entry in the returned array is True wherever the corresponding number in `a` is within
machine precision of `b`.
Examples
--------
Inputs: `a` = [0, 1.0, 2.0, 3.0, 4.0], `b` = 3.0
Return: [False, False, False, True, False]
'''
return np.abs(a-b) < np.finfo(np.float32).eps
def radiusIndexUpdate(self, event):
'''
This method updates the current index in the list of available radii that this frame will use to plot different
things. It does this by calling self.epsilonCheck to get an array of booleans. Afterwards, it selects the location
of the boolean 'True' and marks that as the new index.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
self.apertureRadiusIndex = np.where(self.epsilonCheck(self.apertureRadii, float(self.radiusList.GetValue())))[0][0]
def on_exit(self, event):
'''
This method defines the action quit from the menu. It closes the frame.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
self.Destroy()
def onDestroy(self, event):
'''
Whenever this frame is closed, this secondary method updates a variable in the parent
class to make sure that it knows there is no active instance of this frame.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
self.parent.etdOpen = False
class LoadOldPklFrame(wx.Frame):
'''
This frame loads an old .pkl file so that you can make different plots with the
saved data.
'''
def __init__(self, parent, objectID):
'''
This method defines the initialization of this class.
'''
self.title = "Load An Old .pkl File"
wx.Frame.__init__(self, parent, objectID, self.title)
self.panel = wx.Panel(self)
self.parent = parent
self.loadGraphFrame = False
self.messageFrame = False
self.IP = wx.Frame
self.data = ""
self.box = AddLCB(self.panel,-1, parent2 = self, buttonLabel="Browse\t (Ctrl-O)",
name="Path to Output File: ", updateRadii = True)
self.apertureRadii = []
self.apertureRadiusIndex = 0
self.radiusLabel = wx.StaticText(self.panel, -1, 'Select Aperture Radius: ')
self.radiusList = wx.ComboBox(self.panel, value = "", choices = [], size = (100, wx.DefaultSize.GetHeight()))
self.radiusList.Bind(wx.EVT_COMBOBOX, self.radiusIndexUpdate)
self.updateRadiiButton = wx.Button(self.panel, label = "Update Radii List")
self.Bind(wx.EVT_BUTTON, self.updateRadiiList, self.updateRadiiButton)
self.dropBox = wx.BoxSizer(wx.HORIZONTAL)
self.dropBox.Add(self.radiusLabel, 0, flag = wx.ALIGN_CENTER | wx.LEFT, border = 10)
self.dropBox.Add(self.radiusList, 0, flag = wx.ALIGN_CENTER)
self.rightBox = wx.BoxSizer(wx.VERTICAL)
self.rightBox.Add(self.updateRadiiButton, 0, flag = wx.ALIGN_CENTER | wx.ALL, border = 10)
self.rightBox.Add(self.dropBox, 0, flag = wx.ALIGN_CENTER | wx.ALL, border = 10)
self.hbox = wx.BoxSizer(wx.HORIZONTAL)
self.hbox.Add(self.box, 0, flag = wx.ALIGN_CENTER | wx.ALL, border = 10)
if sys.platform == 'win32':
self.plotLightCurveButton = wx.Button(self.panel,label = 'Plot Light Curve', size = (130,25))
self.plotRawFluxButton = wx.Button(self.panel,label = 'Plot Raw Fluxes', size = (130,25))
self.plotScaledFluxesButton = wx.Button(self.panel,label = 'Plot Scaled Fluxes', size = (130,25))
self.plotCentroidPositionsButton = wx.Button(self.panel, label = 'Trace Stellar Centroid Positions', size = (170,25))
self.plotComparisonStarWeightingsButton = wx.Button(self.panel,label = 'Plot Comparison\nStar Weightings', size = (110,37))
self.plotInteractiveLightCurveButton = wx.Button(self.panel,label = 'Plot Interactive Light Curve', size = (170,25))
elif sys.platform == 'darwin':
self.plotLightCurveButton = wx.Button(self.panel,label = 'Plot Light Curve', size = (130,25))
self.plotRawFluxButton = wx.Button(self.panel,label = 'Plot Raw Fluxes', size = (130,25))
self.plotScaledFluxesButton = wx.Button(self.panel,label = 'Plot Scaled Fluxes', size = (130,25))
self.plotCentroidPositionsButton = wx.Button(self.panel,-1,label = 'Trace Stellar\nCentroid Positions', size = (150,40))
self.plotComparisonStarWeightingsButton = wx.Button(self.panel,-1,label = 'Plot Comparison\nStar Weightings', size = (150,40))
self.plotInteractiveLightCurveButton = wx.Button(self.panel,-1,label = 'Plot Interactive Light Curve', size = (190,25))
else:
self.plotLightCurveButton = wx.Button(self.panel,label = 'Plot Light Curve', size = (130,30))
self.plotRawFluxButton = wx.Button(self.panel,label = 'Plot Raw Fluxes', size = (130,30))
self.plotScaledFluxesButton = wx.Button(self.panel,label = 'Plot Scaled Fluxes', size = (135,30))
self.plotCentroidPositionsButton = wx.Button(self.panel,-1,label = 'Trace Stellar\nCentroid Positions', size = (150,45))
self.plotComparisonStarWeightingsButton = wx.Button(self.panel,-1,label = 'Plot Comparison\nStar Weightings', size = (150,45))
self.plotInteractiveLightCurveButton = wx.Button(self.panel,-1,label = 'Plot Interactive Light Curve', size = (195,30))
self.Bind(wx.EVT_BUTTON, self.plotLightCurve, self.plotLightCurveButton)
self.Bind(wx.EVT_BUTTON, self.plotRawFlux, self.plotRawFluxButton)
self.Bind(wx.EVT_BUTTON, self.plotScaledFluxes,self.plotScaledFluxesButton)
self.Bind(wx.EVT_BUTTON, self.plotCentroidPosition, self.plotCentroidPositionsButton)
self.Bind(wx.EVT_BUTTON, self.plotComparisonStarWeightings, self.plotComparisonStarWeightingsButton)
self.Bind(wx.EVT_BUTTON, self.plotInteractiveLightCurve, self.plotInteractiveLightCurveButton)
self.sizer0 = wx.FlexGridSizer(rows=2, cols=3)
self.hbox2 = wx.BoxSizer(wx.HORIZONTAL)
self.hbox2.Add(self.sizer0, 0, wx.ALIGN_CENTER | wx.ALL, border = 5)
self.hbox2.Add(self.rightBox, 0, wx.ALIGN_CENTER |wx. ALL, border = 5)
self.sizer0.Add(self.plotLightCurveButton,0,wx.ALIGN_CENTER|wx.ALL,5)
self.sizer0.Add(self.plotRawFluxButton,0,wx.ALIGN_CENTER|wx.ALL,5)
self.sizer0.Add(self.plotScaledFluxesButton,0,wx.ALIGN_CENTER|wx.ALL,5)
self.sizer0.Add(self.plotCentroidPositionsButton,0,wx.ALIGN_CENTER|wx.ALL,5)
self.sizer0.Add(self.plotComparisonStarWeightingsButton,0,wx.ALIGN_CENTER|wx.ALL,5)
self.sizer0.Add(self.plotInteractiveLightCurveButton,0,wx.ALIGN_CENTER|wx.ALL,5)
self.pklPathTxt = self.box.boxList[1]
self.create_menu()
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(self.hbox, 0, flag=wx.ALIGN_CENTER | wx.TOP)
self.vbox.Add(self.hbox2, 0, flag=wx.ALIGN_CENTER | wx.TOP)
self.Bind(wx.EVT_WINDOW_DESTROY, self.onDestroy)
self.vbox.AddSpacer(10)
self.panel.SetSizer(self.vbox)
self.CreateStatusBar()
self.vbox.Fit(self)
self.Center()
self.Show()
def create_menu(self):
'''
This method creates the menu bars that are at the top of the load old pkl frame.
Notes
-----
This method has no input or return parameters. It is simply called as self.create_menu()
from the initialization method of an instance of this frame.
'''
self.menubar = wx.MenuBar()
menu_file = wx.Menu()
m_browse = menu_file.Append(-1,"Browse\tCtrl-O","Browse")
self.Bind(wx.EVT_MENU,lambda event: self.browseButtonEvent(event,'Choose Path to Output File',self.pklPathTxt,False,
wx.FD_OPEN),m_browse)
menu_file.AppendSeparator()
m_exit = menu_file.Append(-1, "Exit\tCtrl-Q", "Exit")
self.Bind(wx.EVT_MENU, self.on_exit, m_exit)
self.menubar.Append(menu_file, "&File")
self.SetMenuBar(self.menubar)
def browseButtonEvent(self, event, message, textControl, fileDialog, saveDialog):
'''
This method defines the `browse` function for selecting a file on any OS.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
message : string
The message that tells the user what to choose.
textControl : wx.TextCtrl
The box in the frame that will be refreshed with the files that are chosen by the user.
fileDialog : bool
If true, the style is wx.FD_MULTIPLE, otherwise it is the same as the `saveDialog`.
saveDialog : wx.FD_*
The style of the box that will appear. The * represents a wild card value for different types.
'''
if not fileDialog:
dlg = wx.FileDialog(self, message = message, style = saveDialog)
else:
dlg = wx.FileDialog(self, message = message, style = wx.FD_MULTIPLE)
if dlg.ShowModal() == wx.ID_OK:
if saveDialog == wx.SAVE:
filenames = [dlg.GetPath()]
else:
filenames = dlg.GetPaths()
textControl.Clear()
for i in range(0,len(filenames)):
if i != len(filenames)-1:
textControl.WriteText(filenames[i] + ',')
else:
textControl.WriteText(filenames[i])
if self.validityCheck(throwException = False):
try:
self.radiusList.Clear()
self.data = IO.load(self.box.boxList[1].GetValue())
self.apertureRadii = np.empty_like(self.data.apertureRadii)
self.apertureRadii[:] = self.data.apertureRadii
radiiString = [str(x) for x in self.data.apertureRadii]
for string in radiiString:
self.radiusList.Append(string)
self.radiusList.SetValue(radiiString[0])
except AttributeError:
self.IP = InvalidParameter("", self, -1, stringVal="oldPKL")
dlg.Destroy()
def plotLightCurve(self, event):
'''
This method will plot the light curve of the data that has been saved in an
old .pkl file for the specific aperture radius that is selected.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
Notes
-----
On successful completion a plot will open up in a new window.
'''
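# The plot is generated in a separate Python process via "python -c", presumably so the
# blocking matplotlib window does not tie up this wxPython GUI.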
if self.validityCheck() and self.radiusCheck():
if self.tempNum[0][0] != self.apertureRadiusIndex:
self.apertureRadiusIndex = self.tempNum[0][0]
print('Loading file: ' + self.pklPathTxt.GetValue())
commandstring = "import oscaar.IO; data=oscaar.IO.load('%s'); data.plotLightCurve(apertureRadiusIndex=%s)" \
% (self.pklPathTxt.GetValue(),self.apertureRadiusIndex)
subprocess.Popen(['python','-c',commandstring])
def plotRawFlux(self, event):
'''
This method will plot the raw fluxes of the data that has been saved in an
old .pkl file for the specific aperture radius that is selected.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
Notes
-----
On successful completion a plot will open up in a new window.
'''
if self.validityCheck() and self.radiusCheck():
if self.tempNum[0][0] != self.apertureRadiusIndex:
self.apertureRadiusIndex = self.tempNum[0][0]
print('Loading file: ' + self.pklPathTxt.GetValue())
commandstring = "import oscaar.IO; data=oscaar.IO.load('%s'); data.plotRawFluxes(apertureRadiusIndex=%s)" \
% (self.pklPathTxt.GetValue(),self.apertureRadiusIndex)
subprocess.Popen(['python','-c',commandstring])
def plotScaledFluxes(self, event):
'''
This method will plot the scaled fluxes of the data that has been saved in an
old .pkl file for the specific aperture radius that is selected.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
Notes
-----
On successful completion a plot will open up in a new window.
'''
if self.validityCheck() and self.radiusCheck():
if self.tempNum[0][0] != self.apertureRadiusIndex:
self.apertureRadiusIndex = self.tempNum[0][0]
print('Loading file: ' + self.pklPathTxt.GetValue())
commandstring = "import oscaar.IO; data=oscaar.IO.load('%s'); data.plotScaledFluxes(apertureRadiusIndex=%s)" \
% (self.pklPathTxt.GetValue(),self.apertureRadiusIndex)
subprocess.Popen(['python','-c',commandstring])
def plotCentroidPosition(self, event):
'''
This method will plot the centroid positions of the data that has been saved in an
old .pkl file.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
Notes
-----
On successful completion a plot will open up in a new window.
'''
if self.validityCheck():
print('Loading file: ' + self.pklPathTxt.GetValue())
commandstring = "import oscaar.IO; data=oscaar.IO.load('%s'); data.plotCentroidsTrace()" \
% (self.pklPathTxt.GetValue())
subprocess.Popen(['python','-c',commandstring])
def plotComparisonStarWeightings(self, event):
'''
This method will plot the comparison star weightings of the data that has been saved in an
old .pkl file for the specific aperture radius that is selected.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
Notes
-----
On successful completion a plot will open up in a new window.
'''
if self.validityCheck() and self.radiusCheck():
if self.tempNum[0][0] != self.apertureRadiusIndex:
self.apertureRadiusIndex = self.tempNum[0][0]
print('Loading file: ' + self.pklPathTxt.GetValue())
commandstring = "import oscaar.IO; data=oscaar.IO.load('%s');" \
"data.plotComparisonWeightings(apertureRadiusIndex=%s)" \
% (self.pklPathTxt.GetValue(),self.apertureRadiusIndex)
subprocess.Popen(['python','-c',commandstring])
def plotInteractiveLightCurve(self, event):
'''
This method will plot the interactive light curve of the data that has been saved in an
old .pkl file for the specific aperture radius that is selected.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
Notes
-----
On successful completion a plot will open up in a new window.
'''
if self.validityCheck() and self.radiusCheck():
if self.tempNum[0][0] != self.apertureRadiusIndex:
self.apertureRadiusIndex = self.tempNum[0][0]
if self.loadGraphFrame == False:
GraphFrame(self, -1)
self.loadGraphFrame = True
def validityCheck(self, throwException=True):
'''
This method checks to make sure that the entered .pkl file is valid and can
be used.
Parameters
----------
throwException : bool, optional
If true there will be a pop up frame that will explain the reason for why
the selected file cannot be used if it is invalid. If false, no error message
will pop up when an invalid file is selected.
Returns
-------
literal : bool
False if the selected file is invalid, true otherwise.
'''
pathName = self.pklPathTxt.GetValue()
if pathName != "":
if pathName.lower().endswith(".pkl"):
if os.path.isfile(pathName) == False:
if throwException:
self.IP = InvalidParameter(pathName, self, -1, stringVal="path")
return False
else:
if throwException:
self.IP = InvalidParameter(pathName, self, -1, stringVal="path")
return False
else:
if throwException:
self.IP = InvalidParameter(pathName, self, -1, stringVal="path")
return False
return True
def radiusCheck(self):
'''
This method checks that the aperture radius the user would like to plot is a valid
number present in the list of saved aperture radii for the selected file.
Returns
-------
literal : bool
False if the aperture radius selected is not a number or not in the approved list,
true otherwise.
'''
if len(self.apertureRadii) == 0:
self.IP = InvalidParameter(str(self.apertureRadii), self, -1, stringVal="radiusListError")
return False
elif self.radiusList.GetValue() == "":
self.IP = InvalidParameter(self.radiusList.GetValue(), self, -1, stringVal="radiusError")
return False
try:
self.tempNum = np.where(self.epsilonCheck(self.apertureRadii,float(self.radiusList.GetValue())))
if len(self.tempNum[0]) == 0:
tempString = self.radiusList.GetValue() + " was not found in " + str(self.apertureRadii)
self.IP = InvalidParameter(tempString, self, -1, stringVal="radiusListError2")
return False
except ValueError:
self.IP = InvalidParameter(self.radiusList.GetValue(), self, -1, stringVal="radiusError")
return False
return True
def updateRadiiList(self, event):
'''
This method will manually update the drop down menu for the available aperture radii that can
be chosen from the .pkl file.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
Notes
-----
On successful completion, a list of available radii should be shown in the drop down menu of the frame.
'''
if self.validityCheck():
try:
self.radiusList.Clear()
self.data = IO.load(self.box.boxList[1].GetValue())
self.apertureRadii = np.empty_like(self.data.apertureRadii)
self.apertureRadii[:] = self.data.apertureRadii
radiiString = [str(x) for x in self.data.apertureRadii]
for string in radiiString:
self.radiusList.Append(string)
self.radiusList.SetValue(radiiString[0])
except AttributeError:
self.IP = InvalidParameter("", self, -1, stringVal="oldPKL")
def epsilonCheck(self,a,b):
'''
This method checks whether numbers are within machine precision of each other, since exact
equality comparisons can fail when mixing single- and double-precision NumPy floats with
pure Python built-in float types.
Parameters
----------
a : array
An array of float type numbers to check through.
b : float
The number that is being checked for in the array.
Returns
-------
literal : array
This is an array of booleans.
Notes
-----
An entry in the returned array is True wherever the corresponding number in `a` is within
machine precision of `b`.
Examples
--------
Inputs: `a` = [0, 1.0, 2.0, 3.0, 4.0], `b` = 3.0
Return: [False, False, False, True, False]
'''
return np.abs(a-b) < np.finfo(np.float32).eps
def radiusIndexUpdate(self, event):
'''
This method updates the current index in the list of available radii that this frame will use to plot different
things. It does this by calling self.epsilonCheck to get an array of booleans. Afterwards, it selects the location
of the boolean 'True' and marks that as the new index.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
self.apertureRadiusIndex = np.where(self.epsilonCheck(self.apertureRadii, float(self.radiusList.GetValue())))[0][0]
def on_exit(self, event):
'''
This method defines the action quit from the menu. It closes the frame.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
self.Destroy()
def onDestroy(self, event):
'''
Whenever this frame is closed, this secondary method updates a variable in the parent
class to make sure that it knows there is no active instance of this frame.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
self.parent.loadOldPklOpen = False
class GraphFrame(wx.Frame):
'''
This is the class for the interactive light curve plot frame. It allows a user to repeatedly
re-plot the light curve with a new bin size and to change the axis labels and title.
'''
title = 'Interactive Light Curve Plot'
def __init__(self, parent, objectID):
'''
This method defines the initialization of this class.
'''
wx.Frame.__init__(self, parent, objectID, self.title, style = wx.DEFAULT_FRAME_STYLE & ~ (wx.RESIZE_BORDER |
wx.RESIZE_BOX | wx.MAXIMIZE_BOX))
self.pT = parent.pklPathTxt.GetValue()
self.parent = parent
self.apertureRadiusIndex = self.parent.apertureRadiusIndex
self.create_menu()
self.statusbar = self.CreateStatusBar()
self.create_main_panel()
self.Bind(wx.EVT_WINDOW_DESTROY, self.onDestroy)
self.Centre()
self.Show()
def create_menu(self):
'''
This method creates the menu bars that are at the top of the graph frame.
Notes
-----
This method has no input or return parameters. It is simply called as self.create_menu()
from the initialization method of an instance of this frame.
'''
self.menubar = wx.MenuBar()
menu_file = wx.Menu()
m_expt = menu_file.Append(-1, "&Save plot\tCtrl-S", "Save plot to file")
self.Bind(wx.EVT_MENU, self.on_save_plot, m_expt)
menu_file.AppendSeparator()
m_exit = menu_file.Append(-1, "E&xit\tCtrl-Q", "Exit")
self.Bind(wx.EVT_MENU, self.on_exit, m_exit)
self.menubar.Append(menu_file, "&File")
self.SetMenuBar(self.menubar)
def create_main_panel(self):
'''
This method creates a wxPython panel that will update every time a new instance of the
light curve plot is generated.
'''
self.panel = wx.Panel(self)
self.init_plot()
self.canvas = FigCanvas(self.panel, -1, self.fig)
self.box = ScanParamsBox(self.panel,-1)
self.hbox = wx.BoxSizer(wx.HORIZONTAL)
self.hbox.Add(self.box, border=5, flag=wx.ALL)
self.plotButton = wx.Button(self.panel,label = 'Plot')
self.Bind(wx.EVT_BUTTON,self.draw_plot, self.plotButton)
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(self.canvas, 1, flag=wx.LEFT | wx.TOP | wx.GROW)
self.vbox.Add(self.hbox, 0, flag=wx.ALIGN_CENTER | wx.TOP)
self.vbox.Add(self.plotButton,0,flag=wx.ALIGN_CENTER|wx.TOP)
self.vbox.AddSpacer(10)
self.panel.SetSizer(self.vbox)
self.vbox.Fit(self)
def init_plot(self):
'''
This is the initial plot that is displayed. It uses a bin size of 10 for the light curve.
'''
self.data = IO.load(self.pT)
self.pointsPerBin = 10
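# medianBin appears to group the light curve into bins of pointsPerBin samples and return the
# binned times, the flux in each bin, and its scatter; these are overplotted in red below.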
binnedTime, binnedFlux, binnedStd = medianBin(self.data.times,self.data.lightCurves[self.apertureRadiusIndex],
self.pointsPerBin)
self.fig = pyplot.figure(num=None, figsize=(10, 8), facecolor='w',edgecolor='k')
self.dpi = 100
self.axes = self.fig.add_subplot(111)
self.axes.set_axis_bgcolor('white')
self.axes.set_title('Light Curve', size=12)
def format_coord(x, y):
'''
Function to give data value on mouse over plot.
'''
return 'JD=%1.5f, Flux=%1.4f' % (x, y)
self.axes.format_coord = format_coord
self.axes.errorbar(self.data.times,self.data.lightCurves[self.apertureRadiusIndex],
yerr=self.data.lightCurveErrors[self.apertureRadiusIndex],fmt='k.',ecolor='gray')
self.axes.errorbar(binnedTime, binnedFlux, yerr=binnedStd, fmt='rs-', linewidth=2)
self.axes.axvline(ymin=0,ymax=1,x=self.data.ingress,color='k',ls=':')
self.axes.axvline(ymin=0,ymax=1,x=self.data.egress,color='k',ls=':')
self.axes.set_title(('Light curve for aperture radius %s' % self.data.apertureRadii[self.apertureRadiusIndex]))
self.axes.set_xlabel('Time (JD)')
self.axes.set_ylabel('Relative Flux')
def draw_plot(self,event):
'''
This method will redraw the plot every time the user presses the plot button in the frame.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
Notes
-----
On successful completion with at least one parameter changed, the new plot will show up in the panel of
the frame.
'''
self.box.update()
self.box.setMax(len(self.data.times))
if self.box.boxCorrect() == True and self.box.boxDiff() == True:
print "Re-drawing Plot"
self.xlabel = self.box.userinfo['xlabel'].GetValue()
self.ylabel = self.box.userinfo['ylabel'].GetValue()
self.plotTitle = self.box.userinfo['title'].GetValue()
self.pointsPerBin = int(self.box.userinfo['bin'].GetValue())
binnedTime, binnedFlux, binnedStd = medianBin(self.data.times,self.data.lightCurves[self.apertureRadiusIndex],
self.pointsPerBin)
if sys.platform == 'win32':
self.fig = pyplot.figure(num=None, figsize=(10, 6.75), facecolor='w',edgecolor='k')
else:
self.fig = pyplot.figure(num=None, figsize=(10, 8.0), facecolor='w',edgecolor='k')
self.dpi = 100
self.axes = self.fig.add_subplot(111)
self.axes.set_axis_bgcolor('white')
self.axes.set_title('Light Curve', size=12)
def format_coord(x, y):
'''
Function to display the data values under the cursor when hovering over the plot.
'''
return 'JD=%1.5f, Flux=%1.4f' % (x, y)
self.axes.format_coord = format_coord
self.axes.errorbar(self.data.times,self.data.lightCurves[self.apertureRadiusIndex],
yerr=self.data.lightCurveErrors[self.apertureRadiusIndex],fmt='k.',ecolor='gray')
self.axes.errorbar(binnedTime, binnedFlux, yerr=binnedStd, fmt='rs-', linewidth=2)
self.axes.axvline(ymin=0,ymax=1,x=self.data.ingress,color='k',ls=':')
self.axes.axvline(ymin=0,ymax=1,x=self.data.egress,color='k',ls=':')
self.axes.set_title(self.plotTitle)
self.axes.set_xlabel(self.xlabel)
self.axes.set_ylabel(self.ylabel)
self.canvas = FigCanvas(self.panel, -1, self.fig)
def on_save_plot(self, event):
'''
This method will save the plot you create as a .png file.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
file_choices = "PNG (*.png)|*.png"
dlg = wx.FileDialog(
self,
message="Save plot as...",
defaultDir=os.getcwd(),
defaultFile="plot.png",
wildcard=file_choices,
style=wx.SAVE)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
self.canvas.print_figure(path, dpi=self.dpi)
self.flash_status_message("Saved to %s" % path)
def flash_status_message(self, msg, flash_len_ms=1500):
'''
This method will show a message for a brief moment on the status bar at the bottom of the frame.
Parameters
----------
msg : string
The message that will appear.
flash_len_ms : int, optional
The amount of time the message should appear for in milliseconds.
'''
self.statusbar.SetStatusText(msg)
self.timeroff = wx.Timer(self)
self.Bind(
wx.EVT_TIMER,
self.on_flash_status_off,
self.timeroff)
self.timeroff.Start(flash_len_ms, oneShot=True)
def on_flash_status_off(self, event):
'''
This clears the status bar of the frame after a message has been displayed.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
self.statusbar.SetStatusText('')
def on_exit(self, event):
'''
This method defines the action quit from the menu. It closes the frame.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
self.Destroy()
def onDestroy(self, event):
'''
Whenever this frame is closed, this secondary method updates a variable in the parent
class to make sure that it knows there is no active instance of this frame.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
self.parent.loadGraphFrame = False
class LeastSquaresFitFrame(wx.Frame):
'''
This class is not in use right now.
'''
"""
title = "Least Squares Fit"
def __init__(self):
wx.Frame.__init__(self, None,-1, self.title)
self.panel = wx.Panel(self)
self.pT = pathText
self.data = IO.load(self.pT)
self.box1 = AddLCB(self.panel,-1,name="planet")
self.Bind(wx.EVT_BUTTON,self.update,self.box1.updateButton)
self.topBox = wx.BoxSizer(wx.HORIZONTAL)
self.topBox.Add(self.box1, border=5, flag=wx.ALL)
self.list = [
('Rp/Rs',"Ratio of Radii (Rp/Rs):",
'Enter a ratio of the radii here.',''),
('a/Rs',"a/Rs:",
'Enter a value for a/Rs here.',''),
('per',"Period:",
'Enter a value for the period here.',''),
('inc',"Inclination:",
'Enter a value for the inclination here.',''),
('ecc',"Eccentricity: ",
'Enter a value for the eccentricity here.',''),
('t0',"t0:",
'Enter a value for t0 here.',
str(transiterFit.calcMidTranTime(self.data.times,self.data.lightCurves[radiusNum]))),
('gamma1',"Gamma 1:",
'Enter a value for gamma 1 here.','0.0'),
('gamma2'," Gamma 2:",
'Enter a value for gamma 2 here.','0.0'),
('pericenter',"Pericenter:",
'Enter an argument for the pericenter here.','0.0'),
('limbdark',"Limb-Darkening Parameter:",
'Enter an argument for limb-darkening here.','False')
]
self.box = ParameterBox(self.panel,-1,self.list,name="Input Parameters")
self.hbox = wx.BoxSizer(wx.HORIZONTAL)
self.hbox.Add(self.box, border=5, flag=wx.ALL)
self.plotButton = wx.Button(self.panel,label = 'Plot')
self.Bind(wx.EVT_BUTTON,self.plot, self.plotButton)
self.sizer0 = wx.FlexGridSizer(rows=1, cols=10)
self.hbox2 = wx.BoxSizer(wx.HORIZONTAL)
self.hbox2.Add(self.sizer0,0, wx.ALIGN_CENTER|wx.ALL,5)
self.sizer0.Add(self.plotButton,0,wx.ALIGN_CENTER|wx.ALL,5)
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(self.topBox, 0, flag=wx.ALIGN_CENTER | wx.TOP)
self.vbox.Add(self.hbox, 0, flag=wx.ALIGN_CENTER | wx.TOP)
self.vbox.Add(self.hbox2, 0, flag=wx.ALIGN_CENTER | wx.TOP)
#
# self.box.userParams['t0'].SetValue(str(oscaar.transiterFit.calcMidTranTime(self.data.times,self.data.lightCurve)))
#
self.vbox.AddSpacer(10)
self.vbox.AddSpacer(10)
self.panel.SetSizer(self.vbox)
self.vbox.Fit(self)
self.create_menu()
self.Bind(wx.EVT_WINDOW_DESTROY, self.onDestroy)
self.Center()
self.Show()
def plot(self,event):
self.tempLimbDark = self.box.userParams['limbdark'].GetValue()
list = [(self.box.userParams['Rp/Rs'].GetValue(),"Rp/Rs"),(self.box.userParams['a/Rs'].GetValue(),"a/Rs"),
(self.box.userParams['per'].GetValue(),"per"), (self.box.userParams['inc'].GetValue(),"inc"),
(self.box.userParams['ecc'].GetValue(),"ecc"), (self.box.userParams['t0'].GetValue(),"t0"),
(self.box.userParams['gamma1'].GetValue(),"gamma1"),(self.box.userParams['gamma2'].GetValue(),"gamma2"),
(self.box.userParams['pericenter'].GetValue(),"pericenter"),
(self.tempLimbDark,"limbdark")]
if checkParams(self,list) == True:
if self.box.userParams['limbdark'].GetValue() == 'False':
self.tempLimbDark = False
fit, success = transiterFit.run_LMfit(self.data.getTimes(), self.data.lightCurves[radiusNum],
self.data.lightCurveErrors[radiusNum],
float(self.box.userParams['Rp/Rs'].GetValue()),
float(self.box.userParams['a/Rs'].GetValue()),
float(self.box.userParams['inc'].GetValue()),
float(self.box.userParams['t0'].GetValue()),
float(self.box.userParams['gamma1'].GetValue()),
float(self.box.userParams['gamma2'].GetValue()),
float(self.box.userParams['per'].GetValue()),
float(self.box.userParams['ecc'].GetValue()),
float(self.box.userParams['pericenter'].GetValue()),
fitLimbDark=self.tempLimbDark, plotting=True)
n_iter = 300
# Rp,aRs,inc,t0,gam1,gam2=oscaar.transiterFit.run_MCfit(n_iter,self.data.getTimes(),
# self.data.lightCurve, self.data.lightCurveError,fit,success,
# float(self.box.GetPeriod()),float(self.box.GetEcc()),
# float(self.box.GetPericenter()),float(self.box.GetGamma1()),float(self.box.GetGamma2()), plotting=False)
def update(self,event):
if self.box1.boxList[1].GetValue() == '':
self.IP = InvalidParameter(self.box1.boxList[1].GetValue(), None,-1, stringVal="planet")
else:
self.planet = self.box1.boxList[1].GetValue()
[RpOverRs,AOverRs,per,inc,ecc] = returnSystemParams.transiterParams(self.planet)
if RpOverRs == -1 or AOverRs == -1 or per == -1 or inc == -1 or ecc == -1:
self.IP = InvalidParameter(self.box1.boxList[1].GetValue(), None,-1, stringVal="planet")
else:
self.box.userParams['Rp/Rs'].SetValue(str(RpOverRs))
self.box.userParams['a/Rs'].SetValue(str(AOverRs))
self.box.userParams['per'].SetValue(str(per))
self.box.userParams['inc'].SetValue(str(inc))
self.box.userParams['ecc'].SetValue(str(ecc))
self.IP = InvalidParameter("",None,-1, stringVal="params")
def create_menu(self):
# These commands create a drop down menu with the exit command.
self.menubar = wx.MenuBar()
menu_file = wx.Menu()
m_exit = menu_file.Append(-1, "E&xit\tCtrl-Q", "Exit")
self.Bind(wx.EVT_MENU, self.on_exit, m_exit)
self.menubar.Append(menu_file, "&File")
self.SetMenuBar(self.menubar)
def on_exit(self, event):
self.Destroy()
def onDestroy(self, event):
global loadLSFit
loadLSFit = False
"""
class MCMCFrame(wx.Frame):
'''
This frame allows the user to edit a number of different parameters to run the
Markov Chain Monte Carlo routine for fitting.
'''
title = "MCMC Fit"
def __init__(self, parent, objectID):
'''
This method defines the initialization of this class.
'''
wx.Frame.__init__(self, parent, objectID, self.title)
self.panel = wx.Panel(self)
self.parent = parent
self.messageFrame = False
self.IP = wx.Frame
self.pT = self.parent.pathText
self.saveLoc = self.parent.saveLocation.GetValue()
self.data = self.parent.data
self.LCB = AddLCB(self.panel,-1,name="planet")
self.Bind(wx.EVT_BUTTON,self.update,self.LCB.updateButton)
radiiString = [str(x) for x in self.data.apertureRadii]
self.apertureRadiusIndex = 0
self.radiusLabel = wx.StaticText(self.panel, -1, 'Select Aperture Radius: ')
self.radiusList = wx.ComboBox(self.panel, value = str(self.data.apertureRadii[0]), choices = radiiString)
self.radiusList.Bind(wx.EVT_COMBOBOX, self.radiusUpdate)
self.dropBox = wx.BoxSizer(wx.HORIZONTAL)
self.dropBox.Add(self.radiusLabel, 0, flag = wx.ALIGN_CENTER | wx.LEFT, border = 10)
self.dropBox.Add(self.radiusList, 0, flag = wx.ALIGN_CENTER)
self.topBox = wx.BoxSizer(wx.HORIZONTAL)
self.topBox.Add(self.LCB, flag = wx.ALIGN_CENTER | wx.ALL, border = 5)
self.topBox.Add(self.dropBox, flag = wx.ALIGN_CENTER | wx.ALL, border = 5)
tupleList = [('Rp/Rs',"Ratio of Radii (Rp/Rs):", 'Enter a ratio of the radii here.','0.11'),
('a/Rs',"a/Rs:", 'Enter a value for a/Rs here.','14.1'),
('inc',"Inclination:", 'Enter a value for the inclination here.','90.0'),
('t0',"t0:", 'Enter a value for the mid transit time here.','2456427.9425593214')]
self.box = ParameterBox(self.panel,-1,tupleList,"Free Parameters",rows=4,cols=2)
self.hbox = wx.BoxSizer(wx.HORIZONTAL)
self.hbox.Add(self.box, border=5, flag=wx.ALL)
tupleList = [('b-Rp/Rs',"Beta Rp/Rs:", 'Enter a beta for Rp/Rs here.','0.005'),
('b-a/Rs',"Beta a/Rs:", 'Enter a beta for a/Rs here.','0.005'),
('b-inc',"Beta Inclination:", 'Enter a beta for inclination here.','0.005'),
('b-t0',"Beta t0:", 'Enter a beta for the mid transit time here.','0.005')]
self.box2 = ParameterBox(self.panel,-1,tupleList,"Beta's",rows=4,cols=2)
self.hbox2 = wx.BoxSizer(wx.HORIZONTAL)
self.hbox2.Add(self.box2, border=5, flag=wx.ALL)
tupleList = [('per',"Period:", 'Enter a value for the period here.','1.580400'),
('gamma1',"gamma1:", 'Enter a value for gamma1 here.','0.23'),
('gamma2',"gamma2:", 'Enter a value for gamma2 here.','0.3'),
('ecc',"Eccentricity:", 'Enter a value for the eccentricity here.','0.0'),
('pericenter',"Pericenter:", 'Enter a value for the pericenter here.','0.0')]
self.box3 = ParameterBox(self.panel,-1,tupleList,"Fixed Parameters")
self.hbox3 = wx.BoxSizer(wx.HORIZONTAL)
self.hbox3.Add(self.box3, border=5, flag=wx.ALL)
tupleList = [('saveiteration',"Iteration to save:", 'Enter a number for the nth iteration to be saved.','10'),
('burnfrac',"Burn Fraction:", 'Enter a decimal for the burn fraction here.','0.20'),
('acceptance',"Acceptance:", 'Enter a value for the acceptance rate here.','0.30'),
('number', "Number of Steps:", 'Enter a value for the total steps here.','10000')]
self.box4 = ParameterBox(self.panel,-1,tupleList,"Fit Parameters")
self.hbox4 = wx.BoxSizer(wx.HORIZONTAL)
self.hbox4.Add(self.box4, border=5, flag=wx.ALL)
self.plotButton = wx.Button(self.panel,label = 'Run and Plot')
self.Bind(wx.EVT_BUTTON,self.plot, self.plotButton)
self.sizer0 = wx.FlexGridSizer(rows=1, cols=10)
self.hbox5 = wx.BoxSizer(wx.HORIZONTAL)
self.hbox5.Add(self.sizer0,0, wx.ALIGN_CENTER|wx.ALL,5)
self.sizer0.Add(self.plotButton,0,wx.ALIGN_CENTER|wx.ALL,5)
self.vbox2 = wx.BoxSizer(wx.HORIZONTAL)
self.vbox2.Add(self.hbox, 0, flag=wx.ALIGN_CENTER | wx.TOP)
self.vbox2.Add(self.hbox2, 0, flag=wx.ALIGN_CENTER | wx.TOP)
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(self.topBox, 0, flag=wx.ALIGN_CENTER | wx.TOP)
self.vbox.Add(self.vbox2, 0, flag=wx.ALIGN_CENTER | wx.TOP)
self.vbox.Add(self.hbox3, 0, flag=wx.ALIGN_CENTER | wx.TOP)
self.vbox.Add(self.hbox4, 0, flag=wx.ALIGN_CENTER | wx.TOP)
self.vbox.Add(self.hbox5, 0, flag=wx.ALIGN_CENTER | wx.TOP)
self.vbox.AddSpacer(10)
self.vbox.AddSpacer(10)
self.panel.SetSizer(self.vbox)
self.vbox.Fit(self)
self.create_menu()
self.Bind(wx.EVT_WINDOW_DESTROY, self.onDestroy)
self.Center()
self.Show()
def create_menu(self):
'''
This method creates the menu bars that are at the top of the MCMC frame.
Notes
-----
This method has no input or return parameters. It will simply be used as self.create_menu()
when in the initialization method for an instance of this frame.
'''
self.menubar = wx.MenuBar()
menu_file = wx.Menu()
m_exit = menu_file.Append(-1, "E&xit\tCtrl-Q", "Exit")
self.Bind(wx.EVT_MENU, self.on_exit, m_exit)
self.menubar.Append(menu_file, "&File")
self.SetMenuBar(self.menubar)
def plot(self,event):
'''
After checking that all of the user editable parameters in the frame are valid and loaded
as a list of variables, this method actually executes the MCMC fitting routine by calling it from
the fitting.py file.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
tupleList = [(self.box.userParams['Rp/Rs'].GetValue(),"Rp/Rs"),
(self.box.userParams['a/Rs'].GetValue(),"a/Rs"),
(self.box3.userParams['per'].GetValue(),"per"),
(self.box.userParams['inc'].GetValue(),"inc"),
(self.box3.userParams['ecc'].GetValue(),"ecc"),
(self.box.userParams['t0'].GetValue(),"t0"),
(self.box3.userParams['gamma1'].GetValue(),"gamma1"),
(self.box3.userParams['gamma2'].GetValue(),"gamma2"),
(self.box3.userParams['pericenter'].GetValue(),"pericenter"),
(self.box4.userParams['saveiteration'].GetValue(),"saveiteration"),
(self.box4.userParams['acceptance'].GetValue(),"acceptance"),
(self.box4.userParams['burnfrac'].GetValue(),"burnfrac"),
(self.box4.userParams['number'].GetValue(),"number")]
if checkParams(self,tupleList) == True and self.radiusCheck() == True:
initParams = [float(self.box.userParams['Rp/Rs'].GetValue()),float(self.box.userParams['a/Rs'].GetValue()),
float(self.box3.userParams['per'].GetValue()), float(self.box.userParams['inc'].GetValue()),
float(self.box3.userParams['gamma1'].GetValue()),float(self.box3.userParams['gamma2'].GetValue()),
float(self.box3.userParams['ecc'].GetValue()),float(self.box3.userParams['pericenter'].GetValue()),
float(self.box.userParams['t0'].GetValue())]
nSteps = float(self.box4.userParams['number'].GetValue())
initBeta = (np.zeros([4]) + 0.012).tolist()
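# Note (added for clarity): the proposal step sizes are hard-coded to 0.012 here; the values
# typed into the beta text boxes (self.box2) do not appear to be read by this method.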
idealAcceptanceRate = float(self.box4.userParams['acceptance'].GetValue())
interval = float(self.box4.userParams['saveiteration'].GetValue())
burnFraction = float(self.box4.userParams['burnfrac'].GetValue())
# Spawn a new process to execute the MCMC run separately.
mcmcCall = 'import oscaar.fitting; mcmcinstance = oscaar.fitting.mcmcfit("%s",%s,%s,%s,%s,%s,%s); mcmcinstance.run(updatepkl=True, apertureRadiusIndex=%s); mcmcinstance.plot(num=%s)' % \
(self.pT,initParams,initBeta,nSteps,interval,idealAcceptanceRate,burnFraction,
self.apertureRadiusIndex,self.apertureRadiusIndex)
subprocess.check_call(['python','-c',mcmcCall])
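# check_call blocks until the child Python process running the MCMC fit has finished,
# so the updated .pkl file can then be re-read below.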
# Load the data again and save it in a text file.
self.data = IO.load(self.pT)
if not self.saveLoc.lower().endswith(".txt"):
self.saveLoc += ".txt"
outfile = open(self.saveLoc,'w')
outfile.write(self.data.uncertaintyString())
outfile.close()
def radiusCheck(self):
'''
This method checks to make sure that the aperture radius entered is valid and in the list
available for the selected .pkl file.
Returns
-------
literal : bool
True if the radius is valid, false otherwise.
'''
if self.radiusList.GetValue() == "":
self.IP = InvalidParameter(self.radiusList.GetValue(), self, -1, stringVal="radiusError")
return False
try:
condition = self.epsilonCheck(self.data.apertureRadii,float(self.radiusList.GetValue()))
self.tempNum = np.array(self.data.apertureRadii)[condition]
if len(self.tempNum) == 0:
tempString = self.radiusList.GetValue() + " was not found in " + str(self.data.apertureRadii)
self.IP = InvalidParameter(tempString, self, -1, stringVal="radiusListError2")
return False
except ValueError:
self.IP = InvalidParameter(self.radiusList.GetValue(), self, -1, stringVal="radiusError")
return False
return True
def update(self,event):
'''
This method will update the appropriate parameters for the frame, if a user selects
an appropriate planet name from the exoplanet.org database.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
if self.LCB.boxList[1].GetValue() == '':
self.IP = InvalidParameter(self.LCB.boxList[1].GetValue(), self,-1, stringVal="planet")
else:
self.planet = self.LCB.boxList[1].GetValue()
[RpOverRs,AOverRs,per,inc,ecc] = returnSystemParams.transiterParams(self.planet)
if RpOverRs == -1 or AOverRs == -1 or per == -1 or inc == -1 or ecc == -1:
self.IP = InvalidParameter(self.LCB.boxList[1].GetValue(), self,-1, stringVal="planet")
else:
self.box.userParams['Rp/Rs'].SetValue(str(RpOverRs))
self.box.userParams['a/Rs'].SetValue(str(AOverRs))
self.box3.userParams['per'].SetValue(str(per))
self.box.userParams['inc'].SetValue(str(inc))
self.box3.userParams['ecc'].SetValue(str(ecc))
self.IP = InvalidParameter("",self,-1, stringVal="params")
def epsilonCheck(self,a,b):
'''
This method checks whether two numbers are within machine precision of each other,
because exact comparisons can fail due to rounding differences when mixing
single and double precision NumPy floats and pure Python built-in float types.
Parameters
----------
a : array
An array of float type numbers to check through.
b : float
The number that is being checked for in the array.
Returns
-------
literal : array
This is an array of booleans.
Notes
-----
There are boolean literals of True in the return array if any number in `a` is within machine precision
of `b`.
Examples
--------
Inputs: `a` = [0, 1.0, 2.0, 3.0, 4.0], `b` = 3.0
Return: [False, False, False, True, False]
'''
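# Single-precision epsilon is used as the tolerance since, as noted above, the stored radii
# and the value taken from the GUI may differ only by float32/float64 rounding.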
a = np.array(a)
return np.abs(a-b) < np.finfo(np.float32).eps
def radiusUpdate(self, event):
'''
This method updates the current index into the list of available radii that this frame will use for the MCMC fit and plot.
It does this by calling self.epsilonCheck to get an array of booleans. Afterwards, it selects the location
of the boolean True and marks that as the new index.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
self.apertureRadiusIndex = np.where(self.epsilonCheck(self.data.apertureRadii,
float(self.radiusList.GetValue())))[0][0]
def on_exit(self, event):
'''
This method defines the action quit from the menu. It closes the frame.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
self.Destroy()
def onDestroy(self, event):
'''
Whenever this frame is closed, this secondary method updates a variable in the parent
class to make sure that it knows there is no active instance of this frame.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
self.parent.loadMCMC = False
class ParameterBox(wx.Panel):
'''
This is a general-purpose class that is used throughout the GUI to create an interactive box
with multiple text controls for user input.
Parameters
----------
parent : window
The parent window that this box will be associated with.
objectID : int
The identity number of the object.
tupleList : array
An array of tuples for the different text controls desired. The tuple must be four strings.
name : string, optional
The name of the box for the current set of parameters. It is displayed in the upper left hand corner.
rows : int, optional
The number of rows for the box.
cols : int, optional
The number of columns for the box.
vNum : int, optional
The vertical displacement between each text control.
hNum : int, optional
The horizontal displacement between each text control.
font : wx.font(), optional
The type of style you would like the text to be displayed as.
secondButton : bool, optional
If a radio button is created by this class, the first value of the radio button will be selected
since the default value is false. IF this variable is true however, the second value of the radio
button is selected.
Notes
-----
The list that is given as a parameter must be an array of tuples. The format for these tuples is
(string, string, string, string). The first string will be the keyword (widget) to select that specific
text box to work with in the code. The second string is the name of the parameter that will appear in the GUI.
The third string will be the tooltip that is seen if the user hovers over the box. The fourth string is
the default value for that parameter.
If however, the widget name begins with 'rb', a radio button will be created. In this scenario, the second
string will be the name of the parameter, with the 3rd and 4th strings being the values of the two radio
buttons that will be created.
'''
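# Illustrative usage (added for clarity, not original code): a box with two text fields could
# be built as ParameterBox(panel, -1, [('per', "Period:", 'Enter the period.', '1.58'),
# ('ecc', "Eccentricity:", 'Enter the eccentricity.', '0.0')], name="Fixed Parameters"),
# mirroring the tupleList format used by MCMCFrame above.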
def __init__(self, parent, objectID, tupleList, name="", rows=1, cols=10, vNum=0, hNum=0, font=wx.NORMAL_FONT,
secondButton=False):
wx.Panel.__init__(self,parent,objectID)
box1 = wx.StaticBox(self, -1, name)
sizer = wx.StaticBoxSizer(box1, wx.VERTICAL)
self.userParams = {}
sizer0 = wx.FlexGridSizer(rows=rows, cols=cols, vgap=vNum, hgap=hNum)
sizer.Add(sizer0, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
for (widget, labeltxt, ToolTip, value) in tupleList:
label = wx.StaticText(self, -1, labeltxt, style=wx.ALIGN_CENTER)
sizer0.Add(label, 0, wx.ALIGN_CENTRE|wx.ALL, 3)
label.SetFont(font)
if widget == "observatoryName" or widget == "fileName":
self.userParams[widget] = wx.TextCtrl(self, -1, value = value, size = (220,wx.DefaultSize.GetHeight()))
elif not widget.find('rb') == 0:
self.userParams[widget] = wx.TextCtrl(self, -1, value = value)
if widget.find('rb') == 0:
label1 = ToolTip
label2 = value
self.userParams[widget] = wx.RadioButton(self, label = label1, style = wx.RB_GROUP)
sizer0.Add(self.userParams[widget], 0, wx.ALIGN_CENTRE|wx.ALL, 0)
if secondButton == True:
self.userParams[widget+"1"] = wx.RadioButton(self, label = label2)
self.userParams[widget+"1"].SetValue(True)
else:
self.userParams[widget+"1"] = wx.RadioButton(self, label = label2)
self.userParams[widget].SetValue(True)
sizer0.Add(self.userParams[widget+"1"], 0, wx.ALIGN_CENTRE|wx.ALL, 0)
else:
self.userParams[widget].SetToolTipString(ToolTip)
sizer0.Add(self.userParams[widget], 0, wx.ALIGN_CENTRE|wx.ALL, 0)
if widget == "ingress" or widget == "egress":
value = "00:00:00"
self.userParams[widget+"1"] = wx.TextCtrl(self, -1, value = value)
self.userParams[widget+"1"].SetToolTipString(ToolTip)
sizer0.Add(self.userParams[widget+"1"], 0, wx.ALIGN_CENTRE|wx.ALL, 0)
self.SetSizer(sizer)
sizer.Fit(self)
class AddLCB(wx.Panel):
'''
This creates a set consisting of a label, a text control box, and a button. It is usually used to let a user
browse and select a file.
Parameters
----------
parent : window
The parent panel that this box will be associated with.
objectID : int
The identity number of the object.
parent2 : window, optional
Usually the parent is the panel that the LCB gets created in. If, however, there is a need
to use the actual parent frame, a second window is allowed to be linked.
name : string, optional
The name of the label for the static box. If the name is 'mainGUI' or 'planet' a different set gets
created.
buttonLabel : string, optional
The name of the button that is created.
multFiles : bool, optional
If true, when browsing for files the user can select multiple ones. If false, only one file is
allowed to be selected.
rowNum : int, optional
The number of rows for the box.
colNum : int, optional
The number of columns for the box.
vNum : int, optional
The vertical displacement between each text control.
hNum : int, optional
The horizontal displacement between each text control.
font : wx.font(), optional
The type of style you would like the text to be displayed as.
updateRadii : bool, optional
If true, this method will update the available aperture radii list for the drop down menu in the
parent frame.
boxName : string, optional
The name of the box for the current LCB set. It is displayed in the upper left hand corner.
height : int, optional
The height of the control box.
saveType : wx.FD_*, optional
The style of the box that will appear. The * represents a wild card value for different types.
'''
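# Example from this module (see MCMCFrame.__init__): AddLCB(self.panel, -1, name="planet")
# builds the "Planet Name" text box together with its "Update Parameters" button.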
def __init__(self, parent, objectID, parent2=None, name='', buttonLabel="Browse", multFiles=False, rowNum=1, colNum=3,
vNum=0, hNum=0, font=wx.NORMAL_FONT, updateRadii=False, boxName="", height=20, saveType=wx.FD_OPEN):
wx.Panel.__init__(self,parent,objectID)
box1 = wx.StaticBox(self, -1, boxName)
box1.SetFont(font)
sizer = wx.StaticBoxSizer(box1, wx.VERTICAL)
self.parent = parent2
self.messageFrame = False
self.IP = wx.Frame
self.boxList = {}
self.buttonList = {}
sizer0 = wx.FlexGridSizer(rows=rowNum, cols=colNum, vgap=vNum, hgap=hNum)
sizer.Add(sizer0, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
iterationNumber = 0
extraName = ""
if name == "mainGUI":
extraName = "mainGUI"
name = "Path to Dark Frames: ,Path to Master Flat: ,Path to Data Images: ,Path to Regions File: ," + \
"Output Path: "
for eachName in name.split(","):
if sys.platform != "win32":
if eachName == "Path to Dark Frames: " or eachName == "Path to Data Images: ":
height = 35
else:
height = 25
if eachName == "Path to Dark Frames: " or eachName == "Path to Data Images: " or eachName == "Path to "+\
"Regions File: ":
if extraName == "mainGUI":
multFiles = True
saveType = None
elif eachName == "Path to Master Flat: ":
multFiles = False
saveType = wx.FD_OPEN
elif eachName == "Output Path: ":
multFiles = False
saveType = wx.FD_SAVE
iterationNumber += 1
if eachName == 'planet':
self.label = wx.StaticText(self, -1, "Planet Name", style=wx.ALIGN_CENTER)
self.label.SetFont(font)
self.boxList[iterationNumber] = wx.TextCtrl(self, -1, value='GJ 1214 b', style=wx.TE_RICH)
self.boxList[iterationNumber].SetToolTipString("Enter the name of a planet from the" +\
"exoplanet.org database here.")
else:
self.label = wx.StaticText(self, -1, eachName, style=wx.ALIGN_CENTER)
self.label.SetFont(font)
self.boxList[iterationNumber] = wx.TextCtrl(self, -1, size=(500,height), style=wx.TE_RICH)
sizer0.Add(self.label, 0, wx.ALIGN_CENTRE|wx.ALL, 3)
sizer0.Add(self.boxList[iterationNumber], 0, wx.ALIGN_CENTRE|wx.ALL, 0)
if eachName == 'planet':
self.updateButton = wx.Button(self, -1, "Update Parameters")
sizer0.Add(self.updateButton,0,wx.ALIGN_CENTER|wx.ALL,0)
else:
if sys.platform != 'win32':
if buttonLabel == "Browse\t (Cntrl-O)":
buttonLabel = "Browse\t("+u'\u2318'"-O)"
self.buttonList[iterationNumber] = wx.Button(self, -1, buttonLabel)
else:
self.buttonList[iterationNumber] = wx.Button(self, -1, buttonLabel)
self.buttonList[iterationNumber].Bind(wx.EVT_BUTTON, lambda event, lambdaIter = iterationNumber,
lambdaMult = multFiles, lambdaSave = saveType:
self.browseButtonEvent(event, "Choose Path(s) to File(s)",self.boxList[lambdaIter], lambdaMult,
lambdaSave, update=updateRadii))
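# The lambda's default arguments (lambdaIter, lambdaMult, lambdaSave) capture the current loop
# values, so each Browse button keeps its own text box, multi-file flag and dialog style.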
sizer0.Add(self.buttonList[iterationNumber],0,wx.ALIGN_CENTRE|wx.ALL,0)
self.SetSizer(sizer)
sizer.Fit(self)
def browseButtonEvent(self, event, message, textControl, fileDialog, saveDialog, update=False):
'''
This method defines the `browse` function for selecting a file on any OS.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
message : string
The message that tells the user what to choose.
textControl : wx.TextCtrl
The box in the frame that will be refreshed with the files that are chosen by the user.
fileDialog : bool
If true, the style is wx.FD_MULTIPLE, otherwise it is the same as the `saveDialog`.
saveDialog : wx.FD_*
The style of the box that will appear. The * represents a wild card value for different types.
update : bool, optional
This will update the aperture radii list for a selected file in the parent frame if true.
'''
if not fileDialog:
dlg = wx.FileDialog(self, message = message, style = saveDialog)
else:
dlg = wx.FileDialog(self, message = message, style = wx.FD_MULTIPLE)
if dlg.ShowModal() == wx.ID_OK:
if saveDialog == wx.SAVE:
filenames = [dlg.GetPath()]
else:
filenames = dlg.GetPaths()
textControl.Clear()
for i in range(0,len(filenames)):
if i != len(filenames)-1:
textControl.WriteText(filenames[i] + ',')
else:
textControl.WriteText(filenames[i])
if update == True:
try:
if self.parent.validityCheck(throwException = False):
self.parent.radiusList.Clear()
self.parent.data = IO.load(self.parent.box.boxList[1].GetValue())
self.parent.apertureRadii = np.empty_like(self.parent.data.apertureRadii)
self.parent.apertureRadii[:] = self.parent.data.apertureRadii
radiiString = [str(x) for x in self.parent.data.apertureRadii]
for string in radiiString:
self.parent.radiusList.Append(string)
self.parent.radiusList.SetValue(radiiString[0])
except AttributeError:
self.IP = InvalidParameter("", self, -1, stringVal="oldPKL")
dlg.Destroy()
class ScanParamsBox(wx.Panel):
'''
This is the box that is used in the GraphFrame class for an interactive light curve plot.
'''
def __init__(self,parent,objectID):
'''
This is the initialization of the box. It has four controls: bin size, title, x-axis label,
and y-axis label.
'''
wx.Panel.__init__(self,parent,objectID)
self.messageFrame = False
self.IP = wx.Frame
box1 = wx.StaticBox(self, -1, "Descriptive information")
sizer = wx.StaticBoxSizer(box1, wx.VERTICAL)
self.userinfo = {}
sizer0 = wx.FlexGridSizer(rows=2, cols=4)
sizer.Add(sizer0, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
for (widget,label,ToolTip) in [
('bin',"Bin Size:",
'Enter a bin number here.'),
('title',"Title:",
'Enter a name for the title here.'),
('xlabel',"X-Axis Name:",
'Enter a name for the X-Axis here.'),
('ylabel',"Y-Axis Name:",
'Enter a name for the Y-Axis here.')
]:
label = wx.StaticText(self, -1, label, style=wx.ALIGN_CENTER)
sizer0.Add(label, 0, wx.ALIGN_CENTRE|wx.ALL, 3)
if widget == 'bin':
self.userinfo[widget] = wx.TextCtrl(self, -1,value='10')
elif widget == 'xlabel':
self.userinfo[widget] = wx.TextCtrl(self, -1,value='Time (JD)')
elif widget == 'ylabel':
self.userinfo[widget] = wx.TextCtrl(self, -1,value='Relative Flux')
elif widget == 'title':
self.userinfo[widget] = wx.TextCtrl(self, -1,value='Light Curve')
self.userinfo[widget].SetToolTipString(ToolTip)
sizer0.Add(self.userinfo[widget], 0, wx.ALIGN_CENTRE|wx.ALL, 0)
self.SetSizer(sizer)
sizer.Fit(self)
self.oldNum = self.userinfo['bin'].GetValue()
self.newNum = self.userinfo['bin'].GetValue()
self.oldX = str(self.userinfo['xlabel'].GetValue())
self.newX = str(self.userinfo['xlabel'].GetValue())
self.oldY = str(self.userinfo['ylabel'].GetValue())
self.newY = str(self.userinfo['ylabel'].GetValue())
self.oldtitle = str(self.userinfo['title'].GetValue())
self.newtitle = str(self.userinfo['title'].GetValue())
self.max = 100
def boxCorrect(self):
'''
This method checks to make sure that the user input for bin size is a number
as well as greater than the minimum bin size of 5. The maximum bin size depends
on the light curve that was loaded.
'''
if self.userinfo['bin'].GetValue() == '':
self.IP = InvalidParameter(self.userinfo['bin'].GetValue(), self, -1, secondValue=str(self.max))
return False
else:
try:
self.var = int(self.userinfo['bin'].GetValue())
except ValueError:
self.IP = InvalidParameter(self.userinfo['bin'].GetValue(), self, -1, secondValue=str(self.max))
return False
if int(self.userinfo['bin'].GetValue()) <= 4 or int(self.userinfo['bin'].GetValue()) > self.max:
self.IP = InvalidParameter(self.userinfo['bin'].GetValue(), self,-1, secondValue=str(self.max))
return False
else:
return True
def boxDiff(self):
'''
This method will determine if a new plot needs to be made or not.
Returns
-------
literal : bool
If true, one of the four parameters for this box was changed, and a new plot needs to be made. If
no change has been made then it returns false.
'''
if not self.oldNum == self.newNum:
self.oldNum = self.newNum
return True
elif not self.oldX == self.newX:
self.oldX = self.newX
return True
elif not self.oldY == self.newY:
self.oldY = self.newY
return True
elif not self.oldtitle == self.newtitle:
self.oldtitle = self.newtitle
return True
else:
return False
def update(self):
'''
Before checking if a parameter has been changed using the above boxDiff() method, this method
updates the current values of each control to be checked against the old values.
'''
self.newNum = self.userinfo['bin'].GetValue()
self.newX = self.userinfo['xlabel'].GetValue()
self.newY = self.userinfo['ylabel'].GetValue()
self.newtitle = self.userinfo['title'].GetValue()
def setMax(self, length):
'''
Sets the maximum bin size for the plot.
Parameters
----------
length : int
Number for the max bin size.
'''
self.max = length
class InvalidParameter(wx.Frame):
'''
This class is universally used throughout the code to relay any pop-up messages
to the user.
'''
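# Typical use elsewhere in this module: InvalidParameter(value, self, -1, stringVal="planet")
# pops up the error message associated with the "planet" keyword.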
def __init__(self, message, parent, objectID, stringVal='', secondValue='0', columns=2):
'''
This is the initialization of the popup message. It varies greatly depending on what
the user needs to see.
Parameters
----------
message : string
Usually the invalid value that was entered by the user somewhere. It is left blank if,
instead of an error, a message just needs to be shown to the user.
parent : window
The parent class that this frame will open up from and is associated with.
objectID : int
The identity number of the object.
stringVal : string, optional
This is the string that is used to determine what type of message will appear in the frame
that pops up.
secondValue : string, optional
If a second value needs to be displayed besides `message`, this is where it is entered.
columns : int, optional
The number of columns that this frame will have.
Notes
-----
There is no return, but on successful completion of initialization a window will pop up
with a message for the user.
'''
if sys.platform == "win32":
wx.Frame.__init__(self, parent, objectID, 'Invalid Parameter', size = (500,110))
else:
wx.Frame.__init__(self, parent, objectID, 'Invalid Parameter', size = (500,100))
self.create_menu()
self.Bind(wx.EVT_CHAR_HOOK, self.onCharOkay)
self.parent = parent
if self.parent.messageFrame == True:
pass
else:
self.parent.messageFrame = True
if stringVal == "params":
self.SetTitle("Updated Parameters")
self.Bind(wx.EVT_CHAR_HOOK, self.onOkay)
elif stringVal == "ds9":
self.SetTitle("DS9 Error")
elif stringVal == "fitOpen":
self.SetTitle("Fitting Frame Open Error")
elif stringVal == "warnError":
self.SetTitle("Warning about local times!")
elif stringVal == "regionsUpdate":
self.SetTitle("Regions File Set Added!")
elif stringVal == "setExists":
self.SetTitle("Set Exists!")
self.panel = wx.Panel(self)
self.string = "invalid"
if secondValue != '0':
self.string = "The bin size must be between 5 and "+ secondValue +"."
if stringVal == "Rp/Rs":
self.string = "The value for Rp over Rs must be between 0 and 1."
elif stringVal == "a/Rs":
self.string = "The value for A over Rs must be greater than 1."
elif stringVal == "inc":
self.string = "The value for the inclincation must be between 0 and 90."
elif stringVal == "t0":
self.string = "The value for the mid-transit time, t0, must be greater than 0."
elif stringVal == "gamma1":
self.string = "The value entered for gamma1 must be a number."
elif stringVal == "gamma2":
self.string = "The value entered for gamma2 must be a number."
elif stringVal == "gamma":
self.string = "The value for Gamma1 + Gamma2 must be less than or equal to 1."
elif stringVal == "per":
self.string = "The value for the period must be greater than 0."
elif stringVal == "ecc":
self.string = "The value for the eccentricity must be between 0 and 1."
elif stringVal == "pericenter":
self.string = "The value for the pericenter must be greater than or equal to 0."
elif stringVal == "planet":
self.string = "The name of the planet does not exist in the database."
elif stringVal == "limbdark":
self.string = "The parameter for Limb-Darkening must be either 'False', 'linear', or 'quadratic'."
elif stringVal == "saveiteration":
self.string = "The iterative step to be saved must be greater than or equal to 5."
elif stringVal == "acceptance":
self.string = "The acceptance rate must be greater than 0."
elif stringVal == "burnfrac":
self.string = "The burn number must be greater than 0 and less than or equal to 1."
elif stringVal == "number":
self.string = "The number of total steps must be greater than or equal to 10."
elif stringVal == "mod":
self.string = "The iterative step to be saved cannot be greater than the total number of steps."
elif stringVal == "flat1":
self.string = "The path(s) to flat images must be fixed."
elif stringVal == "flat2":
self.string = "The path(s) to dark flat images must be fixed."
elif stringVal == "flat3":
self.string = "The path to save the master flat must be fixed."
elif stringVal == "fits":
self.string = "One or more of the files in " + secondValue + " need to be fixed."
elif stringVal == "master":
self.string = "Either more than one file has been entered, or the file entered needs to be fixed in the " + \
secondValue + "."
elif stringVal == "output":
self.string = "Either you entered a directory, or the specified path cannot be made for the " + secondValue + \
"."
elif stringVal == "leftbox":
self.string = "Please enter a number for the " + secondValue + "."
elif stringVal == "dateTime":
self.string = "Please check the format and values entered for the ingress or egress " + secondValue + ".\n"
if secondValue == "date":
self.string += "The year must be within 100 years of today, the month must be between 1 and 12\nand" +\
" the day must be between 1 and 31."
elif secondValue == "time":
self.string += "The hour must be between 0 and 23, while both the minutes and seconds must be between"+\
" 0 and 59.\nThe format is hh:mm:ss."
elif stringVal == "obsName" or stringVal == "obsFile":
self.string = "The observatory name or file name must be fixed."
elif stringVal == "logicalDate":
self.string = "The starting date must come before the ending date."
elif stringVal == "logicalTime":
self.string = "The starting time must come before the ending time when the dates are equal."
elif stringVal == "obsDate":
self.string = "The starting date and ending date both need to be in the format YYYY/MM/DD with integers."
elif stringVal == "dateRange":
self.string = "The year must be within 100 years of today, the month must be between 1 and 12,\nand the"+\
" day must be between 1 and 31."
elif stringVal == "coordRange":
self.string = "The latitude must be between 90 and -90 degrees, while the longitude must be \nbetween "+\
"0 and 180 degrees. Both must have min and sec in between 0 and 59."
elif stringVal == "coordTime":
self.string = "The longitude and latitude must be in the format Deg:Min:Sec with numbers."
elif stringVal == "tempElevNum":
if secondValue == "apparent magnitude upper limit":
self.string = "The " + secondValue + " must be a number."
else:
self.string = "The " + secondValue + " must be a number greater than or equal to 0."
elif stringVal == "twilight":
self.string = "The twilight must be -6, -12, or -18. Please select one from the drop down menu."
elif stringVal == "lowerElevation":
self.string = "The lower elevation limist needs to be in the format Deg:Min:Sec, "+\
"with min and sec\nbetween 0 and 59. The degrees must be between 0 and 90."
elif stringVal == "radiusNum":
self.string = "The aperture radii values must be numbers."
elif stringVal == "radiusEqual":
self.string = "The min and max aperture radii cannot be equal."
elif stringVal == "radiusStep":
self.string = "The aperture radii step size cannot be smaller than the difference between the maximum\n" + \
"radius and the minimum radius. The format for this is \"min, max, stepsize\"."
elif stringVal == "radiusLogic":
self.string = "The minimum aperture radius must be smaller than the maximum. None of the 3 parameters\n" + \
"can be equal to 0."
elif stringVal == "radiusLogic2":
self.string = "None of the aperture radii can be equal to 0."
elif stringVal == "radiusError":
self.string = "The radius you entered was empty or not a number. Please enter a valid number."
elif stringVal == "radiusListError":
if secondValue == "etdError":
self.string = "The conversion method here depends on the aperture radii list from the .pkl file. You\n" + \
"must update the radii list to continue."
else:
self.string = "The plotting methods rely on the aperture radii list from the .pkl file. You\n" + \
"must update the radii list to continue."
elif stringVal == "radiusListError2":
self.string = "The radius you entered was not in the aperture radii list for this .pkl file.\n" + \
"Please pick a radius from the approved radii in the drop down menu."
elif stringVal == "utZone":
self.string = "The time zone must be between -12 and 12. Please choose one from the drop down menu."
elif stringVal == "regionsError1":
self.string = "Either the regions file or reference file for this set is empty. You cannot add an " + \
"extra\nregions file without a referenced data image."
elif stringVal == "regionsError2":
self.string = "You have entered a filename that does not exist or more than one file. There can " + \
"only be one regions file\nand one reference file entered at a time for a set."
elif stringVal == "regionsError3":
self.string = "The regions file must be a valid .reg file."
elif stringVal == "regionsError4":
self.string = "The reference file must be a valid .fits or .fit file."
elif stringVal == "emptyReg":
self.string = "You must enter a regions file. If you wish you can enter additional sets of regions " + \
"files\nafter at least one has been entered."
elif stringVal == "invalidReg":
self.string = "This regions file was not found, or is not a vaild .reg file."
elif stringVal == "invalidRef":
self.string = "This reference file was not found, or is not a valid .fits or .fit file."
elif stringVal == "invalidRefExist":
self.string = "This reference file was not found in the list of data images. Please add it to the list of" + \
"data images and try again."
elif stringVal == "outofbounds":
self.string = "You must enter extra regions files as sets with a reference file. The format is " + \
"\"regionsFiles,referenceFile;\"."
elif stringVal == "referenceImageDup":
self.string = "The reference image you have listed in this set is already assigned to another regions file."
elif stringVal == "emptyKeyword":
self.string = "The exposure time keyword cannot be empty. Please use a valid phrase, or choose from " + \
"the drop down menu."
elif stringVal == "invalidKeyword":
self.string = "The keyword you entered was not found in the header of the first data image."
elif stringVal == "emailKeyword":
self.string = "This keyword is in the header file of the first data image, but is not something we " + \
"have a conversion method for.\nPlease email us the keyword you are trying to use and we " + \
"will include it into our list of possible keywords."
elif stringVal == "saveLocation":
self.string = "Either you entered a directory, or the specified path cannot be made to save the results " + \
"of MCMC in a text file."
elif stringVal == "regionsDup":
self.string = "The regions file that you have entered is already assigned to another reference image."
self.okButton = wx.Button(self.panel,label = "Okay", pos = (125,30))
self.Bind(wx.EVT_BUTTON, self.onOkay, self.okButton)
if stringVal == "path":
self.text = wx.StaticText(self.panel, -1, "The following is an invalid output path: " + message)
elif stringVal == "params":
self.text = wx.StaticText(self.panel, -1, "The appropriate parameters have been updated.")
elif stringVal == "ds9":
self.Bind(wx.EVT_WINDOW_DESTROY, self.ds9Error)
self.text = wx.StaticText(self.panel, -1,
"It seems that ds9 may not have installed correctly, please try again.")
elif stringVal == "importError":
self.text = wx.StaticText(self.panel, -1, "Failed to import ephem, please try again.")
elif stringVal == "fitOpen":
self.Bind(wx.EVT_WINDOW_DESTROY, self.fitError)
self.text = wx.StaticText(self.panel, -1, "Please close the fitting frame window and try again.")
elif stringVal == "warnError":
self.Bind(wx.EVT_WINDOW_DESTROY, self.parent.calculate)
self.text = wx.StaticText(self.panel, -1, "Please be careful. The local times are calculated using " + \
"PyEphem's ephem.localtime(\"input\") method. Make sure\nthat this method " + \
"produces the correct local time for yourself. If you don't know how to check " + \
"this, please refer\nto the documentation from the help menu in the main frame. " + \
"This message is shown once per GUI session,\nand will run the calculations " + \
"for the current parameters as soon as you close this window.")
elif stringVal == "oldPKL":
self.text = wx.StaticText(self.panel, -1, "This seems to be an outdated .pkl file, sorry. Try creating" + \
" a new .pkl file from the main frame and try again.\nIf this .pkl file is" + \
" important and cannot be recreated, talk to our developers for information on" + \
" how to extract \nthe data from the file.")
elif stringVal == "regionsUpdate":
self.text = wx.StaticText(self.panel, -1, "This set has been added to the list of regions sets "+ \
"in the main GUI.")
elif stringVal == "setExists":
self.text = wx.StaticText(self.panel, -1, "The set you are trying to add is already there! " + \
"Please add a different set.")
elif stringVal == "upToDate":
self.Title = "Up To Date"
self.text = wx.StaticText(self.panel, -1, "The version of " \
"OSCAAR that you have is currently " \
"up to date!\n\nYour version is from "\
"commit: \n" + oscaar.__sha__ )
elif stringVal == "newCommit":
self.Title = "New Commit Available!"
self.text = wx.StaticText(self.panel, -1, "The current vers" \
"ion that you have is out of date. " \
"Please visit our GitHub page at "\
"\n http://www.github.com/OSCAAR/"\
"OSCAAR\nand retrieve the latest "\
"commit.\n\nYour version is from "\
"commit: \n" + oscaar.__sha__)
elif stringVal == "installAgain":
self.Title = "Error"
self.text = wx.StaticText(self.panel, -1, "There seems to be an outdated __init__ file. Please"\
" reinstall OSCAAR to use this update function.")
elif stringVal == "noInternetConnection":
self.Title = "Error"
self.text = wx.StaticText(self.panel, -1, "An internet"\
" connection is needed to access this function, "\
"no connection is detected.\n\nPlease check your "\
"connection and try again.")
elif stringVal == "successfulConversion":
self.Title = "Conversion Completed"
self.text = wx.StaticText(self.panel, -1, "A file that the Czech ETD will accept has been created!")
else:
self.text = wx.StaticText(self.panel, -1, self.string +"\nThe following is invalid: " + message)
self.sizer0 = wx.FlexGridSizer(rows=2, cols=columns)
self.hbox = wx.BoxSizer(wx.HORIZONTAL)
self.hbox.Add(self.sizer0,0, wx.ALIGN_CENTER|wx.ALL,5)
self.sizer0.Add(self.text,0,wx.ALIGN_CENTER|wx.ALL,5)
self.sizer0.Add(self.okButton,0,wx.ALIGN_CENTER|wx.ALL,5)
self.Bind(wx.EVT_WINDOW_DESTROY, self.onDestroy)
self.panel.SetSizer(self.hbox)
self.hbox.Fit(self)
self.Center()
self.Show()
def ds9Error(self, event):
'''
Whenever this frame is closed, this secondary method updates a variable in the parent
class to make sure that it knows there is no active instance of this frame.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
self.parent.ds9Open = False
def fitError(self, event):
'''
Whenever this frame is closed, this secondary method updates a variable in the parent
class to make sure that it knows there is no active instance of this frame.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
self.parent.loadFitError = False
def create_menu(self):
'''
This method creates the menu bars that are at the top of the InvalidParameter frame.
Notes
-----
This method has no input or return parameters. It will simply be used as self.create_menu()
when in the initialization method for an instance of this frame.
'''
self.menubar = wx.MenuBar()
menu_file = wx.Menu()
m_exit = menu_file.Append(-1, "Exit", "Exit")
self.Bind(wx.EVT_MENU, self.onOkay, m_exit)
self.menubar.Append(menu_file, "&File")
self.SetMenuBar(self.menubar)
def onCharOkay(self,event):
'''
This method allows for users on a Mac to close the InvalidParameter frame by just pressing the
enter key when it pops up.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
self.keycode = event.GetKeyCode()
if self.keycode == wx.WXK_RETURN:
self.Destroy()
def onOkay(self, event):
'''
This method defines the action quit from the menu. It closes the frame. In this class it also
defines what happens when the user clicks the ok button.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
self.Destroy()
def onDestroy(self, event):
'''
Whenever this frame is closed, this secondary method updates a variable in the parent
class to make sure that it knows there is no active instance of this frame.
Parameters
----------
event : wx.EVT_*
A wxPython event that allows the activation of this method. The * represents a wild card value.
'''
self.parent.messageFrame = False
def checkParams(self, tupleList):
'''
This method checks to make sure that all of the parameters and values that are in
`tupleList` are valid for the MCMC and LeastSquaresFit classes.
Parameters
----------
tupleList : array
The input is an array of tuples in the form: (int,string).
Returns
-------
literal : bool
True if all of the parameters required to run MCMC or LeastSquaresFit are valid,
false otherwise.
'''
self.tempGamma1 = -1
self.tempGamma2 = -1
self.tempSaveIteration = -1
self.tempNumber = -1
for (number,string) in tupleList:
if number == '':
self.IP = InvalidParameter(number, self,-1, stringVal=string)
return False
else:
try:
if string !="limbdark":
self.tmp = float(number)
except ValueError:
self.IP = InvalidParameter(number, self,-1, stringVal=string)
return False
if string == "Rp/Rs":
if float(number)>1 or float(number)<0:
self.IP = InvalidParameter(number, self,-1, stringVal=string)
return False
if string == "a/Rs":
if float(number) <= 1:
self.IP = InvalidParameter(number, self,-1, stringVal=string)
return False
if string == "per":
if float(number) < 0:
self.IP = InvalidParameter(number, self,-1, stringVal=string)
return False
if string == "inc":
if float(number) < 0 or float(number) > 90:
self.IP = InvalidParameter(number, self,-1, stringVal=string)
return False
if string == "t0":
if float(number) < 0:
self.IP = InvalidParameter(number, self,-1, stringVal=string)
return False
if string == "ecc":
if float(number) < 0 or float(number) > 1:
self.IP = InvalidParameter(number, self,-1, stringVal=string)
return False
if string == "pericenter":
if float(number) < 0:
self.IP = InvalidParameter(number, self,-1, stringVal=string)
return False
if string == "limbdark":
if (number != "False"):
if (number != "linear"):
if(number != "quadratic"):
self.IP = InvalidParameter(number,self,-1,stringVal=string)
return False
if string == 'gamma1':
self.tempGamma1 = number
if string == 'gamma2':
self.tempGamma2 = number
if string == "saveiteration":
self.tempSaveIteration = float(number)
if float(number) < 5:
self.IP = InvalidParameter(number,self,-1,stringVal=string)
return False
if string == "number":
self.tempNumber = float(number)
if float(number) < 10:
self.IP = InvalidParameter(number,self,-1,stringVal=string)
return False
if string == "acceptance":
if float(number) <= 0:
self.IP = InvalidParameter(number,self,-1,stringVal=string)
return False
if string == "burnfrac":
if float(number) > 1 or float(number) <= 0:
self.IP = InvalidParameter(number,self,-1,stringVal=string)
return False
if(self.tempNumber != -1) and (self.tempSaveIteration != -1):
if (self.tempNumber % self.tempSaveIteration) != 0:
tempString = str(self.tempSaveIteration)+" < "+str(self.tempNumber)
self.IP = InvalidParameter(tempString,self,-1,stringVal="mod")
return False
self.totalGamma = float(self.tempGamma1) + float(self.tempGamma2)
self.totalString = str(self.totalGamma)
if self.totalGamma > 1:
self.IP = InvalidParameter(self.totalString, self,-1, stringVal="gamma")
return False
return True
###################
#This Runs The GUI#
###################
def main():
'''
This allows oscaarGUI to be imported without
automatically opening the frame every time.
'''
pass
if __name__ == "oscaar.oscaarGUI" or __name__ == "__main__":
'''
If oscaarGUI is imported through oscaar, or if it is run
as a standalone program, the frame will open.
'''
app = wx.App(False)
OscaarFrame(parent=None, objectID=-1)
app.MainLoop()
main()