repo_name | path | copies | size | content | license
---|---|---|---|---|---|
henrykironde/scikit-learn
|
examples/model_selection/plot_underfitting_overfitting.py
|
230
|
2649
|
"""
============================
Underfitting vs. Overfitting
============================
This example demonstrates the problems of underfitting and overfitting and
how we can use linear regression with polynomial features to approximate
nonlinear functions. The plot shows the function that we want to approximate,
which is a part of the cosine function. In addition, the samples from the
real function and the approximations of different models are displayed. The
models have polynomial features of different degrees. We can see that a
linear function (polynomial with degree 1) is not sufficient to fit the
training samples. This is called **underfitting**. A polynomial of degree 4
approximates the true function almost perfectly. However, for higher degrees
the model will **overfit** the training data, i.e. it learns the noise of the
training data.
We evaluate **overfitting** / **underfitting** quantitatively by using
cross-validation. We calculate the mean squared error (MSE) on the validation
set; the higher it is, the less likely the model generalizes correctly from the
training data.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn import cross_validation
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
# Evaluate the models using cross-validation
scores = cross_validation.cross_val_score(pipeline,
X[:, np.newaxis], y, scoring="mean_squared_error", cv=10)
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
|
bsd-3-clause
|
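A hedged porting note on the example above: it targets an older scikit-learn in which `sklearn.cross_validation` and `scoring="mean_squared_error"` still existed. In current releases (0.18 and later) the same evaluation is spelled roughly as in the sketch below; `pipeline`, `X`, and `y` refer to the objects built in the script above.

# Equivalent cross-validated MSE with the modern scikit-learn API (sketch).
from sklearn.model_selection import cross_val_score

scores = cross_val_score(pipeline, X[:, np.newaxis], y,
                         scoring="neg_mean_squared_error", cv=10)
print("MSE = {:.2e} (+/- {:.2e})".format(-scores.mean(), scores.std()))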
mhvk/astropy
|
astropy/wcs/wcsapi/low_level_api.py
|
5
|
15733
|
import os
import abc
import numpy as np
__all__ = ['BaseLowLevelWCS', 'validate_physical_types']
class BaseLowLevelWCS(metaclass=abc.ABCMeta):
"""
Abstract base class for the low-level WCS interface.
This is described in `APE 14: A shared Python interface for World Coordinate
Systems <https://doi.org/10.5281/zenodo.1188875>`_.
"""
@property
@abc.abstractmethod
def pixel_n_dim(self):
"""
The number of axes in the pixel coordinate system.
"""
@property
@abc.abstractmethod
def world_n_dim(self):
"""
The number of axes in the world coordinate system.
"""
@property
@abc.abstractmethod
def world_axis_physical_types(self):
"""
An iterable of strings describing the physical type for each world axis.
These should be names from the VO UCD1+ controlled Vocabulary
(http://www.ivoa.net/documents/latest/UCDlist.html). If no matching UCD
type exists, this can instead be ``"custom:xxx"``, where ``xxx`` is an
arbitrary string. Alternatively, if the physical type is
unknown/undefined, an element can be `None`.
"""
@property
@abc.abstractmethod
def world_axis_units(self):
"""
An iterable of strings giving the units of the world coordinates for each
axis.
The strings should follow the `IVOA VOUnit standard
<http://ivoa.net/documents/VOUnits/>`_ (though as noted in the VOUnit
specification document, units that do not follow this standard are still
allowed, but just not recommended).
"""
@abc.abstractmethod
def pixel_to_world_values(self, *pixel_arrays):
"""
Convert pixel coordinates to world coordinates.
This method takes `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` scalars or arrays as
input, and pixel coordinates should be zero-based. Returns
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` scalars or arrays in units given by
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_units`. Note that pixel coordinates are
assumed to be 0 at the center of the first pixel in each dimension. If a
pixel is in a region where the WCS is not defined, NaN can be returned.
The coordinates should be specified in the ``(x, y)`` order, where for
an image, ``x`` is the horizontal coordinate and ``y`` is the vertical
coordinate.
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned.
"""
def array_index_to_world_values(self, *index_arrays):
"""
Convert array indices to world coordinates.
This is the same as `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_to_world_values` except that
the indices should be given in ``(i, j)`` order, where for an image
``i`` is the row and ``j`` is the column (i.e. the opposite order to
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_to_world_values`).
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned.
"""
return self.pixel_to_world_values(*index_arrays[::-1])
@abc.abstractmethod
def world_to_pixel_values(self, *world_arrays):
"""
Convert world coordinates to pixel coordinates.
This method takes `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` scalars or arrays as
input in units given by `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_units`. Returns
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` scalars or arrays. Note that pixel
coordinates are assumed to be 0 at the center of the first pixel in each
dimension. If a world coordinate does not have a matching pixel
coordinate, NaN can be returned. The coordinates should be returned in
the ``(x, y)`` order, where for an image, ``x`` is the horizontal
coordinate and ``y`` is the vertical coordinate.
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned.
"""
def world_to_array_index_values(self, *world_arrays):
"""
Convert world coordinates to array indices.
This is the same as `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_to_pixel_values` except that
the indices should be returned in ``(i, j)`` order, where for an image
``i`` is the row and ``j`` is the column (i.e. the opposite order to
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_to_world_values`). The indices should be
returned as rounded integers.
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned.
"""
pixel_arrays = self.world_to_pixel_values(*world_arrays)
if self.pixel_n_dim == 1:
pixel_arrays = (pixel_arrays,)
else:
pixel_arrays = pixel_arrays[::-1]
array_indices = tuple(np.asarray(np.floor(pixel + 0.5), dtype=np.int_) for pixel in pixel_arrays)
return array_indices[0] if self.pixel_n_dim == 1 else array_indices
@property
@abc.abstractmethod
def world_axis_object_components(self):
"""
A list with `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` elements giving information
on constructing high-level objects for the world coordinates.
Each element of the list is a tuple with three items:
* The first is a name for the world object this world array
corresponds to, which *must* match the string names used in
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_classes`. Note that names might
appear twice because two world arrays might correspond to a single
world object (e.g. a celestial coordinate might have both “ra” and
“dec” arrays, which correspond to a single sky coordinate object).
* The second element is either a string keyword argument name or a
positional index for the corresponding class from
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_classes`.
* The third argument is a string giving the name of the property
to access on the corresponding class from
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_classes` in
order to get numerical values. Alternatively, this argument can be a
callable Python object that takes a high-level coordinate object and
returns the numerical values suitable for passing to the low-level
WCS transformation methods.
See the document
`APE 14: A shared Python interface for World Coordinate Systems
<https://doi.org/10.5281/zenodo.1188875>`_ for examples.
"""
@property
@abc.abstractmethod
def world_axis_object_classes(self):
"""
A dictionary giving information on constructing high-level objects for
the world coordinates.
Each key of the dictionary is a string key from
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_components`, and each value is a
tuple with three elements or four elements:
* The first element of the tuple must be a class or a string specifying
the fully-qualified name of a class, which will specify the actual
Python object to be created.
* The second element should be a tuple specifying the positional
arguments required to initialize the class. If
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_components` specifies that the
world coordinates should be passed as a positional argument, this
tuple should include `None` placeholders for the world coordinates.
* The third tuple element must be a dictionary with the keyword
arguments required to initialize the class.
* Optionally, for advanced use cases, the fourth element (if present)
should be a callable Python object that gets called instead of the
class and gets passed the positional and keyword arguments. It should
return an object of the type of the first element in the tuple.
Note that we don't require the classes to be Astropy classes since there
is no guarantee that Astropy will have all the classes to represent all
kinds of world coordinates. Furthermore, we recommend that the output be
kept as human-readable as possible.
The classes used here should have the ability to do conversions by
passing an instance as the first argument to the same class with
different arguments (e.g. ``Time(Time(...), scale='tai')``). This is
a requirement for the implementation of the high-level interface.
The second and third tuple elements for each value of this dictionary
can in turn contain either instances of classes, or if necessary can
contain serialized versions that should take the same form as the main
classes described above (a tuple with three elements with the fully
qualified name of the class, then the positional arguments and the
keyword arguments). For low-level API objects implemented in Python, we
recommend simply returning the actual objects (not the serialized form)
for optimal performance. Implementations should either always or never
use serialized classes to represent Python objects, and should indicate
which of these they follow using the
`~astropy.wcs.wcsapi.BaseLowLevelWCS.serialized_classes` attribute.
See the document
`APE 14: A shared Python interface for World Coordinate Systems
<https://doi.org/10.5281/zenodo.1188875>`_ for examples.
"""
# The following three properties have default fallback implementations, so
# they are not abstract.
@property
def array_shape(self):
"""
The shape of the data that the WCS applies to as a tuple of length
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` in ``(row, column)``
order (the convention for arrays in Python).
If the WCS is valid in the context of a dataset with a particular
shape, then this property can be used to store the shape of the
data. This can be used for example if implementing slicing of WCS
objects. This is an optional property, and it should return `None`
if a shape is not known or relevant.
"""
if self.pixel_shape is None:
return None
else:
return self.pixel_shape[::-1]
@property
def pixel_shape(self):
"""
The shape of the data that the WCS applies to as a tuple of length
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` in ``(x, y)``
order (where for an image, ``x`` is the horizontal coordinate and ``y``
is the vertical coordinate).
If the WCS is valid in the context of a dataset with a particular
shape, then this property can be used to store the shape of the
data. This can be used for example if implementing slicing of WCS
objects. This is an optional property, and it should return `None`
if a shape is not known or relevant.
If you are interested in getting a shape that is comparable to that of
a Numpy array, you should use
`~astropy.wcs.wcsapi.BaseLowLevelWCS.array_shape` instead.
"""
return None
@property
def pixel_bounds(self):
"""
The bounds (in pixel coordinates) inside which the WCS is defined,
as a list with `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim`
``(min, max)`` tuples.
The bounds should be given in ``[(xmin, xmax), (ymin, ymax)]``
order. WCS solutions are sometimes only guaranteed to be accurate
within a certain range of pixel values, for example when defining a
WCS that includes fitted distortions. This is an optional property,
and it should return `None` if the bounds are not known or relevant.
"""
return None
@property
def pixel_axis_names(self):
"""
An iterable of strings describing the name for each pixel axis.
If an axis does not have a name, an empty string should be returned
(this is the default behavior for all axes if a subclass does not
override this property). Note that these names are just for display
purposes and are not standardized.
"""
return [''] * self.pixel_n_dim
@property
def world_axis_names(self):
"""
An iterable of strings describing the name for each world axis.
If an axis does not have a name, an empty string should be returned
(this is the default behavior for all axes if a subclass does not
override this property). Note that these names are just for display
purposes and are not standardized. For standardized axis types, see
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_physical_types`.
"""
return [''] * self.world_n_dim
@property
def axis_correlation_matrix(self):
"""
Returns an (`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim`,
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim`) matrix that
indicates using booleans whether a given world coordinate depends on a
given pixel coordinate.
This defaults to a matrix where all elements are `True` in the absence
of any further information. For completely independent axes, the
diagonal would be `True` and all other entries `False`.
"""
return np.ones((self.world_n_dim, self.pixel_n_dim), dtype=bool)
@property
def serialized_classes(self):
"""
Indicates whether Python objects are given in serialized form or as
actual Python objects.
"""
return False
def _as_mpl_axes(self):
"""
Compatibility hook for Matplotlib and WCSAxes. With this method, one can
do::
from astropy.wcs import WCS
import matplotlib.pyplot as plt
wcs = WCS('filename.fits')
fig = plt.figure()
ax = fig.add_axes([0.15, 0.1, 0.8, 0.8], projection=wcs)
...
and this will generate a plot with the correct WCS coordinates on the
axes.
"""
from astropy.visualization.wcsaxes import WCSAxes
return WCSAxes, {'wcs': self}
UCDS_FILE = os.path.join(os.path.dirname(__file__), 'data', 'ucds.txt')
with open(UCDS_FILE) as f:
VALID_UCDS = set([x.strip() for x in f.read().splitlines()[1:]])
def validate_physical_types(physical_types):
"""
Validate a list of physical types against the UCD1+ standard
"""
for physical_type in physical_types:
if (physical_type is not None and
physical_type not in VALID_UCDS and
not physical_type.startswith('custom:')):
raise ValueError(
f"'{physical_type}' is not a valid IOVA UCD1+ physical type. "
"It must be a string specified in the list (http://www.ivoa.net/documents/latest/UCDlist.html) "
"or if no matching type exists it can be any string prepended with 'custom:'."
)
|
bsd-3-clause
|
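As a hedged illustration of the interface documented above, the sketch below wires up a toy one-dimensional linear WCS (world = crval + cdelt * pixel). The class name, the reference values, and the choice of `em.wl`/`nm` are illustrative only; the sketch assumes an astropy version in which `BaseLowLevelWCS` is importable from `astropy.wcs.wcsapi`.

# Minimal concrete subclass implementing the abstract members shown above (sketch).
import numpy as np
from astropy import units as u
from astropy.wcs.wcsapi import BaseLowLevelWCS

class LinearSpectralWCS(BaseLowLevelWCS):
    """Toy 1-D spectral WCS: world = crval + cdelt * pixel (nm)."""

    def __init__(self, crval=500.0, cdelt=0.1):
        self._crval = crval
        self._cdelt = cdelt

    @property
    def pixel_n_dim(self):
        return 1

    @property
    def world_n_dim(self):
        return 1

    @property
    def world_axis_physical_types(self):
        return ['em.wl']

    @property
    def world_axis_units(self):
        return ['nm']

    @property
    def world_axis_object_components(self):
        # One world axis, passed to the 'spectral' object as positional argument 0;
        # numerical values come back via the Quantity 'value' attribute.
        return [('spectral', 0, 'value')]

    @property
    def world_axis_object_classes(self):
        return {'spectral': (u.Quantity, (), {'unit': 'nm'})}

    def pixel_to_world_values(self, *pixel_arrays):
        return self._crval + self._cdelt * np.asarray(pixel_arrays[0])

    def world_to_pixel_values(self, *world_arrays):
        return (np.asarray(world_arrays[0]) - self._crval) / self._cdelt

wcs = LinearSpectralWCS()
print(wcs.pixel_to_world_values([0, 1, 2]))     # [500.  500.1 500.2]
print(wcs.world_to_array_index_values(500.61))  # pixel 6.1 -> array index 6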
tapomayukh/projects_in_python
|
classification/Classification_with_HMM/Multiple_Contact_Classification/continuous_hmm_prediction_two_objects_force_20_states.py
|
1
|
11013
|
# Hidden Markov Model Implementation
import pylab as pyl
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy as scp
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
import ghmm
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_HMM/384')
from data_384 import Fmat_original
# Define features
def feature_vector_diff_2_objs(Zt1,Zt2,i):
data_matrix = np.array([0,0,0])
n = i+121
while (i < n):
data_instant = np.array([Zt1[i,3],Zt1[i,4],Zt2[i,1]])
data_matrix = np.row_stack([data_matrix, data_instant])
i = i+1
Fvec_a = np.matrix(data_matrix[1:,0]).T
Fvec_b = np.matrix(data_matrix[1:,1]).T
Fvec_c = np.matrix(data_matrix[1:,2]).T
Fvec = np.row_stack([Fvec_a,Fvec_b,Fvec_c])
n_Fvec, m_Fvec = np.shape(Fvec)
#print 'Feature_Vector_Shape:',n_Fvec, m_Fvec
return Fvec
# Returns mu,sigma for 20 hidden states from feature vectors (121,35) for RF,SF,RM,SM models
def feature_to_mu_sigma(fvec):
index = 0
m,n = np.shape(fvec)
#print m,n
mu = np.matrix(np.zeros((20,1)))
sigma = np.matrix(np.zeros((20,1)))
DIVS = m/20
while (index < 20):
m_init = index*DIVS
temp_fvec = fvec[(m_init):(m_init+DIVS),0:]
#if index == 1:
#print temp_fvec
mu[index] = scp.mean(temp_fvec)
sigma[index] = scp.std(temp_fvec)
index = index+1
return mu,sigma
# Returns sequence given raw data
def create_seq(fvec):
m,n = np.shape(fvec)
#print m,n
seq = np.matrix(np.zeros((20,n)))
DIVS = m/20
for i in range(n):
index = 0
while (index < 20):
m_init = index*DIVS
temp_fvec = fvec[(m_init):(m_init+DIVS),i]
#if index == 1:
#print temp_fvec
seq[index,i] = scp.mean(temp_fvec)
index = index+1
return seq
if __name__ == '__main__':
Fmat = Fmat_original
# First_Object
ta_no_fo_t1 = ut.load_pickle('/home/tapo/svn/robot1_data/usr/tapo/data/New_Experiments_Qi/Two_Objects/black_foam_blue_cup/reverse/new4/time_varying_data_black_foam_blue_cup_fixed_first_object_trial_5.pkl')
fa_no_fo_t1 = ut.load_pickle('/home/tapo/svn/robot1_data/usr/tapo/data/New_Experiments_Qi/Two_Objects/black_foam_blue_cup/reverse/new4/time_varying_tracking_data_black_foam_blue_cup_fixed_first_object_trial_5.pkl')
# Second_Object
ta_no_so_t1 = ut.load_pickle('/home/tapo/svn/robot1_data/usr/tapo/data/New_Experiments_Qi/Two_Objects/black_foam_blue_cup/reverse/new4/time_varying_data_black_foam_blue_cup_fixed_second_object_trial_5.pkl')
fa_no_so_t1 = ut.load_pickle('/home/tapo/svn/robot1_data/usr/tapo/data/New_Experiments_Qi/Two_Objects/black_foam_blue_cup/reverse/new4/time_varying_tracking_data_black_foam_blue_cup_fixed_second_object_trial_5.pkl')
# Creating Feature Vector
Fmat2 = np.matrix(np.zeros((363,2)))
Fmat2[:,0] = feature_vector_diff_2_objs(ta_no_fo_t1,fa_no_fo_t1,0)
Fmat2[:,1] = feature_vector_diff_2_objs(ta_no_so_t1,fa_no_so_t1,0)
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
#print " "
print 'Total_Matrix_Shape:',m_tot,n_tot
mu_rf,sigma_rf = feature_to_mu_sigma(Fmat[0:121,0:35])
mu_rm,sigma_rm = feature_to_mu_sigma(Fmat[0:121,35:70])
mu_sf,sigma_sf = feature_to_mu_sigma(Fmat[0:121,70:105])
mu_sm,sigma_sm = feature_to_mu_sigma(Fmat[0:121,105:140])
#print [mu_rf, sigma_rf]
# HMM - Implementation:
# 20 Hidden States
# Max. Force(For now), Contact Area(Not now), and Contact Motion(Not Now) as Continuous Gaussian Observations from each hidden state
# Four HMM-Models for Rigid-Fixed, Soft-Fixed, Rigid-Movable, Soft-Movable
# Transition probabilities obtained as an upper triangular matrix (to be trained using Baum-Welch)
# New objects are classified according to which model represents them most closely.
F = ghmm.Float() # emission domain of this model
# A - Transition Matrix
A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.09, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.15, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.10, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.03, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.10, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.1, 0.30, 0.30, 0.10, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.1, 0.40, 0.30, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.20, 0.40, 0.20, 0.10, 0.04, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.03, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.20, 0.40, 0.20, 0.10, 0.10],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.20, 0.40, 0.20, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.30, 0.50, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.40, 0.60],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf = np.zeros((20,2))
B_rm = np.zeros((20,2))
B_sf = np.zeros((20,2))
B_sm = np.zeros((20,2))
for num_states in range(20):
B_rf[num_states,0] = mu_rf[num_states]
B_rf[num_states,1] = sigma_rf[num_states]
B_rm[num_states,0] = mu_rm[num_states]
B_rm[num_states,1] = sigma_rm[num_states]
B_sf[num_states,0] = mu_sf[num_states]
B_sf[num_states,1] = sigma_sf[num_states]
B_sm[num_states,0] = mu_sm[num_states]
B_sm[num_states,1] = sigma_sm[num_states]
B_rf = B_rf.tolist()
B_rm = B_rm.tolist()
B_sf = B_sf.tolist()
B_sm = B_sm.tolist()
# pi - initial probabilities per state
pi = [0.05] * 20
# generate RF, RM, SF, SM models from parameters
model_rf = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rf, pi) # Will be Trained
model_rm = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rm, pi) # Will be Trained
model_sf = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sf, pi) # Will be Trained
model_sm = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sm, pi) # Will be Trained
# For Training
total_seq = Fmat[0:121,:]
total_seq2 = Fmat2[0:121,:]
m_total, n_total = np.shape(total_seq)
#print 'Total_Sequence_Shape:', m_total, n_total
train_seq_rf = (np.array(total_seq[:,0:35]).T).tolist()
train_seq_rm = (np.array(total_seq[:,35:70]).T).tolist()
train_seq_sf = (np.array(total_seq[:,70:105]).T).tolist()
train_seq_sm = (np.array(total_seq[:,105:140]).T).tolist()
#print train_seq_rf
final_ts_rf = ghmm.SequenceSet(F,train_seq_rf)
final_ts_rm = ghmm.SequenceSet(F,train_seq_rm)
final_ts_sf = ghmm.SequenceSet(F,train_seq_sf)
final_ts_sm = ghmm.SequenceSet(F,train_seq_sm)
model_rf.baumWelch(final_ts_rf)
model_rm.baumWelch(final_ts_rm)
model_sf.baumWelch(final_ts_sf)
model_sm.baumWelch(final_ts_sm)
print " "
print "TRAINED RIGID-FIXED MODEL"
print model_rf
print " "
print " "
print "TRAINED RIGID-MOVABLE MODEL"
print model_rm
print " "
print " "
print "TRAINED SOFT-FIXED MODEL"
print model_sf
print " "
print " "
print "TRAINED SOFT-MOVABLE MODEL"
print model_sm
print " "
print " "
# Test New Objects
test_seq_obj1 = (np.array(total_seq2[:,0]).T).tolist()
#print test_seq_obj1
test_seq_obj2 = (np.array(total_seq2[:,1]).T).tolist()
new_test_seq_obj1 = np.array(sum(test_seq_obj1,[]))
#print new_test_seq_obj1
new_test_seq_obj2 = np.array(sum(test_seq_obj2,[]))
ts_obj1 = new_test_seq_obj1
ts_obj2 = new_test_seq_obj2
final_ts_obj1 = ghmm.EmissionSequence(F,ts_obj1.tolist())
final_ts_obj2 = ghmm.EmissionSequence(F,ts_obj2.tolist())
#print final_ts_obj2
# Find Viterbi Path
path_rf_obj1 = model_rf.viterbi(final_ts_obj1)
path_rm_obj1 = model_rm.viterbi(final_ts_obj1)
path_sf_obj1 = model_sf.viterbi(final_ts_obj1)
path_sm_obj1 = model_sm.viterbi(final_ts_obj1)
path_rf_obj2 = model_rf.viterbi(final_ts_obj2)
path_rm_obj2 = model_rm.viterbi(final_ts_obj2)
path_sf_obj2 = model_sf.viterbi(final_ts_obj2)
path_sm_obj2 = model_sm.viterbi(final_ts_obj2)
obj1 = max(path_rf_obj1[1],path_rm_obj1[1],path_sf_obj1[1],path_sm_obj1[1])
obj2 = max(path_rf_obj2[1],path_rm_obj2[1],path_sf_obj2[1],path_sm_obj2[1])
print " "
if obj1 == path_rf_obj1[1]:
print "ONE OBJECT IS RIGID-FIXED"
elif obj1 == path_rm_obj1[1]:
print "ONE OBJECT IS RIGID-MOVABLE"
elif obj1 == path_sf_obj1[1]:
print "ONE OBJECT IS SOFT-FIXED"
else:
print "ONE OBJECT IS SOFT-MOVABLE"
print " "
if obj2 == path_rf_obj2[1]:
print "THE OTHER OBJECT IS RIGID-FIXED"
elif obj2 == path_rm_obj2[1]:
print "THE OTHER OBJECT IS RIGID-MOVABLE"
elif obj2 == path_sf_obj2[1]:
print "THE OTHER OBJECT IS SOFT-FIXED"
else:
print "THE OTHER OBJECT IS SOFT-MOVABLE"
print " "
|
mit
|
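The ghmm bindings used above are Python-2-era software; as a hedged alternative, the same train-four-models-and-pick-the-best-likelihood idea can be sketched with the hmmlearn package. The class labels and synthetic data below are purely illustrative, and fewer hidden states are used than the 20 above just to keep the toy data well-conditioned.

# Classify a sequence by maximum HMM log-likelihood using hmmlearn (sketch).
import numpy as np
from hmmlearn import hmm

rng = np.random.RandomState(0)
train = {
    "rigid-fixed": rng.normal(0.0, 1.0, size=(121, 1)),  # one training sequence per class
    "soft-fixed": rng.normal(3.0, 1.0, size=(121, 1)),
}

models = {}
for label, seq in train.items():
    model = hmm.GaussianHMM(n_components=5, covariance_type="diag", n_iter=50,
                            random_state=0)
    model.fit(seq)            # Baum-Welch re-estimation, as in the script above
    models[label] = model

test_seq = rng.normal(2.8, 1.0, size=(121, 1))
scores = {label: m.score(test_seq) for label, m in models.items()}  # log-likelihoods
print(max(scores, key=scores.get))  # best-matching class ("soft-fixed" here)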
droundy/deft
|
papers/thesis-kirstie/figs/plot_Gaussian.py
|
1
|
1240
|
#!/usr/bin/python3
#RUN this program from the directory it is listed in
#with command ./plot_Gaussian.py
from scipy import special
import numpy as np
import matplotlib.pyplot as plt
import math
#Plot Gaussian vs rprime_magnitude at fixed gw R=0
gw=1
x=np.linspace(-4*gw, 4*gw, 20000)
#Gaussian=(1/np.sqrt(2*np.pi)*gw)*(1/np.sqrt(2*np.pi)*gw)*(1/np.sqrt(2*np.pi)*gw)*np.exp(-(rprime/np.sqrt(2)*gw)*(rprime/np.sqrt(2)*gw))
def Gaussian(x):
return np.exp(-(x/(np.sqrt(2)*gw))*(x/(np.sqrt(2)*gw)))
scale = 0.9
plt.figure(figsize=(4*scale,3*scale))
plt.xlabel('$x$')
plt.xticks([-gw,0,gw], [r'$-\sigma$', '$0$', r'$\sigma$'])
plt.plot([-gw, -gw], [0, Gaussian(gw)], 'k-')
plt.plot([gw, gw], [0, Gaussian(gw)], 'k-')
plt.ylim(0, 1.1)
plt.xlim(-3*gw, 3*gw)
plt.axvline(0, color='k')
plt.plot(x,Gaussian(x))
plt.yticks([0, 1], ['$0$', r'$\frac{1}{\sqrt{2\pi}\sigma}$'])
plt.axhline(1, linestyle=':', color='k')
# plt.annotate('hello', xy=(0,1), xytext=(0.5*gw,1),
# arrowprops=dict(arrowstyle="->",
# connectionstyle="arc3"),)
plt.text(-2.5*gw, 0.75, r"$\frac{1}{\sqrt{2\pi}\sigma}e^{-\frac{x^2}{2\sigma^2}}$")
plt.tight_layout()
#plt.legend()
plt.savefig("Gaussian.pdf")
# plt.show()
|
gpl-2.0
|
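A small check that clarifies the labelling in the plot above: the plotted Gaussian(x) is unnormalized (its peak is 1), while the y-tick at the peak is labelled 1/(sqrt(2*pi)*sigma), the peak value of the normalized density. The sketch below, assuming scipy is available, confirms that the normalized form integrates to one.

# Verify the normalization constant of the Gaussian (sketch).
import numpy as np
from scipy import integrate

gw = 1.0  # sigma

def normalized_gaussian(x):
    return np.exp(-x**2 / (2 * gw**2)) / (np.sqrt(2 * np.pi) * gw)

area, _ = integrate.quad(normalized_gaussian, -np.inf, np.inf)
print(area)  # ~1.0, so the peak value is indeed 1/(sqrt(2*pi)*sigma)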
priyanshsaxena/techmeet
|
backup_stuff/text_classification/market_movers.py
|
1
|
1062
|
import datetime
import pandas as pd
def make_url(ticker_symbol,start_date, end_date):
base_url = "http://ichart.finance.yahoo.com/table.csv?s="
# print ticker_symbol
a = start_date
b = end_date
dt_url = '%s&a=%d&b=%d&c=%d&d=%d&e=%d&f=%d&g=d&ignore=.csv'% (ticker_symbol, a.month-1, a.day, a.year, b.month-1, b.day,b.year)
return base_url + dt_url
def getMarketMovers(day):
L = ["aapl","axp","ba","cat","csco","cvx","ko","dd","xom",
"ge","gs","hd","ibm","intc","jnj","jpm","mcd","mmm","mrk","msft","nke","pfe","pg","trv","unh","utx","v","vz","wmt","dis"]
s = datetime.date(2017,3,day)
e = datetime.date(2017,3,day+1)
D = {}
for i in L:
u = make_url(i,s,e)
# print u
data = pd.read_csv(u).values
open_ = data[0][1]
close_ = data[0][4]
percent_change = (close_ - open_)/open_
# print data
# print percent_change
D[i] = percent_change
s = []
for key, value in sorted(D.iteritems(), key=lambda (k,v): (v,k)):
# print "%s: %s" % (key, value)
s.append([key,value])
return s
if __name__ == '__main__':
getMarketMovers(21)
|
gpl-3.0
|
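A hedged porting note on the sort above: `sorted(D.iteritems(), key=lambda (k,v): (v,k))` is Python-2-only syntax (dict.iteritems and tuple-unpacking lambdas were removed in Python 3). An equivalent sort in Python 3 is sketched below; the ticker values are made up purely for illustration.

# Sort tickers by daily percent change, ascending (Python 3 sketch).
D = {"aapl": 0.012, "ba": -0.004, "msft": 0.007}  # illustrative values only
movers = sorted(D.items(), key=lambda kv: (kv[1], kv[0]))
print(movers)  # [('ba', -0.004), ('msft', 0.007), ('aapl', 0.012)]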
Castronova/EMIT
|
models/topmodel/test/test_topmodel.py
|
2
|
10876
|
__author__ = 'tonycastronova'
import unittest
import time
import timeit
import os
import numpy
from osgeo import ogr
from models.topmodel import topmodel
from utilities.gui import parse_config
from transform.space import *
from transform.time import *
import stdlib
class test_topmodel(unittest.TestCase):
def setUp(self):
# add models
self.mdl = '../topmodel.mdl'
config_params = parse_config(self.mdl)
self.ti = config_params['model inputs'][0]['ti']
def test_initialize(self):
config_params = parse_config(self.mdl)
# load topmodel
top = topmodel.topmodel(config_params)
# check input exchange items
in_items = top.inputs()
self.assertTrue(len(in_items.keys()) == 1)
self.assertTrue('precipitation' in in_items.keys())
precip = in_items['precipitation']
# check that input geometries were created
precip_geoms = precip.getGeometries2()
self.assertTrue(len(precip_geoms) > 0)
# check input geometry type
geom_type = precip_geoms[0].GetGeometryName()
self.assertTrue(geom_type == stdlib.GeomType.POLYGON)
# check output exchange items
# out_items = top.outputs()
# self.assertTrue(len(out_items.keys()) == 1)
# self.assertTrue('streamflow' in out_items.keys())
# flow = out_items['streamflow']
# # check that output geoms exist
# flow_geoms = flow.getGeometries2()
# self.assertTrue(len(flow_geoms) > 0)
#
# # check output geometry type
# geom_type = flow_geoms[0].geom().geometryType()
# self.assertTrue(geom_type == 'LineString')
def test_geometry_parsing(self):
geoms = []
with open('./data/right_hand_fork_ti_trim.txt', 'r') as sr:
lines = sr.readlines()
nrows = int(lines[1].split(' ')[-1].strip())
lowerx = float(lines[2].split(' ')[-1].strip())
lowery = float(lines[3].split(' ')[-1].strip())
cellsize = float(lines[4].split(' ')[-1].strip())
nodata = lines[5].split(' ')[-1].strip()
# set start x, y
y = lowery + cellsize * nrows
for line in lines[6:]:
x = lowerx
l = line.strip().split(' ')
xy = []
for element in l:
if element != nodata:
xy.append((x,y))
geom = stdlib.Geometry2(ogr.wkbPoint)
geom.AddPoint(x, y)
geoms.append(geom)
x += cellsize
y -= cellsize
return geoms
def test_geometry_parsing_numpy(self):
import numpy as np
import matplotlib.pyplot as plt
import utilities.geometry
topo_input = './data/right_hand_fork_ti_trim.txt'
# plt.ion()
# plt.show()
nrows = 0
ncols = 0
cellsize = 0
lowerx = 0
lowery = 0
with open(topo_input, 'r') as sr:
lines = sr.readlines()
ncols = int(lines[0].split(' ')[-1].strip())
nrows = int(lines[1].split(' ')[-1].strip())
lowerx = float(lines[2].split(' ')[-1].strip())
lowery = float(lines[3].split(' ')[-1].strip())
cellsize = float(lines[4].split(' ')[-1].strip())
nodata = float(lines[5].split(' ')[-1].strip())
# read ti data
data = np.genfromtxt(topo_input, delimiter=' ', skip_header=6)
# build X and Y coordinate arrays
xi = np.linspace(lowerx, lowerx+ncols*cellsize, ncols)
yi = np.linspace(lowery+nrows*cellsize, lowery, nrows)
x,y = np.meshgrid(xi,yi) # generate 2d arrays from xi, yi
x = x.ravel() # convert to 1-d
y = y.ravel() # convert to 1-d
data = data.ravel() # convert to 1-d
# remove all nodata points from x, y arrays
nonzero = np.where(data != nodata)
x = x[nonzero]
y = y[nonzero]
points = utilities.geometry.build_point_geometries(x,y)
# self.create_point_shapefile(points, data)
# # return points
# X = x[::4]
# Y = y[::4]
# plt.scatter(X, Y, s=.5, edgecolors='none',color='blue')
# plt.draw()
# plt.show()
def test_execute_simulation(self):
from coordinator import engine
simulator = engine.Coordinator()
# load randomizer component
weather = simulator.add_model(id='weather', attrib={'mdl':'../../test_models/weather/weatherReader.mdl'})
# load topmodel
top = simulator.add_model(id='topmodel', attrib={'mdl':self.mdl})
# add link between randomizer and topmodel
link1 = simulator.add_link_by_name(from_id=weather['id'],
from_item_name='Precipitation',
to_id=top['id'],
to_item_name='precipitation')
# set link tranformations
link1.spatial_interpolation(SpatialInterpolation.ExactMatch)
link1.temporal_interpolation(TemporalInterpolation.NearestNeighbor)
print 'Starting Simulation'
st = time.time()
# begin execution
simulator.run_simulation()
print 'Simulation Complete \n Elapsed time = %3.2f seconds'%(time.time() - st)
def create_point_shapefile(self, point_list, data):
# Save extent to a new Shapefile
outShapefile = "/Users/tonycastronova/Documents/windows_shared/temp/check_pts.shp"
outDriver = ogr.GetDriverByName("ESRI Shapefile")
# Remove output shapefile if it already exists
if os.path.exists(outShapefile):
outDriver.DeleteDataSource(outShapefile)
# Create the output shapefile
datasource = outDriver.CreateDataSource(outShapefile)
layer = datasource.CreateLayer("points", geom_type=ogr.wkbPoint)
# Add an ID field
layer.CreateField(ogr.FieldDefn("Value", ogr.OFTInteger))
i = 0
for i in range(0, len(point_list)):
p = point_list[i]
v = data[i]
feature = ogr.Feature(layer.GetLayerDefn())
feature.SetField("Value", v)
feature.SetGeometry(p)
layer.CreateFeature(feature)
feature.Destroy()
i+= 1
datasource.Destroy()
def test_read_topo_input(self):
# ---- begin reading the values stored in the topo file
with open(self.ti, 'r') as sr:
lines = sr.readlines()
cellsize = float(lines[4].split(' ')[-1].strip())
nodata = lines[5].split(' ')[-1].strip()
# generate topolist by parsing cell data
topoList = [item for sublist in lines[6:] for item in sublist.strip().split(' ') if item != nodata]
self._watershedArea = len(topoList) * cellsize
# ---- calculate frequency of each topographic index
# -- consolidate topo list into unique values
d = {float(i):float(topoList.count(i)) for i in set(topoList)}
# -- calculate topo frequency, then return both topographic index and topo frequency arrays
total = len(topoList)
ti = [round(k,4) for k in d.iterkeys()]
freq = [round((k/total), 10) for k in d.iterkeys()]
return ti, freq
def test_read_topo_input_optimized(self):
# read the header values in the topo file
ncols = 0
nrows = 0
lowerx = 0
lowery = 0
cellsize = 0
nodata = 0
with open(self.ti, 'r') as sr:
lines = sr.readlines()
ncols = int(lines[0].split(' ')[-1].strip())
nrows = int(lines[1].split(' ')[-1].strip())
lowerx = float(lines[2].split(' ')[-1].strip())
lowery = float(lines[3].split(' ')[-1].strip())
cellsize = float(lines[4].split(' ')[-1].strip())
nodata = float(lines[5].split(' ')[-1].strip())
# read ti data
data = numpy.genfromtxt(self.ti, delimiter=' ', skip_header=6)
topoList = data.ravel() # convert into 1-d list
topoList = topoList[topoList != nodata] # remove nodata values
watershedArea = topoList.shape[0]*cellsize # calculate watershed area
topoList = numpy.round(topoList, 4) # round topoList items
total = topoList.shape[0] # total number of element in the topoList
unique, counts = numpy.unique(topoList, return_counts=True) # get bins for topoList elements
ti = unique # topographic index list
freq = unique/total # freq of topo indices
freq = numpy.round(freq, 10) # round the frequencies
return ti, freq
def test_benchmark(self):
benchmarks = []
# print 'Benchmarking test_geometry_parsing ...',
# t = timeit.Timer(lambda: self.test_geometry_parsing())
# time = min(t.repeat(1,1))
# benchmarks.append([time, '%3.5f sec:\t\ttest_geometry_parsing' % time ])
# print 'done'
#
# print 'Benchmarking test_geometry_parsing_numpy ...',
# t = timeit.Timer(lambda: self.test_geometry_parsing_numpy())
# time = min(t.repeat(1,1))
# benchmarks.append([time, '%3.5f sec:\t\ttest_geometry_parsing_numpy' % time ])
# print 'done'
print 'Benchmarking test_read_topo_input ...',
t = timeit.Timer(lambda: self.test_read_topo_input())
time = min(t.repeat(1,1))
benchmarks.append([time, '%3.5f sec:\t\ttest_read_topo_input' % time ])
print 'done'
print 'Benchmarking test_read_topo_input_optimized ...',
t = timeit.Timer(lambda: self.test_read_topo_input_optimized())
time = min(t.repeat(1,1))
benchmarks.append([time, '%3.5f sec:\t\ttest_read_topo_input_optimized' % time ])
print 'done'
sorted_benchmarks = sorted(benchmarks,key=lambda x: x[0])
print '\n' + 36*'-'
print 'Fastest Algorithms'
print 36*'-'
for b in sorted_benchmarks:
print b[1]
def test_validate_optimizations(self):
# this will fail because the optimized method uses gdal geoms instead of shapely geoms
# g1 = self.test_geometry_parsing()
# g2 = self.test_geometry_parsing_numpy()
# self.assertTrue(len(g1) == len(g2))
# self.assertItemsEqual(g1, g2)
ti1, freq1 = self.test_read_topo_input()
ti2, freq2 = self.test_read_topo_input_optimized()
self.assertTrue(len(ti1) == len(ti2))
self.assertTrue(len(freq1) == len(freq2))
self.assertItemsEqual(ti1, ti2)
self.assertItemsEqual(freq1, freq2)
|
gpl-2.0
|
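For reference, the `numpy.unique(..., return_counts=True)` call in the optimized reader above already returns everything needed for a count-based frequency; the sketch below shows that variant on toy data. Note that both readers above instead divide the unique topographic-index values themselves by the total, and they do so consistently, which is why the validation test comparing them passes.

# Count-based frequency of topographic-index values (sketch, toy data).
import numpy as np

topoList = np.round(np.array([7.1, 7.1, 8.3, 8.3, 8.3, 9.0]), 4)
unique, counts = np.unique(topoList, return_counts=True)
freq = np.round(counts / topoList.shape[0], 10)   # fraction of cells per ti value
print(list(zip(unique, freq)))  # pairs like (7.1, 0.3333333333), (8.3, 0.5), (9.0, 0.1666666667)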
dboonz/polymode
|
setup.py
|
5
|
1794
|
#!/usr/bin/env python
from os.path import join
#Use setuptools for egg installs, if possible
import setuptools
from numpy.distutils.core import setup, Command
from Polymode import __version__
package_name = 'Polymode'
package_version = __version__
package_description ="A package for the modal analysis of microstructured optical fibers"
class generate_api_docs(Command):
"""Generate the api documentation using epydoc
"""
description = "generate the api documentation"
user_options = []
target_dir = "../documentation/api"
def initialize_options(self):
self.all = None
def finalize_options(self):
pass
def run(self):
import os
os.system("epydoc --no-frames -o %s Polymode" % self.target_dir)
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
config.set_options(
ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=False,
)
#The module
config.add_subpackage(package_name)
#Other packages used
config.add_subpackage('Nurbs', subpackage_path='other/Nurbs')
return config
def setup_package():
setup(
name = package_name,
version = package_version,
description = package_description,
maintainer = "Andrew Docherty",
url='http://polymode.googlecode.com',
license='GPL3',
configuration = configuration,
# install_requires = ['numpy >= 1.0.1', 'scipy>=0.5.2', 'matplotlib>=0.92',],
zip_safe = True,
cmdclass = {'doc' : generate_api_docs}
)
return
if __name__ == '__main__':
setup_package()
|
gpl-3.0
|
Jozhogg/iris
|
docs/iris/example_code/General/lineplot_with_legend.py
|
18
|
1131
|
"""
Multi-line temperature profile plot
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
"""
import matplotlib.pyplot as plt
import iris
import iris.plot as iplt
import iris.quickplot as qplt
def main():
fname = iris.sample_data_path('air_temp.pp')
# Load exactly one cube from the given file.
temperature = iris.load_cube(fname)
# We only want a small number of latitudes, so filter some out
# using "extract".
temperature = temperature.extract(
iris.Constraint(latitude=lambda cell: 68 <= cell < 78))
for cube in temperature.slices('longitude'):
# Create a string label to identify this cube (i.e. latitude: value).
cube_label = 'latitude: %s' % cube.coord('latitude').points[0]
# Plot the cube, and associate it with a label.
qplt.plot(cube, label=cube_label)
# Add the legend with 2 columns.
plt.legend(ncol=2)
# Put a grid on the plot.
plt.grid(True)
# Tell matplotlib not to extend the plot axes range to nicely
# rounded numbers.
plt.axis('tight')
# Finally, show it.
iplt.show()
if __name__ == '__main__':
main()
|
lgpl-3.0
|
sencha/chromium-spacewalk
|
chrome/test/nacl_test_injection/buildbot_nacl_integration.py
|
94
|
3083
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
def Main(args):
pwd = os.environ.get('PWD', '')
is_integration_bot = 'nacl-chrome' in pwd
# This environment variable check mimics what
# buildbot_chrome_nacl_stage.py does.
is_win64 = (sys.platform in ('win32', 'cygwin') and
('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')))
# On the main Chrome waterfall, we may need to control where the tests are
# run.
# If there is serious skew in the PPAPI interface that causes all of
# the NaCl integration tests to fail, you can uncomment the
# following block. (Make sure you comment it out when the issues
# are resolved.) *However*, it is much preferred to add tests to
# the 'tests_to_disable' list below.
#if not is_integration_bot:
# return
tests_to_disable = []
# In general, you should disable tests inside this conditional. This turns
# them off on the main Chrome waterfall, but not on NaCl's integration bots.
# This makes it easier to see when things have been fixed NaCl side.
if not is_integration_bot:
# http://code.google.com/p/nativeclient/issues/detail?id=2511
tests_to_disable.append('run_ppapi_ppb_image_data_browser_test')
if sys.platform == 'darwin':
# TODO(mseaborn) fix
# http://code.google.com/p/nativeclient/issues/detail?id=1835
tests_to_disable.append('run_ppapi_crash_browser_test')
if sys.platform in ('win32', 'cygwin'):
# This one is only failing for nacl_glibc on x64 Windows but it is not
# clear how to disable only that limited case.
# See http://crbug.com/132395
tests_to_disable.append('run_inbrowser_test_runner')
# run_breakpad_browser_process_crash_test is flaky.
# See http://crbug.com/317890
tests_to_disable.append('run_breakpad_browser_process_crash_test')
# See http://crbug.com/332301
tests_to_disable.append('run_breakpad_crash_in_syscall_test')
# It appears that crash_service.exe is not being reliably built by
# default in the CQ. See: http://crbug.com/380880
tests_to_disable.append('run_breakpad_untrusted_crash_test')
tests_to_disable.append('run_breakpad_trusted_crash_in_startup_test')
script_dir = os.path.dirname(os.path.abspath(__file__))
nacl_integration_script = os.path.join(script_dir,
'buildbot_chrome_nacl_stage.py')
cmd = [sys.executable,
nacl_integration_script,
# TODO(ncbray) re-enable.
# https://code.google.com/p/chromium/issues/detail?id=133568
'--disable_glibc',
'--disable_tests=%s' % ','.join(tests_to_disable)]
cmd += args
sys.stdout.write('Running %s\n' % ' '.join(cmd))
sys.stdout.flush()
return subprocess.call(cmd)
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
|
bsd-3-clause
|
Adai0808/scikit-learn
|
benchmarks/bench_sample_without_replacement.py
|
397
|
8008
|
"""
Benchmarks for sampling without replacement of integer.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import operator
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn.externals.six.moves import xrange
from sklearn.utils.random import sample_without_replacement
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_sample(sampling, n_population, n_samples):
gc.collect()
# start time
t_start = datetime.now()
sampling(n_population, n_samples)
delta = (datetime.now() - t_start)
# stop time
time = compute_time(t_start, delta)
return time
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-population",
dest="n_population", default=100000, type=int,
help="Size of the population to sample from.")
op.add_option("--n-step",
dest="n_steps", default=5, type=int,
help="Number of step interval between 0 and n_population.")
default_algorithms = "custom-tracking-selection,custom-auto," \
"custom-reservoir-sampling,custom-pool,"\
"python-core-sample,numpy-permutation"
op.add_option("--algorithm",
dest="selected_algorithm",
default=default_algorithms,
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. \nAvailable: %default")
# op.add_option("--random-seed",
# dest="random_seed", default=13, type=int,
# help="Seed used by the random number generators.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
selected_algorithm = opts.selected_algorithm.split(',')
for key in selected_algorithm:
if key not in default_algorithms.split(','):
raise ValueError("Unknown sampling algorithm \"%s\" not in (%s)."
% (key, default_algorithms))
###########################################################################
# List sampling algorithm
###########################################################################
# We assume that sampling algorithm has the following signature:
# sample(n_population, n_sample)
#
sampling_algorithm = {}
###########################################################################
# Set Python core input
sampling_algorithm["python-core-sample"] = \
lambda n_population, n_sample: \
random.sample(xrange(n_population), n_sample)
###########################################################################
# Set custom automatic method selection
sampling_algorithm["custom-auto"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="auto",
random_state=random_state)
###########################################################################
# Set custom tracking based method
sampling_algorithm["custom-tracking-selection"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="tracking_selection",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-reservoir-sampling"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="reservoir_sampling",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-pool"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="pool",
random_state=random_state)
###########################################################################
# Numpy permutation based
sampling_algorithm["numpy-permutation"] = \
lambda n_population, n_sample: \
np.random.permutation(n_population)[:n_sample]
###########################################################################
# Remove unspecified algorithm
sampling_algorithm = dict((key, value)
for key, value in sampling_algorithm.items()
if key in selected_algorithm)
###########################################################################
# Perform benchmark
###########################################################################
time = {}
n_samples = np.linspace(start=0, stop=opts.n_population,
num=opts.n_steps).astype(np.int)
ratio = n_samples / opts.n_population
print('Benchmarks')
print("===========================")
for name in sorted(sampling_algorithm):
print("Perform benchmarks for %s..." % name, end="")
time[name] = np.zeros(shape=(opts.n_steps, opts.n_times))
for step in xrange(opts.n_steps):
for it in xrange(opts.n_times):
time[name][step, it] = bench_sample(sampling_algorithm[name],
opts.n_population,
n_samples[step])
print("done")
print("Averaging results...", end="")
for name in sampling_algorithm:
time[name] = np.mean(time[name], axis=1)
print("done\n")
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Sampling algorithm performance:")
print("===============================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
fig = plt.figure('scikit-learn sample w/o replacement benchmark results')
plt.title("n_population = %s, n_times = %s" %
(opts.n_population, opts.n_times))
ax = fig.add_subplot(111)
for name in sampling_algorithm:
ax.plot(ratio, time[name], label=name)
ax.set_xlabel('ratio of n_sample / n_population')
ax.set_ylabel('Time (s)')
ax.legend()
# Sort legend labels
handles, labels = ax.get_legend_handles_labels()
hl = sorted(zip(handles, labels), key=operator.itemgetter(1))
handles2, labels2 = zip(*hl)
ax.legend(handles2, labels2, loc=0)
plt.show()
|
bsd-3-clause
|
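For completeness, a minimal usage sketch of the sampler being benchmarked above; the signature (`n_population`, `n_samples`, `method`, `random_state`) is the one the benchmark itself relies on.

# Draw 10 distinct integers from range(1000) without replacement (sketch).
import numpy as np
from sklearn.utils.random import sample_without_replacement

idx = sample_without_replacement(n_population=1000, n_samples=10,
                                 method="auto", random_state=0)
print(np.sort(idx))  # 10 unique indices in [0, 1000)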
kaiserroll14/301finalproject
|
main/pandas/tests/test_rplot.py
|
9
|
11560
|
# -*- coding: utf-8 -*-
from pandas.compat import range
import pandas.util.testing as tm
from pandas import read_csv
import os
import nose
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
import pandas.tools.rplot as rplot
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
def between(a, b, x):
"""Check if x is in the somewhere between a and b.
Parameters:
-----------
a: float, interval start
b: float, interval end
x: float, value to test for
Returns:
--------
True if x is between a and b, False otherwise
"""
if a < b:
return x >= a and x <= b
else:
return x <= a and x >= b
@tm.mplskip
class TestUtilityFunctions(tm.TestCase):
"""
Tests for RPlot utility functions.
"""
def setUp(self):
path = os.path.join(curpath(), 'data/iris.csv')
self.data = read_csv(path, sep=',')
def test_make_aes1(self):
aes = rplot.make_aes()
self.assertTrue(aes['x'] is None)
self.assertTrue(aes['y'] is None)
self.assertTrue(aes['size'] is None)
self.assertTrue(aes['colour'] is None)
self.assertTrue(aes['shape'] is None)
self.assertTrue(aes['alpha'] is None)
self.assertTrue(isinstance(aes, dict))
def test_make_aes2(self):
self.assertRaises(ValueError, rplot.make_aes,
size=rplot.ScaleShape('test'))
self.assertRaises(ValueError, rplot.make_aes,
colour=rplot.ScaleShape('test'))
self.assertRaises(ValueError, rplot.make_aes,
shape=rplot.ScaleSize('test'))
self.assertRaises(ValueError, rplot.make_aes,
alpha=rplot.ScaleShape('test'))
def test_dictionary_union(self):
dict1 = {1 : 1, 2 : 2, 3 : 3}
dict2 = {1 : 1, 2 : 2, 4 : 4}
union = rplot.dictionary_union(dict1, dict2)
self.assertEqual(len(union), 4)
keys = list(union.keys())
self.assertTrue(1 in keys)
self.assertTrue(2 in keys)
self.assertTrue(3 in keys)
self.assertTrue(4 in keys)
self.assertEqual(rplot.dictionary_union(dict1, {}), dict1)
self.assertEqual(rplot.dictionary_union({}, dict1), dict1)
self.assertEqual(rplot.dictionary_union({}, {}), {})
def test_merge_aes(self):
layer1 = rplot.Layer(size=rplot.ScaleSize('test'))
layer2 = rplot.Layer(shape=rplot.ScaleShape('test'))
rplot.merge_aes(layer1, layer2)
self.assertTrue(isinstance(layer2.aes['size'], rplot.ScaleSize))
self.assertTrue(isinstance(layer2.aes['shape'], rplot.ScaleShape))
self.assertEqual(layer2.aes['size'], layer1.aes['size'])
for key in layer2.aes.keys():
if key != 'size' and key != 'shape':
self.assertTrue(layer2.aes[key] is None)
def test_sequence_layers(self):
layer1 = rplot.Layer(self.data)
layer2 = rplot.GeomPoint(x='SepalLength', y='SepalWidth',
size=rplot.ScaleSize('PetalLength'))
layer3 = rplot.GeomPolyFit(2)
result = rplot.sequence_layers([layer1, layer2, layer3])
self.assertEqual(len(result), 3)
last = result[-1]
self.assertEqual(last.aes['x'], 'SepalLength')
self.assertEqual(last.aes['y'], 'SepalWidth')
self.assertTrue(isinstance(last.aes['size'], rplot.ScaleSize))
self.assertTrue(self.data is last.data)
self.assertTrue(rplot.sequence_layers([layer1])[0] is layer1)
@tm.mplskip
class TestTrellis(tm.TestCase):
def setUp(self):
path = os.path.join(curpath(), 'data/tips.csv')
self.data = read_csv(path, sep=',')
layer1 = rplot.Layer(self.data)
layer2 = rplot.GeomPoint(x='total_bill', y='tip')
layer3 = rplot.GeomPolyFit(2)
self.layers = rplot.sequence_layers([layer1, layer2, layer3])
self.trellis1 = rplot.TrellisGrid(['sex', 'smoker'])
self.trellis2 = rplot.TrellisGrid(['sex', '.'])
self.trellis3 = rplot.TrellisGrid(['.', 'smoker'])
self.trellised1 = self.trellis1.trellis(self.layers)
self.trellised2 = self.trellis2.trellis(self.layers)
self.trellised3 = self.trellis3.trellis(self.layers)
def test_grid_sizes(self):
self.assertEqual(len(self.trellised1), 3)
self.assertEqual(len(self.trellised2), 3)
self.assertEqual(len(self.trellised3), 3)
self.assertEqual(len(self.trellised1[0]), 2)
self.assertEqual(len(self.trellised1[0][0]), 2)
self.assertEqual(len(self.trellised2[0]), 2)
self.assertEqual(len(self.trellised2[0][0]), 1)
self.assertEqual(len(self.trellised3[0]), 1)
self.assertEqual(len(self.trellised3[0][0]), 2)
self.assertEqual(len(self.trellised1[1]), 2)
self.assertEqual(len(self.trellised1[1][0]), 2)
self.assertEqual(len(self.trellised2[1]), 2)
self.assertEqual(len(self.trellised2[1][0]), 1)
self.assertEqual(len(self.trellised3[1]), 1)
self.assertEqual(len(self.trellised3[1][0]), 2)
self.assertEqual(len(self.trellised1[2]), 2)
self.assertEqual(len(self.trellised1[2][0]), 2)
self.assertEqual(len(self.trellised2[2]), 2)
self.assertEqual(len(self.trellised2[2][0]), 1)
self.assertEqual(len(self.trellised3[2]), 1)
self.assertEqual(len(self.trellised3[2][0]), 2)
def test_trellis_cols_rows(self):
self.assertEqual(self.trellis1.cols, 2)
self.assertEqual(self.trellis1.rows, 2)
self.assertEqual(self.trellis2.cols, 1)
self.assertEqual(self.trellis2.rows, 2)
self.assertEqual(self.trellis3.cols, 2)
self.assertEqual(self.trellis3.rows, 1)
@tm.mplskip
class TestScaleGradient(tm.TestCase):
def setUp(self):
path = os.path.join(curpath(), 'data/iris.csv')
self.data = read_csv(path, sep=',')
self.gradient = rplot.ScaleGradient("SepalLength", colour1=(0.2, 0.3,
0.4),
colour2=(0.8, 0.7, 0.6))
def test_gradient(self):
for index in range(len(self.data)):
row = self.data.iloc[index]
r, g, b = self.gradient(self.data, index)
r1, g1, b1 = self.gradient.colour1
r2, g2, b2 = self.gradient.colour2
self.assertTrue(between(r1, r2, r))
self.assertTrue(between(g1, g2, g))
self.assertTrue(between(b1, b2, b))
@tm.mplskip
class TestScaleGradient2(tm.TestCase):
def setUp(self):
path = os.path.join(curpath(), 'data/iris.csv')
self.data = read_csv(path, sep=',')
self.gradient = rplot.ScaleGradient2("SepalLength", colour1=(0.2, 0.3, 0.4), colour2=(0.8, 0.7, 0.6), colour3=(0.5, 0.5, 0.5))
def test_gradient2(self):
for index in range(len(self.data)):
row = self.data.iloc[index]
r, g, b = self.gradient(self.data, index)
r1, g1, b1 = self.gradient.colour1
r2, g2, b2 = self.gradient.colour2
r3, g3, b3 = self.gradient.colour3
value = row[self.gradient.column]
a_ = min(self.data[self.gradient.column])
b_ = max(self.data[self.gradient.column])
scaled = (value - a_) / (b_ - a_)
if scaled < 0.5:
self.assertTrue(between(r1, r2, r))
self.assertTrue(between(g1, g2, g))
self.assertTrue(between(b1, b2, b))
else:
self.assertTrue(between(r2, r3, r))
self.assertTrue(between(g2, g3, g))
self.assertTrue(between(b2, b3, b))
@tm.mplskip
class TestScaleRandomColour(tm.TestCase):
def setUp(self):
path = os.path.join(curpath(), 'data/iris.csv')
self.data = read_csv(path, sep=',')
self.colour = rplot.ScaleRandomColour('SepalLength')
def test_random_colour(self):
for index in range(len(self.data)):
colour = self.colour(self.data, index)
self.assertEqual(len(colour), 3)
r, g, b = colour
self.assertTrue(r >= 0.0)
self.assertTrue(g >= 0.0)
self.assertTrue(b >= 0.0)
self.assertTrue(r <= 1.0)
self.assertTrue(g <= 1.0)
self.assertTrue(b <= 1.0)
@tm.mplskip
class TestScaleConstant(tm.TestCase):
def test_scale_constant(self):
scale = rplot.ScaleConstant(1.0)
self.assertEqual(scale(None, None), 1.0)
scale = rplot.ScaleConstant("test")
self.assertEqual(scale(None, None), "test")
class TestScaleSize(tm.TestCase):
def setUp(self):
path = os.path.join(curpath(), 'data/iris.csv')
self.data = read_csv(path, sep=',')
self.scale1 = rplot.ScaleShape('Name')
self.scale2 = rplot.ScaleShape('PetalLength')
def test_scale_size(self):
for index in range(len(self.data)):
marker = self.scale1(self.data, index)
self.assertTrue(marker in ['o', '+', 's', '*', '^', '<', '>', 'v', '|', 'x'])
def test_scale_overflow(self):
def f():
for index in range(len(self.data)):
self.scale2(self.data, index)
self.assertRaises(ValueError, f)
@tm.mplskip
class TestRPlot(tm.TestCase):
def test_rplot1(self):
import matplotlib.pyplot as plt
path = os.path.join(curpath(), 'data/tips.csv')
plt.figure()
self.data = read_csv(path, sep=',')
self.plot = rplot.RPlot(self.data, x='tip', y='total_bill')
self.plot.add(rplot.TrellisGrid(['sex', 'smoker']))
self.plot.add(rplot.GeomPoint(colour=rplot.ScaleRandomColour('day'), shape=rplot.ScaleShape('size')))
self.fig = plt.gcf()
self.plot.render(self.fig)
def test_rplot2(self):
import matplotlib.pyplot as plt
path = os.path.join(curpath(), 'data/tips.csv')
plt.figure()
self.data = read_csv(path, sep=',')
self.plot = rplot.RPlot(self.data, x='tip', y='total_bill')
self.plot.add(rplot.TrellisGrid(['.', 'smoker']))
self.plot.add(rplot.GeomPoint(colour=rplot.ScaleRandomColour('day'), shape=rplot.ScaleShape('size')))
self.fig = plt.gcf()
self.plot.render(self.fig)
def test_rplot3(self):
import matplotlib.pyplot as plt
path = os.path.join(curpath(), 'data/tips.csv')
plt.figure()
self.data = read_csv(path, sep=',')
self.plot = rplot.RPlot(self.data, x='tip', y='total_bill')
self.plot.add(rplot.TrellisGrid(['sex', '.']))
self.plot.add(rplot.GeomPoint(colour=rplot.ScaleRandomColour('day'), shape=rplot.ScaleShape('size')))
self.fig = plt.gcf()
self.plot.render(self.fig)
def test_rplot_iris(self):
import matplotlib.pyplot as plt
path = os.path.join(curpath(), 'data/iris.csv')
plt.figure()
self.data = read_csv(path, sep=',')
plot = rplot.RPlot(self.data, x='SepalLength', y='SepalWidth')
plot.add(rplot.GeomPoint(colour=rplot.ScaleGradient('PetalLength', colour1=(0.0, 1.0, 0.5), colour2=(1.0, 0.0, 0.5)),
size=rplot.ScaleSize('PetalWidth', min_size=10.0, max_size=200.0),
shape=rplot.ScaleShape('Name')))
self.fig = plt.gcf()
plot.render(self.fig)
if __name__ == '__main__':
import unittest
unittest.main()
|
gpl-3.0
|
sarahgrogan/scikit-learn
|
examples/exercises/plot_cv_diabetes.py
|
231
|
2527
|
"""
===============================================
Cross-validation on diabetes Dataset Exercise
===============================================
A tutorial exercise which uses cross-validation with linear models.
This exercise is used in the :ref:`cv_estimators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
from __future__ import print_function
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation, datasets, linear_model
diabetes = datasets.load_diabetes()
X = diabetes.data[:150]
y = diabetes.target[:150]
lasso = linear_model.Lasso()
alphas = np.logspace(-4, -.5, 30)
scores = list()
scores_std = list()
for alpha in alphas:
lasso.alpha = alpha
this_scores = cross_validation.cross_val_score(lasso, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
plt.figure(figsize=(4, 3))
plt.semilogx(alphas, scores)
# plot error lines showing +/- std. errors of the scores
plt.semilogx(alphas, np.array(scores) + np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.semilogx(alphas, np.array(scores) - np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.ylabel('CV score')
plt.xlabel('alpha')
plt.axhline(np.max(scores), linestyle='--', color='.5')
##############################################################################
# Bonus: how much can you trust the selection of alpha?
# To answer this question we use the LassoCV object that sets its alpha
# parameter automatically from the data by internal cross-validation (i.e. it
# performs cross-validation on the training data it receives).
# We use external cross-validation to see how much the automatically obtained
# alphas differ across different cross-validation folds.
lasso_cv = linear_model.LassoCV(alphas=alphas)
k_fold = cross_validation.KFold(len(X), 3)
print("Answer to the bonus question:",
"how much can you trust the selection of alpha?")
print()
print("Alpha parameters maximising the generalization score on different")
print("subsets of the data:")
for k, (train, test) in enumerate(k_fold):
lasso_cv.fit(X[train], y[train])
print("[fold {0}] alpha: {1:.5f}, score: {2:.5f}".
format(k, lasso_cv.alpha_, lasso_cv.score(X[test], y[test])))
print()
print("Answer: Not very much since we obtained different alphas for different")
print("subsets of the data and moreover, the scores for these alphas differ")
print("quite substantially.")
plt.show()
|
bsd-3-clause
|
Denvi/FlatCAM
|
descartes/tests.py
|
2
|
1527
|
from shapely.geometry import *
import unittest
from descartes.patch import PolygonPatch
class PolygonTestCase(unittest.TestCase):
polygon = Point(0, 0).buffer(10.0).difference(
MultiPoint([(-5, 0), (5, 0)]).buffer(3.0))
def test_patch(self):
patch = PolygonPatch(self.polygon)
self.failUnlessEqual(str(type(patch)),
"<class 'matplotlib.patches.PathPatch'>")
path = patch.get_path()
self.failUnless(len(path.vertices) == len(path.codes) == 198)
class JSONPolygonTestCase(unittest.TestCase):
polygon = Point(0, 0).buffer(10.0).difference(
MultiPoint([(-5, 0), (5, 0)]).buffer(3.0))
def test_patch(self):
geo = self.polygon.__geo_interface__
patch = PolygonPatch(geo)
self.failUnlessEqual(str(type(patch)),
"<class 'matplotlib.patches.PathPatch'>")
path = patch.get_path()
self.failUnless(len(path.vertices) == len(path.codes) == 198)
class GeoInterfacePolygonTestCase(unittest.TestCase):
class GeoThing:
__geo_interface__ = None
thing = GeoThing()
thing.__geo_interface__ = Point(0, 0).buffer(10.0).difference(
MultiPoint([(-5, 0), (5, 0)]).buffer(3.0)).__geo_interface__
def test_patch(self):
patch = PolygonPatch(self.thing)
self.failUnlessEqual(str(type(patch)),
"<class 'matplotlib.patches.PathPatch'>")
path = patch.get_path()
self.failUnless(len(path.vertices) == len(path.codes) == 198)
|
mit
|
AnishShah/tensorflow
|
tensorflow/examples/learn/iris_run_config.py
|
76
|
2565
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with run config."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
X_FEATURE = 'x' # Name of the input feature.
def main(unused_argv):
# Load dataset.
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
  # You can define your configurations by providing a RunConfig object to the
  # estimator to control session configurations, e.g. tf_random_seed.
run_config = tf.estimator.RunConfig().replace(tf_random_seed=1)
# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = [
tf.feature_column.numeric_column(
X_FEATURE, shape=np.array(x_train).shape[1:])]
classifier = tf.estimator.DNNClassifier(
feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3,
config=run_config)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
classifier.train(input_fn=train_input_fn, steps=200)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class_ids'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
tf.app.run()
|
apache-2.0
|
yonglehou/scikit-learn
|
sklearn/metrics/metrics.py
|
233
|
1262
|
import warnings
warnings.warn("sklearn.metrics.metrics is deprecated and will be removed in "
"0.18. Please import from sklearn.metrics",
DeprecationWarning)
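# Illustrative migration sketch (not part of the original module): prefer the
# top-level package, e.g.
#     from sklearn.metrics import accuracy_score, roc_auc_score
# rather than
#     from sklearn.metrics.metrics import accuracy_score, roc_auc_score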
from .ranking import auc
from .ranking import average_precision_score
from .ranking import label_ranking_average_precision_score
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
|
bsd-3-clause
|
datascopeanalytics/sensor_fusion
|
sensor.py
|
1
|
3722
|
import random
import os
import copy
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
from scipy.stats import linregress
from utils import gaussian
def plot_linear_fit(ax, x_array, y_array, fit_function, fit_sigma, color, cmap):
xlim = (min(x_array), max(x_array))
ylim = (min(y_array), max(y_array))
ax.set_xlim(*xlim)
ax.set_ylim(*ylim)
x_range = np.linspace(*xlim)
y_range = np.linspace(*ylim)
ax.scatter(x_array, y_array, lw=0, alpha=0.5, color=color)
fit_line = [fit_function(x) for x in x_range]
ax.plot(x_range, fit_line, color=color)
xx, yy = np.meshgrid(x_range, y_range)
zz = xx + yy
for i in range(len(x_range)):
for j in range(len(y_range)):
zz[j, i] = gaussian(yy[j, i], fit_function(xx[j, i]), fit_sigma)
im = ax.imshow(
zz, origin='lower', interpolation='bilinear',
cmap=cmap, alpha=0.5, aspect='auto',
extent=(xlim[0], xlim[-1], ylim[0], ylim[-1]),
vmin=0.0, vmax=gaussian(0, 0, fit_sigma)
)
return ax, im
class Sensor(object):
def __init__(self, name, **kwargs):
self.name = name
for key, value in kwargs.items():
setattr(self, key, value)
def read(self, variable):
variable = max(0, random.gauss(variable, self.proc_sigma))
reading = variable * self.slope + self.intersect
return random.gauss(reading, self.sigma)
def fit(self, data):
self.experiment_data = copy.deepcopy(data)
n_samples = len(data)
model_slope, model_intercept = np.polyfit(
[o for o, r in data], [r for o, r in data], 1)
def model(occupants):
return occupants * model_slope + model_intercept
self.model = model
def predictor(sensor_reading):
return (sensor_reading-model_intercept)/model_slope
self.predictor = predictor
error = 0.0
for occupants, reading in data:
error += (predictor(reading) - occupants)**2
sigma = np.sqrt(error / (n_samples - 1))
self.predictor_sigma = sigma
def plot_experiment(self, path=""):
color = self.color
data = self.experiment_data
cmap = sns.light_palette(color, as_cmap=True)
fig, ax = plt.subplots()
occupants, readings = (np.array(array) for array in zip(*data))
# ax_left, im_left = plot_linear_fit(
# ax_left, occupants, readings, self.model, self.model_sigma, color,
# cmap)
ax, im = plot_linear_fit(
ax, readings, occupants,
self.predictor, self.predictor_sigma,
color, cmap
)
ax.set_xlabel("{} sensor readout ({})".format(self.name, self.units))
ax.set_ylabel("Number of train car occupants")
# cax, kw = mpl.colorbar.make_axes(
# [ax_left, ax_right], location="bottom"
# )
# norm = mpl.colors.Normalize(vmin=0, vmax=1)
# cbar = mpl.colorbar.ColorbarBase(
# ax, cmap=cmap, norm=norm, alpha=0.5)
cbar = plt.colorbar(im, alpha=0.5, extend='neither', ticks=[
gaussian(3 * self.predictor_sigma, 0, self.predictor_sigma),
gaussian(2 * self.predictor_sigma, 0, self.predictor_sigma),
gaussian(self.predictor_sigma, 0, self.predictor_sigma),
gaussian(0, 0, self.predictor_sigma),
])
# cbar.solids.set_edgecolor("face")
cbar.set_ticklabels(
['$3 \sigma$', '$2 \sigma$', '$\sigma$', '{:.2%}'.format(
gaussian(0, 0, self.predictor_sigma))],
update_ticks=True
)
fig.savefig(os.path.join(path, self.name+".svg"))
|
unlicense
|
jbloom/mapmuts
|
scripts/mapmuts_countparsedmuts.py
|
1
|
7559
|
#!python
"""Counts number of mutations that occur >= a set number of times.
Designed to analyze how extensively a library samples all mutations, and
all synonymous mutations (at the codon level).
To run, type::
mapmuts_countparsedmuts.py infile.txt
after creating the appropriate input file ``infile.txt``.
"""
import sys
import os
import time
import mapmuts.plot
import mapmuts.io
import mapmuts.sequtils
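# Hedged sketch of what ``infile.txt`` might contain, inferred from the option
# parsing in main() below; the sample names and codon-counts file names here
# are purely hypothetical:
#
#   plotfileprefix mutcounts
#   maxn 50
#   sites all
#   legendloc bottom
#   writecounts True
#   replicate_1 replicate_1_codoncounts.txt
#   replicate_2 replicate_2_codoncounts.txt
#
# Lines beginning with '#' and blank lines are ignored by the parser.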
def main():
"""Main body of script."""
print "Beginning execution of mapmuts_countparsedmuts.py..."
if not mapmuts.plot.PylabAvailable():
raise ImportError("This script requires matplotlib / pylab, which are not available.")
sites = 'all'
readsomecounts = False
mapmuts.io.PrintVersions(sys.stdout)
args = sys.argv[1 : ]
if len(args) != 1:
raise IOError("Script must be called with exactly one argument"\
+ ' specifying the name of the input file.')
infilename = sys.argv[1]
if not os.path.isfile(infilename):
raise IOError("Failed to find infile of %s" % infilename)
lines = [line for line in open(infilename).readlines() if \
line[0] != '#' and not line.isspace()]
plotfileprefix = maxn = None
legendloc = 'bottom' # default
writecounts = True
samples = [] # tuples are (name, all_counts, syn_counts)
for line in lines:
entries = line.split()
if entries[0].strip() == 'plotfileprefix':
if plotfileprefix != None:
raise ValueError("Duplicate plotfileprefix keys")
plotfileprefix = entries[1].strip()
elif entries[0].strip() == 'maxn':
if maxn != None:
raise ValueError("Duplicate maxn key")
if entries[1].strip().upper() == 'NONE':
maxn = None
else:
try:
maxn = int(entries[1])
except ValueError:
raise ValueError("maxn does not specify a valid integer: %s" % entries[1])
if maxn < 1:
                    raise ValueError("maxn must be at least one")
elif entries[0].strip() == 'sites':
if readsomecounts:
raise ValueError("You must put the line for sites BEFORE the codon counts files")
if len(entries) == 2 and (entries[1].upper() in ['ALL', 'NONE']):
pass # already set to all
elif len(entries) == 3:
sites = (int(entries[1]), int(entries[2]))
sites = [r for r in range(sites[0], sites[1] + 1)]
else:
                raise ValueError("Invalid line for sites:\n%s" % line)
elif entries[0].strip() == 'legendloc':
legendloc = entries[1].strip().lower()
if legendloc not in ['bottom', 'right']:
raise ValueError("legendloc must be either bottom or right, got: %s" % legendloc)
elif entries[0].strip() == 'writecounts':
if entries[1].strip().upper() == 'FALSE':
writecounts = False
else:
if len(entries) < 2:
raise ValueError("Line must contain at least two entries:\n%s" % line)
name = entries[0].strip()
counts = []
for codoncountfile in entries[1 : ]:
codoncountfile = codoncountfile.strip()
if not os.path.isfile(codoncountfile):
raise IOError("Failed to find specified codon counts file of %s" % codoncountfile)
print "Reading codon counts for %s from %s" % (name, codoncountfile)
counts.append(mapmuts.io.ReadCodonCounts(open(codoncountfile)))
(all_counts, multi_nt_all_counts, syn_counts, multi_nt_syn_counts) = mapmuts.sequtils.TallyCodonCounts(counts, sites=sites)
if not all_counts:
raise ValueError("No counts for %s" % name)
if not (max(all_counts) >= max(syn_counts) >= 0):
raise ValueError("Count minima don't make sense for %s" % name)
samples.append((name, all_counts, multi_nt_all_counts, syn_counts, multi_nt_syn_counts))
readsomecounts = True
samples.sort()
if not plotfileprefix:
raise ValueError("Failed to parse a value for plotfileprefix")
if not samples:
raise ValueError("Failed to find any samples.")
# get max occurrences of any mutation if maxn not specified
if maxn == None:
maxn = max(samples[0][1])
for (name, all_counts, multi_nt_all_counts, syn_counts, multi_nt_syn_counts) in samples[1 : ]:
maxn = max(maxn, max(all_counts))
# now make cumul_samples: entries are (name, cumul_all, cumul_all_tot, cumul_multi_nt_all, cumul_multi_nt_all_tot, cumul_syn, cumul_syn_tot, cumul_multi_nt_syn, cumul_multi_nt_syn_tot)
# where cumul_all[n] is the fraction that have >= n occurrences
# where cumul_all_tot is total number of mutations in this category
cumul_samples = []
for (name, all_counts, multi_nt_all_counts, syn_counts, multi_nt_syn_counts) in samples:
this_tuple = [name]
for i_list in [all_counts, multi_nt_all_counts, syn_counts, multi_nt_syn_counts]:
i_cumul = []
i_d = {}
for x in i_list:
if x in i_d:
i_d[x] += 1
else:
i_d[x] = 1
ntot = float(len(i_list))
nge = ntot
for n in range(0, maxn):
i_cumul.append(nge / ntot)
if n in i_d:
nge -= i_d[n]
i_cumul.append(nge / ntot)
this_tuple.append(i_cumul)
this_tuple.append(ntot)
assert len(i_cumul) == maxn + 1, "len(i_cumul) = %d, maxn = %d" % (len(i_cumul), maxn)
cumul_samples.append(tuple(this_tuple))
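    # Worked sketch of the cumulative fractions computed above (illustrative
    # numbers only, not from any real dataset): if a counts list were
    # [0, 1, 1, 3] and maxn were 3, then ntot = 4 and i_cumul would be
    # [1.00, 0.75, 0.25, 0.25], i.e. the fractions of mutations observed
    # >= 0, >= 1, >= 2 and >= 3 times, respectively.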
# write the text files and make the plots
cumulfracs = {}
counts = {}
for (tuple_index, mut_type) in [(1, 'all'), (3, 'multi-nt-all'), (5, 'syn'), (7, 'multi-nt-syn')]:
# the text file
fname = '%s_%scodonmutcounts.txt' % (plotfileprefix, mut_type)
print "Now writing file %s..." % fname
f = open(fname, 'w')
f.write('# File listing the fraction of %s mutations that are found greater than or equal to n times.\n' % mut_type)
f.write('# There are %d total %s mutations\n' % (cumul_samples[0][tuple_index + 1], mut_type))
f.write('#n\t%s\n' % ('\t'.join([tup[0] for tup in cumul_samples])))
for n in range(0, maxn + 1):
f.write('%d' % n)
for tup in cumul_samples:
f.write('\t%.4f' % tup[tuple_index][n])
f.write('\n')
f.close()
counts[mut_type] = cumul_samples[0][tuple_index + 1]
names = [tup[0] for tup in cumul_samples]
cumulfracs[mut_type] = [tup[tuple_index] for tup in cumul_samples]
plotfile = "%s_multi-nt-codonmutcounts.pdf" % plotfileprefix
print "Now creating plot %s..." % plotfile
mapmuts.plot.PlotMutCountFracs(plotfile, 'Multi-nucleotide codon mutations', names, cumulfracs['multi-nt-all'], cumulfracs['multi-nt-syn'], counts['multi-nt-all'], counts['multi-nt-syn'], legendloc, writecounts=writecounts)
plotfile = "%s_codonmutcounts.pdf" % plotfileprefix
print "Now creating plot %s..." % plotfile
mapmuts.plot.PlotMutCountFracs(plotfile, 'Codon mutations', names, cumulfracs['all'], cumulfracs['syn'], counts['all'], counts['syn'], legendloc)
print "Script complete."
if __name__ == '__main__':
main() # run the script
|
gpl-3.0
|
f3r/scikit-learn
|
sklearn/feature_selection/__init__.py
|
33
|
1159
|
"""
The :mod:`sklearn.feature_selection` module implements feature selection
algorithms. It currently includes univariate filter selection methods and the
recursive feature elimination algorithm.
"""
from .univariate_selection import chi2
from .univariate_selection import f_classif
from .univariate_selection import f_oneway
from .univariate_selection import f_regression
from .univariate_selection import SelectPercentile
from .univariate_selection import SelectKBest
from .univariate_selection import SelectFpr
from .univariate_selection import SelectFdr
from .univariate_selection import SelectFwe
from .univariate_selection import GenericUnivariateSelect
from .variance_threshold import VarianceThreshold
from .rfe import RFE
from .rfe import RFECV
from .from_model import SelectFromModel
__all__ = ['GenericUnivariateSelect',
'RFE',
'RFECV',
'SelectFdr',
'SelectFpr',
'SelectFwe',
'SelectKBest',
'SelectPercentile',
'VarianceThreshold',
'chi2',
'f_classif',
'f_oneway',
'f_regression',
'SelectFromModel']
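# Hedged usage sketch (not part of the original module): a typical univariate
# selection call looks like
#     from sklearn.feature_selection import SelectKBest, chi2
#     X_new = SelectKBest(chi2, k=2).fit_transform(X, y)
# where ``X`` and ``y`` are an existing feature matrix and target vector.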
|
bsd-3-clause
|
CitizenScienceInAstronomyWorkshop/pyIBCC
|
python/tutorial/popdensity.py
|
1
|
9544
|
'''
Created on 5 May 2015
@author: edwin
'''
import logging
logging.basicConfig(level=logging.DEBUG)
import ibcc, json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def load_zoo_data(zoodatafile):
# format of file:
# user_id,user_ip,workflow_id,created_at,gold_standard,expert,metadata,annotations,subject_data
zoodata = pd.read_csv(zoodatafile, sep=',', parse_dates=False, index_col=False, usecols=[0,1,7,8],
skipinitialspace=True, quotechar='"')
userid = zoodata['user_id']
userip = zoodata['user_ip']
subjectdata = zoodata['subject_data']
annotations = zoodata['annotations']
Cagents = []
Cobjects = []
Cscores = []
for i, user in enumerate(userid):
annotation = json.loads(annotations[i])
score = annotation[0]["value"]
if score==6:
continue
else:
Cscores.append(score)
if not user or np.isnan(user):
user = userip[i]
if not user in agentids:
agentids[user] = len(agentids.keys())
Cagents.append(agentids[user])
subjectdict = json.loads(subjectdata[i])
subject = int(subjectdict.keys()[0])
if not subject in subjectids:
subjectids[subject] = len(subjectids.keys())
reverse_subjectids[subjectids[subject]] = subject
Cobjects.append(subjectids[subject])
return Cagents, Cobjects, Cscores, subjectdata
# LOAD CROWDSOURCED DATA ----------------------------------------------
zoodatafile = "./data/rescue_global_nepal_2015.csv"
agentids = {}
subjectids = {}
reverse_subjectids = {}
Cagents, Cobjects, Cscores, subjectdata = load_zoo_data(zoodatafile)
# APPEND DATA FROM OSM to CROWDSOURCED DATASET -------------------------
osmfile = "./data/OSM_labels.csv"
osmdata = pd.read_csv(osmfile, sep=',', parse_dates=False, index_col=False, skipinitialspace=True, quotechar='"',
header=None, names=['subject_id','value'])
osm_subjects = osmdata["subject_id"]# alpha0 = np.tile(alpha0[:,:,np.newaxis], (1,1,len(agentids)))
osm_scores = osmdata["value"] - 1
agentids["OSMData"] = len(agentids.keys())
for i, subject in enumerate(osm_subjects):
Cagents.append(agentids["OSMData"])
if not subject in subjectids:
subjectids[subject] = len(subjectids.keys())
reverse_subjectids[subjectids[subject]] = subject
Cobjects.append(subjectids[subject])
score = osm_scores[i]
Cscores.append(score)
# RUN IBCC --------------------------------------------------------------
Cagents = np.array(Cagents)[:,np.newaxis]
Cobjects = np.array(Cobjects)[:, np.newaxis]
Cscores = np.array(Cscores)[:, np.newaxis]
C = np.concatenate((Cagents,Cobjects,Cscores), axis=1)
alpha0 = np.ones((6,6,len(agentids)))
#alpha0[:, :, 5] = 2.0
alpha0[np.arange(6),np.arange(6),:] = 1.01
# alpha0[:,:,:] = np.array([[4.0, 2.0, 1.5, 1.0, 1.0, 2.0], [2.0, 4.0, 2.0, 1.5, 1.0, 2.5], [1.5, 2.0, 4.0, 2.0, 1.5, 2.5],
# [1.0, 1.5, 2.0, 4.0, 2.0, 2.5], [1.0, 1.0, 1.5, 2.0, 4.0, 3.0], [1.0, 1.0, 1.0, 1.0, 1.0, 4.0]])[:,:,np.newaxis]
# alpha0 = np.tile(alpha0[:,:,np.newaxis], (1,1,len(agentids)))
#alpha0[np.arange(6),np.arange(6),-1] += 20
# alpha0[:, 5, -1] += 50
nu0 = np.array([1,1,1,1,1,1], dtype=float)
combiner = ibcc.IBCC(nclasses=6, nscores=6, alpha0=alpha0, nu0=nu0)
preds = combiner.combine_classifications(C)
# PLOT CONFUSION MATRIX ----------------------------------------------------
from scipy.stats import beta
plt.figure()
# for k in range(combiner.alpha.shape[2]):
k = 1 # worker ID to plot
alpha_k = combiner.alpha[:, :, k]
pi_k = alpha_k / np.sum(alpha_k, axis=1)[:, np.newaxis]
print "Confusion matrix for worker %i" % k
print pi_k
x = np.arange(20) / 20.0
for j in range(alpha_k.shape[0]):
pdfj = beta.pdf(x, alpha_k[j, j], np.sum(alpha_k[j, :]) - alpha_k[j,j] )
plt.plot(x, pdfj, label='True class %i' % j)
plt.legend(loc='best')
plt.ylabel('density')
plt.xlabel('p(correct annotation)')
# SAVE RESULTS TO CSV FILE --------------------------------------------------
results_subjectids = []
for i in range(preds.shape[0]):
results_subjectids.append(reverse_subjectids[i])
results_subjectids = np.array(results_subjectids)# skipinitialspace=True, quotechar='"', header=None, names=['subject_id','x','y'] )
# get the coordinates for the subjects and save to another file
nsubjects = len(results_subjectids)
minxarr = np.zeros(nsubjects)
minyarr = np.zeros(nsubjects)
maxxarr = np.zeros(nsubjects)
maxyarr = np.zeros(nsubjects)
for i, subjectstr in enumerate(subjectdata):
subject = json.loads(subjectstr)
sidstr = subject.keys()[0]
sid = int(subject.keys()[0])
if not sid in subjectids:
continue
sidx = subjectids[sid]
minxarr[sidx] = subject[sidstr]["minx"]
minyarr[sidx] = subject[sidstr]["miny"]
maxxarr[sidx] = subject[sidstr]["maxx"]
maxyarr[sidx] = subject[sidstr]["maxy"]
results = pd.DataFrame(data={'subject_id':results_subjectids, 'priority1': preds[:,0], 'priority2':preds[:,1],
'priority3':preds[:,2], 'priority4':preds[:,3], 'priority5':preds[:,4],
'no_priority':preds[:,5], 'minx':minxarr, 'miny':minyarr, 'maxx':maxxarr, 'maxy':maxyarr},
index=None)
results.to_csv("./output/zooresults_osm.csv", sep=',', index=False, float_format='%1.4f',
cols=['subject_id','priority1','priority2','priority3','priority4','priority5','no_priority','minx','miny','maxx','maxy'])
# TRANSLATING RESULTS BACK TO LATITUDE/LONGITUDE COORDINATES --------------------------------
nepal_subjects = []
for subject in results_subjectids:
if subject in np.array(osm_subjects):
nepal_subjects.append(subjectids[subject])
preds_nepal = preds[nepal_subjects,:]
print np.around(combiner.alpha[:,:,56] - alpha0[:,:,-1], 3)
print np.around(np.sum(combiner.alpha[:,:,0:56], axis=2),3)
idxs = (Cagents==56)
objs = Cobjects[idxs]
scores = Cscores[idxs]
osm_top_objs = objs[scores<=2]
preds_osmtop = np.around(preds[osm_top_objs,:], 2)
local_conflict_ids = osm_top_objs[np.sum(preds_osmtop[:,0:3],axis=1)<0.5]
print np.around(preds[local_conflict_ids,:], 2)
osm_empty_objs = objs[scores>=4]
preds_osmempty = np.around(preds[osm_empty_objs,:], 2)
local_conflict_ids = osm_empty_objs[np.sum(preds_osmempty[:,2:],axis=1)<0.2]
zoo_conflict_ids = results_subjectids[local_conflict_ids]
print zoo_conflict_ids
print np.around(preds[local_conflict_ids,:], 2)
coordsfile = './data/transformed_subject_id_metadata_Kathmandu_ring_1.csv'
coordsdata = pd.read_csv(coordsfile, sep=',', parse_dates=False, index_col=False, usecols=[0,2,3],
skipinitialspace=True, quotechar='"', header=None, names=['subject_id','x','y'] )
osmresults = np.zeros(len(osm_subjects))
crowdpreds = np.zeros((len(osm_subjects), 6))
xcoords = np.zeros(len(osm_subjects))
ycoords = np.zeros(len(osm_subjects))
for i, s in enumerate(osm_subjects):
sidx = subjectids[s]
crowdpreds[i] = preds[sidx, :]
osmresults[i] = osm_scores[i]
for j, s2 in enumerate(coordsdata['subject_id']):
if s2==s:
xcoords[i] = coordsdata['x'][j]
ycoords[i] = coordsdata['y'][j]
# PLOT THE MOST PROBABLE CATEGORIES AS A HEATMAP -----------------------------
# get the chosen category from the crowd
cs = np.cumsum(crowdpreds, axis=1)
c = 5
crowdresults = np.zeros(len(osm_subjects))
while c>=0:
crowdresults[cs[:,c]>=0.9] = c
c -= 1
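# Illustrative sketch of the selection rule above (made-up numbers): for a row
# of preds such as [0.5, 0.3, 0.15, 0.05, 0.0, 0.0] the cumulative sums are
# [0.5, 0.8, 0.95, 1.0, 1.0, 1.0], so the countdown leaves crowdresults at 2,
# the lowest category whose cumulative probability reaches 0.9.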
# chose the minimum from the two sets
combinedresults = crowdresults#np.min([osmresults, crowdresults], axis=0)
output = np.concatenate((osm_subjects[:, np.newaxis], combinedresults[:, np.newaxis]), axis=1)
np.savetxt("./output/combined_categories.csv", output, fmt="%i", delimiter=',')
combinedresults = combinedresults[3:]
xcoords = xcoords[3:]
ycoords = ycoords[3:]
nx = len(np.unique(xcoords))
ny = len(np.unique(ycoords))
grid = np.empty((nx+1, ny+1))
grid[:] = np.nan
xgrid = (xcoords-np.min(xcoords)) / float(np.max(xcoords)-np.min(xcoords)) * nx
ygrid = (ycoords-np.min(ycoords)) / float(np.max(ycoords)-np.min(ycoords)) * ny
xgrid = np.round(xgrid).astype(int)
ygrid = np.round(ygrid).astype(int)
grid[xgrid, ygrid] = combinedresults
dpi = 96.0
fig = plt.figure(frameon=False)#, figsize=(float(nx)/dpi,float(ny)/dpi))
plt.autoscale(tight=True)
#Can also try interpolation=nearest or none
ax = fig.add_subplot(111)
ax.set_axis_off()
# bin the results so we get contours rather than blurred map
# grid = grid.T
contours = np.zeros((grid.shape[0], grid.shape[1], 4))#bcc_pred.copy()
contours[grid==4, :] = [0, 1, 1, 0.7]
contours[grid==3, :] = [0, 1, 0, 0.7]
contours[grid==2, :] = [1, 1, 0, 0.7]
contours[grid==1, :] = [1, 0.2, 0, 0.7]
contours[grid==0, :] = [1, 0, 0.5, 0.7]
plt.imshow(contours, aspect=None, origin='lower', interpolation='nearest')
fig.tight_layout(pad=0,w_pad=0,h_pad=0)
ax = plt.gca()
ax.xaxis.set_major_locator(plt.NullLocator())
ax.yaxis.set_major_locator(plt.NullLocator())
plt.savefig('./output/popdensity.png', bbox_inches='tight', pad_inches=0, transparent=True, dpi=96)
gridsize_lat = float(np.max(xcoords)-np.min(xcoords)) / float(nx)
gridsize_lon = float(np.max(ycoords)-np.min(ycoords)) / float(ny)
print np.min(xcoords)
print np.max(xcoords) + gridsize_lat
print np.min(ycoords)
print np.max(ycoords) + gridsize_lon
|
mit
|
vineetk1/yodaqa
|
data/ml/fbpath/fbpath_train_logistic.py
|
3
|
2964
|
#!/usr/bin/python
#
# Train a logistic regression classifier to predict which Freebase
# property paths would match answers given the question features.
#
# Usage: fbpath_train_logistic.py TRAIN.JSON MODEL.JSON
import json
import numpy as np
from fbpathtrain import VectorizedData
import random
import re
from sklearn.linear_model import LogisticRegression
from sklearn.multiclass import OneVsRestClassifier
import sys
import time
def dump_cfier(cfier, Xdict, Ydict):
print('/// Model is %s' % (re.sub('\n\\s*', ' ', str(cfier)),))
print('{')
for cls, cfr in zip(cfier.classes_, cfier.estimators_):
weights = dict()
for feat_i in np.nonzero(cfr.coef_[0] != 0)[0]:
weights[Xdict.feature_names_[feat_i]] = cfr.coef_[0][feat_i]
if not weights:
continue
weights['_'] = cfr.intercept_[0]
print(' "%s": %s%s' % (Ydict.classes_[cls], json.dumps(weights),
',' if cls != cfier.classes_[-1] else ''))
print('}')
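# Hedged sketch of the emitted format (the path and feature names below are
# purely hypothetical): each fbpath maps to its non-zero feature weights plus
# an "_" intercept, e.g.
#   {
#     "someFreebasePath": {"someQuestionFeature": 1.3, "_": -2.1},
#     ...
#   }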
if __name__ == "__main__":
trainfile, valfile = sys.argv[1:]
# Seed always to the same number to get reproducible builds
# TODO: Make this configurable on the command line or in the environment
random.seed(17151713)
print('/// The weights of individual question features for each fbpath.')
print('/// Missing features have weight zero. Classifiers with no features are skipped.')
print('// These weights are output by data/ml/fbpath/fbpath-train-logistic.py as this:')
print('//')
## Training
with open(trainfile, 'r') as f:
traindata = VectorizedData(json.load(f))
print('// traindata: %d questions, %d features, %d fbpaths' % (
np.size(traindata.X, axis=0), np.size(traindata.X, axis=1), np.size(traindata.Y, axis=1)))
sys.stdout.flush()
# class_weight='auto' produces reduced performance, val mrr 0.574 -> 0.527
# (see the notebook)
# We use L1 regularization mainly to minimize the output model size,
# though it seems to yield better precision+recall too.
t_start = time.clock()
cfier = OneVsRestClassifier(LogisticRegression(penalty='l1'), n_jobs=4)
cfier.fit(traindata.X, traindata.Y)
t_end = time.clock()
print('// training took %d seconds' % (t_end-t_start,))
sys.stdout.flush()
## Benchmarking
with open(valfile, 'r') as f:
valdata = VectorizedData(json.load(f), traindata.Xdict, traindata.Ydict)
print('// valdata: %d questions' % (np.size(valdata.X, axis=0),))
sys.stdout.flush()
val_score = valdata.cfier_score(cfier, lambda cfier, X: cfier.predict_proba(X))
print('// val sklScore %.3f, qRecallAny %.3f, qRecallAll %.3f, pathPrec %.3f, [qScoreMRR %.3f]' % (
val_score['sklScore'],
val_score['qRecallAny'], val_score['qRecallAll'], val_score['pPrec'],
val_score['qScoreMRR']))
sys.stdout.flush()
## Data Dump
dump_cfier(cfier, traindata.Xdict, traindata.Ydict)
|
apache-2.0
|
annayqho/TheCannon
|
code/lamost/xcalib_5labels/paper_plots/feh_alpha.py
|
1
|
1305
|
import numpy as np
import matplotlib.pyplot as plt
import glob
import pyfits
from corner import hist2d
from matplotlib.colors import LogNorm
from matplotlib import rc
plt.rc('text', usetex=True)
#rc('text.latex', preamble = ','.join('''
# \usepackage{txfonts}
# \usepackage{lmodern}
# '''.split()))
plt.rc('font', family='serif')
direc = '/users/annaho/Data/LAMOST/Label_Transfer'
f = pyfits.open("%s/table_for_paper.fits" %direc)
a = f[1].data
f.close()
feh = a['cannon_m_h']
am = a['cannon_alpha_m']
snr = a['snrg']
choose = snr > 20
print(sum(choose))
print(len(choose))
fig, ax = plt.subplots(1,1, sharex=True, sharey=True, figsize=(8,5))
hist2d(feh[choose], am[choose], ax=ax, bins=100, range=[[-2.2,.9],[-0.2,0.5]])
ax.set_xlabel("[Fe/H] (dex)" + " from Cannon/LAMOST", fontsize=16)
fig.text(
0.04, 0.5, r"$\mathrm{[\alpha/M]}$" + " (dex) from Cannon/LAMOST",
fontsize=16, va = 'center', rotation='vertical')
label = r"Objects with SNR \textgreater 20"
props = dict(boxstyle='round', facecolor='white')
ax.tick_params(axis='x', labelsize=16)
ax.tick_params(axis='y', labelsize=16)
ax.text(0.05, 0.85, label,
horizontalalignment='left', verticalalignment='bottom',
transform=ax.transAxes, fontsize=16, bbox=props)
#plt.show()
plt.savefig("feh_alpha.png")
|
mit
|
mne-tools/mne-tools.github.io
|
0.16/_downloads/plot_receptive_field_mtrf.py
|
8
|
11536
|
"""
=========================================
Receptive Field Estimation and Prediction
=========================================
This example reproduces figures from Lalor et al's mTRF toolbox in
matlab [1]_. We will show how the :class:`mne.decoding.ReceptiveField` class
can perform a similar function along with scikit-learn. We will first fit a
linear encoding model using the continuously-varying speech envelope to predict
activity of a 128 channel EEG system. Then, we will take the reverse approach
and try to predict the speech envelope from the EEG (known in the literature
as a decoding model, or simply stimulus reconstruction).
References
----------
.. [1] Crosse, M. J., Di Liberto, G. M., Bednar, A. & Lalor, E. C. (2016).
The Multivariate Temporal Response Function (mTRF) Toolbox:
A MATLAB Toolbox for Relating Neural Signals to Continuous Stimuli.
Frontiers in Human Neuroscience 10, 604. doi:10.3389/fnhum.2016.00604
.. [2] Haufe, S., Meinecke, F., Goergen, K., Daehne, S., Haynes, J.-D.,
Blankertz, B., & Biessmann, F. (2014). On the interpretation of weight
vectors of linear models in multivariate neuroimaging. NeuroImage, 87,
96-110. doi:10.1016/j.neuroimage.2013.10.067
.. _figure 1: http://journal.frontiersin.org/article/10.3389/fnhum.2016.00604/full#F1
.. _figure 2: http://journal.frontiersin.org/article/10.3389/fnhum.2016.00604/full#F2
.. _figure 5: http://journal.frontiersin.org/article/10.3389/fnhum.2016.00604/full#F5
""" # noqa: E501
# Authors: Chris Holdgraf <[email protected]>
# Eric Larson <[email protected]>
# Nicolas Barascud <[email protected]>
#
# License: BSD (3-clause)
# sphinx_gallery_thumbnail_number = 3
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat
from os.path import join
import mne
from mne.decoding import ReceptiveField
from sklearn.model_selection import KFold
from sklearn.preprocessing import scale
###############################################################################
# Load the data from the publication
# ----------------------------------
#
# First we will load the data collected in [1]_. In this experiment subjects
# listened to natural speech. Raw EEG and the speech stimulus are provided.
# We will load these below, downsampling the data in order to speed up
# computation since we know that our features are primarily low-frequency in
# nature. Then we'll visualize both the EEG and speech envelope.
path = mne.datasets.mtrf.data_path()
decim = 2
data = loadmat(join(path, 'speech_data.mat'))
raw = data['EEG'].T
speech = data['envelope'].T
sfreq = float(data['Fs'])
sfreq /= decim
speech = mne.filter.resample(speech, down=decim, npad='auto')
raw = mne.filter.resample(raw, down=decim, npad='auto')
# Read in channel positions and create our MNE objects from the raw data
montage = mne.channels.read_montage('biosemi128')
montage.selection = montage.selection[:128]
info = mne.create_info(montage.ch_names[:128], sfreq, 'eeg', montage=montage)
raw = mne.io.RawArray(raw, info)
n_channels = len(raw.ch_names)
# Plot a sample of brain and stimulus activity
fig, ax = plt.subplots()
lns = ax.plot(scale(raw[:, :800][0].T), color='k', alpha=.1)
ln1 = ax.plot(scale(speech[0, :800]), color='r', lw=2)
ax.legend([lns[0], ln1[0]], ['EEG', 'Speech Envelope'], frameon=False)
ax.set(title="Sample activity", xlabel="Time (s)")
mne.viz.tight_layout()
###############################################################################
# Create and fit a receptive field model
# --------------------------------------
#
# We will construct an encoding model to find the linear relationship between
# a time-delayed version of the speech envelope and the EEG signal. This allows
# us to make predictions about the response to new stimuli.
# Define the delays that we will use in the receptive field
tmin, tmax = -.2, .4
# Initialize the model
rf = ReceptiveField(tmin, tmax, sfreq, feature_names=['envelope'],
estimator=1., scoring='corrcoef')
# We'll have (tmax - tmin) * sfreq delays
# and an extra 2 delays since we are inclusive on the beginning / end index
n_delays = int((tmax - tmin) * sfreq) + 2
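# For example, with tmin = -0.2 s, tmax = 0.4 s and a (hypothetical) sampling
# rate of 64 Hz, n_delays = int(0.6 * 64) + 2 = 40.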
n_splits = 3
cv = KFold(n_splits)
# Prepare model data (make time the first dimension)
speech = speech.T
Y, _ = raw[:] # Outputs for the model
Y = Y.T
# Iterate through splits, fit the model, and predict/test on held-out data
coefs = np.zeros((n_splits, n_channels, n_delays))
scores = np.zeros((n_splits, n_channels))
for ii, (train, test) in enumerate(cv.split(speech)):
print('split %s / %s' % (ii + 1, n_splits))
rf.fit(speech[train], Y[train])
scores[ii] = rf.score(speech[test], Y[test])
# coef_ is shape (n_outputs, n_features, n_delays). we only have 1 feature
coefs[ii] = rf.coef_[:, 0, :]
times = rf.delays_ / float(rf.sfreq)
# Average scores and coefficients across CV splits
mean_coefs = coefs.mean(axis=0)
mean_scores = scores.mean(axis=0)
# Plot mean prediction scores across all channels
fig, ax = plt.subplots()
ix_chs = np.arange(n_channels)
ax.plot(ix_chs, mean_scores)
ax.axhline(0, ls='--', color='r')
ax.set(title="Mean prediction score", xlabel="Channel", ylabel="Score ($r$)")
mne.viz.tight_layout()
###############################################################################
# Investigate model coefficients
# ==============================
# Finally, we will look at how the linear coefficients (sometimes
# referred to as beta values) are distributed across time delays as well as
# across the scalp. We will recreate `figure 1`_ and `figure 2`_ from [1]_.
# Print mean coefficients across all time delays / channels (see Fig 1 in [1])
time_plot = 0.180 # For highlighting a specific time.
fig, ax = plt.subplots(figsize=(4, 8))
max_coef = mean_coefs.max()
ax.pcolormesh(times, ix_chs, mean_coefs, cmap='RdBu_r',
vmin=-max_coef, vmax=max_coef, shading='gouraud')
ax.axvline(time_plot, ls='--', color='k', lw=2)
ax.set(xlabel='Delay (s)', ylabel='Channel', title="Mean Model\nCoefficients",
xlim=times[[0, -1]], ylim=[len(ix_chs) - 1, 0],
xticks=np.arange(tmin, tmax + .2, .2))
plt.setp(ax.get_xticklabels(), rotation=45)
mne.viz.tight_layout()
# Make a topographic map of coefficients for a given delay (see Fig 2C in [1])
ix_plot = np.argmin(np.abs(time_plot - times))
fig, ax = plt.subplots()
mne.viz.plot_topomap(mean_coefs[:, ix_plot], pos=info, axes=ax, show=False,
vmin=-max_coef, vmax=max_coef)
ax.set(title="Topomap of model coefficients\nfor delay %s" % time_plot)
mne.viz.tight_layout()
###############################################################################
# Create and fit a stimulus reconstruction model
# ----------------------------------------------
#
# We will now demonstrate another use case for the
# :class:`mne.decoding.ReceptiveField` class as we try to predict the stimulus
# activity from the EEG data. This is known in the literature as a decoding, or
# stimulus reconstruction model [1]_. A decoding model aims to find the
# relationship between the speech signal and a time-delayed version of the EEG.
# This can be useful as we exploit all of the available neural data in a
# multivariate context, compared to the encoding case which treats each M/EEG
# channel as an independent feature. Therefore, decoding models might provide a
# better quality of fit (at the expense of not controlling for stimulus
# covariance), especially for low SNR stimuli such as speech.
# We use the same lags as in [1]. Negative lags now index the relationship
# between the neural response and the speech envelope earlier in time, whereas
# positive lags would index how a unit change in the amplitude of the EEG would
# affect later stimulus activity (obviously this should have an amplitude of
# zero).
tmin, tmax = -.2, 0.
# Initialize the model. Here the features are the EEG data. We also specify
# ``patterns=True`` to compute inverse-transformed coefficients during model
# fitting (cf. next section). We'll use a ridge regression estimator with an
# alpha value similar to [1].
sr = ReceptiveField(tmin, tmax, sfreq, feature_names=raw.ch_names,
estimator=1e4, scoring='corrcoef', patterns=True)
# We'll have (tmax - tmin) * sfreq delays
# and an extra 2 delays since we are inclusive on the beginning / end index
n_delays = int((tmax - tmin) * sfreq) + 2
n_splits = 3
cv = KFold(n_splits)
# Iterate through splits, fit the model, and predict/test on held-out data
coefs = np.zeros((n_splits, n_channels, n_delays))
patterns = coefs.copy()
scores = np.zeros((n_splits,))
for ii, (train, test) in enumerate(cv.split(speech)):
print('split %s / %s' % (ii + 1, n_splits))
sr.fit(Y[train], speech[train])
scores[ii] = sr.score(Y[test], speech[test])[0]
# coef_ is shape (n_outputs, n_features, n_delays). We have 128 features
coefs[ii] = sr.coef_[0, :, :]
patterns[ii] = sr.patterns_[0, :, :]
times = sr.delays_ / float(sr.sfreq)
# Average scores and coefficients across CV splits
mean_coefs = coefs.mean(axis=0)
mean_patterns = patterns.mean(axis=0)
mean_scores = scores.mean(axis=0)
max_coef = np.abs(mean_coefs).max()
max_patterns = np.abs(mean_patterns).max()
###############################################################################
# Visualize stimulus reconstruction
# =================================
#
# To get a sense of our model performance, we can plot the actual and predicted
# stimulus envelopes side by side.
y_pred = sr.predict(Y[test])
time = np.linspace(0, 2., 5 * int(sfreq))
fig, ax = plt.subplots(figsize=(8, 4))
ax.plot(time, speech[test][sr.valid_samples_][:int(5 * sfreq)],
color='grey', lw=2, ls='--')
ax.plot(time, y_pred[sr.valid_samples_][:int(5 * sfreq)], color='r', lw=2)
ax.legend([lns[0], ln1[0]], ['Envelope', 'Reconstruction'], frameon=False)
ax.set(title="Stimulus reconstruction")
ax.set_xlabel('Time (s)')
mne.viz.tight_layout()
###############################################################################
# Investigate model coefficients
# ==============================
#
# Finally, we will look at how the decoding model coefficients are distributed
# across the scalp. We will attempt to recreate `figure 5`_ from [1]_. The
# decoding model weights reflect the channels that contribute most toward
# reconstructing the stimulus signal, but are not directly interpretable in a
# neurophysiological sense. Here we also look at the coefficients obtained
# via an inversion procedure [2]_, which have a more straightforward
# interpretation as their value (and sign) directly relates to the stimulus
# signal's strength (and effect direction).
time_plot = (-.140, -.125) # To average between two timepoints.
ix_plot = np.arange(np.argmin(np.abs(time_plot[0] - times)),
np.argmin(np.abs(time_plot[1] - times)))
fig, ax = plt.subplots(1, 2)
mne.viz.plot_topomap(np.mean(mean_coefs[:, ix_plot], axis=1),
pos=info, axes=ax[0], show=False,
vmin=-max_coef, vmax=max_coef)
ax[0].set(title="Model coefficients\nbetween delays %s and %s"
% (time_plot[0], time_plot[1]))
mne.viz.plot_topomap(np.mean(mean_patterns[:, ix_plot], axis=1),
pos=info, axes=ax[1],
show=False, vmin=-max_patterns, vmax=max_patterns)
ax[1].set(title="Inverse-transformed coefficients\nbetween delays %s and %s"
% (time_plot[0], time_plot[1]))
mne.viz.tight_layout()
plt.show()
|
bsd-3-clause
|
vincentltz/ns-3-dev-git
|
src/core/examples/sample-rng-plot.py
|
188
|
1246
|
# -*- Mode:Python; -*-
# /*
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License version 2 as
# * published by the Free Software Foundation
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# */
# Demonstrate use of ns-3 as a random number generator integrated with
# plotting tools; adapted from Gustavo Carneiro's ns-3 tutorial
import numpy as np
import matplotlib.pyplot as plt
import ns.core
# mu, var = 100, 225
rng = ns.core.NormalVariable(100.0, 225.0)
x = [rng.GetValue() for t in range(10000)]
# the histogram of the data
n, bins, patches = plt.hist(x, 50, normed=1, facecolor='g', alpha=0.75)
plt.title('ns-3 histogram')
plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
plt.axis([40, 160, 0, 0.03])
plt.grid(True)
plt.show()
|
gpl-2.0
|
daviddesancho/mdtraj
|
mdtraj/utils/validation.py
|
11
|
8147
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
##############################################################################
# imports
##############################################################################
from __future__ import print_function, division
import warnings
import numbers
import numpy as np
import collections
from mdtraj.utils.six.moves import zip_longest
##############################################################################
# functions / classes
##############################################################################
class TypeCastPerformanceWarning(RuntimeWarning):
pass
def ensure_type(val, dtype, ndim, name, length=None, can_be_none=False, shape=None,
warn_on_cast=True, add_newaxis_on_deficient_ndim=False):
"""Typecheck the size, shape and dtype of a numpy array, with optional
casting.
Parameters
----------
    val : {np.ndarray, None}
The array to check
dtype : {nd.dtype, str}
The dtype you'd like the array to have
ndim : int
The number of dimensions you'd like the array to have
name : str
name of the array. This is used when throwing exceptions, so that
we can describe to the user which array is messed up.
length : int, optional
How long should the array be?
can_be_none : bool
Is ``val == None`` acceptable?
shape : tuple, optional
        What should the shape of the array be? If the provided tuple has
Nones in it, those will be semantically interpreted as matching
any length in that dimension. So, for example, using the shape
spec ``(None, None, 3)`` will ensure that the last dimension is of
length three without constraining the first two dimensions
warn_on_cast : bool, default=True
Raise a warning when the dtypes don't match and a cast is done.
    add_newaxis_on_deficient_ndim : bool, default=False
        Add a new axis to the beginning of the array if the number of
dimensions is deficient by one compared to your specification. For
instance, if you're trying to get out an array of ``ndim == 3``,
but the user provides an array of ``shape == (10, 10)``, a new axis will
be created with length 1 in front, so that the return value is of
shape ``(1, 10, 10)``.
Notes
-----
The returned value will always be C-contiguous.
Returns
-------
typechecked_val : np.ndarray, None
If `val=None` and `can_be_none=True`, then this will return None.
Otherwise, it will return val (or a copy of val). If the dtype wasn't right,
it'll be casted to the right shape. If the array was not C-contiguous, it'll
be copied as well.
"""
if can_be_none and val is None:
return None
if not isinstance(val, np.ndarray):
if isinstance(val, collections.Iterable):
# If they give us an iterator, let's try...
if isinstance(val, collections.Sequence):
# sequences are easy. these are like lists and stuff
val = np.array(val, dtype=dtype)
else:
# this is a generator...
val = np.array(list(val), dtype=dtype)
elif np.isscalar(val) and add_newaxis_on_deficient_ndim and ndim == 1:
# special case: if the user is looking for a 1d array, and
# they request newaxis upconversion, and provided a scalar
# then we should reshape the scalar to be a 1d length-1 array
val = np.array([val])
else:
raise TypeError(("%s must be numpy array. "
" You supplied type %s" % (name, type(val))))
if warn_on_cast and val.dtype != dtype:
warnings.warn("Casting %s dtype=%s to %s " % (name, val.dtype, dtype),
TypeCastPerformanceWarning)
if not val.ndim == ndim:
if add_newaxis_on_deficient_ndim and val.ndim + 1 == ndim:
val = val[np.newaxis, ...]
else:
raise ValueError(("%s must be ndim %s. "
"You supplied %s" % (name, ndim, val.ndim)))
val = np.ascontiguousarray(val, dtype=dtype)
if length is not None and len(val) != length:
raise ValueError(("%s must be length %s. "
"You supplied %s" % (name, length, len(val))))
if shape is not None:
        # the shape spec given by the user can look like (None, None, 3)
# which indicates that ANY length is accepted in dimension 0 or
# dimension 1
sentenel = object()
error = ValueError(("%s must be shape %s. You supplied "
"%s" % (name, str(shape).replace('None', 'Any'), val.shape)))
for a, b in zip_longest(val.shape, shape, fillvalue=sentenel):
if a is sentenel or b is sentenel:
# if the sentenel was reached, it means that the ndim didn't
# match or something. this really shouldn't happen
raise error
if b is None:
# if the user's shape spec has a None in it, it matches anything
continue
if a != b:
# check for equality
raise error
return val
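# Hedged usage sketch (illustrative only, not part of the original module):
#     >>> val = ensure_type(np.zeros((10, 3)), np.float32, ndim=3, name='xyz',
#     ...                   add_newaxis_on_deficient_ndim=True)
#     >>> val.shape
#     (1, 10, 3)
# A TypeCastPerformanceWarning is emitted because the float64 input is cast
# down to float32.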
def cast_indices(indices):
"""Check that ``indices`` are appropriate for indexing an array
Parameters
----------
indices : {None, array_like, slice}
If indices is None or slice, it'll just pass through. Otherwise, it'll
be converted to a numpy array and checked to make sure it contains
unique integers.
Returns
-------
value : {slice, np.ndarray}
Either a slice or an array of integers, depending on the input type
"""
if indices is None or isinstance(indices, slice):
return indices
if not len(indices) == len(set(indices)):
raise ValueError("indices must be unique.")
out = np.asarray(indices)
if not issubclass(out.dtype.type, np.integer):
raise ValueError('indices must be of an integer type. %s is not an integer type' % out.dtype)
return out
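# Hedged usage sketch (illustrative only):
#     >>> cast_indices([2, 0, 1])      # -> array([2, 0, 1])
#     >>> cast_indices(slice(5))       # slices pass through unchanged
#     >>> cast_indices([0, 0, 1])      # raises ValueError (duplicate indices)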
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
Parameters
----------
seed : {None, int, RandomState}
Seed for a random number generator
Returns
-------
randomstate : RandomState
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
    # This code is directly from the scikit-learn project (sklearn/utils/validation.py)
# Authors: Olivier Grisel and Gael Varoquaux and others (please update me)
# License: BSD 3 clause
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
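# Hedged usage sketch (illustrative only):
#     >>> check_random_state(None) is np.random.mtrand._rand
#     True
#     >>> check_random_state(0)        # a fresh RandomState seeded with 0
#     >>> check_random_state('abc')    # raises ValueError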
|
lgpl-2.1
|
luo66/scikit-learn
|
sklearn/metrics/tests/test_regression.py
|
272
|
6066
|
from __future__ import division, print_function
import numpy as np
from itertools import product
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics.regression import _check_reg_targets
def test_regression_metrics(n_samples=50):
y_true = np.arange(n_samples)
y_pred = y_true + 1
assert_almost_equal(mean_squared_error(y_true, y_pred), 1.)
assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(median_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2)
assert_almost_equal(explained_variance_score(y_true, y_pred), 1.)
def test_multioutput_regression():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
error = mean_squared_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
error = mean_absolute_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
error = r2_score(y_true, y_pred, multioutput='variance_weighted')
assert_almost_equal(error, 1. - 5. / 2)
error = r2_score(y_true, y_pred, multioutput='uniform_average')
assert_almost_equal(error, -.875)
def test_regression_metrics_at_limits():
assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)
assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)
assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)
def test__check_reg_targets():
# All of length 3
EXAMPLES = [
("continuous", [1, 2, 3], 1),
("continuous", [[1], [2], [3]], 1),
("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2),
("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2),
("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),
]
for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES,
repeat=2):
if type1 == type2 and n_out1 == n_out2:
y_type, y_check1, y_check2, multioutput = _check_reg_targets(
y1, y2, None)
assert_equal(type1, y_type)
if type1 == 'continuous':
assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))
assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))
else:
assert_array_equal(y_check1, y1)
assert_array_equal(y_check2, y2)
else:
assert_raises(ValueError, _check_reg_targets, y1, y2, None)
def test_regression_multioutput_array():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)
assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)
assert_array_almost_equal(r, [0.95, 0.93], decimal=2)
assert_array_almost_equal(evs, [0.95, 0.93], decimal=2)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
y_true = [[0, 0]]*4
y_pred = [[1, 1]]*4
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [1., 1.], decimal=2)
assert_array_almost_equal(mae, [1., 1.], decimal=2)
assert_array_almost_equal(r, [0., 0.], decimal=2)
r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='raw_values')
assert_array_almost_equal(r, [0, -3.5], decimal=2)
assert_equal(np.mean(r), r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='uniform_average'))
evs = explained_variance_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='raw_values')
assert_array_almost_equal(evs, [0, -1.25], decimal=2)
    # Checking for the condition in which both the numerator and the
    # denominator are zero.
y_true = [[1, 3], [-1, 2]]
y_pred = [[1, 4], [-1, 1]]
r2 = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(r2, [1., -3.], decimal=2)
assert_equal(np.mean(r2), r2_score(y_true, y_pred,
multioutput='uniform_average'))
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(evs, [1., -3.], decimal=2)
assert_equal(np.mean(evs), explained_variance_score(y_true, y_pred))
def test_regression_custom_weights():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6])
maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6])
rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6])
evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6])
assert_almost_equal(msew, 0.39, decimal=2)
assert_almost_equal(maew, 0.475, decimal=3)
assert_almost_equal(rw, 0.94, decimal=2)
assert_almost_equal(evsw, 0.94, decimal=2)
|
bsd-3-clause
|
thuml/HashNet
|
caffe/examples/finetune_flickr_style/assemble_data.py
|
38
|
3636
|
#!/usr/bin/env python
"""
Form a subset of the Flickr Style data, download images to dirname, and write
Caffe ImagesDataLayer training file.
"""
import os
import urllib
import hashlib
import argparse
import numpy as np
import pandas as pd
from skimage import io
import multiprocessing
# Flickr returns a special image if the requested photo is unavailable.
MISSING_IMAGE_SHA1 = '6a92790b1c2a301c6e7ddef645dca1f53ea97ac2'
example_dirname = os.path.abspath(os.path.dirname(__file__))
caffe_dirname = os.path.abspath(os.path.join(example_dirname, '../..'))
training_dirname = os.path.join(caffe_dirname, 'data/flickr_style')
def download_image(args_tuple):
"For use with multiprocessing map. Returns filename on fail."
try:
url, filename = args_tuple
if not os.path.exists(filename):
urllib.urlretrieve(url, filename)
with open(filename) as f:
assert hashlib.sha1(f.read()).hexdigest() != MISSING_IMAGE_SHA1
test_read_image = io.imread(filename)
return True
except KeyboardInterrupt:
raise Exception() # multiprocessing doesn't catch keyboard exceptions
except:
return False
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Download a subset of Flickr Style to a directory')
parser.add_argument(
'-s', '--seed', type=int, default=0,
help="random seed")
parser.add_argument(
'-i', '--images', type=int, default=-1,
help="number of images to use (-1 for all [default])",
)
parser.add_argument(
'-w', '--workers', type=int, default=-1,
help="num workers used to download images. -x uses (all - x) cores [-1 default]."
)
parser.add_argument(
'-l', '--labels', type=int, default=0,
help="if set to a positive value, only sample images from the first number of labels."
)
args = parser.parse_args()
np.random.seed(args.seed)
# Read data, shuffle order, and subsample.
csv_filename = os.path.join(example_dirname, 'flickr_style.csv.gz')
df = pd.read_csv(csv_filename, index_col=0, compression='gzip')
df = df.iloc[np.random.permutation(df.shape[0])]
if args.labels > 0:
df = df.loc[df['label'] < args.labels]
if args.images > 0 and args.images < df.shape[0]:
df = df.iloc[:args.images]
# Make directory for images and get local filenames.
if training_dirname is None:
training_dirname = os.path.join(caffe_dirname, 'data/flickr_style')
images_dirname = os.path.join(training_dirname, 'images')
if not os.path.exists(images_dirname):
os.makedirs(images_dirname)
df['image_filename'] = [
os.path.join(images_dirname, _.split('/')[-1]) for _ in df['image_url']
]
# Download images.
num_workers = args.workers
if num_workers <= 0:
num_workers = multiprocessing.cpu_count() + num_workers
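        # e.g. the default -1 leaves one core free (7 workers on an 8-core
        # machine); -2 would leave two cores free.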
print('Downloading {} images with {} workers...'.format(
df.shape[0], num_workers))
pool = multiprocessing.Pool(processes=num_workers)
map_args = zip(df['image_url'], df['image_filename'])
results = pool.map(download_image, map_args)
# Only keep rows with valid images, and write out training file lists.
df = df[results]
for split in ['train', 'test']:
split_df = df[df['_split'] == split]
filename = os.path.join(training_dirname, '{}.txt'.format(split))
split_df[['image_filename', 'label']].to_csv(
filename, sep=' ', header=None, index=None)
print('Writing train/val for {} successfully downloaded images.'.format(
df.shape[0]))
|
mit
|
jaidevd/scikit-learn
|
sklearn/feature_selection/rfe.py
|
33
|
16667
|
# Authors: Alexandre Gramfort <[email protected]>
# Vincent Michel <[email protected]>
# Gilles Louppe <[email protected]>
#
# License: BSD 3 clause
"""Recursive feature elimination for feature ranking"""
import numpy as np
from ..utils import check_X_y, safe_sqr
from ..utils.metaestimators import if_delegate_has_method
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..externals.joblib import Parallel, delayed
from ..model_selection import check_cv
from ..model_selection._validation import _safe_split, _score
from ..metrics.scorer import check_scoring
from .base import SelectorMixin
def _rfe_single_fit(rfe, estimator, X, y, train, test, scorer):
"""
Return the score for a fit across one fold.
"""
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
return rfe._fit(
X_train, y_train, lambda estimator, features:
_score(estimator, X_test[:, features], y_test, scorer)).scores_
class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):
"""Feature ranking with recursive feature elimination.
Given an external estimator that assigns weights to features (e.g., the
coefficients of a linear model), the goal of recursive feature elimination
(RFE) is to select features by recursively considering smaller and smaller
sets of features. First, the estimator is trained on the initial set of
features and weights are assigned to each one of them. Then, features whose
    absolute weights are the smallest are pruned from the current set of features.
That procedure is recursively repeated on the pruned set until the desired
number of features to select is eventually reached.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
n_features_to_select : int or None (default=None)
The number of features to select. If `None`, half of the features
are selected.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that ``ranking_[i]`` corresponds to the
ranking position of the i-th feature. Selected (i.e., estimated
best) features are assigned rank 1.
estimator_ : object
The external estimator fit on the reduced dataset.
Examples
--------
    The following example shows how to retrieve the 5 informative
features in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFE
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFE(estimator, 5, step=1)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, n_features_to_select=None, step=1,
verbose=0):
self.estimator = estimator
self.n_features_to_select = n_features_to_select
self.step = step
self.verbose = verbose
@property
def _estimator_type(self):
return self.estimator._estimator_type
def fit(self, X, y):
"""Fit the RFE model and then the underlying estimator on the selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values.
"""
return self._fit(X, y)
def _fit(self, X, y, step_score=None):
X, y = check_X_y(X, y, "csc")
# Initialization
n_features = X.shape[1]
if self.n_features_to_select is None:
n_features_to_select = n_features // 2
else:
n_features_to_select = self.n_features_to_select
if 0.0 < self.step < 1.0:
step = int(max(1, self.step * n_features))
else:
step = int(self.step)
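        # e.g. with 100 features, step=0.1 removes int(max(1, 10.0)) = 10
        # features per iteration, while step=3 removes 3 per iteration.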
if step <= 0:
raise ValueError("Step must be >0")
support_ = np.ones(n_features, dtype=np.bool)
ranking_ = np.ones(n_features, dtype=np.int)
if step_score:
self.scores_ = []
# Elimination
while np.sum(support_) > n_features_to_select:
# Remaining features
features = np.arange(n_features)[support_]
# Rank the remaining features
estimator = clone(self.estimator)
if self.verbose > 0:
print("Fitting estimator with %d features." % np.sum(support_))
estimator.fit(X[:, features], y)
# Get coefs
if hasattr(estimator, 'coef_'):
coefs = estimator.coef_
else:
coefs = getattr(estimator, 'feature_importances_', None)
if coefs is None:
raise RuntimeError('The classifier does not expose '
'"coef_" or "feature_importances_" '
'attributes')
# Get ranks
if coefs.ndim > 1:
ranks = np.argsort(safe_sqr(coefs).sum(axis=0))
else:
ranks = np.argsort(safe_sqr(coefs))
# for sparse case ranks is matrix
ranks = np.ravel(ranks)
            # Eliminate the worst features
threshold = min(step, np.sum(support_) - n_features_to_select)
# Compute step score on the previous selection iteration
# because 'estimator' must use features
# that have not been eliminated yet
if step_score:
self.scores_.append(step_score(estimator, features))
support_[features[ranks][:threshold]] = False
ranking_[np.logical_not(support_)] += 1
# Set final attributes
features = np.arange(n_features)[support_]
self.estimator_ = clone(self.estimator)
self.estimator_.fit(X[:, features], y)
# Compute step score when only n_features_to_select features left
if step_score:
self.scores_.append(step_score(self.estimator_, features))
self.n_features_ = support_.sum()
self.support_ = support_
self.ranking_ = ranking_
return self
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Reduce X to the selected features and then predict using the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
"""
return self.estimator_.predict(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def score(self, X, y):
"""Reduce X to the selected features and then return the score of the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.
"""
return self.estimator_.score(self.transform(X), y)
def _get_support_mask(self):
return self.support_
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
return self.estimator_.decision_function(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
return self.estimator_.predict_proba(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
return self.estimator_.predict_log_proba(self.transform(X))
class RFECV(RFE, MetaEstimatorMixin):
"""Feature ranking with recursive feature elimination and cross-validated
selection of the best number of features.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
        For integer/None inputs, if the estimator is a classifier and ``y``
        is either binary or multiclass,
        :class:`sklearn.model_selection.StratifiedKFold` is used. In all
        other cases, :class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int, default=0
Controls verbosity of output.
n_jobs : int, default 1
Number of cores to run in parallel while fitting across folds.
Defaults to 1 core. If `n_jobs=-1`, then number of jobs is set
to number of cores.
Attributes
----------
n_features_ : int
The number of selected features with cross-validation.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that `ranking_[i]`
corresponds to the ranking
position of the i-th feature.
Selected (i.e., estimated best)
features are assigned rank 1.
grid_scores_ : array of shape [n_subsets_of_features]
The cross-validation scores such that
``grid_scores_[i]`` corresponds to
the CV score of the i-th subset of features.
estimator_ : object
The external estimator fit on the reduced dataset.
Notes
-----
The size of ``grid_scores_`` is equal to ceil((n_features - 1) / step) + 1,
where step is the number of features removed at each iteration.
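    For example, with 25 features and ``step=5`` this gives
    ceil(24 / 5) + 1 = 6 entries.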
Examples
--------
    The following example shows how to retrieve the 5 informative features
    (not known a priori) in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFECV
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFECV(estimator, step=1, cv=5)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, step=1, cv=None, scoring=None, verbose=0,
n_jobs=1):
self.estimator = estimator
self.step = step
self.cv = cv
self.scoring = scoring
self.verbose = verbose
self.n_jobs = n_jobs
def fit(self, X, y):
"""Fit the RFE model and automatically tune the number of selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like, shape = [n_samples]
Target values (integers for classification, real numbers for
regression).
"""
X, y = check_X_y(X, y, "csr")
# Initialization
cv = check_cv(self.cv, y, is_classifier(self.estimator))
scorer = check_scoring(self.estimator, scoring=self.scoring)
n_features = X.shape[1]
n_features_to_select = 1
if 0.0 < self.step < 1.0:
step = int(max(1, self.step * n_features))
else:
step = int(self.step)
if step <= 0:
raise ValueError("Step must be >0")
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, verbose=self.verbose)
# Determine the number of subsets of features by fitting across
# the train folds and choosing the "features_to_select" parameter
# that gives the least averaged error across all folds.
# Note that joblib raises a non-picklable error for bound methods
# even if n_jobs is set to 1 with the default multiprocessing
# backend.
        # This branching is done to make sure that user code that sets
        # n_jobs to 1 and provides bound methods as scorers is not broken
        # with the addition of the n_jobs parameter in version 0.18.
if self.n_jobs == 1:
parallel, func = list, _rfe_single_fit
else:
parallel, func, = Parallel(n_jobs=self.n_jobs), delayed(_rfe_single_fit)
scores = parallel(
func(rfe, self.estimator, X, y, train, test, scorer)
for train, test in cv.split(X, y))
scores = np.sum(scores, axis=0)
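        # `scores` now has one entry per candidate subset size, summed over
        # the CV folds; it is normalized by the number of splits below.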
n_features_to_select = max(
n_features - (np.argmax(scores) * step),
n_features_to_select)
# Re-execute an elimination with best_k over the whole set
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select, step=self.step)
rfe.fit(X, y)
# Set final attributes
self.support_ = rfe.support_
self.n_features_ = rfe.n_features_
self.ranking_ = rfe.ranking_
self.estimator_ = clone(self.estimator)
self.estimator_.fit(self.transform(X), y)
        # The scores were summed over the CV folds above, so normalize them
        # by the number of splits, get_n_splits(X, y), and reverse them so
        # that grid_scores_[i] corresponds to the i-th subset of features.
self.grid_scores_ = scores[::-1] / cv.get_n_splits(X, y)
return self
|
bsd-3-clause
|
filipkilibarda/Ants-on-a-Polygon
|
simulation.py
|
1
|
3153
|
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from math import pi,cos,sin,sqrt
import numpy as np
import ants
def calcAnalyticalSolution():
ngon = ants.Ngon(NUMBER_OF_ANTS)
phi = ngon.getInteriorAngle()
    initialDistanceAnts = 2*INITIAL_DISTANCE_ORIGIN*sin(2*pi/NUMBER_OF_ANTS/2)
    return initialDistanceAnts/(SPEED*(1-sin(phi-pi/2)))
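# Sanity check on the formula above: for a square (4 ants), the interior
# angle phi is pi/2, so 1 - sin(phi - pi/2) = 1 and the expected time is
# just the initial side length divided by the speed -- the classic
# "four bugs on a square" result.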
NUMBER_OF_ANTS = 16
SPEED = 1
INITIAL_DISTANCE_ORIGIN = 1
if __name__ == "__main__":
kwargs = {
"antGroup": ants.AntGroup(NUMBER_OF_ANTS),
"maxFrames": 2**20,
"frameReductionFactor": 2**7,
"alpha": 1/1000,
}
simulationManager = ants.SimulationManager(**kwargs)
simulationManager.runSimulation()
def init():
"""initialize animation"""
analy_text.set_text('Expected time = %.10f' %
calcAnalyticalSolution())
return (time_text,)
def animate(i):
"""perform animation step"""
if i >= simulationManager.getNumFramesUsedAfterReduction():
i = simulationManager.getNumFramesUsedAfterReduction()
dots.set_data(
simulationManager.getIthXPositions(i),
simulationManager.getIthYPositions(i)
)
time_text.set_text('Elapsed time = %.10f' %
simulationManager.getIthTimeElapsed(i))
distance_text.set_text('Distance between ants = %.10f' %
simulationManager.getIthDistanceBetweenAnts(i))
return (dots, time_text, distance_text,)
###########################################################
# Setup plot
###########################################################
# set up figure and animation
fig = plt.figure()
ax = fig.add_subplot(111, aspect='equal', autoscale_on=False,
xlim=(-INITIAL_DISTANCE_ORIGIN,
INITIAL_DISTANCE_ORIGIN),
ylim=(-INITIAL_DISTANCE_ORIGIN,
INITIAL_DISTANCE_ORIGIN))
# dots to go on the plot
dots, = ax.plot([], 'bo', ms=.3)
# declare the text that indicates elapsed time
time_text = ax.text(0.02, 0.90, '', transform=ax.transAxes)
    # text that indicates the analytical solution
analy_text = ax.text(0.02, 0.95, '', transform=ax.transAxes)
# text that indicates the distance between each ant
distance_text = ax.text(0.02, 0.85, '', transform=ax.transAxes)
"""
    `interval` is the length of time (in milliseconds) that the animation
    pauses between frames. The time needed to compute each frame depends on
    how complicated the calculation is; `interval` is the extra pause added
    before the next frame is drawn.
"""
interval = 20
# number of frame steps to rest on the last frame
pause = 100
ani = animation.FuncAnimation(fig, animate,
frames=simulationManager.getNumFramesUsedAfterReduction()+pause,
interval=interval,
blit=True,
init_func=init,
repeat=False)
ani.save('imgs/ani.gif', writer='imagemagick', fps=50)
# plt.show()
|
mit
|
ozak/geopandas
|
geopandas/io/tests/test_io.py
|
1
|
6230
|
from __future__ import absolute_import
from collections import OrderedDict
import fiona
import pytest
from shapely.geometry import box
import geopandas
from geopandas import read_postgis, read_file
from geopandas.tests.util import connect, create_postgis, validate_boro_df
@pytest.fixture
def nybb_df():
nybb_path = geopandas.datasets.get_path('nybb')
df = read_file(nybb_path)
return df
class TestIO:
def setup_method(self):
nybb_zip_path = geopandas.datasets.get_path('nybb')
self.df = read_file(nybb_zip_path)
with fiona.open(nybb_zip_path) as f:
self.crs = f.crs
self.columns = list(f.meta["schema"]["properties"].keys())
def test_read_postgis_default(self):
con = connect('test_geopandas')
if con is None or not create_postgis(self.df):
raise pytest.skip()
try:
sql = "SELECT * FROM nybb;"
df = read_postgis(sql, con)
finally:
con.close()
validate_boro_df(df)
        # no crs was defined on the created geodatabase and none was
        # specified by the user, so crs should be None (not 0, which would
        # come from a get_srid failure)
assert df.crs is None
def test_read_postgis_custom_geom_col(self):
con = connect('test_geopandas')
geom_col = "the_geom"
if con is None or not create_postgis(self.df, geom_col=geom_col):
raise pytest.skip()
try:
sql = "SELECT * FROM nybb;"
df = read_postgis(sql, con, geom_col=geom_col)
finally:
con.close()
validate_boro_df(df)
def test_read_postgis_select_geom_as(self):
"""Tests that a SELECT {geom} AS {some_other_geom} works."""
con = connect('test_geopandas')
orig_geom = "geom"
out_geom = "the_geom"
if con is None or not create_postgis(self.df, geom_col=orig_geom):
raise pytest.skip()
try:
sql = """SELECT borocode, boroname, shape_leng, shape_area,
{} as {} FROM nybb;""".format(orig_geom, out_geom)
df = read_postgis(sql, con, geom_col=out_geom)
finally:
con.close()
validate_boro_df(df)
def test_read_postgis_get_srid(self):
"""Tests that an SRID can be read from a geodatabase (GH #451)."""
crs = {"init": "epsg:4269"}
df_reproj = self.df.to_crs(crs)
created = create_postgis(df_reproj, srid=4269)
con = connect('test_geopandas')
if con is None or not created:
raise pytest.skip()
try:
sql = "SELECT * FROM nybb;"
df = read_postgis(sql, con)
finally:
con.close()
validate_boro_df(df)
assert(df.crs == crs)
def test_read_postgis_override_srid(self):
"""Tests that a user specified CRS overrides the geodatabase SRID."""
orig_crs = self.df.crs
created = create_postgis(self.df, srid=4269)
con = connect('test_geopandas')
if con is None or not created:
raise pytest.skip()
try:
sql = "SELECT * FROM nybb;"
df = read_postgis(sql, con, crs=orig_crs)
finally:
con.close()
validate_boro_df(df)
assert(df.crs == orig_crs)
def test_read_file(self):
df = self.df.rename(columns=lambda x: x.lower())
validate_boro_df(df)
assert df.crs == self.crs
# get lower case columns, and exclude geometry column from comparison
lower_columns = [c.lower() for c in self.columns]
assert (df.columns[:-1] == lower_columns).all()
@pytest.mark.web
def test_remote_geojson_url(self):
url = ("https://raw.githubusercontent.com/geopandas/geopandas/"
"master/examples/null_geom.geojson")
gdf = read_file(url)
assert isinstance(gdf, geopandas.GeoDataFrame)
def test_filtered_read_file(self):
full_df_shape = self.df.shape
nybb_filename = geopandas.datasets.get_path('nybb')
bbox = (1031051.7879884212, 224272.49231459625, 1047224.3104931959,
244317.30894023244)
filtered_df = read_file(nybb_filename, bbox=bbox)
filtered_df_shape = filtered_df.shape
assert full_df_shape != filtered_df_shape
assert filtered_df_shape == (2, 5)
def test_filtered_read_file_with_gdf_boundary(self):
full_df_shape = self.df.shape
nybb_filename = geopandas.datasets.get_path('nybb')
bbox = geopandas.GeoDataFrame(
geometry=[box(1031051.7879884212, 224272.49231459625,
1047224.3104931959, 244317.30894023244)],
crs=self.crs)
filtered_df = read_file(nybb_filename, bbox=bbox)
filtered_df_shape = filtered_df.shape
assert full_df_shape != filtered_df_shape
assert filtered_df_shape == (2, 5)
def test_filtered_read_file_with_gdf_boundary_mismatched_crs(self):
full_df_shape = self.df.shape
nybb_filename = geopandas.datasets.get_path('nybb')
bbox = geopandas.GeoDataFrame(
geometry=[box(1031051.7879884212, 224272.49231459625,
1047224.3104931959, 244317.30894023244)],
crs=self.crs)
bbox.to_crs(epsg=4326, inplace=True)
filtered_df = read_file(nybb_filename, bbox=bbox)
filtered_df_shape = filtered_df.shape
assert full_df_shape != filtered_df_shape
assert filtered_df_shape == (2, 5)
def test_empty_shapefile(self, tmpdir):
# create empty shapefile
meta = {'crs': {},
'crs_wkt': '',
'driver': 'ESRI Shapefile',
'schema':
{'geometry': 'Point',
'properties': OrderedDict([('A', 'int:9'),
('Z', 'float:24.15')])}}
fname = str(tmpdir.join("test_empty.shp"))
with fiona.drivers():
with fiona.open(fname, 'w', **meta) as _:
pass
empty = read_file(fname)
assert isinstance(empty, geopandas.GeoDataFrame)
assert all(empty.columns == ['A', 'Z', 'geometry'])
|
bsd-3-clause
|
yonglehou/scikit-learn
|
sklearn/linear_model/tests/test_perceptron.py
|
378
|
1815
|
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
class MyPerceptron(object):
def __init__(self, n_iter=1):
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
if self.predict(X[i])[0] != y[i]:
self.w += y[i] * X[i]
self.b += y[i]
def project(self, X):
return np.dot(X, self.w) + self.b
def predict(self, X):
X = np.atleast_2d(X)
return np.sign(self.project(X))
def test_perceptron_accuracy():
for data in (X, X_csr):
clf = Perceptron(n_iter=30, shuffle=False)
clf.fit(data, y)
score = clf.score(data, y)
assert_true(score >= 0.7)
def test_perceptron_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
clf1 = MyPerceptron(n_iter=2)
clf1.fit(X, y_bin)
clf2 = Perceptron(n_iter=2, shuffle=False)
clf2.fit(X, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
def test_undefined_methods():
clf = Perceptron()
for meth in ("predict_proba", "predict_log_proba"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
|
bsd-3-clause
|
ryan413/gotem
|
html_to_databases/reuters_feed.py
|
1
|
3081
|
import os, sys
import time
import codecs
import configparser
from collections import OrderedDict
from time import sleep
import redis
import lxml.html
import pandas as pd
from bs4 import BeautifulSoup
from selenium import webdriver
from gotem import Gotem
config = configparser.ConfigParser()
config.read(r'C:\Projects\@Scripts\CONFIG\urls_r.cfg')
URL_WEBSITE = config['Settings']['URL_WEBSITE']
URL_REST = config['Settings']['URL_REST']
file_ = 'r.xlsx'
r1 = redis.StrictRedis(host='localhost', port=6379, db=13)
rfilename = codecs.open(file_, "r", encoding="utf-8").read()
burl = r''
feed = OrderedDict()
dfxls = pd.read_excel(file_)
row = 1
turl = URL_WEBSITE + URL_REST + r'&firstRow={}'.format(row)
df = pd.read_csv('r.csv')  # cached feed written by a previous run (see df.to_csv at the end)
feed_pages = []
last = df.iloc[0]
class r(object):
def __init__(self):
self.r_news_urls = {}
self.url_start = URL_WEBSITE
def generate_urls(self):
for x in range(0,10):
self.r_news_urls[x]=URL_WEBSITE+URL_REST+r"&firstRow={0}".format(x*20)
ifeed_pages = grab_page()
for page in feed_pages:
soup = BeautifulSoup(page.decode('utf-8'),'lxml')
html = lxml.html.fromstring(soup.prettify())
for row in range(1,18):
hl = OrderedDict()
xpath_time = r'//*[@id="headlinesToday"]/div[{}]/div[{}]'.format(row, 1)
xpath_content = r'//*[@id="headlinesToday"]/div[{}]/div[{}]'.format(row, 2)
html.xpath(xpath_content)[0].make_links_absolute(URL_WEBSITE)
item=len(feed)+1
hl['headline_time'] = str(html.xpath(xpath_time)[0].text).strip()
hl['headline_text'] = str(html.xpath(xpath_content)[0][0].text).strip()
hl['headline_source'] = str(html.xpath(xpath_content)[0][1].text).strip()
hl['headline_link'] = html.xpath(xpath_content)[0][0].attrib['href']
hl['headline_content']= ""
feed[item]=hl
def check_r_times(feed):
time = list(list(list(feed.items())[0])[1].values())[0]
for x in range(0,5):
urls[x]=URL_WEBSITE+r"&firstRow={0}".format(x*20)
import time
import datetime
time_current_headline = list(list(list(feed.items())[0])[1].values())[0]
time_current_headline = datetime.datetime.strptime(list(list(list(feed.items())[0])[1].values())[0].replace(' EST',""),'%H:%M%p')
time_last_headline_saved = datetime.datetime.strptime(dfxls.iloc[0].headline_time.replace(' EST',""),'%H:%M%p')
last_saved_headline < latest_headline_time
last = dfxls.iloc[0]
lhour = last_headling_time.split(" ")[0][:2]
lmin = last_headling_time.split(" ")[0][:-2][-2:]
lAMPM = last_headling_time.split(" ")[0][-2:]
last_saved_headline > latest_headline_time
headline_content = lambda url:get_headline_content(url)
df['headline_content']=df['headline_link'].apply(headline_content)
df = pd.DataFrame(feed,index=['headline_time', 'headline_text', 'headline_source', 'headline_content','headline_link']).transpose()
def get_headline_content(url):
headline_xpath = r'//*[@id="newsStory"]'
req = Request_Url(url)
print(url)
content, response = req.GET()
return headline_article
df.to_csv('r.csv')
|
mit
|
jzt5132/scikit-learn
|
examples/cluster/plot_cluster_comparison.py
|
246
|
4684
|
"""
=========================================================
Comparing different clustering algorithms on toy datasets
=========================================================
This example aims at showing characteristics of different
clustering algorithms on datasets that are "interesting"
but still in 2D. The last dataset is an example of a 'null'
situation for clustering: the data is homogeneous, and
there is no good clustering.
While these examples give some intuition about the algorithms,
this intuition might not apply to very high dimensional data.
The results could be improved by tweaking the parameters for
each clustering strategy, for instance setting the number of
clusters for the methods that need this parameter
specified. Note that affinity propagation has a tendency to
create many clusters. Thus in this example its two parameters
(damping and per-point preference) were set to mitigate this
behavior.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
np.random.seed(0)
# Generate datasets. We choose the size big enough to see the scalability
# of the algorithms, but not too big to avoid too long running times
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None
colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)
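# The 7-color cycle is tiled many times so there are enough colors even
# when an algorithm (e.g. affinity propagation) finds many clusters.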
clustering_names = [
'MiniBatchKMeans', 'AffinityPropagation', 'MeanShift',
'SpectralClustering', 'Ward', 'AgglomerativeClustering',
'DBSCAN', 'Birch']
plt.figure(figsize=(len(clustering_names) * 2 + 3, 9.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
hspace=.01)
plot_num = 1
datasets = [noisy_circles, noisy_moons, blobs, no_structure]
for i_dataset, dataset in enumerate(datasets):
X, y = dataset
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
# create clustering estimators
ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
two_means = cluster.MiniBatchKMeans(n_clusters=2)
ward = cluster.AgglomerativeClustering(n_clusters=2, linkage='ward',
connectivity=connectivity)
spectral = cluster.SpectralClustering(n_clusters=2,
eigen_solver='arpack',
affinity="nearest_neighbors")
dbscan = cluster.DBSCAN(eps=.2)
affinity_propagation = cluster.AffinityPropagation(damping=.9,
preference=-200)
average_linkage = cluster.AgglomerativeClustering(
linkage="average", affinity="cityblock", n_clusters=2,
connectivity=connectivity)
birch = cluster.Birch(n_clusters=2)
clustering_algorithms = [
two_means, affinity_propagation, ms, spectral, ward, average_linkage,
dbscan, birch]
for name, algorithm in zip(clustering_names, clustering_algorithms):
# predict cluster memberships
t0 = time.time()
algorithm.fit(X)
t1 = time.time()
if hasattr(algorithm, 'labels_'):
y_pred = algorithm.labels_.astype(np.int)
else:
y_pred = algorithm.predict(X)
# plot
plt.subplot(4, len(clustering_algorithms), plot_num)
if i_dataset == 0:
plt.title(name, size=18)
plt.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), s=10)
if hasattr(algorithm, 'cluster_centers_'):
centers = algorithm.cluster_centers_
center_colors = colors[:len(centers)]
plt.scatter(centers[:, 0], centers[:, 1], s=100, c=center_colors)
plt.xlim(-2, 2)
plt.ylim(-2, 2)
plt.xticks(())
plt.yticks(())
plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
transform=plt.gca().transAxes, size=15,
horizontalalignment='right')
plot_num += 1
plt.show()
|
bsd-3-clause
|
ubaumgar/OKR
|
gui/run.py
|
1
|
5364
|
#!/usr/bin/env python
import datetime
import sys
import wx
import wx.lib.inspection
import wx.lib.mixins.inspection
from okr.config import Config
from okr.exporter import export_excel_for_klipfolio
from okr.importer import import_excel_sheet
import matplotlib
matplotlib.use('WXAgg')
from okr.visualize import visualize
# stuff for debugging
print("Python %s" % sys.version)
print("wx.version: %s" % wx.version())
assertMode = wx.APP_ASSERT_DIALOG
class Log:
@staticmethod
def write_text(text):
if text[-1:] == '\n':
text = text[:-1]
wx.LogMessage(text)
write = write_text
class ConverterFrame(wx.Frame):
def __init__(self, parent, id_val, title):
wx.Frame.__init__(self, parent, id_val, title, wx.DefaultPosition, wx.Size(640, 480))
path_to_source_file = Config.PATH_INPUT_FILE
path_to_output_files = Config.PATH_OUTPUT_FILES
self.SetSizeHints(640, 480, 640, 480)
panel = wx.Panel(self, -1)
self.static_text_input = wx.StaticText(panel, -1, 'Input Excel file:', (20, 20), style=wx.ALIGN_LEFT)
self.text_input = wx.TextCtrl(panel, -1, path_to_source_file, (20, 40), size=(500, -1), style=wx.TE_LEFT)
self.button_input = wx.Button(panel, -1, '...', (530, 40))
self.Bind(wx.EVT_BUTTON, self.on_input, id=self.button_input.GetId())
self.static_text_output = wx.StaticText(panel, -1, 'Output directory:', (20, 80), style=wx.ALIGN_LEFT)
self.text_output = wx.TextCtrl(panel, -1, path_to_output_files, (20, 100), size=(500, -1), style=wx.TE_LEFT)
self.button_output = wx.Button(panel, -1, '...', (530, 100))
self.Bind(wx.EVT_BUTTON, self.on_output, id=self.button_output.GetId())
self.button_convert = wx.Button(panel, -1, "Convert", (20, 160))
self.Bind(wx.EVT_BUTTON, self.on_convert, id=self.button_convert.GetId())
self.status_bar = self.CreateStatusBar(1)
self.status_bar.SetStatusText('', 0)
def on_convert(self, event):
del event
self.status_bar.SetStatusText('', 0)
max_objectives = Config.MAX_OBJECTIVES
max_key_results = Config.MAX_KEY_RESULTS
max_check_ins = Config.MAX_CHECK_INS
start_date = Config.START_DATE
path_to_source_file = self.text_input.GetValue()
try:
all_okr_teams = import_excel_sheet(max_check_ins, max_key_results, max_objectives, path_to_source_file,
start_date)
except Exception as e:
self.status_bar.SetStatusText('An error occurred during import: {}'.format(str(e)), 0)
return
today = datetime.datetime.now()
path_to_output_files = self.text_output.GetValue()
filename = '{}/OKR_{:04d}{:02d}{:02d}'.format(path_to_output_files, today.year, today.month, today.day)
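        # e.g. <output dir>/OKR_20190704.xlsx and OKR_20190704.png for a
        # conversion run on 2019-07-04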
try:
export_excel_for_klipfolio(all_okr_teams, filename + '.xlsx', today)
except Exception as e:
self.status_bar.SetStatusText('An error occurred during the Excel export: {}'.format(str(e)))
try:
            # CAUTION: the visualization does not work due to incorrect settings
# see https://stackoverflow.com/questions/7906365/matplotlib-savefig-plots-different-from-show
# http://matplotlib.org/users/customizing.html
visualize(all_okr_teams, filename + '.png', Config.TITLE_STRING, True)
except Exception as e:
self.status_bar.SetStatusText('An error occurred during the visualization: {}'.format(str(e)))
def on_input(self, event):
del event
open_file_dialog = wx.FileDialog(self, "Open input Excel file", "", "", "Excel files (*.xlsx)|*.xlsx",
wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)
if open_file_dialog.ShowModal() == wx.ID_CANCEL:
            return  # the user changed their mind
input_path = open_file_dialog.GetPath()
self.text_input.SetValue(input_path)
def on_output(self, event):
del event
open_directory_dialog = wx.DirDialog(self, "Select output directory", "",
wx.DD_DEFAULT_STYLE | wx.DD_DIR_MUST_EXIST)
if open_directory_dialog.ShowModal() == wx.ID_CANCEL:
            return  # the user changed their mind
output_path = open_directory_dialog.GetPath()
self.text_output.SetValue(output_path)
class RunConverterApp(wx.App):
def __init__(self):
wx.App.__init__(self, redirect=False)
def OnInit(self):
wx.Log.SetActiveTarget(wx.LogStderr())
cframe = ConverterFrame(None, -1, 'OKR Converter')
menu_bar = wx.MenuBar()
menu = wx.Menu()
item = menu.Append(-1, "E&xit", "Exit")
self.Bind(wx.EVT_MENU, self.on_exit_app, item)
menu_bar.Append(menu, "&File")
cframe.SetSize((640, 480))
cframe.SetMenuBar(menu_bar)
cframe.Show(True)
cframe.Bind(wx.EVT_CLOSE, self.on_close_frame)
self.SetTopWindow(cframe)
self.frame = cframe
return True
def on_exit_app(self, event):
del event
self.frame.Close(True)
def on_close_frame(self, event):
event.Skip()
def main():
app = RunConverterApp()
app.MainLoop()
if __name__ == "__main__":
main()
|
bsd-2-clause
|
jjs0sbw/CSPLN
|
apps/scaffolding/mac/web2py/web2py.app/Contents/Resources/lib/python2.7/matplotlib/projections/polar.py
|
2
|
26693
|
import math
import warnings
import numpy as np
import matplotlib
rcParams = matplotlib.rcParams
from matplotlib.axes import Axes
import matplotlib.axis as maxis
from matplotlib import cbook
from matplotlib import docstring
from matplotlib.patches import Circle
from matplotlib.path import Path
from matplotlib.ticker import Formatter, Locator, FormatStrFormatter
from matplotlib.transforms import Affine2D, Affine2DBase, Bbox, \
BboxTransformTo, IdentityTransform, Transform, TransformWrapper, \
ScaledTranslation, blended_transform_factory, BboxTransformToMaxOnly
import matplotlib.spines as mspines
class PolarAxes(Axes):
"""
A polar graph projection, where the input dimensions are *theta*, *r*.
Theta starts pointing east and goes anti-clockwise.
"""
name = 'polar'
class PolarTransform(Transform):
"""
The base polar transform. This handles projection *theta* and
*r* into Cartesian coordinate space *x* and *y*, but does not
perform the ultimate affine transformation into the correct
position.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, axis=None, use_rmin=True):
Transform.__init__(self)
self._axis = axis
self._use_rmin = use_rmin
def transform(self, tr):
xy = np.empty(tr.shape, np.float_)
if self._axis is not None:
if self._use_rmin:
rmin = self._axis.viewLim.ymin
else:
rmin = 0
theta_offset = self._axis.get_theta_offset()
theta_direction = self._axis.get_theta_direction()
else:
rmin = 0
theta_offset = 0
theta_direction = 1
t = tr[:, 0:1]
r = tr[:, 1:2]
x = xy[:, 0:1]
y = xy[:, 1:2]
t *= theta_direction
t += theta_offset
if rmin != 0:
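                # points that end up with r < 0 after subtracting rmin are
                # masked to NaN so they are not drawn inside the r-origin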
r = r - rmin
mask = r < 0
x[:] = np.where(mask, np.nan, r * np.cos(t))
y[:] = np.where(mask, np.nan, r * np.sin(t))
else:
x[:] = r * np.cos(t)
y[:] = r * np.sin(t)
return xy
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
if len(vertices) == 2 and vertices[0, 0] == vertices[1, 0]:
return Path(self.transform(vertices), path.codes)
ipath = path.interpolated(path._interpolation_steps)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return PolarAxes.InvertedPolarTransform(self._axis, self._use_rmin)
inverted.__doc__ = Transform.inverted.__doc__
class PolarAffine(Affine2DBase):
"""
The affine part of the polar projection. Scales the output so
that maximum radius rests on the edge of the axes circle.
"""
def __init__(self, scale_transform, limits):
"""
*limits* is the view limit of the data. The only part of
its bounds that is used is ymax (for the radius maximum).
The theta range is always fixed to (0, 2pi).
"""
Affine2DBase.__init__(self)
self._scale_transform = scale_transform
self._limits = limits
self.set_children(scale_transform, limits)
self._mtx = None
def get_matrix(self):
if self._invalid:
limits_scaled = self._limits.transformed(self._scale_transform)
yscale = limits_scaled.ymax - limits_scaled.ymin
affine = Affine2D() \
.scale(0.5 / yscale) \
.translate(0.5, 0.5)
self._mtx = affine.get_matrix()
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
class InvertedPolarTransform(Transform):
"""
The inverse of the polar transform, mapping Cartesian
coordinate space *x* and *y* back to *theta* and *r*.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, axis=None, use_rmin=True):
Transform.__init__(self)
self._axis = axis
self._use_rmin = use_rmin
def transform(self, xy):
if self._axis is not None:
if self._use_rmin:
rmin = self._axis.viewLim.ymin
else:
rmin = 0
theta_offset = self._axis.get_theta_offset()
theta_direction = self._axis.get_theta_direction()
else:
rmin = 0
theta_offset = 0
theta_direction = 1
x = xy[:, 0:1]
y = xy[:, 1:]
r = np.sqrt(x*x + y*y)
theta = np.arccos(x / r)
theta = np.where(y < 0, 2 * np.pi - theta, theta)
theta -= theta_offset
theta *= theta_direction
r += rmin
return np.concatenate((theta, r), 1)
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return PolarAxes.PolarTransform(self._axis, self._use_rmin)
inverted.__doc__ = Transform.inverted.__doc__
class ThetaFormatter(Formatter):
"""
Used to format the *theta* tick labels. Converts the native
unit of radians into degrees and adds a degree symbol.
"""
def __call__(self, x, pos=None):
# \u00b0 : degree symbol
if rcParams['text.usetex'] and not rcParams['text.latex.unicode']:
return r"$%0.0f^\circ$" % ((x / np.pi) * 180.0)
else:
# we use unicode, rather than mathtext with \circ, so
# that it will work correctly with any arbitrary font
# (assuming it has a degree sign), whereas $5\circ$
# will only work correctly with one of the supported
# math fonts (Computer Modern and STIX)
return u"%0.0f\u00b0" % ((x / np.pi) * 180.0)
class RadialLocator(Locator):
"""
Used to locate radius ticks.
Ensures that all ticks are strictly positive. For all other
tasks, it delegates to the base
:class:`~matplotlib.ticker.Locator` (which may be different
        depending on the scale of the *r*-axis).
"""
def __init__(self, base):
self.base = base
def __call__(self):
ticks = self.base()
return [x for x in ticks if x > 0]
def autoscale(self):
return self.base.autoscale()
def pan(self, numsteps):
return self.base.pan(numsteps)
def zoom(self, direction):
return self.base.zoom(direction)
def refresh(self):
return self.base.refresh()
def view_limits(self, vmin, vmax):
vmin, vmax = self.base.view_limits(vmin, vmax)
return 0, vmax
def __init__(self, *args, **kwargs):
"""
Create a new Polar Axes for a polar plot.
The following optional kwargs are supported:
- *resolution*: The number of points of interpolation between
each pair of data points. Set to 1 to disable
interpolation.
"""
self.resolution = kwargs.pop('resolution', None)
if self.resolution not in (None, 1):
warnings.warn(
"""The resolution kwarg to Polar plots is now ignored.
If you need to interpolate data points, consider running
cbook.simple_linear_interpolation on the data before passing to matplotlib.""")
Axes.__init__(self, *args, **kwargs)
self.set_aspect('equal', adjustable='box', anchor='C')
self.cla()
__init__.__doc__ = Axes.__init__.__doc__
def cla(self):
Axes.cla(self)
self.title.set_y(1.05)
self.xaxis.set_major_formatter(self.ThetaFormatter())
self.xaxis.isDefault_majfmt = True
angles = np.arange(0.0, 360.0, 45.0)
self.set_thetagrids(angles)
self.yaxis.set_major_locator(self.RadialLocator(self.yaxis.get_major_locator()))
self.grid(rcParams['polaraxes.grid'])
self.xaxis.set_ticks_position('none')
self.yaxis.set_ticks_position('none')
self.yaxis.set_tick_params(label1On=True)
# Why do we need to turn on yaxis tick labels, but
# xaxis tick labels are already on?
self.set_theta_offset(0)
self.set_theta_direction(1)
def _init_axis(self):
"move this out of __init__ because non-separable axes don't use it"
self.xaxis = maxis.XAxis(self)
self.yaxis = maxis.YAxis(self)
# Calling polar_axes.xaxis.cla() or polar_axes.xaxis.cla()
# results in weird artifacts. Therefore we disable this for
# now.
# self.spines['polar'].register_axis(self.yaxis)
self._update_transScale()
def _set_lim_and_transforms(self):
self.transAxes = BboxTransformTo(self.bbox)
# Transforms the x and y axis separately by a scale factor
# It is assumed that this part will have non-linear components
self.transScale = TransformWrapper(IdentityTransform())
# A (possibly non-linear) projection on the (already scaled)
# data. This one is aware of rmin
self.transProjection = self.PolarTransform(self)
# This one is not aware of rmin
self.transPureProjection = self.PolarTransform(self, use_rmin=False)
# An affine transformation on the data, generally to limit the
# range of the axes
self.transProjectionAffine = self.PolarAffine(self.transScale, self.viewLim)
# The complete data transformation stack -- from data all the
# way to display coordinates
self.transData = self.transScale + self.transProjection + \
(self.transProjectionAffine + self.transAxes)
# This is the transform for theta-axis ticks. It is
# equivalent to transData, except it always puts r == 1.0 at
# the edge of the axis circle.
self._xaxis_transform = (
self.transPureProjection +
self.PolarAffine(IdentityTransform(), Bbox.unit()) +
self.transAxes)
# The theta labels are moved from radius == 0.0 to radius == 1.1
self._theta_label1_position = Affine2D().translate(0.0, 1.1)
self._xaxis_text1_transform = (
self._theta_label1_position +
self._xaxis_transform)
self._theta_label2_position = Affine2D().translate(0.0, 1.0 / 1.1)
self._xaxis_text2_transform = (
self._theta_label2_position +
self._xaxis_transform)
# This is the transform for r-axis ticks. It scales the theta
# axis so the gridlines from 0.0 to 1.0, now go from 0.0 to
# 2pi.
self._yaxis_transform = (
Affine2D().scale(np.pi * 2.0, 1.0) +
self.transData)
# The r-axis labels are put at an angle and padded in the r-direction
self._r_label_position = ScaledTranslation(
22.5, 0.0, Affine2D())
self._yaxis_text_transform = (
self._r_label_position +
Affine2D().scale(1.0 / 360.0, 1.0) +
self._yaxis_transform
)
def get_xaxis_transform(self,which='grid'):
assert which in ['tick1','tick2','grid']
return self._xaxis_transform
def get_xaxis_text1_transform(self, pad):
return self._xaxis_text1_transform, 'center', 'center'
def get_xaxis_text2_transform(self, pad):
return self._xaxis_text2_transform, 'center', 'center'
def get_yaxis_transform(self,which='grid'):
assert which in ['tick1','tick2','grid']
return self._yaxis_transform
def get_yaxis_text1_transform(self, pad):
angle = self._r_label_position.to_values()[4]
if angle < 90.:
return self._yaxis_text_transform, 'bottom', 'left'
elif angle < 180.:
return self._yaxis_text_transform, 'bottom', 'right'
elif angle < 270.:
return self._yaxis_text_transform, 'top', 'right'
else:
return self._yaxis_text_transform, 'top', 'left'
def get_yaxis_text2_transform(self, pad):
angle = self._r_label_position.to_values()[4]
if angle < 90.:
return self._yaxis_text_transform, 'top', 'right'
elif angle < 180.:
return self._yaxis_text_transform, 'top', 'left'
elif angle < 270.:
return self._yaxis_text_transform, 'bottom', 'left'
else:
return self._yaxis_text_transform, 'bottom', 'right'
def _gen_axes_patch(self):
return Circle((0.5, 0.5), 0.5)
def _gen_axes_spines(self):
return {'polar':mspines.Spine.circular_spine(self,
(0.5, 0.5), 0.5)}
def set_rmax(self, rmax):
self.viewLim.y1 = rmax
def get_rmax(self):
return self.viewLim.ymax
def set_rmin(self, rmin):
self.viewLim.y0 = rmin
def get_rmin(self):
return self.viewLim.ymin
def set_theta_offset(self, offset):
"""
Set the offset for the location of 0 in radians.
"""
self._theta_offset = offset
def get_theta_offset(self):
"""
Get the offset for the location of 0 in radians.
"""
return self._theta_offset
def set_theta_zero_location(self, loc):
"""
Sets the location of theta's zero. (Calls set_theta_offset
with the correct value in radians under the hood.)
May be one of "N", "NW", "W", "SW", "S", "SE", "E", or "NE".
"""
mapping = {
'N': np.pi * 0.5,
'NW': np.pi * 0.75,
'W': np.pi,
'SW': np.pi * 1.25,
'S': np.pi * 1.5,
'SE': np.pi * 1.75,
'E': 0,
'NE': np.pi * 0.25 }
return self.set_theta_offset(mapping[loc])
def set_theta_direction(self, direction):
"""
Set the direction in which theta increases.
clockwise, -1:
Theta increases in the clockwise direction
counterclockwise, anticlockwise, 1:
Theta increases in the counterclockwise direction
"""
if direction in ('clockwise',):
self._direction = -1
elif direction in ('counterclockwise', 'anticlockwise'):
self._direction = 1
elif direction in (1, -1):
self._direction = direction
else:
raise ValueError("direction must be 1, -1, clockwise or counterclockwise")
def get_theta_direction(self):
"""
Get the direction in which theta increases.
-1:
Theta increases in the clockwise direction
1:
Theta increases in the counterclockwise direction
"""
return self._direction
def set_rlim(self, *args, **kwargs):
if 'rmin' in kwargs:
kwargs['ymin'] = kwargs.pop('rmin')
if 'rmax' in kwargs:
kwargs['ymax'] = kwargs.pop('rmax')
return self.set_ylim(*args, **kwargs)
def set_yscale(self, *args, **kwargs):
Axes.set_yscale(self, *args, **kwargs)
self.yaxis.set_major_locator(
self.RadialLocator(self.yaxis.get_major_locator()))
set_rscale = Axes.set_yscale
set_rticks = Axes.set_yticks
@docstring.dedent_interpd
def set_thetagrids(self, angles, labels=None, frac=None, fmt=None,
**kwargs):
"""
Set the angles at which to place the theta grids (these
gridlines are equal along the theta dimension). *angles* is in
degrees.
*labels*, if not None, is a ``len(angles)`` list of strings of
the labels to use at each angle.
If *labels* is None, the labels will be ``fmt %% angle``
*frac* is the fraction of the polar axes radius at which to
        place the label (1 is the edge). E.g. 1.05 is outside the axes
and 0.95 is inside the axes.
Return value is a list of tuples (*line*, *label*), where
*line* is :class:`~matplotlib.lines.Line2D` instances and the
*label* is :class:`~matplotlib.text.Text` instances.
kwargs are optional text properties for the labels:
%(Text)s
ACCEPTS: sequence of floats
"""
angles = np.asarray(angles, np.float_)
self.set_xticks(angles * (np.pi / 180.0))
if labels is not None:
self.set_xticklabels(labels)
elif fmt is not None:
self.xaxis.set_major_formatter(FormatStrFormatter(fmt))
if frac is not None:
self._theta_label1_position.clear().translate(0.0, frac)
self._theta_label2_position.clear().translate(0.0, 1.0 / frac)
for t in self.xaxis.get_ticklabels():
t.update(kwargs)
return self.xaxis.get_ticklines(), self.xaxis.get_ticklabels()
@docstring.dedent_interpd
def set_rgrids(self, radii, labels=None, angle=None, fmt=None,
**kwargs):
"""
Set the radial locations and labels of the *r* grids.
The labels will appear at radial distances *radii* at the
given *angle* in degrees.
*labels*, if not None, is a ``len(radii)`` list of strings of the
labels to use at each radius.
If *labels* is None, the built-in formatter will be used.
Return value is a list of tuples (*line*, *label*), where
*line* is :class:`~matplotlib.lines.Line2D` instances and the
*label* is :class:`~matplotlib.text.Text` instances.
kwargs are optional text properties for the labels:
%(Text)s
ACCEPTS: sequence of floats
"""
radii = np.asarray(radii)
rmin = radii.min()
if rmin <= 0:
raise ValueError('radial grids must be strictly positive')
self.set_yticks(radii)
if labels is not None:
self.set_yticklabels(labels)
elif fmt is not None:
self.yaxis.set_major_formatter(FormatStrFormatter(fmt))
if angle is None:
angle = self._r_label_position.to_values()[4]
self._r_label_position._t = (angle, 0.0)
self._r_label_position.invalidate()
for t in self.yaxis.get_ticklabels():
t.update(kwargs)
return self.yaxis.get_gridlines(), self.yaxis.get_ticklabels()
def set_xscale(self, scale, *args, **kwargs):
if scale != 'linear':
raise NotImplementedError("You can not set the xscale on a polar plot.")
def set_xlim(self, *args, **kargs):
# The xlim is fixed, no matter what you do
self.viewLim.intervalx = (0.0, np.pi * 2.0)
def format_coord(self, theta, r):
"""
Return a format string formatting the coordinate using Unicode
characters.
"""
theta /= math.pi
# \u03b8: lower-case theta
# \u03c0: lower-case pi
# \u00b0: degree symbol
return u'\u03b8=%0.3f\u03c0 (%0.3f\u00b0), r=%0.3f' % (theta, theta * 180.0, r)
def get_data_ratio(self):
'''
Return the aspect ratio of the data itself. For a polar plot,
this should always be 1.0
'''
return 1.0
### Interactive panning
def can_zoom(self):
"""
Return *True* if this axes supports the zoom box button functionality.
Polar axes do not support zoom boxes.
"""
return False
    def can_pan(self):
"""
Return *True* if this axes supports the pan/zoom button functionality.
For polar axes, this is slightly misleading. Both panning and
zooming are performed by the same button. Panning is performed
in azimuth while zooming is done along the radial.
"""
return True
def start_pan(self, x, y, button):
angle = np.deg2rad(self._r_label_position.to_values()[4])
mode = ''
if button == 1:
epsilon = np.pi / 45.0
t, r = self.transData.inverted().transform_point((x, y))
if t >= angle - epsilon and t <= angle + epsilon:
mode = 'drag_r_labels'
elif button == 3:
mode = 'zoom'
self._pan_start = cbook.Bunch(
rmax = self.get_rmax(),
trans = self.transData.frozen(),
trans_inverse = self.transData.inverted().frozen(),
r_label_angle = self._r_label_position.to_values()[4],
x = x,
y = y,
mode = mode
)
def end_pan(self):
del self._pan_start
def drag_pan(self, button, key, x, y):
p = self._pan_start
if p.mode == 'drag_r_labels':
startt, startr = p.trans_inverse.transform_point((p.x, p.y))
t, r = p.trans_inverse.transform_point((x, y))
# Deal with theta
dt0 = t - startt
dt1 = startt - t
if abs(dt1) < abs(dt0):
                dt = abs(dt1) * np.sign(dt0) * -1.0
else:
dt = dt0 * -1.0
dt = (dt / np.pi) * 180.0
self._r_label_position._t = (p.r_label_angle - dt, 0.0)
self._r_label_position.invalidate()
trans, vert1, horiz1 = self.get_yaxis_text1_transform(0.0)
trans, vert2, horiz2 = self.get_yaxis_text2_transform(0.0)
for t in self.yaxis.majorTicks + self.yaxis.minorTicks:
t.label1.set_va(vert1)
t.label1.set_ha(horiz1)
t.label2.set_va(vert2)
t.label2.set_ha(horiz2)
elif p.mode == 'zoom':
startt, startr = p.trans_inverse.transform_point((p.x, p.y))
t, r = p.trans_inverse.transform_point((x, y))
dr = r - startr
# Deal with r
scale = r / startr
self.set_rmax(p.rmax / scale)
# These are a couple of aborted attempts to project a polar plot using
# cubic bezier curves.
# def transform_path(self, path):
# twopi = 2.0 * np.pi
# halfpi = 0.5 * np.pi
# vertices = path.vertices
# t0 = vertices[0:-1, 0]
# t1 = vertices[1: , 0]
# td = np.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
# maxtd = td.max()
# interpolate = np.ceil(maxtd / halfpi)
# if interpolate > 1.0:
# vertices = self.interpolate(vertices, interpolate)
# vertices = self.transform(vertices)
# result = np.zeros((len(vertices) * 3 - 2, 2), np.float_)
# codes = mpath.Path.CURVE4 * np.ones((len(vertices) * 3 - 2, ), mpath.Path.code_type)
# result[0] = vertices[0]
# codes[0] = mpath.Path.MOVETO
# kappa = 4.0 * ((np.sqrt(2.0) - 1.0) / 3.0)
# kappa = 0.5
# p0 = vertices[0:-1]
# p1 = vertices[1: ]
# x0 = p0[:, 0:1]
# y0 = p0[:, 1: ]
# b0 = ((y0 - x0) - y0) / ((x0 + y0) - x0)
# a0 = y0 - b0*x0
# x1 = p1[:, 0:1]
# y1 = p1[:, 1: ]
# b1 = ((y1 - x1) - y1) / ((x1 + y1) - x1)
# a1 = y1 - b1*x1
# x = -(a0-a1) / (b0-b1)
# y = a0 + b0*x
# xk = (x - x0) * kappa + x0
# yk = (y - y0) * kappa + y0
# result[1::3, 0:1] = xk
# result[1::3, 1: ] = yk
# xk = (x - x1) * kappa + x1
# yk = (y - y1) * kappa + y1
# result[2::3, 0:1] = xk
# result[2::3, 1: ] = yk
# result[3::3] = p1
# print vertices[-2:]
# print result[-2:]
# return mpath.Path(result, codes)
# twopi = 2.0 * np.pi
# halfpi = 0.5 * np.pi
# vertices = path.vertices
# t0 = vertices[0:-1, 0]
# t1 = vertices[1: , 0]
# td = np.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
# maxtd = td.max()
# interpolate = np.ceil(maxtd / halfpi)
# print "interpolate", interpolate
# if interpolate > 1.0:
# vertices = self.interpolate(vertices, interpolate)
# result = np.zeros((len(vertices) * 3 - 2, 2), np.float_)
# codes = mpath.Path.CURVE4 * np.ones((len(vertices) * 3 - 2, ), mpath.Path.code_type)
# result[0] = vertices[0]
# codes[0] = mpath.Path.MOVETO
# kappa = 4.0 * ((np.sqrt(2.0) - 1.0) / 3.0)
# tkappa = np.arctan(kappa)
# hyp_kappa = np.sqrt(kappa*kappa + 1.0)
# t0 = vertices[0:-1, 0]
# t1 = vertices[1: , 0]
# r0 = vertices[0:-1, 1]
# r1 = vertices[1: , 1]
# td = np.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
# td_scaled = td / (np.pi * 0.5)
# rd = r1 - r0
# r0kappa = r0 * kappa * td_scaled
# r1kappa = r1 * kappa * td_scaled
# ravg_kappa = ((r1 + r0) / 2.0) * kappa * td_scaled
# result[1::3, 0] = t0 + (tkappa * td_scaled)
# result[1::3, 1] = r0*hyp_kappa
# # result[1::3, 1] = r0 / np.cos(tkappa * td_scaled) # np.sqrt(r0*r0 + ravg_kappa*ravg_kappa)
# result[2::3, 0] = t1 - (tkappa * td_scaled)
# result[2::3, 1] = r1*hyp_kappa
# # result[2::3, 1] = r1 / np.cos(tkappa * td_scaled) # np.sqrt(r1*r1 + ravg_kappa*ravg_kappa)
# result[3::3, 0] = t1
# result[3::3, 1] = r1
# print vertices[:6], result[:6], t0[:6], t1[:6], td[:6], td_scaled[:6], tkappa
# result = self.transform(result)
# return mpath.Path(result, codes)
# transform_path_non_affine = transform_path
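# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module), assuming a
# standard matplotlib installation: it shows how the set_rgrids and
# set_thetagrids methods documented above configure the radial and angular
# grids of a polar plot. Guarded so it only runs when executed directly.
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='polar')
    theta = np.linspace(0.0, 2.0 * np.pi, 200)
    ax.plot(theta, 1.0 + 0.5 * np.sin(3.0 * theta))
    # Radial grid lines at fixed radii, with their labels drawn at 22.5 deg.
    ax.set_rgrids([0.5, 1.0, 1.5], angle=22.5)
    # Angular grid lines every 45 degrees.
    ax.set_thetagrids(np.arange(0, 360, 45))
    plt.show()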
|
gpl-3.0
|
chapmanbe/pymitools
|
pymitools/ipython/imgs.py
|
1
|
1092
|
"""
Functions for interacting with volumetric images in the IPython notebook
"""
import numpy as np
from ipywidgets import interact
import ipywidgets as widgets
import matplotlib.pyplot as plt
def win_lev(img, w, l, maxc=255):
"""
Window (w) and level (l) data in img
img: numpy array representing an image
w: the window for the transformation
l: the level for the transformation
maxc: the maximum display color
"""
    # Linear rescale so that intensities in [l - w, l + w] map onto [0, maxc].
    m = maxc / (2.0 * w)
    o = m * (l - w)
    return np.clip(m * img - o, 0, maxc).astype(np.uint8)
def display_img(img, cmap="gray"):
f, ax1 = plt.subplots(1)
ax1.imshow(img, cmap=cmap)
ax1.grid(False)
ax1.yaxis.set_visible(False)
ax1.xaxis.set_visible(False)
return f, ax1
def view_volume(img):
@interact(sl=widgets.IntSlider(min=0, max=img.shape[0]-1, value=int(img.shape[0]/2)),
win=widgets.IntSlider(min=1, max=2000, value=1000),
level=widgets.IntSlider(min=-1024, max=2000, value=0))
    def _view_slice(sl=0, win=1000, level=0):
        display_img(win_lev(img[sl, :, :], win, level))
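# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): window/level a
# slice of a synthetic HU-like volume and display it. view_volume itself needs
# a live Jupyter/IPython kernel for the ipywidgets sliders to render, so only
# the non-interactive helpers are exercised here.
if __name__ == "__main__":
    vol = np.random.randint(-1024, 2000, size=(20, 128, 128))
    display_img(win_lev(vol[10], w=400, l=40))
    plt.show()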
|
apache-2.0
|
nelson-liu/scikit-learn
|
examples/covariance/plot_sparse_cov.py
|
300
|
5078
|
"""
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too much correlated (limiting the largest coefficient of the
precision matrix) and that there are no small coefficients in the
precision matrix that cannot be recovered. In addition, with a small
number of observations, it is easier to recover a correlation matrix
rather than a covariance, thus we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, as the number
of samples is small, we need to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, that
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimate correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso setting the sparsity of the model is
set by internal cross-validation in the GraphLassoCV. As can be
seen on figure 2, the grid to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
smallest_coef=.4,
largest_coef=.7,
random_state=prng)
cov = linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
('GraphLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(np.ma.masked_equal(this_prec, 0),
interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s precision' % name)
ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
|
bsd-3-clause
|
Djabbz/scikit-learn
|
examples/cluster/plot_mini_batch_kmeans.py
|
265
|
4081
|
"""
====================================================================
Comparison of the K-Means and MiniBatchKMeans clustering algorithms
====================================================================
We want to compare the performance of the MiniBatchKMeans and KMeans:
the MiniBatchKMeans is faster, but gives slightly different results (see
:ref:`mini_batch_kmeans`).
We will cluster a set of data, first with KMeans and then with
MiniBatchKMeans, and plot the results.
We will also plot the points that are labelled differently between the two
algorithms.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
np.random.seed(0)
batch_size = 45
centers = [[1, 1], [-1, -1], [1, -1]]
n_clusters = len(centers)
X, labels_true = make_blobs(n_samples=3000, centers=centers, cluster_std=0.7)
##############################################################################
# Compute clustering with Means
k_means = KMeans(init='k-means++', n_clusters=3, n_init=10)
t0 = time.time()
k_means.fit(X)
t_batch = time.time() - t0
k_means_labels = k_means.labels_
k_means_cluster_centers = k_means.cluster_centers_
k_means_labels_unique = np.unique(k_means_labels)
##############################################################################
# Compute clustering with MiniBatchKMeans
mbk = MiniBatchKMeans(init='k-means++', n_clusters=3, batch_size=batch_size,
n_init=10, max_no_improvement=10, verbose=0)
t0 = time.time()
mbk.fit(X)
t_mini_batch = time.time() - t0
mbk_means_labels = mbk.labels_
mbk_means_cluster_centers = mbk.cluster_centers_
mbk_means_labels_unique = np.unique(mbk_means_labels)
##############################################################################
# Plot result
fig = plt.figure(figsize=(8, 3))
fig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)
colors = ['#4EACC5', '#FF9C34', '#4E9A06']
# We want to have the same colors for the same cluster from the
# MiniBatchKMeans and the KMeans algorithm. Let's pair the cluster centers per
# closest one.
order = pairwise_distances_argmin(k_means_cluster_centers,
mbk_means_cluster_centers)
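# order[k] is the index of the MiniBatchKMeans center closest to k-means
# center k, so matching clusters are drawn with the same color in both panels.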
# KMeans
ax = fig.add_subplot(1, 3, 1)
for k, col in zip(range(n_clusters), colors):
my_members = k_means_labels == k
cluster_center = k_means_cluster_centers[k]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('KMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' % (
t_batch, k_means.inertia_))
# MiniBatchKMeans
ax = fig.add_subplot(1, 3, 2)
for k, col in zip(range(n_clusters), colors):
my_members = mbk_means_labels == order[k]
cluster_center = mbk_means_cluster_centers[order[k]]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('MiniBatchKMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' %
(t_mini_batch, mbk.inertia_))
# Initialise the different array to all False
different = (mbk_means_labels == 4)
ax = fig.add_subplot(1, 3, 3)
for k in range(n_clusters):
different += ((k_means_labels == k) != (mbk_means_labels == order[k]))
identic = np.logical_not(different)
ax.plot(X[identic, 0], X[identic, 1], 'w',
markerfacecolor='#bbbbbb', marker='.')
ax.plot(X[different, 0], X[different, 1], 'w',
markerfacecolor='m', marker='.')
ax.set_title('Difference')
ax.set_xticks(())
ax.set_yticks(())
plt.show()
|
bsd-3-clause
|
B3AU/waveTree
|
sklearn/metrics/cluster/__init__.py
|
312
|
1322
|
"""
The :mod:`sklearn.metrics.cluster` submodule contains evaluation metrics for
cluster analysis results. There are two forms of evaluation:
- supervised, which uses a ground truth class values for each sample.
- unsupervised, which does not and measures the 'quality' of the model itself.
"""
from .supervised import adjusted_mutual_info_score
from .supervised import normalized_mutual_info_score
from .supervised import adjusted_rand_score
from .supervised import completeness_score
from .supervised import contingency_matrix
from .supervised import expected_mutual_information
from .supervised import homogeneity_completeness_v_measure
from .supervised import homogeneity_score
from .supervised import mutual_info_score
from .supervised import v_measure_score
from .supervised import entropy
from .unsupervised import silhouette_samples
from .unsupervised import silhouette_score
from .bicluster import consensus_score
__all__ = ["adjusted_mutual_info_score", "normalized_mutual_info_score",
"adjusted_rand_score", "completeness_score", "contingency_matrix",
"expected_mutual_information", "homogeneity_completeness_v_measure",
"homogeneity_score", "mutual_info_score", "v_measure_score",
"entropy", "silhouette_samples", "silhouette_score",
"consensus_score"]
|
bsd-3-clause
|
jtryan/camera-calibration
|
Undistort.py
|
1
|
1858
|
import pickle
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Read in the saved camera matrix and distortion coefficients
# These are the arrays you calculated using cv2.calibrateCamera()
dist_pickle = pickle.load(open("calibration_wide/wide_dist_pickle.p", "rb"))
mtx = dist_pickle["mtx"]
dist = dist_pickle["dist"]
# Read in an image
img = cv2.imread('calibration_wide/test_image2.jpg')
nx = 8 # the number of inside corners in x
ny = 6 # the number of inside corners in y
def corners_unwarp(img, nx, ny, mtx, dist):
# 1) Undistort using mtx and dist
undist = cv2.undistort(img, mtx, dist, None, mtx)
# 2) Convert to grayscale
    gray = cv2.cvtColor(undist, cv2.COLOR_BGR2GRAY)
# 3) Find the chessboard corners
ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
# 4) If corners found:
    if ret:
        cv2.drawChessboardCorners(undist, (nx, ny), corners, ret)
offset = 100
img_size = (img.shape[1], img.shape[0])
# For source points I'm grabbing the outer four detected corners
src = np.float32([corners[0], corners[nx - 1], corners[-1], corners[-nx]])
dst = np.float32([[offset, offset], [img_size[0] - offset, offset],
[img_size[0] - offset, img_size[1] - offset],
[offset, img_size[1] - offset]])
M = cv2.getPerspectiveTransform(src, dst)
        warped = cv2.warpPerspective(undist, M, img_size)
return warped, M
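# cv2.getPerspectiveTransform solves for the 3x3 homography mapping the four
# detected corner points (src) onto the axis-aligned rectangle (dst), and
# cv2.warpPerspective resamples the image with that homography to produce the
# top-down view returned above.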
top_down, perspective_M = corners_unwarp(img, nx, ny, mtx, dist)
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
ax1.imshow(img)
ax1.set_title('Original Image', fontsize=50)
ax2.imshow(top_down)
ax2.set_title('Undistorted and Warped Image', fontsize=50)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
|
mit
|
wazeerzulfikar/scikit-learn
|
sklearn/feature_extraction/tests/test_dict_vectorizer.py
|
110
|
3768
|
# Authors: Lars Buitinck
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils.testing import (assert_equal, assert_in,
assert_false, assert_true)
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
D = [{"foo": 1, "bar": 3},
{"bar": 4, "baz": 2},
{"bar": 1, "quux": 1, "quuux": 2}]
for sparse in (True, False):
for dtype in (int, np.float32, np.int16):
for sort in (True, False):
for iterable in (True, False):
v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
X = v.fit_transform(iter(D) if iterable else D)
assert_equal(sp.issparse(X), sparse)
assert_equal(X.shape, (3, 5))
assert_equal(X.sum(), 14)
assert_equal(v.inverse_transform(X), D)
if sparse:
# CSR matrices can't be compared for equality
assert_array_equal(X.A, v.transform(iter(D) if iterable
else D).A)
else:
assert_array_equal(X, v.transform(iter(D) if iterable
else D))
if sort:
assert_equal(v.feature_names_,
sorted(v.feature_names_))
def test_feature_selection():
# make two feature dicts with two useful features and a bunch of useless
# ones, in terms of chi2
d1 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=1, useful2=20)
d2 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=20, useful2=1)
for indices in (True, False):
v = DictVectorizer().fit([d1, d2])
X = v.transform([d1, d2])
sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
v.restrict(sel.get_support(indices=indices), indices=indices)
assert_equal(v.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
D_in = [{"version": "1", "ham": 2},
{"version": "2", "spam": .3},
{"version=3": True, "spam": -1}]
v = DictVectorizer()
X = v.fit_transform(D_in)
assert_equal(X.shape, (3, 5))
D_out = v.inverse_transform(X)
assert_equal(D_out[0], {"version=1": 1, "ham": 2})
names = v.get_feature_names()
assert_true("version=2" in names)
assert_false("version" in names)
def test_unseen_or_no_features():
D = [{"camelot": 0, "spamalot": 1}]
for sparse in [True, False]:
v = DictVectorizer(sparse=sparse).fit(D)
X = v.transform({"push the pram a lot": 2})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
X = v.transform({})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
try:
v.transform([])
except ValueError as e:
assert_in("empty", str(e))
def test_deterministic_vocabulary():
# Generate equal dictionaries with different memory layouts
items = [("%03d" % i, i) for i in range(1000)]
rng = Random(42)
d_sorted = dict(items)
rng.shuffle(items)
d_shuffled = dict(items)
# check that the memory layout does not impact the resulting vocabulary
v_1 = DictVectorizer().fit([d_sorted])
v_2 = DictVectorizer().fit([d_shuffled])
assert_equal(v_1.vocabulary_, v_2.vocabulary_)
|
bsd-3-clause
|
jmetzen/scikit-learn
|
sklearn/manifold/tests/test_spectral_embedding.py
|
26
|
9488
|
from nose.tools import assert_true
from nose.tools import assert_equal
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.linalg import eigh
import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from nose.plugins.skip import SkipTest
from sklearn.manifold.spectral_embedding_ import SpectralEmbedding
from sklearn.manifold.spectral_embedding_ import _graph_is_connected
from sklearn.manifold.spectral_embedding_ import _graph_connected_component
from sklearn.manifold import spectral_embedding
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics import normalized_mutual_info_score
from sklearn.cluster import KMeans
from sklearn.datasets.samples_generator import make_blobs
from sklearn.utils.graph import graph_laplacian
from sklearn.utils.extmath import _deterministic_vector_sign_flip
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 1000
n_clusters, n_features = centers.shape
S, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
def _check_with_col_sign_flipping(A, B, tol=0.0):
""" Check array A and B are equal with possible sign flipping on
each columns"""
sign = True
for column_idx in range(A.shape[1]):
sign = sign and ((((A[:, column_idx] -
B[:, column_idx]) ** 2).mean() <= tol ** 2) or
(((A[:, column_idx] +
B[:, column_idx]) ** 2).mean() <= tol ** 2))
if not sign:
return False
return True
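# Eigenvectors, and therefore spectral-embedding coordinates, are only defined
# up to a sign, so the helper above compares columns modulo a per-column sign
# flip.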
def test_spectral_embedding_two_components(seed=36):
# Test spectral embedding with two components
random_state = np.random.RandomState(seed)
n_sample = 100
affinity = np.zeros(shape=[n_sample * 2,
n_sample * 2])
# first component
affinity[0:n_sample,
0:n_sample] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# second component
affinity[n_sample::,
n_sample::] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# Test of internal _graph_connected_component before connection
component = _graph_connected_component(affinity, 0)
assert_true(component[:n_sample].all())
assert_true(not component[n_sample:].any())
component = _graph_connected_component(affinity, -1)
assert_true(not component[:n_sample].any())
assert_true(component[n_sample:].all())
# connection
affinity[0, n_sample + 1] = 1
affinity[n_sample + 1, 0] = 1
affinity.flat[::2 * n_sample + 1] = 0
affinity = 0.5 * (affinity + affinity.T)
true_label = np.zeros(shape=2 * n_sample)
true_label[0:n_sample] = 1
se_precomp = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed))
embedded_coordinate = se_precomp.fit_transform(affinity)
# Some numpy versions are touchy with types
embedded_coordinate = \
se_precomp.fit_transform(affinity.astype(np.float32))
# thresholding on the first components using 0.
label_ = np.array(embedded_coordinate.ravel() < 0, dtype="float")
assert_equal(normalized_mutual_info_score(true_label, label_), 1.0)
def test_spectral_embedding_precomputed_affinity(seed=36):
# Test spectral embedding with precomputed kernel
gamma = 1.0
se_precomp = SpectralEmbedding(n_components=2, affinity="precomputed",
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_precomp = se_precomp.fit_transform(rbf_kernel(S, gamma=gamma))
embed_rbf = se_rbf.fit_transform(S)
assert_array_almost_equal(
se_precomp.affinity_matrix_, se_rbf.affinity_matrix_)
assert_true(_check_with_col_sign_flipping(embed_precomp, embed_rbf, 0.05))
def test_spectral_embedding_callable_affinity(seed=36):
# Test spectral embedding with callable affinity
gamma = 0.9
kern = rbf_kernel(S, gamma=gamma)
se_callable = SpectralEmbedding(n_components=2,
affinity=(
lambda x: rbf_kernel(x, gamma=gamma)),
gamma=gamma,
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_rbf = se_rbf.fit_transform(S)
embed_callable = se_callable.fit_transform(S)
assert_array_almost_equal(
se_callable.affinity_matrix_, se_rbf.affinity_matrix_)
assert_array_almost_equal(kern, se_rbf.affinity_matrix_)
assert_true(
_check_with_col_sign_flipping(embed_rbf, embed_callable, 0.05))
def test_spectral_embedding_amg_solver(seed=36):
# Test spectral embedding with amg solver
try:
from pyamg import smoothed_aggregation_solver
except ImportError:
raise SkipTest("pyamg not available.")
se_amg = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="amg", n_neighbors=5,
random_state=np.random.RandomState(seed))
se_arpack = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="arpack", n_neighbors=5,
random_state=np.random.RandomState(seed))
embed_amg = se_amg.fit_transform(S)
embed_arpack = se_arpack.fit_transform(S)
assert_true(_check_with_col_sign_flipping(embed_amg, embed_arpack, 0.05))
def test_pipeline_spectral_clustering(seed=36):
# Test using pipeline to do spectral clustering
random_state = np.random.RandomState(seed)
se_rbf = SpectralEmbedding(n_components=n_clusters,
affinity="rbf",
random_state=random_state)
se_knn = SpectralEmbedding(n_components=n_clusters,
affinity="nearest_neighbors",
n_neighbors=5,
random_state=random_state)
for se in [se_rbf, se_knn]:
km = KMeans(n_clusters=n_clusters, random_state=random_state)
km.fit(se.fit_transform(S))
assert_array_almost_equal(
normalized_mutual_info_score(
km.labels_,
true_labels), 1.0, 2)
def test_spectral_embedding_unknown_eigensolver(seed=36):
# Test that SpectralClustering fails with an unknown eigensolver
se = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed),
eigen_solver="<unknown>")
assert_raises(ValueError, se.fit, S)
def test_spectral_embedding_unknown_affinity(seed=36):
# Test that SpectralClustering fails with an unknown affinity type
se = SpectralEmbedding(n_components=1, affinity="<unknown>",
random_state=np.random.RandomState(seed))
assert_raises(ValueError, se.fit, S)
def test_connectivity(seed=36):
# Test that graph connectivity test works as expected
graph = np.array([[1, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), False)
assert_equal(_graph_is_connected(csr_matrix(graph)), False)
assert_equal(_graph_is_connected(csc_matrix(graph)), False)
graph = np.array([[1, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), True)
assert_equal(_graph_is_connected(csr_matrix(graph)), True)
assert_equal(_graph_is_connected(csc_matrix(graph)), True)
def test_spectral_embedding_deterministic():
# Test that Spectral Embedding is deterministic
random_state = np.random.RandomState(36)
data = random_state.randn(10, 30)
sims = rbf_kernel(data)
embedding_1 = spectral_embedding(sims)
embedding_2 = spectral_embedding(sims)
assert_array_almost_equal(embedding_1, embedding_2)
def test_spectral_embedding_unnormalized():
# Test that spectral_embedding is also processing unnormalized laplacian correctly
random_state = np.random.RandomState(36)
data = random_state.randn(10, 30)
sims = rbf_kernel(data)
n_components = 8
embedding_1 = spectral_embedding(sims,
norm_laplacian=False,
n_components=n_components,
drop_first=False)
# Verify using manual computation with dense eigh
laplacian, dd = graph_laplacian(sims, normed=False, return_diag=True)
_, diffusion_map = eigh(laplacian)
embedding_2 = diffusion_map.T[:n_components] * dd
embedding_2 = _deterministic_vector_sign_flip(embedding_2).T
assert_array_almost_equal(embedding_1, embedding_2)
|
bsd-3-clause
|
jklenzing/pysat
|
pysat/tests/test_meta.py
|
2
|
48081
|
"""
tests the pysat meta object and code
"""
import pysat
import pandas as pds
from nose.tools import raises
import pysat.instruments.pysat_testing
import numpy as np
class TestBasics():
def setup(self):
"""Runs before every method to create a clean testing setup."""
self.meta = pysat.Meta()
self.testInst = pysat.Instrument('pysat', 'testing',
clean_level='clean')
def teardown(self):
"""Runs after every method to clean up previous testing."""
del self.testInst
del self.meta
@raises(ValueError)
def test_setting_nonpandas_metadata(self):
self.meta = pysat.Meta(metadata='Not a Panda')
def test_inst_data_assign_meta_default(self):
self.testInst.load(2009, 1)
self.testInst['help'] = self.testInst['mlt']
assert self.testInst.meta['help', 'long_name'] == 'help'
assert self.testInst.meta['help', 'axis'] == 'help'
assert self.testInst.meta['help', 'label'] == 'help'
assert self.testInst.meta['help', 'notes'] == ''
assert np.isnan(self.testInst.meta['help', 'fill'])
assert np.isnan(self.testInst.meta['help', 'value_min'])
assert np.isnan(self.testInst.meta['help', 'value_max'])
assert self.testInst.meta['help', 'units'] == ''
assert self.testInst.meta['help', 'desc'] == ''
assert self.testInst.meta['help', 'scale'] == 'linear'
def test_inst_data_assign_meta(self):
self.testInst.load(2009, 1)
self.testInst['help'] = {'data': self.testInst['mlt'],
'units': 'V',
'long_name': 'The Doors'}
assert self.testInst.meta['help', 'long_name'] == 'The Doors'
assert self.testInst.meta['help', 'axis'] == 'help'
assert self.testInst.meta['help', 'label'] == 'help'
assert self.testInst.meta['help', 'notes'] == ''
assert np.isnan(self.testInst.meta['help', 'fill'])
assert np.isnan(self.testInst.meta['help', 'value_min'])
assert np.isnan(self.testInst.meta['help', 'value_max'])
assert self.testInst.meta['help', 'units'] == 'V'
assert self.testInst.meta['help', 'desc'] == ''
assert self.testInst.meta['help', 'scale'] == 'linear'
def test_inst_data_assign_meta_then_data(self):
self.testInst.load(2009, 1)
self.testInst['help'] = {'data': self.testInst['mlt'],
'units': 'V',
'long_name': 'The Doors'}
self.testInst['help'] = self.testInst['mlt']
assert self.testInst.meta['help', 'long_name'] == 'The Doors'
assert self.testInst.meta['help', 'axis'] == 'help'
assert self.testInst.meta['help', 'label'] == 'help'
assert self.testInst.meta['help', 'notes'] == ''
assert np.isnan(self.testInst.meta['help', 'fill'])
assert np.isnan(self.testInst.meta['help', 'value_min'])
assert np.isnan(self.testInst.meta['help', 'value_max'])
assert self.testInst.meta['help', 'units'] == 'V'
assert self.testInst.meta['help', 'desc'] == ''
assert self.testInst.meta['help', 'scale'] == 'linear'
def test_inst_ho_data_assign_no_meta_default(self):
self.testInst.load(2009, 1)
frame = pds.DataFrame({'dummy_frame1': np.arange(10),
'dummy_frame2': np.arange(10)},
columns=['dummy_frame1', 'dummy_frame2'])
self.testInst['help'] = [frame]*len(self.testInst.data.index)
assert 'dummy_frame1' in self.testInst.meta.ho_data['help']
assert 'dummy_frame2' in self.testInst.meta.ho_data['help']
assert 'dummy_frame1' in self.testInst.meta['help']['children']
assert 'dummy_frame2' in self.testInst.meta['help']['children']
assert self.testInst.meta['help']['children'].has_attr('units')
assert self.testInst.meta['help']['children'].has_attr('desc')
def test_inst_ho_data_assign_meta_default(self):
self.testInst.load(2009, 1)
frame = pds.DataFrame({'dummy_frame1': np.arange(10),
'dummy_frame2': np.arange(10)},
columns=['dummy_frame1', 'dummy_frame2'])
self.testInst['help'] = {'data': [frame]*len(self.testInst.data.index),
'units': 'V',
'long_name': 'The Doors'}
assert self.testInst.meta['help', 'long_name'] == 'The Doors'
assert 'dummy_frame1' in self.testInst.meta.ho_data['help']
assert 'dummy_frame2' in self.testInst.meta.ho_data['help']
assert 'dummy_frame1' in self.testInst.meta['help']['children']
assert 'dummy_frame2' in self.testInst.meta['help']['children']
assert self.testInst.meta['help']['children'].has_attr('units')
assert self.testInst.meta['help']['children'].has_attr('desc')
def test_inst_ho_data_assign_meta(self):
self.testInst.load(2009, 1)
frame = pds.DataFrame({'dummy_frame1': np.arange(10),
'dummy_frame2': np.arange(10)},
columns=['dummy_frame1', 'dummy_frame2'])
meta = pysat.Meta()
meta['dummy_frame1'] = {'units': 'A'}
meta['dummy_frame2'] = {'desc': 'nothing'}
self.testInst['help'] = {'data': [frame]*len(self.testInst.data.index),
'units': 'V',
'long_name': 'The Doors',
'meta': meta}
assert self.testInst.meta['help', 'long_name'] == 'The Doors'
assert 'dummy_frame1' in self.testInst.meta.ho_data['help']
assert 'dummy_frame2' in self.testInst.meta.ho_data['help']
assert 'dummy_frame1' in self.testInst.meta['help']['children']
assert 'dummy_frame2' in self.testInst.meta['help']['children']
assert self.testInst.meta['help']['children'].has_attr('units')
assert self.testInst.meta['help']['children'].has_attr('desc')
assert self.testInst.meta['help']['children']['dummy_frame1',
'units'] == 'A'
assert self.testInst.meta['help']['children']['dummy_frame1',
'desc'] == ''
assert self.testInst.meta['help']['children']['dummy_frame2',
'desc'] == 'nothing'
def test_inst_ho_data_assign_meta_then_data(self):
self.testInst.load(2009, 1)
frame = pds.DataFrame({'dummy_frame1': np.arange(10),
'dummy_frame2': np.arange(10)},
columns=['dummy_frame1', 'dummy_frame2'])
meta = pysat.Meta()
meta['dummy_frame1'] = {'units': 'A'}
meta['dummy_frame2'] = {'desc': 'nothing'}
print('Setting original data')
self.testInst['help'] = {'data': [frame]*len(self.testInst.data.index),
'units': 'V',
'long_name': 'The Doors',
'meta': meta}
self.testInst['help'] = [frame]*len(self.testInst.data.index)
assert self.testInst.meta['help', 'long_name'] == 'The Doors'
assert 'dummy_frame1' in self.testInst.meta.ho_data['help']
assert 'dummy_frame2' in self.testInst.meta.ho_data['help']
assert 'dummy_frame1' in self.testInst.meta['help']['children']
assert 'dummy_frame2' in self.testInst.meta['help']['children']
assert self.testInst.meta['help']['children'].has_attr('units')
assert self.testInst.meta['help']['children'].has_attr('desc')
assert self.testInst.meta['help']['children']['dummy_frame1',
'units'] == 'A'
assert self.testInst.meta['help']['children']['dummy_frame1',
'desc'] == ''
assert self.testInst.meta['help']['children']['dummy_frame2',
'desc'] == 'nothing'
def test_inst_ho_data_assign_meta_different_labels(self):
self.testInst.load(2009, 1)
frame = pds.DataFrame({'dummy_frame1': np.arange(10),
'dummy_frame2': np.arange(10)},
columns=['dummy_frame1', 'dummy_frame2'])
meta = pysat.Meta(units_label='blah', desc_label='whoknew')
meta['dummy_frame1'] = {'blah': 'A'}
meta['dummy_frame2'] = {'whoknew': 'nothing'}
self.testInst['help'] = {'data': [frame]*len(self.testInst.data.index),
'units': 'V',
'long_name': 'The Doors',
'meta': meta}
assert self.testInst.meta['help', 'long_name'] == 'The Doors'
assert 'dummy_frame1' in self.testInst.meta.ho_data['help']
assert 'dummy_frame2' in self.testInst.meta.ho_data['help']
assert 'dummy_frame1' in self.testInst.meta['help']['children']
assert 'dummy_frame2' in self.testInst.meta['help']['children']
assert self.testInst.meta['help']['children'].has_attr('units')
assert self.testInst.meta['help']['children'].has_attr('desc')
assert self.testInst.meta['help']['children']['dummy_frame1',
'units'] == 'A'
assert self.testInst.meta['help']['children']['dummy_frame1',
'desc'] == ''
assert self.testInst.meta['help']['children']['dummy_frame2',
'desc'] == 'nothing'
def test_inst_assign_from_meta(self):
self.testInst.load(2009, 1)
self.testInst['help'] = self.testInst['mlt']
self.testInst['help2'] = self.testInst['mlt']
self.testInst.meta['help2'] = self.testInst.meta['help']
assert self.testInst.meta['help2', 'long_name'] == 'help'
assert self.testInst.meta['help2', 'axis'] == 'help'
assert self.testInst.meta['help2', 'label'] == 'help'
assert self.testInst.meta['help2', 'notes'] == ''
assert np.isnan(self.testInst.meta['help2', 'fill'])
assert np.isnan(self.testInst.meta['help2', 'value_min'])
assert np.isnan(self.testInst.meta['help2', 'value_max'])
assert self.testInst.meta['help2', 'units'] == ''
assert self.testInst.meta['help2', 'desc'] == ''
assert self.testInst.meta['help2', 'scale'] == 'linear'
assert 'children' not in self.testInst.meta.data.columns
assert 'help2' not in self.testInst.meta.keys_nD()
def test_inst_assign_from_meta_w_ho(self):
self.testInst.load(2009, 1)
frame = pds.DataFrame({'dummy_frame1': np.arange(10),
'dummy_frame2': np.arange(10)},
columns=['dummy_frame1', 'dummy_frame2'])
meta = pysat.Meta()
meta['dummy_frame1'] = {'units': 'A'}
meta['dummy_frame2'] = {'desc': 'nothing'}
self.testInst['help'] = {'data': [frame]*len(self.testInst.data.index),
'units': 'V',
'long_name': 'The Doors',
'meta': meta}
self.testInst['help2'] = self.testInst['help']
self.testInst.meta['help2'] = self.testInst.meta['help']
assert self.testInst.meta['help'].children['dummy_frame1',
'units'] == 'A'
assert self.testInst.meta['help2', 'long_name'] == 'The Doors'
assert 'dummy_frame1' in self.testInst.meta.ho_data['help2']
assert 'dummy_frame2' in self.testInst.meta.ho_data['help2']
assert 'dummy_frame1' in self.testInst.meta['help2']['children']
assert 'dummy_frame2' in self.testInst.meta['help2']['children']
assert self.testInst.meta['help2']['children'].has_attr('units')
assert self.testInst.meta['help2']['children'].has_attr('desc')
assert self.testInst.meta['help2']['children']['dummy_frame1',
'desc'] == ''
assert self.testInst.meta['help2']['children']['dummy_frame2',
'desc'] == 'nothing'
assert 'children' not in self.testInst.meta.data.columns
def test_inst_assign_from_meta_w_ho_then_update(self):
self.testInst.load(2009, 1)
frame = pds.DataFrame({'dummy_frame1': np.arange(10),
'dummy_frame2': np.arange(10)},
columns=['dummy_frame1', 'dummy_frame2'])
meta = pysat.Meta()
meta['dummy_frame1'] = {'units': 'A'}
meta['dummy_frame2'] = {'desc': 'nothing'}
self.testInst['help'] = {'data': [frame]*len(self.testInst.data.index),
'units': 'V',
'name': 'The Doors',
'meta': meta}
self.testInst['help2'] = self.testInst['help']
self.testInst.meta['help2'] = self.testInst.meta['help']
new_meta = self.testInst.meta['help2'].children
new_meta['dummy_frame1'] = {'units': 'Amps',
'desc': 'something',
'label': 'John Wick',
'axis': 'Reeves',
}
self.testInst.meta['help2'] = new_meta
self.testInst.meta['help2'] = {'label': 'The Doors Return'}
# print('yoyo: ', self.testInst.meta['help']['children']
# ['dummy_frame1', 'units'])
assert self.testInst.meta['help']['children']['dummy_frame1',
'units'] == 'A'
assert self.testInst.meta['help2', 'name'] == 'The Doors'
assert self.testInst.meta['help2', 'label'] == 'The Doors Return'
assert 'dummy_frame1' in self.testInst.meta.ho_data['help2']
assert 'dummy_frame2' in self.testInst.meta.ho_data['help2']
assert 'dummy_frame1' in self.testInst.meta['help2']['children']
assert 'dummy_frame2' in self.testInst.meta['help2']['children']
assert self.testInst.meta['help2']['children'].has_attr('units')
assert self.testInst.meta['help2']['children'].has_attr('desc')
assert self.testInst.meta['help2']['children']['dummy_frame1',
'desc'] == 'something'
assert self.testInst.meta['help2']['children']['dummy_frame2',
'desc'] == 'nothing'
assert self.testInst.meta['help2']['children']['dummy_frame1',
'units'] == 'Amps'
assert self.testInst.meta['help2']['children']['dummy_frame1',
'label'] == 'John Wick'
assert self.testInst.meta['help2']['children']['dummy_frame1',
'axis'] == 'Reeves'
assert 'children' not in self.testInst.meta.data.columns
def test_repr_call_runs(self):
self.testInst.meta['hi'] = {'units': 'yoyo', 'long_name': 'hello'}
print(self.testInst.meta)
assert True
def test_repr_call_runs_with_higher_order_data(self):
self.meta['param1'] = {'units': 'blank', 'long_name': u'parameter1',
'custom1': 14, 'custom2': np.NaN,
'custom3': 14.5, 'custom4': u'hello'}
self.testInst.meta['param0'] = {'units': 'basic',
'long_name': 'parameter0',
self.testInst.meta.fill_label: '10',
'CUSTOM4': 143}
self.testInst.meta['kiwi'] = self.meta
print(self.testInst.meta)
assert True
def test_basic_pops(self):
self.meta['new1'] = {'units': 'hey1', 'long_name': 'crew',
'value_min': 0, 'value_max': 1}
self.meta['new2'] = {'units': 'hey', 'long_name': 'boo',
'description': 'boohoo', 'fill': 1,
'value_min': 0, 'value_max': 1}
# create then assign higher order meta data
meta2 = pysat.Meta(name_label='long_name')
meta2['new31'] = {'units': 'hey3', 'long_name': 'crew_brew', 'fill': 1,
'value_min': 0, 'value_max': 1}
self.meta['new3'] = meta2
aa = self.meta.pop('new3')
assert np.all(aa['children'] == meta2)
# ensure lower metadata created when ho data assigned
assert aa['units'] == ''
assert aa['long_name'] == 'new3'
m1 = self.meta['new2']
m2 = self.meta.pop('new2')
assert m1['children'] is None
assert m2['children'] is None
for key in m1.index:
if key not in ['children']:
assert m1[key] == m2[key]
# make sure both have the same indexes
assert np.all(m1.index == m2.index)
@raises(KeyError)
def test_basic_pops_w_bad_key(self):
self.meta['new1'] = {'units': 'hey1', 'long_name': 'crew',
'value_min': 0, 'value_max': 1}
self.meta['new2'] = {'units': 'hey', 'long_name': 'boo',
'description': 'boohoo', 'fill': 1,
'value_min': 0, 'value_max': 1}
_ = self.meta.pop('new4')
@raises(KeyError)
def test_basic_getitem_w_bad_key_string(self):
self.meta['new4']
@raises(NotImplementedError)
def test_basic_getitem_w_integer(self):
self.meta[1]
def test_basic_equality(self):
self.meta['new1'] = {'units': 'hey1', 'long_name': 'crew'}
self.meta['new2'] = {'units': 'hey', 'long_name': 'boo',
'description': 'boohoo', 'fill': np.NaN}
# ensure things are the same
meta2 = self.meta.copy()
assert (meta2 == self.meta)
# different way to create meta object
meta3 = pysat.Meta()
meta3['new1'] = self.meta['new1']
meta3['new2'] = self.meta['new2']
assert (meta3 == self.meta)
# make sure differences matter
self.meta['new2'] = {'fill': 1}
assert not (meta2 == self.meta)
def test_basic_concat(self):
self.meta['new1'] = {'units': 'hey1', 'long_name': 'crew'}
self.meta['new2'] = {'units': 'hey', 'long_name': 'boo',
'description': 'boohoo'}
meta2 = pysat.Meta()
meta2['new3'] = {'units': 'hey3', 'long_name': 'crew_brew'}
self.meta = self.meta.concat(meta2)
assert (self.meta['new3'].units == 'hey3')
@raises(RuntimeError)
def test_concat_w_name_collision_strict(self):
self.meta['new1'] = {'units': 'hey1', 'long_name': 'crew'}
self.meta['new2'] = {'units': 'hey', 'long_name': 'boo',
'description': 'boohoo'}
meta2 = pysat.Meta()
meta2['new2'] = {'units': 'hey2', 'long_name': 'crew_brew'}
meta2['new3'] = {'units': 'hey3', 'long_name': 'crew_brew'}
self.meta = self.meta.concat(meta2, strict=True)
def test_basic_concat_w_ho(self):
self.meta['new1'] = {'units': 'hey1', 'long_name': 'crew'}
self.meta['new2'] = {'units': 'hey', 'long_name': 'boo',
'description': 'boohoo'}
meta2 = pysat.Meta()
meta2['new3'] = {'units': 'hey3', 'long_name': 'crew_brew'}
meta3 = pysat.Meta()
meta3['new41'] = {'units': 'hey4', 'long_name': 'crew_brew',
'bob_level': 'max'}
meta2['new4'] = meta3
self.meta = self.meta.concat(meta2)
assert (self.meta['new3'].units == 'hey3')
assert (self.meta['new4'].children['new41'].units == 'hey4')
@raises(RuntimeError)
def test_basic_concat_w_ho_collision_strict(self):
self.meta['new1'] = {'units': 'hey1', 'long_name': 'crew'}
self.meta['new2'] = {'units': 'hey', 'long_name': 'boo',
'description': 'boohoo'}
meta2 = pysat.Meta()
meta2['new31'] = {'units': 'hey3', 'long_name': 'crew_brew'}
self.meta['new3'] = meta2
meta3 = pysat.Meta()
meta3['new31'] = {'units': 'hey4', 'long_name': 'crew_brew',
'bob_level': 'max'}
meta2['new3'] = meta3
self.meta = self.meta.concat(meta2, strict=True)
def test_basic_concat_w_ho_collision_not_strict(self):
self.meta['new1'] = {'units': 'hey1', 'long_name': 'crew'}
self.meta['new2'] = {'units': 'hey', 'long_name': 'boo',
'description': 'boohoo'}
meta2 = pysat.Meta()
meta2['new3'] = {'units': 'hey3', 'long_name': 'crew_brew'}
meta3 = pysat.Meta()
meta3['new41'] = {'units': 'hey4', 'long_name': 'crew_brew',
'bob_level': 'max'}
meta2['new3'] = meta3
self.meta = self.meta.concat(meta2, strict=False)
assert self.meta['new3'].children['new41'].units == 'hey4'
assert self.meta['new3'].children['new41'].bob_level == 'max'
assert self.meta['new2'].units == 'hey'
def test_basic_concat_w_ho_collisions_not_strict(self):
self.meta['new1'] = {'units': 'hey1', 'long_name': 'crew'}
self.meta['new2'] = {'units': 'hey', 'long_name': 'boo',
'description': 'boohoo'}
meta2 = pysat.Meta()
meta2['new31'] = {'units': 'hey3', 'long_name': 'crew_brew'}
self.meta['new3'] = meta2
meta3 = pysat.Meta()
meta3['new31'] = {'units': 'hey4', 'long_name': 'crew_brew',
'bob_level': 'max'}
meta2['new3'] = meta3
self.meta = self.meta.concat(meta2, strict=False)
assert self.meta['new3'].children['new31'].units == 'hey4'
assert self.meta['new3'].children['new31'].bob_level == 'max'
assert self.meta['new2'].units == 'hey'
def test_basic_meta_assignment(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
assert (self.meta['new'].units == 'hey')
assert (self.meta['new'].long_name == 'boo')
def test_basic_meta_assignment_w_Series(self):
self.meta['new'] = pds.Series({'units': 'hey', 'long_name': 'boo'})
assert (self.meta['new'].units == 'hey')
assert (self.meta['new'].long_name == 'boo')
def test_multiple_meta_assignment(self):
self.meta[['new', 'new2']] = {'units': ['hey', 'hey2'],
'long_name': ['boo', 'boo2']}
assert self.meta['new'].units == 'hey'
assert self.meta['new'].long_name == 'boo'
assert self.meta['new2'].units == 'hey2'
assert self.meta['new2'].long_name == 'boo2'
def test_multiple_meta_retrieval(self):
self.meta[['new', 'new2']] = {'units': ['hey', 'hey2'],
'long_name': ['boo', 'boo2']}
self.meta[['new', 'new2']]
        self.meta[['new', 'new2'], :]
        self.meta[:, 'units']
        self.meta['new', ('units', 'long_name')]
def test_multiple_meta_ho_data_retrieval(self):
meta = pysat.Meta()
meta['dm'] = {'units': 'hey', 'long_name': 'boo'}
meta['rpa'] = {'units': 'crazy', 'long_name': 'boo_whoo'}
self.meta[['higher', 'lower']] = {'meta': [meta, None],
'units': [None, 'boo'],
'long_name': [None, 'boohoo']}
assert self.meta['lower'].units == 'boo'
assert self.meta['lower'].long_name == 'boohoo'
assert self.meta['higher'].children == meta
        self.meta['higher', ('axis', 'scale')]
@raises(ValueError)
def test_multiple_meta_assignment_error(self):
self.meta[['new', 'new2']] = {'units': ['hey', 'hey2'],
'long_name': ['boo']}
def test_replace_meta_units(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
self.meta['new'] = {'units': 'yep'}
assert (self.meta['new'].units == 'yep')
assert (self.meta['new'].long_name == 'boo')
def test_replace_meta_long_name(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
self.meta['new'] = {'long_name': 'yep'}
assert (self.meta['new'].units == 'hey')
assert (self.meta['new'].long_name == 'yep')
def test_add_additional_metadata_types(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'boo',
'description': 'boohoo'}
assert (self.meta['new'].units == 'hey')
assert (self.meta['new'].long_name == 'boo')
assert (self.meta['new'].description == 'boohoo')
def test_add_meta_then_add_additional_metadata_types(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'crew'}
self.meta['new'] = {'units': 'hey', 'long_name': 'boo',
'description': 'boohoo'}
assert self.meta['new'].units == 'hey'
assert self.meta['new'].long_name == 'boo'
assert self.meta['new'].description == 'boohoo'
def test_add_meta_with_custom_then_add_additional_metadata_types(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'crew',
'description': 'boohoo'}
self.meta['new'] = {'units': 'hey2', 'long_name': 'boo'}
self.meta['new2'] = {'units': 'heyy', 'long_name': 'hoo'}
self.meta['new3'] = {'units': 'hey3', 'long_name': 'crew3',
'description': 'boohoo3'}
assert self.meta['new'].units == 'hey2'
assert self.meta['new'].long_name == 'boo'
assert self.meta['new'].description == 'boohoo'
assert self.meta['new3'].description == 'boohoo3'
assert self.meta['new2'].long_name == 'hoo'
def test_add_meta_then_add_different_additional_metadata_types(self):
self.meta['new1'] = {'units': 'hey1', 'long_name': 'crew'}
self.meta['new2'] = {'units': 'hey', 'long_name': 'boo',
'description': 'boohoo'}
assert self.meta['new2'].units == 'hey'
assert self.meta['new2'].long_name == 'boo'
assert self.meta['new2'].description == 'boohoo'
assert self.meta['new1'].units == 'hey1'
assert self.meta['new1'].long_name == 'crew'
assert np.isnan(self.meta['new1'].description)
def test_add_meta_then_partially_add_additional_metadata_types(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'crew'}
self.meta['new'] = {'long_name': 'boo', 'description': 'boohoo'}
assert self.meta['new'].units == 'hey'
assert self.meta['new'].long_name == 'boo'
assert self.meta['new'].description == 'boohoo'
def test_meta_equality(self):
assert self.testInst.meta == self.testInst.meta
def test_false_meta_equality(self):
assert not (self.testInst.meta == self.testInst)
def test_equality_with_higher_order_meta(self):
self.meta = pysat.Meta()
meta = pysat.Meta()
meta['dm'] = {'units': 'hey', 'long_name': 'boo'}
meta['rpa'] = {'units': 'crazy', 'long_name': 'boo_whoo'}
self.meta['higher'] = meta
meta2 = pysat.Meta()
meta2['dm'] = {'units': 'hey', 'long_name': 'boo'}
meta2['rpa'] = {'units': 'crazy', 'long_name': 'boo_whoo'}
meta3 = pysat.Meta()
meta3['higher'] = meta2
assert meta3 == self.meta
assert self.meta == meta3
def test_inequality_with_higher_order_meta(self):
self.meta = pysat.Meta()
meta = pysat.Meta()
meta['dm'] = {'units': 'hey', 'long_name': 'boo', 'radn': 'raiden'}
meta['rpa'] = {'units': 'crazy', 'long_name': 'boo_whoo'}
self.meta['higher'] = meta
meta2 = pysat.Meta()
meta2['dm'] = {'units': 'hey', 'long_name': 'boo'}
meta2['rpa'] = {'units': 'crazy', 'long_name': 'boo_whoo'}
meta3 = pysat.Meta()
meta3['higher'] = meta2
assert not (meta3 == self.meta)
assert not (self.meta == meta3)
def test_inequality_with_higher_order_meta2(self):
self.meta = pysat.Meta()
meta = pysat.Meta()
meta['dm'] = {'units': 'hey2', 'long_name': 'boo'}
meta['rpa'] = {'units': 'crazy', 'long_name': 'boo_whoo'}
self.meta['higher'] = meta
meta2 = pysat.Meta()
meta2['dm'] = {'units': 'hey', 'long_name': 'boo'}
meta2['rpa'] = {'units': 'crazy', 'long_name': 'boo_whoo'}
meta3 = pysat.Meta()
meta3['higher'] = meta2
assert not (meta3 == self.meta)
assert not (self.meta == meta3)
def test_inequality_with_higher_order_meta3(self):
self.meta = pysat.Meta()
meta = pysat.Meta()
meta['dm'] = {'units': 'hey', 'long_name': 'boo'}
meta['rpa'] = {'units': 'crazy', 'long_name': 'boo_whoo'}
self.meta['higher'] = meta
self.meta['lower'] = {'units': 'yoyooy'}
meta2 = pysat.Meta()
meta2['dm'] = {'units': 'hey', 'long_name': 'boo'}
meta2['rpa'] = {'units': 'crazy', 'long_name': 'boo_whoo'}
meta3 = pysat.Meta()
meta3['higher'] = meta2
assert not (meta3 == self.meta)
assert not (self.meta == meta3)
def test_assign_higher_order_meta(self):
meta = pysat.Meta()
meta['dm'] = {'units': 'hey', 'long_name': 'boo'}
meta['rpa'] = {'units': 'crazy', 'long_name': 'boo_whoo'}
self.meta['higher'] = meta
def test_assign_higher_order_meta_from_dict(self):
meta = pysat.Meta()
meta['dm'] = {'units': 'hey', 'long_name': 'boo'}
meta['rpa'] = {'units': 'crazy', 'long_name': 'boo_whoo'}
self.meta['higher'] = {'meta': meta}
def test_assign_higher_order_meta_from_dict_correct(self):
meta = pysat.Meta()
meta['dm'] = {'units': 'hey', 'long_name': 'boo'}
meta['rpa'] = {'units': 'crazy', 'long_name': 'boo_whoo'}
self.meta['higher'] = {'meta': meta}
assert self.meta['higher'].children == meta
def test_assign_higher_order_meta_from_dict_w_multiple(self):
meta = pysat.Meta()
meta['dm'] = {'units': 'hey', 'long_name': 'boo'}
meta['rpa'] = {'units': 'crazy', 'long_name': 'boo_whoo'}
self.meta[['higher', 'lower']] = {'meta': [meta, None],
'units': [None, 'boo'],
'long_name': [None, 'boohoo']}
assert self.meta['lower'].units == 'boo'
assert self.meta['lower'].long_name == 'boohoo'
assert self.meta['higher'].children == meta
def test_assign_higher_order_meta_from_dict_w_multiple_2(self):
meta = pysat.Meta()
meta['dm'] = {'units': 'hey', 'long_name': 'boo'}
meta['rpa'] = {'units': 'crazy', 'long_name': 'boo_whoo'}
self.meta[['higher', 'lower', 'lower2']] = \
{'meta': [meta, None, meta],
'units': [None, 'boo', None],
'long_name': [None, 'boohoo', None]}
assert self.meta['lower'].units == 'boo'
assert self.meta['lower'].long_name == 'boohoo'
assert self.meta['higher'].children == meta
def test_create_new_metadata_from_old(self):
meta = pysat.Meta()
meta['dm'] = {'units': 'hey', 'long_name': 'boo'}
meta['rpa'] = {'units': 'crazy', 'long_name': 'boo_whoo'}
self.meta[['higher', 'lower', 'lower2']] = \
{'meta': [meta, None, meta],
'units': [None, 'boo', None],
'long_name': [None, 'boohoo', None],
'fill': [1, 1, 1],
'value_min': [0, 0, 0],
'value_max': [1, 1, 1]}
meta2 = pysat.Meta(metadata=self.meta.data)
m1 = meta2['lower']
m2 = self.meta['lower']
assert m1['children'] is None
assert m2['children'] is None
for key in m1.index:
if key not in ['children']:
assert m1[key] == m2[key]
# make sure both have the same indexes
assert np.all(m1.index == m2.index)
# command below doesn't work because 'children' is None
# assert np.all(meta2['lower'] == self.meta['lower'])
def test_replace_meta_units_list(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
self.meta['new2'] = {'units': 'hey2', 'long_name': 'boo2'}
self.meta[['new2', 'new']] = {'units': ['yeppers', 'yep']}
assert self.meta['new'].units == 'yep'
assert self.meta['new'].long_name == 'boo'
assert self.meta['new2'].units == 'yeppers'
assert self.meta['new2'].long_name == 'boo2'
def test_meta_repr_functions(self):
self.testInst.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
self.testInst.meta['new2'] = {'units': 'hey2', 'long_name': 'boo2'}
print(self.testInst.meta)
# if it doesn't produce an error, we presume it works
# how do you test a print??
assert True
def test_meta_csv_load(self):
import os
name = os.path.join(pysat.__path__[0], 'tests', 'cindi_ivm_meta.txt')
mdata = pysat.Meta.from_csv(name=name, na_values=[], # index_col=2,
keep_default_na=False,
col_names=['name', 'long_name', 'idx',
'units', 'description'])
check = []
# print(mdata['yrdoy'])
check.append(mdata['yrdoy'].long_name == 'Date')
check.append(mdata['unit_mer_z'].long_name ==
'Unit Vector - Meridional Dir - S/C z')
check.append(mdata['iv_mer'].description ==
'Constructed using IGRF mag field.')
assert np.all(check)
# assign multiple values to default
def test_multiple_input_names_null_value(self):
self.meta[['test1', 'test2']] = {}
check1 = self.meta['test1', 'units'] == ''
check2 = self.meta['test2', 'long_name'] == 'test2'
assert check1 & check2
def test_multiple_input_names_null_value_preexisting_values(self):
self.meta[['test1', 'test2']] = {'units': ['degrees', 'hams'],
'long_name': ['testing', 'further']}
self.meta[['test1', 'test2']] = {}
check1 = self.meta['test1', 'units'] == 'degrees'
check2 = self.meta['test2', 'long_name'] == 'further'
assert check1 & check2
# test behaviors related to case changes, 'units' vs 'Units'
def test_assign_Units(self):
self.meta = pysat.Meta(units_label='Units', name_label='Long_Name')
self.meta['new'] = {'Units': 'hey', 'Long_Name': 'boo'}
self.meta['new2'] = {'Units': 'hey2', 'Long_Name': 'boo2'}
assert ((self.meta['new'].Units == 'hey') &
(self.meta['new'].Long_Name == 'boo') &
(self.meta['new2'].Units == 'hey2') &
(self.meta['new2'].Long_Name == 'boo2'))
@raises(AttributeError)
def test_assign_Units_no_units(self):
self.meta = pysat.Meta(units_label='Units', name_label='Long_Name')
self.meta['new'] = {'Units': 'hey', 'Long_Name': 'boo'}
self.meta['new2'] = {'Units': 'hey2', 'Long_Name': 'boo2'}
self.meta['new'].units
def test_get_Units_wrong_case(self):
self.meta = pysat.Meta(units_label='Units', name_label='Long_Name')
self.meta['new'] = {'Units': 'hey', 'Long_Name': 'boo'}
self.meta['new2'] = {'Units': 'hey2', 'Long_Name': 'boo2'}
assert ((self.meta['new', 'units'] == 'hey') &
(self.meta['new', 'long_name'] == 'boo') &
(self.meta['new2', 'units'] == 'hey2') &
(self.meta['new2', 'long_name'] == 'boo2'))
def test_set_Units_wrong_case(self):
self.meta = pysat.Meta(units_label='Units', name_label='Long_Name')
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
self.meta['new2'] = {'units': 'hey2', 'long_name': 'boo2'}
assert self.meta['new'].Units == 'hey'
assert self.meta['new'].Long_Name == 'boo'
assert self.meta['new2'].Units == 'hey2'
assert self.meta['new2'].Long_Name == 'boo2'
def test_repeated_set_Units_wrong_case(self):
self.meta = pysat.Meta(units_label='Units', name_label='Long_Name')
for i in np.arange(10):
self.meta['new'] = {'units': 'hey%d' % i, 'long_name': 'boo%d' % i}
self.meta['new_%d' % i] = {'units': 'hey%d' % i,
'long_name': 'boo%d' % i}
for i in np.arange(10):
self.meta['new_5'] = {'units': 'hey%d' % i,
'long_name': 'boo%d' % i}
self.meta['new_%d' % i] = {'units': 'heyhey%d' % i,
'long_name': 'booboo%d' % i}
assert self.meta['new'].Units == 'hey9'
assert self.meta['new'].Long_Name == 'boo9'
assert self.meta['new_9'].Units == 'heyhey9'
assert self.meta['new_9'].Long_Name == 'booboo9'
assert self.meta['new_5'].Units == 'hey9'
assert self.meta['new_5'].Long_Name == 'boo9'
def test_change_Units_and_Name_case(self):
self.meta = pysat.Meta(units_label='units', name_label='long_name')
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
self.meta['new2'] = {'units': 'hey2', 'long_name': 'boo2'}
self.meta.units_label = 'Units'
self.meta.name_label = 'Long_Name'
assert ((self.meta['new'].Units == 'hey') &
(self.meta['new'].Long_Name == 'boo') &
(self.meta['new2'].Units == 'hey2') &
(self.meta['new2'].Long_Name == 'boo2'))
def test_change_Units_and_Name_case_w_ho(self):
self.meta = pysat.Meta(units_label='units', name_label='long_Name')
meta2 = pysat.Meta(units_label='units', name_label='long_Name')
meta2['new21'] = {'units': 'hey2', 'long_name': 'boo2'}
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
self.meta['new2'] = meta2
self.meta.units_label = 'Units'
self.meta.name_label = 'Long_Name'
assert ((self.meta['new'].Units == 'hey') &
(self.meta['new'].Long_Name == 'boo') &
(self.meta['new2'].children['new21'].Units == 'hey2') &
(self.meta['new2'].children['new21'].Long_Name == 'boo2'))
@raises(AttributeError)
def test_change_Units_and_Name_case_w_ho_wrong_case(self):
self.meta = pysat.Meta(units_label='units', name_label='long_Name')
meta2 = pysat.Meta(units_label='units', name_label='long_Name')
meta2['new21'] = {'units': 'hey2', 'long_name': 'boo2'}
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
self.meta['new2'] = meta2
self.meta.units_label = 'Units'
self.meta.name_label = 'Long_Name'
assert ((self.meta['new'].units == 'hey') &
(self.meta['new'].long_name == 'boo') &
(self.meta['new2'].children['new21'].units == 'hey2') &
(self.meta['new2'].children['new21'].long_name == 'boo2'))
def test_contains_case_insensitive(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
self.meta['new2'] = {'units': 'hey2', 'long_name': 'boo2'}
assert ('new2' in self.meta)
assert ('NEW2' in self.meta)
def test_contains_case_insensitive_w_ho(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
meta2 = pysat.Meta()
meta2['new21'] = {'units': 'hey2', 'long_name': 'boo2'}
self.meta['new2'] = meta2
assert ('new2' in self.meta)
assert ('NEW2' in self.meta)
assert not ('new21' in self.meta)
assert not ('NEW21' in self.meta)
def test_get_variable_name_case_preservation(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
self.meta['NEW2'] = {'units': 'hey2', 'long_name': 'boo2'}
assert ('NEW2' == self.meta.var_case_name('new2'))
assert ('NEW2' == self.meta.var_case_name('nEw2'))
assert ('NEW2' == self.meta.var_case_name('neW2'))
assert ('NEW2' == self.meta.var_case_name('NEW2'))
def test_get_attribute_name_case_preservation(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
self.meta['NEW2'] = {'units': 'hey2', 'long_name': 'boo2',
'YoYoYO': 'yolo'}
self.meta['new'] = {'yoyoyo': 'YOLO'}
assert (self.meta['new', 'yoyoyo'] == 'YOLO')
assert (self.meta['new', 'YoYoYO'] == 'YOLO')
assert (self.meta['new2', 'yoyoyo'] == 'yolo')
assert (self.meta['new2', 'YoYoYO'] == 'yolo')
def test_get_attribute_name_case_preservation_w_higher_order(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
meta2 = pysat.Meta()
meta2['NEW21'] = {'units': 'hey2', 'long_name': 'boo2',
'YoYoYO': 'yolo'}
self.meta['NEW2'] = meta2
self.meta['new'] = {'yoyoyo': 'YOLO'}
assert (self.meta.attr_case_name('YoYoYo') == 'YoYoYO')
assert (self.meta['new', 'yoyoyo'] == 'YOLO')
assert (self.meta['new', 'YoYoYO'] == 'YOLO')
assert (self.meta['new2'].children['new21', 'yoyoyo'] == 'yolo')
assert (self.meta['new2'].children['new21', 'YoYoYO'] == 'yolo')
assert (self.meta['new2'].children.attr_case_name('YoYoYo') ==
'YoYoYO')
def test_get_attribute_name_case_preservation_w_higher_order_2(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
meta2 = pysat.Meta()
meta2['NEW21'] = {'units': 'hey2', 'long_name': 'boo2',
'YoYoYO': 'yolo'}
self.meta['NEW2'] = meta2
self.meta['NEW'] = {'yoyoyo': 'YOLO'}
assert (self.meta.attr_case_name('YoYoYo') == 'YoYoYO')
assert (self.meta['new', 'yoyoyo'] == 'YOLO')
assert (self.meta['NEW', 'YoYoYO'] == 'YOLO')
assert (self.meta['new2'].children['new21', 'yoyoyo'] == 'yolo')
assert (self.meta['new2'].children['new21', 'YoYoYO'] == 'yolo')
assert (self.meta['new2'].children.attr_case_name('YoYoYo') ==
'YoYoYO')
def test_get_attribute_name_case_preservation_w_higher_order_reverse_order(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
meta2 = pysat.Meta()
meta2['NEW21'] = {'units': 'hey2', 'long_name': 'boo2',
'YoYoYO': 'yolo'}
self.meta['new'] = {'yoyoyo': 'YOLO'}
self.meta['NEW2'] = meta2
assert (self.meta.attr_case_name('YoYoYo') == 'yoyoyo')
assert (self.meta['new', 'yoyoyo'] == 'YOLO')
assert (self.meta['new', 'YoYoYO'] == 'YOLO')
assert (self.meta['new2'].children['new21', 'yoyoyo'] == 'yolo')
assert (self.meta['new2'].children['new21', 'YoYoYO'] == 'yolo')
assert (self.meta['new2'].children.attr_case_name('YoYoYo') ==
'yoyoyo')
def test_has_attr_name_case_preservation_w_higher_order_reverse_order(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
meta2 = pysat.Meta()
meta2['NEW21'] = {'units': 'hey2', 'long_name': 'boo2',
'YoYoYO': 'yolo'}
self.meta['new'] = {'yoyoyo': 'YOLO'}
self.meta['NEW2'] = meta2
assert (self.meta.has_attr('YoYoYo'))
assert (self.meta.has_attr('yoyoyo'))
assert not (self.meta.has_attr('YoYoYyo'))
def test_has_attr_name_case_preservation_w_higher_order(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
meta2 = pysat.Meta()
meta2['NEW21'] = {'units': 'hey2', 'long_name': 'boo2',
'YoYoYO': 'yolo'}
self.meta['NEW2'] = meta2
assert not (self.meta.has_attr('YoYoYo'))
assert not (self.meta.has_attr('yoyoyo'))
assert not (self.meta.has_attr('YoYoYyo'))
    # check that variable name case is preserved while access stays case-insensitive
def test_replace_meta_units_list_weird_case(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
self.meta['new2'] = {'units': 'hey2', 'long_name': 'boo2'}
self.meta[['NEW2', 'new']] = {'units': ['yeppers', 'yep']}
assert (self.meta['new'].units == 'yep')
assert (self.meta['new'].long_name == 'boo')
assert (self.meta['new2'].units == 'yeppers')
assert (self.meta['new2'].long_name == 'boo2')
# Test the attribute transfer function
def test_transfer_attributes_to_instrument(self):
self.meta.new_attribute = 'hello'
self.meta._yo_yo = 'yo yo'
self.meta.date = None
self.meta.transfer_attributes_to_instrument(self.testInst)
assert self.testInst.new_attribute == 'hello'
    # ensure attributes with a leading underscore are not transferred
@raises(AttributeError)
def test_transfer_attributes_to_instrument_leading_(self):
self.meta.new_attribute = 'hello'
self.meta._yo_yo = 'yo yo'
self.meta.date = None
self.meta.transfer_attributes_to_instrument(self.testInst)
self.testInst._yo_yo == 'yo yo'
    # ensure attributes with leading double underscores are not transferred
@raises(AttributeError)
def test_transfer_attributes_to_instrument_leading__(self):
self.meta.new_attribute = 'hello'
self.meta.__yo_yo = 'yo yo'
self.meta.date = None
self.meta.transfer_attributes_to_instrument(self.testInst)
self.testInst.__yo_yo == 'yo yo'
@raises(RuntimeError)
def test_transfer_attributes_to_instrument_strict_names(self):
self.meta.new_attribute = 'hello'
self.meta._yo_yo = 'yo yo'
self.meta.jojo_beans = 'yep!'
self.meta.name = 'Failure!'
self.meta.date = 'yo yo2'
self.testInst.load(2009, 1)
self.testInst.jojo_beans = 'nope!'
self.meta.transfer_attributes_to_instrument(self.testInst,
strict_names=True)
def test_merge_meta(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
meta2 = pysat.Meta()
meta2['NEW21'] = {'units': 'hey2', 'long_name': 'boo2',
'YoYoYO': 'yolo'}
self.meta.merge(meta2)
assert (self.meta['new'].units == 'hey')
assert (self.meta['new'].long_name == 'boo')
assert (self.meta['NEW21'].units == 'hey2')
assert (self.meta['NEW21'].long_name == 'boo2')
assert (self.meta['NEW21'].YoYoYO == 'yolo')
def test_drop_meta(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
self.meta['NEW21'] = {'units': 'hey2', 'long_name': 'boo2',
'YoYoYO': 'yolo'}
self.meta.drop(['new'])
assert not ('new' in self.meta.data.index)
assert (self.meta['NEW21'].units == 'hey2')
assert (self.meta['NEW21'].long_name == 'boo2')
assert (self.meta['NEW21'].YoYoYO == 'yolo')
def test_keep_meta(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
self.meta['NEW21'] = {'units': 'hey2', 'long_name': 'boo2',
'YoYoYO': 'yolo'}
self.meta.keep(['new21'])
assert not ('new' in self.meta.data.index)
assert (self.meta['NEW21'].units == 'hey2')
assert (self.meta['NEW21'].long_name == 'boo2')
assert (self.meta['NEW21'].YoYoYO == 'yolo')
|
bsd-3-clause
|
Canas/kaftools
|
examples/klms_bicycle.py
|
1
|
2703
|
from scipy.io import loadmat
import matplotlib.pyplot as plt
from kaftools.filters import KlmsFilter
from kaftools.kernels import GaussianKernel
if __name__ == "__main__":
    # Load data
mat = loadmat("data/bicycle_data.mat")
y_noise = mat['y_noise'][0] # voltage signal
    # Configure the KLMS filter
klms_params = {
'kernel': GaussianKernel(sigma=10),
'learning_rate': 5e-4,
'delay': 5,
}
klms = KlmsFilter(y_noise, y_noise)
klms.fit(**klms_params)
    # Plot results
# Transient
fsize = 26
fig = plt.figure(101, figsize=(16, 4))
ax = plt.gca()
plt.plot(klms.estimate, 'b', label='KLMS-X predictions', linewidth=4)
plt.plot(y_noise, color='red', marker='.', linestyle='None', ms=8, label='true voltage measurements')
leg = plt.legend(ncol=4, frameon=False, shadow=True, loc=9, prop={'size': fsize})
frame = leg.get_frame()
frame.set_facecolor('0.9')
plt.xlabel('time [sample]', size=fsize)
plt.ylabel('voltage', size=fsize)
plt.axis('tight')
plt.rc('xtick', labelsize=fsize)
plt.rc('ytick', labelsize=fsize)
plt.title('One-step-ahead prediction of voltage signal (KLMS-X)', size=fsize) # ,weight='bold')
plt.show()
# Steady-Transient
fsize = 26
fig = plt.figure(101, figsize=(16, 4))
ax = plt.gca()
plt.plot(klms.estimate, 'b', label='KLMS-X predictions', linewidth=4)
plt.plot(y_noise, color='red', marker='.', linestyle='None', ms=8, label='true voltage measurements')
leg = plt.legend(ncol=4, frameon=False, shadow=True, loc=9, prop={'size': fsize})
frame = leg.get_frame()
frame.set_facecolor('0.9')
plt.xlabel('time [sample]', size=fsize)
plt.ylabel('voltage', size=fsize)
plt.rc('xtick', labelsize=fsize)
plt.rc('ytick', labelsize=fsize)
plt.title('KLMS-X prediction: transition to steady-state region', size=fsize) # ,weight='bold')
plt.axis([750, 1250, 0, 1.6])
plt.show()
# Steady
fsize = 26
fig = plt.figure(101, figsize=(16, 4))
ax = plt.gca()
plt.plot(klms.estimate, 'b', label='KLMS-X predictions', linewidth=4)
plt.plot(y_noise, color='red', marker='.', linestyle='None', ms=8, label='true voltage measurements')
leg = plt.legend(ncol=4, frameon=False, shadow=True, loc=9, prop={'size': fsize})
frame = leg.get_frame()
frame.set_facecolor('0.9')
plt.xlabel('time [sample]', size=fsize)
plt.ylabel('voltage', size=fsize)
plt.axis('tight')
plt.rc('xtick', labelsize=fsize)
plt.rc('ytick', labelsize=fsize)
plt.title('KLMS-X prediction: steady-state region', size=fsize) # ,weight='bold')
plt.axis([2000, 2500, -0.9, 0.2])
plt.show()
|
mit
|
rahuldhote/scikit-learn
|
sklearn/metrics/tests/test_pairwise.py
|
105
|
22788
|
import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import _parallel_pairwise
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
def test_pairwise_distances():
    # Test the pairwise_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses sklearn metric, cityblock (function) is scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# "cosine" uses sklearn metric, cosine (function) is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Tests that precomputed metric returns pointer to, and not copy of, X.
S = np.dot(X, X.T)
S2 = pairwise_distances(S, metric="precomputed")
assert_true(S is S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
    # Test that a ValueError is raised if the metric is unknown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
def check_pairwise_parallel(func, metric, kwds):
rng = np.random.RandomState(0)
for make_data in (np.array, csr_matrix):
X = make_data(rng.random_sample((5, 4)))
Y = make_data(rng.random_sample((3, 4)))
try:
S = func(X, metric=metric, n_jobs=1, **kwds)
except (TypeError, ValueError) as exc:
# Not all metrics support sparse input
# ValueError may be triggered by bad callable
if make_data is csr_matrix:
assert_raises(type(exc), func, X, metric=metric,
n_jobs=2, **kwds)
continue
else:
raise
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
metrics = [(pairwise_distances, 'euclidean', {}),
(pairwise_distances, wminkowski, wminkowski_kwds),
(pairwise_distances, 'wminkowski', wminkowski_kwds),
(pairwise_kernels, 'polynomial', {'degree': 1}),
(pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
]
for func, metric, kwds in metrics:
yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
    # pairwise_distances should allow a callable metric where metric(x, x) != 0
# Knowing that the callable is a strict metric would allow the diagonal to
# be left uncalculated and set to 0.
assert_equal(pairwise_distances([[1]], metric=lambda x, y: 5)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
# Callable version of pairwise.rbf_kernel.
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
def test_pairwise_kernels():
# Test the pairwise_kernels helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "sigmoid", "polynomial", "linear", "chi2",
"additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {}
kwds['gamma'] = 0.1
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
    # Test the paired_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
for metric, func in iteritems(PAIRED_DISTANCES):
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
S3 = func(csr_matrix(X), csr_matrix(Y))
assert_array_almost_equal(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
            # Check that the pairwise_distances implementation
            # gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
# Check the callable implementation
S = paired_distances(X, Y, metric='manhattan')
S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
assert_array_almost_equal(S, S2)
    # Test that a ValueError is raised when the lengths of X and Y differ
Y = rng.random_sample((3, 4))
assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
# Check pairwise minimum distances computation for any metric
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y, dtype=np.float32)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean sklearn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
# Check the pairwise Euclidean distances computation
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
# Paired distances
def test_paired_euclidean_distances():
# Check the paired Euclidean distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_euclidean_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
# Check the paired manhattan distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_manhattan_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
# Valid kernels should be symmetric
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are their squared norm
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
# the diagonal elements of a rbf kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_cosine_similarity_sparse_output():
# Test if cosine_similarity correctly produces sparse output.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False)
assert_true(issparse(K1))
K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine")
assert_array_almost_equal(K1.todense(), K2)
def test_cosine_similarity():
# Test the cosine_similarity.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
        # Test that the cosine kernel is equal to a linear kernel when the
        # data has been previously normalized by L2-norm.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
# Ensure that pairwise array check works for dense matrices.
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
# Ensure that if XA and XB are given correctly, they return as equal.
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
XB = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_paired_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
# Ensure an error is raised if the dimensions are different.
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XB = np.resize(np.arange(4 * 9), (4, 9))
assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
# Ensure an error is raised on 1D input arrays.
XA = np.arange(45)
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.resize(np.arange(45), (5, 9))
XB = np.arange(32)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
# Ensures that checks return valid sparse matrices.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
# Turns a numpy matrix (any n-dimensional array) into tuples.
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
def test_check_tuple_input():
# Ensures that checks return valid tuples.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
# Ensures that type float32 is preserved.
XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_equal(XA_checked.dtype, np.float32)
# both float32
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA_checked.dtype, np.float32)
assert_equal(XB_checked.dtype, np.float32)
# mismatched A
XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
XB)
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
# mismatched B
XA_checked, XB_checked = check_pairwise_arrays(XA,
XB.astype(np.float))
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
|
bsd-3-clause
|
mikemoorester/ESM
|
esm.py
|
1
|
65733
|
#!/usr/bin/env python
from __future__ import division, print_function, absolute_import
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
import re
import gzip
import calendar
import os, sys
import datetime as dt
from scipy import interpolate
from scipy.stats.stats import nanmean, nanmedian, nanstd
#from scipy import sparse
from scipy import stats
import antenna as ant
import residuals as res
import gpsTime as gt
import GamitStationFile as gsf
import svnav
def loglikelihood(meas,model):
#def loglikelihood(meas,model,sd):
# Calculate negative log likelihood
#LL = -np.sum( stats.norm.logpdf(meas, loc=model, scale=sd ) )
n2 = np.size(meas)/2.
tmp = np.subtract(meas,model)
SSR = np.dot(tmp.T,tmp)
LL = -np.log(SSR)*n2
LL -= (1+np.log(np.pi/n2) )*n2
return LL
def calcAIC(llh,dof):
aic = -2.*llh + 2.*(dof)
return aic
# corrected AIC for a small number of observations
def calcAICC(llh,dof,numObs):
return -2. * llh + 2. * dof * numObs / (numObs - dof - 1.)
def calcBIC(llh,dof,numObs):
bic = -2.*llh + np.log(numObs) * dof
return bic
def reject_outliers(data, m=5):
return data[abs(data - np.mean(data)) < m * np.std(data)]
def reject_abs(data, val):
return data[abs(data) < val]
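# Hedged sketch (added for illustration, not part of the original module):
# reject_outliers keeps values within m standard deviations of the mean, while
# reject_abs keeps values below an absolute threshold.
def _example_reject_helpers():
    d = np.array([1., 2., 1.5, 100.])
    print(reject_abs(d, 50.))        # -> [1.  2.  1.5]
    print(reject_outliers(d, m=1))   # drops the value far from the mean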
def reject_outliers_arg(data,nSigma):
"""
    Return the indices of the values that lie within nSigma standard
    deviations of the mean (single pass)
"""
criterion = ( (data[:] < (data[:].mean() + data[:].std() * nSigma)) &
(data[:] > (data[:].mean() - data[:].std() * nSigma)) )
ind = np.array(np.where(criterion))[0]
return ind
def reject_outliers_byelevation_arg(data,nSigma,zenSpacing=0.5):
zen = np.linspace(0,90,int(90./zenSpacing)+1)
tmp = []
for z in zen:
criterion = ( (data[:,2] < (z + zenSpacing/2.)) &
(data[:,2] > (z - zenSpacing/2.)) )
ind = np.array(np.where(criterion))[0]
rout = reject_outliers_arg(data[ind,3],nSigma)
tmp.append(rout.tolist())
return tmp
def reject_outliers_elevation(data,nSigma,zenSpacing=0.5):
zen = np.linspace(0,90,int(90./zenSpacing)+1)
for z in zen:
criterion = ( (data[:,0] < (z + zenSpacing/2.)) &
(data[:,0] > (z - zenSpacing/2.)) )
ind = np.array(np.where(criterion))
ind = ind.reshape((np.size(ind),))
tdata = np.zeros((np.size(ind),3))
tdata = data[ind,:]
din = data[ind,2].reshape((np.size(ind),))
rout = reject_outliers_arg(din,5)
        # if it's the first iteration, initialise tmp
if z < zenSpacing:
tmp = tdata[rout,:]
else:
tmp = np.vstack((tmp,tdata[rout,:]))
return tmp
def blockMedian(data, azSpacing=0.5,zenSpacing=0.5):
"""
    bMedian, bMedianStd = blockMedian(residuals, azSpacing, zenSpacing)
    where,
        residuals  => array with columns [azimuth, zenith, residual]
        azSpacing  => azimuth grid spacing in degrees (default 0.5)
        zenSpacing => zenith grid spacing in degrees (default 0.5)
    output:
        bMedian, bMedianStd => 2d matrices of shape [nAz, nZd]
    Example:
        bMedian, bMedianStd = blockMedian(residuals, 0.5, 0.5)
        bMedian.shape => (721, 181)
"""
az = np.linspace(0,360.,int(360./azSpacing)+1)
zz = np.linspace(0,90,int(90./zenSpacing)+1)
azCtr = 0
iCtr = 0
bMedian = np.zeros((az.size,zz.size))
bMedianStd = np.zeros((az.size,zz.size))
for i in az:
if(i - azSpacing/2. < 0) :
criterion = (data[:,0] < (i + azSpacing/2.)) | (data[:,0] > (360. - azSpacing/2.) )
else:
criterion = (data[:,0] < (i + azSpacing/2.)) & (data[:,0] > (i - azSpacing/2.) )
ind = np.array(np.where(criterion))[0]
#print("Azimuth,ind:",np.shape(ind),ind)
jCtr = 0
if ind.size == 0:
for j in zz :
bMedian[iCtr,jCtr] = float('NaN')
jCtr += 1
else:
tmp = data[ind,:]
for j in zz:
# disregard any observation above 80 in zenith, too noisy
if j >= 80:
bMedian[iCtr,jCtr] = float('NaN')
else:
criterion = (tmp[:,1] < (j + zenSpacing/2.)) & (tmp[:,1] > (j - zenSpacing/2.) )
indZ = np.array(np.where(criterion))[0]
if indZ.size > 3 :
bMedian[iCtr,jCtr] = nanmedian( reject_outliers(reject_abs( tmp[indZ,2],70. ),5.))
bMedianStd[iCtr,jCtr] = nanstd(reject_outliers(reject_abs( tmp[indZ,2],70. ),5.))
else:
bMedian[iCtr,jCtr] = float('NaN')
jCtr += 1
iCtr += 1
return bMedian, bMedianStd
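# Hedged usage sketch (added for illustration, not part of the original module):
# blockMedian expects an array with columns [azimuth, zenith, residual]; the
# synthetic data below only serve to show the shape of the 0.5-degree grid.
def _example_block_median():
    fake = np.column_stack((np.random.uniform(0., 360., 5000),
                            np.random.uniform(0., 90., 5000),
                            np.random.randn(5000)))
    med, med_std = blockMedian(fake, azSpacing=0.5, zenSpacing=0.5)
    print(np.shape(med))   # -> (721, 181)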
def interpolate_eleMean(model):
""" Salim --- When the residuals are NaN, replace them with the mean of
# the all the data at the same elevation
"""
    # Get mean of columns (data at the same elevation) without taking NaNs into account
el_mean = nanmean(model,axis=0)
#print(el_mean)
# Find indices for NaNs, and replace them by the column mean
ind_nan = np.where(np.isnan(model))
model[ind_nan] = np.take(el_mean,ind_nan[1])
return model
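# Hedged sketch (added for illustration, not part of the original module): gaps
# in the grid are filled with the column (same-zenith) mean, which is what
# np.take(el_mean, ind_nan[1]) does above.
def _example_interpolate_eleMean():
    m = np.array([[1., np.nan], [3., 4.]])
    print(interpolate_eleMean(m))   # the NaN becomes 4.0 (mean of its column)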
def modelStats(model,data, azSpacing=0.5,zenSpacing=0.5):
"""
    mse, rr, rms, rms2 = modelStats(model, data, azSpacing, zenSpacing)
    where,
        model => gridded model of shape [nAz, nZd]
        data  => array with columns [azimuth, zenith, residual]
    output:
        mse  => mean square error of the model against the data
        rr   => coefficient of determination (R squared)
        rms, rms2 => RMS of the residual and regression sums of squares
"""
az = np.linspace(0,360.,int(360./azSpacing)+1)
zz = np.linspace(0,90,int(90./zenSpacing)+1)
azCtr = 0
iCtr = 0
chi = 0
SS_tot = 0
SS_res = 0
SS_reg = 0
test_ctr = 0
reg_ctr = 0
for i in az:
if(i - azSpacing/2. < 0) :
criterion = (data[:,0] < (i + azSpacing/2.)) | (data[:,0] > (360. - azSpacing/2.) )
else:
criterion = (data[:,0] < (i + azSpacing/2.)) & (data[:,0] > (i - azSpacing/2.) )
ind = np.array(np.where(criterion))
if ind.size > 0:
tmp = data[ind,:]
tmp = tmp.reshape(tmp.shape[1],tmp.shape[2])
jCtr = 0
for j in zz:
# disregard any observation above 80 in zenith, too noisy
if j < 80. + zenSpacing:
                    criterion = (tmp[:,1] < (j + zenSpacing/2.)) & (tmp[:,1] > (j - zenSpacing/2.) )
                    indZ = np.array(np.where(criterion))[0]
                    tmpZ = np.array( tmp[indZ[:],2] )
if indZ.size > 3 and not np.isnan(model[iCtr,jCtr]): # and (model[iCtr,jCtr] > 0.00001 or model[iCtr,jCtr] < -0.00001):
test_data = reject_outliers(reject_abs( tmp[indZ,2],70. ),5.)
#print(i,j,test_data,model[iCtr,jCtr])
if test_data.size > 0:
y_mean = np.mean(test_data)
SS_reg += (model[iCtr,jCtr] - y_mean)**2
reg_ctr += 1
for obs in test_data:
#chi += (obs - model[iCtr,jCtr]) ** 2 / model[iCtr,jCtr]
SS_tot += (obs - y_mean) ** 2
SS_res += (obs - model[iCtr,jCtr])**2
test_ctr += 1
jCtr += 1
iCtr += 1
rr = 1. - SS_res/SS_tot
rms = np.sqrt(SS_res) * 1./test_ctr
# gives an indication of how different the models would be between the test and training data set
rms2 = np.sqrt(SS_reg) * 1./reg_ctr
# Used in matlab instead of rr
norm_res = np.sqrt(SS_res)
mse = 1./(2.*test_ctr) * SS_res
return mse,rr,rms,rms2
# calculate the cost function, that is the Mean Square Error
def calcMSE(model,data,azGridSpacing=0.5,zenGridSpacing=0.5):
mse = 0
az = np.linspace(0,360, int(360./azGridSpacing) )
zen = np.linspace(0,90, int(90./zenGridSpacing)+1 )
model = np.nan_to_num(model)
model_test = interpolate.interp2d(az, zen, model.reshape(az.size * zen.size,), kind='linear')
for i in range(0,np.shape(data)[0]):
mse += (data[i,3] - model_test(data[i,1],data[i,2]))[0]**2
mse = 1./(2.*np.shape(data)[0]) * mse
return mse
def setPlotFontSize(ax,fsize):
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(fsize)
return ax
def plotModel(model,figname):
az = np.linspace(0, 360, np.shape(model)[0])
zz = np.linspace(0, 90, np.shape(model)[1])
plt.ioff()
# fig = plt.figure(figsize=(3.62, 2.76))
#ax = fig.add_subplot(111,polar=True)
ax = plt.subplot(111,polar=True)
ax.set_theta_direction(-1)
ax.set_theta_offset(np.radians(90.))
ax.set_ylim([0,1])
ax.set_rgrids((0.00001, np.radians(20)/np.pi*2, np.radians(40)/np.pi*2,np.radians(60)/np.pi*2,np.radians(80)/np.pi*2),labels=('0', '20', '40', '60', '80'),angle=180)
ma,mz = np.meshgrid(az,zz,indexing='ij')
ma = ma.reshape(ma.size,)
mz = mz.reshape(mz.size,)
polar = ax.scatter(np.radians(ma), np.radians(mz)/np.pi*2., c=model[:,:], s=50, alpha=1., cmap=cm.RdBu,vmin=-15,vmax=15, lw=0)
cbar = plt.colorbar(polar,shrink=0.75,pad=.10)
cbar.ax.tick_params(labelsize=8)
cbar.set_label('ESM (mm)',size=8)
ax = setPlotFontSize(ax,8)
plt.tight_layout()
plt.savefig(figname)
plt.close()
#plt.show()
return
def plotModelElevationSlices(model,figname):
zz = np.linspace(0, 90, np.shape(model)[1])
ele = 90. - zz[::-1]
zenRes = model[0,:]
eleRes = zenRes[::-1]
plt.ioff()
ax = plt.subplot(111)
for i in range(0,np.shape(model)[0]):
#print("Plotting Slice:",i)
zenRes = model[i,:]
eleRes = zenRes[::-1]
ax.plot(ele,eleRes)
#ax.set_ylim([0,1])
ax = setPlotFontSize(ax,8)
plt.tight_layout()
plt.savefig(figname)
plt.close()
return
def plotModelsElevationSlices(models,figname):
zz = np.linspace(0, 90, np.shape(models)[2])
ele = 90. - zz[::-1]
plt.ioff()
ax = plt.subplot(111)
for i in range(0,np.shape(models)[1]):
#print("Plotting Slice:",i)
for j in range(0,np.shape(models)[0]):
print("From Model segment:",j)
zenRes = models[j,i,:]
eleRes = zenRes[::-1]
ax.plot(ele,eleRes)
ax = setPlotFontSize(ax,8)
plt.tight_layout()
plt.savefig(figname)
plt.close()
return
def plotModelsElevationSlices3D(models,figname):
az = np.linspace(0, 360, np.shape(models)[1])
zz = np.linspace(0, 90, np.shape(models)[2])
ele = 90. - zz[::-1]
dd = range(0,np.shape(models)[1])
mz,md = np.meshgrid(zz,dd,indexing='ij')
md = md.reshape(md.size,)
mz = mz.reshape(mz.size,)
ax = plt.subplot(111,projection='3d')
for i in range(0,np.shape(models)[1]):
for j in range(0,np.shape(models)[0]):
zenRes = models[j,i,:]
eleRes = zenRes[::-1]
blah = np.ones(np.size(zenRes))*j
ax.plot(ele,blah,eleRes)
ax = setPlotFontSize(ax,8)
plt.tight_layout()
plt.savefig(figname)
plt.close()
return
def print_antex_file_header(f):
print(' 1.4 M ANTEX VERSION / SYST',file=f)
print('A PCV TYPE / REFANT',file=f)
print(' END OF HEADER',file=f)
return
# TODO fix gridspacing to be dynamic
def print_antex_header(antType,valid_from,valid_to,f):
"""
    print_antex_header(antType, valid_from, valid_to, f)
    Write the per-antenna ANTEX header block for antType, valid over the
    period valid_from to valid_to, to the open file handle f.
"""
f.write(" START OF ANTENNA\n")
f.write("{:<20s} TYPE / SERIAL NO\n".format(antType))
f.write("CALCULATED ANU 0 25-MAR-11 METH / BY / # / DATE\n")
f.write(" 0.5 DAZI\n")
f.write(" 0.0 90.0 0.5 ZEN1 / ZEN2 / DZEN\n")
f.write(" 2 # OF FREQUENCIES\n")
    # valid_from is a dto (datetime object)
yyyy, MM, dd, hh, mm, ss, ms = gt.dt2validFrom(valid_from)
# force seconds to 0.00 for valid from
f.write("{:>6s} {:>5s} {:>5s} {:>5s} {:>5s} 0.0000000 VALID FROM\n".format(yyyy,MM,dd,hh,mm))
yyyy, MM, dd, hh, mm, ss, ms = gt.dt2validFrom(valid_to)
hh = str(23)
mm = str(59)
f.write("{:>6s} {:>5s} {:>5s} {:>5s} {:>5s} 59.9999999 VALID UNTIL\n".format(yyyy,MM,dd,hh,mm))
#
# Change the numbers after ANU to the same code as the previous antenna
#
f.write("ANU08_1648 SINEX CODE\n")
f.write("CALCULATED From MIT repro2 COMMENT\n")
return 1
def print_start_frequency(freq,pco,f):
f.write(" {:3s} START OF FREQUENCY\n".format(freq))
pco_n = "{:0.2f}".format(pco[0])
pco_n = "{:>10s}".format(pco_n)
pco_e = "{:0.2f}".format(pco[1])
pco_e = "{:>10s}".format(pco_e)
pco_u = "{:0.2f}".format(pco[2])
pco_u = "{:>10s}".format(pco_u)
f.write(pco_n+pco_e+pco_u+" NORTH / EAST / UP\n")
def print_antex_noazi(data,f):
noazi = "{:>8s}".format('NOAZI')
for d in data:
d = "{:>8.2f}".format(d)
noazi = noazi + d
f.write(noazi)
f.write("\n")
def print_antex_line(az,data,f):
az = "{:>8.1f}".format(az)
for d in data:
d = "{:>8.2f}".format(d)
az = az+d
f.write(az)
f.write("\n")
def print_end_frequency(freq,f):
f.write(" {:3s} END OF FREQUENCY\n".format(freq))
def print_end_antenna(f):
f.write(" END OF ANTENNA\n")
def create_esm(med,azGrid,zenGrid,antennas,antType):
    # add the block median residuals to an interpolated PCV model...
# args.grid should come from the antenna data based on the grid spacing of the antex file
antenna = ant.antennaType(antType,antennas)
dzen = antenna['dzen'][2]
x = np.linspace(0,360, int(360./dzen)+1 )
y = np.linspace(0,90, int(90./dzen)+1 )
L1_data = np.array(antenna['data'][0])
L2_data = np.array(antenna['data'][1])
# check to see if it is an elevation only model..
# if it is then copy the elevation fields to all of the azimuth fields
if np.shape(L1_data)[0] == 1:
L1_tmp = np.zeros((np.size(x),np.size(y)))
L2_tmp = np.zeros((np.size(x),np.size(y)))
# often elevation only models only go down to 80 degrees in zenith
for j in range(0,np.size(y)):
if j >= np.shape(L1_data)[1] :
L1_tmp[:,j] = 0.
L2_tmp[:,j] = 0.
else:
L1_tmp[:,j] = L1_data[0,j]
L2_tmp[:,j] = L2_data[0,j]
del L1_data, L2_data
L1_data = L1_tmp
L2_data = L2_tmp
tmp = L1_data.reshape(x.size * y.size,)
L1 = interpolate.interp2d(x, y, tmp, kind='linear')
L2 = interpolate.interp2d(x, y, L2_data.reshape(x.size * y.size,), kind='linear')
x_esm = np.linspace(0,360, int(360./azGrid)+1 )
y_esm = np.linspace(0,90, int(90./zenGrid)+1 )
esm = np.zeros((x_esm.size,y_esm.size,2))
i = 0
for az in x_esm :
j = 0
#==========================================
# med is in elevation angle order
# need to reverse the med array around so that it is in zenith order
#==========================================
for zen in y_esm :
if med[i,j] > 0.00001 or med[i,j] < -0.00001 :
esm[i,j,0] = med[i,j] + L1(az,zen)[0]
esm[i,j,1] = med[i,j] + L2(az,zen)[0]
else:
esm[i,j,0] = L1(az,zen)[0]
esm[i,j,1] = L2(az,zen)[0]
j += 1
i += 1
# catch the case where the residuals have not been averaged twice at az = 0
# PWL vs block median
if i > np.shape(med)[0]-1:
i = 0
#================================================================
#print("test interpolation for az=>360 zenith =>90 => 13.67:")
#az = 360.
#zen = 90.
#print(L1(az,zen),L1(zen,az),L1(az,90.-zen),L1(90.-zen,az))
#print("test interpolation for az=>355 zenith =>85 => 8.65:")
#az = 355.
#zen = 85.
#print(L1(az,zen),L1(zen,az),L1(az,90.-zen),L1(90.-zen,az))
#print(L1(355,80),L1(355,82.5),L1(355,85),L1(355,86),L1(355,87),L1(355,88),L1(355,89),L1(355,90))
return esm
def traverse_directory(args) :
"""
traverse_directory(args)
    Search through a specified GAMIT project for DPH residual files and
    consolidate them into a compressed L3 format (.CL3) for later analysis.
"""
siteRGX = re.compile('DPH.'+args.site.upper())
s = []
# report non-unique residuals
for root, dirs, files in os.walk(args.traverse):
path = root.split('/')
for gamitFile in files:
if siteRGX.search(gamitFile):
gamitFile = root+'/'+gamitFile
                # check for potential duplicates in the same path, only want to use one of the DPH files
if len(path[-1]) > 4:
regex = re.compile(root[:-2])
else:
regex = re.compile(root)
# only check for duplicates when there is more than one network
# being processed...
if args.network == 'yyyy_dddnN':
if len(s) == 0:
s.append(gamitFile)
else:
# for each element in s, check to see if the root path does not match
# any of the files already stored in the list
m = 0
for item in s:
if regex.search(item) :
m = 1
if not m :
s.append(gamitFile)
else:
s.append(gamitFile)
s.sort()
lines = ''
# Now loop through each file and consolidate the residuals
for dfile in s :
dphs = res.parseDPH(dfile)
# check if the dph files are being searched are from
#a GAMIT network of type yyyy/dddn?/
root, filename = os.path.split(dfile)
if args.network == 'yyyy_dddnN':
ddd = root[-5:-2]
year = int(root[-10:-6])
            startDT = dt.datetime(year, 1, 1)
startDT = startDT + dt.timedelta(days=(int(ddd) -1))
elif args.network == 'ddd':
ddd = root[-3:]
year = root[-8:-4]
            startDT = dt.datetime(int(year), 1, 1)
startDT = startDT + dt.timedelta(days=(int(ddd) -1))
line = res.consolidate(dphs,startDT)
lines = lines + line
# if its larger than 1GB dump it to a file
        # this is designed to keep the load on the file system lighter
if sys.getsizeof(lines) > 1073741824 :
f = gzip.open(args.save_file,'a',9)
f.write(lines)
f.close()
lines = ''
#print(lines)
# dump any remaining memory to file
f = gzip.open(args.save_file,'a',9)
f.write(lines)
f.close()
lines = ''
return
def satelliteModel(antenna,nadirData):
    # assuming a 14 degree nadir model at 1 deg intervals
ctr = 0
newNoAzi = []
# from the Nadir model force the value at 13.8 to be equal to 14.0
for val in antenna['noazi'] :
if ctr == 13:
antenna['noazi'][ctr] = (val + nadirData[ctr*5 -1])
elif ctr > 13:
antenna['noazi'][ctr] = val
else:
antenna['noazi'][ctr] = val + nadirData[ctr*5]
ctr +=1
return antenna
def calcNadirAngle(ele):
"""
Calculate the NADIR angle based on the station's elevation angle
"""
nadeg = np.arcsin(6378.0/26378.0 * np.cos(ele/180.*np.pi)) * 180./np.pi
return nadeg
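# Hedged sketch (added for illustration, not part of the original module): a
# quick sanity check of the nadir-angle mapping. At the horizon (elevation 0
# deg) the nadir angle peaks near 14 deg, which is why the satellite models
# used below only span 0 to 13.8 degrees.
def _example_calc_nadir_angle():
    for ele in (0., 30., 90.):
        print(ele, calcNadirAngle(ele))   # ~14.0, ~12.1, 0.0 degrees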
def applyNadirCorrection(svdat,nadirData,site_residuals):
"""
sr = applyNadirCorrection(nadirData,site_residuals)
Apply the nadir residual model to the carrier phase residuals.
nadirData is a dictionary of satellite corrections
nadirData['1'] = <array 70 elements> 0 to 13.8 degrees
"""
#print("Attempting to apply the corrections")
# form up a linear interpolater for each nadir model..
nadirAngles = np.linspace(0,13.8,70)
linearInt = {}
for svn in nadirData:
linearInt[svn] = interpolate.interp1d(nadirAngles ,nadirData[svn])
    # slow method, can't assume the PRN will be the same SV over time..
    # can break residuals into daily chunks and then apply correction
for i in range(0,np.shape(site_residuals)[0]):
nadeg = calcNadirAngle(site_residuals[i,2])
if nadeg > 13.8:
nadeg = 13.8
dto = gt.unix2dt(site_residuals[i,0])
svn = svnav.findSV_DTO(svdat,int(site_residuals[i,4]),dto)
#print("Looking for svn:",svn, int(site_residuals[i,4]),nadeg)
site_residuals[i,3] = site_residuals[i,3] + linearInt[str(svn)](nadeg)
return site_residuals
#def krig(site_residuals):
def pwlFly(site_residuals, azSpacing=0.5,zenSpacing=0.5):
"""
PWL piece-wise-linear interpolation fit of phase residuals
-construct a PWL fit for each azimuth bin, and then paste them all together to get
the full model
    -inversion is done within each bin
cdata -> compressed data
"""
tdata = res.reject_absVal(site_residuals,100.)
del site_residuals
data = res.reject_outliers_elevation(tdata,5,0.5)
del tdata
numd = np.shape(data)[0]
numZD = int(90.0/zenSpacing) + 1
    numAZ = int(360./azSpacing)
pwl_All = np.zeros((numAZ,numZD))
pwlSig_All = np.zeros((numAZ,numZD))
Bvec_complete = []
Sol_complete = []
meas_complete = []
model_complete = []
postchis = []
prechis = []
aics = []
bics = []
#w = 1;
for j in range(0,numAZ):
# Find only those value within this azimuth bin:
if(j - azSpacing/2. < 0) :
criterion = (data[:,1] < (j + azSpacing/2.)) | (data[:,1] > (360. - azSpacing/2.) )
else:
criterion = (data[:,1] < (j + azSpacing/2.)) & (data[:,1] > (j - azSpacing/2.) )
ind = np.array(np.where(criterion))[0]
azData =data[ind,:]
numd = np.shape(azData)[0]
#print("NUMD:",numd)
if numd < 2:
continue
#
        # Neq acts like a constraint on the model: a small value (0.001)
        # lets the model vary by ~1000 mm, while a large value (-> 1) forces
        # the model to stay closer to 0.
        # The normal matrix gets too large for many observations, so it is
        # best to build it on the fly.
#
Neq = np.eye(numZD,dtype=float)# * 0.001
Apart = np.zeros((numd,numZD))
for i in range(0,numd):
iz = int(np.floor(azData[i,2]/zenSpacing))
Apart[i,iz] = (1.-(azData[i,2]-iz*zenSpacing)/zenSpacing)
Apart[i,iz+1] = (azData[i,2]-iz*zenSpacing)/zenSpacing
            w = np.sin(azData[i,2]/180.*np.pi)
for k in range(iz,iz+2):
for l in range(iz,iz+2):
Neq[k,l] = Neq[k,l] + (Apart[i,l]*Apart[i,k]) * 1./w**2
prechi = np.dot(azData[:,3].T,azData[:,3])
Bvec = np.dot(Apart.T,azData[:,3])
for val in Bvec:
Bvec_complete.append(val)
Cov = np.linalg.pinv(Neq)
Sol = np.dot(Cov,Bvec)
for val in Sol:
Sol_complete.append(val)
#Qxx = np.dot(Apart.T,Apart)
#Qvv = np.subtract( np.eye(numd) , np.dot(np.dot(Apart,Qxx),Apart.T))
#sd = np.squeeze(np.diag(Qvv))
#dx = np.dot(np.linalg.pinv(Qxx),Bvec)
#dl = np.dot(Apart,dx)
postchi = prechi - np.dot(Bvec.T,Sol)
postchis.append(np.sqrt(postchi/numd))
prechis.append(np.sqrt(prechi/numd))
pwlsig = np.sqrt(np.diag(Cov) *postchi/numd)
# calculate the model values for each obs
model = np.dot(Apart,Sol) #np.zeros(numd)
for d in range(0,numd):
model_complete.append(model[d])
meas_complete.append(azData[d,3])
# zen = azData[d,2]
# iz = int(np.floor(azData[d,2]/zenSpacing))
# #model[d] = Sol[iz]
#print("STATS:",numd,np.sqrt(prechi/numd),np.sqrt(postchi/numd),np.sqrt((prechi-postchi)/numd),gls_results.rsquared,gls_results.aic,gls_results.bic)
# loglikelihood(meas,model,sd)
#sd = np.squeeze(np.diag(Qvv))
#print("meas, model, sd:",np.shape(azData),np.shape(model),np.shape(sd))
f = loglikelihood(azData[:,3],model)
dof = numd - np.shape(Sol)[0]
aic = calcAIC(f,dof)
bic = calcBIC(f,dof,numd)
aics.append(aic)
bics.append(bic)
#print("=========================")
pwl_All[j,:] = Sol
pwlSig_All[j,:] = pwlsig
del Sol,pwlsig,Cov,Bvec,Neq,Apart,azData,ind
#A_complete = np.squeeze(np.asarray(A_complete.todense()))
#print("A shape",np.shape(A_complete))
print("Doing a fit to the data")
f = loglikelihood(np.array(meas_complete),np.array(model_complete))
numd = np.size(meas_complete)
dof = numd - np.shape(Sol_complete)[0]
aic = calcAIC(f,dof)
bic = calcBIC(f,dof,numd)
#prechi = np.dot(data[:,3].T,data[:,3])
prechi = np.dot(np.array(meas_complete).T,np.array(meas_complete))
postchi = prechi - np.dot(np.array(Bvec_complete).T,np.array(Sol_complete))
#print("My loglikelihood:",f,aic,bic,dof,numd)
print("STATS:",numd,np.sqrt(prechi/numd),np.sqrt(postchi/numd),np.sqrt((prechi-postchi)/numd),aic,bic)
return pwl_All, pwlSig_All
def pwl(site_residuals, azSpacing=0.5,zenSpacing=0.5):
"""
PWL piece-wise-linear interpolation fit of phase residuals
-construct a PWL fit for each azimuth bin, and then paste them all together to get
the full model
    -inversion is done within each bin
cdata -> compressed data
"""
tdata = res.reject_absVal(site_residuals,100.)
del site_residuals
data = res.reject_outliers_elevation(tdata,5,0.5)
del tdata
Bvec_complete = []
Sol_complete = []
model_complete = []
meas_complete = []
numd = np.shape(data)[0]
numZD = int(90.0/zenSpacing) + 1
numAZ = int(360./azSpacing)
print("numAZ",numAZ)
pwl_All = np.zeros((numAZ,numZD))
pwlSig_All = np.zeros((numAZ,numZD))
#pwl_All = np.zeros((numZD,numAZ))
#pwlSig_All = np.zeros((numZD,numAZ))
for j in range(0,numAZ):
# Find only those value within this azimuth bin:
if(j - azSpacing/2. < 0) :
criterion = (data[:,1] < (j + azSpacing/2.)) | (data[:,1] > (360. - azSpacing/2.) )
else:
criterion = (data[:,1] < (j + azSpacing/2.)) & (data[:,1] > (j - azSpacing/2.) )
ind = np.array(np.where(criterion))[0]
azData =data[ind,:]
numd = np.shape(azData)[0]
if numd < 2:
continue
        # Neq acts like a constraint on the model: a small value (0.001)
        # lets the model vary by ~1000 mm, while a large value (-> 1) forces
        # the model to stay closer to 0
Neq = np.eye(numZD,dtype=float) * 0.001
Apart = np.zeros((numd,numZD))
#aiz = j* int(np.floor(360./zenSpacing))
for i in range(0,numd):
iz = int(np.floor(azData[i,2]/zenSpacing)) #+ aiz
Apart[i,iz] = (1.-(azData[i,2]-float(iz)*zenSpacing)/zenSpacing)
Apart[i,iz+1] = (azData[i,2]-float(iz)*zenSpacing)/zenSpacing
#Apart_1 = (1.-(azData[i,2]-float(iz)*zenSpacing)/zenSpacing)
#Apart_2 = (azData[i,2]-float(iz)*zenSpacing)/zenSpacing
prechi = np.dot(azData[:,3].T,azData[:,3])
Neq = np.add(Neq, np.dot(Apart.T,Apart) )
Bvec = np.dot(Apart.T,azData[:,3])
for val in Bvec:
Bvec_complete.append(val)
Cov = np.linalg.pinv(Neq)
Sol = np.dot(Cov,Bvec)
for val in Sol:
Sol_complete.append(val)
postchi = prechi - np.dot(Bvec.T,Sol)
pwlsig = np.sqrt(np.diag(Cov) *postchi/numd)
model = np.dot(Apart,Sol)
for d in range(0,numd):
meas_complete.append(azData[d,3])
model_complete.append(model[d])
pwl_All[j,:] = Sol
pwlSig_All[j,:] = pwlsig
#print("Sol:",Sol)
#print("PWL:",pwl_All[j,:])
#pwl_All[:,j] = Sol
#print("Sol:",np.shape(Sol),np.shape(pwl_All))
#pwlSig_All[:,j] = pwlsig
del Sol,pwlsig,Cov,Bvec,Neq,Apart,azData,ind
# Calculate the AIC and BIC values...
f = loglikelihood(np.array(meas_complete),np.array(model_complete))
numd = np.size(meas_complete)
dof = numd - np.shape(Sol_complete)[0]
aic = calcAIC(f,dof)
bic = calcBIC(f,dof,numd)
prechi = np.dot(np.array(meas_complete).T,np.array(meas_complete))
postchi = prechi - np.dot(np.array(Bvec_complete).T,np.array(Sol_complete))
#print("My loglikelihood:",f,aic,bic,dof,numd)
#print("STATS:",numd,np.sqrt(prechi/numd),np.sqrt(postchi/numd),np.sqrt((prechi-postchi)/numd),aic,bic)
stats = {}
stats['prechi'] = np.sqrt(prechi/numd)
stats['postchi'] = np.sqrt(postchi/numd)
stats['chi_inc'] = np.sqrt((prechi-postchi)/numd)
stats['aic'] = aic
stats['bic'] = bic
return pwl_All, pwlSig_All, stats
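# Note (added for illustration, not part of the original module): pwl() expects
# site_residuals with columns [epoch, azimuth, zenith, residual, ...] (columns
# 1-3 are used by the binning above) and returns the gridded solution and its
# sigma, both shaped (numAZ, numZD), plus a stats dictionary with the keys
# prechi, postchi, chi_inc, aic and bic.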
def pwlELE(site_residuals, azSpacing=0.5,zenSpacing=0.5,store=False,site="site"):
"""
    PWL piece-wise-linear interpolation fit of phase residuals as a function
    of zenith angle only (no azimuth dependence)
cdata -> compressed data
"""
tdata = res.reject_absVal(site_residuals,100.)
del site_residuals
data = res.reject_outliers_elevation(tdata,5,0.5)
del tdata
numd = np.shape(data)[0]
numZD = int(90.0/zenSpacing) + 1
Neq = np.eye(numZD,dtype=float) * 0.01
#print("Neq",np.shape(Neq))
Apart = np.zeros((numd,numZD))
#print("Apart:",np.shape(Apart))
sd = np.zeros(numd)
for i in range(0,numd):
        iz = int(np.floor(data[i,2]/zenSpacing))
sd[i] = np.sin(data[i,2]/180.*np.pi)
Apart[i,iz] = (1.-(data[i,2]-iz*zenSpacing)/zenSpacing)
Apart[i,iz+1] = (data[i,2]-iz*zenSpacing)/zenSpacing
prechi = np.dot(data[:,3].T,data[:,3])
#print("prechi:",prechi,numd,np.sqrt(prechi/numd))
Neq = np.add(Neq, np.dot(Apart.T,Apart) )
#print("Neq:",np.shape(Neq))
Bvec = np.dot(Apart.T,data[:,3])
#print("Bvec:",np.shape(Bvec))
Cov = np.linalg.pinv(Neq)
#print("Cov",np.shape(Cov))
Sol = np.dot(Cov,Bvec)
#print("Sol",np.shape(Sol))
postchi = prechi - np.dot(Bvec.T,Sol)
#print("postchi:",postchi)
pwl = Sol
#print("pwl:",np.shape(pwl))
pwlsig = np.sqrt(np.diag(Cov) *postchi/numd)
#print("pwlsig",np.shape(pwlsig))
model = np.dot(Apart,Sol)
f = loglikelihood(data[:,3],model)
dof = numd - np.shape(Sol)[0]
aic = calcAIC(f,dof)
bic = calcBIC(f,dof,numd)
#print("My loglikelihood:",f,aic,bic,dof,numd)
#print("STATS:",numd,np.sqrt(prechi/numd),np.sqrt(postchi/numd),np.sqrt((prechi-postchi)/numd),aic,bic)
stats = {}
stats['prechi'] = np.sqrt(prechi/numd)
stats['postchi'] = np.sqrt(postchi/numd)
stats['chi_inc'] = np.sqrt((prechi-postchi)/numd)
stats['aic'] = aic
stats['bic'] = bic
    # Check to see if we should store the normal equations as a numpy archive
if store:
np.savez(site+'_pwlELE.npz',neq=Neq,atwb=Bvec)
return pwl,pwlsig,stats
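#==============================================================================
# Editorial illustration (not part of the original module): a minimal, hedged
# sketch of how the piece-wise-linear (PWL) design-matrix rows used by pwl()
# and pwlELE() are built. Each observation at zenith angle z is split linearly
# between its two neighbouring zenith nodes. The helper name and demo values
# are hypothetical and exist purely for illustration.
def _example_pwl_design_row(zenith_deg, zenSpacing=0.5):
    import numpy as np
    numZD = int(90.0 / zenSpacing) + 1
    row = np.zeros(numZD)
    iz = int(np.floor(zenith_deg / zenSpacing))         # lower node index
    iz = min(iz, numZD - 2)                             # keep iz+1 inside the grid at z = 90
    frac = (zenith_deg - iz * zenSpacing) / zenSpacing  # fractional distance to the upper node
    row[iz] = 1.0 - frac                                # weight on the lower node
    row[iz + 1] = frac                                  # weight on the upper node
    return row
# e.g. _example_pwl_design_row(10.3) puts 0.4 on node 20 (10.0 deg) and 0.6 on node 21 (10.5 deg)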
def meanAdjust(site_residuals, azSpacing=0.5,zenSpacing=0.5):
"""
PWL piece-wise-linear interpolation fit of phase residuals
-construct a PWL fit for each azimuth bin, and then paste them all together to get
the full model
-inversion is doen within each bin
cdata -> compressed data
"""
tdata = res.reject_absVal(site_residuals,100.)
del site_residuals
data = res.reject_outliers_elevation(tdata,5,0.5)
del tdata
numd = np.shape(data)[0]
numZD = int(90.0/zenSpacing) + 1
numAZ = int(360./zenSpacing)
pwl_All = np.zeros((numAZ,numZD))
pwlSig_All = np.zeros((numAZ,numZD))
postchis = []
prechis = []
model_complete = []
meas_complete = []
Bvec_complete = []
Sol_complete = []
for j in range(0,numAZ):
# Find only those value within this azimuth bin:
if(j - azSpacing/2. < 0) :
criterion = (data[:,1] < (j + azSpacing/2.)) | (data[:,1] > (360. - azSpacing/2.) )
else:
criterion = (data[:,1] < (j + azSpacing/2.)) & (data[:,1] > (j - azSpacing/2.) )
ind = np.array(np.where(criterion))[0]
azData =data[ind,:]
numd = np.shape(azData)[0]
if numd < 2:
continue
Neq = np.eye(numZD,dtype=float) * 0.001
Apart = np.zeros((numd,numZD))
for i in range(0,numd):
iz = int(np.floor(azData[i,2]/zenSpacing))
Apart[i,iz] = 1.
prechi = np.dot(azData[:,3].T,azData[:,3])
Neq = np.add(Neq, np.dot(Apart.T,Apart) )
Bvec = np.dot(Apart.T,azData[:,3])
for val in Bvec:
Bvec_complete.append(val)
Cov = np.linalg.pinv(Neq)
Sol = np.dot(Cov,Bvec)
for val in Sol:
Sol_complete.append(val)
postchi = prechi - np.dot(Bvec.T,Sol)
pwlsig = np.sqrt(np.diag(Cov) *postchi/numd)
prechis.append(np.sqrt(prechi/numd))
postchis.append(np.sqrt(postchi/numd))
#print("STATS:",numd,np.sqrt(prechi/numd),np.sqrt(postchi/numd),np.sqrt((prechi-postchi)/numd))
model = np.dot(Apart,Sol)
for d in range(0,numd):
model_complete.append(model[d])
meas_complete.append(azData[d,3])
pwl_All[j,:] = Sol
pwlSig_All[j,:] = pwlsig
del Sol,pwlsig,Cov,Bvec,Neq,Apart,azData,ind
#overallPrechi = np.dot(data[:,3].T,data[:,3])
numd = np.size(meas_complete)
#print("OVERALL STATS:", np.mean(prechis),np.mean(postchis),np.sqrt(overallPrechi/numD))
#prechi = np.dot(data[:,3].T,data[:,3])
prechi = np.dot(np.array(meas_complete).T,np.array(meas_complete))
postchi = prechi - np.dot(np.array(Bvec_complete).T,np.array(Sol_complete))
f = loglikelihood(meas_complete,model_complete)
dof = numd - np.shape(Sol_complete)[0]
aic = calcAIC(f,dof)
bic = calcBIC(f,dof,numd)
#print("My loglikelihood:",f,aic,bic,dof,numd)
#print("STATS:",numd,np.sqrt(prechi/numd),np.sqrt(postchi/numd),np.sqrt((prechi-postchi)/numd),aic,bic)
stats = {}
stats['prechi'] = np.sqrt(prechi/numd)
stats['postchi'] = np.sqrt(postchi/numd)
stats['chi_inc'] = np.sqrt((prechi-postchi)/numd)
stats['aic'] = aic
stats['bic'] = bic
return pwl_All, pwlSig_All,stats
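#==============================================================================
# Editorial illustration (not part of the original module): a standalone, hedged
# sketch of the azimuth-bin selection used by pwl() and meanAdjust(), including
# how the first bin wraps across 0/360 degrees. The helper name is hypothetical
# and exists purely for illustration.
def _example_azimuth_bin_mask(az_deg, bin_centre, azSpacing=0.5):
    import numpy as np
    az_deg = np.asarray(az_deg, dtype=float)
    if bin_centre - azSpacing / 2. < 0:
        # the first bin straddles north: accept azimuths just below the upper
        # edge or just above (360 - half a bin width)
        return (az_deg < bin_centre + azSpacing / 2.) | (az_deg > 360. - azSpacing / 2.)
    return (az_deg < bin_centre + azSpacing / 2.) & (az_deg > bin_centre - azSpacing / 2.)
# e.g. _example_azimuth_bin_mask([0.1, 359.9, 180.0], 0) -> array([ True,  True, False])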
def meanAdjustELE(site_residuals, azSpacing=0.5,zenSpacing=0.5):
"""
PWL piece-wise-linear interpolation fit of phase residuals
cdata -> compressed data
"""
tdata = res.reject_absVal(site_residuals,100.)
del site_residuals
data = res.reject_outliers_elevation(tdata,5,0.5)
del tdata
numd = np.shape(data)[0]
numZD = int(90.0/zenSpacing) + 1
Neq = np.eye(numZD,dtype=float) * 0.01
Apart = np.zeros((numd,numZD))
sd = np.zeros(numd)
for i in range(0,numd):
iz = int(np.floor(data[i,2]/zenSpacing)) # integer bin index; a float cannot be used to index a numpy array
sd[i] = np.sin(data[i,2]/180.*np.pi)
Apart[i,iz] = 1.#-(data[i,2]-iz*zenSpacing)/zenSpacing)
prechi = np.dot(data[:,3].T,data[:,3])
Neq = np.add(Neq, np.dot(Apart.T,Apart) )
Bvec = np.dot(Apart.T,data[:,3])
Cov = np.linalg.pinv(Neq)
Sol = np.dot(Cov,Bvec)
postchi = prechi - np.dot(Bvec.T,Sol)
pwl = Sol
pwlsig = np.sqrt(np.diag(Cov) *postchi/numd)
model = np.dot(Apart,Sol)
f = loglikelihood(data[:,3],model)
dof = numd - np.shape(Sol)[0]
aic = calcAIC(f,dof)
bic = calcBIC(f,dof,numd)
#print("My loglikelihood:",f,aic,bic,dof,numd)
#print("STATS:",numd,np.sqrt(prechi/numd),np.sqrt(postchi/numd),np.sqrt((prechi-postchi)/numd),aic,bic)
stats = {}
stats['prechi'] = np.sqrt(prechi/numd)
stats['postchi'] = np.sqrt(postchi/numd)
stats['chi_inc'] = np.sqrt((prechi-postchi)/numd)
stats['aic'] = aic
stats['bic'] = bic
return pwl,pwlsig,stats
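#==============================================================================
# Editorial illustration (not part of the original module): the fits above report
# AIC and BIC through the module's loglikelihood()/calcAIC()/calcBIC() helpers,
# which are defined earlier in this file. Assuming the usual Gaussian definitions,
# a minimal standalone equivalent could look like the sketch below (the helper
# name is hypothetical and the formulas are the textbook ones, not necessarily
# identical to the module's own helpers).
def _example_information_criteria(residuals, n_params):
    import numpy as np
    r = np.asarray(residuals, dtype=float)
    n = r.size
    sigma2 = np.dot(r, r) / n                         # ML estimate of the residual variance
    logL = -0.5 * n * (np.log(2. * np.pi * sigma2) + 1.)
    aic = 2. * n_params - 2. * logL                   # Akaike information criterion
    bic = n_params * np.log(n) - 2. * logL            # Bayesian information criterion
    return aic, bic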
def nadirPlot(svnav,nadirData,i):
"""Plot the nadir-angle residuals of every SV belonging to satellite block i.
Note: relies on the module-level variables svdat and nadir being set by the caller."""
fig = plt.figure(figsize=(3.62, 2.76))
ax = fig.add_subplot(111)
for sv in nadirData:
blk = int(svnav.findBLK_SV(svdat,sv))
if blk == i:
ax.plot(nadir,nadirData[sv],'-',alpha=0.7,linewidth=1,label="SV "+str(sv))
ax.set_xlabel('Nadir Angle (degrees)',fontsize=8)
ax.set_ylabel('Residual (mm)',fontsize=8)
ax.set_xlim([0, 14])
ax.set_ylim([-5,5])
ax.legend(fontsize=8,ncol=3)
title = svnav.blockType(i)
ax.set_title(title,fontsize=8)
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(8)
return fig
#==============================================================================
#
# TODO:
# test plots
# error bars on elevation plot?
#=====================================
if __name__ == "__main__":
import warnings
warnings.filterwarnings("ignore")
import argparse
parser = argparse.ArgumentParser(prog='esm',description='Create an Empirical Site Model from one-way GAMIT phase residuals',
formatter_class=argparse.RawTextHelpFormatter,
epilog='''\
Example:
To create a consolidated phase residual file:
> python ~/gg/com/esm.py --trav /short/dk5/repro2/2012/ --site YAR2 --network yyyy_dddnN --save_file YAR2.2012.CL3.gz
To create a model:
> python ~/gg/com/esm.py --model --site yar2 -f ./t/YAR2.2012.CL3
''')
#===================================================================
# Meta data information required to create an ESM
#===================================================================
parser.add_argument('-a', '--antex', dest='antex', default="~/gg/tables/antmod.dat",help="Location of ANTEX file (default = ~/gg/tables/antmod.dat)")
parser.add_argument('--sf','--station_file', dest='station_file', default="~/gg/tables/station.info",help="GAMIT station file with metadata (default= ~/gg/tables/station.info)")
parser.add_argument('--sv','--svnav', dest="svnavFile",default="~/gg/tables/svnav.dat", help="Location of GAMIT svnav.dat")
parser.add_argument('-s', '--site', dest='site', required=True, help="SITE 4 character id")
#===================================================================
# Inputting the phase residuals options:
#===================================================================
parser.add_argument('-f', dest='resfile', default='',help="Consolidated one-way LC phase residuals")
#===================================================================
# Consolidate DPH file options:
#===================================================================
parser.add_argument('--save_file',dest='save_file',default='./',
help="Location to save the consolidated phase files")
parser.add_argument('--traverse',dest='traverse',
help="Location to search for DPH files from")
# only support yyyy_dddn? and ddd
parser.add_argument('--network',dest='network',default='yyyy_dddnN',choices=['yyyy_dddnN','ddd'],
help="Format of gps subnetworks")
#===================================================================
# Modelling options
#===================================================================
parser.add_argument('--model', dest='model', choices=['blkm','pwl','blkmadj'],help="Create an ESM\n (blkm = block median, pwl = piece-wise linear, blkmadj = bin-mean adjustment)")
parser.add_argument('-g', '--grid', dest='grid', default=5.,type=float,help="ANTEX grid spacing (default = 5 degrees)")
parser.add_argument('--esm_grid', dest='esm_grid', default=0.5, type=float,help="Grid spacing to use when creating an ESM (default = 0.5 degrees)")
# Interpolation/extrapolation options
# TODO: nearneighbour, polynomial, surface fit, etc..
parser.add_argument('-i','--interpolate',dest='interpolate',choices=['ele_mean'],
help="ele_mean use the elevation mean to fill any missing values in the model")
#===================================================================
# Plot options
#===================================================================
parser.add_argument('--polar',dest='polar', default=False, action='store_true', help="Produce a polar plot of the ESM phase residuals (not working; still in development)")
parser.add_argument('--elevation',dest='elevation', default=False, action='store_true', help="Produce an elevation dependent plot of ESM phase residuals")
#===================================================================
# Start from a consolidated CPH file of the DPH residuals
#parser.add_argument('--dph',dest='dphFile')
parser.add_argument('-o','--outfile',help='filename for ESM model (default = antmod.ssss)')
#===================================================================
# Satellite options
#===================================================================
parser.add_argument('--nadir',dest='nadir',help="location of satellite nadir residuals SV_RESIDUALS.ND3")
parser.add_argument('--nm','--nadirModel',dest='nadirModel',default=False,action='store_true',
help="Create an ESM model for the satellites")
parser.add_argument('--nadirPlot',dest='nadirPlot',default=False,action='store_true',help="Plot nadir residuals")
parser.add_argument('--nadirCorrection',dest='nadirCorrection',default=False,action='store_true',help="Apply the satellite Nadir correction to the phase residuals")
parser.add_argument('--store',dest='store',default=False,action='store_true',
help='Store the partials Neq and AtWl as a numpy binary file')
#===================================================================
args = parser.parse_args()
#===================================================================
# expand any home directory paths (~) to the full path, otherwise python won't find the file
if args.resfile : args.resfile = os.path.expanduser(args.resfile)
args.antex = os.path.expanduser(args.antex)
args.station_file = os.path.expanduser(args.station_file)
args.svnavFile = os.path.expanduser(args.svnavFile)
svdat = []
nadirData = {}
#===================================================================
# Look through the GAMIT processing subdirectories for DPH files
# belonging to a particular site.
#===================================================================
if args.traverse :
traverse_directory(args)
if args.model:
args.resfile = args.save_file
if args.nadir:
nadir = np.genfromtxt(args.nadir)
sv_nums = np.unique(nadir[:,2])
nadirDataStd = {}
for sv in sv_nums:
criterion = nadir[:,2] == sv
ind = np.array(np.where(criterion))[0]
nadir_medians = nanmean(nadir[ind,3:73],axis=0)
nadir_stdev = nanstd(nadir[ind,3:73],axis=0)
nadirData[str(int(sv))] = nadir_medians
#nadirDataStd[str(int(sv))] = nadir_stdev
if args.nadirPlot:
nadir = np.linspace(0,13.8, int(14.0/0.2) )
svdat = svnav.parseSVNAV(args.svnavFile)
# prepare a plot for each satellite block
figBLK = []
axBLK = []
fig1 = nadirPlot(svnav,nadirData,1)
plt.tight_layout()
title = svnav.blockType(1)
plt.savefig(title+".eps")
fig2 = nadirPlot(svnav,nadirData,2)
plt.tight_layout()
title = svnav.blockType(2)
plt.savefig(title+".eps")
fig3 = nadirPlot(svnav,nadirData,3)
plt.tight_layout()
title = svnav.blockType(3)
plt.savefig(title+".eps")
fig4 = nadirPlot(svnav,nadirData,4)
plt.tight_layout()
title = svnav.blockType(4)
plt.savefig(title+".eps")
fig5 = nadirPlot(svnav,nadirData,5)
plt.tight_layout()
title = svnav.blockType(5)
plt.savefig(title+".eps")
fig6 = nadirPlot(svnav,nadirData,6)
plt.tight_layout()
title = svnav.blockType(6)
plt.savefig(title+".eps")
fig7 = nadirPlot(svnav,nadirData,7)
plt.tight_layout()
title = svnav.blockType(7)
plt.savefig(title+".eps")
# Do a plot of all the satellites now..
fig = plt.figure(figsize=(3.62, 2.76))
ax = fig.add_subplot(111)
for sv in nadirData:
ax.plot(nadir,nadirData[sv],'-',alpha=0.7,linewidth=1)
ax.set_xlabel('Nadir Angle (degrees)',fontsize=8)
ax.set_ylabel('Residual (mm)',fontsize=8)
ax.set_xlim([0, 14])
ax.set_ylim([-5,5])
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(8)
plt.tight_layout()
plt.savefig("NadirResiduals_All.eps")
# Per-block plots are produced by nadirPlot() above; note that by this point the
# original nadir residual array has been replaced by the plotting axis, so the
# block numbers can no longer be recovered from it here.
plt.show()
if args.nadirModel:
# read in the antenna satellite model
antennas = ant.parseANTEX(args.antex)
with open('satmod.dat','w') as f:
ant.printAntexHeader(f)
for sv in nadirData:
svn = "{:03d}".format(int(sv))
scode = 'G' + str(svn)
antenna = ant.antennaScode(scode,antennas)
for a in antenna:
adjustedAnt = satelliteModel(a, nadirData[sv])
ant.printSatelliteModel(adjustedAnt,f)
if args.model or args.elevation or args.polar:
#===================================================================
# get the antenna information from an antex file
antennas = ant.parseANTEX(args.antex)
# read in the consolidated LC residuals
print("")
print("Reading in the consolidated phase residuals from:",args.resfile)
print("")
site_residuals = res.parseConsolidatedNumpy(args.resfile)
dt_start = gt.unix2dt(site_residuals[0,0])
res_start = int(dt_start.strftime("%Y") + dt_start.strftime("%j"))
dt_stop = gt.unix2dt(site_residuals[-1,0])
res_stop = int(dt_stop.strftime("%Y") + dt_stop.strftime("%j"))
print("\tResiduals run from:",res_start,"to:",res_stop)
if args.nadirCorrection:
svdat = svnav.parseSVNAV(args.svnavFile)
print("\n\t** Applying the Satellite dependent Nadir angle correction to the phase residuals")
print("svdat:",np.shape(svdat))
site_residuals = applyNadirCorrection(svdat,nadirData,site_residuals)
# work out how many models need to be created for the time period the residuals cover
# by checking the station file for changes in antenna type or radome type
print("")
print("Working out how many models need to be generated for",args.site.upper(),"using metadata obtained from:",args.station_file)
print("")
print("\t A new model will be formed whenever there is a change in:")
print("\t\t1) Antenna type")
print("\t\t2) Antenna serial number")
print("\t\t3) Antenna height")
print("\t\t4) Change of radome")
print("")
sdata = gsf.parseSite(args.station_file,args.site.upper())
change = gsf.determineESMChanges(dt_start,dt_stop,sdata)
# find the indices where the change occurs due to an antenna type / radome change
ind = gsf.antennaChange(sdata)
models = np.zeros((np.size(change['ind'])+1,int(360./args.esm_grid)+1,int(90/args.esm_grid)+1,2))
num_models = np.size(change['ind'])+1
print("\nNumber of models which need to be formed:", num_models)
ctr = 0
for i in range(0,num_models):
print("\t\tCreating model",i+1,"of",num_models)
minVal_dt = gt.ydhms2dt(change['start_yyyy'][i],change['start_ddd'][i],0,0,0)
maxVal_dt = gt.ydhms2dt(change['stop_yyyy'][i],change['stop_ddd'][i],0,0,0)
criterion = ( ( site_residuals[:,0] >= calendar.timegm(minVal_dt.utctimetuple()) ) &
( site_residuals[:,0] < calendar.timegm(maxVal_dt.utctimetuple()) ) )
mind = np.array(np.where(criterion))
change['valid_from'].append(minVal_dt)
change['valid_to'].append(maxVal_dt)
# get the correct antenna type for this station at this time
antType = gsf.antennaType(sdata,minVal_dt.strftime("%Y"),minVal_dt.strftime("%j"))
print("get station info:",antType,minVal_dt.strftime("%Y"),minVal_dt.strftime("%j"))
# do a block median with 5 sigma outlier detection at 0.5 degree grid
if args.model == 'blkm':
data = np.zeros((np.size(mind),3))
data[:,0] = site_residuals[mind,1]
data[:,1] = site_residuals[mind,2]
data[:,2] = site_residuals[mind,3]
med, medStd = blockMedian(data)
print("BLKM:",np.shape(med))
elif args.model == 'pwl':
med,pwl_sig,stats = pwl(site_residuals,args.esm_grid,args.esm_grid)
print("PWL ","prechi:{:.2f} postchi:{:.2f} AIC:{:.1f}".format(stats['prechi'],stats['postchi'],stats['chi_inc'],stats['aic']))
#med,pwl_sig = pwlFly(site_residuals,args.esm_grid,args.esm_grid)
# Compute the elevation-dependent model
med_ele, pwl_sig,stats_ele = pwlELE(site_residuals,args.esm_grid,args.esm_grid,args.store,args.site)
print("PWL_ELE ","prechi:{:.2f} postchi:{:.2f} AIC:{:.1f}".format(stats_ele['prechi'],stats_ele['postchi'],stats_ele['chi_inc'],stats_ele['aic']))
elif args.model == 'blkmadj':
med, medStd,stats = meanAdjust(site_residuals,args.esm_grid,args.esm_grid)
print("PWL ","prechi:{:.2f} postchi:{:.2f} AIC:{:.1f}".format(stats['prechi'],stats['postchi'],stats['chi_inc'],stats['aic']))
# Compute the elevation depenedent model
med_ele, pwl_sig,stats = meanAdjustELE(site_residuals,args.esm_grid,args.esm_grid)
print("med:",np.shape(med),"med_ele:",np.shape(med_ele))#,med_ele[93,:])
print("PWL_ELE ","prechi:{:.2f} postchi:{:.2f} AIC:{:.1f}".format(stats['prechi'],stats['postchi'],stats['chi_inc'],stats['aic']))
# check to see if any interpolation needs to be applied
if args.interpolate == 'ele_mean':
med = interpolate_eleMean(med)
print("BLKM:",np.shape(med))
# Take the block median residuals and add them to the ANTEX file
esm = create_esm(med, 0.5, 0.5, antennas,antType)
models[ctr,:,:,:] = esm
ctr +=1
# Now sort out any plotting requests
#===========================================================
# Do an elevation only plot of the residuals
#===========================================================
if args.elevation:
fig = plt.figure(figsize=(3.62, 2.76))
fig.canvas.set_window_title(args.site+"_elevationDependentResiduals_"+str(1)+".png")
ax = fig.add_subplot(111)
ele = np.linspace(0,90, int(90./0.5)+1 )
ele_model = []
nzen = int(90./args.esm_grid) + 1
naz = int(360./args.esm_grid) + 1
for i in range(0,720):
#ax.scatter(90.-ele,med[i,:],s=1,alpha=0.5,c='k')
ax.scatter(ele,med[i,:],s=1,alpha=0.5,c='k')
elevation = []
for j in range(0,nzen):
elevation.append(90.- j * 0.5)
ele_model.append(nanmean(med[:,j]))
#if args.model == 'pwl':
ax.plot(elevation,med_ele[::-1],'r-',linewidth=2)
ax.set_xlabel('Elevation Angle (degrees)',fontsize=8)
ax.set_ylabel('Phase Residuals (mm)',fontsize=8)
ax.set_xlim([0, 90])
ax.set_ylim([-15,15])
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(8)
plt.tight_layout()
if args.polar:
az = np.linspace(0, 360.-0.5, int(360./0.5))
zz = np.linspace(0, 90, int(90./0.5)+1)
# Plot the ESM... (antenna PCV + residuals)
fig2 = plt.figure(figsize=(3.62, 2.76))
ax = fig2.add_subplot(111,polar=True)
ax.set_theta_offset(np.radians(90.))
ax.set_theta_direction(-1)
ax.set_ylim([0,1])
ax.set_rgrids((0.00001, np.radians(20)/np.pi*2,
np.radians(40)/np.pi*2,
np.radians(60)/np.pi*2,
np.radians(80)/np.pi*2),
labels=('0', '20', '40', '60', '80'),angle=180)
ma,mz = np.meshgrid(az,zz,indexing='ij')
ma = ma.reshape(ma.size,)
mz = mz.reshape(mz.size,)
print("MED:",np.shape(med),np.shape(ma),np.shape(mz))
print("ma:",ma[0:181])
print("mz:",mz[0:181])
print("ma:",ma[-181:])
print("mz:",mz[-181:])
#polar = ax.scatter(np.radians(ma),np.radians(mz)/np.pi*2., c=models[ctr-1,:,:,0], s=5, alpha=1., cmap=cm.RdBu,vmin=-15,vmax=15, lw=0)
#polar = ax.scatter(np.radians(ma), np.radians(mz)/np.pi*2., c=med[:,:], s=5, alpha=1.,cmap=cm.RdBu,vmin=-15,vmax=15, lw=0)
#polar = ax.scatter(np.radians(ma), np.radians(mz)/np.pi*2., c=med, s=15, cmap=cm.RdBu,vmin=-15,vmax=15, lw=0)
#polar = ax.scatter(np.radians(mz)/np.pi*2., np.radians(ma), c=med, s=15, cmap=cm.RdBu,vmin=-15,vmax=15, lw=0)
#polar.set_alpha(0.75)
#az = np.linspace(0, 360.-0.5, int(360./0.5))
#zz = np.linspace(0, 90, int(90./0.5)+1)
#ictr = 0
#for zz in np.linspace(0, 90, int(90./0.5)+1):
# jctr = 0
# for az in np.linspace(0, 360.-0.5, int(360./0.5)):
# ax.scatter(np.radians(az), np.radians(zz)/np.pi*2., c=med[ictr,jctr], s=5, cmap=cm.RdBu,vmin=-15,vmax=15, lw=0)
# ictr += 1
# polar = ax2.scatter(np.radians(ma), np.radians(mz)/np.pi*2., c=med, s=5, alpha=1., cmap=cm.RdBu,vmin=-15,vmax=15, lw=0)
#cbar = fig.colorbar(polar,shrink=0.75,pad=.10)
#cbar.ax.tick_params(labelsize=8)
#cbar.set_label(args.site+' ESM (mm)',size=8)
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(8)
plt.tight_layout()
if args.polar or args.elevation:
plt.show()
#===========================================================================================
# If we want an elevation or polar plot....
#===========================================================================================
# if args.elevation or args.polar :
# import matplotlib.pyplot as plt
# from matplotlib import cm
# if args.polar :
# az = np.linspace(0, 360, int(360./0.5)+1)
# zz = np.linspace(0, 90, int(90./0.5)+1)
# # Plot the ESM... (antenna PCV + residuals)
# fig = plt.figure(figsize=(3.62, 2.76))
# ax = fig.add_subplot(111,polar=True)
# ax.set_theta_direction(-1)
# ax.set_theta_offset(np.radians(90.))
# ax.set_ylim([0,1])
# ax.set_rgrids((0.00001, np.radians(20)/np.pi*2,
# np.radians(40)/np.pi*2,
# np.radians(60)/np.pi*2,
# np.radians(80)/np.pi*2),
# labels=('0', '20', '40', '60', '80'),angle=180)
# ma,mz = np.meshgrid(az,zz,indexing='ij')
# ma = ma.reshape(ma.size,)
# mz = mz.reshape(mz.size,)
# polar = ax.scatter(np.radians(ma), np.radians(mz)/np.pi*2., c=models[0,:,:,0], s=5, alpha=1., cmap=cm.RdBu,vmin=-15,vmax=15, lw=0)
# cbar = fig.colorbar(polar,shrink=0.75,pad=.10)
# cbar.ax.tick_params(labelsize=8)
# cbar.set_label(args.site+' ESM (mm)',size=8)
# for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
# ax.get_xticklabels() + ax.get_yticklabels()):
# item.set_fontsize(8)
# plt.tight_layout()
# # Plot the blkm of the residuals
# fig2 = plt.figure(figsize=(3.62, 2.76))
# ax2 = fig2.add_subplot(111,polar=True)
# ax2.set_theta_direction(-1)
# ax2.set_theta_offset(np.radians(90.))
# ax2.set_ylim([0,1])
# ax2.set_rgrids((0.00001, np.radians(20)/np.pi*2,
# np.radians(40)/np.pi*2,
# np.radians(60)/np.pi*2,
# np.radians(80)/np.pi*2),
# labels=('0', '20', '40', '60', '80'),angle=180)
# ma,mz = np.meshgrid(az,zz,indexing='ij')
# ma = ma.reshape(ma.size,)
# mz = mz.reshape(mz.size,)
# polar = ax2.scatter(np.radians(ma), np.radians(mz)/np.pi*2., c=med, s=5, alpha=1., cmap=cm.RdBu,vmin=-15,vmax=15, lw=0)
# cbar = fig2.colorbar(polar,shrink=0.75,pad=.10)
# cbar.ax.tick_params(labelsize=8)
# cbar.set_label(args.site+' L3 Residuals (mm)',size=8)
# for item in ([ax2.title, ax2.xaxis.label, ax2.yaxis.label] +
# ax2.get_xticklabels() + ax2.get_yticklabels()):
# item.set_fontsize(8)
# plt.tight_layout()
# if args.elevation :
#===========================================================
# TODO: loop over changes in equipment...
#===========================================================
# Plot the residuals
# the antenna model
# then the ESM
#
#===========================================================
# Do an elevation only plot of the residuals
#===========================================================
# fig = plt.figure(figsize=(3.62, 2.76))
# ax = fig.add_subplot(111)
# ele = np.linspace(0,90, int(90./0.5)+1 )
# ele_model = []
# for i in range(0,720):
# ax.scatter(90.-ele,med[i,:],s=1,alpha=0.5,c='k')
# elevation = []
# for j in range(0,181):
# elevation.append(90.- j * 0.5)
# ele_model.append(nanmean(med[:,j]))
# ax.plot(elevation,ele_model[:],'r-',linewidth=2)
# ax.set_xlabel('Elevation Angle (degrees)',fontsize=8)
# ax.set_ylabel('ESM (mm)',fontsize=8)
# ax.set_xlim([0, 90])
# ax.set_ylim([-15,15])
# for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
# ax.get_xticklabels() + ax.get_yticklabels()):
# item.set_fontsize(8)
# plt.tight_layout()
#===========================================================
# Plot the antenna model before the residuals are added
# on L1
#===========================================================
#ax2 = fig.add_subplot(312)
#ele = np.linspace(0,90, int(90./5)+1 )
#minVal_dt = gt.ydhms2dt(change['start_yyyy'][0],change['start_ddd'][0],0,0,0)
#maxVal_dt = gt.ydhms2dt(change['stop_yyyy'][0],change['stop_ddd'][0],0,0,0)
#antType = gsf.antennaType(sdata,minVal_dt.strftime("%Y"),minVal_dt.strftime("%j"))
#antenna = ant.antennaType(antType,antennas)
#L1_data = np.array(antenna['data'][0])
#L1_mean = np.mean(L1_data,axis=0)
#f = interpolate.interp1d(ele,L1_mean)
#L1_int = []
#ele_i = []
#for j in range(0,181):
# ele_i.append(j*0.5)
# L1_int.append(f(j*0.5))
#L1_int = np.array(L1_int)
#ax2.plot(ele,L1_mean[::-1],'b-',alpha=0.5,linewidth=2)
#ax2.plot(ele_i,L1_int[::-1],'k--')
#ax2.set_xlabel('Elevation Angle (degrees)',fontsize=8)
#ax2.set_ylabel('L1 PCV (mm)',fontsize=8)
#ax2.set_xlim([0, 90])
#for item in ([ax2.title, ax2.xaxis.label, ax2.yaxis.label] +
# ax2.get_xticklabels() + ax2.get_yticklabels()):
# item.set_fontsize(8)
#plt.tight_layout()
#===========================================================
# Do an elevation only plot of the ESM
#===========================================================
#fig = plt.figure(figsize=(3.62, 2.76))
#ax3 = fig.add_subplot(313)
#ele = np.linspace(0,90, int(90./0.5)+1 )
#ele_esm = []
#esm = create_esm(med, 0.5, 0.5, antennas,antType)
#for j in range(0,181):
# ele_esm.append(np.mean(esm[:,j,0]))
# plot the ele only esm model
#ax3.plot(ele, ele_esm[::-1], 'g-',alpha=0.5,linewidth=2)
# plot the interpolated ant ele PCV
#ax3.plot(ele_i,L1_int[::-1],'b--',alpha=0.5,linewidth=2)
# plot the esm - antenna model => should get the residuals
#ax3.plot(ele, ele_esm - L1_int, 'b--',alpha=0.5,linewidth=2)
# plot the ele only residuals
#ax3.plot(ele, ele_model[::-1] , 'r-',alpha=0.3,linewidth=2)
#ax3.legend(['esm','pcv','esm-pcv','residuals'],loc='best')
# Try a crude attempt at an esm
#fix = L1_int[::-1] + ele_model[::-1]
#ax3.plot(ele, fix,'k--',alpha=0.5,linewidth=2)
#ax3.plot(ele, fix - L1_int[::-1],'r--')
#ax3.set_xlabel('Elevation Angle (degrees)',fontsize=8)
#ax3.set_ylabel('ESM (mm)',fontsize=8)
#ax3.set_xlim([0, 90])
#for item in ([ax3.title, ax3.xaxis.label, ax3.yaxis.label] +
# ax3.get_xticklabels() + ax3.get_yticklabels()):
# item.set_fontsize(8)
#plt.tight_layout()
#plt.show()
#===================================================
# print the esm model residuals + antenna to an ANTEX file
#===================================================
if args.model:
if not args.outfile:
args.outfile = "antmod."+args.site.lower()
print("")
print("Adding the ESM to the antenna PCV model to be saved to:",args.outfile)
print("")
with open(args.outfile,'w') as f:
print_antex_file_header(f)
for m in range(0,num_models):
antType = gsf.antennaType( sdata, change['start_yyyy'][m], change['start_ddd'][m] )
antenna = ant.antennaType(antType,antennas)
print("Model",m+1," is being added to the antenna PCV for:",antType)
print_antex_header(antType, change['valid_from'][m],change['valid_to'][m],f)
freq_ctr = 0
for freq in ['G01','G02'] :
pco = antenna['PCO_'+freq]
print_start_frequency(freq,pco,f)
noazi = np.mean(models[m,:,:,freq_ctr],axis=0)
print_antex_noazi(noazi,f)
for i in range(0,int(360./args.esm_grid)+1):
print_antex_line(float(i*args.esm_grid),models[m,i,:,freq_ctr],f)
print_end_frequency(freq,f)
freq_ctr +=1
print_end_antenna(f)
f.close()
#print("FINISHED")
|
mit
|
cdegroc/scikit-learn
|
sklearn/__init__.py
|
1
|
1863
|
"""
Machine Learning module in python
=================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.sourceforge.net for complete documentation.
"""
from . import check_build
from .base import clone
try:
from numpy.testing import nosetester
class NoseTester(nosetester.NoseTester):
""" Subclass numpy's NoseTester to add doctests by default
"""
def test(self, label='fast', verbose=1, extra_argv=['--exe'],
doctests=True, coverage=False):
"""Run the full test suite
Examples
--------
This will run the test suite and stop at the first failing
example
>>> from sklearn import test
>>> test(extra_argv=['--exe', '-sx']) #doctest: +SKIP
"""
return super(NoseTester, self).test(label=label, verbose=verbose,
extra_argv=extra_argv,
doctests=doctests, coverage=coverage)
test = NoseTester().test
del nosetester
except Exception: # numpy.testing / nose may be unavailable
pass
__all__ = ['check_build', 'cross_validation', 'cluster', 'covariance',
'datasets', 'decomposition', 'feature_extraction',
'feature_selection', 'semi_supervised',
'gaussian_process', 'grid_search', 'hmm', 'lda', 'linear_model',
'metrics', 'mixture', 'naive_bayes', 'neighbors', 'pipeline',
'preprocessing', 'qda', 'svm', 'test', 'clone', 'pls']
__version__ = '0.11-git'
|
bsd-3-clause
|
lthurlow/Boolean-Constrained-Routing
|
working_dir/runs/.per_file.py
|
1
|
1163
|
import numpy as np
import matplotlib.pyplot as plt
import os, sys
import pdb
files_to_read = []
for l in os.listdir('.'):
if l.split('.')[-1] == 'txt':
files_to_read.append(l)
for k in files_to_read:
f = open(k, 'r')
counter = 0
temp_d = {}
temp_e = {}
for line in f:
if '[' in line:
counter = 0
continue
lp = line.split(',')
if counter not in temp_d:
temp_d[counter] = [float(lp[0].strip())]
temp_e[counter] = [float(lp[1].strip())]
else:
temp_d[counter].append(float(lp[0].strip()))
temp_e[counter].append(float(lp[1].strip()))
counter += 1
# example data
#x = np.arange(0.1, 4, 0.5)
y1 = temp_d[1]
x = [10,20,30,40,50]
y2 = temp_d[2]
# First illustrate basic pyplot interface, using defaults where possible.
plt.subplot(2, 1, 1)
plt.plot(x, y1, '.-')
plt.errorbar(x,y1,yerr=temp_e[1])
plt.title('Time Comparison for Shortest Path')
plt.ylabel('Boolean Time (s)')
plt.subplot(2, 1, 2)
plt.plot(x, y2, '.-')
plt.errorbar(x,y2, yerr=temp_e[2])
plt.xlabel('Nodes')
plt.ylabel('Networkx Time (s)')
plt.savefig(str(k).split('.')[0]+'.png')
plt.clf()
|
mit
|
arokem/scipy
|
scipy/optimize/minpack.py
|
1
|
34231
|
from __future__ import division, print_function, absolute_import
import warnings
from . import _minpack
import numpy as np
from numpy import (atleast_1d, dot, take, triu, shape, eye,
transpose, zeros, prod, greater,
asarray, inf,
finfo, inexact, issubdtype, dtype)
from scipy.linalg import svd, cholesky, solve_triangular, LinAlgError
from scipy._lib._util import _asarray_validated, _lazywhere
from scipy._lib._util import getfullargspec_no_self as _getfullargspec
from .optimize import OptimizeResult, _check_unknown_options, OptimizeWarning
from ._lsq import least_squares
# from ._lsq.common import make_strictly_feasible
from ._lsq.least_squares import prepare_bounds
error = _minpack.error
__all__ = ['fsolve', 'leastsq', 'fixed_point', 'curve_fit']
def _check_func(checker, argname, thefunc, x0, args, numinputs,
output_shape=None):
res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
if (output_shape is not None) and (shape(res) != output_shape):
if (output_shape[0] != 1):
if len(output_shape) > 1:
if output_shape[1] == 1:
return shape(res)
msg = "%s: there is a mismatch between the input and output " \
"shape of the '%s' argument" % (checker, argname)
func_name = getattr(thefunc, '__name__', None)
if func_name:
msg += " '%s'." % func_name
else:
msg += "."
msg += 'Shape should be %s but it is %s.' % (output_shape, shape(res))
raise TypeError(msg)
if issubdtype(res.dtype, inexact):
dt = res.dtype
else:
dt = dtype(float)
return shape(res), dt
def fsolve(func, x0, args=(), fprime=None, full_output=0,
col_deriv=0, xtol=1.49012e-8, maxfev=0, band=None,
epsfcn=None, factor=100, diag=None):
"""
Find the roots of a function.
Return the roots of the (non-linear) equations defined by
``func(x) = 0`` given a starting estimate.
Parameters
----------
func : callable ``f(x, *args)``
A function that takes at least one (possibly vector) argument,
and returns a value of the same length.
x0 : ndarray
The starting estimate for the roots of ``func(x) = 0``.
args : tuple, optional
Any extra arguments to `func`.
fprime : callable ``f(x, *args)``, optional
A function to compute the Jacobian of `func` with derivatives
across the rows. By default, the Jacobian will be estimated.
full_output : bool, optional
If True, return optional outputs.
col_deriv : bool, optional
Specify whether the Jacobian function computes derivatives down
the columns (faster, because there is no transpose operation).
xtol : float, optional
The calculation will terminate if the relative error between two
consecutive iterates is at most `xtol`.
maxfev : int, optional
The maximum number of calls to the function. If zero, then
``100*(N+1)`` is the maximum where N is the number of elements
in `x0`.
band : tuple, optional
If set to a two-sequence containing the number of sub- and
super-diagonals within the band of the Jacobi matrix, the
Jacobi matrix is considered banded (only for ``fprime=None``).
epsfcn : float, optional
A suitable step length for the forward-difference
approximation of the Jacobian (for ``fprime=None``). If
`epsfcn` is less than the machine precision, it is assumed
that the relative errors in the functions are of the order of
the machine precision.
factor : float, optional
A parameter determining the initial step bound
(``factor * || diag * x||``). Should be in the interval
``(0.1, 100)``.
diag : sequence, optional
N positive entries that serve as scale factors for the
variables.
Returns
-------
x : ndarray
The solution (or the result of the last iteration for
an unsuccessful call).
infodict : dict
A dictionary of optional outputs with the keys:
``nfev``
number of function calls
``njev``
number of Jacobian calls
``fvec``
function evaluated at the output
``fjac``
the orthogonal matrix, q, produced by the QR
factorization of the final approximate Jacobian
matrix, stored column wise
``r``
upper triangular matrix produced by QR factorization
of the same matrix
``qtf``
the vector ``(transpose(q) * fvec)``
ier : int
An integer flag. Set to 1 if a solution was found, otherwise refer
to `mesg` for more information.
mesg : str
If no solution is found, `mesg` details the cause of failure.
See Also
--------
root : Interface to root finding algorithms for multivariate
functions. See the ``method=='hybr'`` in particular.
Notes
-----
``fsolve`` is a wrapper around MINPACK's hybrd and hybrj algorithms.
"""
options = {'col_deriv': col_deriv,
'xtol': xtol,
'maxfev': maxfev,
'band': band,
'eps': epsfcn,
'factor': factor,
'diag': diag}
res = _root_hybr(func, x0, args, jac=fprime, **options)
if full_output:
x = res['x']
info = dict((k, res.get(k))
for k in ('nfev', 'njev', 'fjac', 'r', 'qtf') if k in res)
info['fvec'] = res['fun']
return x, info, res['status'], res['message']
else:
status = res['status']
msg = res['message']
if status == 0:
raise TypeError(msg)
elif status == 1:
pass
elif status in [2, 3, 4, 5]:
warnings.warn(msg, RuntimeWarning)
else:
raise TypeError(msg)
return res['x']
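# Editorial note (not part of the upstream SciPy source): a minimal, hedged usage
# sketch for fsolve. The helper name is hypothetical and exists purely for
# illustration; it is never called by this module.
def _fsolve_usage_example():
    # Solve x**2 - 2 = 0 starting from x0 = 1.0; the returned root is ~sqrt(2).
    return fsolve(lambda x: x ** 2 - 2.0, 1.0)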
def _root_hybr(func, x0, args=(), jac=None,
col_deriv=0, xtol=1.49012e-08, maxfev=0, band=None, eps=None,
factor=100, diag=None, **unknown_options):
"""
Find the roots of a multivariate function using MINPACK's hybrd and
hybrj routines (modified Powell method).
Options
-------
col_deriv : bool
Specify whether the Jacobian function computes derivatives down
the columns (faster, because there is no transpose operation).
xtol : float
The calculation will terminate if the relative error between two
consecutive iterates is at most `xtol`.
maxfev : int
The maximum number of calls to the function. If zero, then
``100*(N+1)`` is the maximum where N is the number of elements
in `x0`.
band : tuple
If set to a two-sequence containing the number of sub- and
super-diagonals within the band of the Jacobi matrix, the
Jacobi matrix is considered banded (only for ``fprime=None``).
eps : float
A suitable step length for the forward-difference
approximation of the Jacobian (for ``fprime=None``). If
`eps` is less than the machine precision, it is assumed
that the relative errors in the functions are of the order of
the machine precision.
factor : float
A parameter determining the initial step bound
(``factor * || diag * x||``). Should be in the interval
``(0.1, 100)``.
diag : sequence
N positive entries that serve as a scale factors for the
variables.
"""
_check_unknown_options(unknown_options)
epsfcn = eps
x0 = asarray(x0).flatten()
n = len(x0)
if not isinstance(args, tuple):
args = (args,)
shape, dtype = _check_func('fsolve', 'func', func, x0, args, n, (n,))
if epsfcn is None:
epsfcn = finfo(dtype).eps
Dfun = jac
if Dfun is None:
if band is None:
ml, mu = -10, -10
else:
ml, mu = band[:2]
if maxfev == 0:
maxfev = 200 * (n + 1)
retval = _minpack._hybrd(func, x0, args, 1, xtol, maxfev,
ml, mu, epsfcn, factor, diag)
else:
_check_func('fsolve', 'fprime', Dfun, x0, args, n, (n, n))
if (maxfev == 0):
maxfev = 100 * (n + 1)
retval = _minpack._hybrj(func, Dfun, x0, args, 1,
col_deriv, xtol, maxfev, factor, diag)
x, status = retval[0], retval[-1]
errors = {0: "Improper input parameters were entered.",
1: "The solution converged.",
2: "The number of calls to function has "
"reached maxfev = %d." % maxfev,
3: "xtol=%f is too small, no further improvement "
"in the approximate\n solution "
"is possible." % xtol,
4: "The iteration is not making good progress, as measured "
"by the \n improvement from the last five "
"Jacobian evaluations.",
5: "The iteration is not making good progress, "
"as measured by the \n improvement from the last "
"ten iterations.",
'unknown': "An error occurred."}
info = retval[1]
info['fun'] = info.pop('fvec')
sol = OptimizeResult(x=x, success=(status == 1), status=status)
sol.update(info)
try:
sol['message'] = errors[status]
except KeyError:
sol['message'] = errors['unknown']
return sol
LEASTSQ_SUCCESS = [1, 2, 3, 4]
LEASTSQ_FAILURE = [5, 6, 7, 8]
def leastsq(func, x0, args=(), Dfun=None, full_output=0,
col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8,
gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None):
"""
Minimize the sum of squares of a set of equations.
::
x = arg min(sum(func(y)**2,axis=0))
y
Parameters
----------
func : callable
Should take at least one (possibly length N vector) argument and
returns M floating point numbers. It must not return NaNs or
fitting might fail.
x0 : ndarray
The starting estimate for the minimization.
args : tuple, optional
Any extra arguments to func are placed in this tuple.
Dfun : callable, optional
A function or method to compute the Jacobian of func with derivatives
across the rows. If this is None, the Jacobian will be estimated.
full_output : bool, optional
non-zero to return all optional outputs.
col_deriv : bool, optional
non-zero to specify that the Jacobian function computes derivatives
down the columns (faster, because there is no transpose operation).
ftol : float, optional
Relative error desired in the sum of squares.
xtol : float, optional
Relative error desired in the approximate solution.
gtol : float, optional
Orthogonality desired between the function vector and the columns of
the Jacobian.
maxfev : int, optional
The maximum number of calls to the function. If `Dfun` is provided,
then the default `maxfev` is 100*(N+1) where N is the number of elements
in x0, otherwise the default `maxfev` is 200*(N+1).
epsfcn : float, optional
A variable used in determining a suitable step length for the forward-
difference approximation of the Jacobian (for Dfun=None).
Normally the actual step length will be sqrt(epsfcn)*x
If epsfcn is less than the machine precision, it is assumed that the
relative errors are of the order of the machine precision.
factor : float, optional
A parameter determining the initial step bound
(``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
diag : sequence, optional
N positive entries that serve as scale factors for the variables.
Returns
-------
x : ndarray
The solution (or the result of the last iteration for an unsuccessful
call).
cov_x : ndarray
The inverse of the Hessian. `fjac` and `ipvt` are used to construct an
estimate of the Hessian. A value of None indicates a singular matrix,
which means the curvature in parameters `x` is numerically flat. To
obtain the covariance matrix of the parameters `x`, `cov_x` must be
multiplied by the variance of the residuals -- see curve_fit.
infodict : dict
a dictionary of optional outputs with the keys:
``nfev``
The number of function calls
``fvec``
The function evaluated at the output
``fjac``
A permutation of the R matrix of a QR
factorization of the final approximate
Jacobian matrix, stored column wise.
Together with ipvt, the covariance of the
estimate can be approximated.
``ipvt``
An integer array of length N which defines
a permutation matrix, p, such that
fjac*p = q*r, where r is upper triangular
with diagonal elements of nonincreasing
magnitude. Column j of p is column ipvt(j)
of the identity matrix.
``qtf``
The vector (transpose(q) * fvec).
mesg : str
A string message giving information about the cause of failure.
ier : int
An integer flag. If it is equal to 1, 2, 3 or 4, the solution was
found. Otherwise, the solution was not found. In either case, the
optional output variable 'mesg' gives more information.
See Also
--------
least_squares : Newer interface to solve nonlinear least-squares problems
with bounds on the variables. See ``method=='lm'`` in particular.
Notes
-----
"leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.
cov_x is a Jacobian approximation to the Hessian of the least squares
objective function.
This approximation assumes that the objective function is based on the
difference between some observed target data (ydata) and a (non-linear)
function of the parameters `f(xdata, params)` ::
func(params) = ydata - f(xdata, params)
so that the objective function is ::
min sum((ydata - f(xdata, params))**2, axis=0)
params
The solution, `x`, is always a 1-D array, regardless of the shape of `x0`,
or whether `x0` is a scalar.
"""
x0 = asarray(x0).flatten()
n = len(x0)
if not isinstance(args, tuple):
args = (args,)
shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
m = shape[0]
if n > m:
raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m))
if epsfcn is None:
epsfcn = finfo(dtype).eps
if Dfun is None:
if maxfev == 0:
maxfev = 200*(n + 1)
retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol,
gtol, maxfev, epsfcn, factor, diag)
else:
if col_deriv:
_check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m))
else:
_check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n))
if maxfev == 0:
maxfev = 100 * (n + 1)
retval = _minpack._lmder(func, Dfun, x0, args, full_output,
col_deriv, ftol, xtol, gtol, maxfev,
factor, diag)
errors = {0: ["Improper input parameters.", TypeError],
1: ["Both actual and predicted relative reductions "
"in the sum of squares\n are at most %f" % ftol, None],
2: ["The relative error between two consecutive "
"iterates is at most %f" % xtol, None],
3: ["Both actual and predicted relative reductions in "
"the sum of squares\n are at most %f and the "
"relative error between two consecutive "
"iterates is at \n most %f" % (ftol, xtol), None],
4: ["The cosine of the angle between func(x) and any "
"column of the\n Jacobian is at most %f in "
"absolute value" % gtol, None],
5: ["Number of calls to function has reached "
"maxfev = %d." % maxfev, ValueError],
6: ["ftol=%f is too small, no further reduction "
"in the sum of squares\n is possible." % ftol,
ValueError],
7: ["xtol=%f is too small, no further improvement in "
"the approximate\n solution is possible." % xtol,
ValueError],
8: ["gtol=%f is too small, func(x) is orthogonal to the "
"columns of\n the Jacobian to machine "
"precision." % gtol, ValueError]}
# The FORTRAN return value (possible return values are >= 0 and <= 8)
info = retval[-1]
if full_output:
cov_x = None
if info in LEASTSQ_SUCCESS:
from numpy.dual import inv
perm = take(eye(n), retval[1]['ipvt'] - 1, 0)
r = triu(transpose(retval[1]['fjac'])[:n, :])
R = dot(r, perm)
try:
cov_x = inv(dot(transpose(R), R))
except (LinAlgError, ValueError):
pass
return (retval[0], cov_x) + retval[1:-1] + (errors[info][0], info)
else:
if info in LEASTSQ_FAILURE:
warnings.warn(errors[info][0], RuntimeWarning)
elif info == 0:
raise errors[info][1](errors[info][0])
return retval[0], info
def _wrap_func(func, xdata, ydata, transform):
if transform is None:
def func_wrapped(params):
return func(xdata, *params) - ydata
elif transform.ndim == 1:
def func_wrapped(params):
return transform * (func(xdata, *params) - ydata)
else:
# Chisq = (y - yd)^T C^{-1} (y-yd)
# transform = L such that C = L L^T
# C^{-1} = L^{-T} L^{-1}
# Chisq = (y - yd)^T L^{-T} L^{-1} (y-yd)
# Define (y-yd)' = L^{-1} (y-yd)
# by solving
# L (y-yd)' = (y-yd)
# and minimize (y-yd)'^T (y-yd)'
def func_wrapped(params):
return solve_triangular(transform, func(xdata, *params) - ydata, lower=True)
return func_wrapped
def _wrap_jac(jac, xdata, transform):
if transform is None:
def jac_wrapped(params):
return jac(xdata, *params)
elif transform.ndim == 1:
def jac_wrapped(params):
return transform[:, np.newaxis] * np.asarray(jac(xdata, *params))
else:
def jac_wrapped(params):
return solve_triangular(transform, np.asarray(jac(xdata, *params)), lower=True)
return jac_wrapped
def _initialize_feasible(lb, ub):
p0 = np.ones_like(lb)
lb_finite = np.isfinite(lb)
ub_finite = np.isfinite(ub)
mask = lb_finite & ub_finite
p0[mask] = 0.5 * (lb[mask] + ub[mask])
mask = lb_finite & ~ub_finite
p0[mask] = lb[mask] + 1
mask = ~lb_finite & ub_finite
p0[mask] = ub[mask] - 1
return p0
def curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False,
check_finite=True, bounds=(-np.inf, np.inf), method=None,
jac=None, **kwargs):
"""
Use non-linear least squares to fit a function, f, to data.
Assumes ``ydata = f(xdata, *params) + eps``.
Parameters
----------
f : callable
The model function, f(x, ...). It must take the independent
variable as the first argument and the parameters to fit as
separate remaining arguments.
xdata : array_like or object
The independent variable where the data is measured.
Should usually be an M-length sequence or a (k,M)-shaped array for
functions with k predictors, but can actually be any object.
ydata : array_like
The dependent data, a length M array - nominally ``f(xdata, ...)``.
p0 : array_like, optional
Initial guess for the parameters (length N). If None, then the
initial values will all be 1 (if the number of parameters for the
function can be determined using introspection, otherwise a
ValueError is raised).
sigma : None or M-length sequence or MxM array, optional
Determines the uncertainty in `ydata`. If we define residuals as
``r = ydata - f(xdata, *popt)``, then the interpretation of `sigma`
depends on its number of dimensions:
- A 1-D `sigma` should contain values of standard deviations of
errors in `ydata`. In this case, the optimized function is
``chisq = sum((r / sigma) ** 2)``.
- A 2-D `sigma` should contain the covariance matrix of
errors in `ydata`. In this case, the optimized function is
``chisq = r.T @ inv(sigma) @ r``.
.. versionadded:: 0.19
None (default) is equivalent of 1-D `sigma` filled with ones.
absolute_sigma : bool, optional
If True, `sigma` is used in an absolute sense and the estimated parameter
covariance `pcov` reflects these absolute values.
If False, only the relative magnitudes of the `sigma` values matter.
The returned parameter covariance matrix `pcov` is based on scaling
`sigma` by a constant factor. This constant is set by demanding that the
reduced `chisq` for the optimal parameters `popt` when using the
*scaled* `sigma` equals unity. In other words, `sigma` is scaled to
match the sample variance of the residuals after the fit.
Mathematically,
``pcov(absolute_sigma=False) = pcov(absolute_sigma=True) * chisq(popt)/(M-N)``
check_finite : bool, optional
If True, check that the input arrays do not contain nans or infs,
and raise a ValueError if they do. Setting this parameter to
False may silently produce nonsensical results if the input arrays
do contain nans. Default is True.
bounds : 2-tuple of array_like, optional
Lower and upper bounds on parameters. Defaults to no bounds.
Each element of the tuple must be either an array with the length equal
to the number of parameters, or a scalar (in which case the bound is
taken to be the same for all parameters). Use ``np.inf`` with an
appropriate sign to disable bounds on all or some parameters.
.. versionadded:: 0.17
method : {'lm', 'trf', 'dogbox'}, optional
Method to use for optimization. See `least_squares` for more details.
Default is 'lm' for unconstrained problems and 'trf' if `bounds` are
provided. The method 'lm' won't work when the number of observations
is less than the number of variables, use 'trf' or 'dogbox' in this
case.
.. versionadded:: 0.17
jac : callable, string or None, optional
Function with signature ``jac(x, ...)`` which computes the Jacobian
matrix of the model function with respect to parameters as a dense
array_like structure. It will be scaled according to provided `sigma`.
If None (default), the Jacobian will be estimated numerically.
String keywords for 'trf' and 'dogbox' methods can be used to select
a finite difference scheme, see `least_squares`.
.. versionadded:: 0.18
kwargs
Keyword arguments passed to `leastsq` for ``method='lm'`` or
`least_squares` otherwise.
Returns
-------
popt : array
Optimal values for the parameters so that the sum of the squared
residuals of ``f(xdata, *popt) - ydata`` is minimized.
pcov : 2-D array
The estimated covariance of popt. The diagonals provide the variance
of the parameter estimate. To compute one standard deviation errors
on the parameters use ``perr = np.sqrt(np.diag(pcov))``.
How the `sigma` parameter affects the estimated covariance
depends on `absolute_sigma` argument, as described above.
If the Jacobian matrix at the solution doesn't have a full rank, then
'lm' method returns a matrix filled with ``np.inf``, on the other hand
'trf' and 'dogbox' methods use Moore-Penrose pseudoinverse to compute
the covariance matrix.
Raises
------
ValueError
if either `ydata` or `xdata` contain NaNs, or if incompatible options
are used.
RuntimeError
if the least-squares minimization fails.
OptimizeWarning
if covariance of the parameters can not be estimated.
See Also
--------
least_squares : Minimize the sum of squares of nonlinear functions.
scipy.stats.linregress : Calculate a linear least squares regression for
two sets of measurements.
Notes
-----
With ``method='lm'``, the algorithm uses the Levenberg-Marquardt algorithm
through `leastsq`. Note that this algorithm can only deal with
unconstrained problems.
Box constraints can be handled by methods 'trf' and 'dogbox'. Refer to
the docstring of `least_squares` for more information.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.optimize import curve_fit
>>> def func(x, a, b, c):
... return a * np.exp(-b * x) + c
Define the data to be fit with some noise:
>>> xdata = np.linspace(0, 4, 50)
>>> y = func(xdata, 2.5, 1.3, 0.5)
>>> np.random.seed(1729)
>>> y_noise = 0.2 * np.random.normal(size=xdata.size)
>>> ydata = y + y_noise
>>> plt.plot(xdata, ydata, 'b-', label='data')
Fit for the parameters a, b, c of the function `func`:
>>> popt, pcov = curve_fit(func, xdata, ydata)
>>> popt
array([ 2.55423706, 1.35190947, 0.47450618])
>>> plt.plot(xdata, func(xdata, *popt), 'r-',
... label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))
Constrain the optimization to the region of ``0 <= a <= 3``,
``0 <= b <= 1`` and ``0 <= c <= 0.5``:
>>> popt, pcov = curve_fit(func, xdata, ydata, bounds=(0, [3., 1., 0.5]))
>>> popt
array([ 2.43708906, 1. , 0.35015434])
>>> plt.plot(xdata, func(xdata, *popt), 'g--',
... label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))
>>> plt.xlabel('x')
>>> plt.ylabel('y')
>>> plt.legend()
>>> plt.show()
"""
if p0 is None:
# determine number of parameters by inspecting the function
sig = _getfullargspec(f)
args = sig.args
if len(args) < 2:
raise ValueError("Unable to determine number of fit parameters.")
n = len(args) - 1
else:
p0 = np.atleast_1d(p0)
n = p0.size
lb, ub = prepare_bounds(bounds, n)
if p0 is None:
p0 = _initialize_feasible(lb, ub)
bounded_problem = np.any((lb > -np.inf) | (ub < np.inf))
if method is None:
if bounded_problem:
method = 'trf'
else:
method = 'lm'
if method == 'lm' and bounded_problem:
raise ValueError("Method 'lm' only works for unconstrained problems. "
"Use 'trf' or 'dogbox' instead.")
# optimization may produce garbage for float32 inputs, cast them to float64
# NaNs cannot be handled
if check_finite:
ydata = np.asarray_chkfinite(ydata, float)
else:
ydata = np.asarray(ydata, float)
if isinstance(xdata, (list, tuple, np.ndarray)):
# `xdata` is passed straight to the user-defined `f`, so allow
# non-array_like `xdata`.
if check_finite:
xdata = np.asarray_chkfinite(xdata, float)
else:
xdata = np.asarray(xdata, float)
if ydata.size == 0:
raise ValueError("`ydata` must not be empty!")
# Determine type of sigma
if sigma is not None:
sigma = np.asarray(sigma)
# if 1-D, sigma are errors, define transform = 1/sigma
if sigma.shape == (ydata.size, ):
transform = 1.0 / sigma
# if 2-D, sigma is the covariance matrix,
# define transform = L such that L L^T = C
elif sigma.shape == (ydata.size, ydata.size):
try:
# scipy.linalg.cholesky requires lower=True to return L L^T = A
transform = cholesky(sigma, lower=True)
except LinAlgError:
raise ValueError("`sigma` must be positive definite.")
else:
raise ValueError("`sigma` has incorrect shape.")
else:
transform = None
func = _wrap_func(f, xdata, ydata, transform)
if callable(jac):
jac = _wrap_jac(jac, xdata, transform)
elif jac is None and method != 'lm':
jac = '2-point'
if 'args' in kwargs:
# The specification for the model function `f` does not support
# additional arguments. Refer to the `curve_fit` docstring for
# acceptable call signatures of `f`.
raise ValueError("'args' is not a supported keyword argument.")
if method == 'lm':
# Remove full_output from kwargs, otherwise we're passing it in twice.
return_full = kwargs.pop('full_output', False)
res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs)
popt, pcov, infodict, errmsg, ier = res
ysize = len(infodict['fvec'])
cost = np.sum(infodict['fvec'] ** 2)
if ier not in [1, 2, 3, 4]:
raise RuntimeError("Optimal parameters not found: " + errmsg)
else:
# Rename maxfev (leastsq) to max_nfev (least_squares), if specified.
if 'max_nfev' not in kwargs:
kwargs['max_nfev'] = kwargs.pop('maxfev', None)
res = least_squares(func, p0, jac=jac, bounds=bounds, method=method,
**kwargs)
if not res.success:
raise RuntimeError("Optimal parameters not found: " + res.message)
ysize = len(res.fun)
cost = 2 * res.cost # res.cost is half sum of squares!
popt = res.x
# Do Moore-Penrose inverse discarding zero singular values.
_, s, VT = svd(res.jac, full_matrices=False)
threshold = np.finfo(float).eps * max(res.jac.shape) * s[0]
s = s[s > threshold]
VT = VT[:s.size]
pcov = np.dot(VT.T / s**2, VT)
return_full = False
warn_cov = False
if pcov is None:
# indeterminate covariance
pcov = zeros((len(popt), len(popt)), dtype=float)
pcov.fill(inf)
warn_cov = True
elif not absolute_sigma:
if ysize > p0.size:
s_sq = cost / (ysize - p0.size)
pcov = pcov * s_sq
else:
pcov.fill(inf)
warn_cov = True
if warn_cov:
warnings.warn('Covariance of the parameters could not be estimated',
category=OptimizeWarning)
if return_full:
return popt, pcov, infodict, errmsg, ier
else:
return popt, pcov
def check_gradient(fcn, Dfcn, x0, args=(), col_deriv=0):
"""Perform a simple check on the gradient for correctness.
"""
x = atleast_1d(x0)
n = len(x)
x = x.reshape((n,))
fvec = atleast_1d(fcn(x, *args))
m = len(fvec)
fvec = fvec.reshape((m,))
ldfjac = m
fjac = atleast_1d(Dfcn(x, *args))
fjac = fjac.reshape((m, n))
if col_deriv == 0:
fjac = transpose(fjac)
xp = zeros((n,), float)
err = zeros((m,), float)
fvecp = None
_minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 1, err)
fvecp = atleast_1d(fcn(xp, *args))
fvecp = fvecp.reshape((m,))
_minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 2, err)
good = (prod(greater(err, 0.5), axis=0))
return (good, err)
def _del2(p0, p1, d):
return p0 - np.square(p1 - p0) / d
def _relerr(actual, desired):
return (actual - desired) / desired
def _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel):
p0 = x0
for i in range(maxiter):
p1 = func(p0, *args)
if use_accel:
p2 = func(p1, *args)
d = p2 - 2.0 * p1 + p0
p = _lazywhere(d != 0, (p0, p1, d), f=_del2, fillvalue=p2)
else:
p = p1
relerr = _lazywhere(p0 != 0, (p, p0), f=_relerr, fillvalue=p)
if np.all(np.abs(relerr) < xtol):
return p
p0 = p
msg = "Failed to converge after %d iterations, value is %s" % (maxiter, p)
raise RuntimeError(msg)
def fixed_point(func, x0, args=(), xtol=1e-8, maxiter=500, method='del2'):
"""
Find a fixed point of the function.
Given a function of one or more variables and a starting point, find a
fixed point of the function: i.e., where ``func(x0) == x0``.
Parameters
----------
func : function
Function to evaluate.
x0 : array_like
Fixed point of function.
args : tuple, optional
Extra arguments to `func`.
xtol : float, optional
Convergence tolerance, defaults to 1e-08.
maxiter : int, optional
Maximum number of iterations, defaults to 500.
method : {"del2", "iteration"}, optional
Method of finding the fixed-point, defaults to "del2",
which uses Steffensen's Method with Aitken's ``Del^2``
convergence acceleration [1]_. The "iteration" method simply iterates
the function until convergence is detected, without attempting to
accelerate the convergence.
References
----------
.. [1] Burden, Faires, "Numerical Analysis", 5th edition, pg. 80
Examples
--------
    >>> import numpy as np
    >>> from scipy import optimize
>>> def func(x, c1, c2):
... return np.sqrt(c1/(x+c2))
>>> c1 = np.array([10,12.])
>>> c2 = np.array([3, 5.])
>>> optimize.fixed_point(func, [1.2, 1.3], args=(c1,c2))
array([ 1.4920333 , 1.37228132])
"""
use_accel = {'del2': True, 'iteration': False}[method]
x0 = _asarray_validated(x0, as_inexact=True)
return _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel)
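if __name__ == '__main__':
    # Illustrative sketch only (not part of the original module): exercise
    # `check_gradient` and `fixed_point` on small hand-written problems.
    def _example_residuals(x):
        return np.array([x[0] ** 2 + x[1] - 1.0, x[0] - x[1] ** 2])
    def _example_jacobian(x):
        # Analytic Jacobian with derivatives across the rows (col_deriv=0).
        return np.array([[2.0 * x[0], 1.0],
                         [1.0, -2.0 * x[1]]])
    good, err = check_gradient(_example_residuals, _example_jacobian, [0.5, 0.5])
    print("Jacobian consistent:", bool(good), "err =", err)
    # Fixed point of cos(x), with and without Steffensen acceleration.
    print(fixed_point(np.cos, [1.0]))                      # ~[0.73908513]
    print(fixed_point(np.cos, [1.0], method='iteration'))  # same value, more iterations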
|
bsd-3-clause
|
SylvainCorlay/bqplot
|
setup.py
|
2
|
4343
|
# Copyright 2015 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from setuptools import setup, find_packages
from jupyter_packaging import (
create_cmdclass,
install_npm,
ensure_targets,
combine_commands,
get_version,
skip_if_exists,
)
import os
from os.path import join as pjoin
from distutils import log
here = os.path.dirname(os.path.abspath(__file__))
# due to https://github.com/jupyterlab/jupyterlab/blob/136d2ec216ebfc429a696e6ee75fee5f8ead73e2/jupyterlab/federated_labextensions.py#L347
# we should not print out anything, otherwise setup.py --name gives noise
# log.set_verbosity(log.ERROR)
# log.info('setup.py entered')
# log.info('$PATH=%s' % os.environ['PATH'])
name = 'bqplot'
LONG_DESCRIPTION = """
BQPlot
======
Plotting system for the Jupyter notebook based on the interactive Jupyter widgets.
Installation
============
.. code-block:: bash
pip install bqplot
jupyter nbextension enable --py bqplot
Usage
=====
.. code-block:: python
from bqplot import pyplot as plt
import numpy as np
plt.figure(1)
n = 200
x = np.linspace(0.0, 10.0, n)
y = np.cumsum(np.random.randn(n))
plt.plot(x,y, axes_options={'y': {'grid_lines': 'dashed'}})
plt.show()
"""
# Get bqplot version
version = get_version(pjoin(name, '_version.py'))
js_dir = pjoin(here, 'js')
# Representative files that should exist after a successful build
jstargets = [
pjoin('share', 'jupyter', 'nbextensions', 'bqplot', 'index.js'),
pjoin('share', 'jupyter', 'labextensions', 'bqplot', 'package.json'),
]
data_files_spec = [
('share/jupyter/nbextensions/bqplot', 'share/jupyter/nbextensions/bqplot', '*.js'),
('share/jupyter/labextensions/bqplot/', 'share/jupyter/labextensions/bqplot/', '**'),
('etc/jupyter/nbconfig/notebook.d', 'etc/jupyter/nbconfig/notebook.d', 'bqplot.json'),
]
js_command = combine_commands(
install_npm(js_dir, build_dir='share/jupyter/', source_dir='js/src', build_cmd='build'), ensure_targets(jstargets),
)
# Adding "map_data" as package_data manually, this should not be needed because it's already
# specified in MANIFEST and include_package_data=True. This might be a bug in jupyter-packaging?
cmdclass = create_cmdclass('jsdeps', data_files_spec=data_files_spec, package_data_spec={"bqplot": ["map_data/*.json"]})
is_repo = os.path.exists(os.path.join(here, '.git'))
if is_repo:
cmdclass['jsdeps'] = js_command
else:
cmdclass['jsdeps'] = skip_if_exists(jstargets, js_command)
setup_args = dict(
name=name,
version=version,
description='Interactive plotting for the Jupyter notebook, using d3.js and ipywidgets.',
long_description=LONG_DESCRIPTION,
license='Apache',
author='The BQplot Development Team',
url='https://github.com/bloomberg/bqplot',
include_package_data=True,
cmdclass=cmdclass,
install_requires=[
'ipywidgets>=7.5.0',
'traitlets>=4.3.0',
'traittypes>=0.0.6',
'numpy>=1.10.4',
'pandas'],
packages=find_packages(exclude=["tests"]),
zip_safe=False,
keywords=[
'ipython',
'jupyter',
'widgets',
'graphics',
'plotting',
'd3',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Multimedia :: Graphics',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
)
setup(**setup_args)
|
apache-2.0
|
arokem/pyAFQ
|
examples/plot_afq_api.py
|
2
|
6320
|
"""
==========================
AFQ API
==========================
An example using the AFQ API
"""
import os.path as op
import matplotlib.pyplot as plt
import nibabel as nib
import plotly
from AFQ import api
import AFQ.data as afd
##########################################################################
# Get some example data
# ---------------------
#
# Retrieves High angular resolution diffusion imaging (HARDI) dataset from
# Stanford's Vista Lab
#
# see https://purl.stanford.edu/ng782rw8378 for details on dataset.
#
# The data for the first subject and first session are downloaded locally
# (by default into the user's home directory) under:
#
# ``.dipy/stanford_hardi/``
#
# Anatomical data (``anat``) and Diffusion-weighted imaging data (``dwi``) are
# then extracted, formatted to be BIDS compliant, and placed in the AFQ
# data directory (by default in the user's home directory) under:
#
# ``AFQ_data/stanford_hardi/``
#
# This data represents the preprocessed diffusion data required for
# initializing the AFQ object (which we will do next)
#
# The clear_previous_afq argument is used to remove any previous runs of the AFQ
# object stored in the AFQ_data/stanford_hardi/ BIDS directory. Set it to False
# if you want to use the results of previous runs.
afd.organize_stanford_data(clear_previous_afq=True)
##########################################################################
# Initialize an AFQ object:
# -------------------------
#
# Creates an AFQ object that encapsulates tractometry. This object can be
# used to manage the entire AFQ pipeline, including:
#
# - Tractography
# - Registration
# - Segmentation
# - Cleaning
# - Profiling
# - Visualization
#
# In this example we will load the subject's session data from the previous step
# using the default AFQ parameters.
#
# .. note::
#
#    The first time the AFQ object is initialized, it will download the
#    necessary waypoint region of interest (ROI) templates into the AFQ data
#    directory:
#
# - Human corpus callosum templates: ``AFQ_data/callosum_templates/``
#
# see https://digital.lib.washington.edu/researchworks/handle/1773/34926
#
# - Tract probability maps: ``AFQ_data/templates/``
#
# see https://figshare.com/articles/Tract_probability_maps_for_automated_fiber_quantification/6270434
#
# These waypoint ROIs will be used to identify the desired white matter tracts.
#
# This will also create an output folder for the corresponding AFQ derivatives
# in the AFQ data directory: ``AFQ_data/stanford_hardi/derivatives/afq/``
#
# To initialize this object we will pass in the path location to our BIDS
# compliant data.
#
# .. note::
#
# As noted above, the Stanford HARDI data contains anatomical and
# diffusion weighted imaging (dwi) data. In this example, we are interested
#    in the vistasoft dwi. For our dataset the `dmriprep` argument is optional, but
# we have included it to make the initialization more explicit.
#
# .. note::
#
# We will also be using plotly to generate an interactive visualization.
# So we will specify plotly_no_gif as the visualization backend.
myafq = api.AFQ(bids_path=op.join(afd.afq_home,
'stanford_hardi'),
dmriprep='vistasoft',
viz_backend='plotly_no_gif')
##########################################################################
# Reading in DTI FA (Diffusion Tensor Imaging Fractional Anisotropy)
# ------------------------------------------------------------------
# The AFQ object holds a table with file names to various data derivatives.
#
# For example, the file where the FA computed from DTI is stored can be
# retrieved by inspecting the ``dti_fa`` property. The measures are stored
# in a series, and since we only have one subject and one session we will
# access the first (and only) file name from the example data.
#
# .. note::
#
#    The AFQ API computes quantities lazily. This means that DTI parameters
#    are not computed until they are required, so the first line below is the
#    one that takes time.
#
# We will then use `nibabel` to load the derivative file and retrieve the
# data array.
FA_fname = myafq.dti_fa["01"]
FA_img = nib.load(FA_fname)
FA = FA_img.get_fdata()
##########################################################################
# Visualize the result with Matplotlib
# -------------------------------------
# At this point `FA` is an array, and we can use standard Python tools to
# visualize it or perform additional computations with it.
#
# In this case we are going to take an axial slice halfway through the
# FA data array and plot using a sequential color map.
#
# .. note::
#
#    The data array is structured as an xyz coordinate system.
fig, ax = plt.subplots(1)
ax.matshow(FA[:, :, FA.shape[-1] // 2], cmap='viridis')
ax.axis("off")
##########################################################################
# Visualizing bundles and tract profiles:
# ---------------------------------------
# The pyAFQ API provides several ways to visualize bundles and profiles.
#
# First, we will run a function that exports an html file that contains
# an interactive visualization of the bundles that are segmented.
#
# .. note::
#    By default we resample 100 points within a bundle; however, to reduce
#    processing time we will only resample 50 points.
#
# Once it is done running, it should pop a browser window open and let you
# interact with the bundles.
#
# .. note::
# Running the code below triggers the full pipeline of operations
# leading to the computation of the tract profiles. Therefore, it
# takes a little while to run (about 40 minutes, typically).
#
# .. note::
# You can hide or show a bundle by clicking the legend, or select a
# single bundle by double clicking the legend. The interactive
#    visualization will also allow you to pan, zoom, and rotate.
bundle_html = myafq.all_bundles_figure
plotly.io.show(bundle_html["01"])
##########################################################################
# We can also visualize the tract profiles in all of the bundles. These
# plots show both FA (left) and MD (right) laid out anatomically.
#
fig_files = myafq.tract_profile_plots["01"]
##########################################################################
# .. figure:: {{ fig_files[0] }}
#
|
bsd-2-clause
|
tspr/pycgats
|
stats_play.py
|
1
|
4714
|
#!/usr/bin/python
# coding=UTF-8
### create 3D-Plot of color patches.
### Assumes: List of Patches for measured values of Inking curve.
###
### To do the plot correctly:
### 0. set up tval: use one of CMYK_C...CMYK_K
### 1. get list of Points
### 2. convert Lab colors to sRGB
### 3. Plot small- sized Points
### 4. Plot Uncertainty-sized spheres (Rings of dE)
### 5. Generate Spline Fit (enumerate SampleID as time variable in spline fit)
### 6. Plot Spline
#### SETUP HERE
cdwpath= '/Users/tspr/Desktop/LabPlot'
h5filename = "./Farben.h5"
tvals_in = "CMYK_M"
import locale
locale.setlocale(locale.LC_ALL, 'de_DE')
from tables import *
from colormath.color_objects import LabColor
from mpl_toolkits.mplot3d import *
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import numpy as np
from scipy import interpolate
from scipy.stats import lognorm
# import seaborn as sns
# sns.set(style="whitegrid")
def abL_2_sRGB(a_val,b_val,L_val):
intermediatecolor = LabColor(lab_l=L_val,lab_a=a_val,lab_b=b_val).convert_to('rgb')
normtuple = (float(intermediatecolor.rgb_r)/256,float(intermediatecolor.rgb_g)/256,float(intermediatecolor.rgb_b)/256)
return normtuple
class CGATScolors(IsDescription):
SAMPLE_ID = StringCol(32)
SAMPLE_NAME = StringCol(32)
CMYK_C = FloatCol()
CMYK_M = FloatCol()
CMYK_Y = FloatCol()
CMYK_K = FloatCol()
LAB_L = FloatCol()
LAB_A = FloatCol()
LAB_B = FloatCol()
h5file = openFile(h5filename, mode = "r")
table=h5file.getNode('/Tabellen/Patches')
# Convert Lab to rgb for plotting and append to data points
# make sure, your installation of matplotlib is patched according to:
# https://github.com/login?return_to=%2Fmatplotlib%2Fmatplotlib%2Fissues%2F1692
colours=[]
tvals=[]
avals=[]
bvals=[]
Lvals=[]
for row in table[:]:
colours.append(abL_2_sRGB(a_val=row['LAB_A'],b_val=row['LAB_B'],L_val=row['LAB_L']))
tvals.append(row[tvals_in]/100)
avals.append(row['LAB_A'])
bvals.append(row['LAB_B'])
Lvals.append(row['LAB_L'])
h5file.close()
# Create Interpolation functions for a,b,L
a_pchip=interpolate.pchip(tvals,avals)
b_pchip=interpolate.pchip(tvals,bvals)
L_pchip=interpolate.pchip(tvals,Lvals)
pch_Inter=[a_pchip,b_pchip,L_pchip]
# Start plotting
# Disable depth shading
plt.ion()
art3d.zalpha = lambda *args:args[0]
# Setup canvas
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_xlabel('a')
ax.set_ylabel('b')
ax.set_zlabel('L')
ax.set_xlim(-100,100)
ax.set_ylim(-100,100)
ax.set_zlim(0,100)
# plot the grey axis
gx=np.linspace(0,0,101)
gy=np.linspace(0,0,101)
gz=np.linspace(0,100,101)
gaxis=ax.plot(gx,gy,gz,c=(0,0,0))
# plot the points
#pointsplot=ax.scatter3D(table.cols.LAB_A[:],table.cols.LAB_B[:],table.cols.LAB_L[:],s=20,c=colours,linewidth=0)#
pointsplot=ax.scatter3D(avals,bvals,Lvals,s=20,c=colours,linewidth=0)
# maybe plot some cross marks as well
#q=ax.scatter3D(table.cols.LAB_A[:],table.cols.LAB_B[:],table.cols.LAB_L[:], s=100,c=(0,0,0), norm=None, marker='+', linewidth=1,alpha=0.5)
# plot the Interpolated Path
linerange = np.arange(np.amin(tvals),np.amax(tvals),0.01)
interline=ax.plot(a_pchip(linerange),b_pchip(linerange),L_pchip(linerange),c=(0,0,0),linewidth=1)
cmpos=0.80
crossmark=ax.scatter3D(a_pchip(cmpos),b_pchip(cmpos),L_pchip(cmpos),marker='+',c=abL_2_sRGB(a_pchip(cmpos),b_pchip(cmpos),L_pchip(cmpos)),s=200, linewidth=1)
# show the thing
fig.show()
# create 1000 normally distributed points along the line around cmpos.
# range should be 95% within +-5% of range of t-values (2 sigma)
# scale= sigma. so sigma = 5 (percent) * range / 2(sides)*2(sigma)*100(percent)
scale=(5*(linerange.max()-linerange.min())/400)
zufallspositionen = np.random.normal(loc=cmpos,scale=scale,size=1000)
rnd_a = a_pchip(zufallspositionen)
rnd_b = b_pchip(zufallspositionen)
rnd_L = L_pchip(zufallspositionen)
# add some noise (size: Lab units)
noise_size = 0.1
rnd_a += np.random.normal(0,noise_size,1000)
rnd_b += np.random.normal(0,noise_size,1000)
rnd_L += np.random.normal(0,noise_size,1000)
cntr_a = a_pchip(cmpos)
cntr_b = b_pchip(cmpos)
cntr_L = L_pchip(cmpos)
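# Delta E (CIE76): Euclidean distance in Lab space between each noisy sample
# and the interpolated centre point at cmpos.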
delta_a = cntr_a - rnd_a
delta_b = cntr_b - rnd_b
delta_L = cntr_L - rnd_L
delta_E = np.sqrt(np.square(delta_a) + np.square(delta_b) + np.square(delta_L))
# plot them green
rndplt=ax.scatter3D(rnd_a,rnd_b,rnd_L,marker='*',c='green',s=50,linewidth=1)
# histogramm delta E
plt.figure()
n, bins, patches = plt.hist(delta_E,bins=50,color='blue',normed=True,histtype='bar')
lnrm_shape, lnrm_loc, lnrm_scale = lognorm.fit(delta_E)
x= np.linspace(0, delta_E.max(), num=400)
y = lognorm.pdf(x,lnrm_shape,loc=lnrm_loc,scale=lnrm_scale)
pdflne=plt.plot(x,y,'r--',linewidth=2)
|
mit
|
bgroveben/python3_machine_learning_projects
|
oreilly_GANs_for_beginners/introduction_to_ml_with_python/mglearn/mglearn/plot_scaling.py
|
4
|
1505
|
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.preprocessing import (StandardScaler, MinMaxScaler, Normalizer,
RobustScaler)
from .plot_helpers import cm2
def plot_scaling():
X, y = make_blobs(n_samples=50, centers=2, random_state=4, cluster_std=1)
X += 3
plt.figure(figsize=(15, 8))
main_ax = plt.subplot2grid((2, 4), (0, 0), rowspan=2, colspan=2)
main_ax.scatter(X[:, 0], X[:, 1], c=y, cmap=cm2, s=60)
maxx = np.abs(X[:, 0]).max()
maxy = np.abs(X[:, 1]).max()
main_ax.set_xlim(-maxx + 1, maxx + 1)
main_ax.set_ylim(-maxy + 1, maxy + 1)
main_ax.set_title("Original Data")
other_axes = [plt.subplot2grid((2, 4), (i, j))
for j in range(2, 4) for i in range(2)]
for ax, scaler in zip(other_axes, [StandardScaler(), RobustScaler(),
MinMaxScaler(), Normalizer(norm='l2')]):
X_ = scaler.fit_transform(X)
ax.scatter(X_[:, 0], X_[:, 1], c=y, cmap=cm2, s=60)
ax.set_xlim(-2, 2)
ax.set_ylim(-2, 2)
ax.set_title(type(scaler).__name__)
other_axes.append(main_ax)
for ax in other_axes:
ax.spines['left'].set_position('center')
ax.spines['right'].set_color('none')
ax.spines['bottom'].set_position('center')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
|
mit
|
jubatus/jubakit
|
example/classifier_parameter.py
|
2
|
2430
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
"""
Finding Best Hyper Parameter for Classifier
=======================================================
In this example, we try to find the best hyper parameter of Classifier
(`method` and `regularization_weight`) by calculating accuracy for
possible hyper parameter values.
Datasets are randomly generated by using scikit-learn data generator.
"""
import sklearn.datasets
import sklearn.metrics
from jubakit.classifier import Classifier, Dataset, Config
# Generate a dummy dataset using scikit-learn.
(X, y) = sklearn.datasets.make_classification(
n_samples=512,
n_features=20,
n_informative=2,
n_redundant=2,
n_repeated=0,
n_classes=2,
n_clusters_per_class=2,
weights=None,
flip_y=0.01,
class_sep=1.0,
hypercube=True,
shift=0.0,
scale=1.0,
shuffle=True,
random_state=0, # fixed seed
)
# Convert arrays into jubakit Dataset.
dataset = Dataset.from_array(X, y)
# Try finding the best classifier parameter.
param2metrics = {}
for method in ['AROW', 'NHERD', 'CW']:
for rw in [0.0001, 0.001, 0.01, 0.1, 1.0, 10.0]:
print('Running ({0} / regularization_weight = {1})...'.format(method, rw))
# Create a config data structure.
jubatus_config = Config(method=method, parameter={'regularization_weight': rw})
# It is equivalent to:
#jubatus_config = Config.default()
#jubatus_config['method'] = method
#jubatus_config['parameter']['regularization_weight'] = rw
# Launch Jubatus server using the specified configuration.
classifier = Classifier.run(jubatus_config)
# Train with the dataset.
for _ in classifier.train(dataset):
pass
# Classify with the same dataset.
y_true = []
y_pred = []
for (idx, label, result) in classifier.classify(dataset):
y_true.append(label)
y_pred.append(result[0][0])
classifier.stop()
# Store the metrics for current configuration.
param2metrics['{0} ({1})'.format(method, rw)] = sklearn.metrics.accuracy_score(y_true, y_pred)
# Show results for each hyper parameter.
best_C = sorted(param2metrics.keys(), key=lambda x: param2metrics[x], reverse=True)[0]
print('--------------------')
print('Configuration\tAccuracy')
for C in sorted(param2metrics.keys()):
print('{0}\t{1}\t{2}'.format(C, param2metrics[C], '*' if C == best_C else ''))
|
mit
|
harrisonpim/bookworm
|
bookworm/analyse.py
|
1
|
5688
|
import networkx as nx
import pandas as pd
import numpy as np
from nltk.tokenize import word_tokenize
from .build_network import *
def character_density(book_path):
'''
number of central characters divided by the total number of words in a novel
Parameters
----------
book_path : string (required)
path to txt file containing full text of book to be analysed
Returns
-------
density : float
number of characters in book / number of words in book
'''
book = load_book(book_path)
book_length = len(word_tokenize(book))
book_graph = nx.from_pandas_dataframe(bookworm(book_path),
source='source',
target='target')
n_characters = len(book_graph.nodes())
return n_characters / book_length
def split_book(book, n_sections=10, cumulative=True):
'''
Split a book into n equal parts, with optional cumulative aggregation
Parameters
----------
book : string (required)
the book to be split
n_sections : (optional)
the number of sections which we want to split our book into
cumulative : bool (optional)
If true, the returned sections will be cumulative, ie all
will start at the book's beginning and end at evenly distributed
points throughout the book
Returns
-------
split_book : list
the given book split into the specified number of even (or, if
cumulative is set to True, uneven) sections
'''
book_sequences = get_sentence_sequences(book)
split_book = np.array_split(np.array(book_sequences), n_sections)
if cumulative is True:
split_book = [np.concatenate(split_book[:pos + 1])
for pos, section in enumerate(split_book)]
return split_book
def chronological_network(book_path, n_sections=10, cumulative=True):
'''
Split a book into n equal parts, with optional cumulative aggregation, and
return a dictionary of assembled character graphs
Parameters
----------
book_path : string (required)
path to the .txt file containing the book to be split
n_sections : (optional)
the number of sections which we want to split our book into
cumulative : bool (optional)
If true, the returned sections will be cumulative, ie all will start at
the book's beginning and end at evenly distributed points throughout
the book
Returns
-------
graph_dict : dict
a dictionary containing the graphs of each split book section
keys = section index
values = nx.Graph describing the character graph in the specified book
section
'''
book = load_book(book_path)
sections = split_book(book, n_sections, cumulative)
graph_dict = {}
for i, section in enumerate(sections):
characters = extract_character_names(' '.join(section))
df = find_connections(sequences=section, characters=characters)
cooccurence = calculate_cooccurence(df)
interaction_df = get_interaction_df(cooccurence, threshold=2)
graph_dict[i] = nx.from_pandas_dataframe(interaction_df,
source='source',
target='target')
return graph_dict
def select_k(spectrum):
'''
Returns k, where the top k eigenvalues of the graph's laplacian describe 90
    percent of the graph's complexity.
Parameters
----------
    spectrum : array_like (required)
the laplacian spectrum of the graph in question
Returns
-------
k : int
denotes the top k eigenvalues of the graph's laplacian spectrum,
explaining 90 percent of its complexity (or containing 90 percent of
its energy)
'''
if sum(spectrum) == 0:
return len(spectrum)
running_total = 0
for i in range(len(spectrum)):
running_total += spectrum[i]
if (running_total / sum(spectrum)) >= 0.9:
return i + 1
return len(spectrum)
def graph_similarity(graph_1, graph_2):
'''
Computes the similarity of two graphs based on their laplacian spectra,
returning a value between 0 and inf where a score closer to 0 is indicative
of a more similar network
Parameters
----------
graph_1 : networkx.Graph (required)
graph_2 : networkx.Graph (required)
Returns
-------
similarity : float
the similarity score of the two graphs where a value closer to 0 is
indicative of a more similar pair of networks
'''
laplacian_1 = nx.spectrum.laplacian_spectrum(graph_1)
laplacian_2 = nx.spectrum.laplacian_spectrum(graph_2)
k_1 = select_k(laplacian_1)
k_2 = select_k(laplacian_2)
k = min(k_1, k_2)
return sum((laplacian_1[:k] - laplacian_2[:k])**2)
def comparison_df(graph_dict):
'''
    takes an assortment of novels and computes their similarity, based on their
laplacian spectra
Parameters
----------
graph_dict : dict (required)
keys = book title
values = character graph
Returns
-------
comparison : pandas.DataFrame
columns = book titles
indexes = book titles
values = measure of the character graph similarity of books
'''
books = list(graph_dict.keys())
comparison = {book_1: {book_2: graph_similarity(graph_dict[book_1],
graph_dict[book_2])
for book_2 in books} for book_1 in books}
return pd.DataFrame(comparison)
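if __name__ == '__main__':
    # Illustrative sketch only (hypothetical file paths; not part of the
    # original module, run via ``python -m bookworm.analyse``): build character
    # graphs for two novels with the `bookworm` pipeline imported above, then
    # compare their Laplacian spectra.
    paths = {'book_a': 'data/book_a.txt', 'book_b': 'data/book_b.txt'}
    graphs = {title: nx.from_pandas_dataframe(bookworm(path),
                                              source='source',
                                              target='target')
              for title, path in paths.items()}
    print(comparison_df(graphs))
    print({title: character_density(path) for title, path in paths.items()})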
|
mit
|
facom/AstrodynTools
|
tides/tides-variable.py
|
1
|
1202
|
#!/usr/bin/env python
#-*-coding:utf-8-*-
from constants import *
from numpy import *
from matplotlib.pyplot import *
from sys import exit
###################################################
#UTILITIES
###################################################
DEG=pi/180
RAD=180/pi
def P2(psi):
p=0.5*(3*cos(psi)**2-1)
return p
###################################################
#SCRIPT
###################################################
#"""
mp=Mearth
Rp=Rearth
a=rmoon
ms=Mmoon
#"""
"""SUN-EARTH
mp=Mearth
Rp=Rearth
a=rsun
ms=Msun
#"""
#EQUILIBRIUM
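#csi is the equilibrium tidal amplitude, csi = (ms/mp)*(Rp/a)**3*Rp, i.e. the
#height scale of the tide raised on the planet by the tide-raising body;
#g = G*mp/Rp**2 is the surface gravity of the planet.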
csi=ms/mp*(Rp/a)**3*Rp
g=Gconst*mp/Rp**2
#FREQUENCIES
Omega=2*pi/Day
n=2*pi/(Month*Day)
#MOON ORBIT INCLINATION
i=5*DEG
#CALCULATE ANGLES AS A FUNCTION OF TIME
t=linspace(0,Month*Day,1000)
fp=Omega*t
tp=90.0*DEG
#COLATITUDE (SPHERICAL TRIGONOMETRY)
fm=n*t
B=1/sin(i)**2
A=cos(fm)**2
tm=arcsin(sqrt((B-1)/(B-A)))
#ARTIFICIAL ENHANCEMENT FACTORS
A=1.0E0
B=1.0E0
C=1.0E17
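#The 'phase' factor below is the addition-theorem expansion of the Legendre
#polynomial P2 evaluated at cos(psi), psi being the angle between the observer
#(colatitude tp, longitude fp) and the tide-raising body (colatitude tm,
#longitude fm):
#  P2 = 0.5*(3*cos(tp)**2-1)*0.5*(3*cos(tm)**2-1)
#     + 0.75*sin(tp)**2*sin(tm)**2*cos(2*(fp-fm))
#     + 0.75*sin(2*tp)*sin(2*tm)*cos(fp-fm)
#A, B and C below artificially rescale the three terms for visualisation.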
#"""
phase=\
A*0.50*(3*cos(tp)**2-1)*0.5*(3*cos(tm)**2-1)+\
B*0.75*sin(tp)**2*sin(tm)**2*cos(2*(fp-fm))+\
C*0.75*sin(2*tp)*sin(2*tm)*cos(fp-fm)
#"""
#TIDE
heq=csi*phase
figure()
plot(t/Day,heq)
xlabel("$t$ (Day)")
ylabel("Tide (m)")
savefig("tides-oscillation.png")
|
gpl-2.0
|
dagar/Firmware
|
Tools/ecl_ekf/plotting/data_plots.py
|
5
|
13860
|
#! /usr/bin/env python3
"""
function collection for plotting
"""
from typing import Optional, List, Tuple, Dict
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import Figure, Axes
from matplotlib.backends.backend_pdf import PdfPages
def get_min_arg_time_value(
time_series_data: np.ndarray, data_time: np.ndarray) -> Tuple[int, float, float]:
"""
:param time_series_data:
:param data_time:
:return:
"""
min_arg = np.argmin(time_series_data)
min_time = data_time[min_arg]
min_value = np.amin(time_series_data)
return (min_arg, min_value, min_time)
def get_max_arg_time_value(
time_series_data: np.ndarray, data_time: np.ndarray) -> Tuple[int, float, float]:
"""
:param time_series_data:
:param data_time:
:return:
"""
max_arg = np.argmax(time_series_data)
max_time = data_time[max_arg]
max_value = np.amax(time_series_data)
return max_arg, max_value, max_time
class DataPlot(object):
"""
A plotting class interface. Provides functions such as saving the figure.
"""
def __init__(
self, plot_data: Dict[str, np.ndarray], variable_names: List[List[str]],
plot_title: str = '', sub_titles: Optional[List[str]] = None,
x_labels: Optional[List[str]] = None, y_labels: Optional[List[str]] = None,
y_lim: Optional[Tuple[int, int]] = None, legend: Optional[List[str]] = None,
pdf_handle: Optional[PdfPages] = None) -> None:
"""
Initializes the data plot class interface.
:param plot_title:
:param pdf_handle:
"""
self._plot_data = plot_data
self._variable_names = variable_names
self._plot_title = plot_title
self._sub_titles = sub_titles
self._x_labels = x_labels
self._y_labels = y_labels
self._y_lim = y_lim
self._legend = legend
self._pdf_handle = pdf_handle
self._fig = None
self._ax = None
self._fig_size = (20, 13)
@property
def fig(self) -> Figure:
"""
:return: the figure handle
"""
if self._fig is None:
self._create_figure()
return self._fig
@property
def ax(self) -> Axes:
"""
:return: the axes handle
"""
if self._ax is None:
self._create_figure()
return self._ax
@property
def plot_data(self) -> dict:
"""
returns the plot data. calls _generate_plot_data if necessary.
:return:
"""
if self._plot_data is None:
self._generate_plot_data()
return self._plot_data
def plot(self) -> None:
"""
placeholder for the plotting function. A child class should implement this function.
:return:
"""
def _create_figure(self) -> None:
"""
creates the figure handle.
:return:
"""
self._fig, self._ax = plt.subplots(frameon=True, figsize=self._fig_size)
self._fig.suptitle(self._plot_title)
def _generate_plot_data(self) -> None:
"""
placeholder for a function that generates a data table necessary for plotting
:return:
"""
def show(self) -> None:
"""
displays the figure on the screen.
:return: None
"""
self.fig.show()
def save(self) -> None:
"""
saves the figure if a pdf_handle was initialized.
:return:
"""
if self._pdf_handle is not None and self.fig is not None:
self.plot()
self._pdf_handle.savefig(figure=self.fig)
else:
print('skipping saving to pdf: handle was not initialized.')
def close(self) -> None:
"""
closes the figure.
:return:
"""
plt.close(self._fig)
class TimeSeriesPlot(DataPlot):
"""
    class for creating multiple time series plots.
"""
def __init__(
self, plot_data: dict, variable_names: List[List[str]], x_labels: List[str],
y_labels: List[str], plot_title: str = '', sub_titles: Optional[List[str]] = None,
pdf_handle: Optional[PdfPages] = None) -> None:
"""
initializes a timeseries plot
:param plot_data:
:param variable_names:
        :param x_labels:
        :param y_labels:
:param plot_title:
:param pdf_handle:
"""
super().__init__(
plot_data, variable_names, plot_title=plot_title, sub_titles=sub_titles,
x_labels=x_labels, y_labels=y_labels, pdf_handle=pdf_handle)
def plot(self):
"""
plots the time series data.
:return:
"""
if self.fig is None:
return
for i in range(len(self._variable_names)):
plt.subplot(len(self._variable_names), 1, i + 1)
for v in self._variable_names[i]:
plt.plot(self.plot_data[v], 'b')
plt.xlabel(self._x_labels[i])
plt.ylabel(self._y_labels[i])
self.fig.tight_layout(rect=[0, 0.03, 1, 0.95])
class InnovationPlot(DataPlot):
"""
class for creating an innovation plot.
"""
def __init__(
self, plot_data: dict, variable_names: List[Tuple[str, str]], x_labels: List[str],
y_labels: List[str], plot_title: str = '', sub_titles: Optional[List[str]] = None,
pdf_handle: Optional[PdfPages] = None) -> None:
"""
        initializes an innovation plot
:param plot_data:
:param variable_names:
        :param x_labels:
        :param y_labels:
:param plot_title:
:param sub_titles:
:param pdf_handle:
"""
super().__init__(
plot_data, variable_names, plot_title=plot_title, sub_titles=sub_titles,
x_labels=x_labels, y_labels=y_labels, pdf_handle=pdf_handle)
def plot(self):
"""
plots the Innovation data.
:return:
"""
if self.fig is None:
return
for i in range(len(self._variable_names)):
# create a subplot for every variable
plt.subplot(len(self._variable_names), 1, i + 1)
if self._sub_titles is not None:
plt.title(self._sub_titles[i])
# plot the value and the standard deviation
plt.plot(
1e-6 * self.plot_data['timestamp'], self.plot_data[self._variable_names[i][0]], 'b')
plt.plot(
1e-6 * self.plot_data['timestamp'],
np.sqrt(self.plot_data[self._variable_names[i][1]]), 'r')
plt.plot(
1e-6 * self.plot_data['timestamp'],
-np.sqrt(self.plot_data[self._variable_names[i][1]]), 'r')
plt.xlabel(self._x_labels[i])
plt.ylabel(self._y_labels[i])
plt.grid()
# add the maximum and minimum value as an annotation
_, max_value, max_time = get_max_arg_time_value(
self.plot_data[self._variable_names[i][0]], 1e-6 * self.plot_data['timestamp'])
_, min_value, min_time = get_min_arg_time_value(
self.plot_data[self._variable_names[i][0]], 1e-6 * self.plot_data['timestamp'])
plt.text(
max_time, max_value, 'max={:.2f}'.format(max_value), fontsize=12,
horizontalalignment='left',
verticalalignment='bottom')
plt.text(
min_time, min_value, 'min={:.2f}'.format(min_value), fontsize=12,
horizontalalignment='left',
verticalalignment='top')
self.fig.tight_layout(rect=[0, 0.03, 1, 0.95])
class ControlModeSummaryPlot(DataPlot):
"""
class for creating a control mode summary plot.
"""
def __init__(
self, data_time: np.ndarray, plot_data: dict, variable_names: List[List[str]],
x_label: str, y_labels: List[str], annotation_text: List[str],
additional_annotation: Optional[List[str]] = None, plot_title: str = '',
sub_titles: Optional[List[str]] = None,
pdf_handle: Optional[PdfPages] = None) -> None:
"""
        initializes a control mode summary plot
:param plot_data:
:param variable_names:
        :param x_label:
        :param y_labels:
:param plot_title:
:param sub_titles:
:param pdf_handle:
"""
super().__init__(
plot_data, variable_names, plot_title=plot_title, sub_titles=sub_titles,
x_labels=[x_label]*len(y_labels), y_labels=y_labels, pdf_handle=pdf_handle)
self._data_time = data_time
self._annotation_text = annotation_text
self._additional_annotation = additional_annotation
def plot(self):
"""
plots the control mode data.
:return:
"""
if self.fig is None:
return
colors = ['b', 'r', 'g', 'c']
for i in range(len(self._variable_names)):
# create a subplot for every variable
plt.subplot(len(self._variable_names), 1, i + 1)
if self._sub_titles is not None:
plt.title(self._sub_titles[i])
for col, var in zip(colors[:len(self._variable_names[i])], self._variable_names[i]):
plt.plot(self._data_time, self.plot_data[var], col)
plt.xlabel(self._x_labels[i])
plt.ylabel(self._y_labels[i])
plt.grid()
plt.ylim(-0.1, 1.1)
for t in range(len(self._annotation_text[i])):
_, _, align_time = get_max_arg_time_value(
np.diff(self.plot_data[self._variable_names[i][t]]), self._data_time)
v_annot_pos = (t+1.0)/(len(self._variable_names[i])+1) # vert annotation position
if np.amin(self.plot_data[self._variable_names[i][t]]) > 0:
plt.text(
align_time, v_annot_pos,
'no pre-arm data - cannot calculate {:s} start time'.format(
self._annotation_text[i][t]), fontsize=12, horizontalalignment='left',
verticalalignment='center', color=colors[t])
elif np.amax(self.plot_data[self._variable_names[i][t]]) > 0:
plt.text(
align_time, v_annot_pos, '{:s} at {:.1f} sec'.format(
self._annotation_text[i][t], align_time), fontsize=12,
horizontalalignment='left', verticalalignment='center', color=colors[t])
if self._additional_annotation is not None:
for a in range(len(self._additional_annotation[i])):
v_annot_pos = (a + 1.0) / (len(self._additional_annotation[i]) + 1)
plt.text(
self._additional_annotation[i][a][0], v_annot_pos,
self._additional_annotation[i][a][1], fontsize=12,
horizontalalignment='left', verticalalignment='center', color='b')
self.fig.tight_layout(rect=[0, 0.03, 1, 0.95])
class CheckFlagsPlot(DataPlot):
"""
    class for creating a check flags plot.
"""
def __init__(
self, data_time: np.ndarray, plot_data: dict, variable_names: List[List[str]],
x_label: str, y_labels: List[str], y_lim: Optional[Tuple[int, int]] = None,
plot_title: str = '', legend: Optional[List[str]] = None,
sub_titles: Optional[List[str]] = None, pdf_handle: Optional[PdfPages] = None,
annotate: bool = False) -> None:
"""
        initializes a check flags plot
:param plot_data:
:param variable_names:
        :param x_label:
        :param y_labels:
:param plot_title:
:param sub_titles:
:param pdf_handle:
"""
super().__init__(
plot_data, variable_names, plot_title=plot_title, sub_titles=sub_titles,
x_labels=[x_label]*len(y_labels), y_labels=y_labels, y_lim=y_lim, legend=legend,
pdf_handle=pdf_handle)
self._data_time = data_time
self._b_annotate = annotate
def plot(self):
"""
        plots the check flags data.
:return:
"""
if self.fig is None:
return
colors = ['b', 'r', 'g', 'c', 'k', 'm']
for i in range(len(self._variable_names)):
# create a subplot for every variable
plt.subplot(len(self._variable_names), 1, i + 1)
if self._sub_titles is not None:
plt.title(self._sub_titles[i])
for col, var in zip(colors[:len(self._variable_names[i])], self._variable_names[i]):
plt.plot(self._data_time, self.plot_data[var], col)
plt.xlabel(self._x_labels[i])
plt.ylabel(self._y_labels[i])
plt.grid()
if self._y_lim is not None:
plt.ylim(self._y_lim)
if self._legend is not None:
plt.legend(self._legend[i], loc='upper left')
if self._b_annotate:
for col, var in zip(colors[:len(self._variable_names[i])], self._variable_names[i]):
# add the maximum and minimum value as an annotation
_, max_value, max_time = get_max_arg_time_value(
self.plot_data[var], self._data_time)
mean_value = np.mean(self.plot_data[var])
plt.text(
max_time, max_value,
'max={:.4f}, mean={:.4f}'.format(max_value, mean_value), color=col,
fontsize=12, horizontalalignment='left', verticalalignment='bottom')
self.fig.tight_layout(rect=[0, 0.03, 1, 0.95])
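if __name__ == '__main__':
    # Illustrative sketch only (synthetic data and a hypothetical output file
    # name; not part of the original module): write a simple time series plot
    # to a PDF using the classes above.
    example_data = {'vel_n': np.random.randn(500), 'vel_e': np.random.randn(500)}
    with PdfPages('data_plots_example.pdf') as pdf:
        ts_plot = TimeSeriesPlot(
            example_data, [['vel_n'], ['vel_e']],
            x_labels=['sample', 'sample'], y_labels=['m/s', 'm/s'],
            plot_title='synthetic velocity data', pdf_handle=pdf)
        ts_plot.save()
        ts_plot.close()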
|
bsd-3-clause
|
dongjoon-hyun/spark
|
python/pyspark/sql/tests/test_dataframe.py
|
9
|
42005
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import pydoc
import shutil
import tempfile
import time
import unittest
from pyspark.sql import SparkSession, Row
from pyspark.sql.types import StringType, IntegerType, DoubleType, StructType, StructField, \
BooleanType, DateType, TimestampType, FloatType
from pyspark.sql.utils import AnalysisException, IllegalArgumentException
from pyspark.testing.sqlutils import ReusedSQLTestCase, SQLTestUtils, have_pyarrow, have_pandas, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
class DataFrameTests(ReusedSQLTestCase):
def test_range(self):
self.assertEqual(self.spark.range(1, 1).count(), 0)
self.assertEqual(self.spark.range(1, 0, -1).count(), 1)
self.assertEqual(self.spark.range(0, 1 << 40, 1 << 39).count(), 2)
self.assertEqual(self.spark.range(-2).count(), 0)
self.assertEqual(self.spark.range(3).count(), 3)
def test_duplicated_column_names(self):
df = self.spark.createDataFrame([(1, 2)], ["c", "c"])
row = df.select('*').first()
self.assertEqual(1, row[0])
self.assertEqual(2, row[1])
self.assertEqual("Row(c=1, c=2)", str(row))
# Cannot access columns
self.assertRaises(AnalysisException, lambda: df.select(df[0]).first())
self.assertRaises(AnalysisException, lambda: df.select(df.c).first())
self.assertRaises(AnalysisException, lambda: df.select(df["c"]).first())
def test_freqItems(self):
vals = [Row(a=1, b=-2.0) if i % 2 == 0 else Row(a=i, b=i * 1.0) for i in range(100)]
df = self.sc.parallelize(vals).toDF()
items = df.stat.freqItems(("a", "b"), 0.4).collect()[0]
self.assertTrue(1 in items[0])
self.assertTrue(-2.0 in items[1])
def test_help_command(self):
# Regression test for SPARK-5464
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = self.spark.read.json(rdd)
# render_doc() reproduces the help() exception without printing output
pydoc.render_doc(df)
pydoc.render_doc(df.foo)
pydoc.render_doc(df.take(1))
def test_dropna(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# shouldn't drop a non-null row
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, 80.1)], schema).dropna().count(),
1)
# dropping rows with a single null value
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna().count(),
0)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='any').count(),
0)
# if how = 'all', only drop rows if all values are null
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='all').count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(None, None, None)], schema).dropna(how='all').count(),
0)
# how and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
0)
# threshold
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(thresh=2).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(thresh=2).count(),
0)
# threshold and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 180.9)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
0)
# thresh should take precedence over how
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(
how='any', thresh=2, subset=['name', 'age']).count(),
1)
def test_fillna(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True),
StructField("spy", BooleanType(), True)])
# fillna shouldn't change non-null values
row = self.spark.createDataFrame([(u'Alice', 10, 80.1, True)], schema).fillna(50).first()
self.assertEqual(row.age, 10)
# fillna with int
row = self.spark.createDataFrame([(u'Alice', None, None, None)], schema).fillna(50).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.0)
# fillna with double
row = self.spark.createDataFrame(
[(u'Alice', None, None, None)], schema).fillna(50.1).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.1)
# fillna with bool
row = self.spark.createDataFrame(
[(u'Alice', None, None, None)], schema).fillna(True).first()
self.assertEqual(row.age, None)
self.assertEqual(row.spy, True)
# fillna with string
row = self.spark.createDataFrame([(None, None, None, None)], schema).fillna("hello").first()
self.assertEqual(row.name, u"hello")
self.assertEqual(row.age, None)
# fillna with subset specified for numeric cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna(50, subset=['name', 'age']).first()
self.assertEqual(row.name, None)
self.assertEqual(row.age, 50)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, None)
# fillna with subset specified for string cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna("haha", subset=['name', 'age']).first()
self.assertEqual(row.name, "haha")
self.assertEqual(row.age, None)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, None)
# fillna with subset specified for bool cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna(True, subset=['name', 'spy']).first()
self.assertEqual(row.name, None)
self.assertEqual(row.age, None)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, True)
# fillna with dictionary for boolean types
row = self.spark.createDataFrame([Row(a=None), Row(a=True)]).fillna({"a": True}).first()
self.assertEqual(row.a, True)
def test_repartitionByRange_dataframe(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
df1 = self.spark.createDataFrame(
[(u'Bob', 27, 66.0), (u'Alice', 10, 10.0), (u'Bob', 10, 66.0)], schema)
df2 = self.spark.createDataFrame(
[(u'Alice', 10, 10.0), (u'Bob', 10, 66.0), (u'Bob', 27, 66.0)], schema)
# test repartitionByRange(numPartitions, *cols)
df3 = df1.repartitionByRange(2, "name", "age")
self.assertEqual(df3.rdd.getNumPartitions(), 2)
self.assertEqual(df3.rdd.first(), df2.rdd.first())
self.assertEqual(df3.rdd.take(3), df2.rdd.take(3))
# test repartitionByRange(numPartitions, *cols)
df4 = df1.repartitionByRange(3, "name", "age")
self.assertEqual(df4.rdd.getNumPartitions(), 3)
self.assertEqual(df4.rdd.first(), df2.rdd.first())
self.assertEqual(df4.rdd.take(3), df2.rdd.take(3))
# test repartitionByRange(*cols)
df5 = df1.repartitionByRange(5, "name", "age")
self.assertEqual(df5.rdd.first(), df2.rdd.first())
self.assertEqual(df5.rdd.take(3), df2.rdd.take(3))
def test_replace(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# replace with int
row = self.spark.createDataFrame([(u'Alice', 10, 10.0)], schema).replace(10, 20).first()
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 20.0)
# replace with double
row = self.spark.createDataFrame(
[(u'Alice', 80, 80.0)], schema).replace(80.0, 82.1).first()
self.assertEqual(row.age, 82)
self.assertEqual(row.height, 82.1)
# replace with string
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(u'Alice', u'Ann').first()
self.assertEqual(row.name, u"Ann")
self.assertEqual(row.age, 10)
# replace with subset specified by a string of a column name w/ actual change
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='age').first()
self.assertEqual(row.age, 20)
# replace with subset specified by a string of a column name w/o actual change
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='height').first()
self.assertEqual(row.age, 10)
# replace with subset specified with one column replaced, another column not in subset
# stays unchanged.
row = self.spark.createDataFrame(
[(u'Alice', 10, 10.0)], schema).replace(10, 20, subset=['name', 'age']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 10.0)
# replace with subset specified but no column will be replaced
row = self.spark.createDataFrame(
[(u'Alice', 10, None)], schema).replace(10, 20, subset=['name', 'height']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 10)
self.assertEqual(row.height, None)
# replace with lists
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace([u'Alice'], [u'Ann']).first()
self.assertTupleEqual(row, (u'Ann', 10, 80.1))
# replace with dict
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: 11}).first()
self.assertTupleEqual(row, (u'Alice', 11, 80.1))
# test backward compatibility with dummy value
dummy_value = 1
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({'Alice': 'Bob'}, dummy_value).first()
self.assertTupleEqual(row, (u'Bob', 10, 80.1))
# test dict with mixed numerics
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: -10, 80.1: 90.5}).first()
self.assertTupleEqual(row, (u'Alice', -10, 90.5))
# replace with tuples
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace((u'Alice', ), (u'Bob', )).first()
self.assertTupleEqual(row, (u'Bob', 10, 80.1))
# replace multiple columns
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace((10, 80.0), (20, 90)).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.0))
# test for mixed numerics
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace((10, 80), (20, 90.5)).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.5))
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace({10: 20, 80: 90.5}).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.5))
# replace with boolean
row = (self
.spark.createDataFrame([(u'Alice', 10, 80.0)], schema)
.selectExpr("name = 'Bob'", 'age <= 15')
.replace(False, True).first())
self.assertTupleEqual(row, (True, True))
# replace string with None and then drop None rows
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace(u'Alice', None).dropna()
self.assertEqual(row.count(), 0)
# replace with number and None
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace([10, 80], [20, None]).first()
self.assertTupleEqual(row, (u'Alice', 20, None))
# should fail if subset is not list, tuple or None
with self.assertRaises(TypeError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: 11}, subset=1).first()
# should fail if to_replace and value have different length
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(["Alice", "Bob"], ["Eve"]).first()
        # should fail when an unexpected type is received
with self.assertRaises(TypeError):
from datetime import datetime
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(datetime.now(), datetime.now()).first()
# should fail if provided mixed type replacements
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(["Alice", 10], ["Eve", 20]).first()
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({u"Alice": u"Bob", 10: 20}).first()
with self.assertRaisesRegex(
TypeError,
'value argument is required when to_replace is not a dictionary.'):
self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace(["Alice", "Bob"]).first()
def test_with_column_with_existing_name(self):
keys = self.df.withColumn("key", self.df.key).select("key").collect()
self.assertEqual([r.key for r in keys], list(range(100)))
# regression test for SPARK-10417
def test_column_iterator(self):
def foo():
for x in self.df.key:
break
self.assertRaises(TypeError, foo)
def test_generic_hints(self):
from pyspark.sql import DataFrame
df1 = self.spark.range(10e10).toDF("id")
df2 = self.spark.range(10e10).toDF("id")
self.assertIsInstance(df1.hint("broadcast"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", []), DataFrame)
# Dummy rules
self.assertIsInstance(df1.hint("broadcast", "foo", "bar"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", ["foo", "bar"]), DataFrame)
plan = df1.join(df2.hint("broadcast"), "id")._jdf.queryExecution().executedPlan()
self.assertEqual(1, plan.toString().count("BroadcastHashJoin"))
# add tests for SPARK-23647 (test more types for hint)
def test_extended_hint_types(self):
df = self.spark.range(10e10).toDF("id")
such_a_nice_list = ["itworks1", "itworks2", "itworks3"]
hinted_df = df.hint("my awesome hint", 1.2345, "what", such_a_nice_list)
logical_plan = hinted_df._jdf.queryExecution().logical()
self.assertEqual(1, logical_plan.toString().count("1.2345"))
self.assertEqual(1, logical_plan.toString().count("what"))
self.assertEqual(3, logical_plan.toString().count("itworks"))
def test_sample(self):
self.assertRaisesRegex(
TypeError,
"should be a bool, float and number",
lambda: self.spark.range(1).sample())
self.assertRaises(
TypeError,
lambda: self.spark.range(1).sample("a"))
self.assertRaises(
TypeError,
lambda: self.spark.range(1).sample(seed="abc"))
self.assertRaises(
IllegalArgumentException,
lambda: self.spark.range(1).sample(-1.0))
def test_toDF_with_schema_string(self):
data = [Row(key=i, value=str(i)) for i in range(100)]
rdd = self.sc.parallelize(data, 5)
df = rdd.toDF("key: int, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:int,value:string>")
self.assertEqual(df.collect(), data)
# different but compatible field types can be used.
df = rdd.toDF("key: string, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:string,value:string>")
self.assertEqual(df.collect(), [Row(key=str(i), value=str(i)) for i in range(100)])
# field names can differ.
df = rdd.toDF(" a: int, b: string ")
self.assertEqual(df.schema.simpleString(), "struct<a:int,b:string>")
self.assertEqual(df.collect(), data)
# number of fields must match.
self.assertRaisesRegex(Exception, "Length of object",
lambda: rdd.toDF("key: int").collect())
# field types mismatch will cause exception at runtime.
self.assertRaisesRegex(Exception, "FloatType can not accept",
lambda: rdd.toDF("key: float, value: string").collect())
# flat schema values will be wrapped into row.
df = rdd.map(lambda row: row.key).toDF("int")
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
# users can use DataType directly instead of data type string.
df = rdd.map(lambda row: row.key).toDF(IntegerType())
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
def test_join_without_on(self):
df1 = self.spark.range(1).toDF("a")
df2 = self.spark.range(1).toDF("b")
with self.sql_conf({"spark.sql.crossJoin.enabled": False}):
self.assertRaises(AnalysisException, lambda: df1.join(df2, how="inner").collect())
with self.sql_conf({"spark.sql.crossJoin.enabled": True}):
actual = df1.join(df2, how="inner").collect()
expected = [Row(a=0, b=0)]
self.assertEqual(actual, expected)
    # Regression test for invalid join methods when on is None, SPARK-14761
def test_invalid_join_method(self):
df1 = self.spark.createDataFrame([("Alice", 5), ("Bob", 8)], ["name", "age"])
df2 = self.spark.createDataFrame([("Alice", 80), ("Bob", 90)], ["name", "height"])
self.assertRaises(IllegalArgumentException, lambda: df1.join(df2, how="invalid-join-type"))
# Cartesian products require cross join syntax
def test_require_cross(self):
df1 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
df2 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
with self.sql_conf({"spark.sql.crossJoin.enabled": False}):
# joins without conditions require cross join syntax
self.assertRaises(AnalysisException, lambda: df1.join(df2).collect())
# works with crossJoin
self.assertEqual(1, df1.crossJoin(df2).count())
def test_cache(self):
spark = self.spark
with self.tempView("tab1", "tab2"):
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab1")
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab2")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab1")
self.assertTrue(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab2")
spark.catalog.uncacheTable("tab1")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertTrue(spark.catalog.isCached("tab2"))
spark.catalog.clearCache()
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
self.assertRaisesRegex(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.isCached("does_not_exist"))
self.assertRaisesRegex(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.cacheTable("does_not_exist"))
self.assertRaisesRegex(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.uncacheTable("does_not_exist"))
def _to_pandas(self):
from datetime import datetime, date
schema = StructType().add("a", IntegerType()).add("b", StringType())\
.add("c", BooleanType()).add("d", FloatType())\
.add("dt", DateType()).add("ts", TimestampType())
data = [
(1, "foo", True, 3.0, date(1969, 1, 1), datetime(1969, 1, 1, 1, 1, 1)),
(2, "foo", True, 5.0, None, None),
(3, "bar", False, -1.0, date(2012, 3, 3), datetime(2012, 3, 3, 3, 3, 3)),
(4, "bar", False, 6.0, date(2100, 4, 4), datetime(2100, 4, 4, 4, 4, 4)),
]
df = self.spark.createDataFrame(data, schema)
return df.toPandas()
@unittest.skipIf(not have_pandas, pandas_requirement_message) # type: ignore
def test_to_pandas(self):
import numpy as np
pdf = self._to_pandas()
types = pdf.dtypes
self.assertEqual(types[0], np.int32)
self.assertEqual(types[1], np.object)
self.assertEqual(types[2], np.bool)
self.assertEqual(types[3], np.float32)
self.assertEqual(types[4], np.object) # datetime.date
self.assertEqual(types[5], 'datetime64[ns]')
@unittest.skipIf(not have_pandas, pandas_requirement_message) # type: ignore
def test_to_pandas_with_duplicated_column_names(self):
import numpy as np
sql = "select 1 v, 1 v"
for arrowEnabled in [False, True]:
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": arrowEnabled}):
df = self.spark.sql(sql)
pdf = df.toPandas()
types = pdf.dtypes
self.assertEqual(types.iloc[0], np.int32)
self.assertEqual(types.iloc[1], np.int32)
@unittest.skipIf(not have_pandas, pandas_requirement_message) # type: ignore
def test_to_pandas_on_cross_join(self):
import numpy as np
sql = """
select t1.*, t2.* from (
select explode(sequence(1, 3)) v
) t1 left join (
select explode(sequence(1, 3)) v
) t2
"""
for arrowEnabled in [False, True]:
with self.sql_conf({"spark.sql.crossJoin.enabled": True,
"spark.sql.execution.arrow.pyspark.enabled": arrowEnabled}):
df = self.spark.sql(sql)
pdf = df.toPandas()
types = pdf.dtypes
self.assertEqual(types.iloc[0], np.int32)
self.assertEqual(types.iloc[1], np.int32)
@unittest.skipIf(have_pandas, "Required Pandas was found.")
def test_to_pandas_required_pandas_not_found(self):
with QuietTest(self.sc):
with self.assertRaisesRegex(ImportError, 'Pandas >= .* must be installed'):
self._to_pandas()
@unittest.skipIf(not have_pandas, pandas_requirement_message) # type: ignore
def test_to_pandas_avoid_astype(self):
import numpy as np
schema = StructType().add("a", IntegerType()).add("b", StringType())\
.add("c", IntegerType())
data = [(1, "foo", 16777220), (None, "bar", None)]
df = self.spark.createDataFrame(data, schema)
types = df.toPandas().dtypes
self.assertEqual(types[0], np.float64) # doesn't convert to np.int32 due to NaN value.
self.assertEqual(types[1], np.object)
self.assertEqual(types[2], np.float64)
@unittest.skipIf(not have_pandas, pandas_requirement_message) # type: ignore
def test_to_pandas_from_empty_dataframe(self):
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
# SPARK-29188 test that toPandas() on an empty dataframe has the correct dtypes
import numpy as np
sql = """
SELECT CAST(1 AS TINYINT) AS tinyint,
CAST(1 AS SMALLINT) AS smallint,
CAST(1 AS INT) AS int,
CAST(1 AS BIGINT) AS bigint,
CAST(0 AS FLOAT) AS float,
CAST(0 AS DOUBLE) AS double,
CAST(1 AS BOOLEAN) AS boolean,
CAST('foo' AS STRING) AS string,
CAST('2019-01-01' AS TIMESTAMP) AS timestamp
"""
dtypes_when_nonempty_df = self.spark.sql(sql).toPandas().dtypes
dtypes_when_empty_df = self.spark.sql(sql).filter("False").toPandas().dtypes
self.assertTrue(np.all(dtypes_when_empty_df == dtypes_when_nonempty_df))
@unittest.skipIf(not have_pandas, pandas_requirement_message) # type: ignore
def test_to_pandas_from_null_dataframe(self):
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
# SPARK-29188 test that toPandas() on a dataframe with only nulls has correct dtypes
import numpy as np
sql = """
SELECT CAST(NULL AS TINYINT) AS tinyint,
CAST(NULL AS SMALLINT) AS smallint,
CAST(NULL AS INT) AS int,
CAST(NULL AS BIGINT) AS bigint,
CAST(NULL AS FLOAT) AS float,
CAST(NULL AS DOUBLE) AS double,
CAST(NULL AS BOOLEAN) AS boolean,
CAST(NULL AS STRING) AS string,
CAST(NULL AS TIMESTAMP) AS timestamp
"""
pdf = self.spark.sql(sql).toPandas()
types = pdf.dtypes
self.assertEqual(types[0], np.float64)
self.assertEqual(types[1], np.float64)
self.assertEqual(types[2], np.float64)
self.assertEqual(types[3], np.float64)
self.assertEqual(types[4], np.float32)
self.assertEqual(types[5], np.float64)
self.assertEqual(types[6], np.object)
self.assertEqual(types[7], np.object)
self.assertTrue(np.can_cast(np.datetime64, types[8]))
@unittest.skipIf(not have_pandas, pandas_requirement_message) # type: ignore
def test_to_pandas_from_mixed_dataframe(self):
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
# SPARK-29188 test that toPandas() on a dataframe with some nulls has correct dtypes
import numpy as np
sql = """
SELECT CAST(col1 AS TINYINT) AS tinyint,
CAST(col2 AS SMALLINT) AS smallint,
CAST(col3 AS INT) AS int,
CAST(col4 AS BIGINT) AS bigint,
CAST(col5 AS FLOAT) AS float,
CAST(col6 AS DOUBLE) AS double,
CAST(col7 AS BOOLEAN) AS boolean,
CAST(col8 AS STRING) AS string,
timestamp_seconds(col9) AS timestamp
FROM VALUES (1, 1, 1, 1, 1, 1, 1, 1, 1),
(NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)
"""
pdf_with_some_nulls = self.spark.sql(sql).toPandas()
pdf_with_only_nulls = self.spark.sql(sql).filter('tinyint is null').toPandas()
self.assertTrue(np.all(pdf_with_only_nulls.dtypes == pdf_with_some_nulls.dtypes))
def test_create_dataframe_from_array_of_long(self):
import array
data = [Row(longarray=array.array('l', [-9223372036854775808, 0, 9223372036854775807]))]
df = self.spark.createDataFrame(data)
self.assertEqual(df.first(), Row(longarray=[-9223372036854775808, 0, 9223372036854775807]))
@unittest.skipIf(not have_pandas, pandas_requirement_message) # type: ignore
def test_create_dataframe_from_pandas_with_timestamp(self):
import pandas as pd
from datetime import datetime
pdf = pd.DataFrame({"ts": [datetime(2017, 10, 31, 1, 1, 1)],
"d": [pd.Timestamp.now().date()]}, columns=["d", "ts"])
# test types are inferred correctly without specifying schema
df = self.spark.createDataFrame(pdf)
self.assertTrue(isinstance(df.schema['ts'].dataType, TimestampType))
self.assertTrue(isinstance(df.schema['d'].dataType, DateType))
# test that createDataFrame with an explicit schema accepts a pandas DataFrame as input
df = self.spark.createDataFrame(pdf, schema="d date, ts timestamp")
self.assertTrue(isinstance(df.schema['ts'].dataType, TimestampType))
self.assertTrue(isinstance(df.schema['d'].dataType, DateType))
@unittest.skipIf(have_pandas, "Required Pandas was found.")
def test_create_dataframe_required_pandas_not_found(self):
with QuietTest(self.sc):
with self.assertRaisesRegex(
ImportError,
"(Pandas >= .* must be installed|No module named '?pandas'?)"):
import pandas as pd
from datetime import datetime
pdf = pd.DataFrame({"ts": [datetime(2017, 10, 31, 1, 1, 1)],
"d": [pd.Timestamp.now().date()]})
self.spark.createDataFrame(pdf)
# Regression test for SPARK-23360
@unittest.skipIf(not have_pandas, pandas_requirement_message) # type: ignore
def test_create_dataframe_from_pandas_with_dst(self):
import pandas as pd
from pandas.testing import assert_frame_equal
from datetime import datetime
pdf = pd.DataFrame({'time': [datetime(2015, 10, 31, 22, 30)]})
df = self.spark.createDataFrame(pdf)
assert_frame_equal(pdf, df.toPandas())
orig_env_tz = os.environ.get('TZ', None)
try:
tz = 'America/Los_Angeles'
os.environ['TZ'] = tz
time.tzset()
with self.sql_conf({'spark.sql.session.timeZone': tz}):
df = self.spark.createDataFrame(pdf)
assert_frame_equal(pdf, df.toPandas())
finally:
del os.environ['TZ']
if orig_env_tz is not None:
os.environ['TZ'] = orig_env_tz
time.tzset()
def test_repr_behaviors(self):
import re
pattern = re.compile(r'^ *\|', re.MULTILINE)
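# The expected strings below use a leading '|' margin (Scala stripMargin style);
# re.sub with this pattern strips the indentation and the margin character from
# every line before comparing against the DataFrame repr.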
df = self.spark.createDataFrame([(1, "1"), (22222, "22222")], ("key", "value"))
# test when eager evaluation is enabled and _repr_html_ will not be called
with self.sql_conf({"spark.sql.repl.eagerEval.enabled": True}):
expected1 = """+-----+-----+
|| key|value|
|+-----+-----+
|| 1| 1|
||22222|22222|
|+-----+-----+
|"""
self.assertEqual(re.sub(pattern, '', expected1), df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.truncate": 3}):
expected2 = """+---+-----+
||key|value|
|+---+-----+
|| 1| 1|
||222| 222|
|+---+-----+
|"""
self.assertEqual(re.sub(pattern, '', expected2), df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.maxNumRows": 1}):
expected3 = """+---+-----+
||key|value|
|+---+-----+
|| 1| 1|
|+---+-----+
|only showing top 1 row
|"""
self.assertEqual(re.sub(pattern, '', expected3), df.__repr__())
# test when eager evaluation is enabled and _repr_html_ will be called
with self.sql_conf({"spark.sql.repl.eagerEval.enabled": True}):
expected1 = """<table border='1'>
|<tr><th>key</th><th>value</th></tr>
|<tr><td>1</td><td>1</td></tr>
|<tr><td>22222</td><td>22222</td></tr>
|</table>
|"""
self.assertEqual(re.sub(pattern, '', expected1), df._repr_html_())
with self.sql_conf({"spark.sql.repl.eagerEval.truncate": 3}):
expected2 = """<table border='1'>
|<tr><th>key</th><th>value</th></tr>
|<tr><td>1</td><td>1</td></tr>
|<tr><td>222</td><td>222</td></tr>
|</table>
|"""
self.assertEqual(re.sub(pattern, '', expected2), df._repr_html_())
with self.sql_conf({"spark.sql.repl.eagerEval.maxNumRows": 1}):
expected3 = """<table border='1'>
|<tr><th>key</th><th>value</th></tr>
|<tr><td>1</td><td>1</td></tr>
|</table>
|only showing top 1 row
|"""
self.assertEqual(re.sub(pattern, '', expected3), df._repr_html_())
# test when eager evaluation is disabled and _repr_html_ will be called
with self.sql_conf({"spark.sql.repl.eagerEval.enabled": False}):
expected = "DataFrame[key: bigint, value: string]"
self.assertEqual(None, df._repr_html_())
self.assertEqual(expected, df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.truncate": 3}):
self.assertEqual(None, df._repr_html_())
self.assertEqual(expected, df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.maxNumRows": 1}):
self.assertEqual(None, df._repr_html_())
self.assertEqual(expected, df.__repr__())
def test_to_local_iterator(self):
df = self.spark.range(8, numPartitions=4)
expected = df.collect()
it = df.toLocalIterator()
self.assertEqual(expected, list(it))
# Test DataFrame with empty partition
df = self.spark.range(3, numPartitions=4)
it = df.toLocalIterator()
expected = df.collect()
self.assertEqual(expected, list(it))
def test_to_local_iterator_prefetch(self):
df = self.spark.range(8, numPartitions=4)
expected = df.collect()
it = df.toLocalIterator(prefetchPartitions=True)
self.assertEqual(expected, list(it))
def test_to_local_iterator_not_fully_consumed(self):
# SPARK-23961: toLocalIterator throws exception when not fully consumed
# Create a DataFrame large enough so that write to socket will eventually block
df = self.spark.range(1 << 20, numPartitions=2)
it = df.toLocalIterator()
self.assertEqual(df.take(1)[0], next(it))
with QuietTest(self.sc):
it = None # remove iterator from scope, socket is closed when cleaned up
# Make sure normal df operations still work
result = []
for i, row in enumerate(df.toLocalIterator()):
result.append(row)
if i == 7:
break
self.assertEqual(df.take(8), result)
def test_same_semantics_error(self):
with QuietTest(self.sc):
with self.assertRaisesRegex(TypeError, "should be of DataFrame.*int"):
self.spark.range(10).sameSemantics(1)
def test_input_files(self):
tpath = tempfile.mkdtemp()
shutil.rmtree(tpath)
try:
self.spark.range(1, 100, 1, 10).write.parquet(tpath)
# read parquet file and get the input files list
input_files_list = self.spark.read.parquet(tpath).inputFiles()
# input files list should contain 10 entries
self.assertEqual(len(input_files_list), 10)
# all file paths in list must contain tpath
for file_path in input_files_list:
self.assertTrue(tpath in file_path)
finally:
shutil.rmtree(tpath)
def test_df_show(self):
# SPARK-35408: ensure better diagnostics if incorrect parameters are passed
# to DataFrame.show
df = self.spark.createDataFrame([('foo',)])
df.show(5)
df.show(5, True)
df.show(5, 1, True)
df.show(n=5, truncate='1', vertical=False)
df.show(n=5, truncate=1.5, vertical=False)
with self.assertRaisesRegex(TypeError, "Parameter 'n'"):
df.show(True)
with self.assertRaisesRegex(TypeError, "Parameter 'vertical'"):
df.show(vertical='foo')
with self.assertRaisesRegex(TypeError, "Parameter 'truncate=foo'"):
df.show(truncate='foo')
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message) # type: ignore
def test_to_pandas_on_spark(self):
import pandas as pd
from pandas.testing import assert_frame_equal
sdf = self.spark.createDataFrame([("a", 1), ("b", 2), ("c", 3)], ["Col1", "Col2"])
psdf_from_sdf = sdf.to_pandas_on_spark()
psdf_from_sdf_with_index = sdf.to_pandas_on_spark(index_col="Col1")
pdf = pd.DataFrame({"Col1": ["a", "b", "c"], "Col2": [1, 2, 3]})
pdf_with_index = pdf.set_index("Col1")
assert_frame_equal(pdf, psdf_from_sdf.to_pandas())
assert_frame_equal(pdf_with_index, psdf_from_sdf_with_index.to_pandas())
class QueryExecutionListenerTests(unittest.TestCase, SQLTestUtils):
# These tests are separate because they use 'spark.sql.queryExecutionListeners', which is
# static and immutable. This can't be set or unset, for example, via `spark.conf`.
@classmethod
def setUpClass(cls):
import glob
from pyspark.find_spark_home import _find_spark_home
SPARK_HOME = _find_spark_home()
filename_pattern = (
"sql/core/target/scala-*/test-classes/org/apache/spark/sql/"
"TestQueryExecutionListener.class")
cls.has_listener = bool(glob.glob(os.path.join(SPARK_HOME, filename_pattern)))
if cls.has_listener:
# Note that 'spark.sql.queryExecutionListeners' is a static immutable configuration.
cls.spark = SparkSession.builder \
.master("local[4]") \
.appName(cls.__name__) \
.config(
"spark.sql.queryExecutionListeners",
"org.apache.spark.sql.TestQueryExecutionListener") \
.getOrCreate()
def setUp(self):
if not self.has_listener:
raise self.skipTest(
"'org.apache.spark.sql.TestQueryExecutionListener' is not "
"available. Will skip the related tests.")
@classmethod
def tearDownClass(cls):
if hasattr(cls, "spark"):
cls.spark.stop()
def tearDown(self):
self.spark._jvm.OnSuccessCall.clear()
def test_query_execution_listener_on_collect(self):
self.assertFalse(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should not be called before 'collect'")
self.spark.sql("SELECT * FROM range(1)").collect()
self.spark.sparkContext._jsc.sc().listenerBus().waitUntilEmpty(10000)
self.assertTrue(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should be called after 'collect'")
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message) # type: ignore
def test_query_execution_listener_on_collect_with_arrow(self):
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": True}):
self.assertFalse(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should not be "
"called before 'toPandas'")
self.spark.sql("SELECT * FROM range(1)").toPandas()
self.spark.sparkContext._jsc.sc().listenerBus().waitUntilEmpty(10000)
self.assertTrue(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should be called after 'toPandas'")
if __name__ == "__main__":
from pyspark.sql.tests.test_dataframe import * # noqa: F401
try:
import xmlrunner # type: ignore
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
apache-2.0
|
jorge2703/scikit-learn
|
sklearn/decomposition/tests/test_online_lda.py
|
48
|
12645
|
import numpy as np
from scipy.linalg import block_diag
from scipy.sparse import csr_matrix
from scipy.special import psi
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.decomposition._online_lda import (_dirichlet_expectation_1d,
_dirichlet_expectation_2d)
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import if_not_mac_os
from sklearn.utils.validation import NotFittedError
from sklearn.externals.six.moves import xrange
def _build_sparse_mtx():
# Create 3 topics and each topic has 3 distinct words.
# (Each word only belongs to a single topic.)
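# The result is a 9x9 block-diagonal matrix: three 3x3 blocks filled with the
# value 3 along the diagonal, converted to CSR format, so each word co-occurs
# only with the other words of its own topic.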
n_topics = 3
block = n_topics * np.ones((3, 3))
blocks = [block] * n_topics
X = block_diag(*blocks)
X = csr_matrix(X)
return (n_topics, X)
def test_lda_default_prior_params():
# default prior parameter should be `1 / n_topics`
# and verbose params should not affect result
n_topics, X = _build_sparse_mtx()
prior = 1. / n_topics
lda_1 = LatentDirichletAllocation(n_topics=n_topics, doc_topic_prior=prior,
topic_word_prior=prior, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, random_state=0)
topic_distr_1 = lda_1.fit_transform(X)
topic_distr_2 = lda_2.fit_transform(X)
assert_almost_equal(topic_distr_1, topic_distr_2)
def test_lda_fit_batch():
# Test LDA batch learning (`fit` method with 'batch' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, evaluate_every=1,
learning_method='batch', random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_fit_online():
# Test LDA online learning (`fit` method with 'online' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
evaluate_every=1, learning_method='online',
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_partial_fit():
# Test LDA online learning (`partial_fit` method)
# (same as test_lda_fit_batch)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
total_samples=100, random_state=rng)
for i in xrange(3):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_dense_input():
# Test LDA with dense input.
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_method='batch',
random_state=rng)
lda.fit(X.toarray())
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_transform():
# Test LDA transform.
# Transform result cannot be negative
rng = np.random.RandomState(0)
X = rng.randint(5, size=(20, 10))
n_topics = 3
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
X_trans = lda.fit_transform(X)
assert_true((X_trans > 0.0).any())
def test_lda_fit_transform():
# Test LDA fit_transform & transform
# fit_transform and transform result should be the same
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
X = rng.randint(10, size=(50, 20))
lda = LatentDirichletAllocation(n_topics=5, learning_method=method, random_state=rng)
X_fit = lda.fit_transform(X)
X_trans = lda.transform(X)
assert_array_almost_equal(X_fit, X_trans, 4)
def test_lda_partial_fit_dim_mismatch():
# test `n_features` mismatch in `partial_fit`
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_col = rng.randint(6, 10)
X_1 = np.random.randint(4, size=(10, n_col))
X_2 = np.random.randint(4, size=(10, n_col + 1))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.partial_fit(X_1)
assert_raises_regexp(ValueError, r"^The provided data has", lda.partial_fit, X_2)
def test_invalid_params():
# test `_check_params` method
X = np.ones((5, 10))
invalid_models = (
('n_topics', LatentDirichletAllocation(n_topics=0)),
('learning_method', LatentDirichletAllocation(learning_method='unknown')),
('total_samples', LatentDirichletAllocation(total_samples=0)),
('learning_offset', LatentDirichletAllocation(learning_offset=-1)),
)
for param, model in invalid_models:
regex = r"^Invalid %r parameter" % param
assert_raises_regexp(ValueError, regex, model.fit, X)
def test_lda_negative_input():
# test that passing a dense matrix with negative values raises an error.
X = -np.ones((5, 10))
lda = LatentDirichletAllocation()
regex = r"^Negative values in data passed"
assert_raises_regexp(ValueError, regex, lda.fit, X)
def test_lda_no_component_error():
# test `transform` and `perplexity` before `fit`
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
lda = LatentDirichletAllocation()
regex = r"^no 'components_' attribute"
assert_raises_regexp(NotFittedError, regex, lda.transform, X)
assert_raises_regexp(NotFittedError, regex, lda.perplexity, X)
def test_lda_transform_mismatch():
# test `n_features` mismatch in partial_fit and transform
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
X_2 = rng.randint(4, size=(10, 8))
n_topics = rng.randint(3, 6)
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
lda.partial_fit(X)
assert_raises_regexp(ValueError, r"^The provided data has", lda.partial_fit, X_2)
@if_not_mac_os()
def test_lda_multi_jobs():
# Test LDA batch and online training with multiple CPUs
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=3,
learning_method=method, random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
@if_not_mac_os()
def test_lda_partial_fit_multi_jobs():
# Test LDA online training with multi CPU
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=-1, learning_offset=5.,
total_samples=30, random_state=rng)
for i in xrange(3):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_perplexity_mismatch():
# test dimension mismatch in `perplexity` method
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_samples = rng.randint(6, 10)
X = np.random.randint(4, size=(n_samples, 10))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.fit(X)
# invalid samples
invalid_n_samples = rng.randint(4, size=(n_samples + 1, n_topics))
assert_raises_regexp(ValueError, r'Number of samples', lda.perplexity, X, invalid_n_samples)
# invalid topic number
invalid_n_topics = rng.randint(4, size=(n_samples, n_topics + 1))
assert_raises_regexp(ValueError, r'Number of topics', lda.perplexity, X, invalid_n_topics)
def test_lda_perplexity():
# Test LDA perplexity for online and batch training
# perplexity should be lower with more training iterations
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1, learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10, learning_method=method,
total_samples=100, random_state=0)
distr_1 = lda_1.fit_transform(X)
perp_1 = lda_1.perplexity(X, distr_1, sub_sampling=False)
distr_2 = lda_2.fit_transform(X)
perp_2 = lda_2.perplexity(X, distr_2, sub_sampling=False)
assert_greater_equal(perp_1, perp_2)
perp_1_subsampling = lda_1.perplexity(X, distr_1, sub_sampling=True)
perp_2_subsampling = lda_2.perplexity(X, distr_2, sub_sampling=True)
assert_greater_equal(perp_1_subsampling, perp_2_subsampling)
def test_lda_score():
# Test LDA score for online and batch training
# score should be higher with more training iterations
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1, learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10, learning_method=method,
total_samples=100, random_state=0)
lda_1.fit_transform(X)
score_1 = lda_1.score(X)
lda_2.fit_transform(X)
score_2 = lda_2.score(X)
assert_greater_equal(score_2, score_1)
def test_perplexity_input_format():
# Test LDA perplexity for sparse and dense input
# perplexity should be the same for both dense and sparse input
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=1, learning_method='batch',
total_samples=100, random_state=0)
distr = lda.fit_transform(X)
perp_1 = lda.perplexity(X)
perp_2 = lda.perplexity(X, distr)
perp_3 = lda.perplexity(X.toarray(), distr)
assert_almost_equal(perp_1, perp_2)
assert_almost_equal(perp_1, perp_3)
def test_lda_score_perplexity():
# Test the relationship between LDA score and perplexity
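# score() returns the (approximate) log-likelihood bound of the corpus, and
# perplexity is computed from that bound as exp(-bound / total word count);
# the manual computation below reproduces this using np.sum(X.data) as the
# total word count.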
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
random_state=0)
distr = lda.fit_transform(X)
perplexity_1 = lda.perplexity(X, distr, sub_sampling=False)
score = lda.score(X)
perplexity_2 = np.exp(-1. * (score / np.sum(X.data)))
assert_almost_equal(perplexity_1, perplexity_2)
def test_lda_empty_docs():
"""Test LDA on empty document (all-zero rows)."""
Z = np.zeros((5, 4))
for X in [Z, csr_matrix(Z)]:
lda = LatentDirichletAllocation(max_iter=750).fit(X)
assert_almost_equal(lda.components_.sum(axis=0),
np.ones(lda.components_.shape[1]))
def test_dirichlet_expectation():
"""Test Cython version of Dirichlet expectation calculation."""
x = np.logspace(-100, 10, 10000)
assert_allclose(_dirichlet_expectation_1d(x),
np.exp(psi(x) - psi(np.sum(x))),
atol=1e-19)
x = x.reshape(100, 100)
assert_allclose(_dirichlet_expectation_2d(x),
psi(x) - psi(np.sum(x, axis=1)[:, np.newaxis]),
rtol=1e-11, atol=3e-9)
|
bsd-3-clause
|
mikaem/spectralDNS
|
tests/TG2D.py
|
4
|
2087
|
import warnings
from numpy import sin, cos, exp, zeros, sum, float64
from spectralDNS import config, get_solver, solve
try:
import matplotlib.pyplot as plt
except ImportError:
warnings.warn("matplotlib not installed")
plt = None
def initialize(sol, U, U_hat, X, **context):
U[0] = sin(X[0])*cos(X[1])
U[1] = -sin(X[1])*cos(X[0])
sol.set_velocity(U, U_hat, **context)
#U_hat = U.forward(U_hat)
config.params.t = 0.0
config.params.tstep = 0
im = None
def update(context):
global im
params = config.params
solver = config.solver
if plt is not None:
# initialize plot
if params.tstep == 1:
im = plt.imshow(zeros((params.N[0], params.N[1])))
plt.colorbar(im)
plt.draw()
if params.tstep % params.plot_result == 0 and params.plot_result > 0:
curl = solver.get_curl(**context)
im.set_data(curl[:, :])
im.autoscale()
plt.pause(1e-6)
def regression_test(context):
params = config.params
solver = config.solver
dx, L = params.dx, params.L
U = solver.get_velocity(**context)
k = solver.comm.reduce(sum(U.astype(float64)*U.astype(float64))*dx[0]*dx[1]/L[0]/L[1]/2)
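# The exact 2D Taylor-Green solution decays as exp(-2*nu*t) in velocity
# (so as exp(-4*nu*t) in kinetic energy); only the total kinetic energy is
# compared below, so the sign/ordering of the analytic velocity components
# does not affect the check.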
U[0] = -sin(context.X[1])*cos(context.X[0])*exp(-2*params.nu*params.t)
U[1] = sin(context.X[0])*cos(context.X[1])*exp(-2*params.nu*params.t)
ke = solver.comm.reduce(sum(U.astype(float64)*U.astype(float64))*dx[0]*dx[1]/L[0]/L[1]/2)
if solver.rank == 0:
print("Error {}".format(k-ke))
assert round(float(k - ke), params.ntol) == 0
if __name__ == '__main__':
config.update(
{'nu': 0.01,
'dt': 0.05,
'T': 10,
'write_result': 100,
'M': [6, 6]}, 'doublyperiodic')
config.doublyperiodic.add_argument("--plot_result", type=int, default=10) # required to allow overriding through the command line
sol = get_solver(update=update, regression_test=regression_test, mesh="doublyperiodic")
context = sol.get_context()
initialize(sol, **context)
solve(sol, context)
|
gpl-3.0
|
Equitable/trump
|
trump/datadef.py
|
3
|
2137
|
import inspect
import sys
import pandas as pd
import datetime as dt
from sqlalchemy import DateTime, Integer, String, Float
class SkipDataDef(object):
"""
The SkipDataDef object implements a float column, but assumes
that the data is already floats (or float-like), so it can skip any
check and conversion. It's just faster.
"""
sqlatyp = Float
astyp = float
def __init__(self, data):
self.data = data
@property
def converted(self):
return self.data
class ConvertedDataDef(object):
"""
Implements basic functionality for a DataDef object.
The default type is float.
"""
sqlatyp = Float
pythontyp = float
def __init__(self, data):
self.data = data
@property
def converted(self):
return self.data.astype(self.pythontyp)
class IntDataDef(ConvertedDataDef):
"""Implements a basic integer data definition."""
sqlatyp = Integer
pythontyp = int
class FloatDataDef(ConvertedDataDef):
"""Implements a basic float data definition."""
# redefined, just to avoid confusion. Floats are used by
# default in ConvertedDataDef
sqlatyp = Float
pythontyp = float
class StrDataDef(ConvertedDataDef):
"""Implements a basic string data definition."""
sqlatyp = String
pythontyp = str
def __init__(self, data):
self.data = data
#raise Warning("""Using this DataDef, on a feed with NaN, will convert
#the NaN to a string. The Aggregation functions will treat this as a
#value.""")
class DateTimeDataDef(ConvertedDataDef):
"""Implements a basic string data definition."""
sqlatyp = DateTime
pythontyp = dt.datetime
@property
def converted(self):
return pd.to_datetime(self.data)
def _pred(aclass):
"""
:param aclass: object to test
:return: True if `aclass` is a class defined in this module
"""
isaclass = inspect.isclass(aclass)
return isaclass and aclass.__module__ == _pred.__module__
classes = inspect.getmembers(sys.modules[__name__], _pred)
datadefs = {cls[0]: cls[1] for cls in classes}
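# Illustrative usage sketch (not part of the original module; the import path
# and the pandas Series input are assumptions):
#
#     import pandas as pd
#     from trump.datadef import datadefs
#
#     dd = datadefs['IntDataDef'](pd.Series(['1', '2', '3']))
#     dd.converted   # Series cast to int
#
# `datadefs` simply maps the class names defined above to the classes
# themselves, so a column type can be looked up by name at runtime.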
|
bsd-3-clause
|
userdw/RaspberryPi_3_Starter_Kit
|
08_Image_Processing/Histogram/histogram/histogram.py
|
1
|
1293
|
import os, cv2
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
_intensityBGR = 256
_projectDirectory = os.path.dirname(__file__)
_imagesDirectory = os.path.join(_projectDirectory, "images")
_images = []
for _root, _dirs, _files in os.walk(_imagesDirectory):
for _file in _files:
if _file.endswith(".jpg"):
_images.append(os.path.join(_imagesDirectory, _file))
_imageIndex = 0
_imageTotal = len(_images)
_img = cv2.imread(_images[_imageIndex], cv2.IMREAD_UNCHANGED)
_img = cv2.cvtColor(_img, cv2.COLOR_BGR2RGB)
_fig = plt.figure("Histogram")
_gs = GridSpec(15, 2)
_fig1 = plt.subplot(_gs[0:15, 0])
_fig1.set_title("Image")
plt.tight_layout()
_fig1ShowIt = plt.imshow(_img)
_fig2 = plt.subplot(_gs[0:15, 1])
_fig2.set_title("Histogram")
_histB = cv2.calcHist([_img], [0], None, [256], [0, 256]) / _img.size
_histG = cv2.calcHist([_img], [1], None, [256], [0, 256]) / _img.size
_histR = cv2.calcHist([_img], [2], None, [256], [0, 256]) / _img.size
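# cv2.calcHist([image], [channel], mask, [histSize], [range]) returns a 256x1
# array of per-channel pixel counts; dividing by _img.size (height*width*channels
# for this RGB array) rescales the counts for plotting as PMF-like curves.
# Note that after the BGR->RGB conversion above, channel 0 is red, so the
# _histB/_histG/_histR names (and the 'b'/'g'/'r' plot colors) follow the
# pre-conversion BGR ordering rather than the converted image's channel order.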
plt.xlabel("Intensity")
plt.ylabel("PMF")
_intensityPlotBGR = np.arange(_intensityBGR)
plt.tight_layout()
_fig2ShowIt = plt.plot(_intensityPlotBGR, _histB, "b", _intensityPlotBGR, _histG, "g", _intensityPlotBGR, _histR, "r", alpha = 0.8)
plt.show()
|
mit
|
gfyoung/pandas
|
pandas/tests/frame/methods/test_dropna.py
|
3
|
7549
|
import datetime
import dateutil
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series
import pandas._testing as tm
class TestDataFrameMissingData:
def test_dropEmptyRows(self, float_frame):
N = len(float_frame.index)
mat = np.random.randn(N)
mat[:5] = np.nan
frame = DataFrame({"foo": mat}, index=float_frame.index)
original = Series(mat, index=float_frame.index, name="foo")
expected = original.dropna()
inplace_frame1, inplace_frame2 = frame.copy(), frame.copy()
smaller_frame = frame.dropna(how="all")
# check that original was preserved
tm.assert_series_equal(frame["foo"], original)
return_value = inplace_frame1.dropna(how="all", inplace=True)
tm.assert_series_equal(smaller_frame["foo"], expected)
tm.assert_series_equal(inplace_frame1["foo"], expected)
assert return_value is None
smaller_frame = frame.dropna(how="all", subset=["foo"])
return_value = inplace_frame2.dropna(how="all", subset=["foo"], inplace=True)
tm.assert_series_equal(smaller_frame["foo"], expected)
tm.assert_series_equal(inplace_frame2["foo"], expected)
assert return_value is None
def test_dropIncompleteRows(self, float_frame):
N = len(float_frame.index)
mat = np.random.randn(N)
mat[:5] = np.nan
frame = DataFrame({"foo": mat}, index=float_frame.index)
frame["bar"] = 5
original = Series(mat, index=float_frame.index, name="foo")
inp_frame1, inp_frame2 = frame.copy(), frame.copy()
smaller_frame = frame.dropna()
tm.assert_series_equal(frame["foo"], original)
return_value = inp_frame1.dropna(inplace=True)
exp = Series(mat[5:], index=float_frame.index[5:], name="foo")
tm.assert_series_equal(smaller_frame["foo"], exp)
tm.assert_series_equal(inp_frame1["foo"], exp)
assert return_value is None
samesize_frame = frame.dropna(subset=["bar"])
tm.assert_series_equal(frame["foo"], original)
assert (frame["bar"] == 5).all()
return_value = inp_frame2.dropna(subset=["bar"], inplace=True)
tm.assert_index_equal(samesize_frame.index, float_frame.index)
tm.assert_index_equal(inp_frame2.index, float_frame.index)
assert return_value is None
def test_dropna(self):
df = DataFrame(np.random.randn(6, 4))
df[2][:2] = np.nan
dropped = df.dropna(axis=1)
expected = df.loc[:, [0, 1, 3]]
inp = df.copy()
return_value = inp.dropna(axis=1, inplace=True)
tm.assert_frame_equal(dropped, expected)
tm.assert_frame_equal(inp, expected)
assert return_value is None
dropped = df.dropna(axis=0)
expected = df.loc[list(range(2, 6))]
inp = df.copy()
return_value = inp.dropna(axis=0, inplace=True)
tm.assert_frame_equal(dropped, expected)
tm.assert_frame_equal(inp, expected)
assert return_value is None
# threshold
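# thresh=N keeps a row/column only if it has at least N non-NaN values;
# column 2 has only 4 non-NaN entries out of 6, so thresh=5 drops it while
# thresh=4 (further below) keeps everything.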
dropped = df.dropna(axis=1, thresh=5)
expected = df.loc[:, [0, 1, 3]]
inp = df.copy()
return_value = inp.dropna(axis=1, thresh=5, inplace=True)
tm.assert_frame_equal(dropped, expected)
tm.assert_frame_equal(inp, expected)
assert return_value is None
dropped = df.dropna(axis=0, thresh=4)
expected = df.loc[range(2, 6)]
inp = df.copy()
return_value = inp.dropna(axis=0, thresh=4, inplace=True)
tm.assert_frame_equal(dropped, expected)
tm.assert_frame_equal(inp, expected)
assert return_value is None
dropped = df.dropna(axis=1, thresh=4)
tm.assert_frame_equal(dropped, df)
dropped = df.dropna(axis=1, thresh=3)
tm.assert_frame_equal(dropped, df)
# subset
dropped = df.dropna(axis=0, subset=[0, 1, 3])
inp = df.copy()
return_value = inp.dropna(axis=0, subset=[0, 1, 3], inplace=True)
tm.assert_frame_equal(dropped, df)
tm.assert_frame_equal(inp, df)
assert return_value is None
# all
dropped = df.dropna(axis=1, how="all")
tm.assert_frame_equal(dropped, df)
df[2] = np.nan
dropped = df.dropna(axis=1, how="all")
expected = df.loc[:, [0, 1, 3]]
tm.assert_frame_equal(dropped, expected)
# bad input
msg = "No axis named 3 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
df.dropna(axis=3)
def test_drop_and_dropna_caching(self):
# test that the cacher updates
original = Series([1, 2, np.nan], name="A")
expected = Series([1, 2], dtype=original.dtype, name="A")
df = DataFrame({"A": original.values.copy()})
df2 = df.copy()
df["A"].dropna()
tm.assert_series_equal(df["A"], original)
ser = df["A"]
return_value = ser.dropna(inplace=True)
tm.assert_series_equal(ser, expected)
tm.assert_series_equal(df["A"], original)
assert return_value is None
df2["A"].drop([1])
tm.assert_series_equal(df2["A"], original)
ser = df2["A"]
return_value = ser.drop([1], inplace=True)
tm.assert_series_equal(ser, original.drop([1]))
tm.assert_series_equal(df2["A"], original)
assert return_value is None
def test_dropna_corner(self, float_frame):
# bad input
msg = "invalid how option: foo"
with pytest.raises(ValueError, match=msg):
float_frame.dropna(how="foo")
msg = "must specify how or thresh"
with pytest.raises(TypeError, match=msg):
float_frame.dropna(how=None)
# non-existent column - 8303
with pytest.raises(KeyError, match=r"^\['X'\]$"):
float_frame.dropna(subset=["A", "X"])
def test_dropna_multiple_axes(self):
df = DataFrame(
[
[1, np.nan, 2, 3],
[4, np.nan, 5, 6],
[np.nan, np.nan, np.nan, np.nan],
[7, np.nan, 8, 9],
]
)
# GH20987
with pytest.raises(TypeError, match="supplying multiple axes"):
df.dropna(how="all", axis=[0, 1])
with pytest.raises(TypeError, match="supplying multiple axes"):
df.dropna(how="all", axis=(0, 1))
inp = df.copy()
with pytest.raises(TypeError, match="supplying multiple axes"):
inp.dropna(how="all", axis=(0, 1), inplace=True)
def test_dropna_tz_aware_datetime(self):
# GH13407
df = DataFrame()
dt1 = datetime.datetime(2015, 1, 1, tzinfo=dateutil.tz.tzutc())
dt2 = datetime.datetime(2015, 2, 2, tzinfo=dateutil.tz.tzutc())
df["Time"] = [dt1]
result = df.dropna(axis=0)
expected = DataFrame({"Time": [dt1]})
tm.assert_frame_equal(result, expected)
# Ex2
df = DataFrame({"Time": [dt1, None, np.nan, dt2]})
result = df.dropna(axis=0)
expected = DataFrame([dt1, dt2], columns=["Time"], index=[0, 3])
tm.assert_frame_equal(result, expected)
def test_dropna_categorical_interval_index(self):
# GH 25087
ii = pd.IntervalIndex.from_breaks([0, 2.78, 3.14, 6.28])
ci = pd.CategoricalIndex(ii)
df = DataFrame({"A": list("abc")}, index=ci)
expected = df
result = df.dropna()
tm.assert_frame_equal(result, expected)
|
bsd-3-clause
|
eickenberg/scikit-learn
|
examples/covariance/plot_sparse_cov.py
|
14
|
5078
|
"""
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too strongly correlated (limiting the largest coefficient of the
precision matrix) and that there are no small coefficients in the
precision matrix that cannot be recovered. In addition, with a small
number of observations, it is easier to recover a correlation matrix
than a covariance matrix, so we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, as the number
of samples is small, we need to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, that
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimate correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that the color range of the precision matrices is tweaked to
improve the readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso, which sets the sparsity of the model, is
set by internal cross-validation in the GraphLassoCV. As can be
seen on figure 2, the grid to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
smallest_coef=.4,
largest_coef=.7,
random_state=prng)
cov = linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
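# The rescaling by d turns cov into a correlation matrix (unit diagonal) and
# applies the inverse scaling to prec so that it remains the exact inverse of cov.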
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
('GraphLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(np.ma.masked_equal(this_prec, 0),
interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s precision' % name)
ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
|
bsd-3-clause
|
shangwuhencc/scikit-learn
|
sklearn/utils/tests/test_multiclass.py
|
128
|
12853
|
from __future__ import division
import numpy as np
import scipy.sparse as sp
from itertools import product
from sklearn.externals.six.moves import xrange
from sklearn.externals.six import iteritems
from scipy.sparse import issparse
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.multiclass import is_multilabel
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.multiclass import class_distribution
class NotAnArray(object):
"""An object that is convertable to an array. This is useful to
simulate a Pandas timeseries."""
def __init__(self, data):
self.data = data
def __array__(self):
return self.data
EXAMPLES = {
'multilabel-indicator': [
# valid when the data is formatted as sparse or dense, identified
# by CSR format when the testing takes place
csr_matrix(np.random.RandomState(42).randint(2, size=(10, 10))),
csr_matrix(np.array([[0, 1], [1, 0]])),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.bool)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.int8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.uint8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float32)),
csr_matrix(np.array([[0, 0], [0, 0]])),
csr_matrix(np.array([[0, 1]])),
# Only valid when data is dense
np.array([[-1, 1], [1, -1]]),
np.array([[-3, 3], [3, -3]]),
NotAnArray(np.array([[-3, 3], [3, -3]])),
],
'multiclass': [
[1, 0, 2, 2, 1, 4, 2, 4, 4, 4],
np.array([1, 0, 2]),
np.array([1, 0, 2], dtype=np.int8),
np.array([1, 0, 2], dtype=np.uint8),
np.array([1, 0, 2], dtype=np.float),
np.array([1, 0, 2], dtype=np.float32),
np.array([[1], [0], [2]]),
NotAnArray(np.array([1, 0, 2])),
[0, 1, 2],
['a', 'b', 'c'],
np.array([u'a', u'b', u'c']),
np.array([u'a', u'b', u'c'], dtype=object),
np.array(['a', 'b', 'c'], dtype=object),
],
'multiclass-multioutput': [
np.array([[1, 0, 2, 2], [1, 4, 2, 4]]),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.int8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.uint8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float32),
np.array([['a', 'b'], ['c', 'd']]),
np.array([[u'a', u'b'], [u'c', u'd']]),
np.array([[u'a', u'b'], [u'c', u'd']], dtype=object),
np.array([[1, 0, 2]]),
NotAnArray(np.array([[1, 0, 2]])),
],
'binary': [
[0, 1],
[1, 1],
[],
[0],
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1]),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.bool),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.int8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.uint8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float32),
np.array([[0], [1]]),
NotAnArray(np.array([[0], [1]])),
[1, -1],
[3, 5],
['a'],
['a', 'b'],
['abc', 'def'],
np.array(['abc', 'def']),
[u'a', u'b'],
np.array(['abc', 'def'], dtype=object),
],
'continuous': [
[1e-5],
[0, .5],
np.array([[0], [.5]]),
np.array([[0], [.5]], dtype=np.float32),
],
'continuous-multioutput': [
np.array([[0, .5], [.5, 0]]),
np.array([[0, .5], [.5, 0]], dtype=np.float32),
np.array([[0, .5]]),
],
'unknown': [
[[]],
[()],
# sequences of sequences that weren't supported even before deprecation
np.array([np.array([]), np.array([1, 2, 3])], dtype=object),
[np.array([]), np.array([1, 2, 3])],
[set([1, 2, 3]), set([1, 2])],
[frozenset([1, 2, 3]), frozenset([1, 2])],
# and also confusable as sequences of sequences
[{0: 'a', 1: 'b'}, {0: 'a'}],
# empty second dimension
np.array([[], []]),
# 3d
np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]),
]
}
NON_ARRAY_LIKE_EXAMPLES = [
set([1, 2, 3]),
{0: 'a', 1: 'b'},
{0: [5], 1: [5]},
'abc',
frozenset([1, 2, 3]),
None,
]
MULTILABEL_SEQUENCES = [
[[1], [2], [0, 1]],
[(), (2), (0, 1)],
np.array([[], [1, 2]], dtype='object'),
NotAnArray(np.array([[], [1, 2]], dtype='object'))
]
def test_unique_labels():
# Empty iterable
assert_raises(ValueError, unique_labels)
# Multiclass problem
assert_array_equal(unique_labels(xrange(10)), np.arange(10))
assert_array_equal(unique_labels(np.arange(10)), np.arange(10))
assert_array_equal(unique_labels([4, 0, 2]), np.array([0, 2, 4]))
# Multilabel indicator
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[1, 0, 1],
[0, 0, 0]])),
np.arange(3))
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[0, 0, 0]])),
np.arange(3))
# Several arrays passed
assert_array_equal(unique_labels([4, 0, 2], xrange(5)),
np.arange(5))
assert_array_equal(unique_labels((0, 1, 2), (0,), (2, 1)),
np.arange(3))
# Border line case with binary indicator matrix
assert_raises(ValueError, unique_labels, [4, 0, 2], np.ones((5, 5)))
assert_raises(ValueError, unique_labels, np.ones((5, 4)), np.ones((5, 5)))
assert_array_equal(unique_labels(np.ones((4, 5)), np.ones((5, 5))),
np.arange(5))
def test_unique_labels_non_specific():
# Test unique_labels with a variety of collected examples
# Smoke test for all supported formats
for format in ["binary", "multiclass", "multilabel-indicator"]:
for y in EXAMPLES[format]:
unique_labels(y)
# We don't support those formats at the moment
for example in NON_ARRAY_LIKE_EXAMPLES:
assert_raises(ValueError, unique_labels, example)
for y_type in ["unknown", "continuous", 'continuous-multioutput',
'multiclass-multioutput']:
for example in EXAMPLES[y_type]:
assert_raises(ValueError, unique_labels, example)
def test_unique_labels_mixed_types():
# Mix with binary or multiclass and multilabel
mix_clf_format = product(EXAMPLES["multilabel-indicator"],
EXAMPLES["multiclass"] +
EXAMPLES["binary"])
for y_multilabel, y_multiclass in mix_clf_format:
assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
assert_raises(ValueError, unique_labels, [[1, 2]], [["a", "d"]])
assert_raises(ValueError, unique_labels, ["1", 2])
assert_raises(ValueError, unique_labels, [["1", 2], [1, 3]])
assert_raises(ValueError, unique_labels, [["1", "2"], [2, 3]])
def test_is_multilabel():
for group, group_examples in iteritems(EXAMPLES):
if group in ['multilabel-indicator']:
dense_assert_, dense_exp = assert_true, 'True'
else:
dense_assert_, dense_exp = assert_false, 'False'
for example in group_examples:
# Only mark explicitly defined sparse examples as valid sparse
# multilabel-indicators
if group == 'multilabel-indicator' and issparse(example):
sparse_assert_, sparse_exp = assert_true, 'True'
else:
sparse_assert_, sparse_exp = assert_false, 'False'
if (issparse(example) or
(hasattr(example, '__array__') and
np.asarray(example).ndim == 2 and
np.asarray(example).dtype.kind in 'biuf' and
np.asarray(example).shape[1] > 0)):
examples_sparse = [sparse_matrix(example)
for sparse_matrix in [coo_matrix,
csc_matrix,
csr_matrix,
dok_matrix,
lil_matrix]]
for exmpl_sparse in examples_sparse:
sparse_assert_(is_multilabel(exmpl_sparse),
msg=('is_multilabel(%r)'
' should be %s')
% (exmpl_sparse, sparse_exp))
# Densify sparse examples before testing
if issparse(example):
example = example.toarray()
dense_assert_(is_multilabel(example),
msg='is_multilabel(%r) should be %s'
% (example, dense_exp))
def test_type_of_target():
for group, group_examples in iteritems(EXAMPLES):
for example in group_examples:
assert_equal(type_of_target(example), group,
msg=('type_of_target(%r) should be %r, got %r'
% (example, group, type_of_target(example))))
for example in NON_ARRAY_LIKE_EXAMPLES:
msg_regex = 'Expected array-like \(array or non-string sequence\).*'
assert_raises_regex(ValueError, msg_regex, type_of_target, example)
for example in MULTILABEL_SEQUENCES:
msg = ('You appear to be using a legacy multi-label data '
'representation. Sequence of sequences are no longer supported;'
' use a binary array or sparse matrix instead.')
assert_raises_regex(ValueError, msg, type_of_target, example)
def test_class_distribution():
y = np.array([[1, 0, 0, 1],
[2, 2, 0, 1],
[1, 3, 0, 1],
[4, 2, 0, 1],
[2, 0, 0, 1],
[1, 3, 0, 1]])
# Define the sparse matrix with a mix of implicit and explicit zeros
data = np.array([1, 2, 1, 4, 2, 1, 0, 2, 3, 2, 3, 1, 1, 1, 1, 1, 1])
indices = np.array([0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 5, 0, 1, 2, 3, 4, 5])
indptr = np.array([0, 6, 11, 11, 17])
y_sp = sp.csc_matrix((data, indices, indptr), shape=(6, 4))
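# y_sp encodes the same 6x4 array as y: column 1 mixes an explicitly stored
# zero (row 0) with an implicit zero (row 4), and column 2 is left entirely
# implicit, so class_distribution must treat stored and missing zeros alike.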
classes, n_classes, class_prior = class_distribution(y)
classes_sp, n_classes_sp, class_prior_sp = class_distribution(y_sp)
classes_expected = [[1, 2, 4],
[0, 2, 3],
[0],
[1]]
n_classes_expected = [3, 3, 1, 1]
class_prior_expected = [[3/6, 2/6, 1/6],
[1/3, 1/3, 1/3],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
# Test again with explicit sample weights
(classes,
n_classes,
class_prior) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
(classes_sp,
n_classes_sp,
class_prior_sp) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
class_prior_expected = [[4/9, 3/9, 2/9],
[2/9, 4/9, 3/9],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
|
bsd-3-clause
|
abhishekgahlot/pybrain
|
examples/rl/valuebased/nfq.py
|
4
|
1841
|
#!/usr/bin/env python
__author__ = 'Thomas Rueckstiess, [email protected]'
from pybrain.rl.environments.cartpole import CartPoleEnvironment, DiscreteBalanceTask, CartPoleRenderer
from pybrain.rl.agents import LearningAgent
from pybrain.rl.experiments import EpisodicExperiment
from pybrain.rl.learners.valuebased import NFQ, ActionValueNetwork
from pybrain.rl.explorers import BoltzmannExplorer
from numpy import array, arange, meshgrid, pi, zeros, mean
from matplotlib import pyplot as plt
# switch this to True if you want to see the cart balancing the pole (slower)
render = False
plt.ion()
env = CartPoleEnvironment()
if render:
renderer = CartPoleRenderer()
env.setRenderer(renderer)
renderer.start()
module = ActionValueNetwork(4, 3)
task = DiscreteBalanceTask(env, 100)
learner = NFQ()
learner.explorer.epsilon = 0.4
agent = LearningAgent(module, learner)
testagent = LearningAgent(module, None)
experiment = EpisodicExperiment(task, agent)
def plotPerformance(values, fig):
plt.figure(fig.number)
plt.clf()
plt.plot(values, 'o-')
plt.gcf().canvas.draw()
performance = []
if not render:
pf_fig = plt.figure()
while(True):
# one learning step after one episode of world-interaction
experiment.doEpisodes(1)
agent.learn(1)
# test performance (these real-world experiences are not used for training)
if render:
env.delay = True
experiment.agent = testagent
r = mean([sum(x) for x in experiment.doEpisodes(5)])
env.delay = False
testagent.reset()
experiment.agent = agent
performance.append(r)
if not render:
plotPerformance(performance, pf_fig)
print "reward avg", r
print "explorer epsilon", learner.explorer.epsilon
print "num episodes", agent.history.getNumSequences()
print "update step", len(performance)
|
bsd-3-clause
|
rseubert/scikit-learn
|
examples/plot_multilabel.py
|
25
|
4261
|
# Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is never
more than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier with two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as pl
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
# get the separating hyperplane
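# The decision boundary satisfies w[0]*x + w[1]*y + intercept = 0, so it is
# drawn as the line y = -(w[0]/w[1])*x - intercept/w[1].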
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough
yy = a * xx - (clf.intercept_[0]) / w[1]
pl.plot(xx, yy, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
if transform == "pca":
X = PCA(n_components=2).fit_transform(X)
elif transform == "cca":
X = CCA(n_components=2).fit(X, Y).transform(X)
else:
raise ValueError
min_x = np.min(X[:, 0])
max_x = np.max(X[:, 0])
min_y = np.min(X[:, 1])
max_y = np.max(X[:, 1])
classif = OneVsRestClassifier(SVC(kernel='linear'))
classif.fit(X, Y)
pl.subplot(2, 2, subplot)
pl.title(title)
zero_class = np.where(Y[:, 0])
one_class = np.where(Y[:, 1])
pl.scatter(X[:, 0], X[:, 1], s=40, c='gray')
pl.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
facecolors='none', linewidths=2, label='Class 1')
pl.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
facecolors='none', linewidths=2, label='Class 2')
plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
'Boundary\nfor class 1')
plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
'Boundary\nfor class 2')
pl.xticks(())
pl.yticks(())
pl.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
pl.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
if subplot == 2:
pl.xlabel('First principal component')
pl.ylabel('Second principal component')
pl.legend(loc="upper left")
pl.figure(figsize=(8, 6))
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=True,
return_indicator=True,
random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
pl.subplots_adjust(.04, .02, .97, .94, .09, .2)
pl.show()
|
bsd-3-clause
|
annayqho/TheCannon
|
code/aaomega/aaomega_run_cannon.py
|
1
|
11861
|
""" Apply The Cannon to the AAOmega Spectra! """
import numpy as np
import matplotlib.pyplot as plt
import sys
from TheCannon import dataset
from TheCannon import model
DATA_DIR = '/Users/annaho/Data/AAOmega/Run_13_July'
SMALL = 1.0 / 1000000000.0
def test_step_iteration(ds, md, starting_guess):
errs, chisq = md.infer_labels(ds, starting_guess)
return ds.test_label_vals, chisq, errs
def choose_reference_set():
wl = np.load("%s/wl.npz" %DATA_DIR)['arr_0']
all_id = np.load("%s/ref_id_all.npz" %DATA_DIR)['arr_0']
all_flux = np.load("%s/ref_flux_all.npz" %DATA_DIR)['arr_0']
all_scat = np.load("%s/ref_spec_scat_all.npz" %DATA_DIR)['arr_0']
all_label = np.load("%s/ref_label_all.npz" %DATA_DIR)['arr_0']
all_ivar = np.load("%s/ref_ivar_corr.npz" %DATA_DIR)['arr_0']
# choose reference objects
good_teff = np.logical_and(
all_label[:,0] > 4000, all_label[:,0] < 6000)
good_feh = np.logical_and(
all_label[:,2] > -2, all_label[:,2] < 0.3)
good_logg = np.logical_and(
all_label[:,1] > 1, all_label[:,1] < 3)
good_vrot = all_label[:,4] < 20.0
good_scat = all_scat < 0.1
good1 = np.logical_and(good_teff, good_feh)
good2 = np.logical_and(good_logg, good_vrot)
good12 = np.logical_and(good1, good2)
good = np.logical_and(good12, good_scat)
ref_id = all_id[good]
print("%s objects chosen for reference set" %len(ref_id))
ref_flux = all_flux[good]
ref_ivar = all_ivar[good]
ref_label = all_label[good]
np.savez("%s/ref_id.npz" %DATA_DIR, ref_id)
np.savez("%s/ref_flux.npz" %DATA_DIR, ref_flux)
np.savez("%s/ref_ivar.npz" %DATA_DIR, ref_ivar)
np.savez("%s/ref_label.npz" %DATA_DIR, ref_label)
def update_cont():
    wl = np.load("%s/wl.npz" %DATA_DIR)['arr_0']  # needed for the nearest-pixel lookup below
    contpix = np.load("wl_contpix_old.npz")['arr_0']
    # this array is a bit too long, clip it off
    contpix_new = contpix[np.logical_and(contpix>8420, contpix<8700)]
inds = np.zeros(contpix_new.shape, dtype=int)
for i,val in enumerate(contpix_new):
# find the nearest pixel
inds[i] = int(np.argmin(np.abs(wl-val)))
contmask = np.zeros(len(wl), dtype=bool)
contmask[inds] = 1
np.savez("wl_contmask.npz", contmask)
print("SAVED")
def normalize_ref_set():
wl = np.load("%s/wl.npz" %DATA_DIR)['arr_0']
ref_id = np.load("%s/ref_id.npz" %DATA_DIR)['arr_0']
ref_flux = np.load("%s/ref_flux.npz" %DATA_DIR)['arr_0']
ref_ivar = np.load("%s/ref_ivar.npz" %DATA_DIR)['arr_0']
ref_label = np.load("%s/ref_label.npz" %DATA_DIR)['arr_0']
ds = dataset.Dataset(
wl, ref_id, ref_flux, ref_ivar, ref_label,
ref_id, ref_flux, ref_ivar)
contmask = np.load("%s/wl_contmask.npz" %DATA_DIR)['arr_0']
ds.set_continuum(contmask)
cont = ds.fit_continuum(3, "sinusoid")
np.savez("%s/ref_cont.npz" %DATA_DIR, cont)
norm_tr_flux, norm_tr_ivar, norm_test_flux, norm_test_ivar = \
ds.continuum_normalize(cont)
bad = np.logical_or(ref_flux <= 0, ref_flux > 1.1)
norm_tr_ivar[bad] = 0.0
np.savez("%s/ref_flux_norm.npz" %DATA_DIR, norm_tr_flux)
np.savez("%s/ref_ivar_norm.npz" %DATA_DIR, norm_tr_ivar)
def normalize_test_set():
wl = np.load("%s/wl.npz" %DATA_DIR)['arr_0']
test_id = np.load("%s/test_id.npz" %DATA_DIR)['arr_0']
test_flux = np.load("%s/test_flux.npz" %DATA_DIR)['arr_0']
test_ivar = np.load("%s/test_ivar_corr.npz" %DATA_DIR)['arr_0']
test_scat = np.load("%s/test_spec_scat.npz" %DATA_DIR)['arr_0']
contmask = np.load("%s/wl_contmask.npz" %DATA_DIR)['arr_0']
ds = dataset.Dataset(
wl, test_id[0:2], test_flux[0:2], test_ivar[0:2], wl,
test_id, test_flux, test_ivar)
ds.set_continuum(contmask)
    # For the sake of the normalization, no pixel whose flux deviates from the
    # continuum (flux = 1) by >= 3 sigma should be treated as continuum.
for ii,spec in enumerate(ds.test_flux):
err = test_scat[ii]
bad = np.logical_and(
ds.contmask == True, np.abs(1-spec) >= 3*err)
ds.test_ivar[ii][bad] = SMALL
cont = ds.fit_continuum(3, "sinusoid")
np.savez("%s/test_cont.npz" %DATA_DIR, cont)
norm_tr_flux, norm_tr_ivar, norm_test_flux, norm_test_ivar = \
ds.continuum_normalize(cont)
bad = np.logical_or(test_flux <= 0, test_flux > 1.1)
norm_test_ivar[bad] = 0.0
np.savez("%s/test_flux_norm.npz" %DATA_DIR, norm_test_flux)
np.savez("%s/test_ivar_norm.npz" %DATA_DIR, norm_test_ivar)
def choose_training_set():
ref_id = np.load("%s/ref_id.npz" %DATA_DIR)['arr_0']
ref_flux = np.load("%s/ref_flux_norm.npz" %DATA_DIR)['arr_0']
ref_ivar = np.load("%s/ref_ivar_norm.npz" %DATA_DIR)['arr_0']
ref_label = np.load("%s/ref_label.npz" %DATA_DIR)['arr_0']
# randomly pick 80% of the objects to be the training set
nobj = len(ref_id)
assignments = np.random.randint(10, size=nobj)
# if you're < 8, you're training
choose = assignments < 8
tr_id = ref_id[choose]
tr_flux = ref_flux[choose]
tr_ivar = ref_ivar[choose]
tr_label = ref_label[choose]
np.savez("%s/tr_id.npz" %DATA_DIR, tr_id)
np.savez("%s/tr_flux_norm.npz" %DATA_DIR, tr_flux)
np.savez("%s/tr_ivar_norm.npz" %DATA_DIR, tr_ivar)
np.savez("%s/tr_label.npz" %DATA_DIR, tr_label)
val_id = ref_id[~choose]
val_flux = ref_flux[~choose]
val_ivar = ref_ivar[~choose]
val_label = ref_label[~choose]
np.savez("%s/val_id.npz" %DATA_DIR, val_id)
np.savez("%s/val_flux_norm.npz" %DATA_DIR, val_flux)
np.savez("%s/val_ivar_norm.npz" %DATA_DIR, val_ivar)
np.savez("%s/val_label.npz" %DATA_DIR, val_label)
def train():
wl = np.load("%s/wl.npz" %DATA_DIR)['arr_0']
tr_id = np.load("%s/tr_id.npz" %DATA_DIR)['arr_0']
tr_flux = np.load("%s/tr_flux_norm.npz" %DATA_DIR)['arr_0']
tr_ivar = np.load("%s/tr_ivar_norm.npz" %DATA_DIR)['arr_0']
tr_label = np.load("%s/tr_label.npz" %DATA_DIR)['arr_0']
val_id = np.load("%s/val_id.npz" %DATA_DIR)['arr_0']
val_flux = np.load("%s/val_flux_norm.npz" %DATA_DIR)['arr_0']
val_ivar = np.load("%s/val_ivar_norm.npz" %DATA_DIR)['arr_0']
ds = dataset.Dataset(
wl, tr_id, tr_flux, tr_ivar, tr_label[:,0:4],
val_id, val_flux, val_ivar)
ds.set_label_names(["Teff", "logg", "FeH", 'aFe'])
np.savez("%s/tr_SNR.npz" %DATA_DIR, ds.tr_SNR)
fig = ds.diagnostics_SNR()
plt.savefig("%s/SNR_dist.png" %DATA_DIR)
plt.close()
fig = ds.diagnostics_ref_labels()
plt.savefig("%s/ref_label_triangle.png" %DATA_DIR)
plt.close()
md = model.CannonModel(2)
md.fit(ds)
fig = md.diagnostics_leading_coeffs(ds)
plt.savefig("%s/leading_coeffs.png" %DATA_DIR)
plt.close()
np.savez("%s/coeffs.npz" %DATA_DIR, md.coeffs)
np.savez("%s/scatters.npz" %DATA_DIR, md.scatters)
np.savez("%s/chisqs.npz" %DATA_DIR, md.chisqs)
np.savez("%s/pivots.npz" %DATA_DIR, md.pivots)
def validate():
wl = np.load("%s/wl.npz" %DATA_DIR)['arr_0']
tr_id = np.load("%s/tr_id.npz" %DATA_DIR)['arr_0']
tr_flux = np.load("%s/tr_flux_norm.npz" %DATA_DIR)['arr_0']
tr_ivar = np.load("%s/tr_ivar_norm.npz" %DATA_DIR)['arr_0']
val_id = np.load("%s/val_id.npz" %DATA_DIR)['arr_0']
val_flux = np.load("%s/val_flux_norm.npz" %DATA_DIR)['arr_0']
val_ivar = np.load("%s/val_ivar_norm.npz" %DATA_DIR)['arr_0']
val_label = np.load("%s/val_label.npz" %DATA_DIR)['arr_0']
coeffs = np.load("%s/coeffs.npz" %DATA_DIR)['arr_0']
scatters = np.load("%s/scatters.npz" %DATA_DIR)['arr_0']
chisqs = np.load("%s/chisqs.npz" %DATA_DIR)['arr_0']
pivots = np.load("%s/pivots.npz" %DATA_DIR)['arr_0']
ds = dataset.Dataset(
wl, tr_id, tr_flux, tr_ivar, val_label[:,0:4],
val_id, val_flux, val_ivar)
np.savez("%s/val_SNR.npz" %DATA_DIR, ds.test_SNR)
ds.set_label_names(["Teff", "logg", "FeH", "aFe"])
md = model.CannonModel(2)
md.coeffs = coeffs
md.scatters = scatters
md.chisqs = chisqs
md.pivots = pivots
md.diagnostics_leading_coeffs(ds)
nguesses = 7
nobj = len(ds.test_ID)
nlabels = ds.tr_label.shape[1]
choose = np.random.randint(0,nobj,size=nguesses)
starting_guesses = ds.tr_label[choose]-md.pivots
labels = np.zeros((nguesses, nobj, nlabels))
chisq = np.zeros((nguesses, nobj))
errs = np.zeros(labels.shape)
for ii,guess in enumerate(starting_guesses):
a,b,c = test_step_iteration(ds,md,starting_guesses[ii])
labels[ii,:] = a
chisq[ii,:] = b
errs[ii,:] = c
np.savez("%s/val_labels_all_starting_vals.npz" %DATA_DIR, labels)
np.savez("%s/val_chisq_all_starting_vals.npz" %DATA_DIR, chisq)
np.savez("%s/val_errs_all_starting_vals.npz" %DATA_DIR, errs)
choose = np.argmin(chisq, axis=0)
best_chisq = np.min(chisq, axis=0)
best_labels = np.zeros((nobj, nlabels))
best_errs = np.zeros(best_labels.shape)
for jj,val in enumerate(choose):
best_labels[jj,:] = labels[:,jj,:][val]
best_errs[jj,:] = errs[:,jj,:][val]
np.savez("%s/val_cannon_labels.npz" %DATA_DIR, best_labels)
np.savez("%s/val_errs.npz" %DATA_DIR, best_errs)
np.savez("%s/val_chisq.npz" %DATA_DIR, best_chisq)
ds.test_label_vals = best_labels
ds.diagnostics_1to1()
def test():
wl = np.load("%s/wl.npz" %DATA_DIR)['arr_0']
tr_id = np.load("%s/tr_id.npz" %DATA_DIR)['arr_0']
tr_flux = np.load("%s/tr_flux_norm.npz" %DATA_DIR)['arr_0']
tr_ivar = np.load("%s/tr_ivar_norm.npz" %DATA_DIR)['arr_0']
test_id = np.load("%s/test_id.npz" %DATA_DIR)['arr_0']
test_flux = np.load("%s/test_flux_norm.npz" %DATA_DIR)['arr_0']
test_ivar = np.load("%s/test_ivar_norm.npz" %DATA_DIR)['arr_0']
tr_label = np.load("%s/tr_label.npz" %DATA_DIR)['arr_0']
coeffs = np.load("%s/coeffs.npz" %DATA_DIR)['arr_0']
scatters = np.load("%s/scatters.npz" %DATA_DIR)['arr_0']
chisqs = np.load("%s/chisqs.npz" %DATA_DIR)['arr_0']
pivots = np.load("%s/pivots.npz" %DATA_DIR)['arr_0']
ds = dataset.Dataset(
wl, tr_id, tr_flux, tr_ivar, tr_label[:,0:4],
test_id, test_flux, test_ivar)
np.savez("%s/test_SNR.npz" %DATA_DIR, ds.test_SNR)
ds.set_label_names(["Teff", "logg", "FeH", "aFe"])
md = model.CannonModel(2)
md.coeffs = coeffs
md.scatters = scatters
md.chisqs = chisqs
md.pivots = pivots
md.diagnostics_leading_coeffs(ds)
nguesses = 7
nobj = len(ds.test_ID)
nlabels = ds.tr_label.shape[1]
choose = np.random.randint(0,nobj,size=nguesses)
starting_guesses = ds.tr_label[choose]-md.pivots
labels = np.zeros((nguesses, nobj, nlabels))
chisq = np.zeros((nguesses, nobj))
errs = np.zeros(labels.shape)
ds.tr_label = np.zeros((nobj, nlabels))
for ii,guess in enumerate(starting_guesses):
a,b,c = test_step_iteration(ds,md,starting_guesses[ii])
labels[ii,:] = a
chisq[ii,:] = b
errs[ii,:] = c
np.savez("%s/labels_all_starting_vals.npz" %DATA_DIR, labels)
np.savez("%s/chisq_all_starting_vals.npz" %DATA_DIR, chisq)
np.savez("%s/errs_all_starting_vals.npz" %DATA_DIR, errs)
choose = np.argmin(chisq, axis=0)
best_chisq = np.min(chisq, axis=0)
best_labels = np.zeros((nobj, nlabels))
best_errs = np.zeros(best_labels.shape)
for jj,val in enumerate(choose):
best_labels[jj,:] = labels[:,jj,:][val]
best_errs[jj,:] = errs[:,jj,:][val]
np.savez("%s/test_cannon_labels.npz" %DATA_DIR, best_labels)
np.savez("%s/test_errs.npz" %DATA_DIR, best_errs)
np.savez("%s/test_chisq.npz" %DATA_DIR, best_chisq)
ds.test_label_vals = best_labels
if __name__=="__main__":
#choose_reference_set()
#normalize_ref_set()
#normalize_test_set()
#choose_training_set()
#train()
#validate()
test()
|
mit
|
mortada/tensorflow
|
tensorflow/contrib/learn/python/learn/learn_io/data_feeder_test.py
|
71
|
12923
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `DataFeeder`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn.learn_io import *
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
# pylint: enable=wildcard-import
class DataFeederTest(test.TestCase):
# pylint: disable=undefined-variable
"""Tests for `DataFeeder`."""
def _wrap_dict(self, data, prepend=''):
return {prepend + '1': data, prepend + '2': data}
def _assert_raises(self, input_data):
    with self.assertRaisesRegexp(TypeError, 'annot convert'):  # substring matches 'Cannot convert' and 'cannot convert'
data_feeder.DataFeeder(input_data, None, n_classes=0, batch_size=1)
def test_input_uint32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint32)
self._assert_raises(data)
self._assert_raises(self._wrap_dict(data))
def test_input_uint64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint64)
self._assert_raises(data)
self._assert_raises(self._wrap_dict(data))
def _assert_dtype(self, expected_np_dtype, expected_tf_dtype, input_data):
feeder = data_feeder.DataFeeder(input_data, None, n_classes=0, batch_size=1)
if isinstance(input_data, dict):
for k, v in list(feeder.input_dtype.items()):
self.assertEqual(expected_np_dtype, v)
else:
self.assertEqual(expected_np_dtype, feeder.input_dtype)
with ops.Graph().as_default() as g, self.test_session(g):
inp, _ = feeder.input_builder()
if isinstance(inp, dict):
for k, v in list(inp.items()):
self.assertEqual(expected_tf_dtype, v.dtype)
else:
self.assertEqual(expected_tf_dtype, inp.dtype)
def test_input_int8(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int8)
self._assert_dtype(np.int8, dtypes.int8, data)
self._assert_dtype(np.int8, dtypes.int8, self._wrap_dict(data))
def test_input_int16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int16)
self._assert_dtype(np.int16, dtypes.int16, data)
self._assert_dtype(np.int16, dtypes.int16, self._wrap_dict(data))
def test_input_int32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int32)
self._assert_dtype(np.int32, dtypes.int32, data)
self._assert_dtype(np.int32, dtypes.int32, self._wrap_dict(data))
def test_input_int64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int64)
self._assert_dtype(np.int64, dtypes.int64, data)
self._assert_dtype(np.int64, dtypes.int64, self._wrap_dict(data))
def test_input_uint8(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint8)
self._assert_dtype(np.uint8, dtypes.uint8, data)
self._assert_dtype(np.uint8, dtypes.uint8, self._wrap_dict(data))
def test_input_uint16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint16)
self._assert_dtype(np.uint16, dtypes.uint16, data)
self._assert_dtype(np.uint16, dtypes.uint16, self._wrap_dict(data))
def test_input_float16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float16)
self._assert_dtype(np.float16, dtypes.float16, data)
self._assert_dtype(np.float16, dtypes.float16, self._wrap_dict(data))
def test_input_float32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float32)
self._assert_dtype(np.float32, dtypes.float32, data)
self._assert_dtype(np.float32, dtypes.float32, self._wrap_dict(data))
def test_input_float64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float64)
self._assert_dtype(np.float64, dtypes.float64, data)
self._assert_dtype(np.float64, dtypes.float64, self._wrap_dict(data))
def test_input_bool(self):
data = np.array([[False for _ in xrange(2)] for _ in xrange(2)])
self._assert_dtype(np.bool, dtypes.bool, data)
self._assert_dtype(np.bool, dtypes.bool, self._wrap_dict(data))
def test_input_string(self):
input_data = np.array([['str%d' % i for i in xrange(2)] for _ in xrange(2)])
self._assert_dtype(input_data.dtype, dtypes.string, input_data)
self._assert_dtype(input_data.dtype, dtypes.string,
self._wrap_dict(input_data))
def _assertAllClose(self, src, dest, src_key_of=None, src_prop=None):
def func(x):
val = getattr(x, src_prop) if src_prop else x
return val if src_key_of is None else src_key_of[val]
if isinstance(src, dict):
for k in list(src.keys()):
self.assertAllClose(func(src[k]), dest)
else:
self.assertAllClose(func(src), dest)
def test_unsupervised(self):
def func(feeder):
with self.test_session():
inp, _ = feeder.input_builder()
feed_dict_fn = feeder.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[1, 2]], feed_dict, 'name')
data = np.matrix([[1, 2], [2, 3], [3, 4]])
func(data_feeder.DataFeeder(data, None, n_classes=0, batch_size=1))
func(
data_feeder.DataFeeder(
self._wrap_dict(data), None, n_classes=0, batch_size=1))
def test_data_feeder_regression(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(out, [2, 1], feed_dict, 'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([1, 2])
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=3))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=3))
def test_epoch(self):
def func(feeder):
with self.test_session():
feeder.input_builder()
epoch = feeder.make_epoch_variable()
feed_dict_fn = feeder.get_feed_dict_fn()
# First input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Second input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Third input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Back to the first input again, so new epoch.
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [1])
data = np.matrix([[1, 2], [2, 3], [3, 4]])
labels = np.array([0, 0, 1])
func(data_feeder.DataFeeder(data, labels, n_classes=0, batch_size=1))
func(
data_feeder.DataFeeder(
self._wrap_dict(data, 'in'),
self._wrap_dict(labels, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=1))
def test_data_feeder_multioutput_regression(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(out, [[3, 4], [1, 2]], feed_dict, 'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([[1, 2], [3, 4]])
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=2))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=2))
def test_data_feeder_multioutput_classification(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(
out, [[[0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1]],
[[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 1, 0, 0]]], feed_dict,
'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([[0, 1, 2], [2, 3, 4]])
func(data_feeder.DataFeeder(x, y, n_classes=5, batch_size=2))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(5, 'out'),
batch_size=2))
def test_streaming_data_feeder(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[[1, 2]], [[3, 4]]], feed_dict, 'name')
self._assertAllClose(out, [[[1], [2]], [[2], [2]]], feed_dict, 'name')
def x_iter(wrap_dict=False):
yield np.array([[1, 2]]) if not wrap_dict else self._wrap_dict(
np.array([[1, 2]]), 'in')
yield np.array([[3, 4]]) if not wrap_dict else self._wrap_dict(
np.array([[3, 4]]), 'in')
def y_iter(wrap_dict=False):
yield np.array([[1], [2]]) if not wrap_dict else self._wrap_dict(
np.array([[1], [2]]), 'out')
yield np.array([[2], [2]]) if not wrap_dict else self._wrap_dict(
np.array([[2], [2]]), 'out')
func(
data_feeder.StreamingDataFeeder(
x_iter(), y_iter(), n_classes=0, batch_size=2))
func(
data_feeder.StreamingDataFeeder(
x_iter(True),
y_iter(True),
n_classes=self._wrap_dict(0, 'out'),
batch_size=2))
# Test non-full batches.
func(
data_feeder.StreamingDataFeeder(
x_iter(), y_iter(), n_classes=0, batch_size=10))
func(
data_feeder.StreamingDataFeeder(
x_iter(True),
y_iter(True),
n_classes=self._wrap_dict(0, 'out'),
batch_size=10))
def test_dask_data_feeder(self):
if HAS_PANDAS and HAS_DASK:
x = pd.DataFrame(
dict(
a=np.array([.1, .3, .4, .6, .2, .1, .6]),
b=np.array([.7, .8, .1, .2, .5, .3, .9])))
x = dd.from_pandas(x, npartitions=2)
y = pd.DataFrame(dict(labels=np.array([1, 0, 2, 1, 0, 1, 2])))
y = dd.from_pandas(y, npartitions=2)
# TODO(ipolosukhin): Remove or restore this.
# x = extract_dask_data(x)
# y = extract_dask_labels(y)
df = data_feeder.DaskDataFeeder(x, y, n_classes=2, batch_size=2)
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[inp.name], [[0.40000001, 0.1],
[0.60000002, 0.2]])
self.assertAllClose(feed_dict[out.name], [[0., 0., 1.], [0., 1., 0.]])
def test_hdf5_data_feeder(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
      self._assertAllClose(out, [2, 1], feed_dict, 'name')
try:
import h5py # pylint: disable=g-import-not-at-top
x = np.matrix([[1, 2], [3, 4]])
y = np.array([1, 2])
h5f = h5py.File('test_hdf5.h5', 'w')
h5f.create_dataset('x', data=x)
h5f.create_dataset('y', data=y)
h5f.close()
h5f = h5py.File('test_hdf5.h5', 'r')
x = h5f['x']
y = h5f['y']
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=3))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=3))
except ImportError:
print("Skipped test for hdf5 since it's not installed.")
class SetupPredictDataFeederTest(DataFeederTest):
"""Tests for `DataFeeder.setup_predict_data_feeder`."""
def test_iterable_data(self):
# pylint: disable=undefined-variable
def func(df):
self._assertAllClose(six.next(df), [[1, 2], [3, 4]])
self._assertAllClose(six.next(df), [[5, 6]])
data = [[1, 2], [3, 4], [5, 6]]
x = iter(data)
x_dict = iter([self._wrap_dict(v) for v in iter(data)])
func(data_feeder.setup_predict_data_feeder(x, batch_size=2))
func(data_feeder.setup_predict_data_feeder(x_dict, batch_size=2))
if __name__ == '__main__':
test.main()
|
apache-2.0
|
belavenir/rpg_svo
|
svo_analysis/scripts/compare_results.py
|
17
|
6127
|
#!/usr/bin/python
import os
import sys
import time
import rospkg
import numpy as np
import matplotlib.pyplot as plt
import yaml
import argparse
from matplotlib import rc
# tell matplotlib to use latex font
rc('font',**{'family':'serif','serif':['Cardo']})
rc('text', usetex=True)
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
def plot_trajectory(ax, filename, label, color, linewidth):
file = open(filename)
data = file.read()
lines = data.replace(","," ").replace("\t"," ").split("\n")
    trajectory = np.array(
        [[v.strip() for v in line.split(" ") if v.strip() != ""]
         for line in lines if len(line) > 0 and line[0] != "#"],
        dtype=np.float64)
ax.plot(trajectory[:,1], trajectory[:,2], label=label, color=color, linewidth=linewidth)
def compare_results(experiments, results_dir, comparison_dir,
plot_scale_drift = False):
# ------------------------------------------------------------------------------
# position error
fig_poserr = plt.figure(figsize=(8,6))
ax_poserr_x = fig_poserr.add_subplot(311, ylabel='x-error [m]')
ax_poserr_y = fig_poserr.add_subplot(312, ylabel='y-error [m]')
ax_poserr_z = fig_poserr.add_subplot(313, ylabel='z-error [m]', xlabel='time [s]')
for exp in experiments:
# load dataset parameters
params_stream = open(os.path.join(results_dir, exp, 'params.yaml'))
params = yaml.load(params_stream)
# plot translation error
trans_error = np.loadtxt(os.path.join(results_dir, exp, 'translation_error.txt'))
trans_error[:,0] = trans_error[:,0]-trans_error[0,0]
ax_poserr_x.plot(trans_error[:,0], trans_error[:,1], label=params['experiment_label'])
ax_poserr_y.plot(trans_error[:,0], trans_error[:,2])
ax_poserr_z.plot(trans_error[:,0], trans_error[:,3])
ax_poserr_x.set_xlim([0, trans_error[-1,0]+4])
ax_poserr_y.set_xlim([0, trans_error[-1,0]+4])
ax_poserr_z.set_xlim([0, trans_error[-1,0]+4])
ax_poserr_x.legend(bbox_to_anchor=[0, 0], loc='lower left', ncol=3)
ax_poserr_x.grid()
ax_poserr_y.grid()
ax_poserr_z.grid()
fig_poserr.tight_layout()
fig_poserr.savefig(os.path.join(comparison_dir, 'translation_error.pdf'))
# ------------------------------------------------------------------------------
# orientation error
fig_roterr = plt.figure(figsize=(8,6))
ax_roterr_r = fig_roterr.add_subplot(311, ylabel='roll-error [rad]')
ax_roterr_p = fig_roterr.add_subplot(312, ylabel='pitch-error [rad]')
ax_roterr_y = fig_roterr.add_subplot(313, ylabel='yaw-error [rad]', xlabel='time [s]')
for exp in experiments:
# load dataset parameters
params_stream = open(os.path.join(results_dir, exp, 'params.yaml'))
params = yaml.load(params_stream)
        # plot orientation error
rot_error = np.loadtxt(os.path.join(results_dir, exp, 'orientation_error.txt'))
rot_error[:,0] = rot_error[:,0]-rot_error[0,0]
ax_roterr_r.plot(rot_error[:,0], rot_error[:,3], label=params['experiment_label'])
ax_roterr_p.plot(rot_error[:,0], rot_error[:,2])
ax_roterr_y.plot(rot_error[:,0], rot_error[:,1])
ax_roterr_r.set_xlim([0, rot_error[-1,0]+4])
ax_roterr_p.set_xlim([0, rot_error[-1,0]+4])
ax_roterr_y.set_xlim([0, rot_error[-1,0]+4])
ax_roterr_r.legend(bbox_to_anchor=[0, 1], loc='upper left', ncol=3)
ax_roterr_r.grid()
ax_roterr_p.grid()
ax_roterr_y.grid()
fig_roterr.tight_layout()
fig_roterr.savefig(os.path.join(comparison_dir, 'orientation_error.pdf'))
# ------------------------------------------------------------------------------
# scale error
if plot_scale_drift:
fig_scale = plt.figure(figsize=(8,2.5))
ax_scale = fig_scale.add_subplot(111, xlabel='time [s]', ylabel='scale change [\%]')
for exp in experiments:
# load dataset parameters
params = yaml.load(open(os.path.join(results_dir, exp, 'params.yaml')))
            # load and plot scale drift
            scale_drift = np.loadtxt(os.path.join(results_dir, exp, 'scale_drift.txt'))
            scale_drift[:,0] = scale_drift[:,0]-scale_drift[0,0]
            ax_scale.plot(scale_drift[:,0], scale_drift[:,1], label=params['experiment_label'])
        ax_scale.set_xlim([0, scale_drift[-1,0]+4])
ax_scale.legend(bbox_to_anchor=[0, 1], loc='upper left', ncol=3)
ax_scale.grid()
fig_scale.tight_layout()
fig_scale.savefig(os.path.join(comparison_dir, 'scale_drift.pdf'))
# ------------------------------------------------------------------------------
# trajectory
# fig_traj = plt.figure(figsize=(8,4.8))
# ax_traj = fig_traj.add_subplot(111, xlabel='x [m]', ylabel='y [m]', aspect='equal', xlim=[-3.1, 4], ylim=[-1.5, 2.6])
#
# plotTrajectory(ax_traj, '/home/cforster/Datasets/asl_vicon_d2/groundtruth_filtered.txt', 'Groundtruth', 'k', 1.5)
# plotTrajectory(ax_traj, results_dir+'/20130911_2229_nslam_i7_asl2_fast/traj_estimate_rotated.txt', 'Fast', 'g', 1)
# plotTrajectory(ax_traj, results_dir+'/20130906_2149_ptam_i7_asl2/traj_estimate_rotated.txt', 'PTAM', 'r', 1)
#
# mark_inset(ax_traj, axins, loc1=2, loc2=4, fc="none", ec='b')
# plt.draw()
# plt.show()
# ax_traj.legend(bbox_to_anchor=[1, 0], loc='lower right', ncol=3)
# ax_traj.grid()
# fig_traj.tight_layout()
# fig_traj.savefig('../results/trajectory_asl.pdf')
if __name__ == '__main__':
default_name = time.strftime("%Y%m%d_%H%M", time.localtime())+'_comparison'
parser = argparse.ArgumentParser(description='Compare results.')
parser.add_argument('result_directories', nargs='+', help='list of result directories to compare')
parser.add_argument('--name', help='name of the comparison', default=default_name)
args = parser.parse_args()
# create folder for comparison results
results_dir = os.path.join(rospkg.RosPack().get_path('svo_analysis'), 'results')
comparison_dir = os.path.join(results_dir, args.name)
if not os.path.exists(comparison_dir):
os.makedirs(comparison_dir)
# run comparison
compare_results(args.result_directories, results_dir, comparison_dir)
|
gpl-3.0
|
WillisXChen/django-oscar
|
oscar/lib/python2.7/site-packages/IPython/testing/decorators.py
|
12
|
13377
|
# -*- coding: utf-8 -*-
"""Decorators for labeling test objects.
Decorators that merely return a modified version of the original function
object are straightforward. Decorators that return a new function object need
to use nose.tools.make_decorator(original_function)(decorator) in returning the
decorator, in order to preserve metadata such as function name, setup and
teardown functions and so on - see nose.tools for more information.
This module provides a set of useful decorators meant to be ready to use in
your own tests. See the bottom of the file for the ready-made ones, and if you
find yourself writing a new one that may be of generic use, add it here.
Included decorators:
Lightweight testing that remains unittest-compatible.
- An @as_unittest decorator can be used to tag any normal parameter-less
function as a unittest TestCase. Then, both nose and normal unittest will
recognize it as such. This will make it easier to migrate away from Nose if
we ever need/want to while maintaining very lightweight tests.
NOTE: This file contains IPython-specific decorators. Using the machinery in
IPython.external.decorators, we import either numpy.testing.decorators if numpy is
available, OR use equivalent code in IPython.external._decorators, which
we've copied verbatim from numpy.
Authors
-------
- Fernando Perez <[email protected]>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2009-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib imports
import sys
import os
import tempfile
import unittest
# Third-party imports
# This is Michele Simionato's decorator module, kept verbatim.
from IPython.external.decorator import decorator
# Expose the unittest-driven decorators
from .ipunittest import ipdoctest, ipdocstring
# Grab the numpy-specific decorators which we keep in a file that we
# occasionally update from upstream: decorators.py is a copy of
# numpy.testing.decorators, we expose all of it here.
from IPython.external.decorators import *
# For onlyif_cmd_exists decorator
from IPython.utils.process import is_cmd_found
from IPython.utils.py3compat import string_types
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
# Simple example of the basic idea
def as_unittest(func):
"""Decorator to make a simple function into a normal test via unittest."""
class Tester(unittest.TestCase):
def test(self):
func()
Tester.__name__ = func.__name__
return Tester
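# A minimal usage sketch (illustrative only; the function name is hypothetical):
#
#     @as_unittest
#     def check_addition():
#         assert 1 + 1 == 2
#
# ``check_addition`` is now a ``unittest.TestCase`` subclass carrying the same
# name, so both nose and plain unittest will collect it.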
# Utility functions
def apply_wrapper(wrapper,func):
"""Apply a wrapper to a function for decoration.
This mixes Michele Simionato's decorator tool with nose's make_decorator,
to apply a wrapper in a decorator so that all nose attributes, as well as
function signature and other properties, survive the decoration cleanly.
This will ensure that wrapped functions can still be well introspected via
IPython, for example.
"""
import nose.tools
return decorator(wrapper,nose.tools.make_decorator(func)(wrapper))
def make_label_dec(label,ds=None):
"""Factory function to create a decorator that applies one or more labels.
Parameters
----------
label : string or sequence
One or more labels that will be applied by the decorator to the functions
it decorates. Labels are attributes of the decorated function with their
value set to True.
ds : string
An optional docstring for the resulting decorator. If not given, a
default docstring is auto-generated.
Returns
-------
A decorator.
Examples
--------
A simple labeling decorator:
>>> slow = make_label_dec('slow')
>>> slow.__doc__
"Labels a test as 'slow'."
And one that uses multiple labels and a custom docstring:
>>> rare = make_label_dec(['slow','hard'],
... "Mix labels 'slow' and 'hard' for rare tests.")
>>> rare.__doc__
"Mix labels 'slow' and 'hard' for rare tests."
Now, let's test using this one:
>>> @rare
... def f(): pass
...
>>>
>>> f.slow
True
>>> f.hard
True
"""
if isinstance(label, string_types):
labels = [label]
else:
labels = label
# Validate that the given label(s) are OK for use in setattr() by doing a
# dry run on a dummy function.
tmp = lambda : None
for label in labels:
setattr(tmp,label,True)
# This is the actual decorator we'll return
def decor(f):
for label in labels:
setattr(f,label,True)
return f
# Apply the user's docstring, or autogenerate a basic one
if ds is None:
ds = "Labels a test as %r." % label
decor.__doc__ = ds
return decor
# Inspired by numpy's skipif, but uses the full apply_wrapper utility to
# preserve function metadata better and allows the skip condition to be a
# callable.
def skipif(skip_condition, msg=None):
''' Make function raise SkipTest exception if skip_condition is true
Parameters
----------
skip_condition : bool or callable
Flag to determine whether to skip test. If the condition is a
callable, it is used at runtime to dynamically make the decision. This
is useful for tests that may require costly imports, to delay the cost
until the test suite is actually executed.
msg : string
Message to give on raising a SkipTest exception.
Returns
-------
decorator : function
Decorator, which, when applied to a function, causes SkipTest
to be raised when the skip_condition was True, and the function
to be called normally otherwise.
Notes
-----
You will see from the code that we had to further decorate the
decorator with the nose.tools.make_decorator function in order to
transmit function name, and various other metadata.
'''
def skip_decorator(f):
# Local import to avoid a hard nose dependency and only incur the
# import time overhead at actual test-time.
import nose
# Allow for both boolean or callable skip conditions.
if callable(skip_condition):
skip_val = skip_condition
else:
skip_val = lambda : skip_condition
def get_msg(func,msg=None):
"""Skip message with information about function being skipped."""
if msg is None: out = 'Test skipped due to test condition.'
else: out = msg
return "Skipping test: %s. %s" % (func.__name__,out)
# We need to define *two* skippers because Python doesn't allow both
# return with value and yield inside the same function.
def skipper_func(*args, **kwargs):
"""Skipper for normal test functions."""
if skip_val():
raise nose.SkipTest(get_msg(f,msg))
else:
return f(*args, **kwargs)
def skipper_gen(*args, **kwargs):
"""Skipper for test generators."""
if skip_val():
raise nose.SkipTest(get_msg(f,msg))
else:
for x in f(*args, **kwargs):
yield x
# Choose the right skipper to use when building the actual generator.
if nose.util.isgenerator(f):
skipper = skipper_gen
else:
skipper = skipper_func
return nose.tools.make_decorator(f)(skipper)
return skip_decorator
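# Usage sketch (illustrative; the test name below is hypothetical):
#
#     @skipif(lambda: not os.path.exists('/usr/bin/git'),
#             "git executable is required for this test")
#     def test_git_integration():
#         ...
#
# Because the condition is a callable, it is evaluated at test run time rather
# than at import time.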
# A version with the condition set to true, common case just to attach a message
# to a skip decorator
def skip(msg=None):
"""Decorator factory - mark a test function for skipping from test suite.
Parameters
----------
msg : string
Optional message to be added.
Returns
-------
decorator : function
Decorator, which, when applied to a function, causes SkipTest
to be raised, with the optional message added.
"""
return skipif(True,msg)
def onlyif(condition, msg):
"""The reverse from skipif, see skipif for details."""
if callable(condition):
skip_condition = lambda : not condition()
else:
skip_condition = lambda : not condition
return skipif(skip_condition, msg)
#-----------------------------------------------------------------------------
# Utility functions for decorators
def module_not_available(module):
"""Can module be imported? Returns true if module does NOT import.
This is used to make a decorator to skip tests that require module to be
available, but delay the 'import numpy' to test execution time.
"""
try:
mod = __import__(module)
mod_not_avail = False
except ImportError:
mod_not_avail = True
return mod_not_avail
def decorated_dummy(dec, name):
"""Return a dummy function decorated with dec, with the given name.
Examples
--------
import IPython.testing.decorators as dec
setup = dec.decorated_dummy(dec.skip_if_no_x11, __name__)
"""
dummy = lambda: None
dummy.__name__ = name
return dec(dummy)
#-----------------------------------------------------------------------------
# Decorators for public use
# Decorators to skip certain tests on specific platforms.
skip_win32 = skipif(sys.platform == 'win32',
"This test does not run under Windows")
skip_linux = skipif(sys.platform.startswith('linux'),
"This test does not run under Linux")
skip_osx = skipif(sys.platform == 'darwin',"This test does not run under OS X")
# Decorators to skip tests if not on specific platforms.
skip_if_not_win32 = skipif(sys.platform != 'win32',
"This test only runs under Windows")
skip_if_not_linux = skipif(not sys.platform.startswith('linux'),
"This test only runs under Linux")
skip_if_not_osx = skipif(sys.platform != 'darwin',
"This test only runs under OSX")
_x11_skip_cond = (sys.platform not in ('darwin', 'win32') and
os.environ.get('DISPLAY', '') == '')
_x11_skip_msg = "Skipped under *nix when X11/XOrg not available"
skip_if_no_x11 = skipif(_x11_skip_cond, _x11_skip_msg)
# not a decorator itself, returns a dummy function to be used as setup
def skip_file_no_x11(name):
return decorated_dummy(skip_if_no_x11, name) if _x11_skip_cond else None
# Other skip decorators
# generic skip without module
skip_without = lambda mod: skipif(module_not_available(mod), "This test requires %s" % mod)
skipif_not_numpy = skip_without('numpy')
skipif_not_matplotlib = skip_without('matplotlib')
skipif_not_sympy = skip_without('sympy')
skip_known_failure = knownfailureif(True,'This test is known to fail')
known_failure_py3 = knownfailureif(sys.version_info[0] >= 3,
'This test is known to fail on Python 3.')
# A null 'decorator', useful to make more readable code that needs to pick
# between different decorators based on OS or other conditions
null_deco = lambda f: f
# Some tests only run where we can use unicode paths. Note that we can't just
# check os.path.supports_unicode_filenames, which is always False on Linux.
try:
f = tempfile.NamedTemporaryFile(prefix=u"tmp€")
except UnicodeEncodeError:
unicode_paths = False
else:
unicode_paths = True
f.close()
onlyif_unicode_paths = onlyif(unicode_paths, ("This test is only applicable "
"where we can use unicode in filenames."))
def onlyif_cmds_exist(*commands):
"""
Decorator to skip test when at least one of `commands` is not found.
"""
for cmd in commands:
try:
if not is_cmd_found(cmd):
return skip("This test runs only if command '{0}' "
"is installed".format(cmd))
except ImportError as e:
# is_cmd_found uses pywin32 on windows, which might not be available
if sys.platform == 'win32' and 'pywin32' in str(e):
return skip("This test runs only if pywin32 and command '{0}' "
"is installed".format(cmd))
raise e
return null_deco
def onlyif_any_cmd_exists(*commands):
"""
Decorator to skip test unless at least one of `commands` is found.
"""
for cmd in commands:
try:
if is_cmd_found(cmd):
return null_deco
except ImportError as e:
# is_cmd_found uses pywin32 on windows, which might not be available
if sys.platform == 'win32' and 'pywin32' in str(e):
return skip("This test runs only if pywin32 and commands '{0}' "
"are installed".format(commands))
raise e
return skip("This test runs only if one of the commands {0} "
"is installed".format(commands))
|
bsd-3-clause
|
vab9/mosyco
|
mosyco/helpers.py
|
1
|
1495
|
# -*- coding: utf-8 -*-
"""This module contains various helper functions."""
import os
import sys
import contextlib
import logging
import pandas as pd
def load_dataframe():
"""Load the dataset into memory."""
df = pd.read_csv(os.path.join('data/sample_data.csv'),
index_col=1, parse_dates=True,
infer_datetime_format=True)
# sanitize dataframe
df = df.drop(['Unnamed: 0'], axis=1)
return df
def setup_logging(args):
"""Setup logging for the application"""
log = logging.getLogger(__package__)
log.setLevel(args.loglevel)
# disable root logger handlers
root_logger = logging.getLogger()
root_logger.handlers = []
# set log output destination
if args.logfile:
handler = logging.FileHandler('mosyco.log', mode='w')
else:
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('{name}: {message}', style='{'))
root_logger.addHandler(handler)
# set prophet loglevel
logging.getLogger('fbprophet').setLevel(logging.WARNING)
return log
@contextlib.contextmanager
def silence():
"""Silence all output in block used with this context manager.
This is done by redirecting stdout to /dev/null while block is executing.
"""
devnull = open(os.devnull, 'w')
oldstdout_fno = os.dup(sys.stdout.fileno())
os.dup2(devnull.fileno(), 1)
yield
os.dup2(oldstdout_fno, 1)
devnull.close()
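# Usage sketch (illustrative; ``noisy_fit`` is a hypothetical stand-in for any
# chatty call, e.g. a Prophet model fit):
#
#     with silence():
#         noisy_fit()   # anything written to stdout inside the block is discarded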
|
mit
|
akrherz/iem
|
htdocs/plotting/auto/scripts/p82.py
|
1
|
5972
|
"""Calendar Plot of Automated Station Summaries"""
import datetime
import psycopg2.extras
import pandas as pd
from pandas.io.sql import read_sql
from pyiem.util import get_autoplot_context, get_dbconn, convert_value
from pyiem.reference import TRACE_VALUE
from pyiem.plot import calendar_plot
from pyiem.exceptions import NoDataFound
PDICT = {
"max_tmpf": "High Temperature",
"high_departure": "High Temperature Departure",
"min_tmpf": "Low Temperature",
"low_departure": "Low Temperature Departure",
"avg_tmpf": "Average Temperature",
"avg_departure": "Average Temperature Departure",
"max_dwpf": "Highest Dew Point Temperature",
"min_dwpf": "Lowest Dew Point Temperature",
"avg_smph": "Average Wind Speed [mph]",
"max_smph": "Maximum Wind Speed/Gust [mph]",
"pday": "Precipitation",
}
def get_description():
"""Return a dict describing how to call this plotter"""
desc = dict()
desc["data"] = True
desc[
"description"
] = """This chart presents a series of daily summary data
as a calendar. The daily totals should be valid for the local day of the
weather station. The climatology is based on the nearest NCEI 1981-2010
climate station, which in most cases is the same station. Climatology
values are rounded to the nearest whole degree Fahrenheit and then compared
against the observed value to compute a departure.
"""
today = datetime.date.today()
m90 = today - datetime.timedelta(days=90)
desc["arguments"] = [
dict(
type="zstation",
name="station",
default="DSM",
network="IA_ASOS",
label="Select Station",
),
dict(
type="select",
name="var",
default="pday",
label="Which Daily Variable:",
options=PDICT,
),
dict(
type="date",
name="sdate",
default=m90.strftime("%Y/%m/%d"),
label="Start Date:",
min="1929/01/01",
),
dict(
type="date",
name="edate",
default=today.strftime("%Y/%m/%d"),
label="End Date:",
min="1929/01/01",
),
]
return desc
def safe(row, varname):
"""Safe conversion of value for printing as a string"""
val = row[varname]
if val is None:
return "M"
if varname == "pday":
if val == TRACE_VALUE:
return "T"
if val == 0:
return "0"
return "%.2f" % (val,)
# prevent -0 values
return "%i" % (val,)
def diff(val, climo):
"""Safe subtraction."""
if val is None or climo is None:
return None
return val - climo
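# Hedged worked example (editor's illustration; not called by the plotter):
# a day with max_tmpf = 87 and an NCEI climatological high of 81 gives
#     diff(87, 81)  ->  6        (rendered by safe() as the string "6")
# while a trace of precipitation renders as "T":
#     safe({"pday": TRACE_VALUE}, "pday")  ->  "T"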
def plotter(fdict):
"""Go"""
pgconn = get_dbconn("iem")
cursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
ctx = get_autoplot_context(fdict, get_description())
station = ctx["station"]
varname = ctx["var"]
sdate = ctx["sdate"]
edate = ctx["edate"]
# Get Climatology
cdf = read_sql(
"SELECT to_char(valid, 'mmdd') as sday, "
"round(high::numeric, 0) as high, "
"round(low::numeric, 0) as low, "
"round(((high + low) / 2.)::numeric, 0) as avg, "
"precip from ncei_climate91 WHERE station = %s ORDER by sday ASC",
get_dbconn("coop"),
params=(ctx["_nt"].sts[station]["ncei91"],),
index_col="sday",
)
if cdf.empty:
raise NoDataFound("No Data Found.")
cursor.execute(
"""
SELECT day, max_tmpf, min_tmpf, max_dwpf, min_dwpf,
(max_tmpf + min_tmpf) / 2. as avg_tmpf,
pday, avg_sknt, coalesce(max_gust, max_sknt) as peak_wind
from summary s JOIN stations t
on (t.iemid = s.iemid) WHERE s.day >= %s and s.day <= %s and
t.id = %s and t.network = %s ORDER by day ASC
""",
(sdate, edate, station, ctx["network"]),
)
rows = []
data = {}
for row in cursor:
hd = diff(row["max_tmpf"], cdf.at[row[0].strftime("%m%d"), "high"])
ld = diff(row["min_tmpf"], cdf.at[row[0].strftime("%m%d"), "low"])
ad = diff(row["avg_tmpf"], cdf.at[row[0].strftime("%m%d"), "avg"])
avg_sknt = row["avg_sknt"]
if avg_sknt is None:
if varname == "avg_smph":
continue
avg_sknt = 0
peak_wind = row["peak_wind"]
if peak_wind is None:
if varname == "max_smph":
continue
peak_wind = 0
rows.append(
dict(
day=row["day"],
max_tmpf=row["max_tmpf"],
avg_smph=convert_value(avg_sknt, "knot", "mile / hour"),
max_smph=convert_value(peak_wind, "knot", "mile / hour"),
min_dwpf=row["min_dwpf"],
max_dwpf=row["max_dwpf"],
high_departure=hd,
low_departure=ld,
avg_departure=ad,
min_tmpf=row["min_tmpf"],
pday=row["pday"],
)
)
data[row[0]] = {"val": safe(rows[-1], varname)}
if data[row[0]]["val"] == "0":
data[row[0]]["color"] = "k"
elif varname == "high_departure":
data[row[0]]["color"] = "b" if hd < 0 else "r"
elif varname == "low_departure":
data[row[0]]["color"] = "b" if ld < 0 else "r"
elif varname == "avg_departure":
data[row[0]]["color"] = "b" if ad < 0 else "r"
df = pd.DataFrame(rows)
title = "[%s] %s Daily %s" % (
station,
ctx["_nt"].sts[station]["name"],
PDICT.get(varname),
)
subtitle = "%s thru %s" % (
sdate.strftime("%-d %b %Y"),
edate.strftime("%-d %b %Y"),
)
fig = calendar_plot(sdate, edate, data, title=title, subtitle=subtitle)
return fig, df
if __name__ == "__main__":
    plotter({"var": "avg_smph"})
|
mit
|
3324fr/spinalcordtoolbox
|
dev/tamag/old/test_filtering.py
|
1
|
3779
|
#!/usr/bin/env python
#Test programm for the 2D gaussian filter
# check if needed Python libraries are already installed or not
import os
import commands
import sys
import sct_utils as sct
import nibabel as nib
import numpy
import matplotlib.pyplot as plt
import sct_create_mask
from msct_image import Image
def filter_2Dgaussian(input_padded_file, size_filter, output_file_name='Result'):
nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(input_padded_file)
print nx, ny, nz, nt, px, py, pz, pt
    gaussian_filter=sct_create_mask.create_mask2d(center=((int(size_filter/px)-1.0)/2.0,(int(size_filter/py)-1.0)/2.0), shape='gaussian', size=size_filter, nx=int(size_filter/px),ny=int(size_filter/py)) # problem with the center
    # Note: the multiplicative factor of the classical Gaussian filter was forgotten (create_mask2d does not take it into account)
print (int(size_filter/px)-1.0)/2.0,(int(size_filter/py)-1.0)/2.0
print int(size_filter/px), int(size_filter/py)
plt.plot(gaussian_filter)
plt.grid()
plt.show()
#center=(int(size_filter/px)/2.0,int(size_filter/py)/2.0)
#Pad
#gaussian_filter_pad = 'pad_' + gaussian_filter
#sct.run('sct_c2d ' + gaussian_filter + ' -pad ' + pad + 'x0vox ' + pad + 'x' + pad + 'x0vox 0 -o ' + gaussian_filter_pad) #+ pad+ 'x'
image_input_padded_file=Image(input_padded_file)
print('1: numpy.sum(image_input_padded_file.data[:,:,:])', numpy.sum(image_input_padded_file.data[:,:,:]))
#Create the output file
#im_output=image_input_padded_file
    im_output = image_input_padded_file.data * 0 # here, image_input_padded_file.data is itself also set to zero
#im_output_freq=image_input_padded_file
im_output_freq = image_input_padded_file.data * 0
#Create padded filter in frequency domain
gaussian_filter_freq=numpy.fft.fft2(gaussian_filter, s=(image_input_padded_file.data.shape[0], image_input_padded_file.data.shape[1]))
plt.plot(gaussian_filter_freq)
plt.grid()
plt.show()
hauteur_image=image_input_padded_file.data.shape[2]
print('2: numpy.sum(image_input_padded_file.data[:,:,:])', numpy.sum(image_input_padded_file.data[:,:,:]))
#Apply 2D filter to every slice of the image
for i in range(hauteur_image):
image_input_padded_file_frequentiel=numpy.fft.fft2(image_input_padded_file.data[:,:,i], axes=(0,1))
im_output_freq[:,:,i]=gaussian_filter_freq*image_input_padded_file_frequentiel
im_output[:,:,i]=numpy.fft.ifft2(im_output_freq[:,:,i], axes=(0,1))
print('numpy.sum(im_output[:,:,:])', numpy.sum(im_output[:,:,:]))
#Save the file
#im_output.setFileName(output_file_name)
#im_output.save('minimize')
# Generate the T1, PD and MTVF maps as a NIFTI file with the right header
path_spgr, file_name, ext_spgr = sct.extract_fname(input_padded_file)
fname_output = path_spgr + output_file_name + ext_spgr
sct.printv('Generate the NIFTI file with the right header...')
# Associate the header to the MTVF and PD maps data as a NIFTI file
hdr = nib.load(input_padded_file).get_header()
img_with_hdr = nib.Nifti1Image(im_output, None, hdr)
# Save the T1, PD and MTVF maps file
    nib.save(img_with_hdr, fname_output) # Problem: during tests this saves the Result file into tmp.150317111945
return img_with_hdr
#def filter_2Dmean(input_padded_file, size_kernel, output_file_name='Result'):
#mean_filter=(1/9)*numpy.ones((int(9/px),int(9/py))) #filter of 9mm
os.chdir('/home/tamag/data/test_straightening/20150316_allan/tmp.150317111945/')
filter_2Dgaussian('tmp.centerline_pad.nii.gz', 15)
|
mit
|
petosegan/scikit-learn
|
sklearn/cluster/birch.py
|
207
|
22706
|
# Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy import sparse
from math import sqrt
from ..metrics.pairwise import euclidean_distances
from ..base import TransformerMixin, ClusterMixin, BaseEstimator
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils.extmath import row_norms, safe_sparse_dot
from ..utils.validation import NotFittedError, check_is_fitted
from .hierarchical import AgglomerativeClustering
def _iterate_sparse_X(X):
"""This little hack returns a densified row when iterating over a sparse
matrix, insted of constructing a sparse matrix for every row that is
expensive.
"""
n_samples = X.shape[0]
X_indices = X.indices
X_data = X.data
X_indptr = X.indptr
for i in xrange(n_samples):
row = np.zeros(X.shape[1])
startptr, endptr = X_indptr[i], X_indptr[i + 1]
nonzero_indices = X_indices[startptr:endptr]
row[nonzero_indices] = X_data[startptr:endptr]
yield row
def _split_node(node, threshold, branching_factor):
"""The node has to be split if there is no place for a new subcluster
in the node.
1. Two empty nodes and two empty subclusters are initialized.
2. The pair of distant subclusters are found.
    3. The remaining subclusters are assigned to whichever of the two
       distant subclusters is nearer, and the properties of the new
       subclusters and nodes are updated accordingly.
4. The two nodes are set as children to the two subclusters.
"""
new_subcluster1 = _CFSubcluster()
new_subcluster2 = _CFSubcluster()
new_node1 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_node2 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_subcluster1.child_ = new_node1
new_subcluster2.child_ = new_node2
if node.is_leaf:
if node.prev_leaf_ is not None:
node.prev_leaf_.next_leaf_ = new_node1
new_node1.prev_leaf_ = node.prev_leaf_
new_node1.next_leaf_ = new_node2
new_node2.prev_leaf_ = new_node1
new_node2.next_leaf_ = node.next_leaf_
if node.next_leaf_ is not None:
node.next_leaf_.prev_leaf_ = new_node2
dist = euclidean_distances(
node.centroids_, Y_norm_squared=node.squared_norm_, squared=True)
n_clusters = dist.shape[0]
farthest_idx = np.unravel_index(
dist.argmax(), (n_clusters, n_clusters))
node1_dist, node2_dist = dist[[farthest_idx]]
node1_closer = node1_dist < node2_dist
for idx, subcluster in enumerate(node.subclusters_):
if node1_closer[idx]:
new_node1.append_subcluster(subcluster)
new_subcluster1.update(subcluster)
else:
new_node2.append_subcluster(subcluster)
new_subcluster2.update(subcluster)
return new_subcluster1, new_subcluster2
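# Hedged sketch (editor's addition; never called by Birch itself): steps 2-3 of
# _split_node boil down to picking the two mutually farthest centroids and
# assigning every other centroid to whichever of the two is closer.
def _example_farthest_pair_partition(centroids):
    """Return a boolean mask that is True where a centroid is closer to the
    first member of the farthest pair."""
    dist = euclidean_distances(centroids, squared=True)
    n = dist.shape[0]
    i, j = np.unravel_index(dist.argmax(), (n, n))   # mutually farthest pair
    return dist[i] < dist[j]                         # closer to centroid i?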
class _CFNode(object):
"""Each node in a CFTree is called a CFNode.
The CFNode can have a maximum of branching_factor
number of CFSubclusters.
Parameters
----------
threshold : float
Threshold needed for a new subcluster to enter a CFSubcluster.
branching_factor : int
Maximum number of CF subclusters in each node.
is_leaf : bool
We need to know if the CFNode is a leaf or not, in order to
retrieve the final subclusters.
n_features : int
The number of features.
Attributes
----------
subclusters_ : array-like
list of subclusters for a particular CFNode.
    prev_leaf_ : _CFNode
        Previous leaf node. Useful only if is_leaf is True.
    next_leaf_ : _CFNode
        Next leaf node. Useful only if is_leaf is True. Needed to retrieve
        the final subclusters.
    init_centroids_ : ndarray, shape (branching_factor + 1, n_features)
        Manipulate ``init_centroids_`` throughout rather than ``centroids_``
        since the centroids are just a view of the ``init_centroids_``.
    init_sq_norm_ : ndarray, shape (branching_factor + 1,)
        Manipulate ``init_sq_norm_`` throughout; similar to ``init_centroids_``.
centroids_ : ndarray
view of ``init_centroids_``.
squared_norm_ : ndarray
view of ``init_sq_norm_``.
"""
def __init__(self, threshold, branching_factor, is_leaf, n_features):
self.threshold = threshold
self.branching_factor = branching_factor
self.is_leaf = is_leaf
self.n_features = n_features
# The list of subclusters, centroids and squared norms
# to manipulate throughout.
self.subclusters_ = []
self.init_centroids_ = np.zeros((branching_factor + 1, n_features))
self.init_sq_norm_ = np.zeros((branching_factor + 1))
self.squared_norm_ = []
self.prev_leaf_ = None
self.next_leaf_ = None
def append_subcluster(self, subcluster):
n_samples = len(self.subclusters_)
self.subclusters_.append(subcluster)
self.init_centroids_[n_samples] = subcluster.centroid_
self.init_sq_norm_[n_samples] = subcluster.sq_norm_
        # Keep centroids and squared norm as views. In this way, updating
        # init_centroids_ and init_sq_norm_ is sufficient; the views stay
        # in sync automatically.
self.centroids_ = self.init_centroids_[:n_samples + 1, :]
self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]
def update_split_subclusters(self, subcluster,
new_subcluster1, new_subcluster2):
"""Remove a subcluster from a node and update it with the
split subclusters.
"""
ind = self.subclusters_.index(subcluster)
self.subclusters_[ind] = new_subcluster1
self.init_centroids_[ind] = new_subcluster1.centroid_
self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
self.append_subcluster(new_subcluster2)
def insert_cf_subcluster(self, subcluster):
"""Insert a new subcluster into the node."""
if not self.subclusters_:
self.append_subcluster(subcluster)
return False
threshold = self.threshold
branching_factor = self.branching_factor
# We need to find the closest subcluster among all the
# subclusters so that we can insert our new subcluster.
dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
dist_matrix *= -2.
dist_matrix += self.squared_norm_
closest_index = np.argmin(dist_matrix)
closest_subcluster = self.subclusters_[closest_index]
# If the subcluster has a child, we need a recursive strategy.
if closest_subcluster.child_ is not None:
split_child = closest_subcluster.child_.insert_cf_subcluster(
subcluster)
if not split_child:
# If it is determined that the child need not be split, we
# can just update the closest_subcluster
closest_subcluster.update(subcluster)
self.init_centroids_[closest_index] = \
self.subclusters_[closest_index].centroid_
self.init_sq_norm_[closest_index] = \
self.subclusters_[closest_index].sq_norm_
return False
            # Things are not too good. We need to redistribute the
            # subclusters in our child node, and add a new subcluster
            # in the parent subcluster to accommodate the new child.
else:
new_subcluster1, new_subcluster2 = _split_node(
closest_subcluster.child_, threshold, branching_factor)
self.update_split_subclusters(
closest_subcluster, new_subcluster1, new_subcluster2)
if len(self.subclusters_) > self.branching_factor:
return True
return False
# good to go!
else:
merged = closest_subcluster.merge_subcluster(
subcluster, self.threshold)
if merged:
self.init_centroids_[closest_index] = \
closest_subcluster.centroid_
self.init_sq_norm_[closest_index] = \
closest_subcluster.sq_norm_
return False
# not close to any other subclusters, and we still
# have space, so add.
elif len(self.subclusters_) < self.branching_factor:
self.append_subcluster(subcluster)
return False
# We do not have enough space nor is it closer to an
# other subcluster. We need to split.
else:
self.append_subcluster(subcluster)
return True
class _CFSubcluster(object):
"""Each subcluster in a CFNode is called a CFSubcluster.
    A CFSubcluster can have a CFNode as its child.
Parameters
----------
linear_sum : ndarray, shape (n_features,), optional
Sample. This is kept optional to allow initialization of empty
subclusters.
Attributes
----------
n_samples_ : int
Number of samples that belong to each subcluster.
linear_sum_ : ndarray
Linear sum of all the samples in a subcluster. Prevents holding
all sample data in memory.
squared_sum_ : float
Sum of the squared l2 norms of all samples belonging to a subcluster.
centroid_ : ndarray
Centroid of the subcluster. Prevent recomputing of centroids when
``CFNode.centroids_`` is called.
child_ : _CFNode
Child Node of the subcluster. Once a given _CFNode is set as the child
of the _CFNode, it is set to ``self.child_``.
sq_norm_ : ndarray
Squared norm of the subcluster. Used to prevent recomputing when
pairwise minimum distances are computed.
"""
def __init__(self, linear_sum=None):
if linear_sum is None:
self.n_samples_ = 0
self.squared_sum_ = 0.0
self.linear_sum_ = 0
else:
self.n_samples_ = 1
self.centroid_ = self.linear_sum_ = linear_sum
self.squared_sum_ = self.sq_norm_ = np.dot(
self.linear_sum_, self.linear_sum_)
self.child_ = None
def update(self, subcluster):
self.n_samples_ += subcluster.n_samples_
self.linear_sum_ += subcluster.linear_sum_
self.squared_sum_ += subcluster.squared_sum_
self.centroid_ = self.linear_sum_ / self.n_samples_
self.sq_norm_ = np.dot(self.centroid_, self.centroid_)
def merge_subcluster(self, nominee_cluster, threshold):
"""Check if a cluster is worthy enough to be merged. If
yes then merge.
"""
new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
new_n = self.n_samples_ + nominee_cluster.n_samples_
new_centroid = (1 / new_n) * new_ls
new_norm = np.dot(new_centroid, new_centroid)
dot_product = (-2 * new_n) * new_norm
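        # Squared radius of the merged subcluster: SS/n - ||centroid||^2, i.e.
        # the mean squared distance of the pooled samples from the new centroid.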
sq_radius = (new_ss + dot_product) / new_n + new_norm
if sq_radius <= threshold ** 2:
(self.n_samples_, self.linear_sum_, self.squared_sum_,
self.centroid_, self.sq_norm_) = \
new_n, new_ls, new_ss, new_centroid, new_norm
return True
return False
@property
def radius(self):
"""Return radius of the subcluster"""
dot_product = -2 * np.dot(self.linear_sum_, self.centroid_)
return sqrt(
((self.squared_sum_ + dot_product) / self.n_samples_) +
self.sq_norm_)
class Birch(BaseEstimator, TransformerMixin, ClusterMixin):
"""Implements the Birch clustering algorithm.
Every new sample is inserted into the root of the Clustering Feature
Tree. It is then clubbed together with the subcluster that has the
centroid closest to the new sample. This is done recursively till it
    ends up at the leaf subcluster of the tree that has the closest centroid.
Read more in the :ref:`User Guide <birch>`.
Parameters
----------
threshold : float, default 0.5
The radius of the subcluster obtained by merging a new sample and the
        closest subcluster should be less than the threshold. Otherwise a new
subcluster is started.
branching_factor : int, default 50
        Maximum number of CF subclusters in each node. If a new sample enters
        such that the number of subclusters exceeds the branching_factor then
the node has to be split. The corresponding parent also has to be
split and if the number of subclusters in the parent is greater than
the branching factor, then it has to be split recursively.
    n_clusters : int, instance of sklearn.cluster model, default 3
        Number of clusters after the final clustering step, which treats the
        subclusters from the leaves as new samples. If set to None, this final
        clustering step is not performed and the subclusters are returned
        as they are. If a model is provided, the model is fit treating
the subclusters as new samples and the initial data is mapped to the
label of the closest subcluster. If an int is provided, the model
fit is AgglomerativeClustering with n_clusters set to the int.
compute_labels : bool, default True
Whether or not to compute labels for each fit.
copy : bool, default True
Whether or not to make a copy of the given data. If set to False,
the initial data will be overwritten.
Attributes
----------
root_ : _CFNode
Root of the CFTree.
dummy_leaf_ : _CFNode
Start pointer to all the leaves.
subcluster_centers_ : ndarray,
Centroids of all subclusters read directly from the leaves.
subcluster_labels_ : ndarray,
Labels assigned to the centroids of the subclusters after
they are clustered globally.
labels_ : ndarray, shape (n_samples,)
Array of labels assigned to the input data.
        If partial_fit is used instead of fit, they are assigned to the
last batch of data.
Examples
--------
>>> from sklearn.cluster import Birch
>>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
>>> brc = Birch(branching_factor=50, n_clusters=None, threshold=0.5,
... compute_labels=True)
>>> brc.fit(X)
Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None,
threshold=0.5)
>>> brc.predict(X)
array([0, 0, 0, 1, 1, 1])
References
----------
    * Tian Zhang, Raghu Ramakrishnan, Miron Livny
BIRCH: An efficient data clustering method for large databases.
http://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf
* Roberto Perdisci
JBirch - Java implementation of BIRCH clustering algorithm
https://code.google.com/p/jbirch/
"""
def __init__(self, threshold=0.5, branching_factor=50, n_clusters=3,
compute_labels=True, copy=True):
self.threshold = threshold
self.branching_factor = branching_factor
self.n_clusters = n_clusters
self.compute_labels = compute_labels
self.copy = copy
def fit(self, X, y=None):
"""
Build a CF Tree for the input data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
"""
self.fit_, self.partial_fit_ = True, False
return self._fit(X)
def _fit(self, X):
X = check_array(X, accept_sparse='csr', copy=self.copy)
threshold = self.threshold
branching_factor = self.branching_factor
if branching_factor <= 1:
raise ValueError("Branching_factor should be greater than one.")
n_samples, n_features = X.shape
# If partial_fit is called for the first time or fit is called, we
# start a new tree.
partial_fit = getattr(self, 'partial_fit_')
has_root = getattr(self, 'root_', None)
if getattr(self, 'fit_') or (partial_fit and not has_root):
# The first root is the leaf. Manipulate this object throughout.
self.root_ = _CFNode(threshold, branching_factor, is_leaf=True,
n_features=n_features)
# To enable getting back subclusters.
self.dummy_leaf_ = _CFNode(threshold, branching_factor,
is_leaf=True, n_features=n_features)
self.dummy_leaf_.next_leaf_ = self.root_
self.root_.prev_leaf_ = self.dummy_leaf_
        # Cannot vectorize this loop; reason enough to move it to Cython.
if not sparse.issparse(X):
iter_func = iter
else:
iter_func = _iterate_sparse_X
for sample in iter_func(X):
subcluster = _CFSubcluster(linear_sum=sample)
split = self.root_.insert_cf_subcluster(subcluster)
if split:
new_subcluster1, new_subcluster2 = _split_node(
self.root_, threshold, branching_factor)
del self.root_
self.root_ = _CFNode(threshold, branching_factor,
is_leaf=False,
n_features=n_features)
self.root_.append_subcluster(new_subcluster1)
self.root_.append_subcluster(new_subcluster2)
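        # Collect the centroids of all leaf subclusters; they act as a
        # compressed summary of X for the (optional) global clustering step.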
centroids = np.concatenate([
leaf.centroids_ for leaf in self._get_leaves()])
self.subcluster_centers_ = centroids
self._global_clustering(X)
return self
def _get_leaves(self):
"""
Retrieve the leaves of the CF Node.
Returns
-------
leaves: array-like
List of the leaf nodes.
"""
leaf_ptr = self.dummy_leaf_.next_leaf_
leaves = []
while leaf_ptr is not None:
leaves.append(leaf_ptr)
leaf_ptr = leaf_ptr.next_leaf_
return leaves
def partial_fit(self, X=None, y=None):
"""
Online learning. Prevents rebuilding of CFTree from scratch.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features), None
Input data. If X is not provided, only the global clustering
step is done.
"""
self.partial_fit_, self.fit_ = True, False
if X is None:
# Perform just the final global clustering step.
self._global_clustering()
return self
else:
self._check_fit(X)
return self._fit(X)
def _check_fit(self, X):
is_fitted = hasattr(self, 'subcluster_centers_')
# Called by partial_fit, before fitting.
has_partial_fit = hasattr(self, 'partial_fit_')
# Should raise an error if one does not fit before predicting.
if not (is_fitted or has_partial_fit):
raise NotFittedError("Fit training data before predicting")
if is_fitted and X.shape[1] != self.subcluster_centers_.shape[1]:
raise ValueError(
"Training data and predicted data do "
"not have same number of features.")
def predict(self, X):
"""
Predict data using the ``centroids_`` of subclusters.
Avoid computation of the row norms of X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
labels: ndarray, shape(n_samples)
Labelled data.
"""
X = check_array(X, accept_sparse='csr')
self._check_fit(X)
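        # Same squared-distance trick as in the CF tree: ||x||^2 is constant
        # per row, so argmin_j ||x - c_j||^2 reduces to the argmin of
        # -2 * X.C^T + ||C||^2 over the subcluster centers.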
reduced_distance = safe_sparse_dot(X, self.subcluster_centers_.T)
reduced_distance *= -2
reduced_distance += self._subcluster_norms
return self.subcluster_labels_[np.argmin(reduced_distance, axis=1)]
def transform(self, X, y=None):
"""
Transform X into subcluster centroids dimension.
Each dimension represents the distance from the sample point to each
cluster centroid.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters)
Transformed data.
"""
check_is_fitted(self, 'subcluster_centers_')
return euclidean_distances(X, self.subcluster_centers_)
def _global_clustering(self, X=None):
"""
Global clustering for the subclusters obtained after fitting
"""
clusterer = self.n_clusters
centroids = self.subcluster_centers_
compute_labels = (X is not None) and self.compute_labels
# Preprocessing for the global clustering.
not_enough_centroids = False
if isinstance(clusterer, int):
clusterer = AgglomerativeClustering(
n_clusters=self.n_clusters)
# There is no need to perform the global clustering step.
if len(centroids) < self.n_clusters:
not_enough_centroids = True
elif (clusterer is not None and not
hasattr(clusterer, 'fit_predict')):
raise ValueError("n_clusters should be an instance of "
"ClusterMixin or an int")
# To use in predict to avoid recalculation.
self._subcluster_norms = row_norms(
self.subcluster_centers_, squared=True)
if clusterer is None or not_enough_centroids:
self.subcluster_labels_ = np.arange(len(centroids))
if not_enough_centroids:
warnings.warn(
"Number of subclusters found (%d) by Birch is less "
"than (%d). Decrease the threshold."
% (len(centroids), self.n_clusters))
else:
# The global clustering step that clusters the subclusters of
# the leaves. It assumes the centroids of the subclusters as
# samples and finds the final centroids.
self.subcluster_labels_ = clusterer.fit_predict(
self.subcluster_centers_)
if compute_labels:
self.labels_ = self.predict(X)
|
bsd-3-clause
|
thilbern/scikit-learn
|
examples/ensemble/plot_gradient_boosting_oob.py
|
21
|
4761
|
"""
======================================
Gradient Boosting Out-of-Bag estimates
======================================
Out-of-bag (OOB) estimates can be a useful heuristic to estimate
the "optimal" number of boosting iterations.
OOB estimates are almost identical to cross-validation estimates but
they can be computed on-the-fly without the need for repeated model
fitting.
OOB estimates are only available for Stochastic Gradient Boosting
(i.e. ``subsample < 1.0``); the estimates are derived from the improvement
in loss based on the examples not included in the bootstrap sample
(the so-called out-of-bag examples).
The OOB estimator is a pessimistic estimator of the true
test loss, but remains a fairly good approximation for a small number of trees.
The figure shows the cumulative sum of the negative OOB improvements
as a function of the boosting iteration. As you can see, it tracks the test
loss for the first hundred iterations but then diverges in a
pessimistic way.
The figure also shows the performance of 3-fold cross validation which
usually gives a better estimate of the test loss
but is computationally more demanding.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn.cross_validation import KFold
from sklearn.cross_validation import train_test_split
# Generate data (adapted from G. Ridgeway's gbm example)
n_samples = 1000
random_state = np.random.RandomState(13)
x1 = random_state.uniform(size=n_samples)
x2 = random_state.uniform(size=n_samples)
x3 = random_state.randint(0, 4, size=n_samples)
p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3)))
y = random_state.binomial(1, p, size=n_samples)
X = np.c_[x1, x2, x3]
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
random_state=9)
# Fit classifier with out-of-bag estimates
params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5,
'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3}
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
acc = clf.score(X_test, y_test)
print("Accuracy: {:.4f}".format(acc))
n_estimators = params['n_estimators']
x = np.arange(n_estimators) + 1
def heldout_score(clf, X_test, y_test):
"""compute deviance scores on ``X_test`` and ``y_test``. """
score = np.zeros((n_estimators,), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
score[i] = clf.loss_(y_test, y_pred)
return score
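# Average the staged held-out deviance over the CV folds to estimate the test
# loss as a function of the number of boosting iterations.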
def cv_estimate(n_folds=3):
cv = KFold(n=X_train.shape[0], n_folds=n_folds)
cv_clf = ensemble.GradientBoostingClassifier(**params)
val_scores = np.zeros((n_estimators,), dtype=np.float64)
for train, test in cv:
cv_clf.fit(X_train[train], y_train[train])
val_scores += heldout_score(cv_clf, X_train[test], y_train[test])
val_scores /= n_folds
return val_scores
# Estimate best n_estimator using cross-validation
cv_score = cv_estimate(3)
# Compute best n_estimator for test data
test_score = heldout_score(clf, X_test, y_test)
# negative cumulative sum of oob improvements
cumsum = -np.cumsum(clf.oob_improvement_)
# min loss according to OOB
oob_best_iter = x[np.argmin(cumsum)]
# min loss according to test (normalize such that first loss is 0)
test_score -= test_score[0]
test_best_iter = x[np.argmin(test_score)]
# min loss according to cv (normalize such that first loss is 0)
cv_score -= cv_score[0]
cv_best_iter = x[np.argmin(cv_score)]
# color brew for the three curves
oob_color = list(map(lambda x: x / 256.0, (190, 174, 212)))
test_color = list(map(lambda x: x / 256.0, (127, 201, 127)))
cv_color = list(map(lambda x: x / 256.0, (253, 192, 134)))
# plot curves and vertical lines for best iterations
plt.plot(x, cumsum, label='OOB loss', color=oob_color)
plt.plot(x, test_score, label='Test loss', color=test_color)
plt.plot(x, cv_score, label='CV loss', color=cv_color)
plt.axvline(x=oob_best_iter, color=oob_color)
plt.axvline(x=test_best_iter, color=test_color)
plt.axvline(x=cv_best_iter, color=cv_color)
# add three vertical lines to xticks
xticks = plt.xticks()
xticks_pos = np.array(xticks[0].tolist() +
[oob_best_iter, cv_best_iter, test_best_iter])
xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) +
['OOB', 'CV', 'Test'])
ind = np.argsort(xticks_pos)
xticks_pos = xticks_pos[ind]
xticks_label = xticks_label[ind]
plt.xticks(xticks_pos, xticks_label)
plt.legend(loc='upper right')
plt.ylabel('normalized loss')
plt.xlabel('number of iterations')
plt.show()
|
bsd-3-clause
|
tmhm/scikit-learn
|
doc/datasets/mldata_fixture.py
|
367
|
1183
|
"""Fixture module to skip the datasets loading when offline
Mock urllib2 access to mldata.org and create a temporary data folder.
"""
from os import makedirs
from os.path import join
import numpy as np
import tempfile
import shutil
from sklearn import datasets
from sklearn.utils.testing import install_mldata_mock
from sklearn.utils.testing import uninstall_mldata_mock
def globs(globs):
# Create a temporary folder for the data fetcher
global custom_data_home
custom_data_home = tempfile.mkdtemp()
makedirs(join(custom_data_home, 'mldata'))
globs['custom_data_home'] = custom_data_home
return globs
def setup_module():
# setup mock urllib2 module to avoid downloading from mldata.org
install_mldata_mock({
'mnist-original': {
'data': np.empty((70000, 784)),
'label': np.repeat(np.arange(10, dtype='d'), 7000),
},
'iris': {
'data': np.empty((150, 4)),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
})
def teardown_module():
uninstall_mldata_mock()
shutil.rmtree(custom_data_home)
|
bsd-3-clause
|
annajur/cbp
|
OrbitInteg.py
|
1
|
3948
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from astropy import units
from galpy.orbit import Orbit
from FerrersPotential import FerrersPotential as FP
matplotlib.rcParams['figure.figsize'] = (10, 10)
def rot(omega, t):
temp = [[np.cos(t*omega), -np.sin(t*omega)], [np.sin(t*omega), np.cos(t*omega)]]
return np.array(temp)
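# Map inertial-frame (x, y) orbit samples into the frame corotating with the
# potential's pattern speed OmegaP (see inrotframe below).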
def inrotframe(orbit, ts, potential):
x, y = [], []
for item in ts:
x.append(orbit.x(item))
y.append(orbit.y(item))
xy = np.zeros([len(x),2])
xy[:,0] = x
xy[:,1] = y
omega = potential.OmegaP()
xrot, yrot = np.zeros(len(ts)), np.zeros(len(ts))
for i in range(len(ts)):
xrot[i],yrot[i] = np.dot(xy[i],rot(omega, ts[i]))
return xrot, yrot
pmw = FP(a = 8*units.kpc, b = 0.35, c = 0.2375, normalize = True, omegab = 10.*units.km/units.s/units.kpc)
initc = []
R = 1
svxvv = '[0.2,0.2,vT,0.,0.,0.]'
output_file = open('/home/annaj/cbp_usrp/pretty_pictures/in_rot/6/setvT/log','w')
output_file.write(svxvv+'\n'+'R = ')
'''
for i in range(1,41):
R = 0.025*i
initc.append([R,0.01,0.01,0.,0.,np.pi/2])
output_file.write(str(R)+', ')
ts = np.linspace(0,100,2000)
output_file.close()
'''
'''
for i in range(1,21):
vR = -0.2 + 0.02*i
initc.append([1.35,vR,0.1,0.,0.,np.pi/4])
output_file.write(str(vR)+', ')
ts = np.linspace(0,100,800)
output_file.close()
'''
for i in range(1,41):
vT = -0.7+0.05*i
initc.append([0.2,0.2,vT,0.,0.,0.])
output_file.write(str(vT)+', ')
ts = np.linspace(0,50,1500)
output_file.close()
orbits = []
for i in range(len(initc)):
orbits.append(Orbit(vxvv = initc[i]))
for i in range(len(orbits)):
try:
orbits[i].integrate(ts, pmw)
#orbits[i].plot(d1='x',d2='y', color = 'dodgerblue', overplot = False)
ts = np.linspace(0,50,1500)
xr, yr = inrotframe(orbits[i],ts,pmw)
plt.plot(xr,yr, c = 'crimson')
plt.xlabel(r'$x/R_0$', fontsize = 17)
plt.ylabel(r'$y/R_0$', fontsize = 17)
xr, yr = [],[]
if i<10:
name = '/home/annaj/cbp_usrp/pretty_pictures/in_rot/6/setvT/r0'+str(i)+'.png'
else:
name = '/home/annaj/cbp_usrp/pretty_pictures/in_rot/6/setvT/r'+str(i)+'.png'
plt.savefig(name)
plt.close()
except (RuntimeWarning,FutureWarning,RuntimeError,AttributeError,DeprecationWarning,ZeroDivisionError):
print('I am sorry, but something went wrong :(')
'''
--log--
out_rot:
[R,-0.1,0.1,0.,0.,0.] R = 1 + 0.05*i
[R,-0.1,0.1,0.,0.,np.pi/4] R = 1 + 0.05*i
[R,-0.1,0.1,0.,0.,np.pi/2] R = 1 + 0.05*i
[R,-0.1,0.2,0.,0.,0.]
[R,-0.1,0.3,0.,0.,0.]
[R,-0.2,0.3,0.,0.,0.]
[1.2,vR,0.2,0.,0.,0.] vR = -1.+ 0.03*i
[1.2,vR,0.1,0.,0.,0.] vR = -1.+ 0.03*i
[1.35,vR,0.1,0.,0.,np.pi/4] vR = -.2 + 0.02*i
[1.2,0.1,0.5,0.,vz,0.001] vz = 0.03*i
[1.2,0.1,0.1,0.,vz,0.001] vz = 0.03*i
[1.2,0.1,0.5,0.,0.,phi] phi = np.pi/20*i
[1.2,0.1,0.1,0.,0.,phi] phi = np.pi/200*i
in_rot:
[R,0.,0.009,0.,0.,0.001] R = 0.1*i
[R,0.1,0.009,0.,0.,0.0] R = 0.05*i
[R,.1,.009,0.,0.,np.pi/4] R = 0.05*i
[R,0.01,0.01,0.,0.,0.] R = 0.05*i
[R,0.01,0.01,0.,0.,np.pi/2] R = 0.05*i
[R,0.01,0.01,0.,0.,np.pi/4] R = 0.05*i
[0.7,vR,0.1,0.,0.,0.] vR = -0.8+0.1*i
[0.5,vR,0.1,0.,0.,0.] vR = -0.2+0.05*i
[0.5,vR,0.1,0.,0.,np.pi/4] vR = -0.2+0.05*i
[0.2,0.2,vT,0.,0.,0.] vT = -0.7+0.05*i
[0.3,0.2,vT,0.,0.,0.] vT = -0.7+0.05*i
[0.4,0.2,vT,0.,0.,0.] vT = -0.7+0.05*i
[0.5,0.2,vT,0.,0.,0.] vT = -0.7+0.05*i
[0.6,0.2,vT,0.,0.,0.] vT = -0.7+0.05*i
[0.7,0.2,vT,0.,0.,0.] vT = -0.7+0.05*i
[0.8,0.2,vT,0.,0.,0.] vT = -0.7+0.05*i
[0.7,0.2,0.6,0.,vz,0.001] vz = 0.03*i
[0.5,0.1,0.6,0.,vz,0.] vz = 0.03*i
[0.5,0.1,0.6,0.,vz,np.pi/4] vz = 0.03*i
[0.7,0.2,0.6,0.,0.,phi] phi = np.pi/20*i
[0.5,0.2,0.4,0.,0.,phi] phi = np.pi/20*i
[0.5,0.3,0.3,0.,0.,phi] phi = np.pi/20*i,
'''
|
mit
|
nguyentu1602/statsmodels
|
statsmodels/sandbox/distributions/otherdist.py
|
33
|
10145
|
'''Parametric Mixture Distributions
Created on Sat Jun 04 2011
Author: Josef Perktold
Notes:
Compound Poisson has mass point at zero
http://en.wikipedia.org/wiki/Compound_Poisson_distribution
and would need special treatment
need a distribution that has discrete mass points and continuous range, e.g.
compound Poisson, Tweedie (for some parameter range),
pdf of Tobit model (?) - truncation with clipping
Question: Metaclasses and class factories for generating new distributions from
existing distributions by transformation, mixing, compounding
'''
from __future__ import print_function
import numpy as np
from scipy import stats
class ParametricMixtureD(object):
'''mixtures with a discrete distribution
The mixing distribution is a discrete distribution like scipy.stats.poisson.
    All distributions in the mixture are of the same type, are parameterized
    by the outcome of the mixing distribution, and have to be continuous
    distributions (or have a pdf method).
As an example, a mixture of normal distributed random variables with
Poisson as the mixing distribution.
assumes vectorized shape, loc and scale as in scipy.stats.distributions
assume mixing_dist is frozen
initialization looks fragile for all possible cases of lower and upper
bounds of the distributions.
'''
def __init__(self, mixing_dist, base_dist, bd_args_func, bd_kwds_func,
cutoff=1e-3):
'''create a mixture distribution
Parameters
----------
mixing_dist : discrete frozen distribution
mixing distribution
base_dist : continuous distribution
parameterized distributions in the mixture
bd_args_func : callable
function that builds the tuple of args for the base_dist.
The function obtains as argument the values in the support of
the mixing distribution and should return an empty tuple or
a tuple of arrays.
bd_kwds_func : callable
function that builds the dictionary of kwds for the base_dist.
The function obtains as argument the values in the support of
the mixing distribution and should return an empty dictionary or
a dictionary with arrays as values.
cutoff : float
If the mixing distribution has infinite support, then the
distribution is truncated with approximately (subject to integer
conversion) the cutoff probability in the missing tail. Random
draws that are outside the truncated range are clipped, that is
assigned to the highest or lowest value in the truncated support.
'''
self.mixing_dist = mixing_dist
self.base_dist = base_dist
#self.bd_args = bd_args
if not np.isneginf(mixing_dist.dist.a):
lower = mixing_dist.dist.a
else:
lower = mixing_dist.ppf(1e-4)
if not np.isposinf(mixing_dist.dist.b):
upper = mixing_dist.dist.b
else:
upper = mixing_dist.isf(1e-4)
self.ma = lower
self.mb = upper
mixing_support = np.arange(lower, upper+1)
self.mixing_probs = mixing_dist.pmf(mixing_support)
self.bd_args = bd_args_func(mixing_support)
self.bd_kwds = bd_kwds_func(mixing_support)
def rvs(self, size=1):
mrvs = self.mixing_dist.rvs(size)
        #TODO: check strange cases ? this assumes contiguous integers
mrvs_idx = (np.clip(mrvs, self.ma, self.mb) - self.ma).astype(int)
bd_args = tuple(md[mrvs_idx] for md in self.bd_args)
bd_kwds = dict((k, self.bd_kwds[k][mrvs_idx]) for k in self.bd_kwds)
kwds = {'size':size}
kwds.update(bd_kwds)
        rvs = self.base_dist.rvs(*bd_args, **kwds)
return rvs, mrvs_idx
def pdf(self, x):
x = np.asarray(x)
if np.size(x) > 1:
x = x[...,None] #[None, ...]
bd_probs = self.base_dist.pdf(x, *self.bd_args, **self.bd_kwds)
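        # Mixture pdf: f(x) = sum_k P(mixing = k) * f_base(x | params(k)),
        # summed over the truncated support of the mixing distribution.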
prob = (bd_probs * self.mixing_probs).sum(-1)
return prob, bd_probs
def cdf(self, x):
x = np.asarray(x)
if np.size(x) > 1:
x = x[...,None] #[None, ...]
bd_probs = self.base_dist.cdf(x, *self.bd_args, **self.bd_kwds)
prob = (bd_probs * self.mixing_probs).sum(-1)
return prob, bd_probs
#try:
class ClippedContinuous(object):
'''clipped continuous distribution with a masspoint at clip_lower
Notes
-----
first version, to try out possible designs
insufficient checks for valid arguments and not clear
whether it works for distributions that have compact support
clip_lower is fixed and independent of the distribution parameters.
The clip_lower point in the pdf has to be interpreted as a mass point,
i.e. different treatment in integration and expect function, which means
none of the generic methods for this can be used.
maybe this will be better designed as a mixture between a degenerate or
discrete and a continuous distribution
Warning: uses equality to check for clip_lower values in function
arguments, since these are floating points, the comparison might fail
if clip_lower values are not exactly equal.
We could add a check whether the values are in a small neighborhood, but
it would be expensive (need to search and check all values).
'''
def __init__(self, base_dist, clip_lower):
self.base_dist = base_dist
self.clip_lower = clip_lower
def _get_clip_lower(self, kwds):
'''helper method to get clip_lower from kwds or attribute
'''
if not 'clip_lower' in kwds:
clip_lower = self.clip_lower
else:
clip_lower = kwds.pop('clip_lower')
return clip_lower, kwds
def rvs(self, *args, **kwds):
clip_lower, kwds = self._get_clip_lower(kwds)
rvs_ = self.base_dist.rvs(*args, **kwds)
#same as numpy.clip ?
rvs_[rvs_ < clip_lower] = clip_lower
return rvs_
def pdf(self, x, *args, **kwds):
x = np.atleast_1d(x)
if not 'clip_lower' in kwds:
clip_lower = self.clip_lower
else:
#allow clip_lower to be a possible parameter
clip_lower = kwds.pop('clip_lower')
pdf_raw = np.atleast_1d(self.base_dist.pdf(x, *args, **kwds))
        clip_mask = (x == clip_lower)
if np.any(clip_mask):
clip_prob = self.base_dist.cdf(clip_lower, *args, **kwds)
pdf_raw[clip_mask] = clip_prob
#the following will be handled by sub-classing rv_continuous
pdf_raw[x < clip_lower] = 0
return pdf_raw
def cdf(self, x, *args, **kwds):
if not 'clip_lower' in kwds:
clip_lower = self.clip_lower
else:
#allow clip_lower to be a possible parameter
clip_lower = kwds.pop('clip_lower')
cdf_raw = self.base_dist.cdf(x, *args, **kwds)
#not needed if equality test is used
## clip_mask = (x == self.clip_lower)
## if np.any(clip_mask):
## clip_prob = self.base_dist.cdf(clip_lower, *args, **kwds)
## pdf_raw[clip_mask] = clip_prob
#the following will be handled by sub-classing rv_continuous
#if self.a is defined
cdf_raw[x < clip_lower] = 0
return cdf_raw
def sf(self, x, *args, **kwds):
if not 'clip_lower' in kwds:
clip_lower = self.clip_lower
else:
#allow clip_lower to be a possible parameter
clip_lower = kwds.pop('clip_lower')
sf_raw = self.base_dist.sf(x, *args, **kwds)
sf_raw[x <= clip_lower] = 1
return sf_raw
def ppf(self, x, *args, **kwds):
raise NotImplementedError
def plot(self, x, *args, **kwds):
clip_lower, kwds = self._get_clip_lower(kwds)
mass = self.pdf(clip_lower, *args, **kwds)
xr = np.concatenate(([clip_lower+1e-6], x[x>clip_lower]))
import matplotlib.pyplot as plt
#x = np.linspace(-4, 4, 21)
#plt.figure()
plt.xlim(clip_lower-0.1, x.max())
#remove duplicate calculation
xpdf = self.pdf(x, *args, **kwds)
plt.ylim(0, max(mass, xpdf.max())*1.1)
plt.plot(xr, self.pdf(xr, *args, **kwds))
#plt.vline(clip_lower, self.pdf(clip_lower, *args, **kwds))
plt.stem([clip_lower], [mass],
linefmt='b-', markerfmt='bo', basefmt='r-')
return
if __name__ == '__main__':
doplots = 1
#*********** Poisson-Normal Mixture
mdist = stats.poisson(2.)
bdist = stats.norm
bd_args_fn = lambda x: ()
#bd_kwds_fn = lambda x: {'loc': np.atleast_2d(10./(1+x))}
bd_kwds_fn = lambda x: {'loc': x, 'scale': 0.1*np.ones_like(x)} #10./(1+x)}
pd = ParametricMixtureD(mdist, bdist, bd_args_fn, bd_kwds_fn)
print(pd.pdf(1))
p, bp = pd.pdf(np.linspace(0,20,21))
pc, bpc = pd.cdf(np.linspace(0,20,21))
print(pd.rvs())
rvs, m = pd.rvs(size=1000)
if doplots:
import matplotlib.pyplot as plt
plt.hist(rvs, bins = 100)
plt.title('poisson mixture of normal distributions')
#********** clipped normal distribution (Tobit)
bdist = stats.norm
clip_lower_ = 0. #-0.5
cnorm = ClippedContinuous(bdist, clip_lower_)
x = np.linspace(1e-8, 4, 11)
print(cnorm.pdf(x))
print(cnorm.cdf(x))
if doplots:
#plt.figure()
#cnorm.plot(x)
plt.figure()
cnorm.plot(x = np.linspace(-1, 4, 51), loc=0.5, scale=np.sqrt(2))
plt.title('clipped normal distribution')
fig = plt.figure()
for i, loc in enumerate([0., 0.5, 1.,2.]):
fig.add_subplot(2,2,i+1)
cnorm.plot(x = np.linspace(-1, 4, 51), loc=loc, scale=np.sqrt(2))
plt.title('clipped normal, loc = %3.2f' % loc)
loc = 1.5
rvs = cnorm.rvs(loc=loc, size=2000)
plt.figure()
plt.hist(rvs, bins=50)
plt.title('clipped normal rvs, loc = %3.2f' % loc)
#plt.show()
|
bsd-3-clause
|
zuku1985/scikit-learn
|
benchmarks/bench_sample_without_replacement.py
|
397
|
8008
|
"""
Benchmarks for sampling without replacement of integer.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import operator
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn.externals.six.moves import xrange
from sklearn.utils.random import sample_without_replacement
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_sample(sampling, n_population, n_samples):
gc.collect()
# start time
t_start = datetime.now()
sampling(n_population, n_samples)
delta = (datetime.now() - t_start)
# stop time
time = compute_time(t_start, delta)
return time
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-population",
dest="n_population", default=100000, type=int,
help="Size of the population to sample from.")
op.add_option("--n-step",
dest="n_steps", default=5, type=int,
help="Number of step interval between 0 and n_population.")
default_algorithms = "custom-tracking-selection,custom-auto," \
"custom-reservoir-sampling,custom-pool,"\
"python-core-sample,numpy-permutation"
op.add_option("--algorithm",
dest="selected_algorithm",
default=default_algorithms,
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. \nAvailable: %default")
# op.add_option("--random-seed",
# dest="random_seed", default=13, type=int,
# help="Seed used by the random number generators.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
selected_algorithm = opts.selected_algorithm.split(',')
for key in selected_algorithm:
if key not in default_algorithms.split(','):
raise ValueError("Unknown sampling algorithm \"%s\" not in (%s)."
% (key, default_algorithms))
###########################################################################
# List sampling algorithm
###########################################################################
# We assume that sampling algorithm has the following signature:
# sample(n_population, n_sample)
#
sampling_algorithm = {}
###########################################################################
# Set Python core input
sampling_algorithm["python-core-sample"] = \
lambda n_population, n_sample: \
random.sample(xrange(n_population), n_sample)
###########################################################################
# Set custom automatic method selection
sampling_algorithm["custom-auto"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="auto",
random_state=random_state)
###########################################################################
# Set custom tracking based method
sampling_algorithm["custom-tracking-selection"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="tracking_selection",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-reservoir-sampling"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="reservoir_sampling",
random_state=random_state)
###########################################################################
    # Set custom pool based method
sampling_algorithm["custom-pool"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="pool",
random_state=random_state)
###########################################################################
# Numpy permutation based
sampling_algorithm["numpy-permutation"] = \
lambda n_population, n_sample: \
np.random.permutation(n_population)[:n_sample]
###########################################################################
# Remove unspecified algorithm
sampling_algorithm = dict((key, value)
for key, value in sampling_algorithm.items()
if key in selected_algorithm)
###########################################################################
# Perform benchmark
###########################################################################
time = {}
n_samples = np.linspace(start=0, stop=opts.n_population,
num=opts.n_steps).astype(np.int)
ratio = n_samples / opts.n_population
print('Benchmarks')
print("===========================")
for name in sorted(sampling_algorithm):
print("Perform benchmarks for %s..." % name, end="")
time[name] = np.zeros(shape=(opts.n_steps, opts.n_times))
for step in xrange(opts.n_steps):
for it in xrange(opts.n_times):
time[name][step, it] = bench_sample(sampling_algorithm[name],
opts.n_population,
n_samples[step])
print("done")
print("Averaging results...", end="")
for name in sampling_algorithm:
time[name] = np.mean(time[name], axis=1)
print("done\n")
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Sampling algorithm performance:")
print("===============================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
fig = plt.figure('scikit-learn sample w/o replacement benchmark results')
plt.title("n_population = %s, n_times = %s" %
(opts.n_population, opts.n_times))
ax = fig.add_subplot(111)
for name in sampling_algorithm:
ax.plot(ratio, time[name], label=name)
ax.set_xlabel('ratio of n_sample / n_population')
ax.set_ylabel('Time (s)')
ax.legend()
# Sort legend labels
handles, labels = ax.get_legend_handles_labels()
hl = sorted(zip(handles, labels), key=operator.itemgetter(1))
handles2, labels2 = zip(*hl)
ax.legend(handles2, labels2, loc=0)
plt.show()
|
bsd-3-clause
|
TomAugspurger/pandas
|
pandas/tests/indexes/categorical/test_reindex.py
|
2
|
2400
|
import numpy as np
from pandas import Categorical, CategoricalIndex, Index
import pandas._testing as tm
class TestReindex:
def test_reindex_dtype(self):
c = CategoricalIndex(["a", "b", "c", "a"])
res, indexer = c.reindex(["a", "c"])
tm.assert_index_equal(res, Index(["a", "a", "c"]), exact=True)
tm.assert_numpy_array_equal(indexer, np.array([0, 3, 2], dtype=np.intp))
c = CategoricalIndex(["a", "b", "c", "a"])
res, indexer = c.reindex(Categorical(["a", "c"]))
exp = CategoricalIndex(["a", "a", "c"], categories=["a", "c"])
tm.assert_index_equal(res, exp, exact=True)
tm.assert_numpy_array_equal(indexer, np.array([0, 3, 2], dtype=np.intp))
c = CategoricalIndex(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
res, indexer = c.reindex(["a", "c"])
exp = Index(["a", "a", "c"], dtype="object")
tm.assert_index_equal(res, exp, exact=True)
tm.assert_numpy_array_equal(indexer, np.array([0, 3, 2], dtype=np.intp))
c = CategoricalIndex(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
res, indexer = c.reindex(Categorical(["a", "c"]))
exp = CategoricalIndex(["a", "a", "c"], categories=["a", "c"])
tm.assert_index_equal(res, exp, exact=True)
tm.assert_numpy_array_equal(indexer, np.array([0, 3, 2], dtype=np.intp))
def test_reindex_duplicate_target(self):
# See GH25459
cat = CategoricalIndex(["a", "b", "c"], categories=["a", "b", "c", "d"])
res, indexer = cat.reindex(["a", "c", "c"])
exp = Index(["a", "c", "c"], dtype="object")
tm.assert_index_equal(res, exp, exact=True)
tm.assert_numpy_array_equal(indexer, np.array([0, 2, 2], dtype=np.intp))
res, indexer = cat.reindex(
CategoricalIndex(["a", "c", "c"], categories=["a", "b", "c", "d"])
)
exp = CategoricalIndex(["a", "c", "c"], categories=["a", "b", "c", "d"])
tm.assert_index_equal(res, exp, exact=True)
tm.assert_numpy_array_equal(indexer, np.array([0, 2, 2], dtype=np.intp))
def test_reindex_empty_index(self):
# See GH16770
c = CategoricalIndex([])
res, indexer = c.reindex(["a", "b"])
tm.assert_index_equal(res, Index(["a", "b"]), exact=True)
tm.assert_numpy_array_equal(indexer, np.array([-1, -1], dtype=np.intp))
|
bsd-3-clause
|
rajikaimal/emma
|
src/predict.py
|
1
|
3069
|
import pandas as pd
import io
import os
import numpy as np
import matplotlib.pyplot as plt
import csv
from sklearn import svm
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.feature_extraction import DictVectorizer
class Predict:
# path to current directory
PATH = os.getcwd()
# train ML model with repository specific data
def train(self, test_vector):
"""
        Trains the classifier with extracted data from the repository commits
using the extractor
"""
with open(self.PATH + '/src/data/train_emma.csv', 'rt') as f:
reader = csv.reader(f)
train_data = dict()
train_data_labels = list()
train_data_list = []
train_data_labels_list = []
next(reader, None)
for row in reader:
for idx in range(len(row)):
if idx == 0:
train_data['file'] = row[idx]
if idx == 1:
train_data['line'] = int(row[idx])
if idx == 2:
train_data['timestamp'] = row[idx]
if idx == 3:
train_data_labels.append(row[idx])
if idx == 4:
train_data_labels.append(row[idx])
train_data_list.append(train_data)
train_data_labels_list.append(train_data_labels)
train_data = dict()
train_data_labels = list()
C = 0.8
dict_vectorizer = DictVectorizer(sparse=False)
train_data_trasformed = dict_vectorizer.fit_transform(train_data_list)
test_vector_transformed = dict_vectorizer.transform(test_vector)
# print(dict_vectorizer.get_feature_names())
# print(dict_vectorizer.inverse_transform(train_data_trasformed))
# print('Inverse transformation !!!')
# print(test_vector)
# inv_trans = dict_vectorizer.inverse_transform(test_vector_transformed)
# fit LinearSVC
# multi label binarizer to convert iterable of iterables into processing format
mlb = MultiLabelBinarizer()
y_enc = mlb.fit_transform(train_data_labels_list)
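        # One-vs-rest wrapper: one binary SVC per label (reviewer), so a single
        # commit can be mapped to several reviewers at once.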
train_vector = OneVsRestClassifier(svm.SVC(probability=True))
classifier_rbf = train_vector.fit(train_data_trasformed, y_enc)
# test_vecc = cnt_vectorizer.fit_transform(X[:, 0])
# # todo use pickle to persist
# test_vector_reshaped = np.array(test_vector.ravel()).reshape((1, -1))
prediction = classifier_rbf.predict(test_vector_transformed)
print("Predicted usernames: \n")
# print(prediction)
# print(mlb.inverse_transform(prediction))
users = self.parse_prediction(mlb.inverse_transform(prediction))
print(users)
return users
def parse_prediction(self, predictions):
"""
Transform list of tuples to list of predicted users
"""
users = list()
print(predictions)
for prediction in predictions:
for email in prediction:
users.append(email)
return users
def visualize(self, X, y):
# visualize commit with ghusernames
plt.scatter(X[:, :1], X[:, 1:], c=y[:, :1], cmap=plt.cm.coolwarm)
plt.xlabel('Commits')
plt.ylabel('Reviewer')
plt.title('GitHub PR reviewer selection')
plt.show()
# pr = Predict()
# pr.train([{'file': 'package', 'line': 31, 'timestamp': '2017-01-16T23:57:20-0600'}])
|
mit
|
phoexer/Kelly
|
scripts/Maths/plotting_functions.py
|
1
|
1139
|
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 19 21:40:35 2017
@author: michael
Simple script that draws the graph of a function
I was figuring this out as I went along, just to see if I can do it.
"""
import matplotlib.pyplot as plt
import numpy as np
import math
#Constants
a = 1
b = 8
c = 5#10
d = 6
#plt.plot([2,4,6,8],[1,2,3,4],'ro')
#plt.ylabel('Some Numbers')
def sinFunc(x):
"""f(x) = sin(x)"""
return math.sin(x)
def quadraticFunc(x):
"""y = ax^2 + bx + c,"""
return a*x**2 + b*x + c
def powerFunc(x):
"""y = ax^c"""
return a*x**c
def polinomialFunc(x):
"""y = x^5 -8x^3 +10x + 6"""
return a*x**5 - b*x**3 + c*x + d
def someFunc(x):
"""y = 1/x"""
return 1/x
x = np.arange(-3., 3., .2)
sy = [someFunc(y) for y in x]
qy = [quadraticFunc(y) for y in x]
py = [powerFunc(y) for y in x]
ply = [polinomialFunc(y) for y in x]
#T = np.matrix(t)
plt.subplot(2,2,1)
plt.plot(x, sy,'r-.')
plt.grid(True)
plt.subplot(2,2,2)
plt.plot(x,qy,'b-.')
plt.grid(True)
plt.subplot(2,2,3)
plt.plot(x,py,'g-.')
plt.grid(True)
plt.subplot(2,2,4)
plt.plot(x,ply,'y-.')
plt.grid(True)
plt.show()
|
mit
|
lthurlow/Network-Grapher
|
proj/external/matplotlib-1.2.1/build/lib.linux-i686-2.7/matplotlib/testing/jpl_units/Duration.py
|
6
|
6600
|
#===========================================================================
#
# Duration
#
#===========================================================================
"""Duration module."""
#===========================================================================
# Place all imports after here.
#
from __future__ import print_function
#
# Place all imports before here.
#===========================================================================
#===========================================================================
class Duration:
"""Class Duration in development.
"""
allowed = [ "ET", "UTC" ]
#-----------------------------------------------------------------------
def __init__( self, frame, seconds ):
"""Create a new Duration object.
= ERROR CONDITIONS
- If the input frame is not in the allowed list, an error is thrown.
= INPUT VARIABLES
- frame The frame of the duration. Must be 'ET' or 'UTC'
- seconds The number of seconds in the Duration.
"""
if frame not in self.allowed:
msg = "Input frame '%s' is not one of the supported frames of %s" \
% ( frame, str( self.allowed ) )
raise ValueError( msg )
self._frame = frame
self._seconds = seconds
#-----------------------------------------------------------------------
def frame( self ):
"""Return the frame the duration is in."""
return self._frame
#-----------------------------------------------------------------------
def __abs__( self ):
"""Return the absolute value of the duration."""
return Duration( self._frame, abs( self._seconds ) )
#-----------------------------------------------------------------------
def __neg__( self ):
"""Return the negative value of this Duration."""
return Duration( self._frame, -self._seconds )
#-----------------------------------------------------------------------
def seconds( self ):
"""Return the number of seconds in the Duration."""
return self._seconds
#-----------------------------------------------------------------------
def __nonzero__( self ):
"""Compare two Durations.
= INPUT VARIABLES
- rhs The Duration to compare against.
= RETURN VALUE
- Returns -1 if self < rhs, 0 if self == rhs, +1 if self > rhs.
"""
return self._seconds != 0
#-----------------------------------------------------------------------
def __cmp__( self, rhs ):
"""Compare two Durations.
= ERROR CONDITIONS
- If the input rhs is not in the same frame, an error is thrown.
= INPUT VARIABLES
- rhs The Duration to compare against.
= RETURN VALUE
- Returns -1 if self < rhs, 0 if self == rhs, +1 if self > rhs.
"""
self.checkSameFrame( rhs, "compare" )
return cmp( self._seconds, rhs._seconds )
#-----------------------------------------------------------------------
def __add__( self, rhs ):
"""Add two Durations.
= ERROR CONDITIONS
- If the input rhs is not in the same frame, an error is thrown.
= INPUT VARIABLES
- rhs The Duration to add.
= RETURN VALUE
- Returns the sum of ourselves and the input Duration.
"""
# Delay-load due to circular dependencies.
import matplotlib.testing.jpl_units as U
if isinstance( rhs, U.Epoch ):
return rhs + self
self.checkSameFrame( rhs, "add" )
return Duration( self._frame, self._seconds + rhs._seconds )
#-----------------------------------------------------------------------
def __sub__( self, rhs ):
"""Subtract two Durations.
= ERROR CONDITIONS
- If the input rhs is not in the same frame, an error is thrown.
= INPUT VARIABLES
- rhs The Duration to subtract.
= RETURN VALUE
- Returns the difference of ourselves and the input Duration.
"""
self.checkSameFrame( rhs, "sub" )
return Duration( self._frame, self._seconds - rhs._seconds )
#-----------------------------------------------------------------------
def __mul__( self, rhs ):
"""Scale a UnitDbl by a value.
= INPUT VARIABLES
- rhs The scalar to multiply by.
= RETURN VALUE
- Returns the scaled Duration.
"""
return Duration( self._frame, self._seconds * float( rhs ) )
#-----------------------------------------------------------------------
def __rmul__( self, lhs ):
"""Scale a Duration by a value.
= INPUT VARIABLES
- lhs The scalar to multiply by.
= RETURN VALUE
- Returns the scaled Duration.
"""
return Duration( self._frame, self._seconds * float( lhs ) )
#-----------------------------------------------------------------------
def __div__( self, rhs ):
"""Divide a Duration by a value.
= INPUT VARIABLES
- rhs The scalar to divide by.
= RETURN VALUE
- Returns the scaled Duration.
"""
return Duration( self._frame, self._seconds / float( rhs ) )
#-----------------------------------------------------------------------
def __rdiv__( self, rhs ):
"""Divide a Duration by a value.
= INPUT VARIABLES
- rhs The scalar to divide by.
= RETURN VALUE
- Returns the scaled Duration.
"""
return Duration( self._frame, float( rhs ) / self._seconds )
#-----------------------------------------------------------------------
def __str__( self ):
"""Print the Duration."""
return "%g %s" % ( self._seconds, self._frame )
#-----------------------------------------------------------------------
def __repr__( self ):
"""Print the Duration."""
return "Duration( '%s', %g )" % ( self._frame, self._seconds )
#-----------------------------------------------------------------------
def checkSameFrame( self, rhs, func ):
"""Check to see if frames are the same.
= ERROR CONDITIONS
- If the frame of the rhs Duration is not the same as our frame,
an error is thrown.
= INPUT VARIABLES
- rhs The Duration to check for the same frame
- func The name of the function doing the check.
"""
if self._frame != rhs._frame:
msg = "Cannot %s Duration's with different frames.\n" \
"LHS: %s\n" \
"RHS: %s" % ( func, self._frame, rhs._frame )
raise ValueError( msg )
#===========================================================================
|
mit
|
diegocavalca/Studies
|
phd-thesis/neuralnilm/neuralnilm/data/syntheticaggregatesource.py
|
4
|
3659
|
from __future__ import print_function, division
import numpy as np
import pandas as pd
from neuralnilm.data.source import Sequence
from neuralnilm.data.activationssource import ActivationsSource
import logging
logger = logging.getLogger(__name__)
class SyntheticAggregateSource(ActivationsSource):
def __init__(self, activations, target_appliance, seq_length,
sample_period,
distractor_inclusion_prob=0.25,
target_inclusion_prob=0.5,
uniform_prob_of_selecting_each_building=True,
allow_incomplete_target=True,
allow_incomplete_distractors=True,
include_incomplete_target_in_output=True,
rng_seed=None):
self.activations = activations
self.target_appliance = target_appliance
self.seq_length = seq_length
self.sample_period = sample_period
self.distractor_inclusion_prob = distractor_inclusion_prob
self.target_inclusion_prob = target_inclusion_prob
self.uniform_prob_of_selecting_each_building = (
uniform_prob_of_selecting_each_building)
self.allow_incomplete_target = allow_incomplete_target
self.allow_incomplete_distractors = allow_incomplete_distractors
self.include_incomplete_target_in_output = (
include_incomplete_target_in_output)
super(SyntheticAggregateSource, self).__init__(rng_seed=rng_seed)
def _get_sequence(self, fold='train', enable_all_appliances=False):
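        # Build one synthetic aggregate window: optionally position a single
        # activation of the target appliance (with prob. target_inclusion_prob),
        # then add each distractor appliance independently with prob.
        # distractor_inclusion_prob.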
seq = Sequence(self.seq_length)
all_appliances = {}
# Target appliance
if self.rng.binomial(n=1, p=self.target_inclusion_prob):
building_name = self._select_building(fold, self.target_appliance)
activations = (
self.activations[fold][self.target_appliance][building_name])
activation_i = self._select_activation(activations)
activation = activations[activation_i]
positioned_activation, is_complete = self._position_activation(
activation, is_target_appliance=True)
positioned_activation = positioned_activation.values
seq.input += positioned_activation
if enable_all_appliances:
all_appliances[self.target_appliance] = positioned_activation
if is_complete or self.include_incomplete_target_in_output:
seq.target += positioned_activation
# Distractor appliances
distractor_appliances = [
appliance for appliance in self._distractor_appliances(fold)
if self.rng.binomial(n=1, p=self.distractor_inclusion_prob)]
for appliance in distractor_appliances:
building_name = self._select_building(fold, appliance)
activations = self.activations[fold][appliance][building_name]
activation_i = self._select_activation(activations)
activation = activations[activation_i]
positioned_activation, is_complete = self._position_activation(
activation, is_target_appliance=False)
positioned_activation = positioned_activation.values
seq.input += positioned_activation
if enable_all_appliances:
all_appliances[appliance] = positioned_activation
seq.input = seq.input[:, np.newaxis]
seq.target = seq.target[:, np.newaxis]
assert len(seq.input) == self.seq_length
assert len(seq.target) == self.seq_length
if enable_all_appliances:
seq.all_appliances = pd.DataFrame(all_appliances)
return seq
|
cc0-1.0
|
LucaDiStasio/thinPlyMechanics
|
python/reportData.py
|
1
|
257251
|
#!/usr/bin/env Python
# -*- coding: utf-8 -*-
'''
=====================================================================================
Copyright (c) 2016 - 2019 Université de Lorraine & Luleå tekniska universitet
Author: Luca Di Stasio <[email protected]>
<[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
=====================================================================================
DESCRIPTION
Tested with Python 2.7 Anaconda 2.4.1 (64-bit) distribution in Windows 10.
'''
import sys
import os
from os.path import isfile, join, exists
from shutil import copyfile
from os import listdir, stat, makedirs
from datetime import datetime
from time import strftime
from platform import platform, system
import subprocess
from subprocess import call
import matplotlib as plt
import numpy as np
import xlsxwriter
import ast
import getopt
#===============================================================================#
# Latex files
#===============================================================================#
def createLatexFile(folder,filename,documentclass,options=''):
if not exists(folder):
makedirs(folder)
with open(join(folder,filename + '.tex'),'w') as tex:
if options!='':
tex.write('\\documentclass[' + options + ']{' + documentclass + '}\n')
else:
tex.write('\\documentclass{' + documentclass + '}\n')
tex.write('\n')
def writeLatexPackages(folder,filename,packages,options):
with open(join(folder,filename + '.tex'),'a') as tex:
tex.write('%----------------------------------------------------------------------------------------------%\n')
tex.write('% Packages and basic declarations\n')
tex.write('%----------------------------------------------------------------------------------------------%\n')
tex.write('\n')
for i,package in enumerate(packages):
if options[i]!='':
tex.write('\\usepackage[' + options[i] + ']{' + package + '}\n')
else:
tex.write('\\usepackage{' + package + '}\n')
tex.write('\n')
def writeLatexDocumentStarts(folder,filename):
with open(join(folder,filename + '.tex'),'a') as tex:
tex.write('\n')
tex.write('%----------------------------------------------------------------------------------------------%\n')
tex.write('%----------------------------------------------------------------------------------------------%\n')
tex.write('% DOCUMENT STARTS\n')
tex.write('%----------------------------------------------------------------------------------------------%\n')
tex.write('%----------------------------------------------------------------------------------------------%\n')
tex.write('\n')
tex.write('\\begin{document}\n')
tex.write('\n')
def writeLatexDocumentEnds(folder,filename):
with open(join(folder,filename + '.tex'),'a') as tex:
tex.write('\n')
tex.write('\\end{document}\n')
tex.write('\n')
tex.write('%----------------------------------------------------------------------------------------------%\n')
tex.write('%----------------------------------------------------------------------------------------------%\n')
tex.write('% DOCUMENT ENDS\n')
tex.write('%----------------------------------------------------------------------------------------------%\n')
tex.write('%----------------------------------------------------------------------------------------------%\n')
tex.write('\n')
def writeLatexTikzPicStarts(folder,filename,options=''):
with open(join(folder,filename + '.tex'),'a') as tex:
tex.write('\n')
tex.write('%Tikz picture starts%\n')
tex.write('\n')
if options!='':
tex.write('\\begin{tikzpicture}[' + options + ']\n')
else:
tex.write('\\begin{tikzpicture}\n')
def writeLatexTikzPicEnds(folder,filename):
with open(join(folder,filename + '.tex'),'a') as tex:
tex.write('\n')
tex.write('\\end{tikzpicture}\n')
tex.write('%Tikz picture ends%\n')
tex.write('\n')
def writeLatexTikzAxisStarts(folder,filename,options):
with open(join(folder,filename + '.tex'),'a') as tex:
tex.write('\n')
tex.write('%Tikz axis starts%\n')
tex.write('\n')
if options!='':
tex.write('\\begin{axis}[' + options + ']\n')
else:
tex.write('\\begin{axis}\n')
def writeLatexTikzAxisEnds(folder,filename):
with open(join(folder,filename + '.tex'),'a') as tex:
tex.write('\n')
tex.write('\\end{axis}\n')
tex.write('%Tikz axis ends%\n')
tex.write('\n')
def writeLatexAddPlotTable(folder,filename,data,options):
with open(join(folder,filename + '.tex'),'a') as tex:
tex.write('\n')
tex.write('\\addplot')
if options!='':
tex.write('[' + options + ']\n')
tex.write('table{\n')
for element in data:
tex.write(str(element[0]) + ' ' + str(element[1]) + '\n')
tex.write('};\n')
def writeLatexSinglePlot(folder,filename,data,axoptions,dataoptions):
    print('In function: writeLatexSinglePlot(folder,filename,data,axoptions,dataoptions)')
print('Create latex file')
createLatexFile(folder,filename,'standalone')
print('Write latex packages')
writeLatexPackages(folder,filename,['inputenc','pgfplots','tikz'],['utf8','',''])
print('Document starts')
writeLatexDocumentStarts(folder,filename)
writeLatexTikzPicStarts(folder,filename,'')
writeLatexTikzAxisStarts(folder,filename,axoptions)
writeLatexAddPlotTable(folder,filename,data,dataoptions)
writeLatexTikzAxisEnds(folder,filename)
writeLatexTikzPicEnds(folder,filename)
print('Document ends')
writeLatexDocumentEnds(folder,filename)
if 'Windows' in system():
print('Create Windows command file')
cmdfile = join(folder,filename,'runlatex.cmd')
with open(cmdfile,'w') as cmd:
cmd.write('\n')
cmd.write('CD ' + folder + '\n')
cmd.write('\n')
cmd.write('pdflatex ' + join(folder,filename + '.tex') + ' -job-name=' + filename + '\n')
print('Executing Windows command file...')
try:
subprocess.call('cmd.exe /C ' + cmdfile)
print('... done.')
        except Exception,error:
print('ERROR')
print(str(Exception))
print(str(error))
sys.exc_clear()
elif 'Linux' in system():
print('Create Linux bash file')
bashfile = join(folder,filename,'runlatex.sh')
with open(bashfile,'w') as bsh:
bsh.write('#!/bin/bash\n')
bsh.write('\n')
bsh.write('cd ' + folder + '\n')
bsh.write('\n')
bsh.write('pdflatex ' + join(folder,filename + '.tex') + ' -job-name=' + filename + '\n')
print('Executing Linux bash file...')
try:
print('Change permissions to ' + bashfile)
os.chmod(bashfile, 0o755)
print('Run bash file')
rc = call('.' + bashfile)
print('... done.')
        except Exception,error:
print('ERROR')
print(str(Exception))
print(str(error))
sys.exc_clear()
def writeLatexMultiplePlots(folder,filename,data,axoptions,dataoptions):
    print('In function: writeLatexMultiplePlots(folder,filename,data,axoptions,dataoptions)')
print('Create latex file')
createLatexFile(folder,filename,'standalone')
print('Write latex packages')
writeLatexPackages(folder,filename,['inputenc','pgfplots','tikz'],['utf8','',''])
print('Document starts')
writeLatexDocumentStarts(folder,filename)
writeLatexTikzPicStarts(folder,filename,'')
writeLatexTikzAxisStarts(folder,filename,axoptions)
for k,datum in enumerate(data):
writeLatexAddPlotTable(folder,filename,datum,dataoptions[k])
writeLatexTikzAxisEnds(folder,filename)
writeLatexTikzPicEnds(folder,filename)
print('Document ends')
writeLatexDocumentEnds(folder,filename)
if 'Windows' in system():
print('Create Windows command file')
cmdfile = join(folder,'runlatex.cmd')
with open(cmdfile,'w') as cmd:
cmd.write('\n')
cmd.write('CD ' + folder + '\n')
cmd.write('\n')
cmd.write('pdflatex ' + join(folder,filename + '.tex') + ' -job-name=' + filename + '\n')
print('Executing Windows command file...')
try:
subprocess.call('cmd.exe /C ' + cmdfile)
print('... done.')
except Exception as error:
print('ERROR')
print(str(Exception))
print(str(error))
sys.exc_clear()
elif 'Linux' in system():
print('Create Linux bash file')
bashfile = join(folder,'runlatex.sh')
with open(bashfile,'w') as bsh:
bsh.write('#!/bin/bash\n')
bsh.write('\n')
bsh.write('cd ' + folder + '\n')
bsh.write('\n')
bsh.write('pdflatex ' + join(folder,filename + '.tex') + ' -job-name=' + filename + '\n')
print('Executing Linux bash file...')
try:
print('Change permissions to ' + bashfile)
os.chmod(bashfile, 0o755)
print('Run bash file')
rc = call(bashfile)
print('... done.')
except Exception as error:
print('ERROR')
print(str(Exception))
print(str(error))
sys.exc_clear()
def writeLatexGenericCommand(folder,filename,command,options,arguments):
with open(join(folder,filename + '.tex'),'a') as tex:
if options!='' and arguments!='':
tex.write('\\'+ command +'[' + options + ']{' + arguments + '}\n')
elif arguments!='':
tex.write('\\'+ command +'{' + arguments + '}\n')
else:
tex.write('\\'+ command + '\n')
tex.write('\n')
def writeLatexCustomLine(folder,filename,line):
with open(join(folder,filename + '.tex'),'a') as tex:
tex.write(line + '\n')
def writeLatexSetLength(folder,filename,length,value):
with open(join(folder,filename + '.tex'),'a') as tex:
tex.write('\\setlength' +'{' + '\\' + length + '}' +'{' + value + '}\n')
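# Example usage of the LaTeX helpers above (a minimal sketch; the 'results'
# folder and 'myplot' filename are hypothetical, not taken from the data):
#   data = [[0.0, 0.0], [0.5, 0.25], [1.0, 1.0]]
#   writeLatexSinglePlot('results', 'myplot', data,
#                        'xlabel={x}, ylabel={y}', 'color=blue, mark=*')
# This produces results/myplot.tex and, if pdflatex is available on the
# system, compiles it to results/myplot.pdf via the generated runlatex script.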
#===============================================================================#
# Reference data
#===============================================================================#
def provideBEMdata():
G0 = 7.52548E-06
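# Reference BEM solution: each row of normGs is
# [deltatheta [deg], GI/G0 [-], GII/G0 [-], GTOT/G0 [-]],
# matching the column headers written to the 'BEM-Data' worksheet below.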
normGs = np.array([[10.,1.39E-01,3.76E-02,1.76E-01],
[20.,1.93E-01,1.47E-01,3.40E-01],
[30.,1.64E-01,3.02E-01,4.66E-01],
[40.,9.80E-02,4.86E-01,5.84E-01],
[50.,3.05E-02,6.16E-01,6.47E-01],
[60.,1.27E-03,6.66E-01,6.67E-01],
[70.,-4.79E-05,6.44E-01,6.44E-01],
[80.,6.85E-05,5.79E-01,5.79E-01],
[90.,1.12E-04,4.70E-01,4.70E-01],
[100.,1.12E-04,3.37E-01,3.37E-01],
[110.,8.95E-04,2.08E-01,2.09E-01],
[120.,6.07E-03,1.05E-01,1.11E-01],
[130.,2.29E-03,3.89E-02,4.12E-02],
[140.,5.52E-04,7.92E-03,8.47E-03],
[150.,3.06E-04,1.65E-04,4.71E-04]])
BEMdata = {}
BEMdata['G0'] = G0
BEMdata['normGs'] = normGs
return BEMdata
def provideMatrixProperties():
props = {}
props['E'] = 3500.0
props['nu'] = 0.4
props['G'] = 0.5*props['E']/(1+props['nu'])
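# Kolosov constant for plane strain: kappa = 3 - 4*nu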
props['k-planestrain'] = 3-4*props['nu']
return props
def provideGFiberProperties():
props = {}
props['E'] = 70000.0
props['nu'] = 0.2
props['G'] = 0.5*props['E']/(1+props['nu'])
props['k-planestrain'] = 3-4*props['nu']
return props
def computePlyTransverseModulus(Vff,Ef,Em):
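# Inverse rule of mixtures for the ply transverse modulus:
# E2 = 1 / (Vff/Ef + (1 - Vff)/Em)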
return 1.0/(Vff/Ef + (1.0-Vff)/Em)
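# e.g. E2 = computePlyTransverseModulus(0.6, provideGFiberProperties()['E'],
#                                       provideMatrixProperties()['E'])
# (0.6 is an illustrative fiber volume fraction, not a value taken from the data)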
def main(argv):
# Read the command line, throw error if not option is provided
try:
opts, args = getopt.getopt(argv,'hw:i:o:f:eld',['help','Help',"workdir", "workdirectory", "wdir","inputfile", "input","out","outdir","outputfile","xlsx","outfile","excel","latex","sql"])
except getopt.GetoptError:
print('reportDataToXlsx.py -w <working directory> -i <input file> -o <output directory> -f <output filename> --excel --latex --sql')
sys.exit(2)
# Parse the options and create corresponding variables
for opt, arg in opts:
if opt in ('-h', '--help','--Help'):
print(' ')
print(' ')
print('*****************************************************************************************************')
print(' ')
print(' GATHER DATA AND CREATE REPORTS\n')
print(' ')
print(' by')
print(' ')
print(' Luca Di Stasio, 2016-2018')
print(' ')
print(' ')
print('*****************************************************************************************************')
print(' ')
print('Program syntax:')
print('reportDataToXlsx.py -w <working directory> -i <input file> -o <output directory> -f <output filename> --excel --latex --sql')
print(' ')
print('Mandatory arguments:')
print('-w <working directory>')
print('-i <input file>')
print('at least one out of: -e --excel/-l --latex/-d --sql')
print(' ')
print('Optional arguments:')
print('-o <output directory>')
print('-f <output filename>')
print(' ')
print('Default values:')
print('-o <output directory> ===> working directory')
print('-f <output filename> ===> Y-m-d_H-M-S_<input file name>.xlsx')
print(' ')
print(' ')
print(' ')
sys.exit()
elif opt in ("-w", "--workdir", "--workdirectory", "--wdir"):
if arg[-1] != '/':
workdir = arg
else:
workdir = arg[:-1]
elif opt in ("-i", "--inputfile", "--input"):
inputfile = arg.split(".")[0] + '.csv'
elif opt in ("-o", "--out","--outdir"):
if arg[-1] != '/':
outdir = arg
else:
outdir = arg[:-1]
elif opt in ("-f", "--outputfile","--xlsx","--outfile"):
outputfileBasename = arg.split(".")[0]
elif opt in ("-e", "--excel"):
toExcel = True
elif opt in ("-l", "--latex"):
toLatex = True
elif opt in ("-d", "--sql"):
toSql = True
# Check the existence of variables: if a required variable is missing, an error is thrown and program is terminated; if an optional variable is missing, it is set to the default value
if 'workdir' not in locals():
print('Error: working directory not provided.')
sys.exit()
if 'inputfile' not in locals():
print('Error: file list not provided.')
sys.exit()
if 'outputfileBasename' not in locals():
outputfileBasename = datetime.now().strftime('%Y-%m-%d') + '_' + datetime.now().strftime('%H-%M-%S') + '_' + inputfile.split(".")[0]
if 'outdir' not in locals():
outdir = workdir
if 'toExcel' not in locals() and 'toLatex' not in locals() and 'toSql' not in locals():
print('Error: no output format specified.')
sys.exit()
if 'toExcel' not in locals():
toExcel = False
if 'toLatex' not in locals():
toLatex = False
if 'toSql' not in locals():
toSql = False
bemData = provideBEMdata()
print('Reading file ' + join(workdir,inputfile) + ' ...')
try:
with open(join(workdir,inputfile),'r') as csv:
lines = csv.readlines()
print(' Number of lines: ' + str(len(lines)))
print('...done.')
except Exception as error:
print('EXCEPTION ENCOUNTERED')
print(str(Exception))
print(str(error))
sys.exit(2)
print('Extracting names of subfolders ...')
try:
subfoldersList = []
for l,line in enumerate(lines[1:]):
currentSubfolder = '/'.join(line.replace('\n','').split(',')[0].replace('\\','/').split('/')[:-1])
if currentSubfolder != workdir:
if (len(subfoldersList)>0 and currentSubfolder != subfoldersList[-1]) or len(subfoldersList)==0:
print(' ' + '-- ' + str(currentSubfolder))
subfoldersList.append(currentSubfolder)
print('...done.')
except Exception as error:
print('EXCEPTION ENCOUNTERED')
print(str(Exception))
print(str(error))
sys.exit(2)
print('Check if .dat files are present ...')
try:
isDatPresent = False
for fileName in listdir(subfoldersList[0]):
if '.dat' in fileName:
isDatPresent = True
print(' .dat files are present,')
print(' analysis of path data needs to be performed')
break
if not isDatPresent:
print(' .dat files are not present.')
print('...done.')
except Exception as error:
print('EXCEPTION ENCOUNTERED')
print(str(Exception))
print(str(error))
sys.exit(2)
if toExcel:
print('Open workbook ' + join(outdir,outputfileBasename + '.xlsx'))
workbook = xlsxwriter.Workbook(join(outdir,outputfileBasename + '.xlsx'),{'nan_inf_to_errors': True})
print('Set string and number format')
stringFormat = workbook.add_format({'bold': 1})
numberFormat = workbook.add_format({'num_format': '0.000000'})
bemdataSheetname = 'BEM-Data'
print('Create sheet for BEM data: ' + bemdataSheetname )
worksheet = workbook.add_worksheet(bemdataSheetname)
print('Fill in values of BEM data...')
worksheet.write(0,0,'deltatheta [deg]',stringFormat)
worksheet.write(0,1,'GI/G0 [-]',stringFormat)
worksheet.write(0,2,'GII/G0 [-]',stringFormat)
worksheet.write(0,3,'GTOT/G0 [-]',stringFormat)
for r,row in enumerate(bemData['normGs']):
for c,value in enumerate(row):
worksheet.write(r+1,c,value,numberFormat)
print('...done.')
print('Creating sheets for results ...')
for line in lines[1:]:
csvPath = line.replace('\n','').split(',')[0]
try:
with open(csvPath,'r') as csv:
csvlines = csv.readlines()
except Exception as error:
sys.exc_clear()
continue
sheetName = line.replace('\n','').split(',')[1].replace('deltatheta','')
print(' Create sheet ' + sheetName)
toPlot = line.replace('\n','').split(',')[2].strip().lower() in ('true','1','yes')
plotSettings = []
if toPlot:
settingsString = ','.join(line.replace('\n','').split(',')[3:])
plotSettings = ast.literal_eval(settingsString[1:])
worksheet = workbook.add_worksheet(sheetName.decode('utf-8'))
for e,element in enumerate(csvlines[0].replace('\n','').split(',')):
worksheet.write(0,e,element,stringFormat)
for c,csvline in enumerate(csvlines[1:]):
for e,element in enumerate(csvline.replace('\n','').split(',')):
try:
if 'phiCZ' in csvlines[0].replace('\n','').split(',')[e]:
worksheet.write(c+1,e,float(element)*180.0/np.pi,numberFormat)
else:
worksheet.write(c+1,e,float(element),numberFormat)
except Exception as error:
worksheet.write(c+1,e,str(element).decode('utf-8'),numberFormat)
sys.exc_clear()
for p,plot in enumerate(plotSettings):
print(' Create plot ' + plot[-1] + ' in sheet ' + sheetName)
chart = workbook.add_chart({'type': 'scatter',
'subtype': 'smooth_with_markers'})
isGIinplot = False
isGIIinplot = False
isGTOTinplot = False
for curve in plot[:-3]:
if 'GTOT' in csvlines[0].replace('\n','').split(',')[curve[1]]:
isGTOTinplot = True
elif 'GII' in csvlines[0].replace('\n','').split(',')[curve[1]]:
isGIIinplot = True
elif 'GI' in csvlines[0].replace('\n','').split(',')[curve[1]]:
isGIinplot = True
chart.add_series({
'name': curve[2].decode('utf-8'),
'categories': [sheetName,1,curve[0],len(csvlines),curve[0]],
'values': [sheetName,1,curve[1],len(csvlines),curve[1]],
})
if isGIinplot:
chart.add_series({
'name': 'GI-BEM',
'categories': [bemdataSheetname,1,0,15,0],
'values': [bemdataSheetname,1,1,15,1],
})
if isGIIinplot:
chart.add_series({
'name': 'GII-BEM',
'categories': [bemdataSheetname,1,0,15,0],
'values': [bemdataSheetname,1,2,15,2],
})
if isGTOTinplot:
chart.add_series({
'name': 'GTOT-BEM',
'categories': [bemdataSheetname,1,0,15,0],
'values': [bemdataSheetname,1,3,15,3],
})
chart.set_title ({'name': plot[-1].decode('utf-8')})
chart.set_x_axis({'name': plot[-3].decode('utf-8')})
chart.set_y_axis({'name': plot[-2].decode('utf-8')})
worksheet.insert_chart(len(csvlines)+10,10*p, chart)
print('...done.')
workbook.close()
print('Workbook closed.')
if isDatPresent:
print('Analysis of path data ...')
print(' ')
print('----------------->')
print(' Open workbook ' + join(outdir,outputfileBasename + '-radialpathsData' + '.xlsx'))
radialpathsWorkbook = xlsxwriter.Workbook(join(outdir,outputfileBasename + '-radialpathsData' + '.xlsx'),{'nan_inf_to_errors': True})
print(' Set string and number format')
radialpathsstringFormat = radialpathsWorkbook.add_format({'bold': 1})
radialpathsnumberFormat = radialpathsWorkbook.add_format({'num_format': '0.000000'})
radialpathsnumberFormatReduced = radialpathsWorkbook.add_format({'num_format': '0.00'})
print(' ')
print(' Open workbook ' + join(outdir,outputfileBasename + '-circumferentialpathsData' + '.xlsx'))
circumferentialpathsWorkbook = xlsxwriter.Workbook(join(outdir,outputfileBasename + '-circumferentialpathsData' + '.xlsx'),{'nan_inf_to_errors': True})
print(' Set string and number format')
circumferentialpathsstringFormat = circumferentialpathsWorkbook.add_format({'bold': 1})
circumferentialpathsnumberFormat = circumferentialpathsWorkbook.add_format({'num_format': '0.000000'})
circumferentialpathsnumberFormatReduced = circumferentialpathsWorkbook.add_format({'num_format': '0.00'})
print(' ')
print(' Open workbook ' + join(outdir,outputfileBasename + '-horizontalpathsData' + '.xlsx'))
horizontalpathsWorkbook = xlsxwriter.Workbook(join(outdir,outputfileBasename + '-horizontalpathsData' + '.xlsx'),{'nan_inf_to_errors': True})
print(' Set string and number format')
horizontalpathsstringFormat = horizontalpathsWorkbook.add_format({'bold': 1})
horizontalpathsnumberFormat = horizontalpathsWorkbook.add_format({'num_format': '0.000000'})
horizontalpathsnumberFormatReduced = horizontalpathsWorkbook.add_format({'num_format': '0.00'})
print(' ')
print(' Open workbook ' + join(outdir,outputfileBasename + '-verticalpathsData' + '.xlsx'))
verticalpathsWorkbook = xlsxwriter.Workbook(join(outdir,outputfileBasename + '-verticalpathsData' + '.xlsx'),{'nan_inf_to_errors': True})
print(' Set string and number format')
verticalpathsstringFormat = verticalpathsWorkbook.add_format({'bold': 1})
verticalpathsnumberFormat = verticalpathsWorkbook.add_format({'num_format': '0.000000'})
verticalpathsnumberFormatReduced = verticalpathsWorkbook.add_format({'num_format': '0.00'})
print(' Open workbook ' + join(outdir,outputfileBasename + '-radialpathsStrainData' + '.xlsx'))
radialpathsStrainWorkbook = xlsxwriter.Workbook(join(outdir,outputfileBasename + '-radialpathsStrainData' + '.xlsx'),{'nan_inf_to_errors': True})
print(' Set string and number format')
radialpathsStrainstringFormat = radialpathsStrainWorkbook.add_format({'bold': 1})
radialpathsStrainnumberFormat = radialpathsStrainWorkbook.add_format({'num_format': '0.000000'})
radialpathsStrainnumberFormatReduced = radialpathsStrainWorkbook.add_format({'num_format': '0.00'})
print(' ')
print(' Open workbook ' + join(outdir,outputfileBasename + '-circumferentialpathsStrainData' + '.xlsx'))
circumferentialpathsStrainWorkbook = xlsxwriter.Workbook(join(outdir,outputfileBasename + '-circumferentialpathsStrainData' + '.xlsx'),{'nan_inf_to_errors': True})
print(' Set string and number format')
circumferentialpathsStrainstringFormat = circumferentialpathsStrainWorkbook.add_format({'bold': 1})
circumferentialpathsStrainnumberFormat = circumferentialpathsStrainWorkbook.add_format({'num_format': '0.000000'})
circumferentialpathsStrainnumberFormatReduced = circumferentialpathsStrainWorkbook.add_format({'num_format': '0.00'})
print(' ')
print(' Open workbook ' + join(outdir,outputfileBasename + '-horizontalpathsStrainData' + '.xlsx'))
horizontalpathsStrainWorkbook = xlsxwriter.Workbook(join(outdir,outputfileBasename + '-horizontalpathsStrainData' + '.xlsx'),{'nan_inf_to_errors': True})
print(' Set string and number format')
horizontalpathsStrainstringFormat = horizontalpathsStrainWorkbook.add_format({'bold': 1})
horizontalpathsStrainnumberFormat = horizontalpathsStrainWorkbook.add_format({'num_format': '0.000000'})
horizontalpathsStrainnumberFormatReduced = horizontalpathsStrainWorkbook.add_format({'num_format': '0.00'})
print(' ')
print(' Open workbook ' + join(outdir,outputfileBasename + '-verticalpathsStrainData' + '.xlsx'))
verticalpathsStrainWorkbook = xlsxwriter.Workbook(join(outdir,outputfileBasename + '-verticalpathsStrainData' + '.xlsx'),{'nan_inf_to_errors': True})
print(' Set string and number format')
verticalpathsStrainstringFormat = verticalpathsStrainWorkbook.add_format({'bold': 1})
verticalpathsStrainnumberFormat = verticalpathsStrainWorkbook.add_format({'num_format': '0.000000'})
verticalpathsStrainnumberFormatReduced = verticalpathsStrainWorkbook.add_format({'num_format': '0.00'})
print('<-----------------')
print(' ')
radialpathsSheetnames = []
radialpathsStrainSheetnames = []
numberOfRadialpaths = []
numberOfRadialpathsStrain = []
radialpathsDatalengths = []
radialpathsStrainDatalengths = []
circumferentialpathsSheetnames = []
circumferentialpathsStrainSheetnames = []
numberOfCircumferentialpaths = []
circumferentialpathsDatalengths = []
circumferentialpathsStrainDatalengths = []
horizontalpathsSheetnames = []
horizontalpathsStrainSheetnames = []
numberOfHorizontalpaths = []
horizontalpathsDatalengths = []
horizontalpathsStrainDatalengths = []
verticalpathsSheetnames = []
verticalpathsStrainSheetnames = []
numberOfVerticalpaths = []
verticalpathsDatalengths = []
verticalpathsStrainDatalengths = []
for subFolder in subfoldersList:
radialpathsSummary = join(subFolder,subFolder.split('/')[-1] + '-stressesradialpaths' + '.csv')
circumferentialpathsSummary = join(subFolder,subFolder.split('/')[-1] + '-stressescircumferentialpaths' + '.csv')
horizontalpathsSummary = join(subFolder,subFolder.split('/')[-1] + '-stresseshorizontalpaths' + '.csv')
verticalpathsSummary = join(subFolder,subFolder.split('/')[-1] + '-stressesverticalpaths' + '.csv')
radialpathsStrainSummary = join(subFolder,subFolder.split('/')[-1] + '-strainsradialpaths' + '.csv')
circumferentialpathsStrainSummary = join(subFolder,subFolder.split('/')[-1] + '-strainscircumferentialpaths' + '.csv')
horizontalpathsStrainSummary = join(subFolder,subFolder.split('/')[-1] + '-strainshorizontalpaths' + '.csv')
verticalpathsStrainSummary = join(subFolder,subFolder.split('/')[-1] + '-strainsverticalpaths' + '.csv')
if subFolder.split('/')[-1] + '-stressesradialpaths' + '.csv' in listdir(subFolder):
print('----------------->')
print('----------------->')
print(' Analysis of radial paths for folder ' + subFolder)
print(' ')
with open(radialpathsSummary,'r') as csv:
lines = csv.readlines()
Sxx = []
Syy = []
Szz = []
Sxy = []
Szx = []
Syz = []
Srr = []
Stt = []
Srt = []
S1D3 = []
S2D3 = []
S3D3 = []
I1D3 = []
I2D3 = []
I3D3 = []
SMisesD3 = []
SaverD3 = []
S1D2 = []
S2D2 = []
I1D2 = []
I2D2 = []
SMisesD2 = []
SaverD2 = []
pathVariables = []
pathStartVariables = []
pathEndVariables = []
pathCoords = []
pathNormCoords = []
for line in lines[1:]:
stressComp = line.replace('\n','').replace(' ','').split(',')[0]
pathVariable = float(line.replace('\n','').replace(' ','').split(',')[1])
pathStartVariable = float(line.replace('\n','').replace(' ','').split(',')[2])
pathEndVariable = float(line.replace('\n','').replace(' ','').split(',')[3])
datfilePath = join(subFolder,line.replace('\n','').replace(' ','').split(',')[-1])
print(' Reading component ' + stressComp)
print(' ' + 'for radial path at ' + str(pathVariable) + ' deg')
print(' ' + 'starting at ' + str(pathStartVariable) + ' mum')
print('                    ' + 'ending at ' + str(pathEndVariable) + ' mum')
print(' ')
with open(datfilePath,'r') as dat:
datLines = dat.readlines()
print(' --> File read')
print(' ')
currentxyData = []
for datLine in datLines:
if len(datLine.replace('\n','').replace(' ',''))>0 and 'X' not in datLine:
lineParts = datLine.replace('\n','').split(' ')
rowVec = []
for linePart in lineParts:
if linePart!='':
rowVec.append(float(linePart))
currentxyData.append(rowVec)
print(' --> Lines parsed')
print(' ')
normxData = []
xData = []
yData = []
for xyPair in currentxyData:
normxData.append(xyPair[0])
xData.append(pathStartVariable+(pathEndVariable-pathStartVariable)*xyPair[0])
yData.append(xyPair[1])
print(' --> Data categorized in independent and dependent variables.')
print(' ')
if 'S11' in stressComp:
Sxx.append(yData)
pathCoords.append(xData)
pathNormCoords.append(normxData)
pathVariables.append(pathVariable)
pathStartVariables.append(pathStartVariable)
pathEndVariables.append(pathEndVariable)
print(' --> Stress component is S11.')
print(' ')
elif 'S22' in stressComp:
Syy.append(yData)
print(' --> Stress component is S22.')
print(' ')
elif 'S23' in stressComp:
Syz.append(yData)
print(' --> Stress component is S23.')
print(' ')
elif 'S12' in stressComp:
Sxy.append(yData)
print(' --> Stress component is S12.')
print(' ')
elif 'S13' in stressComp:
Szx.append(yData)
print(' --> Stress component is S13.')
print(' ')
elif 'S33' in stressComp:
Szz.append(yData)
Szx.append(0.0)
Syz.append(0.0)
print(' --> Stress component is S33.')
print(' ')
currentSxx = Sxx[-1]
currentSyy = Syy[-1]
currentSzz = yData
currentSxy = Sxy[-1]
currentSzx = 0.0
currentSyz = 0.0
currentSrr = []
currentStt = []
currentSrt = []
current3DS1 = []
current3DS2 = []
current3DS3 = []
current3DI1 = []
current3DI2 = []
current3DI3 = []
current3DSMises = []
current3DSaver = []
current2DS1 = []
current2DS2 = []
current2DI1 = []
current2DI2 = []
current2DSMises = []
current2DSaver = []
rotateBy = pathVariable*np.pi/180.0
cosRot = np.cos(rotateBy)
sinRot = np.sin(rotateBy)
nstressPoints = np.min([len(currentSxx),len(currentSyy),len(currentSzz),len(currentSxy)])
for s in range(0,nstressPoints):
sxx = currentSxx[s]
syy = currentSyy[s]
szz = currentSzz[s]
sxy = currentSxy[s]
szx = 0.0
syz = 0.0
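# In-plane rotation of the stress tensor to polar components, with
# theta = pathVariable (the angular position of the radial path):
#   srr =  sxx*cos^2(theta) + syy*sin^2(theta) + 2*sxy*sin(theta)*cos(theta)
#   stt =  sxx*sin^2(theta) + syy*cos^2(theta) - 2*sxy*sin(theta)*cos(theta)
#   srt = (syy - sxx)*sin(theta)*cos(theta) + sxy*(cos^2(theta) - sin^2(theta))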
srr = sxx*cosRot*cosRot+syy*sinRot*sinRot+2*sxy*cosRot*sinRot
stt = sxx*sinRot*sinRot+syy*cosRot*cosRot-2*sxy*cosRot*sinRot
srt = -sxx*cosRot*sinRot+syy*cosRot*sinRot+sxy*(cosRot*cosRot-sinRot*sinRot)
currentSrr.append(srr)
currentStt.append(stt)
currentSrt.append(srt)
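# Stress invariants: I1 = trace, I2 = sum of the principal 2x2 minors,
# I3 = determinant of the stress tensor (the 2D quantities use the in-plane
# components only); Saverage is I1/2 in 2D and I1/3 in 3D, and the von Mises
# stress follows from the standard expression in the stress components.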
i1d2 = sxx + syy
i1d3 = sxx + syy + szz
i2d2 = sxx*syy - sxy*sxy
i2d3 = sxx*syy + syy*szz + sxx*szz - sxy*sxy - syz*syz - szx*szx
i3d3 = sxx*syy*szz - sxx*syz*syz - syy*szx*szx - szz*sxy*sxy + 2*sxy*syz*szx
saverd2 = i1d2/2.0
saverd3 = i1d3/3.0
smises2d = np.sqrt(sxx*sxx + syy*syy - sxx*syy + 3*sxy*sxy)
smises3d = np.sqrt(sxx*sxx + syy*syy + szz*szz - sxx*syy - syy*szz - sxx*szz + 3*(sxy*sxy + syz*syz + szx*szx))
s1d2 = 0.5*(sxx+syy)+np.sqrt((0.5*(sxx-syy))*(0.5*(sxx-syy))+sxy*sxy)
s2d2 = 0.5*(sxx+syy)-np.sqrt((0.5*(sxx-syy))*(0.5*(sxx-syy))+sxy*sxy)
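# Principal stresses of the 3D tensor via the trigonometric solution of the
# characteristic cubic; for (near-)degenerate states the arccos argument can
# fall marginally outside [-1,1], in which case the 2D principal values are
# used as a fallback.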
try:
princOrient = np.arccos((2*i1d3*i1d3*i1d3-9*i1d3*i2d3+27*i3d3)/(2*np.sqrt((i1d3*i1d3-3*i2d3)*(i1d3*i1d3-3*i2d3)*(i1d3*i1d3-3*i2d3))))/3.0
s1d3 = i1d3/3.0 + 2*np.sqrt(i1d3*i1d3-3*i2d3)*np.cos(princOrient)/3.0
s2d3 = i1d3/3.0 + 2*np.sqrt(i1d3*i1d3-3*i2d3)*np.cos(princOrient-2*np.pi/3.0)/3.0
s3d3 = i1d3/3.0 + 2*np.sqrt(i1d3*i1d3-3*i2d3)*np.cos(princOrient-4*np.pi/3.0)/3.0
except Exception:
s1d3 = s1d2
s2d3 = s2d2
s3d3 = 0.0
current3DS1.append(s1d3)
current3DS2.append(s2d3)
current3DS3.append(s3d3)
current3DI1.append(i1d3)
current3DI2.append(i2d3)
current3DI3.append(i3d3)
current3DSMises.append(smises3d)
current3DSaver.append(saverd3)
current2DS1.append(s1d2)
current2DS2.append(s2d2)
current2DI1.append(i1d2)
current2DI2.append(i2d2)
current2DSMises.append(smises2d)
current2DSaver.append(saverd2)
Srr.append(currentSrr)
Stt.append(currentStt)
Srt.append(currentSrt)
S1D3.append(current3DS1)
S2D3.append(current3DS2)
S3D3.append(current3DS3)
S1D2.append(current2DS1)
S2D2.append(current2DS2)
I1D3.append(current3DI1)
I2D3.append(current3DI2)
I3D3.append(current3DI3)
I1D2.append(current2DI1)
I2D2.append(current2DI2)
SMisesD3.append(current3DSMises)
SaverD3.append(current3DSaver)
SMisesD2.append(current2DSMises)
SaverD2.append(current2DSaver)
currentSrr = []
currentStt = []
currentSrt = []
current3DS1 = []
current3DS2 = []
current3DS3 = []
current3DI1 = []
current3DI2 = []
current3DI3 = []
current3DSMises = []
current3DSaver = []
current2DS1 = []
current2DS2 = []
current2DI1 = []
current2DI2 = []
current2DSMises = []
current2DSaver = []
pathVariableName = 'pathAngle [deg]'
pathStartVariableName = 'Ri [mum]'
pathEndVariableName = 'Rf [mum]'
pathCoordinateName = 'R [mum]'
datasheetName = 'Values, deltatheta=' + subFolder.split('deltatheta')[-1].replace('_','.')
radialpathsSheetnames.append(datasheetName)
numberOfRadialpaths.append(len(pathVariables))
worksheet = radialpathsWorkbook.add_worksheet(datasheetName.decode('utf-8'))
print(' --> Writing worksheet')
print(' ' + datasheetName)
print(' ')
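# Each path occupies a block of 25 columns in the worksheet: path coordinate,
# normalized coordinate, and 23 derived stress measures (components, polar
# components, principal stresses, von Mises and average stresses, invariants),
# hence the p*25 column offsets below.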
for p, pathVariable in enumerate(pathVariables):
print(' pathAngle = ' + str(pathVariable) + ' deg')
worksheet.write(0,p*25,pathVariableName,radialpathsstringFormat)
worksheet.write(1,p*25,pathVariable,radialpathsnumberFormatReduced)
worksheet.write(0,p*25+1,pathStartVariableName,radialpathsstringFormat)
worksheet.write(1,p*25+1,pathStartVariables[p],radialpathsnumberFormat)
worksheet.write(0,p*25+2,pathEndVariableName,radialpathsstringFormat)
worksheet.write(1,p*25+2,pathEndVariables[p],radialpathsnumberFormat)
worksheet.write(2,p*25,pathCoordinateName,radialpathsstringFormat)
worksheet.write(2,p*25+1,'Norm ' + pathCoordinateName,radialpathsstringFormat)
worksheet.write(2,p*25+2,'Sxx [MPa]',radialpathsstringFormat)
worksheet.write(2,p*25+3,'Syy [MPa]',radialpathsstringFormat)
worksheet.write(2,p*25+4,'Szz [MPa]',radialpathsstringFormat)
worksheet.write(2,p*25+5,'Sxy [MPa]',radialpathsstringFormat)
worksheet.write(2,p*25+6,'Szx [MPa]',radialpathsstringFormat)
worksheet.write(2,p*25+7,'Syz [MPa]',radialpathsstringFormat)
worksheet.write(2,p*25+8,'Srr [MPa]',radialpathsstringFormat)
worksheet.write(2,p*25+9,'Stt [MPa]',radialpathsstringFormat)
worksheet.write(2,p*25+10,'Srt [MPa]',radialpathsstringFormat)
worksheet.write(2,p*25+11,'S1_3D [MPa]',radialpathsstringFormat)
worksheet.write(2,p*25+12,'S2_3D [MPa]',radialpathsstringFormat)
worksheet.write(2,p*25+13,'S3_3D [MPa]',radialpathsstringFormat)
worksheet.write(2,p*25+14,'S1_2D [MPa]',radialpathsstringFormat)
worksheet.write(2,p*25+15,'S2_2D [MPa]',radialpathsstringFormat)
worksheet.write(2,p*25+16,'Smises_3D [MPa]',radialpathsstringFormat)
worksheet.write(2,p*25+17,'Smises_2D [MPa]',radialpathsstringFormat)
worksheet.write(2,p*25+18,'Saverage_3D [MPa]',radialpathsstringFormat)
worksheet.write(2,p*25+19,'Saverage_2D [MPa]',radialpathsstringFormat)
worksheet.write(2,p*25+20,'I1_3D [MPa]',radialpathsstringFormat)
worksheet.write(2,p*25+21,'I2_3D [MPa^2]',radialpathsstringFormat)
worksheet.write(2,p*25+22,'I3_3D [MPa^3]',radialpathsstringFormat)
worksheet.write(2,p*25+23,'I1_2D [MPa]',radialpathsstringFormat)
worksheet.write(2,p*25+24,'I2_2D [MPa^2]',radialpathsstringFormat)
measureNum = np.min([len(Sxx[p]),len(Syy[p]),len(Szz[p]),len(Sxy[p])])
print(' number of path points = ' + str(measureNum))
for c in range(0,measureNum):
coord = pathCoords[p][c]
worksheet.write(3+c,p*25,coord,radialpathsnumberFormat)
worksheet.write(3+c,p*25+1,pathNormCoords[p][c],radialpathsnumberFormat)
worksheet.write(3+c,p*25+2,Sxx[p][c],radialpathsnumberFormat)
worksheet.write(3+c,p*25+3,Syy[p][c],radialpathsnumberFormat)
worksheet.write(3+c,p*25+4,Szz[p][c],radialpathsnumberFormat)
worksheet.write(3+c,p*25+5,Sxy[p][c],radialpathsnumberFormat)
worksheet.write(3+c,p*25+6,0.0,radialpathsnumberFormat)
worksheet.write(3+c,p*25+7,0.0,radialpathsnumberFormat)
worksheet.write(3+c,p*25+8,Srr[p][c],radialpathsnumberFormat)
worksheet.write(3+c,p*25+9,Stt[p][c],radialpathsnumberFormat)
worksheet.write(3+c,p*25+10,Srt[p][c],radialpathsnumberFormat)
worksheet.write(3+c,p*25+11,S1D3[p][c],radialpathsnumberFormat)
worksheet.write(3+c,p*25+12,S2D3[p][c],radialpathsnumberFormat)
worksheet.write(3+c,p*25+13,S3D3[p][c],radialpathsnumberFormat)
worksheet.write(3+c,p*25+14,S1D2[p][c],radialpathsnumberFormat)
worksheet.write(3+c,p*25+15,S2D2[p][c],radialpathsnumberFormat)
worksheet.write(3+c,p*25+16,SMisesD3[p][c],radialpathsnumberFormat)
worksheet.write(3+c,p*25+17,SMisesD2[p][c],radialpathsnumberFormat)
worksheet.write(3+c,p*25+18,SaverD3[p][c],radialpathsnumberFormat)
worksheet.write(3+c,p*25+19,SaverD2[p][c],radialpathsnumberFormat)
worksheet.write(3+c,p*25+20,I1D3[p][c],radialpathsnumberFormat)
worksheet.write(3+c,p*25+21,I2D3[p][c],radialpathsnumberFormat)
worksheet.write(3+c,p*25+22,I3D3[p][c],radialpathsnumberFormat)
worksheet.write(3+c,p*25+23,I1D2[p][c],radialpathsnumberFormat)
worksheet.write(3+c,p*25+24,I2D2[p][c],radialpathsnumberFormat)
graphsheetName = 'Graphs, deltatheta=' + subFolder.split('deltatheta')[-1].replace('_','.')
graphworksheet = radialpathsWorkbook.add_worksheet(graphsheetName.decode('utf-8'))
print(' --> Writing worksheet')
print(' ' + graphsheetName)
print(' ')
variableNames = ['Sxx [MPa]','Syy [MPa]','Szz [MPa]','Sxy [MPa]','Szx [MPa]','Syz [MPa]','Srr [MPa]','Stt [MPa]','Srt [MPa]','S1_3D [MPa]','S2_3D [MPa]','S3_3D [MPa]','S1_2D [MPa]','S2_2D [MPa]','Smises_3D [MPa]','Smises_2D [MPa]','Saverage_3D [MPa]','Saverage_2D [MPa]','I1_3D [MPa]','I2_3D [MPa^2]','I3_3D [MPa^3]','I1_2D [MPa]','I2_2D [MPa^2]']
for v,variableName in enumerate(variableNames):
chartA = radialpathsWorkbook.add_chart({'type': 'scatter','subtype': 'straight_with_markers'})
print(' Chart ' + str(v+1) + '.A')
print(' ')
for p, pathVariable in enumerate(pathVariables):
dataLength = len(pathCoords[p])
if v==0:
radialpathsDatalengths.append(dataLength)
chartA.add_series({
'name': pathVariableName + '=' + str(pathVariable),
'categories': [datasheetName,3,25*p,dataLength,25*p],
'values': [datasheetName,3,25*p+2+v,dataLength,25*p+2+v],
})
print(' Series ' + str(p+1) + ': ' + pathVariableName + '=' + str(pathVariable))
print(' ')
chartA.set_title ({'name': variableName + ' vs path coordinates'})
chartA.set_x_axis({'name': pathCoordinateName})
chartA.set_y_axis({'name': variableName})
print(' Title: ' + variableName + ' vs path coordinates')
print(' x axis: ' + pathCoordinateName)
print(' y axis: ' + variableName)
print(' ')
print(' ')
graphworksheet.insert_chart(v*20,0,chartA)
chartB = radialpathsWorkbook.add_chart({'type': 'scatter','subtype': 'straight_with_markers'})
print(' Chart ' + str(v+1) + '.B')
print(' ')
for p, pathVariable in enumerate(pathVariables):
dataLength = len(pathCoords[p])
chartB.add_series({
'name': pathVariableName + '=' + str(pathVariable),
'categories': [datasheetName,3,25*p+1,dataLength,25*p+1],
'values': [datasheetName,3,25*p+2+v,dataLength,25*p+2+v],
})
print(' Series ' + str(p+1) + ': ' + pathVariableName + '=' + str(pathVariable))
print(' ')
chartB.set_title ({'name': variableName + ' vs normalized path coordinates'})
chartB.set_x_axis({'name': 'Norm ' + pathCoordinateName.split(' ')[0] + ' [-]'})
chartB.set_y_axis({'name': variableName})
print(' Title: ' + variableName + ' vs path coordinates')
print(' x axis: ' + 'Norm ' + pathCoordinateName.split(' ')[0] + ' [-]')
print(' y axis: ' + variableName)
print(' ')
print(' ')
graphworksheet.insert_chart(v*20,30,chartB)
Sxx = []
Syy = []
Szz = []
Sxy = []
Szx = []
Syz = []
Srr = []
Stt = []
Srt = []
S1D3 = []
S2D3 = []
S3D3 = []
I1D3 = []
I2D3 = []
I3D3 = []
SMisesD3 = []
SaverD3 = []
S1D2 = []
S2D2 = []
I1D2 = []
I2D2 = []
SMisesD2 = []
SaverD2 = []
pathVariables = []
pathStartVariables = []
pathEndVariables = []
pathCoords = []
pathNormCoords = []
print('<-----------------')
print('<-----------------')
if subFolder.split('/')[-1] + '-stressescircumferentialpaths' + '.csv' in listdir(subFolder):
print(' Analysis of circumferential paths for folder ' + subFolder)
print(' ')
with open(circumferentialpathsSummary,'r') as csv:
lines = csv.readlines()
Sxx = []
Syy = []
Szz = []
Sxy = []
Szx = []
Syz = []
Srr = []
Stt = []
Srt = []
S1D3 = []
S2D3 = []
S3D3 = []
I1D3 = []
I2D3 = []
I3D3 = []
SMisesD3 = []
SaverD3 = []
S1D2 = []
S2D2 = []
I1D2 = []
I2D2 = []
SMisesD2 = []
SaverD2 = []
pathVariables = []
pathStartVariables = []
pathEndVariables = []
pathCoords = []
pathNormCoords = []
for line in lines[1:]:
stressComp = line.replace('\n','').replace(' ','').split(',')[0]
pathVariable = float(line.replace('\n','').replace(' ','').split(',')[1])
pathStartVariable = float(line.replace('\n','').replace(' ','').split(',')[2])
pathEndVariable = float(line.replace('\n','').replace(' ','').split(',')[3])
datfilePath = join(subFolder,line.replace('\n','').replace(' ','').split(',')[-1])
print(' Reading component ' + stressComp)
print(' ' + 'for circumferential path at ' + str(pathVariable) + ' mum')
print(' ' + 'starting at ' + str(pathStartVariable) + ' deg')
print(' ' + 'ending at ' + str(pathEndVariable) + ' deg')
print(' ')
with open(datfilePath,'r') as dat:
datLines = dat.readlines()
currentxyData = []
for datLine in datLines:
if len(datLine.replace('\n','').replace(' ',''))>0 and 'X' not in datLine:
lineParts = datLine.replace('\n','').split(' ')
rowVec = []
for linePart in lineParts:
if linePart!='':
rowVec.append(float(linePart))
currentxyData.append(rowVec)
normxData = []
xData = []
yData = []
for xyPair in currentxyData:
normxData.append(xyPair[0])
xData.append(pathStartVariable+(pathEndVariable-pathStartVariable)*xyPair[0])
yData.append(xyPair[1])
if 'S11' in stressComp:
Sxx.append(yData)
pathCoords.append(xData)
pathNormCoords.append(normxData)
pathVariables.append(pathVariable)
pathStartVariables.append(pathStartVariable)
pathEndVariables.append(pathEndVariable)
elif 'S22' in stressComp:
Syy.append(yData)
elif 'S23' in stressComp:
Syz.append(yData)
elif 'S12' in stressComp:
Sxy.append(yData)
elif 'S13' in stressComp:
Szx.append(yData)
elif 'S33' in stressComp:
Szz.append(yData)
Szx.append(0.0)
Syz.append(0.0)
currentSxx = Sxx[-1]
currentSyy = Syy[-1]
currentSzz = yData
currentSxy = Sxy[-1]
currentSzx = 0.0
currentSyz = 0.0
currentSrr = []
currentStt = []
currentSrt = []
current3DS1 = []
current3DS2 = []
current3DS3 = []
current3DI1 = []
current3DI2 = []
current3DI3 = []
current3DSMises = []
current3DSaver = []
current2DS1 = []
current2DS2 = []
current2DI1 = []
current2DI2 = []
current2DSMises = []
current2DSaver = []
nstressPoints = np.min([len(currentSxx),len(currentSyy),len(currentSzz),len(currentSxy)])
for s in range(0,nstressPoints):
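# For circumferential paths the rotation angle is the local angular
# coordinate along the path (pathCoords, in degrees), not a fixed path angle.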
rotateBy = pathCoords[-1][s]*np.pi/180.0
cosRot = np.cos(rotateBy)
sinRot = np.sin(rotateBy)
sxx = currentSxx[s]
syy = currentSyy[s]
szz = currentSzz[s]
sxy = currentSxy[s]
szx = 0.0
syz = 0.0
srr = sxx*cosRot*cosRot+syy*sinRot*sinRot+2*sxy*cosRot*sinRot
stt = sxx*sinRot*sinRot+syy*cosRot*cosRot-2*sxy*cosRot*sinRot
srt = -sxx*cosRot*sinRot+syy*cosRot*sinRot+sxy*(cosRot*cosRot-sinRot*sinRot)
currentSrr.append(srr)
currentStt.append(stt)
currentSrt.append(srt)
i1d2 = sxx + syy
i1d3 = sxx + syy + szz
i2d2 = sxx*syy - sxy*sxy
i2d3 = sxx*syy + syy*szz + sxx*szz - sxy*sxy - syz*syz - szx*szx
i3d3 = sxx*syy*szz - sxx*syz*syz - syy*szx*szx - szz*sxy*sxy + 2*sxy*syz*szx
saverd2 = i1d2/2.0
saverd3 = i1d3/3.0
smises2d = np.sqrt(sxx*sxx + syy*syy - sxx*syy + 3*sxy*sxy)
smises3d = np.sqrt(sxx*sxx + syy*syy + szz*szz - sxx*syy - syy*szz - sxx*szz + 3*(sxy*sxy + syz*syz + szx*szx))
s1d2 = 0.5*(sxx+syy)+np.sqrt((0.5*(sxx-syy))*(0.5*(sxx-syy))+sxy*sxy)
s2d2 = 0.5*(sxx+syy)-np.sqrt((0.5*(sxx-syy))*(0.5*(sxx-syy))+sxy*sxy)
try:
princOrient = np.arccos((2*i1d3*i1d3*i1d3-9*i1d3*i2d3+27*i3d3)/(2*np.sqrt((i1d3*i1d3-3*i2d3)*(i1d3*i1d3-3*i2d3)*(i1d3*i1d3-3*i2d3))))/3.0
s1d3 = i1d3/3.0 + 2*np.sqrt(i1d3*i1d3-3*i2d3)*np.cos(princOrient)/3.0
s2d3 = i1d3/3.0 + 2*np.sqrt(i1d3*i1d3-3*i2d3)*np.cos(princOrient-2*np.pi/3.0)/3.0
s3d3 = i1d3/3.0 + 2*np.sqrt(i1d3*i1d3-3*i2d3)*np.cos(princOrient-4*np.pi/3.0)/3.0
except Exception:
s1d3 = s1d2
s2d3 = s2d2
s3d3 = 0.0
current3DS1.append(s1d3)
current3DS2.append(s2d3)
current3DS3.append(s3d3)
current3DI1.append(i1d3)
current3DI2.append(i2d3)
current3DI3.append(i3d3)
current3DSMises.append(smises3d)
current3DSaver.append(saverd3)
current2DS1.append(s1d2)
current2DS2.append(s2d2)
current2DI1.append(i1d2)
current2DI2.append(i2d2)
current2DSMises.append(smises2d)
current2DSaver.append(saverd2)
Srr.append(currentSrr)
Stt.append(currentStt)
Srt.append(currentSrt)
S1D3.append(current3DS1)
S2D3.append(current3DS2)
S3D3.append(current3DS3)
S1D2.append(current2DS1)
S2D2.append(current2DS2)
I1D3.append(current3DI1)
I2D3.append(current3DI2)
I3D3.append(current3DI3)
I1D2.append(current2DI1)
I2D2.append(current2DI2)
SMisesD3.append(current3DSMises)
SaverD3.append(current3DSaver)
SMisesD2.append(current2DSMises)
SaverD2.append(current2DSaver)
currentSrr = []
currentStt = []
currentSrt = []
current3DS1 = []
current3DS2 = []
current3DS3 = []
current3DI1 = []
current3DI2 = []
current3DI3 = []
current3DSMises = []
current3DSaver = []
current2DS1 = []
current2DS2 = []
current2DI1 = []
current2DI2 = []
current2DSMises = []
current2DSaver = []
pathVariableName = 'pathRadius [mum]'
pathStartVariableName = 'startAngle [deg]'
pathEndVariableName = 'endAngle [deg]'
pathCoordinateName = 'angle [deg]'
datasheetName = 'Values, deltatheta=' + subFolder.split('deltatheta')[-1].replace('_','.')
circumferentialpathsSheetnames.append(datasheetName)
numberOfCircumferentialpaths.append(len(pathVariables))
worksheet = circumferentialpathsWorkbook.add_worksheet(datasheetName.decode('utf-8'))
for p, pathVariable in enumerate(pathVariables):
worksheet.write(0,p*25,pathVariableName,circumferentialpathsstringFormat)
worksheet.write(1,p*25,pathVariable,circumferentialpathsnumberFormatReduced)
worksheet.write(0,p*25+1,pathStartVariableName,circumferentialpathsstringFormat)
worksheet.write(1,p*25+1,pathStartVariables[p],circumferentialpathsnumberFormat)
worksheet.write(0,p*25+2,pathEndVariableName,circumferentialpathsstringFormat)
worksheet.write(1,p*25+2,pathEndVariables[p],circumferentialpathsnumberFormat)
worksheet.write(2,p*25,pathCoordinateName,circumferentialpathsstringFormat)
worksheet.write(2,p*25+1,'Norm ' + pathCoordinateName,circumferentialpathsstringFormat)
worksheet.write(2,p*25+2,'Sxx [MPa]',circumferentialpathsstringFormat)
worksheet.write(2,p*25+3,'Syy [MPa]',circumferentialpathsstringFormat)
worksheet.write(2,p*25+4,'Szz [MPa]',circumferentialpathsstringFormat)
worksheet.write(2,p*25+5,'Sxy [MPa]',circumferentialpathsstringFormat)
worksheet.write(2,p*25+6,'Szx [MPa]',circumferentialpathsstringFormat)
worksheet.write(2,p*25+7,'Syz [MPa]',circumferentialpathsstringFormat)
worksheet.write(2,p*25+8,'Srr [MPa]',circumferentialpathsstringFormat)
worksheet.write(2,p*25+9,'Stt [MPa]',circumferentialpathsstringFormat)
worksheet.write(2,p*25+10,'Srt [MPa]',circumferentialpathsstringFormat)
worksheet.write(2,p*25+11,'S1_3D [MPa]',circumferentialpathsstringFormat)
worksheet.write(2,p*25+12,'S2_3D [MPa]',circumferentialpathsstringFormat)
worksheet.write(2,p*25+13,'S3_3D [MPa]',circumferentialpathsstringFormat)
worksheet.write(2,p*25+14,'S1_2D [MPa]',circumferentialpathsstringFormat)
worksheet.write(2,p*25+15,'S2_2D [MPa]',circumferentialpathsstringFormat)
worksheet.write(2,p*25+16,'Smises_3D [MPa]',circumferentialpathsstringFormat)
worksheet.write(2,p*25+17,'Smises_2D [MPa]',circumferentialpathsstringFormat)
worksheet.write(2,p*25+18,'Saverage_3D [MPa]',circumferentialpathsstringFormat)
worksheet.write(2,p*25+19,'Saverage_2D [MPa]',circumferentialpathsstringFormat)
worksheet.write(2,p*25+20,'I1_3D [MPa]',circumferentialpathsstringFormat)
worksheet.write(2,p*25+21,'I2_3D [MPa^2]',circumferentialpathsstringFormat)
worksheet.write(2,p*25+22,'I3_3D [MPa^3]',circumferentialpathsstringFormat)
worksheet.write(2,p*25+23,'I1_2D [MPa]',circumferentialpathsstringFormat)
worksheet.write(2,p*25+24,'I2_2D [MPa^2]',circumferentialpathsstringFormat)
measureNum = np.min([len(Sxx[p]),len(Syy[p]),len(Szz[p]),len(Sxy[p])])
print(' number of path points = ' + str(measureNum))
for c in range(0,measureNum):
coord = pathCoords[p][c]
worksheet.write(3+c,p*25,coord,circumferentialpathsnumberFormat)
worksheet.write(3+c,p*25+1,pathNormCoords[p][c],circumferentialpathsnumberFormat)
worksheet.write(3+c,p*25+2,Sxx[p][c],circumferentialpathsnumberFormat)
worksheet.write(3+c,p*25+3,Syy[p][c],circumferentialpathsnumberFormat)
worksheet.write(3+c,p*25+4,Szz[p][c],circumferentialpathsnumberFormat)
worksheet.write(3+c,p*25+5,Sxy[p][c],circumferentialpathsnumberFormat)
worksheet.write(3+c,p*25+6,0.0,circumferentialpathsnumberFormat)
worksheet.write(3+c,p*25+7,0.0,circumferentialpathsnumberFormat)
worksheet.write(3+c,p*25+8,Srr[p][c],circumferentialpathsnumberFormat)
worksheet.write(3+c,p*25+9,Stt[p][c],circumferentialpathsnumberFormat)
worksheet.write(3+c,p*25+10,Srt[p][c],circumferentialpathsnumberFormat)
worksheet.write(3+c,p*25+11,S1D3[p][c],circumferentialpathsnumberFormat)
worksheet.write(3+c,p*25+12,S2D3[p][c],circumferentialpathsnumberFormat)
worksheet.write(3+c,p*25+13,S3D3[p][c],circumferentialpathsnumberFormat)
worksheet.write(3+c,p*25+14,S1D2[p][c],circumferentialpathsnumberFormat)
worksheet.write(3+c,p*25+15,S2D2[p][c],circumferentialpathsnumberFormat)
worksheet.write(3+c,p*25+16,SMisesD3[p][c],circumferentialpathsnumberFormat)
worksheet.write(3+c,p*25+17,SMisesD2[p][c],circumferentialpathsnumberFormat)
worksheet.write(3+c,p*25+18,SaverD3[p][c],circumferentialpathsnumberFormat)
worksheet.write(3+c,p*25+19,SaverD2[p][c],circumferentialpathsnumberFormat)
worksheet.write(3+c,p*25+20,I1D3[p][c],circumferentialpathsnumberFormat)
worksheet.write(3+c,p*25+21,I2D3[p][c],circumferentialpathsnumberFormat)
worksheet.write(3+c,p*25+22,I3D3[p][c],circumferentialpathsnumberFormat)
worksheet.write(3+c,p*25+23,I1D2[p][c],circumferentialpathsnumberFormat)
worksheet.write(3+c,p*25+24,I2D2[p][c],circumferentialpathsnumberFormat)
graphsheetName = 'Graphs, deltatheta=' + subFolder.split('deltatheta')[-1].replace('_','.')
graphworksheet = circumferentialpathsWorkbook.add_worksheet(graphsheetName.decode('utf-8'))
print(' --> Writing worksheet')
print(' ' + graphsheetName)
print(' ')
variableNames = ['Sxx [MPa]','Syy [MPa]','Szz [MPa]','Sxy [MPa]','Szx [MPa]','Syz [MPa]','Srr [MPa]','Stt [MPa]','Srt [MPa]','S1_3D [MPa]','S2_3D [MPa]','S3_3D [MPa]','S1_2D [MPa]','S2_2D [MPa]','Smises_3D [MPa]','Smises_2D [MPa]','Saverage_3D [MPa]','Saverage_2D [MPa]','I1_3D [MPa]','I2_3D [MPa^2]','I3_3D [MPa^3]','I1_2D [MPa]','I2_2D [MPa^2]']
for v,variableName in enumerate(variableNames):
chartA = circumferentialpathsWorkbook.add_chart({'type': 'scatter','subtype': 'straight_with_markers'})
print(' Chart ' + str(v+1) + '.A')
print(' ')
for p, pathVariable in enumerate(pathVariables):
dataLength = len(pathCoords[p])
if v==0:
circumferentialpathsDatalengths.append(dataLength)
chartA.add_series({
'name': pathVariableName + '=' + str(pathVariable),
'categories': [datasheetName,3,25*p,dataLength,25*p],
'values': [datasheetName,3,25*p+2+v,dataLength,25*p+2+v],
})
print(' Series ' + str(p+1) + ': ' + pathVariableName + '=' + str(pathVariable))
print(' ')
chartA.set_title ({'name': variableName + ' vs path coordinates'})
chartA.set_x_axis({'name': pathCoordinateName})
chartA.set_y_axis({'name': variableName})
print(' Title: ' + variableName + ' vs path coordinates')
print(' x axis: ' + pathCoordinateName)
print(' y axis: ' + variableName)
print(' ')
print(' ')
graphworksheet.insert_chart(v*20,0,chartA)
chartB = circumferentialpathsWorkbook.add_chart({'type': 'scatter','subtype': 'straight_with_markers'})
print(' Chart ' + str(v+1) + '.B')
print(' ')
for p, pathVariable in enumerate(pathVariables):
dataLength = len(pathCoords[p])
chartB.add_series({
'name': pathVariableName + '=' + str(pathVariable),
'categories': [datasheetName,3,25*p+1,dataLength,25*p+1],
'values': [datasheetName,3,25*p+2+v,dataLength,25*p+2+v],
})
print(' Series ' + str(p+1) + ': ' + pathVariableName + '=' + str(pathVariable))
print(' ')
chartB.set_title ({'name': variableName + ' vs normalized path coordinates'})
chartB.set_x_axis({'name': 'Norm ' + pathCoordinateName.split(' ')[0] + ' [-]'})
chartB.set_y_axis({'name': variableName})
print(' Title: ' + variableName + ' vs path coordinates')
print(' x axis: ' + 'Norm ' + pathCoordinateName.split(' ')[0] + ' [-]')
print(' y axis: ' + variableName)
print(' ')
print(' ')
graphworksheet.insert_chart(v*20,30,chartB)
Sxx = []
Syy = []
Szz = []
Sxy = []
Szx = []
Syz = []
Srr = []
Stt = []
Srt = []
S1D3 = []
S2D3 = []
S3D3 = []
I1D3 = []
I2D3 = []
I3D3 = []
SMisesD3 = []
SaverD3 = []
S1D2 = []
S2D2 = []
I1D2 = []
I2D2 = []
SMisesD2 = []
SaverD2 = []
pathVariables = []
pathStartVariables = []
pathEndVariables = []
pathCoords = []
pathNormCoords = []
if subFolder.split('/')[-1] + '-stresseshorizontalpaths' + '.csv' in listdir(subFolder):
print(' Analysis of horizontal paths for folder ' + subFolder)
with open(horizontalpathsSummary,'r') as csv:
lines = csv.readlines()
Sxx = []
Syy = []
Szz = []
Sxy = []
Szx = []
Syz = []
Srr = []
Stt = []
Srt = []
S1D3 = []
S2D3 = []
S3D3 = []
I1D3 = []
I2D3 = []
I3D3 = []
SMisesD3 = []
SaverD3 = []
S1D2 = []
S2D2 = []
I1D2 = []
I2D2 = []
SMisesD2 = []
SaverD2 = []
pathVariables = []
pathStartVariables = []
pathEndVariables = []
pathCoords = []
pathNormCoords = []
for line in lines[1:]:
stressComp = line.replace('\n','').replace(' ','').split(',')[0]
pathVariable = float(line.replace('\n','').replace(' ','').split(',')[1])
pathStartVariable = float(line.replace('\n','').replace(' ','').split(',')[2])
pathEndVariable = float(line.replace('\n','').replace(' ','').split(',')[3])
datfilePath = join(subFolder,line.replace('\n','').replace(' ','').split(',')[-1])
print(' Reading component ' + stressComp)
print(' ' + 'for horizontal path at ' + str(pathVariable) + ' [mum]')
print(' ' + 'starting at ' + str(pathStartVariable) + ' mum')
print(' ' + 'ending at ' + str(pathEndVariable) + ' mum')
print(' ')
with open(datfilePath,'r') as dat:
datLines = dat.readlines()
currentxyData = []
for datLine in datLines:
if len(datLine.replace('\n','').replace(' ',''))>0 and 'X' not in datLine:
lineParts = datLine.replace('\n','').split(' ')
rowVec = []
for linePart in lineParts:
if linePart!='':
rowVec.append(float(linePart))
currentxyData.append(rowVec)
normxData = []
xData = []
yData = []
for xyPair in currentxyData:
normxData.append(xyPair[0])
xData.append(pathStartVariable+(pathEndVariable-pathStartVariable)*xyPair[0])
yData.append(xyPair[1])
if 'S11' in stressComp:
Sxx.append(yData)
pathCoords.append(xData)
pathNormCoords.append(normxData)
pathVariables.append(pathVariable)
pathStartVariables.append(pathStartVariable)
pathEndVariables.append(pathEndVariable)
elif 'S22' in stressComp:
Syy.append(yData)
elif 'S23' in stressComp:
Syz.append(yData)
elif 'S12' in stressComp:
Sxy.append(yData)
elif 'S13' in stressComp:
Szx.append(yData)
elif 'S33' in stressComp:
Szz.append(yData)
Szx.append(0.0)
Syz.append(0.0)
currentSxx = Sxx[-1]
currentSyy = Syy[-1]
currentSzz = yData
currentSxy = Sxy[-1]
currentSzx = 0.0
currentSyz = 0.0
currentSrr = []
currentStt = []
currentSrt = []
current3DS1 = []
current3DS2 = []
current3DS3 = []
current3DI1 = []
current3DI2 = []
current3DI3 = []
current3DSMises = []
current3DSaver = []
current2DS1 = []
current2DS2 = []
current2DI1 = []
current2DI2 = []
current2DSMises = []
current2DSaver = []
nstressPoints = np.min([len(currentSxx),len(currentSyy),len(currentSzz),len(currentSxy)])
for s in range(0,nstressPoints):
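# For horizontal paths the local polar angle is atan2(y, x), with
# y = pathVariable (the fixed height of the path) and x = the path coordinate.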
rotateBy = np.arctan2(pathVariable,pathCoords[-1][s])
cosRot = np.cos(rotateBy)
sinRot = np.sin(rotateBy)
sxx = currentSxx[s]
syy = currentSyy[s]
szz = currentSzz[s]
sxy = currentSxy[s]
szx = 0.0
syz = 0.0
srr = sxx*cosRot*cosRot+syy*sinRot*sinRot+2*sxy*cosRot*sinRot
stt = sxx*sinRot*sinRot+syy*cosRot*cosRot-2*sxy*cosRot*sinRot
srt = -sxx*cosRot*sinRot+syy*cosRot*sinRot+sxy*(cosRot*cosRot-sinRot*sinRot)
currentSrr.append(srr)
currentStt.append(stt)
currentSrt.append(srt)
i1d2 = sxx + syy
i1d3 = sxx + syy + szz
i2d2 = sxx*syy - sxy*sxy
i2d3 = sxx*syy + syy*szz + sxx*szz - sxy*sxy - syz*syz - szx*szx
i3d3 = sxx*syy*szz - sxx*syz*syz - syy*szx*szx - szz*sxy*sxy + 2*sxy*syz*szx
saverd2 = i1d2/2.0
saverd3 = i1d3/3.0
smises2d = np.sqrt(sxx*sxx + syy*syy - sxx*syy + 3*sxy*sxy)
smises3d = np.sqrt(sxx*sxx + syy*syy + szz*szz - sxx*syy - syy*szz - sxx*szz + 3*(sxy*sxy + syz*syz + szx*szx))
s1d2 = 0.5*(sxx+syy)+np.sqrt((0.5*(sxx-syy))*(0.5*(sxx-syy))+sxy*sxy)
s2d2 = 0.5*(sxx+syy)-np.sqrt((0.5*(sxx-syy))*(0.5*(sxx-syy))+sxy*sxy)
try:
princOrient = np.arccos((2*i1d3*i1d3*i1d3-9*i1d3*i2d3+27*i3d3)/(2*np.sqrt((i1d3*i1d3-3*i2d3)*(i1d3*i1d3-3*i2d3)*(i1d3*i1d3-3*i2d3))))/3.0
s1d3 = i1d3/3.0 + 2*np.sqrt(i1d3*i1d3-3*i2d3)*np.cos(princOrient)/3.0
s2d3 = i1d3/3.0 + 2*np.sqrt(i1d3*i1d3-3*i2d3)*np.cos(princOrient-2*np.pi/3.0)/3.0
s3d3 = i1d3/3.0 + 2*np.sqrt(i1d3*i1d3-3*i2d3)*np.cos(princOrient-4*np.pi/3.0)/3.0
except Exception:
s1d3 = s1d2
s2d3 = s2d2
s3d3 = 0.0
current3DS1.append(s1d3)
current3DS2.append(s2d3)
current3DS3.append(s3d3)
current3DI1.append(i1d3)
current3DI2.append(i2d3)
current3DI3.append(i3d3)
current3DSMises.append(smises3d)
current3DSaver.append(saverd3)
current2DS1.append(s1d2)
current2DS2.append(s2d2)
current2DI1.append(i1d2)
current2DI2.append(i2d2)
current2DSMises.append(smises2d)
current2DSaver.append(saverd2)
Srr.append(currentSrr)
Stt.append(currentStt)
Srt.append(currentSrt)
S1D3.append(current3DS1)
S2D3.append(current3DS2)
S3D3.append(current3DS3)
S1D2.append(current2DS1)
S2D2.append(current2DS2)
I1D3.append(current3DI1)
I2D3.append(current3DI2)
I3D3.append(current3DI3)
I1D2.append(current2DI1)
I2D2.append(current2DI2)
SMisesD3.append(current3DSMises)
SaverD3.append(current3DSaver)
SMisesD2.append(current2DSMises)
SaverD2.append(current2DSaver)
currentSrr = []
currentStt = []
currentSrt = []
current3DS1 = []
current3DS2 = []
current3DS3 = []
current3DI1 = []
current3DI2 = []
current3DI3 = []
current3DSMises = []
current3DSaver = []
current2DS1 = []
current2DS2 = []
current2DI1 = []
current2DI2 = []
current2DSMises = []
current2DSaver = []
pathVariableName = 'y [mum]'
pathStartVariableName = 'xi [mum]'
pathEndVariableName = 'xf [mum]'
pathCoordinateName = 'x [mum]'
datasheetName = 'Values, deltatheta=' + subFolder.split('deltatheta')[-1].replace('_','.')
horizontalpathsSheetnames.append(datasheetName)
numberOfHorizontalpaths.append(len(pathVariables))
worksheet = horizontalpathsWorkbook.add_worksheet(datasheetName.decode('utf-8'))
for p, pathVariable in enumerate(pathVariables):
worksheet.write(0,p*25,pathVariableName,horizontalpathsstringFormat)
worksheet.write(1,p*25,pathVariable,horizontalpathsnumberFormatReduced)
worksheet.write(0,p*25+1,pathStartVariableName,horizontalpathsstringFormat)
worksheet.write(1,p*25+1,pathStartVariables[p],horizontalpathsnumberFormat)
worksheet.write(0,p*25+2,pathEndVariableName,horizontalpathsstringFormat)
worksheet.write(1,p*25+2,pathEndVariables[p],horizontalpathsnumberFormat)
worksheet.write(2,p*25,pathCoordinateName,horizontalpathsstringFormat)
worksheet.write(2,p*25+1,'Norm ' + pathCoordinateName,horizontalpathsstringFormat)
worksheet.write(2,p*25+2,'Sxx [MPa]',horizontalpathsstringFormat)
worksheet.write(2,p*25+3,'Syy [MPa]',horizontalpathsstringFormat)
worksheet.write(2,p*25+4,'Szz [MPa]',horizontalpathsstringFormat)
worksheet.write(2,p*25+5,'Sxy [MPa]',horizontalpathsstringFormat)
worksheet.write(2,p*25+6,'Szx [MPa]',horizontalpathsstringFormat)
worksheet.write(2,p*25+7,'Syz [MPa]',horizontalpathsstringFormat)
worksheet.write(2,p*25+8,'Srr [MPa]',horizontalpathsstringFormat)
worksheet.write(2,p*25+9,'Stt [MPa]',horizontalpathsstringFormat)
worksheet.write(2,p*25+10,'Srt [MPa]',horizontalpathsstringFormat)
worksheet.write(2,p*25+11,'S1_3D [MPa]',horizontalpathsstringFormat)
worksheet.write(2,p*25+12,'S2_3D [MPa]',horizontalpathsstringFormat)
worksheet.write(2,p*25+13,'S3_3D [MPa]',horizontalpathsstringFormat)
worksheet.write(2,p*25+14,'S1_2D [MPa]',horizontalpathsstringFormat)
worksheet.write(2,p*25+15,'S2_2D [MPa]',horizontalpathsstringFormat)
worksheet.write(2,p*25+16,'Smises_3D [MPa]',horizontalpathsstringFormat)
worksheet.write(2,p*25+17,'Smises_2D [MPa]',horizontalpathsstringFormat)
worksheet.write(2,p*25+18,'Saverage_3D [MPa]',horizontalpathsstringFormat)
worksheet.write(2,p*25+19,'Saverage_2D [MPa]',horizontalpathsstringFormat)
worksheet.write(2,p*25+20,'I1_3D [MPa]',horizontalpathsstringFormat)
worksheet.write(2,p*25+21,'I2_3D [MPa^2]',horizontalpathsstringFormat)
worksheet.write(2,p*25+22,'I3_3D [MPa^3]',horizontalpathsstringFormat)
worksheet.write(2,p*25+23,'I1_2D [MPa]',horizontalpathsstringFormat)
worksheet.write(2,p*25+24,'I2_2D [MPa^2]',horizontalpathsstringFormat)
measureNum = np.min([len(Sxx[p]),len(Syy[p]),len(Szz[p]),len(Sxy[p])])
print(' number of path points = ' + str(measureNum))
for c in range(0,measureNum):
coord = pathCoords[p][c]
worksheet.write(3+c,p*25,coord,horizontalpathsnumberFormat)
worksheet.write(3+c,p*25+1,pathNormCoords[p][c],horizontalpathsnumberFormat)
worksheet.write(3+c,p*25+2,Sxx[p][c],horizontalpathsnumberFormat)
worksheet.write(3+c,p*25+3,Syy[p][c],horizontalpathsnumberFormat)
worksheet.write(3+c,p*25+4,Szz[p][c],horizontalpathsnumberFormat)
worksheet.write(3+c,p*25+5,Sxy[p][c],horizontalpathsnumberFormat)
worksheet.write(3+c,p*25+6,0.0,horizontalpathsnumberFormat)
worksheet.write(3+c,p*25+7,0.0,horizontalpathsnumberFormat)
worksheet.write(3+c,p*25+8,Srr[p][c],horizontalpathsnumberFormat)
worksheet.write(3+c,p*25+9,Stt[p][c],horizontalpathsnumberFormat)
worksheet.write(3+c,p*25+10,Srt[p][c],horizontalpathsnumberFormat)
worksheet.write(3+c,p*25+11,S1D3[p][c],horizontalpathsnumberFormat)
worksheet.write(3+c,p*25+12,S2D3[p][c],horizontalpathsnumberFormat)
worksheet.write(3+c,p*25+13,S3D3[p][c],horizontalpathsnumberFormat)
worksheet.write(3+c,p*25+14,S1D2[p][c],horizontalpathsnumberFormat)
worksheet.write(3+c,p*25+15,S2D2[p][c],horizontalpathsnumberFormat)
worksheet.write(3+c,p*25+16,SMisesD3[p][c],horizontalpathsnumberFormat)
worksheet.write(3+c,p*25+17,SMisesD2[p][c],horizontalpathsnumberFormat)
worksheet.write(3+c,p*25+18,SaverD3[p][c],horizontalpathsnumberFormat)
worksheet.write(3+c,p*25+19,SaverD2[p][c],horizontalpathsnumberFormat)
worksheet.write(3+c,p*25+20,I1D3[p][c],horizontalpathsnumberFormat)
worksheet.write(3+c,p*25+21,I2D3[p][c],horizontalpathsnumberFormat)
worksheet.write(3+c,p*25+22,I3D3[p][c],horizontalpathsnumberFormat)
worksheet.write(3+c,p*25+23,I1D2[p][c],horizontalpathsnumberFormat)
worksheet.write(3+c,p*25+24,I2D2[p][c],horizontalpathsnumberFormat)
graphsheetName = 'Graphs, deltatheta=' + subFolder.split('deltatheta')[-1].replace('_','.')
graphworksheet = horizontalpathsWorkbook.add_worksheet(graphsheetName.decode('utf-8'))
print(' --> Writing worksheet')
print(' ' + graphsheetName)
print(' ')
variableNames = ['Sxx [MPa]','Syy [MPa]','Szz [MPa]','Sxy [MPa]','Szx [MPa]','Syz [MPa]','Srr [MPa]','Stt [MPa]','Srt [MPa]','S1_3D [MPa]','S2_3D [MPa]','S3_3D [MPa]','S1_2D [MPa]','S2_2D [MPa]','Smises_3D [MPa]','Smises_2D [MPa]','Saverage_3D [MPa]','Saverage_2D [MPa]','I1_3D [MPa]','I2_3D [MPa^2]','I3_3D [MPa^3]','I1_2D [MPa]','I2_2D [MPa^2]']
for v,variableName in enumerate(variableNames):
chartA = horizontalpathsWorkbook.add_chart({'type': 'scatter','subtype': 'straight_with_markers'})
print(' Chart ' + str(v+1) + '.A')
print(' ')
for p, pathVariable in enumerate(pathVariables):
dataLength = len(pathCoords[p])
if v==0:
horizontalpathsDatalengths.append(dataLength)
chartA.add_series({
'name': pathVariableName + '=' + str(pathVariable),
'categories': [datasheetName,3,25*p,dataLength,25*p],
'values': [datasheetName,3,25*p+2+v,dataLength,25*p+2+v],
})
print(' Series ' + str(p+1) + ': ' + pathVariableName + '=' + str(pathVariable))
print(' ')
chartA.set_title ({'name': variableName + ' vs path coordinates'})
chartA.set_x_axis({'name': pathCoordinateName})
chartA.set_y_axis({'name': variableName})
print(' Title: ' + variableName + ' vs path coordinates')
print(' x axis: ' + pathCoordinateName)
print(' y axis: ' + variableName)
print(' ')
print(' ')
graphworksheet.insert_chart(v*20,0,chartA)
chartB = horizontalpathsWorkbook.add_chart({'type': 'scatter','subtype': 'straight_with_markers'})
print('             Chart ' + str(v+1) + '.B')
print(' ')
for p, pathVariable in enumerate(pathVariables):
dataLength = len(pathCoords[p])
chartB.add_series({
'name': pathVariableName + '=' + str(pathVariable),
'categories': [datasheetName,3,25*p+1,dataLength,25*p+1],
'values': [datasheetName,3,25*p+2+v,dataLength,25*p+2+v],
})
print(' Series ' + str(p+1) + ': ' + pathVariableName + '=' + str(pathVariable))
print(' ')
chartB.set_title ({'name': variableName + ' vs normalized path coordinates'})
chartB.set_x_axis({'name': 'Norm ' + pathCoordinateName.split(' ')[0] + ' [-]'})
chartB.set_y_axis({'name': variableName})
print(' Title: ' + variableName + ' vs path coordinates')
print(' x axis: ' + 'Norm ' + pathCoordinateName.split(' ')[0] + ' [-]')
print(' y axis: ' + variableName)
print(' ')
print(' ')
graphworksheet.insert_chart(v*20,30,chartB)
Sxx = []
Syy = []
Szz = []
Sxy = []
Szx = []
Syz = []
Srr = []
Stt = []
Srt = []
S1D3 = []
S2D3 = []
S3D3 = []
I1D3 = []
I2D3 = []
I3D3 = []
SMisesD3 = []
SaverD3 = []
S1D2 = []
S2D2 = []
I1D2 = []
I2D2 = []
SMisesD2 = []
SaverD2 = []
pathVariables = []
pathStartVariables = []
pathEndVariables = []
pathCoords = []
pathNormCoords = []
if subFolder.split('/')[-1] + '-stressesverticalpaths' + '.csv' in listdir(subFolder):
print(' Analysis of vertical paths for folder ' + subFolder)
with open(verticalpathsSummary,'r') as csv:
lines = csv.readlines()
Sxx = []
Syy = []
Szz = []
Sxy = []
Szx = []
Syz = []
Srr = []
Stt = []
Srt = []
S1D3 = []
S2D3 = []
S3D3 = []
I1D3 = []
I2D3 = []
I3D3 = []
SMisesD3 = []
SaverD3 = []
S1D2 = []
S2D2 = []
I1D2 = []
I2D2 = []
SMisesD2 = []
SaverD2 = []
pathVariables = []
pathStartVariables = []
pathEndVariables = []
pathCoords = []
pathNormCoords = []
for line in lines[1:]:
stressComp = line.replace('\n','').replace(' ','').split(',')[0]
pathVariable = float(line.replace('\n','').replace(' ','').split(',')[1])
pathStartVariable = float(line.replace('\n','').replace(' ','').split(',')[2])
pathEndVariable = float(line.replace('\n','').replace(' ','').split(',')[3])
datfilePath = join(subFolder,line.replace('\n','').replace(' ','').split(',')[-1])
print(' Reading component ' + stressComp)
print(' ' + 'for vertical path at ' + str(pathVariable) + ' mum')
print(' ' + 'starting at ' + str(pathStartVariable) + ' mum')
print(' ' + 'ending at ' + str(pathEndVariable) + ' mum')
print(' ')
with open(datfilePath,'r') as dat:
datLines = dat.readlines()
currentxyData = []
for datLine in datLines:
if len(datLine.replace('\n','').replace(' ',''))>0 and 'X' not in datLine:
lineParts = datLine.replace('\n','').split(' ')
rowVec = []
for linePart in lineParts:
if linePart!='':
rowVec.append(float(linePart))
currentxyData.append(rowVec)
normxData = []
xData = []
yData = []
for xyPair in currentxyData:
normxData.append(xyPair[0])
xData.append(pathStartVariable+(pathEndVariable-pathStartVariable)*xyPair[0])
yData.append(xyPair[1])
if 'S11' in stressComp:
Sxx.append(yData)
pathCoords.append(xData)
pathNormCoords.append(normxData)
pathVariables.append(pathVariable)
pathStartVariables.append(pathStartVariable)
pathEndVariables.append(pathEndVariable)
elif 'S22' in stressComp:
Syy.append(yData)
elif 'S23' in stressComp:
Syz.append(yData)
elif 'S12' in stressComp:
Sxy.append(yData)
elif 'S13' in stressComp:
Szx.append(yData)
elif 'S33' in stressComp:
Szz.append(yData)
Szx.append(0.0)
Syz.append(0.0)
currentSxx = Sxx[-1]
currentSyy = Syy[-1]
currentSzz = yData
currentSxy = Sxy[-1]
currentSzx = 0.0
currentSyz = 0.0
currentSrr = []
currentStt = []
currentSrt = []
current3DS1 = []
current3DS2 = []
current3DS3 = []
current3DI1 = []
current3DI2 = []
current3DI3 = []
current3DSMises = []
current3DSaver = []
current2DS1 = []
current2DS2 = []
current2DI1 = []
current2DI2 = []
current2DSMises = []
current2DSaver = []
nstressPoints = np.min([len(currentSxx),len(currentSyy),len(currentSzz),len(currentSxy)])
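# Loop over the path points: rotate the Cartesian stresses to polar (r, t)
# components with the local angle theta = arctan2(y, x), then evaluate the 2D
# and 3D invariants, von Mises, average (hydrostatic) and principal stresses.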
for s in range(0,nstressPoints):
rotateBy = np.arctan2(xData[s],pathVariable)
cosRot = np.cos(rotateBy)
sinRot = np.sin(rotateBy)
sxx = currentSxx[s]
syy = currentSyy[s]
szz = currentSzz[s]
sxy = currentSxy[s]
szx = 0.0
syz = 0.0
srr = sxx*cosRot*cosRot+syy*sinRot*sinRot+2*sxy*cosRot*sinRot
stt = sxx*sinRot*sinRot+syy*cosRot*cosRot-2*sxy*cosRot*sinRot
srt = -sxx*cosRot*sinRot+syy*cosRot*sinRot+sxy*(cosRot*cosRot-sinRot*sinRot)
currentSrr.append(srr)
currentStt.append(stt)
currentSrt.append(srt)
i1d2 = sxx + syy
i1d3 = sxx + syy + szz
i2d2 = sxx*syy - sxy*sxy
i2d3 = sxx*syy + syy*szz + sxx*szz - sxy*sxy - syz*syz - szx*szx
i3d3 = sxx*syy*szz - sxx*syz*syz - syy*szx*szx - szz*sxy*sxy + 2*sxy*syz*szx
saverd2 = i1d2/2.0
saverd3 = i1d3/3.0
smises2d = np.sqrt(sxx*sxx + syy*syy - sxx*syy + 3*sxy*sxy)
smises3d = np.sqrt(sxx*sxx + syy*syy + szz*szz - sxx*syy - syy*szz - sxx*szz + 3*(sxy*sxy + syz*syz + szx*szx))
s1d2 = 0.5*(sxx+syy)+np.sqrt((0.5*(sxx-syy))*(0.5*(sxx-syy))+sxy*sxy)
s2d2 = 0.5*(sxx+syy)-np.sqrt((0.5*(sxx-syy))*(0.5*(sxx-syy))+sxy*sxy)
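# Principal stresses from the trigonometric solution of the characteristic
# equation written in terms of the invariants I1, I2, I3; if the evaluation
# fails, fall back to the 2D principal values with S3 set to zero.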
try:
princOrient = np.arccos((2*i1d3*i1d3*i1d3-9*i1d3*i2d3+27*i3d3)/(2*np.sqrt((i1d3*i1d3-3*i2d3)*(i1d3*i1d3-3*i2d3)*(i1d3*i1d3-3*i2d3))))/3.0
s1d3 = i1d3/3.0 + 2*np.sqrt(i1d3*i1d3-3*i2d3)*np.cos(princOrient)/3.0
s2d3 = i1d3/3.0 + 2*np.sqrt(i1d3*i1d3-3*i2d3)*np.cos(princOrient-2*np.pi/3.0)/3.0
s3d3 = i1d3/3.0 + 2*np.sqrt(i1d3*i1d3-3*i2d3)*np.cos(princOrient-4*np.pi/3.0)/3.0
except Exception:
s1d3 = s1d2
s2d3 = s2d2
s3d3 = 0.0
current3DS1.append(s1d3)
current3DS2.append(s2d3)
current3DS3.append(s3d3)
current3DI1.append(i1d3)
current3DI2.append(i2d3)
current3DI3.append(i3d3)
current3DSMises.append(smises3d)
current3DSaver.append(saverd3)
current2DS1.append(s1d2)
current2DS2.append(s2d2)
current2DI1.append(i1d2)
current2DI2.append(i2d2)
current2DSMises.append(smises2d)
current2DSaver.append(saverd2)
Srr.append(currentSrr)
Stt.append(currentStt)
Srt.append(currentSrt)
S1D3.append(current3DS1)
S2D3.append(current3DS2)
S3D3.append(current3DS3)
S1D2.append(current2DS1)
S2D2.append(current2DS2)
I1D3.append(current3DI1)
I2D3.append(current3DI2)
I3D3.append(current3DI3)
I1D2.append(current2DI1)
I2D2.append(current2DI2)
SMisesD3.append(current3DSMises)
SaverD3.append(current3DSaver)
SMisesD2.append(current2DSMises)
SaverD2.append(current2DSaver)
currentSrr = []
currentStt = []
currentSrt = []
current3DS1 = []
current3DS2 = []
current3DS3 = []
current3DI1 = []
current3DI2 = []
current3DI3 = []
current3DSMises = []
current3DSaver = []
current2DS1 = []
current2DS2 = []
current2DI1 = []
current2DI2 = []
current2DSMises = []
current2DSaver = []
pathVariableName = 'x [mum]'
pathStartVariableName = 'yi [mum]'
pathEndVariableName = 'yf [mum]'
pathCoordinateName = 'y [mum]'
datasheetName = 'Values, deltatheta=' + subFolder.split('deltatheta')[-1].replace('_','.')
verticalpathsSheetnames.append(datasheetName)
numberOfVerticalpaths.append(len(pathVariables))
worksheet = verticalpathsWorkbook.add_worksheet(datasheetName.decode('utf-8'))
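# Worksheet layout: each path occupies a block of 25 columns, with the path
# metadata in rows 0-1, the column headers in row 2 and the data from row 3 on.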
for p, pathVariable in enumerate(pathVariables):
worksheet.write(0,p*25,pathVariableName,verticalpathsstringFormat)
worksheet.write(1,p*25,pathVariable,verticalpathsnumberFormatReduced)
worksheet.write(0,p*25+1,pathStartVariableName,verticalpathsstringFormat)
worksheet.write(1,p*25+1,pathStartVariables[p],verticalpathsnumberFormat)
worksheet.write(0,p*25+2,pathEndVariableName,verticalpathsstringFormat)
worksheet.write(1,p*25+2,pathEndVariables[p],verticalpathsnumberFormat)
worksheet.write(2,p*25,pathCoordinateName,verticalpathsstringFormat)
worksheet.write(2,p*25+1,'Norm ' + pathCoordinateName,verticalpathsstringFormat)
worksheet.write(2,p*25+2,'Sxx [MPa]',verticalpathsstringFormat)
worksheet.write(2,p*25+3,'Syy [MPa]',verticalpathsstringFormat)
worksheet.write(2,p*25+4,'Szz [MPa]',verticalpathsstringFormat)
worksheet.write(2,p*25+5,'Sxy [MPa]',verticalpathsstringFormat)
worksheet.write(2,p*25+6,'Szx [MPa]',verticalpathsstringFormat)
worksheet.write(2,p*25+7,'Syz [MPa]',verticalpathsstringFormat)
worksheet.write(2,p*25+8,'Srr [MPa]',verticalpathsstringFormat)
worksheet.write(2,p*25+9,'Stt [MPa]',verticalpathsstringFormat)
worksheet.write(2,p*25+10,'Srt [MPa]',verticalpathsstringFormat)
worksheet.write(2,p*25+11,'S1_3D [MPa]',verticalpathsstringFormat)
worksheet.write(2,p*25+12,'S2_3D [MPa]',verticalpathsstringFormat)
worksheet.write(2,p*25+13,'S3_3D [MPa]',verticalpathsstringFormat)
worksheet.write(2,p*25+14,'S1_2D [MPa]',verticalpathsstringFormat)
worksheet.write(2,p*25+15,'S2_2D [MPa]',verticalpathsstringFormat)
worksheet.write(2,p*25+16,'Smises_3D [MPa]',verticalpathsstringFormat)
worksheet.write(2,p*25+17,'Smises_2D [MPa]',verticalpathsstringFormat)
worksheet.write(2,p*25+18,'Saverage_3D [MPa]',verticalpathsstringFormat)
worksheet.write(2,p*25+19,'Saverage_2D [MPa]',verticalpathsstringFormat)
worksheet.write(2,p*25+20,'I1_3D [MPa]',verticalpathsstringFormat)
worksheet.write(2,p*25+21,'I2_3D [MPa^2]',verticalpathsstringFormat)
worksheet.write(2,p*25+22,'I3_3D [MPa^3]',verticalpathsstringFormat)
worksheet.write(2,p*25+23,'I1_2D [MPa]',verticalpathsstringFormat)
worksheet.write(2,p*25+24,'I2_2D [MPa^2]',verticalpathsstringFormat)
measureNum = np.min([len(Sxx[p]),len(Syy[p]),len(Szz[p]),len(Sxy[p])])
print(' number of path points = ' + str(measureNum))
for c in range(0,measureNum):
coord = pathCoords[p][c]
worksheet.write(3+c,p*25,coord,verticalpathsnumberFormat)
worksheet.write(3+c,p*25+1,pathNormCoords[p][c],verticalpathsnumberFormat)
worksheet.write(3+c,p*25+2,Sxx[p][c],verticalpathsnumberFormat)
worksheet.write(3+c,p*25+3,Syy[p][c],verticalpathsnumberFormat)
worksheet.write(3+c,p*25+4,Szz[p][c],verticalpathsnumberFormat)
worksheet.write(3+c,p*25+5,Sxy[p][c],verticalpathsnumberFormat)
worksheet.write(3+c,p*25+6,0.0,verticalpathsnumberFormat)
worksheet.write(3+c,p*25+7,0.0,verticalpathsnumberFormat)
worksheet.write(3+c,p*25+8,Srr[p][c],verticalpathsnumberFormat)
worksheet.write(3+c,p*25+9,Stt[p][c],verticalpathsnumberFormat)
worksheet.write(3+c,p*25+10,Srt[p][c],verticalpathsnumberFormat)
worksheet.write(3+c,p*25+11,S1D3[p][c],verticalpathsnumberFormat)
worksheet.write(3+c,p*25+12,S2D3[p][c],verticalpathsnumberFormat)
worksheet.write(3+c,p*25+13,S3D3[p][c],verticalpathsnumberFormat)
worksheet.write(3+c,p*25+14,S1D2[p][c],verticalpathsnumberFormat)
worksheet.write(3+c,p*25+15,S2D2[p][c],verticalpathsnumberFormat)
worksheet.write(3+c,p*25+16,SMisesD3[p][c],verticalpathsnumberFormat)
worksheet.write(3+c,p*25+17,SMisesD2[p][c],verticalpathsnumberFormat)
worksheet.write(3+c,p*25+18,SaverD3[p][c],verticalpathsnumberFormat)
worksheet.write(3+c,p*25+19,SaverD2[p][c],verticalpathsnumberFormat)
worksheet.write(3+c,p*25+20,I1D3[p][c],verticalpathsnumberFormat)
worksheet.write(3+c,p*25+21,I2D3[p][c],verticalpathsnumberFormat)
worksheet.write(3+c,p*25+22,I3D3[p][c],verticalpathsnumberFormat)
worksheet.write(3+c,p*25+23,I1D2[p][c],verticalpathsnumberFormat)
worksheet.write(3+c,p*25+24,I2D2[p][c],verticalpathsnumberFormat)
graphsheetName = 'Graphs, deltatheta=' + subFolder.split('deltatheta')[-1].replace('_','.')
graphworksheet = verticalpathsWorkbook.add_worksheet(graphsheetName.decode('utf-8'))
print(' --> Writing worksheet')
print(' ' + graphsheetName)
print(' ')
variableNames = ['Sxx [MPa]','Syy [MPa]','Szz [MPa]','Sxy [MPa]','Szx [MPa]','Syz [MPa]','Srr [MPa]','Stt [MPa]','Srt [MPa]','S1_3D [MPa]','S2_3D [MPa]','S3_3D [MPa]','S1_2D [MPa]','S2_2D [MPa]','Smises_3D [MPa]','Smises_2D [MPa]','Saverage_3D [MPa]','Saverage_2D [MPa]','I1_3D [MPa]','I2_3D [MPa^2]','I3_3D [MPa^3]','I1_2D [MPa]','I2_2D [MPa^2]']
for v,variableName in enumerate(variableNames):
chartA = verticalpathsWorkbook.add_chart({'type': 'scatter','subtype': 'straight_with_markers'})
print(' Chart ' + str(v+1) + '.A')
print(' ')
for p, pathVariable in enumerate(pathVariables):
dataLength = len(pathCoords[p])
if v==0:
verticalpathsDatalengths.append(dataLength)
chartA.add_series({
'name': pathVariableName + '=' + str(pathVariable),
'categories': [datasheetName,3,25*p,dataLength,25*p],
'values': [datasheetName,3,25*p+2+v,dataLength,25*p+2+v],
})
print(' Series ' + str(p+1) + ': ' + pathVariableName + '=' + str(pathVariable))
print(' ')
chartA.set_title ({'name': variableName + ' vs path coordinates'})
chartA.set_x_axis({'name': pathCoordinateName})
chartA.set_y_axis({'name': variableName})
print(' Title: ' + variableName + ' vs path coordinates')
print(' x axis: ' + pathCoordinateName)
print(' y axis: ' + variableName)
print(' ')
print(' ')
graphworksheet.insert_chart(v*20,0,chartA)
chartB = verticalpathsWorkbook.add_chart({'type': 'scatter','subtype': 'straight_with_markers'})
print(' Chart ' + str(v+1) + '.B')
print(' ')
for p, pathVariable in enumerate(pathVariables):
dataLength = len(pathCoords[p])
chartB.add_series({
'name': pathVariableName + '=' + str(pathVariable),
'categories': [datasheetName,3,25*p+1,dataLength,25*p+1],
'values': [datasheetName,3,25*p+2+v,dataLength,25*p+2+v],
})
print(' Series ' + str(p+1) + ': ' + pathVariableName + '=' + str(pathVariable))
print(' ')
chartB.set_title ({'name': variableName + ' vs normalized path coordinates'})
chartB.set_x_axis({'name': 'Norm ' + pathCoordinateName.split(' ')[0] + ' [-]'})
chartB.set_y_axis({'name': variableName})
print(' Title: ' + variableName + ' vs normalized path coordinates')
print(' x axis: ' + 'Norm ' + pathCoordinateName.split(' ')[0] + ' [-]')
print(' y axis: ' + variableName)
print(' ')
print(' ')
graphworksheet.insert_chart(v*20,30,chartB)
Sxx = []
Syy = []
Szz = []
Sxy = []
Szx = []
Syz = []
Srr = []
Stt = []
Srt = []
S1D3 = []
S2D3 = []
S3D3 = []
I1D3 = []
I2D3 = []
I3D3 = []
SMisesD3 = []
SaverD3 = []
S1D2 = []
S2D2 = []
I1D2 = []
I2D2 = []
SMisesD2 = []
SaverD2 = []
pathVariables = []
pathStartVariables = []
pathEndVariables = []
pathCoords = []
pathNormCoords = []
if subFolder.split('/')[-1] + '-strainsradialpaths' + '.csv' in listdir(subFolder):
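# Radial paths (strains): same post-processing as for the stresses, applied to
# the strain components EEij listed in the radial-path summary CSV.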
print('----------------->')
print('----------------->')
print(' Analysis of radial paths for folder ' + subFolder)
print(' ')
with open(radialpathsStrainSummary,'r') as csv:
lines = csv.readlines()
EExx = []
EEyy = []
EEzz = []
EExy = []
EEzx = []
EEyz = []
EErr = []
EEtt = []
EErt = []
EE1D3 = []
EE2D3 = []
EE3D3 = []
EEI1D3 = []
EEI2D3 = []
EEI3D3 = []
EEMisesD3 = []
EEaverD3 = []
EE1D2 = []
EE2D2 = []
EEI1D2 = []
EEI2D2 = []
EEMisesD2 = []
EEaverD2 = []
pathVariables = []
pathStartVariables = []
pathEndVariables = []
pathCoords = []
pathNormCoords = []
for line in lines[1:]:
strainComp = line.replace('\n','').replace(' ','').split(',')[0]
pathVariable = float(line.replace('\n','').replace(' ','').split(',')[1])
pathStartVariable = float(line.replace('\n','').replace(' ','').split(',')[2])
pathEndVariable = float(line.replace('\n','').replace(' ','').split(',')[3])
datfilePath = join(subFolder,line.replace('\n','').replace(' ','').split(',')[-1])
print(' Reading component ' + strainComp)
print(' ' + 'for radial path at ' + str(pathVariable) + ' deg')
print(' ' + 'starting at ' + str(pathStartVariable) + ' mum')
print(' ' + 'ending at ' + str(pathEndVariable) + ' mum')
print(' ')
with open(datfilePath,'r') as dat:
datLines = dat.readlines()
print(' --> File read')
print(' ')
currentxyData = []
for datLine in datLines:
if len(datLine.replace('\n','').replace(' ',''))>0 and 'X' not in datLine:
lineParts = datLine.replace('\n','').split(' ')
rowVec = []
for linePart in lineParts:
if linePart!='':
rowVec.append(float(linePart))
currentxyData.append(rowVec)
print(' --> Lines parsed')
print(' ')
normxData = []
xData = []
yData = []
for xyPair in currentxyData:
normxData.append(xyPair[0])
xData.append(pathStartVariable+(pathEndVariable-pathStartVariable)*xyPair[0])
yData.append(xyPair[1])
print(' --> Data categorized in independent and dependent variables.')
print(' ')
if 'EE11' in strainComp:
EExx.append(yData)
pathCoords.append(xData)
pathNormCoords.append(normxData)
pathVariables.append(pathVariable)
pathStartVariables.append(pathStartVariable)
pathEndVariables.append(pathEndVariable)
print(' --> Strain component is EE11.')
print(' ')
elif 'EE22' in strainComp:
EEyy.append(yData)
print(' --> Strain component is EE22.')
print(' ')
elif 'EE23' in strainComp:
EEyz.append(yData)
print(' --> Strain component is EE23.')
print(' ')
elif 'EE12' in strainComp:
EExy.append(yData)
print(' --> Strain component is EE12.')
print(' ')
elif 'EE13' in strainComp:
EEzx.append(yData)
print(' --> Strain component is EE13.')
print(' ')
elif 'EE33' in strainComp:
EEzz.append(yData)
EEzx.append(0.0)
EEyz.append(0.0)
print(' --> Strain component is EE33.')
print(' ')
currentEExx = EExx[-1]
currentEEyy = EEyy[-1]
currentEEzz = yData
currentEExy = EExy[-1]
currentEEzx = 0.0
currentEEyz = 0.0
currentEErr = []
currentEEtt = []
currentEErt = []
current3DEE1 = []
current3DEE2 = []
current3DEE3 = []
current3DEEI1 = []
current3DEEI2 = []
current3DEEI3 = []
current3DEEMises = []
current3DEEaver = []
current2DEE1 = []
current2DEE2 = []
current2DEEI1 = []
current2DEEI2 = []
current2DEEMises = []
current2DEEaver = []
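# Along a radial path the rotation angle is constant and equal to the path
# angle, so it is computed once per path (degrees to radians).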
rotateBy = pathVariable*np.pi/180.0
cosRot = np.cos(rotateBy)
sinRot = np.sin(rotateBy)
nstrainPoints = np.min([len(currentEExx),len(currentEEyy),len(currentEEzz),len(currentEExy)])
for s in range(0,nstrainPoints):
eexx = currentEExx[s]
eeyy = currentEEyy[s]
eezz = currentEEzz[s]
eexy = currentEExy[s]
eezx = 0.0
eeyz = 0.0
eerr = eexx*cosRot*cosRot+eeyy*sinRot*sinRot+2*eexy*cosRot*sinRot
eett = eexx*sinRot*sinRot+eeyy*cosRot*cosRot-2*eexy*cosRot*sinRot
eert = -eexx*cosRot*sinRot+eeyy*cosRot*sinRot+eexy*(cosRot*cosRot-sinRot*sinRot)
currentEErr.append(eerr)
currentEEtt.append(eett)
currentEErt.append(eert)
eei1d2 = eexx + eeyy
eei1d3 = eexx + eeyy + eezz
eei2d2 = eexx*eeyy - eexy*eexy
eei2d3 = eexx*eeyy + eeyy*eezz + eexx*eezz - eexy*eexy - eeyz*eeyz - eezx*eezx
eei3d3 = eexx*eeyy*eezz - eexx*eeyz*eeyz - eeyy*eezx*eezx - eezz*eexy*eexy + 2*eexy*eeyz*eezx
eeaverd2 = eei1d2/2.0
eeaverd3 = eei1d3/3.0
eemises2d = np.sqrt(eexx*eexx + eeyy*eeyy - eexx*eeyy + 3*eexy*eexy)
eemises3d = np.sqrt(eexx*eexx + eeyy*eeyy + eezz*eezz - eexx*eeyy - eeyy*eezz - eexx*eezz + 3*(eexy*eexy + eeyz*eeyz + eezx*eezx))
ee1d2 = 0.5*(eexx+eeyy)+np.sqrt((0.5*(eexx-eeyy))*(0.5*(eexx-eeyy))+eexy*eexy)
ee2d2 = 0.5*(eexx+eeyy)-np.sqrt((0.5*(eexx-eeyy))*(0.5*(eexx-eeyy))+eexy*eexy)
try:
princOrient = np.arccos((2*eei1d3*eei1d3*eei1d3-9*eei1d3*eei2d3+27*eei3d3)/(2*np.sqrt((eei1d3*eei1d3-3*eei2d3)*(eei1d3*eei1d3-3*eei2d3)*(eei1d3*eei1d3-3*eei2d3))))/3.0
ee1d3 = eei1d3/3.0 + 2*np.sqrt(eei1d3*eei1d3-3*eei2d3)*np.cos(princOrient)/3.0
ee2d3 = eei1d3/3.0 + 2*np.sqrt(eei1d3*eei1d3-3*eei2d3)*np.cos(princOrient-2*np.pi/3.0)/3.0
ee3d3 = eei1d3/3.0 + 2*np.sqrt(eei1d3*eei1d3-3*eei2d3)*np.cos(princOrient-4*np.pi/3.0)/3.0
except Exception:
ee1d3 = ee1d2
ee2d3 = ee2d2
ee3d3 = 0.0
current3DEE1.append(ee1d3)
current3DEE2.append(ee2d3)
current3DEE3.append(ee3d3)
current3DEEI1.append(eei1d3)
current3DEEI2.append(eei2d3)
current3DEEI3.append(eei3d3)
current3DEEMises.append(eemises3d)
current3DEEaver.append(eeaverd3)
current2DEE1.append(ee1d2)
current2DEE2.append(ee2d2)
current2DEEI1.append(eei1d2)
current2DEEI2.append(eei2d2)
current2DEEMises.append(eemises2d)
current2DEEaver.append(eeaverd2)
EErr.append(currentEErr)
EEtt.append(currentEEtt)
EErt.append(currentEErt)
EE1D3.append(current3DEE1)
EE2D3.append(current3DEE2)
EE3D3.append(current3DEE3)
EE1D2.append(current2DEE1)
EE2D2.append(current2DEE2)
EEI1D3.append(current3DEEI1)
EEI2D3.append(current3DEEI2)
EEI3D3.append(current3DEEI3)
EEI1D2.append(current2DEEI1)
EEI2D2.append(current2DEEI2)
EEMisesD3.append(current3DEEMises)
EEaverD3.append(current3DEEaver)
EEMisesD2.append(current2DEEMises)
EEaverD2.append(current2DEEaver)
currentEErr = []
currentEEtt = []
currentEErt = []
current3DEE1 = []
current3DEE2 = []
current3DEE3 = []
current3DEEI1 = []
current3DEEI2 = []
current3DEEI3 = []
current3DEEMises = []
current3DEEaver = []
current2DEE1 = []
current2DEE2 = []
current2DEEI1 = []
current2DEEI2 = []
current2DEEMises = []
current2DEEaver = []
pathVariableName = 'pathAngle [deg]'
pathStartVariableName = 'Ri [mum]'
pathEndVariableName = 'Rf [mum]'
pathCoordinateName = 'R [mum]'
datasheetName = 'Values, deltatheta=' + subFolder.split('deltatheta')[-1].replace('_','.')
radialpathsStrainSheetnames.append(datasheetName)
numberOfRadialpathsStrain.append(len(pathVariables))
worksheet = radialpathsStrainWorkbook.add_worksheet(datasheetName.decode('utf-8'))
print(' --> Writing worksheet')
print(' ' + datasheetName)
print(' ')
for p, pathVariable in enumerate(pathVariables):
print(' pathAngle = ' + str(pathVariable) + ' deg')
worksheet.write(0,p*25,pathVariableName,radialpathsStrainstringFormat)
worksheet.write(1,p*25,pathVariable,radialpathsStrainnumberFormatReduced)
worksheet.write(0,p*25+1,pathStartVariableName,radialpathsStrainstringFormat)
worksheet.write(1,p*25+1,pathStartVariables[p],radialpathsStrainnumberFormat)
worksheet.write(0,p*25+2,pathEndVariableName,radialpathsStrainstringFormat)
worksheet.write(1,p*25+2,pathEndVariables[p],radialpathsStrainnumberFormat)
worksheet.write(2,p*25,pathCoordinateName,radialpathsStrainstringFormat)
worksheet.write(2,p*25+1,'Norm ' + pathCoordinateName,radialpathsStrainstringFormat)
worksheet.write(2,p*25+2,'EExx [mum/mum]',radialpathsStrainstringFormat)
worksheet.write(2,p*25+3,'EEyy [mum/mum]',radialpathsStrainstringFormat)
worksheet.write(2,p*25+4,'EEzz [mum/mum]',radialpathsStrainstringFormat)
worksheet.write(2,p*25+5,'EExy [mum/mum]',radialpathsStrainstringFormat)
worksheet.write(2,p*25+6,'EEzx [mum/mum]',radialpathsStrainstringFormat)
worksheet.write(2,p*25+7,'EEyz [mum/mum]',radialpathsStrainstringFormat)
worksheet.write(2,p*25+8,'EErr [mum/mum]',radialpathsStrainstringFormat)
worksheet.write(2,p*25+9,'EEtt [mum/mum]',radialpathsStrainstringFormat)
worksheet.write(2,p*25+10,'EErt [mum/mum]',radialpathsStrainstringFormat)
worksheet.write(2,p*25+11,'EE1_3D [mum/mum]',radialpathsStrainstringFormat)
worksheet.write(2,p*25+12,'EE2_3D [mum/mum]',radialpathsStrainstringFormat)
worksheet.write(2,p*25+13,'EE3_3D [mum/mum]',radialpathsStrainstringFormat)
worksheet.write(2,p*25+14,'EE1_2D [mum/mum]',radialpathsStrainstringFormat)
worksheet.write(2,p*25+15,'EE2_2D [mum/mum]',radialpathsStrainstringFormat)
worksheet.write(2,p*25+16,'EEmises_3D [mum/mum]',radialpathsStrainstringFormat)
worksheet.write(2,p*25+17,'EEmises_2D [mum/mum]',radialpathsStrainstringFormat)
worksheet.write(2,p*25+18,'EEaverage_3D [mum/mum]',radialpathsStrainstringFormat)
worksheet.write(2,p*25+19,'EEaverage_2D [mum/mum]',radialpathsStrainstringFormat)
worksheet.write(2,p*25+20,'EEI1_3D [mum/mum]',radialpathsStrainstringFormat)
worksheet.write(2,p*25+21,'EEI2_3D [(mum/mum)^2]',radialpathsStrainstringFormat)
worksheet.write(2,p*25+22,'EEI3_3D [(mum/mum)^3]',radialpathsStrainstringFormat)
worksheet.write(2,p*25+23,'EEI1_2D [mum/mum]',radialpathsStrainstringFormat)
worksheet.write(2,p*25+24,'EEI2_2D [(mum/mum)^2]',radialpathsStrainstringFormat)
measureNum = np.min([len(EExx[p]),len(EEyy[p]),len(EEzz[p]),len(EExy[p])])
print(' number of path points = ' + str(measureNum))
for c in range(0,measureNum):
coord = pathCoords[p][c]
worksheet.write(3+c,p*25,coord,radialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+1,pathNormCoords[p][c],radialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+2,EExx[p][c],radialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+3,EEyy[p][c],radialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+4,EEzz[p][c],radialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+5,EExy[p][c],radialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+6,0.0,radialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+7,0.0,radialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+8,EErr[p][c],radialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+9,EEtt[p][c],radialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+10,EErt[p][c],radialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+11,EE1D3[p][c],radialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+12,EE2D3[p][c],radialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+13,EE3D3[p][c],radialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+14,EE1D2[p][c],radialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+15,EE2D2[p][c],radialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+16,EEMisesD3[p][c],radialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+17,EEMisesD2[p][c],radialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+18,EEaverD3[p][c],radialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+19,EEaverD2[p][c],radialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+20,EEI1D3[p][c],radialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+21,EEI2D3[p][c],radialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+22,EEI3D3[p][c],radialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+23,EEI1D2[p][c],radialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+24,EEI2D2[p][c],radialpathsStrainnumberFormat)
graphsheetName = 'Graphs, deltatheta=' + subFolder.split('deltatheta')[-1].replace('_','.')
graphworksheet = radialpathsStrainWorkbook.add_worksheet(graphsheetName.decode('utf-8'))
print(' --> Writing worksheet')
print(' ' + graphsheetName)
print(' ')
variableNames = ['EExx [mum/mum]','EEyy [mum/mum]','EEzz [mum/mum]','EExy [mum/mum]','EEzx [mum/mum]','EEyz [mum/mum]','EErr [mum/mum]','EEtt [mum/mum]','EErt [mum/mum]','EE1_3D [mum/mum]','EE2_3D [mum/mum]','EE3_3D [mum/mum]','EE1_2D [mum/mum]','EE2_2D [mum/mum]','EEmises_3D [mum/mum]','EEmises_2D [mum/mum]','EEaver_3D [mum/mum]','EEaver_2D [mum/mum]','EEI1_3D [mum/mum]','EEI2_3D [(mum/mum)^2]','EEI3_3D [(mum/mum)^3]','EEI1_2D [mum/mum]','EEI2_2D [(mum/mum)^2]']
for v,variableName in enumerate(variableNames):
chartA = radialpathsStrainWorkbook.add_chart({'type': 'scatter','subtype': 'straight_with_markers'})
print(' Chart ' + str(v+1) + '.A')
print(' ')
for p, pathVariable in enumerate(pathVariables):
dataLength = len(pathCoords[p])
if v==0:
radialpathsStrainDatalengths.append(dataLength)
chartA.add_series({
'name': pathVariableName + '=' + str(pathVariable),
'categories': [datasheetName,3,25*p,dataLength,25*p],
'values': [datasheetName,3,25*p+2+v,dataLength,25*p+2+v],
})
print(' Series ' + str(p+1) + ': ' + pathVariableName + '=' + str(pathVariable))
print(' ')
chartA.set_title ({'name': variableName + ' vs path coordinates'})
chartA.set_x_axis({'name': pathCoordinateName})
chartA.set_y_axis({'name': variableName})
print(' Title: ' + variableName + ' vs path coordinates')
print(' x axis: ' + pathCoordinateName)
print(' y axis: ' + variableName)
print(' ')
print(' ')
graphworksheet.insert_chart(v*20,0,chartA)
chartB = radialpathsStrainWorkbook.add_chart({'type': 'scatter','subtype': 'straight_with_markers'})
print(' Chart ' + str(v+1) + '.B')
print(' ')
for p, pathVariable in enumerate(pathVariables):
dataLength = len(pathCoords[p])
chartB.add_series({
'name': pathVariableName + '=' + str(pathVariable),
'categories': [datasheetName,3,25*p+1,dataLength,25*p+1],
'values': [datasheetName,3,25*p+2+v,dataLength,25*p+2+v],
})
print(' Series ' + str(p+1) + ': ' + pathVariableName + '=' + str(pathVariable))
print(' ')
chartB.set_title ({'name': variableName + ' vs normalized path coordinates'})
chartB.set_x_axis({'name': 'Norm ' + pathCoordinateName.split(' ')[0] + ' [-]'})
chartB.set_y_axis({'name': variableName})
print(' Title: ' + variableName + ' vs normalized path coordinates')
print(' x axis: ' + 'Norm ' + pathCoordinateName.split(' ')[0] + ' [-]')
print(' y axis: ' + variableName)
print(' ')
print(' ')
graphworksheet.insert_chart(v*20,30,chartB)
EExx = []
EEyy = []
EEzz = []
EExy = []
EEzx = []
EEyz = []
EErr = []
EEtt = []
EErt = []
EE1D3 = []
EE2D3 = []
EE3D3 = []
EEI1D3 = []
EEI2D3 = []
EEI3D3 = []
EEMisesD3 = []
EEaverD3 = []
EE1D2 = []
EE2D2 = []
EEI1D2 = []
EEI2D2 = []
EEMisesD2 = []
EEaverD2 = []
pathVariables = []
pathStartVariables = []
pathEndVariables = []
pathCoords = []
pathNormCoords = []
print('<-----------------')
print('<-----------------')
if subFolder.split('/')[-1] + '-strainscircumferentialpaths' + '.csv' in listdir(subFolder):
print(' Analysis of circumferential paths for folder ' + subFolder)
print(' ')
with open(circumferentialpathsStrainSummary,'r') as csv:
lines = csv.readlines()
EExx = []
EEyy = []
EEzz = []
EExy = []
EEzx = []
EEyz = []
EErr = []
EEtt = []
EErt = []
EE1D3 = []
EE2D3 = []
EE3D3 = []
EEI1D3 = []
EEI2D3 = []
EEI3D3 = []
EEMisesD3 = []
EEaverD3 = []
EE1D2 = []
EE2D2 = []
EEI1D2 = []
EEI2D2 = []
EEMisesD2 = []
EEaverD2 = []
pathVariables = []
pathStartVariables = []
pathEndVariables = []
pathCoords = []
pathNormCoords = []
for line in lines[1:]:
strainComp = line.replace('\n','').replace(' ','').split(',')[0]
pathVariable = float(line.replace('\n','').replace(' ','').split(',')[1])
pathStartVariable = float(line.replace('\n','').replace(' ','').split(',')[2])
pathEndVariable = float(line.replace('\n','').replace(' ','').split(',')[3])
datfilePath = join(subFolder,line.replace('\n','').replace(' ','').split(',')[-1])
print(' Reading component ' + strainComp)
print(' ' + 'for circumferential path at ' + str(pathVariable) + ' mum')
print(' ' + 'starting at ' + str(pathStartVariable) + ' deg')
print(' ' + 'ending at ' + str(pathEndVariable) + ' deg')
print(' ')
with open(datfilePath,'r') as dat:
datLines = dat.readlines()
currentxyData = []
for datLine in datLines:
if len(datLine.replace('\n','').replace(' ',''))>0 and 'X' not in datLine:
lineParts = datLine.replace('\n','').split(' ')
rowVec = []
for linePart in lineParts:
if linePart!='':
rowVec.append(float(linePart))
currentxyData.append(rowVec)
normxData = []
xData = []
yData = []
for xyPair in currentxyData:
normxData.append(xyPair[0])
xData.append(pathStartVariable+(pathEndVariable-pathStartVariable)*xyPair[0])
yData.append(xyPair[1])
if 'EE11' in strainComp:
EExx.append(yData)
pathCoords.append(xData)
pathNormCoords.append(normxData)
pathVariables.append(pathVariable)
pathStartVariables.append(pathStartVariable)
pathEndVariables.append(pathEndVariable)
print(' --> Strain component is EE11.')
print(' ')
elif 'EE22' in strainComp:
EEyy.append(yData)
print(' --> Strain component is EE22.')
print(' ')
elif 'EE23' in strainComp:
EEyz.append(yData)
print(' --> Strain component is EE23.')
print(' ')
elif 'EE12' in strainComp:
EExy.append(yData)
print(' --> Strain component is EE12.')
print(' ')
elif 'EE13' in strainComp:
EEzx.append(yData)
print(' --> Strain component is EE13.')
print(' ')
elif 'EE33' in strainComp:
EEzz.append(yData)
EEzx.append(0.0)
EEyz.append(0.0)
print(' --> Strain component is EE33.')
print(' ')
currentEExx = EExx[-1]
currentEEyy = EEyy[-1]
currentEEzz = yData
currentEExy = EExy[-1]
currentEEzx = 0.0
currentEEyz = 0.0
currentEErr = []
currentEEtt = []
currentEErt = []
current3DEE1 = []
current3DEE2 = []
current3DEE3 = []
current3DEEI1 = []
current3DEEI2 = []
current3DEEI3 = []
current3DEEMises = []
current3DEEaver = []
current2DEE1 = []
current2DEE2 = []
current2DEEI1 = []
current2DEEI2 = []
current2DEEMises = []
current2DEEaver = []
nstrainPoints = np.min([len(currentEExx),len(currentEEyy),len(currentEEzz),len(currentEExy)])
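# Along a circumferential path the angular coordinate changes point by point,
# so the rotation angle is taken from the path coordinate (degrees to radians).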
for s in range(0,nstrainPoints):
rotateBy = pathCoords[-1][s]*np.pi/180.0
cosRot = np.cos(rotateBy)
sinRot = np.sin(rotateBy)
eexx = currentEExx[s]
eeyy = currentEEyy[s]
eezz = currentEEzz[s]
eexy = currentEExy[s]
eezx = 0.0
eeyz = 0.0
eerr = eexx*cosRot*cosRot+eeyy*sinRot*sinRot+2*eexy*cosRot*sinRot
eett = eexx*sinRot*sinRot+eeyy*cosRot*cosRot-2*eexy*cosRot*sinRot
eert = -eexx*cosRot*sinRot+eeyy*cosRot*sinRot+eexy*(cosRot*cosRot-sinRot*sinRot)
currentEErr.append(eerr)
currentEEtt.append(eett)
currentEErt.append(eert)
eei1d2 = eexx + eeyy
eei1d3 = eexx + eeyy + eezz
eei2d2 = eexx*eeyy - eexy*eexy
eei2d3 = eexx*eeyy + eeyy*eezz + eexx*eezz - eexy*eexy - eeyz*eeyz - eezx*eezx
eei3d3 = eexx*eeyy*eezz - eexx*eeyz*eeyz - eeyy*eezx*eezx - eezz*eexy*eexy + 2*eexy*eeyz*eezx
eeaverd2 = eei1d2/2.0
eeaverd3 = eei1d3/3.0
eemises2d = np.sqrt(eexx*eexx + eeyy*eeyy - eexx*eeyy + 3*eexy*eexy)
eemises3d = np.sqrt(eexx*eexx + eeyy*eeyy + eezz*eezz - eexx*eeyy - eeyy*eezz - eexx*eezz + 3*(eexy*eexy + eeyz*eeyz + eezx*eezx))
ee1d2 = 0.5*(eexx+eeyy)+np.sqrt((0.5*(eexx-eeyy))*(0.5*(eexx-eeyy))+eexy*eexy)
ee2d2 = 0.5*(eexx+eeyy)-np.sqrt((0.5*(eexx-eeyy))*(0.5*(eexx-eeyy))+eexy*eexy)
try:
princOrient = np.arccos((2*eei1d3*eei1d3*eei1d3-9*eei1d3*eei2d3+27*eei3d3)/(2*np.sqrt((eei1d3*eei1d3-3*eei2d3)*(eei1d3*eei1d3-3*eei2d3)*(eei1d3*eei1d3-3*eei2d3))))/3.0
ee1d3 = eei1d3/3.0 + 2*np.sqrt(eei1d3*eei1d3-3*eei2d3)*np.cos(princOrient)/3.0
ee2d3 = eei1d3/3.0 + 2*np.sqrt(eei1d3*eei1d3-3*eei2d3)*np.cos(princOrient-2*np.pi/3.0)/3.0
ee3d3 = eei1d3/3.0 + 2*np.sqrt(eei1d3*eei1d3-3*eei2d3)*np.cos(princOrient-4*np.pi/3.0)/3.0
except Exception:
ee1d3 = ee1d2
ee2d3 = ee2d2
ee3d3 = 0.0
current3DEE1.append(ee1d3)
current3DEE2.append(ee2d3)
current3DEE3.append(ee3d3)
current3DEEI1.append(eei1d3)
current3DEEI2.append(eei2d3)
current3DEEI3.append(eei3d3)
current3DEEMises.append(eemises3d)
current3DEEaver.append(eeaverd3)
current2DEE1.append(ee1d2)
current2DEE2.append(ee2d2)
current2DEEI1.append(eei1d2)
current2DEEI2.append(eei2d2)
current2DEEMises.append(eemises2d)
current2DEEaver.append(eeaverd2)
EErr.append(currentEErr)
EEtt.append(currentEEtt)
EErt.append(currentEErt)
EE1D3.append(current3DEE1)
EE2D3.append(current3DEE2)
EE3D3.append(current3DEE3)
EE1D2.append(current2DEE1)
EE2D2.append(current2DEE2)
EEI1D3.append(current3DEEI1)
EEI2D3.append(current3DEEI2)
EEI3D3.append(current3DEEI3)
EEI1D2.append(current2DEEI1)
EEI2D2.append(current2DEEI2)
EEMisesD3.append(current3DEEMises)
EEaverD3.append(current3DEEaver)
EEMisesD2.append(current2DEEMises)
EEaverD2.append(current2DEEaver)
currentEErr = []
currentEEtt = []
currentEErt = []
current3DEE1 = []
current3DEE2 = []
current3DEE3 = []
current3DEEI1 = []
current3DEEI2 = []
current3DEEI3 = []
current3DEEMises = []
current3DEEaver = []
current2DEE1 = []
current2DEE2 = []
current2DEEI1 = []
current2DEEI2 = []
current2DEEMises = []
current2DEEaver = []
pathVariableName = 'pathRadius [mum]'
pathStartVariableName = 'startAngle [deg]'
pathEndVariableName = 'endAngle [deg]'
pathCoordinateName = 'angle [deg]'
datasheetName = 'Values, deltatheta=' + subFolder.split('deltatheta')[-1].replace('_','.')
circumferentialpathsStrainSheetnames.append(datasheetName)
numberOfCircumferentialpaths.append(len(pathVariables))
worksheet = circumferentialpathsStrainWorkbook.add_worksheet(datasheetName.decode('utf-8'))
for p, pathVariable in enumerate(pathVariables):
worksheet.write(0,p*25,pathVariableName,circumferentialpathsStrainstringFormat)
worksheet.write(1,p*25,pathVariable,circumferentialpathsStrainnumberFormatReduced)
worksheet.write(0,p*25+1,pathStartVariableName,circumferentialpathsStrainstringFormat)
worksheet.write(1,p*25+1,pathStartVariables[p],circumferentialpathsStrainnumberFormat)
worksheet.write(0,p*25+2,pathEndVariableName,circumferentialpathsStrainstringFormat)
worksheet.write(1,p*25+2,pathEndVariables[p],circumferentialpathsStrainnumberFormat)
worksheet.write(2,p*25,pathCoordinateName,circumferentialpathsStrainstringFormat)
worksheet.write(2,p*25+1,'Norm ' + pathCoordinateName,circumferentialpathsStrainstringFormat)
worksheet.write(2,p*25+2,'EExx [mum/mum]',circumferentialpathsStrainstringFormat)
worksheet.write(2,p*25+3,'EEyy [mum/mum]',circumferentialpathsStrainstringFormat)
worksheet.write(2,p*25+4,'EEzz [mum/mum]',circumferentialpathsStrainstringFormat)
worksheet.write(2,p*25+5,'EExy [mum/mum]',circumferentialpathsStrainstringFormat)
worksheet.write(2,p*25+6,'EEzx [mum/mum]',circumferentialpathsStrainstringFormat)
worksheet.write(2,p*25+7,'EEyz [mum/mum]',circumferentialpathsStrainstringFormat)
worksheet.write(2,p*25+8,'EErr [mum/mum]',circumferentialpathsStrainstringFormat)
worksheet.write(2,p*25+9,'EEtt [mum/mum]',circumferentialpathsStrainstringFormat)
worksheet.write(2,p*25+10,'EErt [mum/mum]',circumferentialpathsStrainstringFormat)
worksheet.write(2,p*25+11,'EE1_3D [mum/mum]',circumferentialpathsStrainstringFormat)
worksheet.write(2,p*25+12,'EE2_3D [mum/mum]',circumferentialpathsStrainstringFormat)
worksheet.write(2,p*25+13,'EE3_3D [mum/mum]',circumferentialpathsStrainstringFormat)
worksheet.write(2,p*25+14,'EE1_2D [mum/mum]',circumferentialpathsStrainstringFormat)
worksheet.write(2,p*25+15,'EE2_2D [mum/mum]',circumferentialpathsStrainstringFormat)
worksheet.write(2,p*25+16,'EEmises_3D [mum/mum]',circumferentialpathsStrainstringFormat)
worksheet.write(2,p*25+17,'EEmises_2D [mum/mum]',circumferentialpathsStrainstringFormat)
worksheet.write(2,p*25+18,'EEaverage_3D [mum/mum]',circumferentialpathsStrainstringFormat)
worksheet.write(2,p*25+19,'EEaverage_2D [mum/mum]',circumferentialpathsStrainstringFormat)
worksheet.write(2,p*25+20,'EEI1_3D [mum/mum]',circumferentialpathsStrainstringFormat)
worksheet.write(2,p*25+21,'EEI2_3D [(mum/mum)^2]',circumferentialpathsStrainstringFormat)
worksheet.write(2,p*25+22,'EEI3_3D [(mum/mum)^3]',circumferentialpathsStrainstringFormat)
worksheet.write(2,p*25+23,'EEI1_2D [mum/mum]',circumferentialpathsStrainstringFormat)
worksheet.write(2,p*25+24,'EEI2_2D [(mum/mum)^2]',circumferentialpathsStrainstringFormat)
measureNum = np.min([len(EExx[p]),len(EEyy[p]),len(EEzz[p]),len(EExy[p])])
print(' number of path points = ' + str(measureNum))
for c in range(0,measureNum):
coord = pathCoords[p][c]
worksheet.write(3+c,p*25,coord,circumferentialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+1,pathNormCoords[p][c],circumferentialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+2,EExx[p][c],circumferentialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+3,EEyy[p][c],circumferentialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+4,EEzz[p][c],circumferentialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+5,EExy[p][c],circumferentialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+6,0.0,circumferentialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+7,0.0,circumferentialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+8,EErr[p][c],circumferentialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+9,EEtt[p][c],circumferentialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+10,EErt[p][c],circumferentialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+11,EE1D3[p][c],circumferentialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+12,EE2D3[p][c],circumferentialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+13,EE3D3[p][c],circumferentialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+14,EE1D2[p][c],circumferentialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+15,EE2D2[p][c],circumferentialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+16,EEMisesD3[p][c],circumferentialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+17,EEMisesD2[p][c],circumferentialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+18,EEaverD3[p][c],circumferentialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+19,EEaverD2[p][c],circumferentialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+20,EEI1D3[p][c],circumferentialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+21,EEI2D3[p][c],circumferentialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+22,EEI3D3[p][c],circumferentialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+23,EEI1D2[p][c],circumferentialpathsStrainnumberFormat)
worksheet.write(3+c,p*25+24,EEI2D2[p][c],circumferentialpathsStrainnumberFormat)
graphsheetName = 'Graphs, deltatheta=' + subFolder.split('deltatheta')[-1].replace('_','.')
graphworksheet = circumferentialpathsStrainWorkbook.add_worksheet(graphsheetName.decode('utf-8'))
print(' --> Writing worksheet')
print(' ' + graphsheetName)
print(' ')
variableNames = ['EExx [mum/mum]','EEyy [mum/mum]','EEzz [mum/mum]','EExy [mum/mum]','EEzx [mum/mum]','EEyz [mum/mum]','EErr [mum/mum]','EEtt [mum/mum]','EErt [mum/mum]','EE1_3D [mum/mum]','EE2_3D [mum/mum]','EE3_3D [mum/mum]','EE1_2D [mum/mum]','EE2_2D [mum/mum]','EEmises_3D [mum/mum]','EEmises_2D [mum/mum]','EEaver_3D [mum/mum]','EEaver_2D [mum/mum]','EEI1_3D [mum/mum]','EEI2_3D [(mum/mum)^2]','EEI3_3D [(mum/mum)^3]','EEI1_2D [mum/mum]','EEI2_2D [(mum/mum)^2]']
#circumferentialpathsStrainDatalengths.append(len(pathCoords[p]))
for v,variableName in enumerate(variableNames):
chartA = circumferentialpathsStrainWorkbook.add_chart({'type': 'scatter','subtype': 'straight_with_markers'})
print(' Chart ' + str(v+1) + '.A')
print(' ')
for p, pathVariable in enumerate(pathVariables):
dataLength = len(pathCoords[p])
#if v==0:
# circumferentialpathsStrainDatalengths.append(dataLength)
chartA.add_series({
'name': pathVariableName + '=' + str(pathVariable),
'categories': [datasheetName,3,25*p,dataLength,25*p],
'values': [datasheetName,3,25*p+2+v,dataLength,25*p+2+v],
})
print(' Series ' + str(p+1) + ': ' + pathVariableName + '=' + str(pathVariable))
print(' ')
chartA.set_title ({'name': variableName + ' vs path coordinates'})
chartA.set_x_axis({'name': pathCoordinateName})
chartA.set_y_axis({'name': variableName})
print(' Title: ' + variableName + ' vs path coordinates')
print(' x axis: ' + pathCoordinateName)
print(' y axis: ' + variableName)
print(' ')
print(' ')
graphworksheet.insert_chart(v*20,0,chartA)
chartB = circumferentialpathsStrainWorkbook.add_chart({'type': 'scatter','subtype': 'straight_with_markers'})
print(' Chart ' + str(v+1) + '.B')
print(' ')
for p, pathVariable in enumerate(pathVariables):
dataLength = len(pathCoords[p])
chartB.add_series({
'name': pathVariableName + '=' + str(pathVariable),
'categories': [datasheetName,3,25*p+1,dataLength,25*p+1],
'values': [datasheetName,3,25*p+2+v,dataLength,25*p+2+v],
})
print(' Series ' + str(p+1) + ': ' + pathVariableName + '=' + str(pathVariable))
print(' ')
chartB.set_title ({'name': variableName + ' vs normalized path coordinates'})
chartB.set_x_axis({'name': 'Norm ' + pathCoordinateName.split(' ')[0] + ' [-]'})
chartB.set_y_axis({'name': variableName})
print(' Title: ' + variableName + ' vs normalized path coordinates')
print(' x axis: ' + 'Norm ' + pathCoordinateName.split(' ')[0] + ' [-]')
print(' y axis: ' + variableName)
print(' ')
print(' ')
graphworksheet.insert_chart(v*20,30,chartB)
EExx = []
EEyy = []
EEzz = []
EExy = []
EEzx = []
EEyz = []
EErr = []
EEtt = []
EErt = []
EE1D3 = []
EE2D3 = []
EE3D3 = []
EEI1D3 = []
EEI2D3 = []
EEI3D3 = []
EEMisesD3 = []
EEaverD3 = []
EE1D2 = []
EE2D2 = []
EEI1D2 = []
EEI2D2 = []
EEMisesD2 = []
EEaverD2 = []
pathVariables = []
pathStartVariables = []
pathEndVariables = []
pathCoords = []
pathNormCoords = []
print('<-----------------')
print('<-----------------')
if subFolder.split('/')[-1] + '-strainshorizontalpaths' + '.csv' in listdir(subFolder):
print(' Analysis of horizontal paths for folder ' + subFolder)
with open(horizontalpathsStrainSummary,'r') as csv:
lines = csv.readlines()
EExx = []
EEyy = []
EEzz = []
EExy = []
EEzx = []
EEyz = []
EErr = []
EEtt = []
EErt = []
EE1D3 = []
EE2D3 = []
EE3D3 = []
EEI1D3 = []
EEI2D3 = []
EEI3D3 = []
EEMisesD3 = []
EEaverD3 = []
EE1D2 = []
EE2D2 = []
EEI1D2 = []
EEI2D2 = []
EEMisesD2 = []
EEaverD2 = []
pathVariables = []
pathStartVariables = []
pathEndVariables = []
pathCoords = []
pathNormCoords = []
for line in lines[1:]:
strainComp = line.replace('\n','').replace(' ','').split(',')[0]
pathVariable = float(line.replace('\n','').replace(' ','').split(',')[1])
pathStartVariable = float(line.replace('\n','').replace(' ','').split(',')[2])
pathEndVariable = float(line.replace('\n','').replace(' ','').split(',')[3])
datfilePath = join(subFolder,line.replace('\n','').replace(' ','').split(',')[-1])
print(' Reading component ' + strainComp)
print(' ' + 'for horizontal path at ' + str(pathVariable) + ' mum')
print(' ' + 'starting at ' + str(pathStartVariable) + ' mum')
print(' ' + 'ending at ' + str(pathEndVariable) + ' mum')
print(' ')
with open(datfilePath,'r') as dat:
datLines = dat.readlines()
currentxyData = []
for datLine in datLines:
if len(datLine.replace('\n','').replace(' ',''))>0 and 'X' not in datLine:
lineParts = datLine.replace('\n','').split(' ')
rowVec = []
for linePart in lineParts:
if linePart!='':
rowVec.append(float(linePart))
currentxyData.append(rowVec)
normxData = []
xData = []
yData = []
for xyPair in currentxyData:
normxData.append(xyPair[0])
xData.append(pathStartVariable+(pathEndVariable-pathStartVariable)*xyPair[0])
yData.append(xyPair[1])
if 'EE11' in strainComp:
EExx.append(yData)
pathCoords.append(xData)
pathNormCoords.append(normxData)
pathVariables.append(pathVariable)
pathStartVariables.append(pathStartVariable)
pathEndVariables.append(pathEndVariable)
print(' --> Strain component is EE11.')
print(' ')
elif 'EE22' in strainComp:
EEyy.append(yData)
print(' --> Strain component is EE22.')
print(' ')
elif 'EE23' in strainComp:
EEyz.append(yData)
print(' --> Strain component is EE23.')
print(' ')
elif 'EE12' in strainComp:
EExy.append(yData)
print(' --> Strain component is EE12.')
print(' ')
elif 'EE13' in strainComp:
EEzx.append(yData)
print(' --> Strain component is EE13.')
print(' ')
elif 'EE33' in strainComp:
EEzz.append(yData)
EEzx.append(0.0)
EEyz.append(0.0)
print(' --> Strain component is EE33.')
print(' ')
currentEExx = EExx[-1]
currentEEyy = EEyy[-1]
currentEEzz = yData
currentEExy = EExy[-1]
currentEEzx = 0.0
currentEEyz = 0.0
currentEErr = []
currentEEtt = []
currentEErt = []
current3DEE1 = []
current3DEE2 = []
current3DEE3 = []
current3DEEI1 = []
current3DEEI2 = []
current3DEEI3 = []
current3DEEMises = []
current3DEEaver = []
current2DEE1 = []
current2DEE2 = []
current2DEEI1 = []
current2DEEI2 = []
current2DEEMises = []
current2DEEaver = []
nstrainPoints = np.min([len(currentEExx),len(currentEEyy),len(currentEEzz),len(currentEExy)])
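# Horizontal path at constant y = pathVariable: the local rotation angle is
# theta = arctan2(y, x), with x taken from the path coordinate.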
for s in range(0,nstrainPoints):
rotateBy = np.arctan2(pathVariable,pathCoords[-1][s])
cosRot = np.cos(rotateBy)
sinRot = np.sin(rotateBy)
eexx = currentEExx[s]
eeyy = currentEEyy[s]
eezz = currentEEzz[s]
eexy = currentEExy[s]
eezx = 0.0
eeyz = 0.0
eerr = eexx*cosRot*cosRot+eeyy*sinRot*sinRot+2*eexy*cosRot*sinRot
eett = eexx*sinRot*sinRot+eeyy*cosRot*cosRot-2*eexy*cosRot*sinRot
eert = -eexx*cosRot*sinRot+eeyy*cosRot*sinRot+eexy*(cosRot*cosRot-sinRot*sinRot)
currentEErr.append(eerr)
currentEEtt.append(eett)
currentEErt.append(eert)
eei1d2 = eexx + eeyy
eei1d3 = eexx + eeyy + eezz
eei2d2 = eexx*eeyy - eexy*eexy
eei2d3 = eexx*eeyy + eeyy*eezz + eexx*eezz - eexy*eexy - eeyz*eeyz - eezx*eezx
eei3d3 = eexx*eeyy*eezz - eexx*eeyz*eeyz - eeyy*eezx*eezx - eezz*eexy*eexy + 2*eexy*eeyz*eezx
eeaverd2 = eei1d2/2.0
eeaverd3 = eei1d3/3.0
eemises2d = np.sqrt(eexx*eexx + eeyy*eeyy - eexx*eeyy + 3*eexy*eexy)
eemises3d = np.sqrt(eexx*eexx + eeyy*eeyy + eezz*eezz - eexx*eeyy - eeyy*eezz - eexx*eezz + 3*(eexy*eexy + eeyz*eeyz + eezx*eezx))
ee1d2 = 0.5*(eexx+eeyy)+np.sqrt((0.5*(eexx-eeyy))*(0.5*(eexx-eeyy))+eexy*eexy)
ee2d2 = 0.5*(eexx+eeyy)-np.sqrt((0.5*(eexx-eeyy))*(0.5*(eexx-eeyy))+eexy*eexy)
try:
princOrient = np.arccos((2*eei1d3*eei1d3*eei1d3-9*eei1d3*eei2d3+27*eei3d3)/(2*np.sqrt((eei1d3*eei1d3-3*eei2d3)*(eei1d3*eei1d3-3*eei2d3)*(eei1d3*eei1d3-3*eei2d3))))/3.0
ee1d3 = eei1d3/3.0 + 2*np.sqrt(eei1d3*eei1d3-3*eei2d3)*np.cos(princOrient)/3.0
ee2d3 = eei1d3/3.0 + 2*np.sqrt(eei1d3*eei1d3-3*eei2d3)*np.cos(princOrient-2*np.pi/3.0)/3.0
ee3d3 = eei1d3/3.0 + 2*np.sqrt(eei1d3*eei1d3-3*eei2d3)*np.cos(princOrient-4*np.pi/3.0)/3.0
except Exception:
ee1d3 = ee1d2
ee2d3 = ee2d2
ee3d3 = 0.0
current3DEE1.append(ee1d3)
current3DEE2.append(ee2d3)
current3DEE3.append(ee3d3)
current3DEEI1.append(eei1d3)
current3DEEI2.append(eei2d3)
current3DEEI3.append(eei3d3)
current3DEEMises.append(eemises3d)
current3DEEaver.append(eeaverd3)
current2DEE1.append(ee1d2)
current2DEE2.append(ee2d2)
current2DEEI1.append(eei1d2)
current2DEEI2.append(eei2d2)
current2DEEMises.append(eemises2d)
current2DEEaver.append(eeaverd2)
EErr.append(currentEErr)
EEtt.append(currentEEtt)
EErt.append(currentEErt)
EE1D3.append(current3DEE1)
EE2D3.append(current3DEE2)
EE3D3.append(current3DEE3)
EE1D2.append(current2DEE1)
EE2D2.append(current2DEE2)
EEI1D3.append(current3DEEI1)
EEI2D3.append(current3DEEI2)
EEI3D3.append(current3DEEI3)
EEI1D2.append(current2DEEI1)
EEI2D2.append(current2DEEI2)
EEMisesD3.append(current3DEEMises)
EEaverD3.append(current3DEEaver)
EEMisesD2.append(current2DEEMises)
EEaverD2.append(current2DEEaver)
currentEErr = []
currentEEtt = []
currentEErt = []
current3DEE1 = []
current3DEE2 = []
current3DEE3 = []
current3DEEI1 = []
current3DEEI2 = []
current3DEEI3 = []
current3DEEMises = []
current3DEEaver = []
current2DEE1 = []
current2DEE2 = []
current2DEEI1 = []
current2DEEI2 = []
current2DEEMises = []
current2DEEaver = []
pathVariableName = 'y [mum]'
pathStartVariableName = 'xi [mum]'
pathEndVariableName = 'xf [mum]'
pathCoordinateName = 'x [mum]'
datasheetName = 'Values, deltatheta=' + subFolder.split('deltatheta')[-1].replace('_','.')
horizontalpathsStrainSheetnames.append(datasheetName)
numberOfHorizontalpaths.append(len(pathVariables))
worksheet = horizontalpathsStrainWorkbook.add_worksheet(datasheetName.decode('utf-8'))
for p, pathVariable in enumerate(pathVariables):
worksheet.write(0,p*25,pathVariableName,horizontalpathsStrainstringFormat)
worksheet.write(1,p*25,pathVariable,horizontalpathsStrainnumberFormatReduced)
worksheet.write(0,p*25+1,pathStartVariableName,horizontalpathsStrainstringFormat)
worksheet.write(1,p*25+1,pathStartVariables[p],horizontalpathsStrainnumberFormat)
worksheet.write(0,p*25+2,pathEndVariableName,horizontalpathsStrainstringFormat)
worksheet.write(1,p*25+2,pathEndVariables[p],horizontalpathsStrainnumberFormat)
worksheet.write(2,p*25,pathCoordinateName,horizontalpathsStrainstringFormat)
worksheet.write(2,p*25+1,'Norm ' + pathCoordinateName,horizontalpathsStrainstringFormat)
worksheet.write(2,p*25+2,'EExx [mum/mum]',horizontalpathsStrainstringFormat)
worksheet.write(2,p*25+3,'EEyy [mum/mum]',horizontalpathsStrainstringFormat)
worksheet.write(2,p*25+4,'EEzz [mum/mum]',horizontalpathsStrainstringFormat)
worksheet.write(2,p*25+5,'EExy [mum/mum]',horizontalpathsStrainstringFormat)
worksheet.write(2,p*25+6,'EEzx [mum/mum]',horizontalpathsStrainstringFormat)
worksheet.write(2,p*25+7,'EEyz [mum/mum]',horizontalpathsStrainstringFormat)
worksheet.write(2,p*25+8,'EErr [mum/mum]',horizontalpathsStrainstringFormat)
worksheet.write(2,p*25+9,'EEtt [mum/mum]',horizontalpathsStrainstringFormat)
worksheet.write(2,p*25+10,'EErt [mum/mum]',horizontalpathsStrainstringFormat)
worksheet.write(2,p*25+11,'EE1_3D [mum/mum]',horizontalpathsStrainstringFormat)
worksheet.write(2,p*25+12,'EE2_3D [mum/mum]',horizontalpathsStrainstringFormat)
worksheet.write(2,p*25+13,'EE3_3D [mum/mum]',horizontalpathsStrainstringFormat)
worksheet.write(2,p*25+14,'EE1_2D [mum/mum]',horizontalpathsStrainstringFormat)
worksheet.write(2,p*25+15,'EE2_2D [mum/mum]',horizontalpathsStrainstringFormat)
worksheet.write(2,p*25+16,'EEmises_3D [mum/mum]',horizontalpathsStrainstringFormat)
worksheet.write(2,p*25+17,'EEmises_2D [mum/mum]',horizontalpathsStrainstringFormat)
worksheet.write(2,p*25+18,'EEaverage_3D [mum/mum]',horizontalpathsStrainstringFormat)
worksheet.write(2,p*25+19,'EEaverage_2D [mum/mum]',horizontalpathsStrainstringFormat)
worksheet.write(2,p*25+20,'EEI1_3D [mum/mum]',horizontalpathsStrainstringFormat)
worksheet.write(2,p*25+21,'EEI2_3D [(mum/mum)^2]',horizontalpathsStrainstringFormat)
worksheet.write(2,p*25+22,'EEI3_3D [(mum/mum)^3]',horizontalpathsStrainstringFormat)
worksheet.write(2,p*25+23,'EEI1_2D [mum/mum]',horizontalpathsStrainstringFormat)
worksheet.write(2,p*25+24,'EEI2_2D [(mum/mum)^2]',horizontalpathsStrainstringFormat)
measureNum = np.min([len(EExx[p]),len(EEyy[p]),len(EEzz[p]),len(EExy[p])])
print(' number of path points = ' + str(measureNum))
for c in range(0,measureNum):
coord = pathCoords[p][c]
worksheet.write(3+c,p*25,coord,horizontalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+1,pathNormCoords[p][c],horizontalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+2,EExx[p][c],horizontalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+3,EEyy[p][c],horizontalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+4,EEzz[p][c],horizontalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+5,EExy[p][c],horizontalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+6,0.0,horizontalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+7,0.0,horizontalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+8,EErr[p][c],horizontalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+9,EEtt[p][c],horizontalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+10,EErt[p][c],horizontalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+11,EE1D3[p][c],horizontalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+12,EE2D3[p][c],horizontalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+13,EE3D3[p][c],horizontalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+14,EE1D2[p][c],horizontalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+15,EE2D2[p][c],horizontalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+16,EEMisesD3[p][c],horizontalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+17,EEMisesD2[p][c],horizontalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+18,EEaverD3[p][c],horizontalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+19,EEaverD2[p][c],horizontalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+20,EEI1D3[p][c],horizontalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+21,EEI2D3[p][c],horizontalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+22,EEI3D3[p][c],horizontalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+23,EEI1D2[p][c],horizontalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+24,EEI2D2[p][c],horizontalpathsStrainnumberFormat)
graphsheetName = 'Graphs, deltatheta=' + subFolder.split('deltatheta')[-1].replace('_','.')
graphworksheet = horizontalpathsStrainWorkbook.add_worksheet(graphsheetName.decode('utf-8'))
print(' --> Writing worksheet')
print(' ' + graphsheetName)
print(' ')
variableNames = ['EExx [mum/mum]','EEyy [mum/mum]','EEzz [mum/mum]','EExy [mum/mum]','EEzx [mum/mum]','EEyz [mum/mum]','EErr [mum/mum]','EEtt [mum/mum]','EErt [mum/mum]','EE1_3D [mum/mum]','EE2_3D [mum/mum]','EE3_3D [mum/mum]','EE1_2D [mum/mum]','EE2_2D [mum/mum]','EEmises_3D [mum/mum]','EEmises_2D [mum/mum]','EEaver_3D [mum/mum]','EEaver_2D [mum/mum]','EEI1_3D [mum/mum]','EEI2_3D [(mum/mum)^2]','EEI3_3D [(mum/mum)^3]','EEI1_2D [mum/mum]','EEI2_2D [(mum/mum)^2]']
#horizontalpathsStrainDatalengths.append(len(pathCoords[p]))
for v,variableName in enumerate(variableNames):
chartA = horizontalpathsStrainWorkbook.add_chart({'type': 'scatter','subtype': 'straight_with_markers'})
print(' Chart ' + str(v+1) + '.A')
print(' ')
for p, pathVariable in enumerate(pathVariables):
dataLength = len(pathCoords[p])
#if v==0:
# horizontalpathsStrainDatalengths.append(dataLength)
chartA.add_series({
'name': pathVariableName + '=' + str(pathVariable),
'categories': [datasheetName,3,25*p,dataLength,25*p],
'values': [datasheetName,3,25*p+2+v,dataLength,25*p+2+v],
})
print(' Series ' + str(p+1) + ': ' + pathVariableName + '=' + str(pathVariable))
print(' ')
chartA.set_title ({'name': variableName + ' vs path coordinates'})
chartA.set_x_axis({'name': pathCoordinateName})
chartA.set_y_axis({'name': variableName})
print(' Title: ' + variableName + ' vs path coordinates')
print(' x axis: ' + pathCoordinateName)
print(' y axis: ' + variableName)
print(' ')
print(' ')
graphworksheet.insert_chart(v*20,0,chartA)
chartB = horizontalpathsStrainWorkbook.add_chart({'type': 'scatter','subtype': 'straight_with_markers'})
print(' Chart ' + str(v+1) + '.B')
print(' ')
for p, pathVariable in enumerate(pathVariables):
dataLength = len(pathCoords[p])
chartB.add_series({
'name': pathVariableName + '=' + str(pathVariable),
'categories': [datasheetName,3,25*p+1,dataLength,25*p+1],
'values': [datasheetName,3,25*p+2+v,dataLength,25*p+2+v],
})
print(' Series ' + str(p+1) + ': ' + pathVariableName + '=' + str(pathVariable))
print(' ')
chartB.set_title ({'name': variableName + ' vs normalized path coordinates'})
chartB.set_x_axis({'name': 'Norm ' + pathCoordinateName.split(' ')[0] + ' [-]'})
chartB.set_y_axis({'name': variableName})
print(' Title: ' + variableName + ' vs normalized path coordinates')
print(' x axis: ' + 'Norm ' + pathCoordinateName.split(' ')[0] + ' [-]')
print(' y axis: ' + variableName)
print(' ')
print(' ')
graphworksheet.insert_chart(v*20,30,chartB)
EExx = []
EEyy = []
EEzz = []
EExy = []
EEzx = []
EEyz = []
EErr = []
EEtt = []
EErt = []
EE1D3 = []
EE2D3 = []
EE3D3 = []
EEI1D3 = []
EEI2D3 = []
EEI3D3 = []
EEMisesD3 = []
EEaverD3 = []
EE1D2 = []
EE2D2 = []
EEI1D2 = []
EEI2D2 = []
EEMisesD2 = []
EEaverD2 = []
pathVariables = []
pathStartVariables = []
pathEndVariables = []
pathCoords = []
pathNormCoords = []
print('<-----------------')
print('<-----------------')
if subFolder.split('/')[-1] + '-strainsverticalpaths' + '.csv' in listdir(subFolder):
print(' Analysis of vertical paths for folder ' + subFolder)
with open(verticalpathsStrainSummary,'r') as csv:
lines = csv.readlines()
EExx = []
EEyy = []
EEzz = []
EExy = []
EEzx = []
EEyz = []
EErr = []
EEtt = []
EErt = []
EE1D3 = []
EE2D3 = []
EE3D3 = []
EEI1D3 = []
EEI2D3 = []
EEI3D3 = []
EEMisesD3 = []
EEaverD3 = []
EE1D2 = []
EE2D2 = []
EEI1D2 = []
EEI2D2 = []
EEMisesD2 = []
EEaverD2 = []
pathVariables = []
pathStartVariables = []
pathEndVariables = []
pathCoords = []
pathNormCoords = []
for line in lines[1:]:
strainComp = line.replace('\n','').replace(' ','').split(',')[0]
pathVariable = float(line.replace('\n','').replace(' ','').split(',')[1])
pathStartVariable = float(line.replace('\n','').replace(' ','').split(',')[2])
pathEndVariable = float(line.replace('\n','').replace(' ','').split(',')[3])
datfilePath = join(subFolder,line.replace('\n','').replace(' ','').split(',')[-1])
print(' Reading component ' + strainComp)
print(' ' + 'for vertical path at ' + str(pathVariable) + ' mum')
print(' ' + 'starting at ' + str(pathStartVariable) + ' mum')
print(' ' + 'ending at ' + str(pathEndVariable) + ' mum')
print(' ')
with open(datfilePath,'r') as dat:
datLines = dat.readlines()
currentxyData = []
for datLine in datLines:
if len(datLine.replace('\n','').replace(' ',''))>0 and 'X' not in datLine:
lineParts = datLine.replace('\n','').split(' ')
rowVec = []
for linePart in lineParts:
if linePart!='':
rowVec.append(float(linePart))
currentxyData.append(rowVec)
normxData = []
xData = []
yData = []
for xyPair in currentxyData:
normxData.append(xyPair[0])
xData.append(pathStartVariable+(pathEndVariable-pathStartVariable)*xyPair[0])
yData.append(xyPair[1])
if 'EE11' in strainComp:
EExx.append(yData)
pathCoords.append(xData)
pathNormCoords.append(normxData)
pathVariables.append(pathVariable)
pathStartVariables.append(pathStartVariable)
pathEndVariables.append(pathEndVariable)
print(' --> Strain component is EE11.')
print(' ')
elif 'EE22' in strainComp:
EEyy.append(yData)
print(' --> Strain component is EE22.')
print(' ')
elif 'EE23' in strainComp:
EEyz.append(yData)
print(' --> Strain component is EE23.')
print(' ')
elif 'EE12' in strainComp:
EExy.append(yData)
print(' --> Strain component is EE12.')
print(' ')
elif 'EE13' in strainComp:
EEzx.append(yData)
print(' --> Strain component is EE13.')
print(' ')
elif 'EE33' in strainComp:
EEzz.append(yData)
EEzx.append(0.0)
EEyz.append(0.0)
print(' --> Strain component is EE33.')
print(' ')
currentEExx = EExx[-1]
currentEEyy = EEyy[-1]
currentEEzz = yData
currentEExy = EExy[-1]
currentEEzx = 0.0
currentEEyz = 0.0
currentEErr = []
currentEEtt = []
currentEErt = []
current3DEE1 = []
current3DEE2 = []
current3DEE3 = []
current3DEEI1 = []
current3DEEI2 = []
current3DEEI3 = []
current3DEEMises = []
current3DEEaver = []
current2DEE1 = []
current2DEE2 = []
current2DEEI1 = []
current2DEEI2 = []
current2DEEMises = []
current2DEEaver = []
nstrainPoints = np.min([len(currentEExx),len(currentEEyy),len(currentEEzz),len(currentEExy)])
for s in range(0,nstrainPoints):
rotateBy = np.arctan2(xData[s],pathVariable)
cosRot = np.cos(rotateBy)
sinRot = np.sin(rotateBy)
eexx = currentEExx[s]
eeyy = currentEEyy[s]
eezz = currentEEzz[s]
eexy = currentEExy[s]
eezx = 0.0
eeyz = 0.0
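# rotate the in-plane strain components from Cartesian (x,y) to polar (r,theta)
# axes using the polar angle of the current path point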
eerr = eexx*cosRot*cosRot+eeyy*sinRot*sinRot+2*eexy*cosRot*sinRot
eett = eexx*sinRot*sinRot+eeyy*cosRot*cosRot-2*eexy*cosRot*sinRot
eert = -eexx*cosRot*sinRot+eeyy*cosRot*sinRot+eexy*(cosRot*cosRot-sinRot*sinRot)
currentEErr.append(eerr)
currentEEtt.append(eett)
currentEErt.append(eert)
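# strain invariants: I1 = trace, I2 = sum of principal 2x2 minors, I3 = determinant,
# evaluated for both the in-plane (2D) and the full (3D) strain tensor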
eei1d2 = eexx + eeyy
eei1d3 = eexx + eeyy + eezz
eei2d2 = eexx*eeyy - eexy*eexy
eei2d3 = eexx*eeyy + eeyy*eezz + eexx*eezz - eexy*eexy - eeyz*eeyz - eezx*eezx
eei3d3 = eexx*eeyy*eezz - eexx*eeyz*eeyz - eeyy*eezx*eezx - eezz*eexy*eexy + 2*eexy*eeyz*eezx
eeaverd2 = eei1d2/2.0
eeaverd3 = eei1d3/3.0
eemises2d = np.sqrt(eexx*eexx + eeyy*eeyy - eexx*eeyy + 3*eexy*eexy)
eemises3d = np.sqrt(eexx*eexx + eeyy*eeyy + eezz*eezz - eexx*eeyy - eeyy*eezz - eexx*eezz + 3*(eexy*eexy + eeyz*eeyz + eezx*eezx))
ee1d2 = 0.5*(eexx+eeyy)+np.sqrt((0.5*(eexx-eeyy))*(0.5*(eexx-eeyy))+eexy*eexy)
ee2d2 = 0.5*(eexx+eeyy)-np.sqrt((0.5*(eexx-eeyy))*(0.5*(eexx-eeyy))+eexy*eexy)
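# 3D principal strains from the trigonometric (Cardano) solution of the cubic
# characteristic equation; if the evaluation fails numerically, fall back to
# the 2D principal values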
try:
princOrient = np.arccos((2*eei1d3*eei1d3*eei1d3-9*eei1d3*eei2d3+27*eei3d3)/(2*np.sqrt((eei1d3*eei1d3-3*eei2d3)*(eei1d3*eei1d3-3*eei2d3)*(eei1d3*eei1d3-3*eei2d3))))/3.0
ee1d3 = eei1d3/3.0 + 2*np.sqrt(eei1d3*eei1d3-3*eei2d3)*np.cos(princOrient)/3.0
ee2d3 = eei1d3/3.0 + 2*np.sqrt(eei1d3*eei1d3-3*eei2d3)*np.cos(princOrient-2*np.pi/3.0)/3.0
ee3d3 = eei1d3/3.0 + 2*np.sqrt(eei1d3*eei1d3-3*eei2d3)*np.cos(princOrient-4*np.pi/3.0)/3.0
except Exception:
ee1d3 = ee1d2
ee2d3 = ee2d2
ee3d3 = 0.0
current3DEE1.append(ee1d3)
current3DEE2.append(ee2d3)
current3DEE3.append(ee3d3)
current3DEEI1.append(eei1d3)
current3DEEI2.append(eei2d3)
current3DEEI3.append(eei3d3)
current3DEEMises.append(eemises3d)
current3DEEaver.append(eeaverd3)
current2DEE1.append(ee1d2)
current2DEE2.append(ee2d2)
current2DEEI1.append(eei1d2)
current2DEEI2.append(eei2d2)
current2DEEMises.append(eemises2d)
current2DEEaver.append(eeaverd2)
EErr.append(currentEErr)
EEtt.append(currentEEtt)
EErt.append(currentEErt)
EE1D3.append(current3DEE1)
EE2D3.append(current3DEE2)
EE3D3.append(current3DEE3)
EE1D2.append(current2DEE1)
EE2D2.append(current2DEE2)
EEI1D3.append(current3DEEI1)
EEI2D3.append(current3DEEI2)
EEI3D3.append(current3DEEI3)
EEI1D2.append(current2DEEI1)
EEI2D2.append(current2DEEI2)
EEMisesD3.append(current3DEEMises)
EEaverD3.append(current3DEEaver)
EEMisesD2.append(current2DEEMises)
EEaverD2.append(current2DEEaver)
currentEErr = []
currentEEtt = []
currentEErt = []
current3DEE1 = []
current3DEE2 = []
current3DEE3 = []
current3DEEI1 = []
current3DEEI2 = []
current3DEEI3 = []
current3DEEMises = []
current3DEEaver = []
current2DEE1 = []
current2DEE2 = []
current2DEEI1 = []
current2DEEI2 = []
current2DEEMises = []
current2DEEaver = []
pathVariableName = 'x [mum]'
pathStartVariableName = 'yi [mum]'
pathEndVariableName = 'yf [mum]'
pathCoordinateName = 'y [mum]'
datasheetName = 'Values, deltatheta=' + subFolder.split('deltatheta')[-1].replace('_','.')
verticalpathsStrainSheetnames.append(datasheetName)
numberOfVerticalpaths.append(len(pathVariables))
worksheet = verticalpathsStrainWorkbook.add_worksheet(datasheetName.decode('utf-8'))
for p, pathVariable in enumerate(pathVariables):
worksheet.write(0,p*25,pathVariableName,verticalpathsStrainstringFormat)
worksheet.write(1,p*25,pathVariable,verticalpathsStrainnumberFormatReduced)
worksheet.write(0,p*25+1,pathStartVariableName,verticalpathsStrainstringFormat)
worksheet.write(1,p*25+1,pathStartVariables[p],verticalpathsStrainnumberFormat)
worksheet.write(0,p*25+2,pathEndVariableName,verticalpathsStrainstringFormat)
worksheet.write(1,p*25+2,pathEndVariables[p],verticalpathsStrainnumberFormat)
worksheet.write(2,p*25,pathCoordinateName,verticalpathsStrainstringFormat)
worksheet.write(2,p*25+1,'Norm ' + pathCoordinateName,verticalpathsStrainstringFormat)
worksheet.write(2,p*25+2,'EExx [mum/mum]',verticalpathsStrainstringFormat)
worksheet.write(2,p*25+3,'EEyy [mum/mum]',verticalpathsStrainstringFormat)
worksheet.write(2,p*25+4,'EEzz [mum/mum]',verticalpathsStrainstringFormat)
worksheet.write(2,p*25+5,'EExy [mum/mum]',verticalpathsStrainstringFormat)
worksheet.write(2,p*25+6,'EEzx [mum/mum]',verticalpathsStrainstringFormat)
worksheet.write(2,p*25+7,'EEyz [mum/mum]',verticalpathsStrainstringFormat)
worksheet.write(2,p*25+8,'EErr [mum/mum]',verticalpathsStrainstringFormat)
worksheet.write(2,p*25+9,'EEtt [mum/mum]',verticalpathsStrainstringFormat)
worksheet.write(2,p*25+10,'EErt [mum/mum]',verticalpathsStrainstringFormat)
worksheet.write(2,p*25+11,'EE1_3D [mum/mum]',verticalpathsStrainstringFormat)
worksheet.write(2,p*25+12,'EE2_3D [mum/mum]',verticalpathsStrainstringFormat)
worksheet.write(2,p*25+13,'EE3_3D [mum/mum]',verticalpathsStrainstringFormat)
worksheet.write(2,p*25+14,'EE1_2D [mum/mum]',verticalpathsStrainstringFormat)
worksheet.write(2,p*25+15,'EE2_2D [mum/mum]',verticalpathsStrainstringFormat)
worksheet.write(2,p*25+16,'EEmises_3D [mum/mum]',verticalpathsStrainstringFormat)
worksheet.write(2,p*25+17,'EEmises_2D [mum/mum]',verticalpathsStrainstringFormat)
worksheet.write(2,p*25+18,'EEaverage_3D [mum/mum]',verticalpathsStrainstringFormat)
worksheet.write(2,p*25+19,'EEaverage_2D [mum/mum]',verticalpathsStrainstringFormat)
worksheet.write(2,p*25+20,'EEI1_3D [mum/mum]',verticalpathsStrainstringFormat)
worksheet.write(2,p*25+21,'EEI2_3D [(mum/mum)^2]',verticalpathsStrainstringFormat)
worksheet.write(2,p*25+22,'EEI3_3D [mum/mum^3]',verticalpathsStrainstringFormat)
worksheet.write(2,p*25+23,'EEI1_2D [(mum/mum)]',verticalpathsStrainstringFormat)
worksheet.write(2,p*25+24,'EEI2_2D [(mum/mum)^2]',verticalpathsStrainstringFormat)
measureNum = np.min([len(EExx[p]),len(EEyy[p]),len(EEzz[p]),len(EExy[p])])
print(' number of path points = ' + str(measureNum))
for c in range(0,measureNum):
coord = pathCoords[p][c]
worksheet.write(3+c,p*25,coord,verticalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+1,pathNormCoords[p][c],verticalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+2,EExx[p][c],verticalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+3,EEyy[p][c],verticalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+4,EEzz[p][c],verticalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+5,EExy[p][c],verticalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+6,0.0,verticalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+7,0.0,verticalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+8,EErr[p][c],verticalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+9,EEtt[p][c],verticalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+10,EErt[p][c],verticalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+11,EE1D3[p][c],verticalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+12,EE2D3[p][c],verticalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+13,EE3D3[p][c],verticalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+14,EE1D2[p][c],verticalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+15,EE2D2[p][c],verticalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+16,EEMisesD3[p][c],verticalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+17,EEMisesD2[p][c],verticalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+18,EEaverD3[p][c],verticalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+19,EEaverD2[p][c],verticalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+20,EEI1D3[p][c],verticalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+21,EEI2D3[p][c],verticalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+22,EEI3D3[p][c],verticalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+23,EEI1D2[p][c],verticalpathsStrainnumberFormat)
worksheet.write(3+c,p*25+24,EEI2D2[p][c],verticalpathsStrainnumberFormat)
graphsheetName = 'Graphs, deltatheta=' + subFolder.split('deltatheta')[-1].replace('_','.')
graphworksheet = verticalpathsStrainWorkbook.add_worksheet(graphsheetName.decode('utf-8'))
print(' --> Writing worksheet')
print(' ' + graphsheetName)
print(' ')
variableNames = ['EExx [mum/mum]','EEyy [mum/mum]','EEzz [mum/mum]','EExy [mum/mum]','EEzx [mum/mum]','EEyz [mum/mum]','EErr [mum/mum]','EEtt [mum/mum]','EErt [mum/mum]','EE1_3D [mum/mum]','EE2_3D [mum/mum]','EE3_3D [mum/mum]','EE1_2D [mum/mum]','EE2_2D [mum/mum]','EEmises_3D [mum/mum]','EEmises_2D [mum/mum]','EEaver_3D [mum/mum]','EEaver_2D [mum/mum]','EEI1_3D [mum/mum]','EEI2_3D [(mum/mum)^2]','EEI3_3D [(mum/mum)^3]','EEI1_2D [mum/mum]','EEI2_2D [(mum/mum)^2]']
for v,variableName in enumerate(variableNames):
chartA = verticalpathsStrainWorkbook.add_chart({'type': 'scatter','subtype': 'straight_with_markers'})
print(' Chart ' + str(v+1) + '.A')
print(' ')
for p, pathVariable in enumerate(pathVariables):
dataLength = len(pathCoords[p])
if v==0:
verticalpathsStrainDatalengths.append(dataLength)
chartA.add_series({
'name': pathVariableName + '=' + str(pathVariable),
'categories': [datasheetName,3,25*p,dataLength,25*p],
'values': [datasheetName,3,25*p+2+v,dataLength,25*p+2+v],
})
print(' Series ' + str(p+1) + ': ' + pathVariableName + '=' + str(pathVariable))
print(' ')
chartA.set_title ({'name': variableName + ' vs path coordinates'})
chartA.set_x_axis({'name': pathCoordinateName})
chartA.set_y_axis({'name': variableName})
print(' Title: ' + variableName + ' vs path coordinates')
print(' x axis: ' + pathCoordinateName)
print(' y axis: ' + variableName)
print(' ')
print(' ')
graphworksheet.insert_chart(v*20,0,chartA)
chartB = verticalpathsStrainWorkbook.add_chart({'type': 'scatter','subtype': 'straight_with_markers'})
print(' Chart ' + str(v+1) + '.B')
print(' ')
for p, pathVariable in enumerate(pathVariables):
dataLength = len(pathCoords[p])
chartB.add_series({
'name': pathVariableName + '=' + str(pathVariable),
'categories': [datasheetName,3,25*p+1,dataLength,25*p+1],
'values': [datasheetName,3,25*p+2+v,dataLength,25*p+2+v],
})
print(' Series ' + str(p+1) + ': ' + pathVariableName + '=' + str(pathVariable))
print(' ')
chartB.set_title ({'name': variableName + ' vs normalized path coordinates'})
chartB.set_x_axis({'name': 'Norm ' + pathCoordinateName.split(' ')[0] + ' [-]'})
chartB.set_y_axis({'name': variableName})
print(' Title: ' + variableName + ' vs normalized path coordinates')
print(' x axis: ' + 'Norm ' + pathCoordinateName.split(' ')[0] + ' [-]')
print(' y axis: ' + variableName)
print(' ')
print(' ')
graphworksheet.insert_chart(v*20,30,chartB)
EExx = []
EEyy = []
EEzz = []
EExy = []
EEzx = []
EEyz = []
EErr = []
EEtt = []
EErt = []
EE1D3 = []
EE2D3 = []
EE3D3 = []
EEI1D3 = []
EEI2D3 = []
EEI3D3 = []
EEMisesD3 = []
EEaverD3 = []
EE1D2 = []
EE2D2 = []
EEI1D2 = []
EEI2D2 = []
EEMisesD2 = []
EEaverD2 = []
pathVariables = []
pathStartVariables = []
pathEndVariables = []
pathCoords = []
pathNormCoords = []
print('<-----------------')
print('<-----------------')
pathVariableName = 'pathAngle [deg]'
pathCoordinateName = 'R [mum]'
for n in range(0,min(numberOfRadialpaths)):
graphsheetName = 'Graphs, path n. ' + str(n)
graphworksheet = radialpathsWorkbook.add_worksheet(graphsheetName.decode('utf-8'))
print(' --> Writing worksheet')
print(' ' + graphsheetName)
print(' ')
variableNames = ['Sxx [MPa]','Syy [MPa]','Szz [MPa]','Sxy [MPa]','Szx [MPa]','Syz [MPa]','Srr [MPa]','Stt [MPa]','Srt [MPa]','S1_3D [MPa]','S2_3D [MPa]','S3_3D [MPa]','S1_2D [MPa]','S2_2D [MPa]','Smises_3D [MPa]','Smises_2D [MPa]','Saverage_3D [MPa]','Saverage_2D [MPa]','I1_3D [MPa]','I2_3D [MPa^2]','I3_3D [MPa^3]','I1_2D [MPa]','I2_2D [MPa^2]']
for v,variableName in enumerate(variableNames):
chartA = radialpathsWorkbook.add_chart({'type': 'scatter','subtype': 'straight_with_markers'})
print(' Chart ' + str(v+1) + '.A')
for s,subFolder in enumerate(subfoldersList):
dataLength = radialpathsDatalengths[s]
datasheetName = 'Values, deltatheta=' + subFolder.split('deltatheta')[-1].replace('_','.')
chartA.add_series({
'name': 'deltatheta' + '=' + subFolder.split('deltatheta')[-1].replace('_','.'),
'categories': [datasheetName,3,n*25,dataLength,n*25],
'values': [datasheetName,3,n*25+2+v,dataLength,n*25+2+v],
})
print(' Series ' + str(s+1) + ': ' + 'deltatheta' + '=' + subFolder.split('deltatheta')[-1].replace('_','.'))
print(' ')
chartA.set_title ({'name': variableName + ' vs path coordinates'})
chartA.set_x_axis({'name': pathCoordinateName})
chartA.set_y_axis({'name': variableName})
print(' Title: ' + variableName + ' vs path coordinates')
print(' x axis: ' + pathCoordinateName)
print(' y axis: ' + variableName)
print(' ')
print(' ')
graphworksheet.insert_chart(v*20,0,chartA)
chartB = radialpathsWorkbook.add_chart({'type': 'scatter','subtype': 'straight_with_markers'})
print(' Chart ' + str(v+1) + '.B')
print(' ')
for s,subFolder in enumerate(subfoldersList):
dataLength = radialpathsDatalengths[s]
datasheetName = 'Values, deltatheta=' + subFolder.split('deltatheta')[-1].replace('_','.')
chartB.add_series({
'name': 'deltatheta' + '=' + subFolder.split('deltatheta')[-1].replace('_','.'),
'categories': [datasheetName,3,n*25+1,dataLength,n*25+1],
'values': [datasheetName,3,n*25+2+v,dataLength,n*25+2+v],
})
print(' Series ' + str(s+1) + ': ' + 'deltatheta' + '=' + subFolder.split('deltatheta')[-1].replace('_','.'))
print(' ')
chartB.set_title ({'name': variableName + ' vs normalized path coordinates'})
chartB.set_x_axis({'name': 'Norm ' + pathCoordinateName.split(' ')[0] + ' [-]'})
chartB.set_y_axis({'name': variableName})
print(' Title: ' + variableName + ' vs normalized path coordinates')
print(' x axis: ' + 'Norm ' + pathCoordinateName.split(' ')[0] + ' [-]')
print(' y axis: ' + variableName)
print(' ')
print(' ')
graphworksheet.insert_chart(v*20,30,chartB)
pathVariableName = 'pathRadius [mum]'
pathCoordinateName = 'angle [deg]'
for n in range(0,min(numberOfCircumferentialpaths)):
graphsheetName = 'Graphs, path n. ' + str(n)
graphworksheet = circumferentialpathsWorkbook.add_worksheet(graphsheetName.decode('utf-8'))
print(' --> Writing worksheet')
print(' ' + graphsheetName)
print(' ')
variableNames = ['Sxx [MPa]','Syy [MPa]','Szz [MPa]','Sxy [MPa]','Szx [MPa]','Syz [MPa]','Srr [MPa]','Stt [MPa]','Srt [MPa]','S1_3D [MPa]','S2_3D [MPa]','S3_3D [MPa]','S1_2D [MPa]','S2_2D [MPa]','Smises_3D [MPa]','Smises_2D [MPa]','Saverage_3D [MPa]','Saverage_2D [MPa]','I1_3D [MPa]','I2_3D [MPa^2]','I3_3D [MPa^3]','I1_2D [MPa]','I2_2D [MPa^2]']
for v,variableName in enumerate(variableNames):
chartA = circumferentialpathsWorkbook.add_chart({'type': 'scatter','subtype': 'straight_with_markers'})
print(' Chart ' + str(v+1) + '.A')
print(' ')
for s,subFolder in enumerate(subfoldersList):
dataLength = circumferentialpathsDatalengths[s]
datasheetName = 'Values, deltatheta=' + subFolder.split('deltatheta')[-1].replace('_','.')
chartA.add_series({
'name': 'deltatheta' + '=' + subFolder.split('deltatheta')[-1].replace('_','.'),
'categories': [datasheetName,3,n*25,dataLength,n*25],
'values': [datasheetName,3,n*25+2+v,dataLength,n*25+2+v],
})
print(' Series ' + str(s+1) + ': ' + 'deltatheta' + '=' + subFolder.split('deltatheta')[-1].replace('_','.'))
print(' ')
chartA.set_title ({'name': variableName + ' vs path coordinates'})
chartA.set_x_axis({'name': pathCoordinateName})
chartA.set_y_axis({'name': variableName})
print(' Title: ' + variableName + ' vs path coordinates')
print(' x axis: ' + pathCoordinateName)
print(' y axis: ' + variableName)
print(' ')
print(' ')
graphworksheet.insert_chart(v*20,0,chartA)
chartB = circumferentialpathsWorkbook.add_chart({'type': 'scatter','subtype': 'straight_with_markers'})
print(' Chart ' + str(v+1) + '.B')
print(' ')
for s,subFolder in enumerate(subfoldersList):
dataLength = circumferentialpathsDatalengths[s]
datasheetName = 'Values, deltatheta=' + subFolder.split('deltatheta')[-1].replace('_','.')
chartB.add_series({
'name': 'deltatheta' + '=' + subFolder.split('deltatheta')[-1].replace('_','.'),
'categories': [datasheetName,3,n*25+1,dataLength,n*25+1],
'values': [datasheetName,3,n*25+2+v,dataLength,n*25+2+v],
})
print(' Series ' + str(s+1) + ': ' + 'deltatheta' + '=' + subFolder.split('deltatheta')[-1].replace('_','.'))
print(' ')
chartB.set_title ({'name': variableName + ' vs normalized path coordinates'})
chartB.set_x_axis({'name': 'Norm ' + pathCoordinateName.split(' ')[0] + ' [-]'})
chartB.set_y_axis({'name': variableName})
print(' Title: ' + variableName + ' vs normalized path coordinates')
print(' x axis: ' + 'Norm ' + pathCoordinateName.split(' ')[0] + ' [-]')
print(' y axis: ' + variableName)
print(' ')
print(' ')
graphworksheet.insert_chart(v*20,30,chartB)
pathVariableName = 'y [mum]'
pathCoordinateName = 'x [mum]'
for n in range(0,min(numberOfHorizontalpaths)):
graphsheetName = 'Graphs, path n. ' + str(n)
graphworksheet = horizontalpathsWorkbook.add_worksheet(graphsheetName.decode('utf-8'))
print(' --> Writing worksheet')
print(' ' + graphsheetName)
print(' ')
variableNames = ['Sxx [MPa]','Syy [MPa]','Szz [MPa]','Sxy [MPa]','Szx [MPa]','Syz [MPa]','Srr [MPa]','Stt [MPa]','Srt [MPa]','S1_3D [MPa]','S2_3D [MPa]','S3_3D [MPa]','S1_2D [MPa]','S2_2D [MPa]','Smises_3D [MPa]','Smises_2D [MPa]','Saverage_3D [MPa]','Saverage_2D [MPa]','I1_3D [MPa]','I2_3D [MPa^2]','I3_3D [MPa^3]','I1_2D [MPa]','I2_2D [MPa^2]']
for v,variableName in enumerate(variableNames):
chartA = horizontalpathsWorkbook.add_chart({'type': 'scatter','subtype': 'straight_with_markers'})
print(' Chart ' + str(v+1) + '.A')
print(' ')
for s,subFolder in enumerate(subfoldersList):
dataLength = horizontalpathsDatalengths[s]
datasheetName = 'Values, deltatheta=' + subFolder.split('deltatheta')[-1].replace('_','.')
chartA.add_series({
'name': 'deltatheta' + '=' + subFolder.split('deltatheta')[-1].replace('_','.'),
'categories': [datasheetName,3,n*25,dataLength,n*25],
'values': [datasheetName,3,n*25+2+v,dataLength,n*25+2+v],
})
print(' Series ' + str(s+1) + ': ' + 'deltatheta' + '=' + subFolder.split('deltatheta')[-1].replace('_','.'))
print(' ')
chartA.set_title ({'name': variableName + ' vs path coordinates'})
chartA.set_x_axis({'name': pathCoordinateName})
chartA.set_y_axis({'name': variableName})
print(' Title: ' + variableName + ' vs path coordinates')
print(' x axis: ' + pathCoordinateName)
print(' y axis: ' + variableName)
print(' ')
print(' ')
graphworksheet.insert_chart(v*20,0,chartA)
chartB = horizontalpathsWorkbook.add_chart({'type': 'scatter','subtype': 'straight_with_markers'})
print(' Chart ' + str(v+1) + '.B')
print(' ')
for s,subFolder in enumerate(subfoldersList):
dataLength = horizontalpathsDatalengths[s]
datasheetName = 'Values, deltatheta=' + subFolder.split('deltatheta')[-1].replace('_','.')
chartB.add_series({
'name': 'deltatheta' + '=' + subFolder.split('deltatheta')[-1].replace('_','.'),
'categories': [datasheetName,3,n*25+1,dataLength,n*25+1],
'values': [datasheetName,3,n*25+2+v,dataLength,n*25+2+v],
})
print(' Series ' + str(s+1) + ': ' + 'deltatheta' + '=' + subFolder.split('deltatheta')[-1].replace('_','.'))
print(' ')
chartB.set_title ({'name': variableName + ' vs normalized path coordinates'})
chartB.set_x_axis({'name': 'Norm ' + pathCoordinateName.split(' ')[0] + ' [-]'})
chartB.set_y_axis({'name': variableName})
print(' Title: ' + variableName + ' vs normalized path coordinates')
print(' x axis: ' + 'Norm ' + pathCoordinateName.split(' ')[0] + ' [-]')
print(' y axis: ' + variableName)
print(' ')
print(' ')
graphworksheet.insert_chart(v*20,30,chartB)
pathVariableName = 'x [mum]'
pathCoordinateName = 'y [mum]'
for n in range(0,min(numberOfVerticalpaths)):
graphsheetName = 'Graphs, path n. ' + str(n)
graphworksheet = verticalpathsWorkbook.add_worksheet(graphsheetName.decode('utf-8'))
print(' --> Writing worksheet')
print(' ' + graphsheetName)
print(' ')
variableNames = ['Sxx [MPa]','Syy [MPa]','Szz [MPa]','Sxy [MPa]','Szx [MPa]','Syz [MPa]','Srr [MPa]','Stt [MPa]','Srt [MPa]','S1_3D [MPa]','S2_3D [MPa]','S3_3D [MPa]','S1_2D [MPa]','S2_2D [MPa]','Smises_3D [MPa]','Smises_2D [MPa]','Saverage_3D [MPa]','Saverage_2D [MPa]','I1_3D [MPa]','I2_3D [MPa^2]','I3_3D [MPa^3]','I1_2D [MPa]','I2_2D [MPa^2]']
for v,variableName in enumerate(variableNames):
chartA = verticalpathsWorkbook.add_chart({'type': 'scatter','subtype': 'straight_with_markers'})
print(' Chart ' + str(v+1) + '.A')
print(' ')
for s,subFolder in enumerate(subfoldersList):
dataLength = verticalpathsDatalengths[s]
datasheetName = 'Values, deltatheta=' + subFolder.split('deltatheta')[-1].replace('_','.')
chartA.add_series({
'name': 'deltatheta' + '=' + subFolder.split('deltatheta')[-1].replace('_','.'),
'categories': [datasheetName,3,n*25,dataLength,n*25],
'values': [datasheetName,3,n*25+2+v,dataLength,n*25+2+v],
})
print(' Series ' + str(s+1) + ': ' + 'deltatheta' + '=' + subFolder.split('deltatheta')[-1].replace('_','.'))
print(' ')
chartA.set_title ({'name': variableName + ' vs path coordinates'})
chartA.set_x_axis({'name': pathCoordinateName})
chartA.set_y_axis({'name': variableName})
print(' Title: ' + variableName + ' vs path coordinates')
print(' x axis: ' + pathCoordinateName)
print(' y axis: ' + variableName)
print(' ')
print(' ')
graphworksheet.insert_chart(v*20,0,chartA)
chartB = verticalpathsWorkbook.add_chart({'type': 'scatter','subtype': 'straight_with_markers'})
print(' Chart ' + str(v+1) + '.B')
print(' ')
for s,subFolder in enumerate(subfoldersList):
dataLength = verticalpathsDatalengths[s]
datasheetName = 'Values, deltatheta=' + subFolder.split('deltatheta')[-1].replace('_','.')
chartB.add_series({
'name': 'deltatheta' + '=' + subFolder.split('deltatheta')[-1].replace('_','.'),
'categories': [datasheetName,3,n*25+1,dataLength,n*25+1],
'values': [datasheetName,3,n*25+2+v,dataLength,n*25+2+v],
})
print(' Series ' + str(s+1) + ': ' + 'deltatheta' + '=' + subFolder.split('deltatheta')[-1].replace('_','.'))
print(' ')
chartB.set_title ({'name': variableName + ' vs normalized path coordinates'})
chartB.set_x_axis({'name': 'Norm ' + pathCoordinateName.split(' ')[0] + ' [-]'})
chartB.set_y_axis({'name': variableName})
print(' Title: ' + variableName + ' vs normalized path coordinates')
print(' x axis: ' + 'Norm ' + pathCoordinateName.split(' ')[0] + ' [-]')
print(' y axis: ' + variableName)
print(' ')
print(' ')
graphworksheet.insert_chart(v*20,30,chartB)
print(' Close workbook ' + join(outdir,outputfileBasename + '-radialpathsData' + '.xlsx'))
radialpathsWorkbook.close()
print('Workbook closed.')
print(' Close workbook ' + join(outdir,outputfileBasename + '-circumferentialpathsData' + '.xlsx'))
circumferentialpathsWorkbook.close()
print('Workbook closed.')
print(' Close workbook ' + join(outdir,outputfileBasename + '-horizontalpathsData' + '.xlsx'))
horizontalpathsWorkbook.close()
print('Workbook closed.')
print(' Close workbook ' + join(outdir,outputfileBasename + '-verticalpathsData' + '.xlsx'))
verticalpathsWorkbook.close()
print('Workbook closed.')
print(' Close workbook ' + join(outdir,outputfileBasename + '-radialpathsStrainData' + '.xlsx'))
radialpathsStrainWorkbook.close()
print('Workbook closed.')
print(' Close workbook ' + join(outdir,outputfileBasename + '-circumferentialpathsStrainData' + '.xlsx'))
circumferentialpathsStrainWorkbook.close()
print('Workbook closed.')
print(' Close workbook ' + join(outdir,outputfileBasename + '-horizontalpathsStrainData' + '.xlsx'))
horizontalpathsStrainWorkbook.close()
print('Workbook closed.')
print(' Close workbook ' + join(outdir,outputfileBasename + '-verticalpathsStrainData' + '.xlsx'))
verticalpathsStrainWorkbook.close()
print('Workbook closed.')
if toLatex: # only for errts file
if not os.path.exists(join(reportFolder,'pics')):
os.mkdir(join(reportFolder,'pics'))
copyfile(join('D:/01_Luca/06_WD/thinPlyMechanics/tex/Templates/Template_reports','Docmase_logo.jpg'),join(reportFolder,'pics','Docmase_logo.jpg'))
copyfile(join('D:/01_Luca/06_WD/thinPlyMechanics/tex/Templates/Template_reports','erasmusmundus_logo.jpg'),join(reportFolder,'pics','erasmusmundus_logo.jpg'))
copyfile(join('D:/01_Luca/06_WD/thinPlyMechanics/tex/Templates/Template_slides','logo-eeigm.jpg'),join(reportFolder,'pics','logo-eeigm.jpg'))
copyfile(join('D:/01_Luca/06_WD/thinPlyMechanics/tex/Templates/Template_reports','lulea_logo1.jpg'),join(reportFolder,'pics','lulea_logo1.jpg'))
matrixProps = provideMatrixProperties()
G0meanstress = []
G0planestrainstress = []
G0planestrainstressharmonic = []
G0planestrainstressrve = []
G0meanstressharmonic = []
G0meanstressrve = []
G0strain = []
G0strainharmonic = []
G0strainrve = []
GIvcctonly = []
GIIvcctonly = []
GTOTvcctonly = []
GIvcctjint = []
GIIvcctjint = []
GTOTvcctjint = []
deltatheta = []
LoverRf = []
Vff = []
phiCZ = []
Y0atbound = []
sigmaXXatbound = []
sigmaZZatbound = []
sigmaXZatbound = []
currentG0meanstress = []
currentG0planestrainstress = []
currentG0planestrainstressharmonic = []
currentG0planestrainstressrve = []
currentG0meanstressharmonic = []
currentG0meanstressrve = []
currentG0strain = []
currentG0strainharmonic = []
currentG0strainrve = []
currentGIvcctonly = []
currentGIIvcctonly = []
currentGTOTvcctonly = []
currentGIvcctjint = []
currentGIIvcctjint = []
currentGTOTvcctjint = []
currentdeltatheta = []
currentLoverRf = []
currentVff = []
currentphiCZ = []
currentY0atbound = []
currentsigmaXXatbound = []
currentsigmaZZatbound = []
currentsigmaXZatbound = []
for line in lines[1:]:
csvPath = line.replace('\n','').split(',')[1]
inputdataPath = '_'.join(line.replace('\n','').split(',')[1].split('_')[:-1]) + '_InputData' + '.csv'
dataType = line.replace('\n','').split(',')[0]
try:
with open(csvPath,'r') as csv:
csvlines = csv.readlines()
except Exception,error:
    sys.exc_clear()
    continue
try:
with open(inputdataPath,'r') as csv:
inputdatalines = csv.readlines()
except Exception,error:
continue
sys.exit(2)
if 'ERRT' in dataType or 'ERRTS' in dataType or 'ERRTs' in dataType or 'errt' in dataType or 'errts' in dataType:
epsxx = float(inputdatalines[1].replace('\n','').split(',')[5])
Rf = float(inputdatalines[1].replace('\n','').split(',')[0])
for c,csvline in enumerate(csvlines[1:]):
values = csvline.replace('\n','').split(',')
if len(currentLoverRf)>0:
if float(values[3])!=currentLoverRf[-1]:
G0meanstress.append(currentG0meanstress)
G0planestrainstress.append(currentG0planestrainstress)
G0planestrainstressharmonic.append(currentG0planestrainstressharmonic)
G0planestrainstressrve.append(currentG0planestrainstressrve)
G0meanstressharmonic.append(currentG0meanstressharmonic)
G0meanstressrve.append(currentG0meanstressrve)
G0strain.append(currentG0strain)
G0strainharmonic.append(currentG0strainharmonic)
G0strainrve.append(currentG0strainrve)
GIvcctonly.append(currentGIvcctonly)
GIIvcctonly.append(currentGIIvcctonly)
GTOTvcctonly.append(currentGTOTvcctonly)
GIvcctjint.append(currentGIvcctjint)
GIIvcctjint.append(currentGIIvcctjint)
GTOTvcctjint.append(currentGTOTvcctjint)
deltatheta.append(currentdeltatheta)
LoverRf.append(currentLoverRf)
Vff.append(currentVff)
phiCZ.append(currentphiCZ)
currentG0meanstress = []
currentG0planestrainstress = []
currentG0planestrainstressharmonic = []
currentG0planestrainstressrve = []
currentG0meanstressharmonic = []
currentG0meanstressrve = []
currentG0strain = []
currentG0strainharmonic = []
currentG0strainrve = []
currentGIvcctonly = []
currentGIIvcctonly = []
currentGTOTvcctonly = []
currentGIvcctjint = []
currentGIIvcctjint = []
currentGTOTvcctjint = []
currentdeltatheta = []
currentLoverRf = []
currentVff = []
currentphiCZ = []
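# reference energy release rates used later for normalization:
# G0 from the mean boundary stress (column 5 of the csv), G0 from the
# plane-strain far-field stress, and G0 from the applied strain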
currentG0meanstress.append(float(values[5]))
currentG0planestrainstress.append(np.pi*Rf*(matrixProps['E']*epsxx/(1-matrixProps['nu']*matrixProps['nu']))*(matrixProps['E']*epsxx/(1-matrixProps['nu']*matrixProps['nu']))*(1+matrixProps['k-planestrain'])/(8.0*matrixProps['G']))
currentG0planestrainstressharmonic.append(np.nan) # harmonic-mean variant not provided in the source, placeholder keeps lists aligned
currentG0planestrainstressrve.append(np.nan) # RVE variant not provided in the source, placeholder
currentG0meanstressharmonic.append(np.nan) # harmonic-mean variant not provided in the source, placeholder
currentG0meanstressrve.append(np.nan) # RVE variant not provided in the source, placeholder
currentG0strain.append(np.pi*Rf*(matrixProps['E']/(1-matrixProps['nu']*matrixProps['nu']))*epsxx*epsxx)
currentG0strainharmonic.append(np.nan) # harmonic-mean variant not provided in the source, placeholder
currentG0strainrve.append(np.nan) # RVE variant not provided in the source, placeholder
currentGIvcctonly.append(float(values[13]))
currentGIIvcctonly.append(float(values[14]))
currentGTOTvcctonly.append(float(values[15]))
currentGIvcctjint.append(float(values[16]))
currentGIIvcctjint.append(float(values[17]))
currentGTOTvcctjint.append(float(values[18]))
currentdeltatheta.append(float(values[0]))
currentLoverRf.append(float(values[3]))
currentVff.append(0.25*np.pi/float(values[3]))
currentphiCZ.append(float(values[4]))
elif 'stressesatboundary' in dataType or 'StressesAtBoundary' in dataType or 'stresses-at-boundary' in dataType or 'Stresses-At-Boundary' in dataType:
for c,csvline in enumerate(csvlines[1:]):
values = csvline.replace('\n','').split(',')
currentY0atbound.append(values[1])
currentsigmaXXatbound.append(values[4])
currentsigmaZZatbound.append(values[5])
currentsigmaXZatbound.append(values[7])
for s,valueSet in enumerate(GIvcctonly):
currentVff = Vff[s][0]
currentLoverRf = LoverRf[s][0]
debondSize = np.array(deltatheta[s])
CZsize = np.array(phiCZ[s])
GI = [np.array(GIvcctonly[s]),np.array(GIvcctjint[s])]
GII = [np.array(GIIvcctonly[s]),np.array(GIIvcctjint[s])]
GTOT = [np.array(GTOTvcctonly[s]),np.array(GTOTvcctjint[s])]
gMethod = ['VCCT only','VCCT/J-integral']
G0s = [G0meanstress[s],G0planestrainstress[s],G0strain[s]]
legendEntries = '{$GI/G0-FEM$,$GII/G0-FEM$,$GTOT/G0-FEM$,$GI/G0-BEM$,$GII/G0-BEM$,$GTOT/G0-BEM$}'
dataoptions = ['red,smooth,mark=square*',
'red,smooth,mark=triangle*',
'red,smooth,mark=*',
'black,smooth,mark=square*',
'black,smooth,mark=triangle*',
'black,smooth,mark=*',]
bemDSize = BEMdata['normGs'][:,0]
bemGI = BEMdata['normGs'][:,1]
bemGII = BEMdata['normGs'][:,2]
bemGTOT = BEMdata['normGs'][:,3]
for m,method in enumerate(gMethod):
titles = ['\\bf{Normalized Energy Release Rate, '+method+', $Vf_{f}='+str(currentVff)+'$, $\\frac{L}{R_{f}}='+str(currentLoverRf)+'$, $G_{0}=\\frac{1+k_{m}}{8G_{m}}\\pi R_{f}\\left(\\frac{1}{2L}\\int_{-L}^{+L}\\sigma_{xx}\\left(L,z\\right)dz\\right)^{2}$}',
'\\bf{Normalized Energy Release Rate, '+method+', $Vf_{f}='+str(currentVff)+'$, $\\frac{L}{R_{f}}='+str(currentLoverRf)+'$, $G_{0}=\\frac{1+k_{m}}{8G_{m}}\\pi R_{f}\\left(\\frac{E_{m}}{1-\\nu^{2}}\\varepsilon_{xx}\\right)^{2}$}',
'\\bf{Normalized Energy Release Rate, '+method+', $Vf_{f}='+str(currentVff)+'$, $\\frac{L}{R_{f}}='+str(currentLoverRf)+'$, $G_{0}=\\frac{E_{m}}{1-\\nu^{2}}\\pi R_{f}\\varepsilon_{xx}^{2}$}']
fileoptionName = ['G0-mean-stress',
'G0-plane-strain-stress',
'G0-strain']
for g,G0 in enumerate(G0s):
normGI = GI[m]/G0
normGII = GII[m]/G0
normGTOT = GTOT[m]/G0
xyData = []
xyData.append(np.transpose(np.array([debondSize,normGI])))
xyData.append(np.transpose(np.array([debondSize,normGII])))
xyData.append(np.transpose(np.array([debondSize,normGTOT])))
xyData.append(np.transpose(np.array([bemDSize,bemGI])))
xyData.append(np.transpose(np.array([bemDSize,bemGII])))
xyData.append(np.transpose(np.array([bemDSize,bemGTOT])))
axisoptions = 'width=30cm,\n ' \
'title={'+titles[g]+'},\n ' \
'title style={font=\\fontsize{40}{8}\\selectfont},\n ' \
'xlabel style={at={(axis description cs:0.5,-0.02)},anchor=north,font=\\fontsize{44}{40}\\selectfont},\n ' \
'ylabel style={at={(axis description cs:-0.025,.5)},anchor=south,font=\\fontsize{44}{40}\\selectfont},\n ' \
'xlabel={$\\Delta\\theta\\left[^{\\circ}\\right]$},ylabel={$\\frac{G_{\\left(\\cdot\\cdot\\right)}}{G_{0}}\\left[-\\right]$},\n ' \
'xmin=' + str(0.0) + ',\n ' \
'xmax=' + str(160.0) + ',\n ' \
'ymin=' + str(0.0) + ',\n ' \
'ymax=' + str(np.max([np.max(normGTOT),np.max(bemGTOT)])) + ',\n ' \
'tick align=outside,\n ' \
'tick label style={font=\\huge},\n ' \
'xmajorgrids,\n ' \
'xtick={0.0,10.0,20.0,30.0,40.0,50.0,60.0,70.0,80.0,90.0,100.0,110.0,120.0,130.0,140.0,150.0,160.0,170.0,180.0},\n ' \
'x grid style={lightgray!92.026143790849673!black},\n ' \
'ymajorgrids,\n ' \
'y grid style={lightgray!92.026143790849673!black},\n ' \
'line width=0.5mm,\n ' \
'legend style={draw=white!80.0!black,font=\\fontsize{28}{24}\\selectfont,row sep=15pt},\n ' \
'legend entries={' + legendEntries + '},\n ' \
'legend image post style={xscale=2},\n ' \
'legend cell align={left}'
writeLatexMultiplePlots(outDir,'Gs-SUMMARY_Vff'+str(currentVff)+'-'+method.replace(' ','-').replace('/','-')+'-'+fileoptionName[g]+'.tex',xyData,axisoptions,dataoptions)
titles = ['\\bf{Normalized Mode I Energy Release Rate, $Vf_{f}='+str(currentVff)+'$, $\\frac{L}{R_{f}}='+str(currentLoverRf)+'$, $G_{0}=\\frac{1+k_{m}}{8G_{m}}\\pi R_{f}\\left(\\frac{1}{2L}\\int_{-L}^{+L}\\sigma_{xx}\\left(L,z\\right)dz\\right)^{2}$}',
'\\bf{Normalized Mode I Energy Release Rate, $Vf_{f}='+str(currentVff)+'$, $\\frac{L}{R_{f}}='+str(currentLoverRf)+'$, $G_{0}=\\frac{1+k_{m}}{8G_{m}}\\pi R_{f}\\left(\\frac{E_{m}}{1-\\nu^{2}}\\varepsilon_{xx}\\right)^{2}$}',
'\\bf{Normalized Mode I Energy Release Rate, $Vf_{f}='+str(currentVff)+'$, $\\frac{L}{R_{f}}='+str(currentLoverRf)+'$, $G_{0}=\\frac{E_{m}}{1-\\nu^{2}}\\pi R_{f}\\varepsilon_{xx}^{2}$}']
fileoptionName = ['G0-mean-stress',
'G0-plane-strain-stress',
'G0-strain']
legendEntries = '{$GI/G0-FEM,VCCT only$,$GI/G0-FEM,VCCT/J-integral$,$GI/G0-BEM$}'
dataoptions = ['red,smooth,mark=square*',
'blue,smooth,mark=square*',
'black,smooth,mark=square*']
for g,G0 in enumerate(G0s):
xyData = []
for m,method in enumerate(gMethod):
normGI = GI[m]/G0
xyData.append(np.transpose(np.array([debondSize,normGI])))
xyData.append(np.transpose(np.array([bemDSize,bemGI])))
axisoptions = 'width=30cm,\n ' \
'title={'+titles[g]+'},\n ' \
'title style={font=\\fontsize{40}{8}\\selectfont},\n ' \
'xlabel style={at={(axis description cs:0.5,-0.02)},anchor=north,font=\\fontsize{44}{40}\\selectfont},\n ' \
'ylabel style={at={(axis description cs:-0.025,.5)},anchor=south,font=\\fontsize{44}{40}\\selectfont},\n ' \
'xlabel={$\\Delta\\theta\\left[^{\\circ}\\right]$},ylabel={$\\frac{G_{I}}{G_{0}}\\left[-\\right]$},\n ' \
'xmin=' + str(0.0) + ',\n ' \
'xmax=' + str(160.0) + ',\n ' \
'ymin=' + str(0.0) + ',\n ' \
'ymax=' + str(np.max([np.max(xyData[0][:,1]),np.max(xyData[1][:,1]),np.max(xyData[2][:,1])])) + ',\n ' \
'tick align=outside,\n ' \
'tick label style={font=\\huge},\n ' \
'xmajorgrids,\n ' \
'xtick={0.0,10.0,20.0,30.0,40.0,50.0,60.0,70.0,80.0,90.0,100.0,110.0,120.0,130.0,140.0,150.0,160.0,170.0,180.0},\n ' \
'x grid style={lightgray!92.026143790849673!black},\n ' \
'ymajorgrids,\n ' \
'y grid style={lightgray!92.026143790849673!black},\n ' \
'line width=0.5mm,\n ' \
'legend style={draw=white!80.0!black,font=\\fontsize{28}{24}\\selectfont,row sep=15pt},\n ' \
'legend entries={' + legendEntries + '},\n ' \
'legend image post style={xscale=2},\n ' \
'legend cell align={left}'
writeLatexMultiplePlots(outDir,'GI-Method-Comparison_Vff'+str(currentVff)+'-'+fileoptionName[g]+'.tex',xyData,axisoptions,dataoptions)
titles = ['\\bf{Normalized Mode II Energy Release Rate, $Vf_{f}='+str(currentVff)+'$, $\\frac{L}{R_{f}}='+str(currentLoverRf)+'$, $G_{0}=\\frac{1+k_{m}}{8G_{m}}\\pi R_{f}\\left(\\frac{1}{2L}\\int_{-L}^{+L}\\sigma_{xx}\\left(L,z\\right)dz\\right)^{2}$}',
'\\bf{Normalized Mode II Energy Release Rate, $Vf_{f}='+str(currentVff)+'$, $\\frac{L}{R_{f}}='+str(currentLoverRf)+'$, $G_{0}=\\frac{1+k_{m}}{8G_{m}}\\pi R_{f}\\left(\\frac{E_{m}}{1-\\nu^{2}}\\varepsilon_{xx}\\right)^{2}$}',
'\\bf{Normalized Mode II Energy Release Rate, $Vf_{f}='+str(currentVff)+'$, $\\frac{L}{R_{f}}='+str(currentLoverRf)+'$, $G_{0}=\\frac{E_{m}}{1-\\nu^{2}}\\pi R_{f}\\varepsilon_{xx}^{2}$}']
fileoptionName = ['G0-mean-stress',
'G0-plane-strain-stress',
'G0-strain']
legendEntries = '{$GII/G0-FEM,VCCT only$,$GII/G0-FEM,VCCT/J-integral$,$GII/G0-BEM$}'
dataoptions = ['red,smooth,mark=triangle*',
'blue,smooth,mark=triangle*',
'black,smooth,mark=triangle*']
for g,G0 in enumerate(G0s):
xyData = []
for m,method in enumerate(gMethod):
normGII = GII[m]/G0
xyData.append(np.transpose(np.array([debondSize,normGII])))
xyData.append(np.transpose(np.array([bemDSize,bemGII])))
axisoptions = 'width=30cm,\n ' \
'title={'+titles[g]+'},\n ' \
'title style={font=\\fontsize{40}{8}\\selectfont},\n ' \
'xlabel style={at={(axis description cs:0.5,-0.02)},anchor=north,font=\\fontsize{44}{40}\\selectfont},\n ' \
'ylabel style={at={(axis description cs:-0.025,.5)},anchor=south,font=\\fontsize{44}{40}\\selectfont},\n ' \
'xlabel={$\\Delta\\theta\\left[^{\\circ}\\right]$},ylabel={$\\frac{G_{II}}{G_{0}}\\left[-\\right]$},\n ' \
'xmin=' + str(0.0) + ',\n ' \
'xmax=' + str(160.0) + ',\n ' \
'ymin=' + str(0.0) + ',\n ' \
'ymax=' + str(np.max([np.max(xyData[0][:,1]),np.max(xyData[1][:,1]),np.max(xyData[2][:,1])])) + ',\n ' \
'tick align=outside,\n ' \
'tick label style={font=\\huge},\n ' \
'xmajorgrids,\n ' \
'xtick={0.0,10.0,20.0,30.0,40.0,50.0,60.0,70.0,80.0,90.0,100.0,110.0,120.0,130.0,140.0,150.0,160.0,170.0,180.0},\n ' \
'x grid style={lightgray!92.026143790849673!black},\n ' \
'ymajorgrids,\n ' \
'y grid style={lightgray!92.026143790849673!black},\n ' \
'line width=0.5mm,\n ' \
'legend style={draw=white!80.0!black,font=\\fontsize{28}{24}\\selectfont,row sep=15pt},\n ' \
'legend entries={' + legendEntries + '},\n ' \
'legend image post style={xscale=2},\n ' \
'legend cell align={left}'
writeLatexMultiplePlots(outDir,'GII-Method-Comparison_Vff'+str(currentVff)+'-'+fileoptionName[g]+'.tex',xyData,axisoptions,dataoptions)
titles = ['\\bf{Normalized Total Energy Release Rate, $Vf_{f}='+str(currentVff)+'$, $\\frac{L}{R_{f}}='+str(currentLoverRf)+'$, $G_{0}=\\frac{1+k_{m}}{8G_{m}}\\pi R_{f}\\left(\\frac{1}{2L}\\int_{-L}^{+L}\\sigma_{xx}\\left(L,z\\right)dz\\right)^{2}$}',
'\\bf{Normalized Total Energy Release Rate, $Vf_{f}='+str(currentVff)+'$, $\\frac{L}{R_{f}}='+str(currentLoverRf)+'$, $G_{0}=\\frac{1+k_{m}}{8G_{m}}\\pi R_{f}\\left(\\frac{E_{m}}{1-\\nu^{2}}\\varepsilon_{xx}\\right)^{2}$}',
'\\bf{Normalized Total Energy Release Rate, $Vf_{f}='+str(currentVff)+'$, $\\frac{L}{R_{f}}='+str(currentLoverRf)+'$, $G_{0}=\\frac{E_{m}}{1-\\nu^{2}}\\pi R_{f}\\varepsilon_{xx}^{2}$}']
fileoptionName = ['G0-mean-stress',
'G0-plane-strain-stress',
'G0-strain']
legendEntries = '{$GTOT/G0-FEM,VCCT only$,$GTOT/G0-FEM,VCCT/J-integral$,$GTOT/G0-BEM$}'
dataoptions = ['red,smooth,mark=*',
'blue,smooth,mark=*',
'black,smooth,mark=*']
for g,G0 in enumerate(G0s):
xyData = []
for m,method in enumerate(gMethod):
normGTOT = GTOT[m]/G0
xyData.append(np.transpose(np.array([debondSize,normGTOT])))
xyData.append(np.transpose(np.array([bemDSize,bemGTOT])))
axisoptions = 'width=30cm,\n ' \
'title={'+titles[g]+'},\n ' \
'title style={font=\\fontsize{40}{8}\\selectfont},\n ' \
'xlabel style={at={(axis description cs:0.5,-0.02)},anchor=north,font=\\fontsize{44}{40}\\selectfont},\n ' \
'ylabel style={at={(axis description cs:-0.025,.5)},anchor=south,font=\\fontsize{44}{40}\\selectfont},\n ' \
'xlabel={$\\Delta\\theta\\left[^{\\circ}\\right]$},ylabel={$\\frac{G_{TOT}}{G_{0}}\\left[-\\right]$},\n ' \
'xmin=' + str(0.0) + ',\n ' \
'xmax=' + str(160.0) + ',\n ' \
'ymin=' + str(0.0) + ',\n ' \
'ymax=' + str(np.max([np.max(xyData[0][:,1]),np.max(xyData[1][:,1]),np.max(xyData[2][:,1])])) + ',\n ' \
'tick align=outside,\n ' \
'tick label style={font=\\huge},\n ' \
'xmajorgrids,\n ' \
'xtick={0.0,10.0,20.0,30.0,40.0,50.0,60.0,70.0,80.0,90.0,100.0,110.0,120.0,130.0,140.0,150.0,160.0,170.0,180.0},\n ' \
'x grid style={lightgray!92.026143790849673!black},\n ' \
'ymajorgrids,\n ' \
'y grid style={lightgray!92.026143790849673!black},\n ' \
'line width=0.5mm,\n ' \
'legend style={draw=white!80.0!black,font=\\fontsize{28}{24}\\selectfont,row sep=15pt},\n ' \
'legend entries={' + legendEntries + '},\n ' \
'legend image post style={xscale=2},\n ' \
'legend cell align={left}'
writeLatexMultiplePlots(outDir,'GTOT-Method-Comparison_Vff'+str(currentVff)+'-'+fileoptionName[g]+'.tex',xyData,axisoptions,dataoptions)
legendEntries = '{$Contact zone size$}'
dataoptions = ['blue,smooth,mark=*']
xyData = []
xyData.append(np.transpose(np.array([debondSize,CZsize])))
axisoptions = 'width=30cm,\n ' \
'title={Contact zone size as function of debond size},\n ' \
'title style={font=\\fontsize{40}{8}\\selectfont},\n ' \
'xlabel style={at={(axis description cs:0.5,-0.02)},anchor=north,font=\\fontsize{44}{40}\\selectfont},\n ' \
'ylabel style={at={(axis description cs:-0.025,.5)},anchor=south,font=\\fontsize{44}{40}\\selectfont},\n ' \
'xlabel={$\\Delta\\theta\\left[^{\\circ}\\right]$},ylabel={$\\Delta\\varphi\\left[^{\\circ}\\right]$},\n ' \
'xmin=' + str(0.0) + ',\n ' \
'xmax=' + str(160.0) + ',\n ' \
'ymin=' + str(0.0) + ',\n ' \
'ymax=' + str(np.max(CZsize)) + ',\n ' \
'tick align=outside,\n ' \
'tick label style={font=\\huge},\n ' \
'xmajorgrids,\n ' \
'xtick={0.0,10.0,20.0,30.0,40.0,50.0,60.0,70.0,80.0,90.0,100.0,110.0,120.0,130.0,140.0,150.0,160.0,170.0,180.0},\n ' \
'x grid style={lightgray!92.026143790849673!black},\n ' \
'ymajorgrids,\n ' \
'y grid style={lightgray!92.026143790849673!black},\n ' \
'line width=0.5mm,\n ' \
'legend style={draw=white!80.0!black,font=\\fontsize{28}{24}\\selectfont,row sep=15pt},\n ' \
'legend entries={' + legendEntries + '},\n ' \
'legend image post style={xscale=2},\n ' \
'legend cell align={left}'
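# assumed closing call for the contact-zone plot, mirroring the
# writeLatexMultiplePlots pattern used above; the output filename is
# illustrative and not taken from the source
writeLatexMultiplePlots(outDir,'CZ-SUMMARY_Vff'+str(currentVff)+'.tex',xyData,axisoptions,dataoptions)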
if __name__ == "__main__":
main(sys.argv[1:])
|
apache-2.0
|
interactiveaudiolab/nussl
|
recipes/wham/mask_inference.py
|
1
|
8175
|
"""
This recipe trains and evaluates a mask inference model
on the clean data from the WHAM dataset with 8k. It's divided into
three big chunks: data preparation, training, and evaluation.
Final output of this script:
┌────────────────────┬────────────────────┬───────────────────┐
│ │ OVERALL (N = 6000) │ │
╞════════════════════╪════════════════════╪═══════════════════╡
│ SAR │ SDR │ SIR │
├────────────────────┼────────────────────┼───────────────────┤
│ 11.184634122040006 │ 10.030014257966346 │ 16.82237234679051 │
└────────────────────┴────────────────────┴───────────────────┘
Last run on 3/20/20.
"""
import nussl
from nussl import ml, datasets, utils, separation, evaluation
import os
import torch
import multiprocessing
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
from torch import optim
import logging
import matplotlib.pyplot as plt
import shutil
import json
import tqdm
import glob
import numpy as np
import termtables
# ----------------------------------------------------
# ------------------- SETTING UP ---------------------
# ----------------------------------------------------
# seed this recipe for reproducibility
utils.seed(0)
# set up logging
logging.basicConfig(
format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d:%H:%M:%S', level=logging.INFO)
# make sure this is set to WHAM root directory
WHAM_ROOT = os.getenv("WHAM_ROOT")
CACHE_ROOT = os.getenv("CACHE_ROOT")
NUM_WORKERS = multiprocessing.cpu_count() // 4
OUTPUT_DIR = os.path.expanduser('~/.nussl/recipes/wham_mi/run2')
RESULTS_DIR = os.path.join(OUTPUT_DIR, 'results')
MODEL_PATH = os.path.join(OUTPUT_DIR, 'checkpoints', 'best.model.pth')
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
BATCH_SIZE = 25
MAX_EPOCHS = 100
CACHE_POPULATED = True
LEARNING_RATE = 1e-3
PATIENCE = 5
GRAD_NORM = 2e-5
shutil.rmtree(os.path.join(RESULTS_DIR), ignore_errors=True)
os.makedirs(RESULTS_DIR, exist_ok=True)
shutil.rmtree(os.path.join(OUTPUT_DIR, 'tensorboard'), ignore_errors=True)
def construct_transforms(cache_location):
# stft will be 32ms wlen, 8ms hop, sqrt-hann, at 8khz sample rate by default
tfm = datasets.transforms.Compose([
datasets.transforms.MagnitudeSpectrumApproximation(), # take stfts and get ibm
datasets.transforms.MagnitudeWeights(), # get magnitude weights
datasets.transforms.ToSeparationModel(), # convert to tensors
datasets.transforms.Cache(cache_location), # up to here gets cached
datasets.transforms.GetExcerpt(400) # get 400 frame excerpts (3.2 seconds)
])
return tfm
def cache_dataset(_dataset):
cache_dataloader = torch.utils.data.DataLoader(
_dataset, num_workers=NUM_WORKERS, batch_size=BATCH_SIZE)
ml.train.cache_dataset(cache_dataloader)
_dataset.cache_populated = True
tfm = construct_transforms(os.path.join(CACHE_ROOT, 'tr'))
dataset = datasets.WHAM(WHAM_ROOT, split='tr', transform=tfm,
cache_populated=CACHE_POPULATED)
tfm = construct_transforms(os.path.join(CACHE_ROOT, 'cv'))
val_dataset = datasets.WHAM(WHAM_ROOT, split='cv', transform=tfm,
cache_populated=CACHE_POPULATED)
if not CACHE_POPULATED:
# cache datasets for speed
cache_dataset(dataset)
cache_dataset(val_dataset)
# ----------------------------------------------------
# -------------------- TRAINING ----------------------
# ----------------------------------------------------
# reload after caching
train_sampler = torch.utils.data.sampler.RandomSampler(dataset)
val_sampler = torch.utils.data.sampler.RandomSampler(val_dataset)
dataloader = torch.utils.data.DataLoader(dataset, num_workers=NUM_WORKERS,
batch_size=BATCH_SIZE, sampler=train_sampler)
val_dataloader = torch.utils.data.DataLoader(val_dataset, num_workers=NUM_WORKERS,
batch_size=BATCH_SIZE, sampler=val_sampler)
n_features = dataset[0]['mix_magnitude'].shape[1]
# builds a baseline model with 4 recurrent layers, 600 hidden units, bidirectional
# and 20 dimensional embedding
config = ml.networks.builders.build_recurrent_mask_inference(
n_features, 600, 4, True, 0.3, 2, ['sigmoid'],
normalization_class='BatchNorm'
)
model = ml.SeparationModel(config).to(DEVICE)
logging.info(model)
optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(
optimizer, factor=0.5, patience=PATIENCE)
# set up the loss function
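# permutation invariant training: the L1 loss is evaluated for every assignment
# of estimated masks to reference sources and the minimum over permutations is
# kept, which resolves the arbitrary ordering of the speakers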
loss_dictionary = {
'PermutationInvariantLoss': {'args': ['L1Loss'], 'weight': 1.0}
}
# set up closures for the forward and backward pass on one batch
train_closure = ml.train.closures.TrainClosure(
loss_dictionary, optimizer, model)
val_closure = ml.train.closures.ValidationClosure(
loss_dictionary, model)
# set up engines for training and validation
trainer, validator = ml.train.create_train_and_validation_engines(
train_closure, val_closure, device=DEVICE)
# attach handlers for visualizing output and saving the model
ml.train.add_stdout_handler(trainer, validator)
ml.train.add_validate_and_checkpoint(
OUTPUT_DIR, model, optimizer, dataset,
trainer, val_data=val_dataloader, validator=validator)
ml.train.add_tensorboard_handler(OUTPUT_DIR, trainer)
# add a handler to set up patience
@trainer.on(ml.train.ValidationEvents.VALIDATION_COMPLETED)
def step_scheduler(trainer):
val_loss = trainer.state.epoch_history['validation/loss'][-1]
scheduler.step(val_loss)
# add a handler to set up gradient clipping
@trainer.on(ml.train.BackwardsEvents.BACKWARDS_COMPLETED)
def clip_gradient(trainer):
torch.nn.utils.clip_grad_norm_(model.parameters(), GRAD_NORM)
# train the model
trainer.run(dataloader, max_epochs=MAX_EPOCHS)
# ----------------------------------------------------
# ------------------- EVALUATION ---------------------
# ----------------------------------------------------
test_dataset = datasets.WHAM(WHAM_ROOT, sample_rate=8000, split='tt')
# make a deep clustering separator with an empty audio signal initially
# this one will live on gpu and be used in a threadpool for speed
dme = separation.deep.DeepMaskEstimation(
nussl.AudioSignal(), model_path=MODEL_PATH, device='cuda')
def forward_on_gpu(audio_signal):
# set the audio signal of the object to this item's mix
dme.audio_signal = audio_signal
masks = dme.forward()
return masks
def separate_and_evaluate(item, masks):
separator = separation.deep.DeepMaskEstimation(item['mix'])
estimates = separator(masks)
evaluator = evaluation.BSSEvalScale(
list(item['sources'].values()), estimates, compute_permutation=True)
scores = evaluator.evaluate()
output_path = os.path.join(RESULTS_DIR, f"{item['mix'].file_name}.json")
with open(output_path, 'w') as f:
json.dump(scores, f)
pool = ThreadPoolExecutor(max_workers=NUM_WORKERS)
for i, item in enumerate(tqdm.tqdm(test_dataset)):
masks = forward_on_gpu(item['mix'])
if i == 0:
separate_and_evaluate(item, masks)
else:
pool.submit(separate_and_evaluate, item, masks)
pool.shutdown(wait=True)
json_files = glob.glob(f"{RESULTS_DIR}/*.json")
df = evaluation.aggregate_score_files(json_files)
overall = df.mean()
headers = ["", f"OVERALL (N = {df.shape[0]})", ""]
metrics = ["SAR", "SDR", "SIR"]
data = np.array(df.mean()).T
data = [metrics, data]
termtables.print(data, header=headers, padding=(0, 1), alignment="ccc")
|
mit
|
kcaluwae/tensegrity-el-simulator
|
examples/droptests/DropTestParameters.py
|
1
|
8571
|
'''
Original Coded by:
Ken Caluwaerts 2012-2013
<[email protected]>
Edited by:
Jonathan Bruce 2013
<[email protected]>
Edited by:
Kyle Morse summer 2013
<[email protected]>
'''
import simulator_v2 as simulator
import equilibrium_util as eu
import mass_matrix
import numpy as np
from matplotlib import pylab as plt
plt.ion()
#create structure
import icosahedron_payload_test
import datetime
from pytz import timezone
rodMass = 0
#################################################################################
#################### Use these to change simulation parameters ##################
########################### All units are in Metric #############################
rod_lengthMax = 4.5 #length of strut in meters
rod_lengthMin = 3.5 #length of strut in meters
rod_lengthStep = .5 #length of strut in meters
spring_outer_springConstantMax = 1600000 #N/m
spring_inner_springConstantMax = 40000 #N/m
spring_outer_springConstantMin = 1400000 #N/m
spring_inner_springConstantMin = 30000 #N/m
spring_outer_springConstantStep = 100000 #N/m
spring_inner_springConstantStep = 10000 #N/m
#drop_height = 5.0 #drop height in m
#v_o = -np.sqrt(2*9.81*drop_height) #velocity on earth in m/s
v_o = -11.4
D_o = 1.25 #outer diameter in inches
D_i = 1.18 #inner diameter of strut in inches
Area_b = 3.14*.25*(D_o**2-D_i**2)*0.000645 #Area of base converted to square meters
densityAl = 2780 #density of Al 2014 in kg/m^3
payload_mass = 70. #mass of IMU payload kg
payload_radius = .2
spring_damping = .10
default_gravity = True # The default is Earth's and will ignore user input for gravity
gravity = 1.352 # Titan's Gravity, change this value for something other than default
simulation_time = 150 # Simulation time in milliseconds
stretch_out = .000001 #stretch for pretension in meters
stretch_in = .000001 #stretch for pretension of inner springs in meters
runs = 1
for rod_height in np.arange(rod_lengthMin,rod_lengthMax,rod_lengthStep):
for ko in range(spring_outer_springConstantMin,spring_outer_springConstantMax,spring_outer_springConstantStep):
for ki in range(spring_inner_springConstantMin,spring_inner_springConstantMax,spring_inner_springConstantStep):
Volume_strut = Area_b*rod_height
rod_mass = densityAl*Volume_strut
N,B,C = icosahedron_payload_test.create_icosahedron(height=rod_height,payloadR=payload_radius)
if B.shape[0]==7:
rodMass = 1
N = N.T
B = -B
numInnerSprings = 12
#bar_sigma defines how to translate from generalized coordinates to Euclidean coordinates
#this has to be consistent with the point of reference for the mass matrix (see below)
#I always use 0
bar_sigma = np.zeros(B.shape[0])
constrained_nodes = np.zeros(N.shape[0]).astype(bool)
#note: you can't constrain two ends of a bar, although you can add external fixations!
#nodes 2,8,and 10 work best for constrained endpoints if more than 2 are constrained
constrained_nodes[2] = 0 #fix a number of strut endpoints
constrained_nodes[8] = 1 #fix a number of strut endpoints
constrained_nodes[10] = 1 #fix a number of strut endpoints
spring_k_1 = np.ones(C.shape[0]) #spring constants
for i in range (spring_k_1.shape[0]):
if i < spring_k_1.shape[0]-numInnerSprings:
spring_k_1[i] = ko #Outer Springs
else:
spring_k_1[i] = ki #Inner Springs
spring_d = np.ones(C.shape[0])*spring_damping #spring damping
bar_lengths,spring_lengths = eu.compute_lengths(B,C,N.T)
#spring equilibrium length
spring_l0 = np.zeros(C.shape[0])
for i in range(spring_l0.shape[0]):
if i<spring_l0.shape[0]-numInnerSprings:
spring_l0[i]=spring_lengths[i]-stretch_out
else:
spring_l0[i]=spring_lengths[i]-stretch_in
#compute mass matrices
density_1 = lambda x:rod_mass
density_2 = lambda x:payload_mass
if rodMass == 1:
bar_mass = np.array ([ mass_matrix.compute_mass_matrix (length = bar_lengths.ravel()[i], density = density_1, sigma=0.)[0] for i in range (B.shape[0]) ])
bar_mass[-1] = np.array([mass_matrix.compute_mass_matrix (length = bar_lengths.ravel()[i], density = density_2, sigma=0.)[0] ])
else:
bar_mass = np.array ([ mass_matrix.compute_mass_matrix (length = bar_lengths.ravel()[i], density = density_1, sigma=None)[0] for i in range (B.shape[0]) ])
#compute external forces (gravity :)
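#each bar's distributed gravity load is converted into two equivalent (consistent)
#nodal forces; the last entry of B is the payload and uses its own density function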
external_forces_1 = np.zeros(N.shape)
if default_gravity == True:
force_distribution_gravity = lambda x: (0,0,-9.81) # Earth's gravity
else:
force_distribution_gravity = lambda x: (0,0,-gravity) # Titan's Gravity
for i in xrange(B.shape[0]):
if i<(B.shape[0]-1):
f1_1,f2_1 = mass_matrix.compute_consistent_nodal_forces_vector(length=bar_lengths.ravel()[i],density=density_1,force_distribution=force_distribution_gravity)
from_ = B[i].argmin()
to_ = B[i].argmax()
external_forces_1[from_] = f1_1
external_forces_1[to_] = f2_1
else:
f1_1,f2_1 = mass_matrix.compute_consistent_nodal_forces_vector(length=bar_lengths.ravel()[i],density=density_2,force_distribution=force_distribution_gravity)
from_ = B[i].argmin()
to_ = B[i].argmax()
external_forces_1[from_] = f1_1
external_forces_1[to_] = f2_1
external_forces_1_func = lambda t: (1*t*external_forces_1) #gravity is applied
#the second argument is the initial nodal velocity
initVel = np.zeros(N.shape)
initVel[:][:,2] = v_o
#initialize the simulator
sim = simulator.Simulator(N, initVel, constrained_nodes, B, bar_mass, bar_sigma, C, spring_k_1, spring_d, spring_l0, nodal_damping=0.05, two_way_spring=False,external_nodal_forces=external_forces_1_func)
sim.initialize_ode()#rtol=1e-5,atol=1e-5)#integrator='rk4',control_dt=50000,internal_dt=2000,dt=0.002)
sim.simulate()
offsets = []
pos1 = []
l0 = sim.spring_l0.copy()
sForce = []
simVel = []
simAccel = []
endVel = 0.
posDiff = np.ones(simulation_time)
numMin = 0.
#MAIN SIMULATION LOOP: advance the dynamics and record nodal positions,
#spring forces, nodal velocities and accelerations at every step.
for j in xrange(simulation_time):
sim.simulate()
pos1.append(sim.nodes_eucl.copy())
sForce.append(sim.spring_forces)
simVel.append(sim.nodes_dot)
simAccel.append(sim.Q_dot_dot)
#run simulation until velocity is 25% of the absolute value of the original velocity
#this stops the simulation just after the maximum deflection
if (endVel <= -0.25*v_o):
#Determine state of payload and nodes as well as forces
aAccel = np.array(simAccel)
payAccel = aAccel[:,6]
aPos = np.array(pos1)
CorrectPos = aPos[:,13]-aPos[0,13]*np.ones_like(aPos[:,13])
nodePos = aPos[:,0:11,2]-aPos[0,0:11,2]
aForce = np.array(sForce)
outerForce = aForce[:,0:24]
innerForce = aForce[:,24:36]
aVel = np.array(simVel)
endVel = aVel[j,13,2]
#determine if payload impacts strut below it
#MUST RECALCULATE SECOND TERM IF NOT TWO POINT CONTACT
posDiff[j] = aPos[j,13,2] - np.min(aPos[j,2,2])#time dependent only for 2 pt contact ie aPos[m,node,2]
if posDiff[j] <= 0:
num = np.abs(posDiff[j])
if num > numMin:
numMin = num
else:
break
if any(posDiff <= 0):
c = open('ContactRodMass.txt','a')
c.write('\r\n')
c.write(str(datetime.datetime.now(timezone('US/Pacific-New'))))
c.write('\nLength of rod: {}'.format(rod_height))
c.write('\nK_out: {}'.format(ko))
c.write('\nK_in: {}'.format(ki))
c.write('\nMax Force: {}'.format(np.max(sForce)))
c.write('\nMax Acceleration:{}'.format(np.amax(payAccel)/9.81))
c.write('\nOvershoot: {}'.format(numMin))
c.write('\nTime: {}'.format(j))
c.close()
else:
gap = np.min(posDiff)
n = open('No_ContactTest.txt','a')
n.write('\r\n')
n.write(str(datetime.datetime.now(timezone('US/Pacific-New'))))
n.write('\nK_out: {}'.format(ko))
n.write('\nK_in: {}'.format(ki))
n.write('\nPayload Mass: {}'.format(payload_mass))
n.write('\nLength of rod: {}'.format(rod_height))
n.write('\nMax Force Outer: {}'.format(np.max(outerForce)))
n.write('\nMax Force Inner: {}'.format(np.max(innerForce)))
n.write('\nMax Acceleration: {}'.format(np.amax(payAccel)/9.81))
n.write('\nPayload Deflection: {}'.format(np.min(CorrectPos)))
n.write('\nDistance from payload to strut: {}'.format(gap))
n.write('\nTime: {}'.format(j))
n.write('\r\n')
n.close()
print(runs)
runs += 1
|
mit
|
jgrizou/explauto
|
explauto/models/gaussian.py
|
2
|
4081
|
import numpy
class Gaussian(object):
"""
Represents a single Gaussian probability
density function.
WARNING: some methods of this class may accept either one vector, as a (d,)
shaped array, or many, as a (n, d) shaped array. For d = 1, do NOT use (n,)
shaped array instead of (n, 1). The last formulation brings an ambiguity
that is NOT handled.
"""
def __init__(self, mu, sigma, inv_sigma=False):
"""
Creates the Gaussian with the given parameters.
@param mu : mean, given as (d,) matrix
@param sigma : covariance matrix
@param inv_sigma : boolean indicating if sigma is the inverse covariance matrix or not (default: False)
"""
self.mu = mu
if not inv_sigma:
self.sigma = sigma
self.inv = numpy.linalg.inv(self.sigma)
else:
self.sigma = numpy.linalg.inv(sigma)
self.inv = self.sigma
self.det = numpy.absolute(numpy.linalg.det(self.sigma))
def generate(self, number=None):
"""Generates vectors from the Gaussian.
@param number: optional, if given, generates more than one vector.
@returns: generated vector(s), either as a one dimensional array
(shape (d,)) if number is not set, or as a two dimensional array
(shape (n, d)) if n is given as number parameter.
"""
if number is None:
return numpy.random.multivariate_normal(self.mu, self.sigma)
else:
return numpy.random.multivariate_normal(self.mu, self.sigma, number)
def normal(self, x):
"""Returns the density of probability of x or the one dimensional
array of all probabilities if many vectors are given.
@param x : a single (d,) vector or an (n, d) array of vectors.
"""
return numpy.exp(self.log_normal(x))
def log_normal(self, x):
"""
Returns the log density of probability of x or the one dimensional
array of all log probabilities if many vectors are given.
@param x : a single (d,) vector or an (n, d) array of vectors
"""
d = self.mu.shape[0]
xc = x - self.mu
if len(x.shape) == 1:
exp_term = numpy.sum(numpy.multiply(xc, numpy.dot(self.inv, xc)))
else:
exp_term = numpy.sum(numpy.multiply(xc, numpy.dot(xc, self.inv)), axis=1)
return -.5 * (d * numpy.log(2 * numpy.pi) + numpy.log(self.det) + exp_term)
def cond_gaussian(self, dims, v):
"""
Returns mean and variance of the conditional probability
defined by a set of dimension and at a given vector.
@param dims : set of dimension to which respect conditional
probability is taken
@param v : vector defining the position where the conditional
probability is taken. v shape is defined by the size
of the set of dims.
"""
# NOTE: split_matrix / split_vector are not standard NumPy functions; they
# are assumed to be helpers provided elsewhere in this package that split
# sigma and mu into blocks according to `dims`.
(d, c, b, a) = numpy.split_matrix(self.sigma, dims)
(mu2, mu1) = numpy.split_vector(self.mu, dims)
d_inv = numpy.linalg.inv(d)
mu = mu1 + numpy.dot(numpy.dot(b, d_inv), v - mu2)
sigma = a - numpy.dot(b, numpy.dot(d_inv, c))
return Gaussian(mu, sigma)
# TODO : use a representation that allows different values of v
# without computing schur each time.
def get_entropy(self):
"""Computes (analyticaly) the entropy of the Gaussian distribution.
"""
dim = self.mu.shape[0]
entropy = 0.5 * (dim * (numpy.log(2. * numpy.pi) + 1.) + numpy.log(self.det))
return entropy
def get_display_ellipse2D(self):
from matplotlib.patches import Ellipse
if self.mu.shape != (2,):
raise ValueError('Not a 2 dimensional gaussian')
(val, vect) = numpy.linalg.eig(self.sigma)
el = Ellipse(self.mu,
3.5 * numpy.sqrt(val[0]),
3.5 * numpy.sqrt(val[1]),
180. * numpy.arctan2(vect[1, 0], vect[0, 0]) / numpy.pi,
fill=False,
linewidth=2)
return el
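# --- Illustrative usage (editor's sketch, not part of the original module) ---
# A minimal example of the API above: build a 2-D Gaussian, draw a batch of
# samples, and evaluate (log-)densities.  Shapes follow the class docstring:
# a single vector is (d,), a batch is (n, d).  The numbers are arbitrary.
if __name__ == '__main__':
    mu = numpy.array([0.0, 1.0])
    sigma = numpy.array([[1.0, 0.3],
                         [0.3, 2.0]])
    g = Gaussian(mu, sigma)
    samples = g.generate(5)        # (5, 2) array of samples
    log_p = g.log_normal(samples)  # (5,) array of log densities
    p_mu = g.normal(mu)            # scalar density at the mean
    print('log densities: %s' % log_p)
    print('density at the mean: %s, entropy: %s' % (p_mu, g.get_entropy()))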
|
gpl-3.0
|
aminert/scikit-learn
|
sklearn/decomposition/tests/test_kernel_pca.py
|
155
|
8058
|
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import (assert_array_almost_equal, assert_less,
assert_equal, assert_not_equal,
assert_raises)
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.metrics.pairwise import rbf_kernel
def test_kernel_pca():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
for eigen_solver in ("auto", "dense", "arpack"):
for kernel in ("linear", "rbf", "poly", histogram):
# histogram kernel produces singular matrix inside linalg.solve
# XXX use a least-squares approximation?
inv = not callable(kernel)
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=inv)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# non-regression test: previously, gamma would be 0 by default,
# forcing all eigenvalues to 0 under the poly kernel
assert_not_equal(X_fit_transformed, [])
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
if inv:
X_pred2 = kpca.inverse_transform(X_pred_transformed)
assert_equal(X_pred2.shape, X_pred.shape)
def test_invalid_parameters():
assert_raises(ValueError, KernelPCA, 10, fit_inverse_transform=True,
kernel='precomputed')
def test_kernel_pca_sparse():
rng = np.random.RandomState(0)
X_fit = sp.csr_matrix(rng.random_sample((5, 4)))
X_pred = sp.csr_matrix(rng.random_sample((2, 4)))
for eigen_solver in ("auto", "arpack"):
for kernel in ("linear", "rbf", "poly"):
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=False)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
# X_pred2 = kpca.inverse_transform(X_pred_transformed)
# assert_equal(X_pred2.shape, X_pred.shape)
def test_kernel_pca_linear_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
# for a linear kernel, kernel PCA should find the same projection as PCA
# modulo the sign (direction)
# fit only the first four components: fifth is near zero eigenvalue, so
# can be trimmed due to roundoff error
assert_array_almost_equal(
np.abs(KernelPCA(4).fit(X_fit).transform(X_pred)),
np.abs(PCA(4).fit(X_fit).transform(X_pred)))
def test_kernel_pca_n_components():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
for c in [1, 2, 4]:
kpca = KernelPCA(n_components=c, eigen_solver=eigen_solver)
shape = kpca.fit(X_fit).transform(X_pred).shape
assert_equal(shape, (2, c))
def test_remove_zero_eig():
X = np.array([[1 - 1e-30, 1], [1, 1], [1, 1 - 1e-20]])
# n_components=None (default) => remove_zero_eig is True
kpca = KernelPCA()
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
kpca = KernelPCA(n_components=2)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 2))
kpca = KernelPCA(n_components=2, remove_zero_eig=True)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
def test_kernel_pca_precomputed():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
X_kpca = KernelPCA(4, eigen_solver=eigen_solver).\
fit(X_fit).transform(X_pred)
X_kpca2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_pred, X_fit.T))
X_kpca_train = KernelPCA(
4, eigen_solver=eigen_solver,
kernel='precomputed').fit_transform(np.dot(X_fit, X_fit.T))
X_kpca_train2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_fit, X_fit.T))
assert_array_almost_equal(np.abs(X_kpca),
np.abs(X_kpca2))
assert_array_almost_equal(np.abs(X_kpca_train),
np.abs(X_kpca_train2))
def test_kernel_pca_invalid_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((2, 4))
kpca = KernelPCA(kernel="tototiti")
assert_raises(ValueError, kpca.fit, X_fit)
def test_gridsearch_pipeline():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="rbf", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(kernel_pca__gamma=2. ** np.arange(-2, 2))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
grid_search.fit(X, y)
assert_equal(grid_search.best_score_, 1)
def test_gridsearch_pipeline_precomputed():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model using a precomputed kernel.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="precomputed", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(Perceptron__n_iter=np.arange(1, 5))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
X_kernel = rbf_kernel(X, gamma=2.)
grid_search.fit(X_kernel, y)
assert_equal(grid_search.best_score_, 1)
def test_nested_circles():
# Test the linear separability of the first 2D KPCA transform
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
# 2D nested circles are not linearly separable
train_score = Perceptron().fit(X, y).score(X, y)
assert_less(train_score, 0.8)
# Project the circles data into the first 2 components of a RBF Kernel
# PCA model.
# Note that the gamma value is data dependent. If this test breaks
# and the gamma value has to be updated, the Kernel PCA example will
# have to be updated too.
kpca = KernelPCA(kernel="rbf", n_components=2,
fit_inverse_transform=True, gamma=2.)
X_kpca = kpca.fit_transform(X)
# The data is perfectly linearly separable in that space
train_score = Perceptron().fit(X_kpca, y).score(X_kpca, y)
assert_equal(train_score, 1.0)
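# --- Illustrative usage (editor's sketch, not part of the original test suite;
# the leading underscore keeps test collectors from picking it up) ---
# Minimal round trip of the API exercised above: project with an RBF kernel and
# map the projection back with the learned inverse transform.
def _example_kernel_pca_roundtrip():
    rng = np.random.RandomState(0)
    X = rng.random_sample((10, 4))
    kpca = KernelPCA(n_components=2, kernel="rbf",
                     fit_inverse_transform=True, gamma=1.0)
    X_proj = kpca.fit_transform(X)           # shape (10, 2)
    X_back = kpca.inverse_transform(X_proj)  # back in the original 4-D space
    assert_equal(X_proj.shape, (10, 2))
    assert_equal(X_back.shape, X.shape)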
|
bsd-3-clause
|
alexandrovteam/pyImagingMSpec
|
docs/conf.py
|
2
|
10147
|
# -*- coding: utf-8 -*-
#
# SM_distributed documentation build configuration file, created by
# sphinx-quickstart on Tue Feb 9 15:19:18 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import sphinx_rtd_theme
from recommonmark.parser import CommonMarkParser
sys.path.extend(['.', '..'])
import rtd_gen_docs
from pyImagingMSpec import __version__
pkg_name = u'pyImagingMSpec'
pkg_name_lowercase = u'pyimagingmspec'
rtd_gen_docs.main()
# see http://www.sphinx-doc.org/en/stable/ext/autodoc.html#confval-autodoc_mock_imports
autodoc_mock_imports = ['h5py',
'matplotlib.pyplot',
'numpy',
'pyMS',
'pyMS.mass_spectrum',
'pyspark',
'scipy',
'scipy.optimize',
'scipy.stats']
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
source_parsers = {
'.md': CommonMarkParser,
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
source_suffix = ['.rst', '.md']
# source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = pkg_name
copyright = u'2016, Alexandrov Team'
author = u'Alexandrov Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = '{}doc'.format(pkg_name)
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, '{}.tex'.format(pkg_name), u'{} Documentation'.format(pkg_name),
u'Alexandrov Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, pkg_name_lowercase, u'{} Documentation'.format(pkg_name),
[author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, pkg_name, u'{} Documentation'.format(pkg_name),
author, pkg_name, 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
|
apache-2.0
|
mattsmart/biomodels
|
agent_based_models/abm_conjugation_krone/plot_plasmids.py
|
1
|
1594
|
import matplotlib.pyplot as plt
def plasmid_stats(lattice, dict_counts):
keys = ['R', 'D', 'T']
plasmid_counts_by_type = {key: [0] * dict_counts[key] for key in keys}
cell_type_iterator = {key: 0 for key in keys}
# get distribution
n = len(lattice)
for i in xrange(n):
for j in xrange(n):
cell = lattice[i][j]
if cell.label != '_':
idx = cell_type_iterator[cell.label]
plasmid_counts_by_type[cell.label][idx] = cell.plasmid_amensal
cell_type_iterator[cell.label] += 1
return plasmid_counts_by_type
def plasmid_plotter(plasmid_counts, plot_path):
if len(plasmid_counts) > 0:
total_cells = len(plasmid_counts)
f = plt.figure()
plt.hist(plasmid_counts)
ax = plt.gca()
ax.set_title('Plasmid Count Histogram (cells = %d)' % total_cells)
ax.set_ylabel('Number of cells')
ax.set_xlabel('Plasmid Count')
f.set_size_inches(20.0, 8.0) # alternative: 20.0, 8.0
f.tight_layout()
plt.savefig(plot_path)
plt.clf()
return
def plasmid_plotter_wrapper(lattice, dict_counts, time, plot_dir):
plasmid_counts_by_type = plasmid_stats(lattice, dict_counts)
plasmid_plotter(plasmid_counts_by_type['R'], plot_dir + 'R_plasmid_histogram_at_time_%f' % time + '.png')
plasmid_plotter(plasmid_counts_by_type['D'], plot_dir + 'D_plasmid_histogram_at_time_%f' % time + '.png')
plasmid_plotter(plasmid_counts_by_type['T'], plot_dir + 'T_plasmid_histogram_at_time_%f' % time + '.png')
return
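# --- Illustrative usage (editor's sketch, not part of the original module) ---
# The helpers above expect a square lattice of cell objects exposing `label`
# (one of '_', 'R', 'D', 'T') and `plasmid_amensal` (an integer count), plus a
# dict giving how many cells of each labelled type are present.  The mock cell
# below is hypothetical and only illustrates that interface; run it under the
# Python 2 interpreter this module targets (plasmid_stats uses xrange).
if __name__ == '__main__':
    from collections import namedtuple
    MockCell = namedtuple('MockCell', ['label', 'plasmid_amensal'])
    lattice = [[MockCell('R', 3), MockCell('_', 0)],
               [MockCell('D', 1), MockCell('T', 5)]]
    dict_counts = {'R': 1, 'D': 1, 'T': 1}
    counts_by_type = plasmid_stats(lattice, dict_counts)
    plasmid_plotter(counts_by_type['R'], 'R_plasmid_histogram_example.png')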
|
mit
|
SMTorg/smt
|
doc/preprocess_test.py
|
3
|
3771
|
"""
Author: Dr. John T. Hwang <[email protected]>
This package is distributed under New BSD license.
"""
import os, sys
import inspect
import importlib
import contextlib
try:
from StringIO import StringIO
except:
from io import StringIO
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
@contextlib.contextmanager
def stdoutIO(stdout=None):
old = sys.stdout
if stdout is None:
stdout = StringIO()
sys.stdout = stdout
yield stdout
sys.stdout = old
def process_test(root, file_name, iline, line):
file_path = root + "/" + file_name
embed_num_indent = line.find(".. embed-test")
if line[:embed_num_indent] != " " * embed_num_indent:
return line
include_print_output = (
"embed-test-print" in line
or "embed-test-print-plot" in line
)
include_plot_output = (
"embed-test-plot" in line
or "embed-test-print-plot" in line
)
split_line = line.replace(" ", "").split(",")
if len(split_line) != 3 or len(split_line[0].split("::")) != 2:
raise Exception(
"Invalid format for embed-test in file {} line {}".format(
file_path, iline + 1
)
)
py_file_path = split_line[0].split("::")[1]
class_name = split_line[1]
method_name = split_line[2][:-1]
index = len(py_file_path.split("/")[-1])
py_root = py_file_path[:-index]
py_file_name = py_file_path[-index:]
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/" + py_root)
py_module = importlib.import_module(py_file_name[:-3])
obj = getattr(py_module, class_name)
method = getattr(obj, method_name)
method_lines = inspect.getsource(method).split("\n")
for imethod_line, method_line in enumerate(method_lines):
if "def" in method_line and method_name in method_line:
imethod_line += 1
break
method_lines = method_lines[imethod_line:]
first_line = method_lines[0]
py_num_indent = first_line.find(first_line.strip())
for imethod_line, method_line in enumerate(method_lines):
method_lines[imethod_line] = method_line[py_num_indent:]
replacement_lines = []
replacement_lines.append(" " * embed_num_indent + ".. code-block:: python\n")
replacement_lines.append("\n")
replacement_lines.extend(
[
" " * embed_num_indent + " " * 2 + method_line + "\n"
for method_line in method_lines
]
)
if include_print_output:
joined_method_lines = "\n".join(method_lines)
with stdoutIO() as s:
exec(joined_method_lines)
output_lines = s.getvalue().split("\n")
if len(output_lines) > 1:
replacement_lines.append(" " * embed_num_indent + "::\n")
replacement_lines.append("\n")
replacement_lines.extend(
[
" " * embed_num_indent + " " * 2 + output_line + "\n"
for output_line in output_lines
]
)
if include_plot_output:
joined_method_lines = "\n".join(method_lines)
plt.clf()
with stdoutIO() as s:
exec(joined_method_lines)
abs_plot_name = file_path[:-5] + ".png"
plt.savefig(abs_plot_name)
rel_plot_name = file_name[:-5] + ".png"
replacement_lines.append(
" " * embed_num_indent + ".. figure:: {}\n".format(rel_plot_name)
)
replacement_lines.append(" " * embed_num_indent + " :scale: 80 %\n")
replacement_lines.append(" " * embed_num_indent + " :align: center\n")
return replacement_lines
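# --- Illustrative note (editor's sketch, not part of the original module) ---
# Judging from the parsing above, an ``embed-test`` directive line in a doc
# source (a 5-character extension such as ``.rstx`` is assumed, since the
# output figure name is built from ``file_path[:-5]``) has three
# comma-separated fields, the first containing ``::`` followed by the path of
# the Python file, for example:
#
#     .. embed-test-print-plot :: ../smt/examples/test_example.py , Test , test_rbf
#
# process_test() then imports ``Test.test_rbf`` from that file, inlines its
# source as a ``.. code-block:: python`` and, depending on the directive
# variant, appends the captured stdout and/or a saved matplotlib figure.  The
# path, class and method names above are hypothetical placeholders.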
|
bsd-3-clause
|
trungnt13/scikit-learn
|
benchmarks/bench_isotonic.py
|
268
|
3046
|
"""
Benchmarks of isotonic regression performance.
We generate a synthetic dataset of size 10^n, for n in [min, max], and
examine the time taken to run isotonic regression over the dataset.
The timings are then output to stdout, or visualized on a log-log scale
with matplotlib.
This allows the scaling of the algorithm with the problem size to be
visualized and understood.
"""
from __future__ import print_function
import numpy as np
import gc
from datetime import datetime
from sklearn.isotonic import isotonic_regression
from sklearn.utils.bench import total_seconds
import matplotlib.pyplot as plt
import argparse
def generate_perturbed_logarithm_dataset(size):
return np.random.randint(-50, 50, size=size) \
+ 50. * np.log(1 + np.arange(size))
def generate_logistic_dataset(size):
X = np.sort(np.random.normal(size=size))
return np.random.random(size=size) < 1.0 / (1.0 + np.exp(-X))
DATASET_GENERATORS = {
'perturbed_logarithm': generate_perturbed_logarithm_dataset,
'logistic': generate_logistic_dataset
}
def bench_isotonic_regression(Y):
"""
Runs a single iteration of isotonic regression on the input data,
and reports the total time taken (in seconds).
"""
gc.collect()
tstart = datetime.now()
isotonic_regression(Y)
delta = datetime.now() - tstart
return total_seconds(delta)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Isotonic Regression benchmark tool")
parser.add_argument('--iterations', type=int, required=True,
help="Number of iterations to average timings over "
"for each problem size")
parser.add_argument('--log_min_problem_size', type=int, required=True,
help="Base 10 logarithm of the minimum problem size")
parser.add_argument('--log_max_problem_size', type=int, required=True,
help="Base 10 logarithm of the maximum problem size")
parser.add_argument('--show_plot', action='store_true',
help="Plot timing output with matplotlib")
parser.add_argument('--dataset', choices=DATASET_GENERATORS.keys(),
required=True)
args = parser.parse_args()
timings = []
for exponent in range(args.log_min_problem_size,
args.log_max_problem_size):
n = 10 ** exponent
Y = DATASET_GENERATORS[args.dataset](n)
time_per_iteration = \
[bench_isotonic_regression(Y) for i in range(args.iterations)]
timing = (n, np.mean(time_per_iteration))
timings.append(timing)
# If we're not plotting, dump the timing to stdout
if not args.show_plot:
print(n, np.mean(time_per_iteration))
if args.show_plot:
plt.plot(*zip(*timings))
plt.title("Average time taken running isotonic regression")
plt.xlabel('Number of observations')
plt.ylabel('Time (s)')
plt.axis('tight')
plt.loglog()
plt.show()
|
bsd-3-clause
|
marcocaccin/scikit-learn
|
sklearn/neighbors/approximate.py
|
30
|
22370
|
"""Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <[email protected]>
# Joel Nothman <[email protected]>
import numpy as np
import warnings
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..utils.validation import check_array
from ..utils import check_random_state
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
__all__ = ["LSHForest"]
HASH_DTYPE = '>u4'
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
def _find_longest_prefix_match(tree, bin_X, hash_size,
left_masks, right_masks):
"""Find the longest prefix match in tree for each query in bin_X
Most significant bits are considered as the prefix.
"""
hi = np.empty_like(bin_X, dtype=np.intp)
hi.fill(hash_size)
lo = np.zeros_like(bin_X, dtype=np.intp)
res = np.empty_like(bin_X, dtype=np.intp)
left_idx, right_idx = _find_matching_indices(tree, bin_X,
left_masks[hi],
right_masks[hi])
found = right_idx > left_idx
res[found] = lo[found] = hash_size
r = np.arange(bin_X.shape[0])
kept = r[lo < hi] # indices remaining in bin_X mask
while kept.shape[0]:
mid = (lo.take(kept) + hi.take(kept)) // 2
left_idx, right_idx = _find_matching_indices(tree,
bin_X.take(kept),
left_masks[mid],
right_masks[mid])
found = right_idx > left_idx
mid_found = mid[found]
lo[kept[found]] = mid_found + 1
res[kept[found]] = mid_found
hi[kept[~found]] = mid[~found]
kept = r[lo < hi]
return res
class ProjectionToHashMixin(object):
"""Turn a transformed real-valued array into a hash"""
@staticmethod
def _to_hash(projected):
if projected.shape[1] % 8 != 0:
raise ValueError('Require reduced dimensionality to be a multiple '
'of 8 for hashing')
# XXX: perhaps non-copying operation better
out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE)
return out.reshape(projected.shape[0], -1)
def fit_transform(self, X, y=None):
self.fit(X)
return self.transform(X)
def transform(self, X, y=None):
return self._to_hash(super(ProjectionToHashMixin, self).transform(X))
class GaussianRandomProjectionHash(ProjectionToHashMixin,
GaussianRandomProjection):
"""Use GaussianRandomProjection to produce a cosine LSH fingerprint"""
def __init__(self,
n_components=8,
random_state=None):
super(GaussianRandomProjectionHash, self).__init__(
n_components=n_components,
random_state=random_state)
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
"""Performs approximate nearest neighbor search using LSH forest.
LSH Forest: Locality Sensitive Hashing forest [1] is an alternative
to vanilla approximate nearest neighbor search methods.
LSH forest data structure has been implemented using sorted
arrays and binary search and 32 bit fixed-length hashes.
Random projection is used as the hash family which approximates
cosine distance.
The cosine distance is defined as ``1 - cosine_similarity``: the lowest
value is 0 (identical point) but it is bounded above by 2 for the farthest
points. Its value does not depend on the norm of the vector points but
only on their relative angles.
Read more in the :ref:`User Guide <approximate_nearest_neighbors>`.
Parameters
----------
n_estimators : int (default = 10)
Number of trees in the LSH Forest.
min_hash_match : int (default = 4)
lowest hash length to be searched when candidate selection is
performed for nearest neighbors.
n_candidates : int (default = 10)
Minimum number of candidates evaluated per estimator, assuming enough
items meet the `min_hash_match` constraint.
n_neighbors : int (default = 5)
Number of neighbors to be returned from query function when
it is not provided to the :meth:`kneighbors` method.
radius : float, optional (default = 1.0)
Radius from the data point to its neighbors. This is the parameter
space to use by default for the :meth:`radius_neighbors` queries.
radius_cutoff_ratio : float, optional (default = 0.9)
A value in the range [0, 1]. Radius neighbors will be searched until
the ratio between total neighbors within the radius and the total
candidates becomes less than this value unless it is terminated by
hash length reaching `min_hash_match`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
hash_functions_ : list of GaussianRandomProjectionHash objects
Hash function g(p,x) for a tree is an array of 32 randomly generated
float arrays with the same dimension as the data set. This array is
stored in GaussianRandomProjectionHash object and can be obtained
from ``components_`` attribute.
trees_ : array, shape (n_estimators, n_samples)
Each tree (corresponding to a hash function) contains an array of
sorted hashed values. The array representation may change in future
versions.
original_indices_ : array, shape (n_estimators, n_samples)
Original indices of sorted hashed values in the fitted index.
References
----------
.. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
Indexes for Similarity Search", WWW '05 Proceedings of the
14th international conference on World Wide Web, 651-660,
2005.
Examples
--------
>>> from sklearn.neighbors import LSHForest
>>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
>>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
>>> lshf = LSHForest(random_state=42)
>>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE
LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
random_state=42)
>>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
>>> distances # doctest: +ELLIPSIS
array([[ 0.069..., 0.149...],
[ 0.229..., 0.481...],
[ 0.004..., 0.014...]])
>>> indices
array([[1, 2],
[2, 0],
[4, 0]])
"""
def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
random_state=None):
self.n_estimators = n_estimators
self.radius = radius
self.random_state = random_state
self.n_candidates = n_candidates
self.n_neighbors = n_neighbors
self.min_hash_match = min_hash_match
self.radius_cutoff_ratio = radius_cutoff_ratio
def _compute_distances(self, query, candidates):
"""Computes the cosine distance.
Distance is from the query to points in the candidates array.
Returns argsort of distances in the candidates
array and sorted distances.
"""
if candidates.shape == (0,):
# needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
return np.empty(0, dtype=np.int), np.empty(0, dtype=float)
if sparse.issparse(self._fit_X):
candidate_X = self._fit_X[candidates]
else:
candidate_X = self._fit_X.take(candidates, axis=0, mode='clip')
distances = pairwise_distances(query, candidate_X,
metric='cosine')[0]
distance_positions = np.argsort(distances)
distances = distances.take(distance_positions, mode='clip', axis=0)
return distance_positions, distances
def _generate_masks(self):
"""Creates left and right masks for all hash lengths."""
tri_size = MAX_HASH_SIZE + 1
# Called once on fitting, output is independent of hashes
left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
right_mask = left_mask[::-1, ::-1]
self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
"""Performs the Synchronous ascending phase.
Returns an array of candidates, their distance ranks and
distances.
"""
index_size = self._fit_X.shape[0]
# Number of candidates considered including duplicates
# XXX: not sure whether this is being calculated correctly wrt
# duplicates from different iterations through a single tree
n_candidates = 0
candidate_set = set()
min_candidates = self.n_candidates * self.n_estimators
while (max_depth > self.min_hash_match and
(n_candidates < min_candidates or
len(candidate_set) < n_neighbors)):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
n_candidates += stop - start
candidate_set.update(
self.original_indices_[i][start:stop].tolist())
max_depth -= 1
candidates = np.fromiter(candidate_set, count=len(candidate_set),
dtype=np.intp)
# For insufficient candidates, candidates are filled.
# Candidates are filled from unselected indices uniformly.
if candidates.shape[0] < n_neighbors:
warnings.warn(
"Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (n_neighbors, self.min_hash_match))
remaining = np.setdiff1d(np.arange(0, index_size), candidates)
to_fill = n_neighbors - candidates.shape[0]
candidates = np.concatenate((candidates, remaining[:to_fill]))
ranks, distances = self._compute_distances(query,
candidates.astype(int))
return (candidates[ranks[:n_neighbors]],
distances[:n_neighbors])
def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
"""Finds radius neighbors from the candidates obtained.
Their distances from query are smaller than radius.
Returns radius neighbors and distances.
"""
ratio_within_radius = 1
threshold = 1 - self.radius_cutoff_ratio
total_candidates = np.array([], dtype=int)
total_neighbors = np.array([], dtype=int)
total_distances = np.array([], dtype=float)
while (max_depth > self.min_hash_match and
ratio_within_radius > threshold):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
candidates = []
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
candidates.extend(
self.original_indices_[i][start:stop].tolist())
candidates = np.setdiff1d(candidates, total_candidates)
total_candidates = np.append(total_candidates, candidates)
ranks, distances = self._compute_distances(query, candidates)
m = np.searchsorted(distances, radius, side='right')
positions = np.searchsorted(total_distances, distances[:m])
total_neighbors = np.insert(total_neighbors, positions,
candidates[ranks[:m]])
total_distances = np.insert(total_distances, positions,
distances[:m])
ratio_within_radius = (total_neighbors.shape[0] /
float(total_candidates.shape[0]))
max_depth = max_depth - 1
return total_neighbors, total_distances
def fit(self, X, y=None):
"""Fit the LSH forest on the data.
This creates binary hashes of input data points by getting the
dot product of input points and hash_function then
transforming the projection into a binary string array based
on the sign (positive/negative) of the projection.
A sorted array of binary hashes is created.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self : object
Returns self.
"""
self._fit_X = check_array(X, accept_sparse='csr')
# Creates a g(p,x) for each tree
self.hash_functions_ = []
self.trees_ = []
self.original_indices_ = []
rng = check_random_state(self.random_state)
int_max = np.iinfo(np.int32).max
for i in range(self.n_estimators):
# This is g(p,x) for a particular tree.
# Builds a single tree. Hashing is done on an array of data points.
# `GaussianRandomProjection` is used for hashing.
# n_components = hash size and n_features = n_dim.
hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
rng.randint(0, int_max))
hashes = hasher.fit_transform(self._fit_X)[:, 0]
original_index = np.argsort(hashes)
bin_hashes = hashes[original_index]
self.original_indices_.append(original_index)
self.trees_.append(bin_hashes)
self.hash_functions_.append(hasher)
self._generate_masks()
return self
def _query(self, X):
"""Performs descending phase to find maximum depth."""
# Calculate hashes of shape (n_samples, n_estimators, [hash_size])
bin_queries = np.asarray([hasher.transform(X)[:, 0]
for hasher in self.hash_functions_])
bin_queries = np.rollaxis(bin_queries, 1)
# descend phase
depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE,
self._left_mask, self._right_mask)
for tree, tree_queries in zip(self.trees_,
np.rollaxis(bin_queries, 1))]
return bin_queries, np.max(depths, axis=0)
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Returns n_neighbors of approximate nearest neighbors.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
n_neighbors : int, optional (default = None)
Number of neighbors required. If not provided, this will
return the number specified at the initialization.
return_distance : boolean, optional (default = False)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples, n_neighbors)
Array representing the cosine distances to each point,
only present if return_distance=True.
ind : array, shape (n_samples, n_neighbors)
Indices of the approximate nearest points in the population
matrix.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_candidates(X[[i]], max_depth[i],
bin_queries[i],
n_neighbors)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return np.array(distances), np.array(neighbors)
else:
return np.array(neighbors)
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of some points from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
LSH Forest being an approximate method, some true neighbors from the
indexed dataset might be missing from the results.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional (default = False)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples,) of arrays
Each element is an array representing the cosine distances
to some points found within ``radius`` of the respective query.
Only present if ``return_distance=True``.
ind : array, shape (n_samples,) of arrays
Each element is an array of indices for neighbors within ``radius``
of the respective query.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if radius is None:
radius = self.radius
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_radius_neighbors(X[[i]], max_depth[i],
bin_queries[i], radius)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return _array_of_arrays(distances), _array_of_arrays(neighbors)
else:
return _array_of_arrays(neighbors)
def partial_fit(self, X, y=None):
"""
Inserts new data into the already fitted LSH Forest.
Cost is proportional to new total size, so additions
should be batched.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
New data point to be inserted into the LSH Forest.
"""
X = check_array(X, accept_sparse='csr')
if not hasattr(self, 'hash_functions_'):
return self.fit(X)
if X.shape[1] != self._fit_X.shape[1]:
raise ValueError("Number of features in X and"
" fitted array does not match.")
n_samples = X.shape[0]
n_indexed = self._fit_X.shape[0]
for i in range(self.n_estimators):
bin_X = self.hash_functions_[i].transform(X)[:, 0]
# gets the position to be added in the tree.
positions = self.trees_[i].searchsorted(bin_X)
# adds the hashed value into the tree.
self.trees_[i] = np.insert(self.trees_[i],
positions, bin_X)
# add the entry into the original_indices_.
self.original_indices_[i] = np.insert(self.original_indices_[i],
positions,
np.arange(n_indexed,
n_indexed +
n_samples))
# adds the entry into the input_array.
if sparse.issparse(X) or sparse.issparse(self._fit_X):
self._fit_X = sparse.vstack((self._fit_X, X))
else:
self._fit_X = np.row_stack((self._fit_X, X))
return self
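# --- Illustrative usage (editor's sketch, not part of the original module) ---
# Complements the doctest in the class docstring above with the two features
# it does not cover: radius queries and incremental indexing via partial_fit.
# The data values are arbitrary.
if __name__ == '__main__':
    rng = np.random.RandomState(42)
    X_index = rng.random_sample((20, 5))
    X_query = rng.random_sample((3, 5))
    lshf = LSHForest(random_state=42).fit(X_index)
    # Neighbors of each query whose cosine distance is at most 0.4
    dists, idxs = lshf.radius_neighbors(X_query, radius=0.4)
    print('neighbors within radius of the first query: %s' % idxs[0])
    # Grow the index without rebuilding it from scratch
    lshf.partial_fit(rng.random_sample((10, 5)))
    dists2, idxs2 = lshf.radius_neighbors(X_query, radius=0.4)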
|
bsd-3-clause
|
depet/scikit-learn
|
sklearn/preprocessing/label.py
|
1
|
12652
|
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import unique
from ..utils import deprecated, column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
]
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Attributes
----------
`classes_` : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
"""
def _check_fitted(self):
if not hasattr(self, "classes_"):
raise ValueError("LabelEncoder was not fitted yet.")
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
self.classes_ = np.unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
self.classes_, y = unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
self._check_fitted()
classes = np.unique(y)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(self.classes_, y)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
self._check_fitted()
y = np.asarray(y)
return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
Attributes
----------
`classes_` : array of shape [n_class]
Holds the label for each class.
`multilabel_` : boolean
True if the transformer was fitted on a multilabel rather than a
multiclass set of labels.
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.multilabel_
False
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
>>> lb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> lb.classes_
array([1, 2, 3])
>>> lb.multilabel_
True
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
"""
def __init__(self, neg_label=0, pos_label=1):
if neg_label >= pos_label:
raise ValueError("neg_label must be strictly less than pos_label.")
self.neg_label = neg_label
self.pos_label = pos_label
@property
@deprecated("Attribute `multilabel` was renamed to `multilabel_` in "
"0.14 and will be removed in 0.16")
def multilabel(self):
return self.multilabel_
def _check_fitted(self):
if not hasattr(self, "classes_"):
raise ValueError("LabelBinarizer was not fitted yet.")
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : numpy array of shape (n_samples,) or sequence of sequences
Target values. In the multilabel case the nested sequences can
have variable lengths.
Returns
-------
self : returns an instance of self.
"""
y_type = type_of_target(y)
self.multilabel_ = y_type.startswith('multilabel')
if self.multilabel_:
self.indicator_matrix_ = y_type == 'multilabel-indicator'
self.classes_ = unique_labels(y)
return self
def transform(self, y):
"""Transform multi-class labels to binary labels
The output of transform is sometimes referred to by some authors as the
1-of-K coding scheme.
Parameters
----------
y : numpy array of shape [n_samples] or sequence of sequences
Target values. In the multilabel case the nested sequences can
have variable lengths.
Returns
-------
Y : numpy array of shape [n_samples, n_classes]
"""
self._check_fitted()
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.multilabel_:
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
multilabel=self.multilabel_,
pos_label=self.pos_label,
neg_label=self.neg_label)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array of shape [n_samples, n_classes]
Target values.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when:
- Y contains the output of decision_function (classifier)
Use 0.5 when:
- Y contains the output of predict_proba
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
y : numpy array of shape [n_samples] or sequence of sequences
Target values. In the multilabel case the nested sequences can
have variable lengths.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
greatest value. Typically, this allows using the output of a
linear model's decision_function method directly as the input
of inverse_transform.
"""
self._check_fitted()
if threshold is None:
half = (self.pos_label - self.neg_label) / 2.0
threshold = self.neg_label + half
if self.multilabel_:
Y = np.array(Y > threshold, dtype=int)
# Return the predictions in the same format as in fit
if self.indicator_matrix_:
# Label indicator matrix format
return Y
else:
# Lists of tuples format
return [tuple(self.classes_[np.flatnonzero(Y[i])])
for i in range(Y.shape[0])]
if len(Y.shape) == 1 or Y.shape[1] == 1:
y = np.array(Y.ravel() > threshold, dtype=int)
else:
y = Y.argmax(axis=1)
return self.classes_[y]
def label_binarize(y, classes, multilabel=False, neg_label=0, pos_label=1):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels to encode.
classes : array of shape [n_classes]
Uniquely holds the label for each class.
multilabel : boolean
Set to true if y is encoding a multilabel task (with a variable
number of label assignments per sample) rather than a multiclass task
where one sample has one and only one label assigned.
neg_label: int (default: 0)
Value with which negative labels must be encoded.
pos_label: int (default: 1)
Value with which positive labels must be encoded.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
>>> label_binarize([(1, 2), (6,), ()], multilabel=True,
... classes=[1, 6, 4, 2])
array([[1, 0, 0, 1],
[0, 1, 0, 0],
[0, 0, 0, 0]])
See also
--------
LabelBinarizer : class to perform the same transformation through a
fit/transform interface with a fitted set of classes.
"""
y_type = type_of_target(y)
if multilabel or len(classes) > 2:
if y_type == 'multilabel-indicator':
# nothing to do as y is already a label indicator matrix
return y
Y = np.zeros((len(y), len(classes)), dtype=np.int)
else:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
y_is_multilabel = y_type.startswith('multilabel')
if multilabel:
if not y_is_multilabel:
raise ValueError("y should be a list of label lists/tuples, "
"got %r" % (y,))
# inverse map: label => column index
imap = dict((v, k) for k, v in enumerate(classes))
for i, label_tuple in enumerate(y):
for label in label_tuple:
Y[i, imap[label]] = pos_label
return Y
else:
y = column_or_1d(y)
if len(classes) == 2:
Y[y == classes[1], 0] = pos_label
return Y
elif len(classes) >= 2:
for i, k in enumerate(classes):
Y[y == k, i] = pos_label
return Y
else:
# Only one class, returns a matrix with all negative labels.
return Y
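# --- Illustrative usage (editor's sketch, not part of the original module) ---
# The inverse_transform threshold logic documented above: with real-valued
# scores (e.g. the output of decision_function) the class whose column has the
# largest value wins in the multiclass case.  The numbers are arbitrary.
if __name__ == '__main__':
    lb = LabelBinarizer().fit([1, 2, 6, 4, 2])     # classes_ == [1, 2, 4, 6]
    scores = np.array([[-0.4, 0.1, 2.3, -1.0],
                       [1.7, 0.2, -0.3, 0.1]])
    print(lb.inverse_transform(scores))            # prints [4 1]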
|
bsd-3-clause
|
rlowrance/python_lib
|
applied_data_science3/summarize.py
|
2
|
1627
|
'''
Copyright 2017 Roy E. Lowrance
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import pandas as pd
import pdb
from pprint import pprint
from . import columns_contain
cc = columns_contain.columns_contain
def summarize(df):
'''return dataframe summarizing df
result.index = df.columns
result.column = attributes of the columns in df
'''
description = df.describe()
# print description
print(df.shape)
print(description.shape)
rows = []
for column_name in df.columns:
# print column_name
if column_name not in description.columns:
# non-numeric columns are omitted from description
print('description is missing', column_name)
continue
series = df[column_name]
d = {}
d['number_nan'] = sum(series.isnull())
d['number_distinct'] = len(series.unique())
for statistic_name in description.index:
d[statistic_name] = description[column_name][statistic_name]
rows.append(d)
result = pd.DataFrame(data=rows, index=description.columns)
return result
if False:
pprint()
pdb.set_trace()
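# --- Illustrative usage (editor's sketch, not part of the original module) ---
# summarize() reports the describe() statistics plus NaN and distinct counts
# for each numeric column; non-numeric columns are skipped with a message.
if __name__ == '__main__':
    example = pd.DataFrame({'a': [1.0, 2.0, None, 4.0],
                            'b': [10, 10, 30, 40],
                            'c': ['x', 'y', 'z', 'x']})
    print(summarize(example))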
|
apache-2.0
|
leesavide/pythonista-docs
|
Documentation/matplotlib/examples/user_interfaces/embedding_in_tk.py
|
9
|
1419
|
#!/usr/bin/env python
import matplotlib
matplotlib.use('TkAgg')
from numpy import arange, sin, pi
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
# implement the default mpl key bindings
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
import sys
if sys.version_info[0] < 3:
import Tkinter as Tk
else:
import tkinter as Tk
root = Tk.Tk()
root.wm_title("Embedding in TK")
f = Figure(figsize=(5,4), dpi=100)
a = f.add_subplot(111)
t = arange(0.0,3.0,0.01)
s = sin(2*pi*t)
a.plot(t,s)
# a tk.DrawingArea
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
toolbar = NavigationToolbar2TkAgg( canvas, root )
toolbar.update()
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
def on_key_event(event):
print('you pressed %s'%event.key)
key_press_handler(event, canvas, toolbar)
canvas.mpl_connect('key_press_event', on_key_event)
def _quit():
root.quit() # stops mainloop
root.destroy() # this is necessary on Windows to prevent
# Fatal Python Error: PyEval_RestoreThread: NULL tstate
button = Tk.Button(master=root, text='Quit', command=_quit)
button.pack(side=Tk.BOTTOM)
Tk.mainloop()
# If you put root.destroy() here, it will cause an error if
# the window is closed with the window manager.
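# Editorial note: this example targets older Matplotlib releases. On Matplotlib >= 2.2
# the same setup would look roughly as follows (a sketch; exact names are version-dependent):
#
# from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
# canvas = FigureCanvasTkAgg(f, master=root)
# canvas.draw()                                  # replaces the deprecated canvas.show()
# toolbar = NavigationToolbar2Tk(canvas, root)   # replaces NavigationToolbar2TkAgg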
|
apache-2.0
|
hovren/crisp
|
crisp/pose.py
|
2
|
6831
|
# -*- coding: utf-8 -*-
"""
Relative pose calibration module
"""
from __future__ import division, print_function, absolute_import
__author__ = "Hannes Ovrén"
__copyright__ = "Copyright 2013, Hannes Ovrén"
__license__ = "GPL"
__email__ = "[email protected]"
import logging
logger = logging.getLogger()
import numpy as np
import matplotlib.pyplot as plt
import cv2
from . import timesync
from . import tracking
from . import rotations
def estimate_pose(image_sequences, imu_sequences, K):
"""Estimate sync between IMU and camera based on gyro readings and optical flow.
The user should first create at least two sequences of corresponding image and
gyroscope data.
From each sequence we calculate the rotation axis (one from images, one from IMU/gyro).
The final set of len(image_sequences) corresponding rotation axes are then used to calculate
the relative pose between the IMU and camera.
The returned rotation is such that it transfers vectors in the gyroscope coordinate
frame to the camera coordinate frame:
X_camera = R * X_gyro
Parameters
------------
image_sequences : list of list of ndarrays
List of image sequences (list of ndarrays) to use. Must have at least two sequences.
imu_sequences : list of (3, N) ndarray
Sequence of gyroscope measurements (angular velocities).
K : (3,3) ndarray
Camera calibration matrix
Returns
-----------
    R : (3,3) ndarray
        The relative pose (gyro-to-camera) such that X_camera = R * X_gyro
    t : ndarray
        Translation term from the final Procrustes fit, returned together with R
"""
assert len(image_sequences) == len(imu_sequences)
assert len(image_sequences) >= 2
# Note: list(image_sequence) here makes sure any generator type input is expanded to an actual list
sync_correspondences = [_get_point_correspondences(list(image_sequence)) for image_sequence in image_sequences]
    # Procrustes on corresponding pairs
PROCRUSTES_MAX_POINTS = 15 # Number of tracks/points to use for procrustes
logger.debug("Running procrustes on track-retrack results")
image_rotation_axes = []
for i, points in enumerate(sync_correspondences):
if points.size < 1:
logger.error('Shape of points are %s', str(points.shape))
raise Exception("Did not get enough points when tracking")
num_points_to_use = min(PROCRUSTES_MAX_POINTS, points.shape[0])
logger.debug("Using %d tracks to calculate procrustes", num_points_to_use)
idxs_to_use = np.random.permutation(points.shape[0])[:num_points_to_use]
assert points.shape[-1] == 2
x = points[idxs_to_use,0,:].T.reshape(2,-1)
y = points[idxs_to_use,-1,:].T.reshape(2,-1)
x = np.vstack((x, np.ones((1, x.shape[1]))))
y = np.vstack((y, np.ones((1, y.shape[1]))))
K_inv = np.linalg.inv(K)
X = K_inv.dot(x)
Y = K_inv.dot(y)
# Depth must be positive
(R, t) = rotations.procrustes(X, Y, remove_mean=False) # X = R * Y + t
(v, theta) = rotations.rotation_matrix_to_axis_angle(R)
image_rotation_axes.append(v) # Save rotation axis
# Check the quality via the mean reprojection error
mean_error = np.mean(np.sqrt(np.sum((X - R.dot(Y))**2, axis=0)))
MEAN_ERROR_LIMIT = 0.1 # Arbitrarily chosen limit (in meters)
logger.debug('Image sequence %d: Rotation axis %s, degrees %.2f, mean error %.3f',
i, v, np.rad2deg(theta), mean_error)
if mean_error > MEAN_ERROR_LIMIT:
logger.warning("Procrustes solution mean error %.3f > %.3f", mean_error, MEAN_ERROR_LIMIT)
    # Gyro principal rotation axis
gyro_rotation_axes = []
for i, gyro_seq in enumerate(imu_sequences):
assert gyro_seq.shape[0] == 3
v = principal_rotation_axis(gyro_seq)
logger.debug('Gyro sequence %d: Rotation axis %s', i, v)
gyro_rotation_axes.append(v)
    # Procrustes to get rotation between coordinate frames
X = np.vstack(image_rotation_axes).T
Y = np.vstack(gyro_rotation_axes).T
(R,t) = rotations.procrustes(X, Y, remove_mean=False)
return (R, t)
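# Editorial note: a hedged usage sketch, not part of the original module. The
# names frames_a, frames_b, gyro_a, gyro_b and the camera matrix values are
# placeholders; matching sequences are typically chosen with pick_manual().
#
# image_seqs = [frames_a, frames_b]          # two lists of grayscale frames
# gyro_seqs = [gyro_a, gyro_b]               # two (3, N) angular-velocity arrays
# K = np.array([[850., 0., 320.],
#               [0., 850., 240.],
#               [0., 0., 1.]])               # intrinsic camera matrix (example values)
# R, t = estimate_pose(image_seqs, gyro_seqs, K)
# x_cam = R.dot(x_gyro)                      # map a gyro-frame vector into the camera frame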
#--------------------------------------------------------------------------
def pick_manual(image_sequence, imu_gyro, num_sequences=2):
"""Select N matching sequences and return data indices.
Parameters
---------------
image_sequence : list_like
A list, or generator, of image data
imu_gyro : (3, N) ndarray
Gyroscope data (angular velocities)
num_sequences : int
The number of matching sequences to pick
Returns
----------------
sync_sequences : list
List of (frame_pair, gyro_pair) tuples where each pair contains
(a, b) which are indices of the (inclusive) range [a, b] that was chosen
"""
assert num_sequences >= 2
# Create optical flow for user to select parts in
logger.info("Calculating optical flow")
flow = tracking.optical_flow_magnitude(image_sequence)
    # Prompt user for sync slices
logger.debug("Prompting user for %d sequences" % num_sequences)
imu_fake_timestamps = np.linspace(0,1,num=imu_gyro.shape[1])
sync_sequences = [timesync.manual_sync_pick(flow, imu_fake_timestamps, imu_gyro) for i in range(num_sequences)]
return sync_sequences
#--------------------------------------------------------------------------
def principal_rotation_axis(gyro_data):
"""Get the principal rotation axis of angular velocity measurements.
Parameters
-------------
gyro_data : (3, N) ndarray
Angular velocity measurements
Returns
-------------
v : (3,1) ndarray
The principal rotation axis for the chosen sequence
"""
N = np.zeros((3,3))
for x in gyro_data.T: # Transpose because samples are stored as columns
y = x.reshape(3,1)
N += y.dot(y.T)
(eig_val, eig_vec) = np.linalg.eig(N)
i = np.argmax(eig_val)
v = eig_vec[:,i]
# Make sure v has correct sign
s = 0
for x in gyro_data.T: # Transpose because samples are stored as columns
s += v.T.dot(x.reshape(3,1))
v *= np.sign(s)
return v
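# Editorial note: the two loops above are equivalent to the vectorized forms
#
#     N = gyro_data.dot(gyro_data.T)        # 3x3 scatter matrix of the samples
#     s = v.dot(gyro_data.sum(axis=1))      # sign check against the summed samples
#
# so v is the dominant eigenvector of the sample scatter matrix, sign-corrected
# to point along the bulk of the angular-velocity measurements.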
#--------------------------------------------------------------------------
def _get_point_correspondences(image_list, max_corners=200, min_distance=5, quality_level=0.07):
max_retrack_distance = 0.5
initial_points = cv2.goodFeaturesToTrack(image_list[0], max_corners, quality_level, min_distance)
(tracks, status) = tracking.track_retrack(image_list, initial_points=initial_points, max_retrack_distance=max_retrack_distance) # Status is ignored
return tracks[:,(0,-1),:] # First and last frame only
|
gpl-3.0
|
xccui/flink
|
flink-python/pyflink/table/tests/test_row_based_operation.py
|
1
|
15438
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pandas.util.testing import assert_frame_equal
from pyflink.common import Row
from pyflink.table import expressions as expr, ListView
from pyflink.table.types import DataTypes
from pyflink.table.udf import udf, udtf, udaf, AggregateFunction, TableAggregateFunction, udtaf
from pyflink.testing import source_sink_utils
from pyflink.testing.test_case_utils import PyFlinkBlinkBatchTableTestCase, \
PyFlinkBlinkStreamTableTestCase
class RowBasedOperationTests(object):
def test_map(self):
t = self.t_env.from_elements(
[(1, 2, 3), (2, 1, 3), (1, 5, 4), (1, 8, 6), (2, 3, 4)],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b", DataTypes.SMALLINT()),
DataTypes.FIELD("c", DataTypes.INT())]))
table_sink = source_sink_utils.TestAppendSink(
['a', 'b'],
[DataTypes.BIGINT(), DataTypes.BIGINT()])
self.t_env.register_table_sink("Results", table_sink)
func = udf(lambda x: Row(x + 1, x * x), result_type=DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.BIGINT()),
DataTypes.FIELD("b", DataTypes.BIGINT())]))
t.map(func(t.b)).alias("a", "b") \
.map(func(t.a)).alias("a", "b") \
.execute_insert("Results") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["4,9", "3,4", "7,36", "10,81", "5,16"])
def test_map_with_pandas_udf(self):
t = self.t_env.from_elements(
[(1, Row(2, 3)), (2, Row(1, 3)), (1, Row(5, 4)), (1, Row(8, 6)), (2, Row(3, 4))],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b",
DataTypes.ROW([DataTypes.FIELD("a", DataTypes.INT()),
DataTypes.FIELD("b", DataTypes.INT())]))]))
table_sink = source_sink_utils.TestAppendSink(
['a', 'b'],
[DataTypes.BIGINT(), DataTypes.BIGINT()])
self.t_env.register_table_sink("Results", table_sink)
def func(x, y):
import pandas as pd
a = (x * 2).rename('b')
res = pd.concat([a, x], axis=1) + y
return res
pandas_udf = udf(func,
result_type=DataTypes.ROW(
[DataTypes.FIELD("c", DataTypes.BIGINT()),
DataTypes.FIELD("d", DataTypes.BIGINT())]),
func_type='pandas')
t.map(pandas_udf(t.a, t.b)).execute_insert("Results").wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["3,5", "3,7", "6,6", "9,8", "5,8"])
def test_flat_map(self):
t = self.t_env.from_elements(
[(1, "2,3", 3), (2, "1", 3), (1, "5,6,7", 4)],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b", DataTypes.STRING()),
DataTypes.FIELD("c", DataTypes.INT())]))
table_sink = source_sink_utils.TestAppendSink(
['a', 'b'],
[DataTypes.BIGINT(), DataTypes.STRING()])
self.t_env.register_table_sink("Results", table_sink)
@udtf(result_types=[DataTypes.INT(), DataTypes.STRING()])
def split(x, string):
for s in string.split(","):
yield x, s
t.flat_map(split(t.a, t.b)) \
.alias("a, b") \
.flat_map(split(t.a, t.b)) \
.execute_insert("Results") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["1,2", "1,3", "2,1", "1,5", "1,6", "1,7"])
class BatchRowBasedOperationITTests(RowBasedOperationTests, PyFlinkBlinkBatchTableTestCase):
def test_aggregate_with_pandas_udaf(self):
t = self.t_env.from_elements(
[(1, 2, 3), (2, 1, 3), (1, 5, 4), (1, 8, 6), (2, 3, 4)],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b", DataTypes.SMALLINT()),
DataTypes.FIELD("c", DataTypes.INT())]))
table_sink = source_sink_utils.TestAppendSink(
['a', 'b', 'c'],
[DataTypes.TINYINT(), DataTypes.FLOAT(), DataTypes.INT()])
self.t_env.register_table_sink("Results", table_sink)
pandas_udaf = udaf(lambda a: (a.mean(), a.max()),
result_type=DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.FLOAT()),
DataTypes.FIELD("b", DataTypes.INT())]),
func_type="pandas")
t.group_by(t.a) \
.aggregate(pandas_udaf(t.b).alias("c", "d")) \
.select("a, c, d").execute_insert("Results") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["1,5.0,8", "2,2.0,3"])
def test_aggregate_with_pandas_udaf_without_keys(self):
t = self.t_env.from_elements(
[(1, 2, 3), (2, 1, 3), (1, 5, 4), (1, 8, 6), (2, 3, 4)],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b", DataTypes.SMALLINT()),
DataTypes.FIELD("c", DataTypes.INT())]))
table_sink = source_sink_utils.TestAppendSink(
['a', 'b'],
[DataTypes.FLOAT(), DataTypes.INT()])
self.t_env.register_table_sink("Results", table_sink)
pandas_udaf = udaf(lambda a: Row(a.mean(), a.max()),
result_type=DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.FLOAT()),
DataTypes.FIELD("b", DataTypes.INT())]),
func_type="pandas")
t.aggregate(pandas_udaf(t.b).alias("c", "d")) \
.select("c, d").execute_insert("Results") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["3.8,8"])
def test_window_aggregate_with_pandas_udaf(self):
import datetime
from pyflink.table.window import Tumble
t = self.t_env.from_elements(
[
(1, 2, 3, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
(3, 2, 4, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
(2, 1, 2, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
(1, 3, 1, datetime.datetime(2018, 3, 11, 3, 40, 0, 0)),
(1, 8, 5, datetime.datetime(2018, 3, 11, 4, 20, 0, 0)),
(2, 3, 6, datetime.datetime(2018, 3, 11, 3, 30, 0, 0))
],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b", DataTypes.SMALLINT()),
DataTypes.FIELD("c", DataTypes.INT()),
DataTypes.FIELD("rowtime", DataTypes.TIMESTAMP(3))]))
table_sink = source_sink_utils.TestAppendSink(
['a', 'b', 'c'],
[
DataTypes.TIMESTAMP(3),
DataTypes.FLOAT(),
DataTypes.INT()
])
self.t_env.register_table_sink("Results", table_sink)
pandas_udaf = udaf(lambda a: (a.mean(), a.max()),
result_type=DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.FLOAT()),
DataTypes.FIELD("b", DataTypes.INT())]),
func_type="pandas")
tumble_window = Tumble.over(expr.lit(1).hours) \
.on(expr.col("rowtime")) \
.alias("w")
t.window(tumble_window) \
.group_by("w") \
.aggregate(pandas_udaf(t.b).alias("d", "e")) \
.select("w.rowtime, d, e") \
.execute_insert("Results") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual,
["2018-03-11 03:59:59.999,2.2,3",
"2018-03-11 04:59:59.999,8.0,8"])
class StreamRowBasedOperationITTests(RowBasedOperationTests, PyFlinkBlinkStreamTableTestCase):
def test_aggregate(self):
import pandas as pd
t = self.t_env.from_elements(
[(1, 2, 3), (2, 1, 3), (1, 5, 4), (1, 8, 6), (2, 3, 4)],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.BIGINT()),
DataTypes.FIELD("b", DataTypes.SMALLINT()),
DataTypes.FIELD("c", DataTypes.INT())]))
function = CountAndSumAggregateFunction()
agg = udaf(function,
result_type=function.get_result_type(),
accumulator_type=function.get_accumulator_type(),
name=str(function.__class__.__name__))
result = t.group_by(t.a) \
.aggregate(agg(t.b).alias("c", "d")) \
.select("a, c, d") \
.to_pandas()
assert_frame_equal(result, pd.DataFrame([[1, 3, 15], [2, 2, 4]], columns=['a', 'c', 'd']))
def test_flat_aggregate(self):
import pandas as pd
self.t_env.register_function("mytop", Top2())
t = self.t_env.from_elements([(1, 'Hi', 'Hello'),
(3, 'Hi', 'hi'),
(5, 'Hi2', 'hi'),
(7, 'Hi', 'Hello'),
(2, 'Hi', 'Hello')], ['a', 'b', 'c'])
result = t.group_by("c") \
.flat_aggregate("mytop(a)") \
.select("c, a") \
.flat_aggregate("mytop(a)") \
.select("a") \
.to_pandas()
assert_frame_equal(result, pd.DataFrame([[7], [5]], columns=['a']))
def test_flat_aggregate_list_view(self):
import pandas as pd
my_concat = udtaf(ListViewConcatTableAggregateFunction())
self.t_env.get_config().get_configuration().set_string(
"python.fn-execution.bundle.size", "2")
# trigger the cache eviction in a bundle.
self.t_env.get_config().get_configuration().set_string(
"python.state.cache-size", "2")
t = self.t_env.from_elements([(1, 'Hi', 'Hello'),
(3, 'Hi', 'hi'),
(3, 'Hi2', 'hi'),
(3, 'Hi', 'hi'),
(2, 'Hi', 'Hello'),
(1, 'Hi2', 'Hello'),
(3, 'Hi3', 'hi'),
(3, 'Hi2', 'Hello'),
(3, 'Hi3', 'hi'),
(2, 'Hi3', 'Hello')], ['a', 'b', 'c'])
result = t.group_by(t.c) \
.flat_aggregate(my_concat(t.b, ',').alias("b")) \
.select(t.b, t.c) \
.alias("a, c")
assert_frame_equal(result.to_pandas(),
pd.DataFrame([["Hi,Hi2,Hi,Hi3,Hi3", "hi"],
["Hi,Hi2,Hi,Hi3,Hi3", "hi"],
["Hi,Hi,Hi2,Hi2,Hi3", "Hello"],
["Hi,Hi,Hi2,Hi2,Hi3", "Hello"]],
columns=['a', 'c']))
class CountAndSumAggregateFunction(AggregateFunction):
def get_value(self, accumulator):
from pyflink.common import Row
return Row(accumulator[0], accumulator[1])
def create_accumulator(self):
from pyflink.common import Row
return Row(0, 0)
def accumulate(self, accumulator, *args):
accumulator[0] += 1
accumulator[1] += args[0]
def retract(self, accumulator, *args):
accumulator[0] -= 1
accumulator[1] -= args[0]
def merge(self, accumulator, accumulators):
for other_acc in accumulators:
accumulator[0] += other_acc[0]
accumulator[1] += other_acc[1]
def get_accumulator_type(self):
return DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.BIGINT()),
DataTypes.FIELD("b", DataTypes.BIGINT())])
def get_result_type(self):
return DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.BIGINT()),
DataTypes.FIELD("b", DataTypes.BIGINT())])
class Top2(TableAggregateFunction):
def emit_value(self, accumulator):
yield Row(accumulator[0])
yield Row(accumulator[1])
def create_accumulator(self):
return [None, None]
def accumulate(self, accumulator, *args):
if args[0] is not None:
if accumulator[0] is None or args[0] > accumulator[0]:
accumulator[1] = accumulator[0]
accumulator[0] = args[0]
elif accumulator[1] is None or args[0] > accumulator[1]:
accumulator[1] = args[0]
    def retract(self, accumulator, *args):
        # note: not a true inverse of accumulate; this example simply decrements the stored maximum
        accumulator[0] = accumulator[0] - 1
def merge(self, accumulator, accumulators):
for other_acc in accumulators:
self.accumulate(accumulator, other_acc[0])
self.accumulate(accumulator, other_acc[1])
def get_accumulator_type(self):
return DataTypes.ARRAY(DataTypes.BIGINT())
def get_result_type(self):
return DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.BIGINT())])
class ListViewConcatTableAggregateFunction(TableAggregateFunction):
def emit_value(self, accumulator):
result = accumulator[1].join(accumulator[0])
yield Row(result)
yield Row(result)
def create_accumulator(self):
return Row(ListView(), '')
def accumulate(self, accumulator, *args):
accumulator[1] = args[1]
accumulator[0].add(args[0])
def retract(self, accumulator, *args):
raise NotImplementedError
def get_accumulator_type(self):
return DataTypes.ROW([
DataTypes.FIELD("f0", DataTypes.LIST_VIEW(DataTypes.STRING())),
DataTypes.FIELD("f1", DataTypes.BIGINT())])
def get_result_type(self):
return DataTypes.ROW([DataTypes.FIELD("a", DataTypes.STRING())])
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
apache-2.0
|
duthchao/kaggle-Otto
|
exp_XGB_RI_m_ntree.py
|
2
|
4061
|
"""
Experiment for XGBoost + RI
Aim: To find the best m and ntree(num_round)
m: [100, 120, 140, 160]
ntree: [140, 160, 180, 200, 220, 240, 260]
Averaging 20 models
Summary
loss ntree
m
100 0.450670 240
120 0.450491 220
140 0.449575 220
160 0.449249 220 *
Time: 2:56:52 on i7-4790k 32G MEM GTX660
I got a different result before I reinstalled ubuntu 14.04 LTS.
loss ntree
m
100 0.450663 240
120 0.449751 220
140 0.448961 220 *
160 0.449046 220
So I chose m=140, ntree=220.
"""
import numpy as np
import scipy as sp
import pandas as pd
from sklearn.cross_validation import StratifiedKFold
from sklearn.metrics import log_loss
from datetime import datetime
import os
import xgboost as xgb
from utility import *
path = os.getcwd() + '/'
path_log = path + 'logs/'
file_train = path + 'train.csv'
training = pd.read_csv(file_train, index_col = 0)
num_train = training.shape[0]
y = training['target'].values
yMat = pd.get_dummies(training['target']).values
X = training.iloc[:,:93].values
X1 = X / X.mean(0)
kf = StratifiedKFold(y, n_folds=5, shuffle = True, random_state = 345)
for train_idx, valid_idx in kf:
    break  # keep only the first stratified fold as the train/validation split
y_train_1 = yMat[train_idx].argmax(1)
y_train = yMat[train_idx]
y_valid = yMat[valid_idx]
#
nIter = 20
# RI
k = 2
# num_round
nt = 260
nt_lst = [140, 160, 180, 200, 220, 240, 260]
nt_len = len(nt_lst)
# max_depth
tc = 15
# colsample_bytree
cs = 50. / X.shape[1]
# min_child_weight
mb = 10
# eta
sh = .1
# subsample
bf = .8
scores = []
t0 = datetime.now()
for m in [100, 120, 140, 160]:
predAll_train = [np.zeros(y_train.shape) for i in range(nt_len)]
predAll_valid = [np.zeros(y_valid.shape) for i in range(nt_len)]
for i in range(nIter):
seed = i + 12398
X3 = RI(X1, m, k, normalize = False, seed = seed)
dtrain , dvalid= xgb.DMatrix(X3[train_idx], label = y_train_1), xgb.DMatrix(X3[valid_idx])
param = {'bst:max_depth':tc, 'bst:eta':sh, 'objective':'multi:softprob','num_class':9,
'min_child_weight':mb, 'subsample':bf, 'colsample_bytree':cs,
'nthread':8, 'seed':seed, 'silent':1}
plst = param.items()
bst = xgb.train(plst, dtrain, nt)
for j in range(nt_len):
ntree = nt_lst[j]
pred_train = bst.predict(dtrain, ntree_limit = ntree).reshape(y_train.shape)
pred_valid = bst.predict(dvalid, ntree_limit = ntree).reshape(y_valid.shape)
predAll_train[j] += pred_train
predAll_valid[j] += pred_valid
scores.append({'m':m, 'ntree':ntree, 'nModels': i + 1, 'seed':seed,
'train':log_loss(y_train, pred_train),
'valid':log_loss(y_valid, pred_valid),
'train_avg':log_loss(y_train, predAll_train[j] / (i + 1)),
'valid_avg':log_loss(y_valid, predAll_valid[j] / (i + 1))})
print scores[-1], datetime.now() - t0
df = pd.DataFrame(scores)
if os.path.exists(path_log) is False:
print 'mkdir', path_log
os.mkdir(path_log)
df.to_csv(path_log + 'exp_XGB_RI_m_ntree.csv')
keys = ['m', 'ntree']
grouped = df.groupby(keys)
print pd.DataFrame({'ntree':grouped['valid_avg'].last().unstack().idxmin(1),
'loss':grouped['valid_avg'].last().unstack().min(1)})
# loss ntree
# m
# 100 0.450670 240
# 120 0.450491 220
# 140 0.449575 220
# 160 0.449249 220
#
grouped = df[df['m'] == 140].groupby('ntree')
g = grouped[['valid']].mean()
g['valid_avg'] = grouped['valid_avg'].last()
print g
# valid valid_avg
# ntree
# 140 0.477779 0.454885
# 160 0.476271 0.452038
# 180 0.476112 0.450559
# 200 0.476564 0.449759
# 220 0.477543 0.449575
# 240 0.478995 0.449745
# 260 0.480710 0.450266
ax = g.plot()
ax.set_title('XGB+RI k=2, m=140')
ax.set_ylabel('Logloss')
fig = ax.get_figure()
fig.savefig(path_log + 'exp_XGB_RI_m_ntree.png')
|
mit
|